Merge pull request #67 from paritytech/dp/chore/undo-returning-ref

Revert some recent changes
David 2018-10-09 15:33:03 +02:00 committed by GitHub
commit e16441a842
15 changed files with 87 additions and 79 deletions

View File

@@ -18,4 +18,4 @@ members = [
"trie-standardmap",
"triehash",
"uint"
-]
+]

View File

@@ -1,6 +1,6 @@
[package]
name = "fixed-hash"
version = "0.2.3"
version = "0.2.4"
authors = ["Parity Technologies <admin@parity.io>"]
license = "MIT"
homepage = "https://github.com/paritytech/parity-common"

View File

@@ -376,7 +376,7 @@ macro_rules! impl_hash_conversions {
#[macro_export]
macro_rules! impl_hash_uint_conversions {
($hash: ident, $uint: ident) => {
-debug_assert_eq!(::core::mem::size_of::<$hash>(), ::core::mem::size_of::<$uint>());
+debug_assert_eq!(::core::mem::size_of::<$hash>(), ::core::mem::size_of::<$uint>(), "Can't convert between differently sized uint/hash.");
impl From<$uint> for $hash {
fn from(value: $uint) -> $hash {
let mut ret = $hash::new();
@@ -682,7 +682,7 @@ mod tests {
#[cfg(feature="uint_conversions")]
#[test]
-#[should_panic]
+#[should_panic(expected = "Can't convert between differently sized uint/hash.")]
fn converting_differently_sized_types_panics() {
use uint::U512;

View File

@@ -49,7 +49,7 @@ pub trait HashDB<H: Hasher, T>: Send + Sync + AsHashDB<H, T> {
/// Look up a given hash, returning the bytes that hash to it, or None if the
/// hash is not known.
-fn get(&self, key: &H::Out) -> Option<&T>;
+fn get(&self, key: &H::Out) -> Option<T>;
/// Check for the existence of a hash-key.
fn contains(&self, key: &H::Out) -> bool;
@@ -83,4 +83,3 @@ impl<'a, H: Hasher, T> AsHashDB<H, T> for &'a mut HashDB<H, T> {
fn as_hashdb(&self) -> &HashDB<H, T> { &**self }
fn as_hashdb_mut(&mut self) -> &mut HashDB<H, T> { &mut **self }
}
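
The hunk above is the heart of the revert: `HashDB::get` returns an owned `Option<T>` again instead of `Option<&T>`, which is what pushes the `Clone` bounds back onto every implementation below. A minimal sketch of why an owned return type demands `Clone`, using hypothetical `KvStore`/`MapStore` types rather than the real trait:

```rust
use std::collections::HashMap;

// Hypothetical stand-in for the HashDB shape after this revert.
trait KvStore<T> {
    // Returning `Option<T>` hands the caller its own copy, so the map
    // implementation below has to clone the stored value on every hit.
    fn get(&self, key: &[u8; 32]) -> Option<T>;
}

struct MapStore<T> {
    data: HashMap<[u8; 32], T>,
}

impl<T: Clone> KvStore<T> for MapStore<T> {
    fn get(&self, key: &[u8; 32]) -> Option<T> {
        // `cloned()` is the price of the owned return type; with
        // `Option<&T>` this would be a plain borrow and no copy.
        self.data.get(key).cloned()
    }
}

fn main() {
    let mut data = HashMap::new();
    data.insert([0u8; 32], vec![1u8, 2, 3]);
    let store = MapStore { data };
    assert_eq!(store.get(&[0u8; 32]), Some(vec![1u8, 2, 3]));
}
```

Returning `&T` avoids the copy but ties every result to the lifetime of the database borrow; this PR trades that back for the simpler owned API.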

View File

@@ -8,9 +8,9 @@ license = "GPL-3.0"
[dependencies]
heapsize = "0.4"
hashdb = { version = "0.3", path = "../hashdb" }
hashdb = { version = "0.3.0", path = "../hashdb" }
plain_hasher = { version = "0.2", path = "../plain_hasher", default-features = false }
rlp = { version = "0.3", path = "../rlp", default-features = false }
rlp = { version = "0.3.0", path = "../rlp", default-features = false }
[dev-dependencies]
tiny-keccak = "1.4.2"

View File

@@ -56,7 +56,7 @@ type FastMap<H, T> = HashMap<<H as KeyHasher>::Out, T, hash::BuildHasherDefault<
///
/// let k = m.insert(d);
/// assert!(m.contains(&k));
-/// assert_eq!(m.get(&k).unwrap(), &d);
+/// assert_eq!(m.get(&k).unwrap(), d);
///
/// m.insert(d);
/// assert!(m.contains(&k));
@@ -75,7 +75,7 @@ type FastMap<H, T> = HashMap<<H as KeyHasher>::Out, T, hash::BuildHasherDefault<
/// m.insert(d);
/// assert!(m.contains(&k));
-/// assert_eq!(m.get(&k).unwrap(), &d);
+/// assert_eq!(m.get(&k).unwrap(), d);
///
/// m.remove(&k);
/// assert!(!m.contains(&k));
@@ -92,7 +92,7 @@ impl<'a, H, T> Default for MemoryDB<H, T>
where
H: KeyHasher,
H::Out: HeapSizeOf,
-T: From<&'a [u8]>
+T: From<&'a [u8]> + Clone
{
fn default() -> Self { Self::new() }
}
@@ -101,7 +101,7 @@ impl<'a, H, T> MemoryDB<H, T>
where
H: KeyHasher,
H::Out: HeapSizeOf,
-T: From<&'a [u8]>,
+T: From<&'a [u8]> + Clone,
{
/// Create a new instance of the memory DB.
pub fn new() -> Self {
@@ -137,7 +137,7 @@ where
}
}
-impl<H: KeyHasher, T> MemoryDB<H, T> {
+impl<H: KeyHasher, T: Clone> MemoryDB<H, T> {
/// Create a new `MemoryDB` from a given null key/data
pub fn from_null_node(null_key: &[u8], null_node_data: T) -> Self {
@@ -188,11 +188,11 @@ impl<H: KeyHasher, T> MemoryDB<H, T> {
///
/// Even when Some is returned, the data is only guaranteed to be useful
/// when the refs > 0.
-pub fn raw(&self, key: &<H as KeyHasher>::Out) -> Option<(&T, i32)> {
+pub fn raw(&self, key: &<H as KeyHasher>::Out) -> Option<(T, i32)> {
if key == &self.hashed_null_node {
-return Some((&self.null_node_data, 1));
+return Some((self.null_node_data.clone(), 1));
}
-self.data.get(key).map(|(value, count)| (value, *count))
+self.data.get(key).map(|(value, count)| (value.clone(), *count))
}
/// Consolidate all the entries of `other` into `self`.
@@ -229,7 +229,7 @@ where
impl<H, T> HashDB<H, T> for MemoryDB<H, T>
where
H: KeyHasher,
-T: Default + PartialEq<T> + for<'a> From<&'a [u8]> + Send + Sync,
+T: Default + PartialEq<T> + for<'a> From<&'a [u8]> + Send + Sync + Clone,
{
fn keys(&self) -> HashMap<H::Out, i32> {
self.data.iter()
@@ -241,13 +241,13 @@ where
.collect()
}
-fn get(&self, key: &H::Out) -> Option<&T> {
+fn get(&self, key: &H::Out) -> Option<T> {
if key == &self.hashed_null_node {
-return Some(&self.null_node_data);
+return Some(self.null_node_data.clone());
}
match self.data.get(key) {
-Some(&(ref d, rc)) if rc > 0 => Some(d),
+Some(&(ref d, rc)) if rc > 0 => Some(d.clone()),
_ => None
}
}
@@ -323,7 +323,7 @@ where
impl<H, T> AsHashDB<H, T> for MemoryDB<H, T>
where
H: KeyHasher,
-T: Default + PartialEq<T> + for<'a> From<&'a[u8]> + Send + Sync,
+T: Default + PartialEq<T> + for<'a> From<&'a[u8]> + Send + Sync + Clone,
{
fn as_hashdb(&self) -> &HashDB<H, T> { self }
fn as_hashdb_mut(&mut self) -> &mut HashDB<H, T> { self }
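
Taken together, these hunks restore MemoryDB's owned-value API on top of its reference-counted entries: each key maps to a `(value, refcount)` pair, an entry only counts as present while the count is positive, and `get`/`raw` now clone on every hit. A toy illustration of that counting scheme, with a hypothetical `CountedDb` and `u64` keys standing in for hasher output:

```rust
use std::collections::HashMap;

struct CountedDb<T> {
    data: HashMap<u64, (T, i32)>,
}

impl<T: Clone> CountedDb<T> {
    // Inserting bumps the reference count; the value is stored once.
    fn insert(&mut self, key: u64, value: T) {
        let entry = self.data.entry(key).or_insert((value, 0));
        entry.1 += 1;
    }
    // Removing only decrements the count; the data stays around.
    fn remove(&mut self, key: u64) {
        if let Some(entry) = self.data.get_mut(&key) {
            entry.1 -= 1;
        }
    }
    // Mirrors the restored signature: an owned value comes back, so the
    // stored copy is cloned on every successful lookup.
    fn get(&self, key: u64) -> Option<T> {
        match self.data.get(&key) {
            Some(&(ref value, rc)) if rc > 0 => Some(value.clone()),
            _ => None,
        }
    }
}

fn main() {
    let mut db = CountedDb { data: HashMap::new() };
    db.insert(1, "node");
    db.insert(1, "node"); // refcount is now 2
    db.remove(1);         // back to 1: still retrievable
    assert_eq!(db.get(1), Some("node"));
    db.remove(1);         // 0: logically gone
    assert_eq!(db.get(1), None);
}
```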

View File

@@ -10,33 +10,17 @@ license = "GPL-3.0"
elastic-array = "0.10"
log = "0.3"
rand = "0.4"
hashdb = { version = "0.3", path = "../hashdb" }
hashdb = { version = "0.3.0", path = "../hashdb" }
parity-bytes = { version = "0.1", path = "../parity-bytes" }
[dev-dependencies]
env_logger = "0.5"
ethereum-types = "0.4"
keccak-hash = { version = "0.1", path = "../keccak-hash" }
memorydb = { version = "0.3", path = "../memorydb", default-features = false }
memorydb = { version = "0.3.0", path = "../memorydb", default-features = false }
rlp = { version = "0.3.0", path = "../rlp", default-features = false }
trie-standardmap = { version = "0.1", path = "../trie-standardmap", default-features = false }
triehash = { version = "0.3", path = "../triehash", default-features = false }
triehash = { version = "0.3.0", path = "../triehash", default-features = false }
parity-bytes = { version = "0.1.0", path = "../parity-bytes" }
-# REVIEW: what's a better way to deal with this? The tests here in
-# `patricia_trie` use `keccak-hasher` and `patricia-trie-ethereum` to
-# instantiate concrete impls. Neither crate is needed/wanted in `parity-common`
-# (but we do want our tests to run…). We can publish them to crates.io (and I
-# did this for `keccak-hasher`, but: see below) but `patricia-trie-ethereum`
-# depends on `patricia_trie` and which will eventually be published as part of
-# `parity-common` but not before that. So it's a cycle. The temporary workaround
-# is to copy `patricia-trie-ethereum` into `parity-common` as a "test helper".
-# Note that this is a *copy*, without any git history or link to the mother
-# repo. They are to be considered test-only helpers and do not necessarily need
-# to be in sync with the `parity-ethereum` versions.
-patricia-trie-ethereum = { version = "0.1", path = "../test-support/patricia-trie-ethereum" }
-# We need this in-tree or we end up with duplicate versions when `keccak-hasher`
-# from crates.io fetches `hashdb` from crates, which causes compiler error
-# `error[E0277]: the trait bound `keccak_hasher::KeccakHasher: hashdb::Hasher`
-# is not satisfied`. Not sure if there's any way around that.
-keccak-hasher = { version = "0.1", path = "../test-support/keccak-hasher" }

View File

@@ -23,16 +23,16 @@ use node_codec::NodeCodec;
///
/// Use it as a `Trie` or `TrieMut` trait object.
pub struct FatDB<'db, H, C>
-	where
-		H: Hasher + 'db,
+where
+	H: Hasher + 'db,
C: NodeCodec<H>
{
raw: TrieDB<'db, H, C>,
}
impl<'db, H, C> FatDB<'db, H, C>
-	where
-		H: Hasher,
+where
+	H: Hasher,
C: NodeCodec<H>
{
/// Create a new trie with the backing database `db` and empty `root`
@@ -47,8 +47,8 @@
}
impl<'db, H, C> Trie<H, C> for FatDB<'db, H, C>
-	where
-		H: Hasher,
+where
+	H: Hasher,
C: NodeCodec<H>
{
fn root(&self) -> &H::Out { self.raw.root() }
@@ -70,8 +70,8 @@
/// Iterator over inserted key-value pairs.
pub struct FatDBIterator<'db, H, C>
-	where
-		H: Hasher + 'db,
+where
+	H: Hasher + 'db,
C: NodeCodec<H> + 'db
{
trie_iterator: TrieDBIterator<'db, H, C>,
@@ -79,8 +79,8 @@
}
impl<'db, H, C> FatDBIterator<'db, H, C>
-	where
-		H: Hasher,
+where
+	H: Hasher,
C: NodeCodec<H>
{
/// Creates new iterator.
@@ -93,8 +93,8 @@
}
impl<'db, H, C> TrieIterator<H, C> for FatDBIterator<'db, H, C>
-	where
-		H: Hasher,
+where
+	H: Hasher,
C: NodeCodec<H>
{
fn seek(&mut self, key: &[u8]) -> Result<(), H::Out, C::Error> {
@@ -104,8 +104,8 @@
}
impl<'db, H, C> Iterator for FatDBIterator<'db, H, C>
-	where
-		H: Hasher,
+where
+	H: Hasher,
C: NodeCodec<H>
{
type Item = TrieItem<'db, H::Out, C::Error>;
@@ -115,7 +115,7 @@ where
.map(|res| {
res.map(|(hash, value)| {
let aux_hash = H::hash(&hash);
-(self.trie.db().get(&aux_hash).cloned().expect("Missing fatdb hash").into_vec(), value)
+(self.trie.db().get(&aux_hash).expect("Missing fatdb hash").into_vec(), value)
})
})
}

View File

@@ -139,7 +139,7 @@ mod test {
let mut t = FatDBMut::new(&mut memdb, &mut root);
t.insert(&key, &val).unwrap();
assert_eq!(t.get(&key), Ok(Some(DBValue::from_slice(&val))));
-assert_eq!(t.db().get(&aux_hash), Some(&DBValue::from_slice(&key)));
+assert_eq!(t.db().get(&aux_hash), Some(DBValue::from_slice(&key)));
t.remove(&key).unwrap();
assert_eq!(t.db().get(&aux_hash), None);
}
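
The two assertions above capture the FatDB bookkeeping that the iterator change in fatdb.rs relies on: the value is stored under hash(key), while the key's preimage is stored under an auxiliary entry keyed by the hash of that hash, so iteration can recover original keys. A condensed sketch with a toy hash function and a HashMap standing in for the real backing trie (all names hypothetical):

```rust
use std::collections::HashMap;

// Stand-in for the Hasher trait's output; the real code uses Keccak-256.
fn toy_hash(data: &[u8]) -> u64 {
    // FNV-1a, purely for illustration.
    data.iter().fold(1469598103934665603u64, |h, b| {
        (h ^ *b as u64).wrapping_mul(1099511628211)
    })
}

fn insert(db: &mut HashMap<u64, Vec<u8>>, key: &[u8], value: &[u8]) {
    let key_hash = toy_hash(key);
    db.insert(key_hash, value.to_vec());
    // The "fat" part: remember the key itself under hash(hash(key)).
    let aux_hash = toy_hash(&key_hash.to_le_bytes());
    db.insert(aux_hash, key.to_vec());
}

fn main() {
    let mut db = HashMap::new();
    insert(&mut db, b"alpha", b"1");
    // Given only the key hash, as a trie iterator would be, the original
    // key is recoverable through the aux entry.
    let key_hash = toy_hash(b"alpha");
    let aux_hash = toy_hash(&key_hash.to_le_bytes());
    assert_eq!(db.get(&aux_hash).map(|v| v.as_slice()), Some(&b"alpha"[..]));
}
```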

View File

@@ -89,7 +89,7 @@ where
pub fn db(&'db self) -> &'db HashDB<H, DBValue> { self.db }
/// Get the data of the root node.
-fn root_data(&self) -> Result<&DBValue, H::Out, C::Error> {
+fn root_data(&self) -> Result<DBValue, H::Out, C::Error> {
self.db
.get(self.root)
.ok_or_else(|| Box::new(TrieError::InvalidStateRoot(*self.root)))
@@ -103,7 +103,7 @@
Some(key) => {
self.db
.get(&key)
-.map(|v| Cow::Borrowed(v))
+.map(|v| Cow::Owned(v))
.ok_or_else(|| Box::new(TrieError::IncompleteDatabase(key)))
}
None => Ok(Cow::Owned(DBValue::from_slice(node)))
@@ -345,7 +345,7 @@ impl<'a, H: Hasher, C: NodeCodec<H>> TrieIterator<H, C> for TrieDBIterator<'a, H
self.trail.clear();
self.key_nibbles.clear();
let root_rlp = self.db.root_data()?;
-self.seek(root_rlp, NibbleSlice::new(key.as_ref()))
+self.seek(&root_rlp, NibbleSlice::new(key.as_ref()))
}
}
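
Because `HashDB::get` hands back owned `DBValue`s again, `TrieDB` can no longer borrow node data out of the database: `root_data` returns a value, and looked-up nodes are wrapped in `Cow::Owned` where `Cow::Borrowed` previously sufficed. A rough sketch of the pattern, with a hypothetical `lookup` over a plain `HashMap` in place of the real node resolution:

```rust
use std::borrow::Cow;
use std::collections::HashMap;

// Once the backing store yields owned Vec<u8> rather than borrowed slices,
// the hit arm must be Cow::Owned; Cow::Borrowed would not even type-check.
fn lookup<'a>(db: &'a HashMap<u64, Vec<u8>>, key: u64, inline: &'a [u8]) -> Cow<'a, [u8]> {
    match db.get(&key) {
        // Owned data from the backing store, as after this revert.
        Some(v) => Cow::Owned(v.clone()),
        // Data embedded inline in a parent node can stay borrowed.
        None => Cow::Borrowed(inline),
    }
}

fn main() {
    let mut db = HashMap::new();
    db.insert(7u64, vec![0xAB]);
    assert_eq!(&*lookup(&db, 7, &[]), &[0xAB][..]);
    assert_eq!(&*lookup(&db, 8, &[0xCD]), &[0xCD][..]);
}
```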

View File

@@ -1,9 +1,9 @@
[package]
name = "rlp"
version = "0.3.0"
description = "Recursive-length prefix encoding, decoding, and compression"
repository = "https://github.com/paritytech/parity-common"
license = "MIT/Apache-2.0"
version = "0.3.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]

View File

@@ -55,7 +55,9 @@ fn calculate_payload_info(header_bytes: &[u8], len_of_len: usize) -> Result<Payl
None => return Err(DecoderError::RlpIsTooShort),
_ => (),
}
-if header_bytes.len() < header_len { return Err(DecoderError::RlpIsTooShort); }
+if header_bytes.len() < header_len {
+	return Err(DecoderError::RlpIsTooShort);
+}
let value_len = decode_usize(&header_bytes[1..header_len])?;
if value_len <= 55 {
return Err(DecoderError::RlpInvalidIndirection);
@@ -102,7 +104,7 @@ impl PayloadInfo {
#[derive(Debug, Clone)]
pub struct Rlp<'a> {
bytes: &'a [u8],
-offset_cache: Cell<OffsetCache>,
+offset_cache: Cell<Option<OffsetCache>>,
count_cache: Cell<Option<usize>>,
}
@@ -128,7 +130,7 @@
pub fn new(bytes: &'a [u8]) -> Rlp<'a> {
Rlp {
bytes: bytes,
-offset_cache: Cell::new(OffsetCache::new(usize::max_value(), 0)),
+offset_cache: Cell::new(None),
count_cache: Cell::new(None)
}
}
@@ -186,17 +188,22 @@
// move to the cached position if its index is less than or equal to the
// current search index, otherwise move to the beginning of the list
-let c = self.offset_cache.get();
-let (mut bytes, to_skip) = match c.index <= index {
-	true => (Rlp::consume(self.bytes, c.offset)?, index - c.index),
-	false => (self.consume_list_payload()?, index),
+let cache = self.offset_cache.get();
+let (bytes, indexes_to_skip, bytes_consumed) = match cache {
+	Some(ref cache) if cache.index <= index => (
+		Rlp::consume(self.bytes, cache.offset)?, index - cache.index, cache.offset
+	),
+	_ => {
+		let (bytes, consumed) = self.consume_list_payload()?;
+		(bytes, index, consumed)
+	}
};
// skip up to x items
-bytes = Rlp::consume_items(bytes, to_skip)?;
+let (bytes, consumed) = Rlp::consume_items(bytes, indexes_to_skip)?;
// update the cache
-self.offset_cache.set(OffsetCache::new(index, self.bytes.len() - bytes.len()));
+self.offset_cache.set(Some(OffsetCache::new(index, bytes_consumed + consumed)));
// construct new rlp
let found = BasicDecoder::payload_info(bytes)?;
@@ -260,29 +267,32 @@
}
/// consumes first found prefix
-fn consume_list_payload(&self) -> Result<&'a [u8], DecoderError> {
+fn consume_list_payload(&self) -> Result<(&'a [u8], usize), DecoderError> {
let item = BasicDecoder::payload_info(self.bytes)?;
if self.bytes.len() < (item.header_len + item.value_len) {
return Err(DecoderError::RlpIsTooShort);
}
-Ok(&self.bytes[item.header_len..item.header_len + item.value_len])
+Ok((&self.bytes[item.header_len..item.header_len + item.value_len], item.header_len))
}
/// consumes fixed number of items
-fn consume_items(bytes: &'a [u8], items: usize) -> Result<&'a [u8], DecoderError> {
+fn consume_items(bytes: &'a [u8], items: usize) -> Result<(&'a [u8], usize), DecoderError> {
let mut result = bytes;
+let mut consumed = 0;
for _ in 0..items {
let i = BasicDecoder::payload_info(result)?;
-result = Rlp::consume(result, i.header_len + i.value_len)?;
+let to_consume = i.header_len + i.value_len;
+result = Rlp::consume(result, to_consume)?;
+consumed += to_consume;
}
-Ok(result)
+Ok((result, consumed))
}
/// consumes slice prefix of length `len`
fn consume(bytes: &'a [u8], len: usize) -> Result<&'a [u8], DecoderError> {
match bytes.len() >= len {
true => Ok(&bytes[len..]),
-false => Err(DecoderError::RlpIsTooShort),
+false => Err(DecoderError::RlpIsTooShort)
}
}
}
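
Two changes land in `Rlp::at`'s offset cache here: the empty state becomes explicit, with `Cell<Option<OffsetCache>>` replacing a `usize::max_value()` sentinel, and the cached offset is now threaded through `consume_list_payload` and `consume_items` as an exact count of consumed bytes instead of being recovered from slice-length arithmetic. The regression test `test_list_at` added below, taken from parity-ethereum#9663, pins the fixed behavior. A simplified sketch of the Option-based resume logic, with a hypothetical `Decoder` type:

```rust
use std::cell::Cell;

#[derive(Clone, Copy)]
struct OffsetCache {
    index: usize,  // item index the cache points at
    offset: usize, // bytes consumed up to that item
}

struct Decoder {
    cache: Cell<Option<OffsetCache>>,
}

impl Decoder {
    fn new() -> Self {
        // No sentinel needed: an empty cache is simply `None`.
        Decoder { cache: Cell::new(None) }
    }

    // Returns (byte offset to resume from, items still to skip): resume
    // from the cache only when it points at or before the target index.
    fn resume_point(&self, target_index: usize) -> (usize, usize) {
        match self.cache.get() {
            Some(c) if c.index <= target_index => (c.offset, target_index - c.index),
            _ => (0, target_index), // cache miss: scan from the start
        }
    }
}

fn main() {
    let d = Decoder::new();
    assert_eq!(d.resume_point(3), (0, 3));
    d.cache.set(Some(OffsetCache { index: 2, offset: 10 }));
    assert_eq!(d.resume_point(3), (10, 1));
}
```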

View File

@@ -9,6 +9,8 @@
#[cfg(feature = "ethereum")]
extern crate ethereum_types;
extern crate rlp;
+#[macro_use]
+extern crate hex_literal;
use std::{fmt, cmp};
#[cfg(feature = "ethereum")]
@@ -482,3 +484,16 @@ fn test_inner_length_capping_for_short_lists() {
assert_eq!(Rlp::new(&vec![0xc0 + 3, 0x82, b'a', b'b']).val_at::<String>(0), Ok("ab".to_owned()));
assert_eq!(Rlp::new(&vec![0xc0 + 4, 0x82, b'a', b'b']).val_at::<String>(0), Err(DecoderError::RlpIsTooShort));
}
+// test described in
+//
+// https://github.com/paritytech/parity-ethereum/pull/9663
+#[test]
+fn test_list_at() {
+	let raw = hex!("f83e82022bd79020010db83c4d001500000000abcdef12820cfa8215a8d79020010db885a308d313198a2e037073488208ae82823a8443b9a355c5010203040531b9019afde696e582a78fa8d95ea13ce3297d4afb8ba6433e4154caa5ac6431af1b80ba76023fa4090c408f6b4bc3701562c031041d4702971d102c9ab7fa5eed4cd6bab8f7af956f7d565ee1917084a95398b6a21eac920fe3dd1345ec0a7ef39367ee69ddf092cbfe5b93e5e568ebc491983c09c76d922dc3");
+	let rlp = Rlp::new(&raw);
+	let _rlp1 = rlp.at(1).unwrap();
+	let rlp2 = rlp.at(2).unwrap();
+	assert_eq!(rlp2.val_at::<u16>(2).unwrap(), 33338);
+}

View File

@@ -6,9 +6,9 @@ description = "Merkle-Patricia Trie (Ethereum Style)"
license = "GPL-3.0"
[dependencies]
patricia-trie = { version = "0.3", path = "../../patricia_trie" }
patricia-trie = { version = "0.3.0", path = "../../patricia_trie" }
keccak-hasher = { version = "0.1", path = "../keccak-hasher" }
hashdb = { version = "0.3", path = "../../hashdb" }
hashdb = { version = "0.3.0", path = "../../hashdb" }
rlp = { version = "0.3.0", path = "../../rlp" }
parity-bytes = { version = "0.1", path = "../../parity-bytes" }

View File

@@ -7,8 +7,8 @@ repository = "https://github.com/paritytech/parity-common"
license = "GPL-3.0"
[dependencies]
hashdb = { version = "0.3", path = "../hashdb", default-features = false }
rlp = { version = "0.3", path = "../rlp", default-features = false }
hashdb = { version = "0.3.0", path = "../hashdb", default-features = false }
rlp = { version = "0.3.0", path = "../rlp", default-features = false }
[dev-dependencies]
trie-standardmap = { version = "0.1", path = "../trie-standardmap" }