Use memmap for dag cache (#6193)

* Rebase and fix compilation errors (tests not yet fixed)

* Use `debug_assert` over `assert`

* Fix tests

* Assert safety, clean up

* Fix up stale cache removal, move one assert to debug_assert

* Remove printlns

* Add licenses

* Fix benches

* Inline some no-ops in a hot loop that weren't being inlined

* Add spooky comment to make sure no-one removes the inlining annotations

* Minor cleanup

* Add option to switch between mmap and ram

* Flag ethash to use less memory when running light client

* Fix tests

* Remove todo comment (it's done)

* Replace assertion with error return

* Fix indentation

* Use union instead of `transmute`

* Fix benches

* Extract to constants

* Clean up and fix soundness holes

* Fix formatting

* Ignore missing-file errors

* Make incorrect cache size an error condition instead of a panic, remove dead code

* Fix compilation errors from rebase

* Fix compilation errors in tests

* Fix compilation errors in tests
Authored by Jef on 2017-09-25 19:45:33 +02:00; committed by Gav Wood
parent 70be064aa5
commit 5c08698fa0
22 changed files with 1560 additions and 757 deletions

Cargo.lock (generated): 526 lines changed

File diff suppressed because it is too large.

@@ -11,6 +11,8 @@ hash = { path = "../util/hash" }
primal = "0.2.3"
parking_lot = "0.4"
crunchy = "0.1.0"
memmap = "0.5.2"
either = "1.0.0"
[features]
benches = []

ethash/src/cache.rs (new file, 352 lines)

@@ -0,0 +1,352 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use compute::Light;
use either::Either;
use keccak::{H256, keccak_512};
use memmap::{Mmap, Protection};
use parking_lot::Mutex;
use seed_compute::SeedHashCompute;
use shared::{ETHASH_CACHE_ROUNDS, NODE_BYTES, NODE_DWORDS, Node, epoch, get_cache_size, to_hex};
use std::borrow::Cow;
use std::fs;
use std::io::{self, Read, Write};
use std::path::{Path, PathBuf};
use std::slice;
use std::sync::Arc;
type Cache = Either<Vec<Node>, Mmap>;
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
pub enum OptimizeFor {
Cpu,
Memory,
}
impl Default for OptimizeFor {
fn default() -> Self {
OptimizeFor::Cpu
}
}
fn byte_size(cache: &Cache) -> usize {
use self::Either::{Left, Right};
match *cache {
Left(ref vec) => vec.len() * NODE_BYTES,
Right(ref mmap) => mmap.len(),
}
}
fn new_buffer(path: &Path, num_nodes: usize, ident: &H256, optimize_for: OptimizeFor) -> Cache {
let memmap = match optimize_for {
OptimizeFor::Cpu => None,
OptimizeFor::Memory => make_memmapped_cache(path, num_nodes, ident).ok(),
};
memmap.map(Either::Right).unwrap_or_else(|| {
Either::Left(make_memory_cache(num_nodes, ident))
})
}
#[derive(Clone)]
pub struct NodeCacheBuilder {
// TODO: Remove this locking and just use an `Rc`?
seedhash: Arc<Mutex<SeedHashCompute>>,
optimize_for: OptimizeFor,
}
// TODO: Abstract the "optimize for" logic
pub struct NodeCache {
builder: NodeCacheBuilder,
cache_dir: Cow<'static, Path>,
cache_path: PathBuf,
epoch: u64,
cache: Cache,
}
impl NodeCacheBuilder {
pub fn light(&self, cache_dir: &Path, block_number: u64) -> Light {
Light::new_with_builder(self, cache_dir, block_number)
}
pub fn light_from_file(&self, cache_dir: &Path, block_number: u64) -> io::Result<Light> {
Light::from_file_with_builder(self, cache_dir, block_number)
}
pub fn new<T: Into<Option<OptimizeFor>>>(optimize_for: T) -> Self {
NodeCacheBuilder {
seedhash: Arc::new(Mutex::new(SeedHashCompute::new())),
optimize_for: optimize_for.into().unwrap_or_default(),
}
}
fn block_number_to_ident(&self, block_number: u64) -> H256 {
self.seedhash.lock().hash_block_number(block_number)
}
fn epoch_to_ident(&self, epoch: u64) -> H256 {
self.seedhash.lock().hash_epoch(epoch)
}
pub fn from_file<P: Into<Cow<'static, Path>>>(
&self,
cache_dir: P,
block_number: u64,
) -> io::Result<NodeCache> {
let cache_dir = cache_dir.into();
let ident = self.block_number_to_ident(block_number);
let path = cache_path(cache_dir.as_ref(), &ident);
let cache = cache_from_path(&path, self.optimize_for)?;
let expected_cache_size = get_cache_size(block_number);
if byte_size(&cache) == expected_cache_size {
Ok(NodeCache {
builder: self.clone(),
epoch: epoch(block_number),
cache_dir: cache_dir,
cache_path: path,
cache: cache,
})
} else {
Err(io::Error::new(
io::ErrorKind::InvalidData,
"Node cache is of incorrect size",
))
}
}
pub fn new_cache<P: Into<Cow<'static, Path>>>(
&self,
cache_dir: P,
block_number: u64,
) -> NodeCache {
let cache_dir = cache_dir.into();
let ident = self.block_number_to_ident(block_number);
let cache_size = get_cache_size(block_number);
// We use `debug_assert` since it is impossible for `get_cache_size` to return an unaligned
// value with the current implementation. If the implementation changes, CI will catch it.
debug_assert!(cache_size % NODE_BYTES == 0, "Unaligned cache size");
let num_nodes = cache_size / NODE_BYTES;
let path = cache_path(cache_dir.as_ref(), &ident);
let nodes = new_buffer(&path, num_nodes, &ident, self.optimize_for);
NodeCache {
builder: self.clone(),
epoch: epoch(block_number),
cache_dir: cache_dir.into(),
cache_path: path,
cache: nodes,
}
}
}
impl NodeCache {
pub fn cache_path(&self) -> &Path {
&self.cache_path
}
pub fn flush(&mut self) -> io::Result<()> {
if let Some(last) = self.epoch.checked_sub(2).map(|ep| {
cache_path(self.cache_dir.as_ref(), &self.builder.epoch_to_ident(ep))
})
{
fs::remove_file(last).unwrap_or_else(|error| match error.kind() {
io::ErrorKind::NotFound => (),
_ => warn!("Error removing stale DAG cache: {:?}", error),
});
}
consume_cache(&mut self.cache, &self.cache_path)
}
}
fn make_memmapped_cache(path: &Path, num_nodes: usize, ident: &H256) -> io::Result<Mmap> {
use std::fs::OpenOptions;
let file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
file.set_len((num_nodes * NODE_BYTES) as _)?;
let mut memmap = Mmap::open(&file, Protection::ReadWrite)?;
unsafe { initialize_memory(memmap.mut_ptr() as *mut Node, num_nodes, ident) };
Ok(memmap)
}
fn make_memory_cache(num_nodes: usize, ident: &H256) -> Vec<Node> {
let mut nodes: Vec<Node> = Vec::with_capacity(num_nodes);
// Use uninit instead of unnecessarily writing `size_of::<Node>() * num_nodes` 0s
unsafe {
initialize_memory(nodes.as_mut_ptr(), num_nodes, ident);
nodes.set_len(num_nodes);
}
nodes
}
fn cache_path<'a, P: Into<Cow<'a, Path>>>(path: P, ident: &H256) -> PathBuf {
let mut buf = path.into().into_owned();
buf.push(to_hex(ident));
buf
}
fn consume_cache(cache: &mut Cache, path: &Path) -> io::Result<()> {
use std::fs::OpenOptions;
match *cache {
Either::Left(ref mut vec) => {
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)?;
let buf = unsafe {
slice::from_raw_parts_mut(vec.as_mut_ptr() as *mut u8, vec.len() * NODE_BYTES)
};
file.write_all(buf).map(|_| ())
}
Either::Right(ref mmap) => {
mmap.flush()
}
}
}
fn cache_from_path(path: &Path, optimize_for: OptimizeFor) -> io::Result<Cache> {
let memmap = match optimize_for {
OptimizeFor::Cpu => None,
OptimizeFor::Memory => Mmap::open_path(path, Protection::ReadWrite).ok(),
};
memmap.map(Either::Right).ok_or(()).or_else(|_| {
read_from_path(path).map(Either::Left)
})
}
fn read_from_path(path: &Path) -> io::Result<Vec<Node>> {
use std::fs::File;
use std::mem;
let mut file = File::open(path)?;
let mut nodes: Vec<u8> = Vec::with_capacity(file.metadata().map(|m| m.len() as _).unwrap_or(
NODE_BYTES * 1_000_000,
));
file.read_to_end(&mut nodes)?;
nodes.shrink_to_fit();
if nodes.len() % NODE_BYTES != 0 || nodes.capacity() % NODE_BYTES != 0 {
return Err(io::Error::new(
io::ErrorKind::Other,
"Node cache is not a multiple of node size",
));
}
let out: Vec<Node> = unsafe {
Vec::from_raw_parts(
nodes.as_mut_ptr() as *mut _,
nodes.len() / NODE_BYTES,
nodes.capacity() / NODE_BYTES,
)
};
mem::forget(nodes);
Ok(out)
}
impl AsRef<[Node]> for NodeCache {
fn as_ref(&self) -> &[Node] {
match self.cache {
Either::Left(ref vec) => vec,
Either::Right(ref mmap) => unsafe {
let bytes = mmap.ptr();
// This isn't a safety issue, so we can keep this a debug lint. We don't care about
// people manually messing with the files unless it can cause unsafety, but if we're
// generating incorrect files then we want to catch that in CI.
debug_assert_eq!(mmap.len() % NODE_BYTES, 0);
slice::from_raw_parts(bytes as _, mmap.len() / NODE_BYTES)
},
}
}
}
// This takes a raw pointer and a counter because `memory` may be uninitialized. `memory` _must_ be
// a pointer to the beginning of an allocated but possibly-uninitialized block of
// `num_nodes * NODE_BYTES` bytes
//
// We have to use raw pointers to read/write uninit, using "normal" indexing causes LLVM to freak
// out. It counts as a read and causes all writes afterwards to be elided. Yes, really. I know, I
// want to refactor this to use less `unsafe` as much as the next rustacean.
unsafe fn initialize_memory(memory: *mut Node, num_nodes: usize, ident: &H256) {
let dst = memory as *mut u8;
debug_assert_eq!(ident.len(), 32);
keccak_512::unchecked(dst, NODE_BYTES, ident.as_ptr(), ident.len());
for i in 1..num_nodes {
// We use raw pointers here, see above
let dst = memory.offset(i as _) as *mut u8;
let src = memory.offset(i as isize - 1) as *mut u8;
keccak_512::unchecked(dst, NODE_BYTES, src, NODE_BYTES);
}
// Now this is initialized, we can treat it as a slice.
let nodes: &mut [Node] = slice::from_raw_parts_mut(memory, num_nodes);
// For `unroll!`, see below. If the literal in `unroll!` is not the same as the RHS here then
// these have got out of sync! Don't let this happen!
debug_assert_eq!(NODE_DWORDS, 8);
// This _should_ get unrolled by the compiler, since it's not using the loop variable.
for _ in 0..ETHASH_CACHE_ROUNDS {
for i in 0..num_nodes {
let data_idx = (num_nodes - 1 + i) % num_nodes;
let idx = nodes.get_unchecked_mut(i).as_words()[0] as usize % num_nodes;
let data = {
let mut data: Node = nodes.get_unchecked(data_idx).clone();
let rhs: &Node = nodes.get_unchecked(idx);
unroll! {
for w in 0..8 {
*data.as_dwords_mut().get_unchecked_mut(w) ^=
*rhs.as_dwords().get_unchecked(w);
}
}
data
};
keccak_512::write(&data.bytes, &mut nodes.get_unchecked_mut(i).bytes);
}
}
}
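
For reference, the intended call pattern mirrors the tests in compute.rs further down this diff; the following is a usage sketch, not code from the commit, and `cache_round_trip` is a hypothetical in-crate helper:

// Usage sketch (hypothetical helper, in-crate view).
fn cache_round_trip() -> io::Result<()> {
    // Memory mode backs the cache with a memory-mapped file; Cpu (the
    // default) keeps it in a Vec<Node> on the heap.
    let builder = NodeCacheBuilder::new(OptimizeFor::Memory);
    let dir = ::std::env::temp_dir();

    // Build the light cache for a block and flush it to disk; `flush` also
    // prunes caches that are two or more epochs stale.
    let mut light = builder.light(&dir, 486_382);
    let _path = light.to_file()?.to_owned();

    // A later run can reload the cache instead of regenerating it.
    let _reloaded = builder.light_from_file(&dir, 486_382)?;
    Ok(())
}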


@@ -19,30 +19,16 @@
// TODO: fix endianess for big endian
use primal::is_prime;
use std::cell::Cell;
use keccak::{keccak_512, keccak_256, H256};
use cache::{NodeCache, NodeCacheBuilder};
use seed_compute::SeedHashCompute;
use shared::*;
use std::io;
use std::mem;
use std::path::Path;
use std::ptr;
use hash;
use std::slice;
use std::path::{Path, PathBuf};
use std::io::{self, Read, Write};
use std::fs::{self, File};
use parking_lot::Mutex;
pub const ETHASH_EPOCH_LENGTH: u64 = 30000;
pub const ETHASH_CACHE_ROUNDS: usize = 3;
pub const ETHASH_MIX_BYTES: usize = 128;
pub const ETHASH_ACCESSES: usize = 64;
pub const ETHASH_DATASET_PARENTS: u32 = 256;
const DATASET_BYTES_INIT: u64 = 1 << 30;
const DATASET_BYTES_GROWTH: u64 = 1 << 23;
const CACHE_BYTES_INIT: u64 = 1 << 24;
const CACHE_BYTES_GROWTH: u64 = 1 << 17;
const NODE_WORDS: usize = 64 / 4;
const NODE_BYTES: usize = 64;
const MIX_WORDS: usize = ETHASH_MIX_BYTES / 4;
const MIX_NODES: usize = MIX_WORDS / NODE_WORDS;
const FNV_PRIME: u32 = 0x01000193;
@@ -55,48 +41,24 @@ pub struct ProofOfWork {
pub mix_hash: H256,
}
struct Node {
bytes: [u8; NODE_BYTES],
}
impl Default for Node {
fn default() -> Self {
Node { bytes: [0u8; NODE_BYTES] }
}
}
impl Clone for Node {
fn clone(&self) -> Self {
Node { bytes: *&self.bytes }
}
}
impl Node {
#[inline]
fn as_words(&self) -> &[u32; NODE_WORDS] {
unsafe { mem::transmute(&self.bytes) }
}
#[inline]
fn as_words_mut(&mut self) -> &mut [u32; NODE_WORDS] {
unsafe { mem::transmute(&mut self.bytes) }
}
}
pub type H256 = [u8; 32];
pub struct Light {
cache_dir: PathBuf,
block_number: u64,
cache: Vec<Node>,
seed_compute: Mutex<SeedHashCompute>,
cache: NodeCache,
}
/// Light cache structure
impl Light {
/// Create a new light cache for a given block number
pub fn new<T: AsRef<Path>>(cache_dir: T, block_number: u64) -> Light {
light_new(cache_dir, block_number)
pub fn new_with_builder(
builder: &NodeCacheBuilder,
cache_dir: &Path,
block_number: u64,
) -> Self {
let cache = builder.new_cache(cache_dir.to_path_buf(), block_number);
Light {
block_number: block_number,
cache: cache,
}
}
/// Calculate the light boundary data
@@ -106,107 +68,25 @@ impl Light {
light_compute(self, header_hash, nonce)
}
pub fn file_path<T: AsRef<Path>>(cache_dir: T, seed_hash: H256) -> PathBuf {
let mut cache_dir = cache_dir.as_ref().to_path_buf();
cache_dir.push(to_hex(&seed_hash));
cache_dir
}
pub fn from_file<T: AsRef<Path>>(cache_dir: T, block_number: u64) -> io::Result<Light> {
let seed_compute = SeedHashCompute::new();
let path = Light::file_path(&cache_dir, seed_compute.get_seedhash(block_number));
let mut file = File::open(path)?;
let cache_size = get_cache_size(block_number);
if file.metadata()?.len() != cache_size as u64 {
return Err(io::Error::new(io::ErrorKind::Other, "Cache file size mismatch"));
}
let num_nodes = cache_size / NODE_BYTES;
let mut nodes: Vec<Node> = Vec::with_capacity(num_nodes);
unsafe { nodes.set_len(num_nodes) };
let buf = unsafe { slice::from_raw_parts_mut(nodes.as_mut_ptr() as *mut u8, cache_size) };
file.read_exact(buf)?;
pub fn from_file_with_builder(
builder: &NodeCacheBuilder,
cache_dir: &Path,
block_number: u64,
) -> io::Result<Self> {
let cache = builder.from_file(cache_dir.to_path_buf(), block_number)?;
Ok(Light {
block_number,
cache_dir: cache_dir.as_ref().to_path_buf(),
cache: nodes,
seed_compute: Mutex::new(seed_compute),
block_number: block_number,
cache: cache,
})
}
pub fn to_file(&self) -> io::Result<PathBuf> {
let seed_compute = self.seed_compute.lock();
let path = Light::file_path(&self.cache_dir, seed_compute.get_seedhash(self.block_number));
if self.block_number >= ETHASH_EPOCH_LENGTH * 2 {
let deprecated = Light::file_path(
&self.cache_dir,
seed_compute.get_seedhash(self.block_number - ETHASH_EPOCH_LENGTH * 2)
);
if deprecated.exists() {
debug!(target: "ethash", "removing: {:?}", &deprecated);
fs::remove_file(deprecated)?;
}
}
fs::create_dir_all(path.parent().unwrap())?;
let mut file = File::create(&path)?;
let cache_size = self.cache.len() * NODE_BYTES;
let buf = unsafe { slice::from_raw_parts(self.cache.as_ptr() as *const u8, cache_size) };
file.write(buf)?;
Ok(path)
pub fn to_file(&mut self) -> io::Result<&Path> {
self.cache.flush()?;
Ok(self.cache.cache_path())
}
}
pub struct SeedHashCompute {
prev_epoch: Cell<u64>,
prev_seedhash: Cell<H256>,
}
impl SeedHashCompute {
#[inline]
pub fn new() -> SeedHashCompute {
SeedHashCompute {
prev_epoch: Cell::new(0),
prev_seedhash: Cell::new([0u8; 32]),
}
}
#[inline]
fn reset_cache(&self) {
self.prev_epoch.set(0);
self.prev_seedhash.set([0u8; 32]);
}
#[inline]
pub fn get_seedhash(&self, block_number: u64) -> H256 {
let epoch = block_number / ETHASH_EPOCH_LENGTH;
if epoch < self.prev_epoch.get() {
// can't build on previous hash if requesting an older block
self.reset_cache();
}
if epoch > self.prev_epoch.get() {
let seed_hash = SeedHashCompute::resume_compute_seedhash(self.prev_seedhash.get(), self.prev_epoch.get(), epoch);
self.prev_seedhash.set(seed_hash);
self.prev_epoch.set(epoch);
}
self.prev_seedhash.get()
}
#[inline]
pub fn resume_compute_seedhash(mut hash: H256, start_epoch: u64, end_epoch: u64) -> H256 {
for _ in start_epoch..end_epoch {
unsafe { hash::keccak_256(hash[..].as_mut_ptr(), 32, hash[..].as_ptr(), 32) };
}
hash
}
}
pub fn slow_get_seedhash(block_number: u64) -> H256 {
pub fn slow_hash_block_number(block_number: u64) -> H256 {
SeedHashCompute::resume_compute_seedhash([0u8; 32], 0, block_number / ETHASH_EPOCH_LENGTH)
}
@@ -214,34 +94,6 @@ fn fnv_hash(x: u32, y: u32) -> u32 {
return x.wrapping_mul(FNV_PRIME) ^ y;
}
fn keccak_512(input: &[u8], output: &mut [u8]) {
unsafe { hash::keccak_512(output.as_mut_ptr(), output.len(), input.as_ptr(), input.len()) };
}
fn keccak_512_inplace(input: &mut [u8]) {
// This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This
// means that we can reuse the input buffer for both input and output.
unsafe { hash::keccak_512(input.as_mut_ptr(), input.len(), input.as_ptr(), input.len()) };
}
fn get_cache_size(block_number: u64) -> usize {
let mut sz: u64 = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH);
sz = sz - NODE_BYTES as u64;
while !is_prime(sz / NODE_BYTES as u64) {
sz = sz - 2 * NODE_BYTES as u64;
}
sz as usize
}
fn get_data_size(block_number: u64) -> usize {
let mut sz: u64 = DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH);
sz = sz - ETHASH_MIX_BYTES as u64;
while !is_prime(sz / ETHASH_MIX_BYTES as u64) {
sz = sz - 2 * ETHASH_MIX_BYTES as u64;
}
sz as usize
}
/// Difficulty quick check for POW preverification
///
/// `header_hash` The hash of the header
@@ -261,12 +113,12 @@ pub fn quick_get_difficulty(header_hash: &H256, nonce: u64, mix_hash: &H256) ->
ptr::copy_nonoverlapping(header_hash.as_ptr(), buf.as_mut_ptr(), 32);
ptr::copy_nonoverlapping(mem::transmute(&nonce), buf[32..].as_mut_ptr(), 8);
hash::keccak_512(buf.as_mut_ptr(), 64, buf.as_ptr(), 40);
keccak_512::unchecked(buf.as_mut_ptr(), 64, buf.as_ptr(), 40);
ptr::copy_nonoverlapping(mix_hash.as_ptr(), buf[64..].as_mut_ptr(), 32);
// This is initialized in `keccak_256`
let mut hash: [u8; 32] = mem::uninitialized();
hash::keccak_256(hash.as_mut_ptr(), hash.len(), buf.as_ptr(), buf.len());
keccak_256::unchecked(hash.as_mut_ptr(), hash.len(), buf.as_ptr(), buf.len());
hash
}
@@ -324,11 +176,7 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
// leaving it fully initialized.
let mut out: [u8; NODE_BYTES] = mem::uninitialized();
ptr::copy_nonoverlapping(
header_hash.as_ptr(),
out.as_mut_ptr(),
header_hash.len(),
);
ptr::copy_nonoverlapping(header_hash.as_ptr(), out.as_mut_ptr(), header_hash.len());
ptr::copy_nonoverlapping(
mem::transmute(&nonce),
out[header_hash.len()..].as_mut_ptr(),
@@ -336,11 +184,11 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
);
// compute keccak-512 hash and replicate across mix
hash::keccak_512(
keccak_512::unchecked(
out.as_mut_ptr(),
NODE_BYTES,
out.as_ptr(),
header_hash.len() + mem::size_of::<u64>()
header_hash.len() + mem::size_of::<u64>(),
);
Node { bytes: out }
@@ -354,7 +202,7 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
let page_size = 4 * MIX_WORDS;
let num_full_pages = (full_size / page_size) as u32;
// deref once for better performance
let cache: &[Node] = &light.cache;
let cache: &[Node] = light.cache.as_ref();
let first_val = buf.half_mix.as_words()[0];
debug_assert_eq!(MIX_NODES, 2);
@@ -364,14 +212,10 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
let index = {
// This is trivially safe, but does not work on big-endian. The safety of this is
// asserted in debug builds (see the definition of `make_const_array!`).
let mix_words: &mut [u32; MIX_WORDS] = unsafe {
make_const_array!(MIX_WORDS, &mut mix)
};
let mix_words: &mut [u32; MIX_WORDS] =
unsafe { make_const_array!(MIX_WORDS, &mut mix) };
fnv_hash(
first_val ^ i,
mix_words[i as usize % MIX_WORDS]
) % num_full_pages
fnv_hash(first_val ^ i, mix_words[i as usize % MIX_WORDS]) % num_full_pages
};
unroll! {
@@ -403,9 +247,8 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
// times and set each index, leaving the array fully initialized. THIS ONLY WORKS ON LITTLE-
// ENDIAN MACHINES. See a future PR to make this and the rest of the code work correctly on
// big-endian arches like mips.
let mut compress: &mut [u32; MIX_WORDS / 4] = unsafe {
make_const_array!(MIX_WORDS / 4, &mut buf.compress_bytes)
};
let compress: &mut [u32; MIX_WORDS / 4] =
unsafe { make_const_array!(MIX_WORDS / 4, &mut buf.compress_bytes) };
// Compress mix
debug_assert_eq!(MIX_WORDS / 4, 8);
@@ -430,7 +273,7 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
// We overwrite the second half since `keccak_256` has an internal buffer and so allows
// overlapping arrays as input.
let write_ptr: *mut u8 = mem::transmute(&mut buf.compress_bytes);
hash::keccak_256(
keccak_256::unchecked(
write_ptr,
buf.compress_bytes.len(),
read_ptr,
@@ -439,25 +282,21 @@ fn hash_compute(light: &Light, full_size: usize, header_hash: &H256, nonce: u64)
buf.compress_bytes
};
ProofOfWork {
mix_hash: mix_hash,
value: value,
}
ProofOfWork { mix_hash: mix_hash, value: value }
}
// TODO: Use the `simd` crate
fn calculate_dag_item(node_index: u32, cache: &[Node]) -> Node {
let num_parent_nodes = cache.len();
let mut ret = cache[node_index as usize % num_parent_nodes].clone();
ret.as_words_mut()[0] ^= node_index;
keccak_512_inplace(&mut ret.bytes);
keccak_512::inplace(ret.as_bytes_mut());
debug_assert_eq!(NODE_WORDS, 16);
for i in 0..ETHASH_DATASET_PARENTS as u32 {
let parent_index = fnv_hash(
node_index ^ i,
ret.as_words()[i as usize % NODE_WORDS],
) % num_parent_nodes as u32;
let parent_index = fnv_hash(node_index ^ i, ret.as_words()[i as usize % NODE_WORDS]) %
num_parent_nodes as u32;
let parent = &cache[parent_index as usize];
unroll! {
@@ -467,159 +306,107 @@ fn calculate_dag_item(node_index: u32, cache: &[Node]) -> Node {
}
}
keccak_512_inplace(&mut ret.bytes);
keccak_512::inplace(ret.as_bytes_mut());
ret
}
fn light_new<T: AsRef<Path>>(cache_dir: T, block_number: u64) -> Light {
let seed_compute = SeedHashCompute::new();
let seedhash = seed_compute.get_seedhash(block_number);
let cache_size = get_cache_size(block_number);
#[cfg(test)]
mod test {
use super::*;
use std::fs;
assert!(cache_size % NODE_BYTES == 0, "Unaligned cache size");
let num_nodes = cache_size / NODE_BYTES;
let mut nodes: Vec<Node> = Vec::with_capacity(num_nodes);
unsafe {
// Use uninit instead of unnecessarily writing `size_of::<Node>() * num_nodes` 0s
nodes.set_len(num_nodes);
keccak_512(&seedhash[0..32], &mut nodes.get_unchecked_mut(0).bytes);
for i in 1..num_nodes {
hash::keccak_512(nodes.get_unchecked_mut(i).bytes.as_mut_ptr(), NODE_BYTES, nodes.get_unchecked(i - 1).bytes.as_ptr(), NODE_BYTES);
}
debug_assert_eq!(NODE_WORDS, 16);
// This _should_ get unrolled by the compiler, since it's not using the loop variable.
for _ in 0..ETHASH_CACHE_ROUNDS {
for i in 0..num_nodes {
let idx = *nodes.get_unchecked_mut(i).as_words().get_unchecked(0) as usize % num_nodes;
let mut data = nodes.get_unchecked((num_nodes - 1 + i) % num_nodes).clone();
unroll! {
for w in 0..16 {
*data.as_words_mut().get_unchecked_mut(w) ^= *nodes.get_unchecked(idx).as_words().get_unchecked(w);
}
}
keccak_512(&data.bytes, &mut nodes.get_unchecked_mut(i).bytes);
}
}
#[test]
fn test_get_cache_size() {
// https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes
assert_eq!(16776896usize, get_cache_size(0));
assert_eq!(16776896usize, get_cache_size(1));
assert_eq!(16776896usize, get_cache_size(ETHASH_EPOCH_LENGTH - 1));
assert_eq!(16907456usize, get_cache_size(ETHASH_EPOCH_LENGTH));
assert_eq!(16907456usize, get_cache_size(ETHASH_EPOCH_LENGTH + 1));
assert_eq!(284950208usize, get_cache_size(2046 * ETHASH_EPOCH_LENGTH));
assert_eq!(285081536usize, get_cache_size(2047 * ETHASH_EPOCH_LENGTH));
assert_eq!(285081536usize, get_cache_size(2048 * ETHASH_EPOCH_LENGTH - 1));
}
Light {
block_number,
cache_dir: cache_dir.as_ref().to_path_buf(),
cache: nodes,
seed_compute: Mutex::new(seed_compute),
}
}
static CHARS: &'static [u8] = b"0123456789abcdef";
fn to_hex(bytes: &[u8]) -> String {
let mut v = Vec::with_capacity(bytes.len() * 2);
for &byte in bytes.iter() {
v.push(CHARS[(byte >> 4) as usize]);
v.push(CHARS[(byte & 0xf) as usize]);
#[test]
fn test_get_data_size() {
// https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes
assert_eq!(1073739904usize, get_data_size(0));
assert_eq!(1073739904usize, get_data_size(1));
assert_eq!(1073739904usize, get_data_size(ETHASH_EPOCH_LENGTH - 1));
assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH));
assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH + 1));
assert_eq!(18236833408usize, get_data_size(2046 * ETHASH_EPOCH_LENGTH));
assert_eq!(18245220736usize, get_data_size(2047 * ETHASH_EPOCH_LENGTH));
}
unsafe { String::from_utf8_unchecked(v) }
}
#[test]
fn test_get_cache_size() {
// https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes
assert_eq!(16776896usize, get_cache_size(0));
assert_eq!(16776896usize, get_cache_size(1));
assert_eq!(16776896usize, get_cache_size(ETHASH_EPOCH_LENGTH - 1));
assert_eq!(16907456usize, get_cache_size(ETHASH_EPOCH_LENGTH));
assert_eq!(16907456usize, get_cache_size(ETHASH_EPOCH_LENGTH + 1));
assert_eq!(284950208usize, get_cache_size(2046 * ETHASH_EPOCH_LENGTH));
assert_eq!(285081536usize, get_cache_size(2047 * ETHASH_EPOCH_LENGTH));
assert_eq!(285081536usize, get_cache_size(2048 * ETHASH_EPOCH_LENGTH - 1));
}
#[test]
fn test_get_data_size() {
// https://github.com/ethereum/wiki/wiki/Ethash/ef6b93f9596746a088ea95d01ca2778be43ae68f#data-sizes
assert_eq!(1073739904usize, get_data_size(0));
assert_eq!(1073739904usize, get_data_size(1));
assert_eq!(1073739904usize, get_data_size(ETHASH_EPOCH_LENGTH - 1));
assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH));
assert_eq!(1082130304usize, get_data_size(ETHASH_EPOCH_LENGTH + 1));
assert_eq!(18236833408usize, get_data_size(2046 * ETHASH_EPOCH_LENGTH));
assert_eq!(18245220736usize, get_data_size(2047 * ETHASH_EPOCH_LENGTH));
}
#[test]
fn test_difficulty_test() {
let hash = [0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72];
let mix_hash = [0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce, 0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a, 0x64, 0x31, 0xab, 0x6d];
let nonce = 0xd7b3ac70a301a249;
let boundary_good = [0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, 0xe9, 0x7e, 0x53, 0x84];
assert_eq!(quick_get_difficulty(&hash, nonce, &mix_hash)[..], boundary_good[..]);
let boundary_bad = [0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, 0xe9, 0x7e, 0x53, 0x84];
assert!(quick_get_difficulty(&hash, nonce, &mix_hash)[..] != boundary_bad[..]);
}
#[test]
fn test_light_compute() {
let hash = [0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72];
let mix_hash = [0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce, 0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a, 0x64, 0x31, 0xab, 0x6d];
let boundary = [0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2, 0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a, 0xe9, 0x7e, 0x53, 0x84];
let nonce = 0xd7b3ac70a301a249;
// difficulty = 0x085657254bd9u64;
let light = Light::new(&::std::env::temp_dir(), 486382);
let result = light_compute(&light, &hash, nonce);
assert_eq!(result.mix_hash[..], mix_hash[..]);
assert_eq!(result.value[..], boundary[..]);
}
#[test]
fn test_seed_compute_once() {
let seed_compute = SeedHashCompute::new();
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.get_seedhash(486382), hash);
}
#[test]
fn test_seed_compute_zero() {
let seed_compute = SeedHashCompute::new();
assert_eq!(seed_compute.get_seedhash(0), [0u8; 32]);
}
#[test]
fn test_seed_compute_after_older() {
let seed_compute = SeedHashCompute::new();
// calculating an older value first shouldn't affect the result
let _ = seed_compute.get_seedhash(50000);
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.get_seedhash(486382), hash);
}
#[test]
fn test_seed_compute_after_newer() {
let seed_compute = SeedHashCompute::new();
// calculating a newer value first shouldn't affect the result
let _ = seed_compute.get_seedhash(972764);
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.get_seedhash(486382), hash);
}
#[test]
fn test_drop_old_data() {
let path = ::std::env::temp_dir();
let first = Light::new(&path, 0).to_file().unwrap();
let second = Light::new(&path, ETHASH_EPOCH_LENGTH).to_file().unwrap();
assert!(fs::metadata(&first).is_ok());
let _ = Light::new(&path, ETHASH_EPOCH_LENGTH * 2).to_file();
assert!(fs::metadata(&first).is_err());
assert!(fs::metadata(&second).is_ok());
let _ = Light::new(&path, ETHASH_EPOCH_LENGTH * 3).to_file();
assert!(fs::metadata(&second).is_err());
#[test]
fn test_difficulty_test() {
let hash = [
0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3,
0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94,
0x05, 0x52, 0x7d, 0x72,
];
let mix_hash = [
0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce,
0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a,
0x64, 0x31, 0xab, 0x6d,
];
let nonce = 0xd7b3ac70a301a249;
let boundary_good = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
0xe9, 0x7e, 0x53, 0x84,
];
assert_eq!(quick_get_difficulty(&hash, nonce, &mix_hash)[..], boundary_good[..]);
let boundary_bad = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
0xe9, 0x7e, 0x53, 0x84,
];
assert!(quick_get_difficulty(&hash, nonce, &mix_hash)[..] != boundary_bad[..]);
}
#[test]
fn test_light_compute() {
let hash = [
0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3,
0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94,
0x05, 0x52, 0x7d, 0x72,
];
let mix_hash = [
0x1f, 0xff, 0x04, 0xce, 0xc9, 0x41, 0x73, 0xfd, 0x59, 0x1e, 0x3d, 0x89, 0x60, 0xce,
0x6b, 0xdf, 0x8b, 0x19, 0x71, 0x04, 0x8c, 0x71, 0xff, 0x93, 0x7b, 0xb2, 0xd3, 0x2a,
0x64, 0x31, 0xab, 0x6d,
];
let boundary = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0x9b, 0x6c, 0x69, 0xbc, 0x2c, 0xe2, 0xa2,
0x4a, 0x8e, 0x95, 0x69, 0xef, 0xc7, 0xd7, 0x1b, 0x33, 0x35, 0xdf, 0x36, 0x8c, 0x9a,
0xe9, 0x7e, 0x53, 0x84,
];
let nonce = 0xd7b3ac70a301a249;
// difficulty = 0x085657254bd9u64;
let light = NodeCacheBuilder::new(None).light(&::std::env::temp_dir(), 486382);
let result = light_compute(&light, &hash, nonce);
assert_eq!(result.mix_hash[..], mix_hash[..]);
assert_eq!(result.value[..], boundary[..]);
}
#[test]
fn test_drop_old_data() {
let path = ::std::env::temp_dir();
let builder = NodeCacheBuilder::new(None);
let first = builder.light(&path, 0).to_file().unwrap().to_owned();
let second = builder.light(&path, ETHASH_EPOCH_LENGTH).to_file().unwrap().to_owned();
assert!(fs::metadata(&first).is_ok());
let _ = builder.light(&path, ETHASH_EPOCH_LENGTH * 2).to_file();
assert!(fs::metadata(&first).is_err());
assert!(fs::metadata(&second).is_ok());
let _ = builder.light(&path, ETHASH_EPOCH_LENGTH * 3).to_file();
assert!(fs::metadata(&second).is_err());
}
}
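
Putting `light_compute` together with the boundary check the engine performs gives the following outline; this is a hedged in-crate sketch, with `seal_is_valid` as a hypothetical helper rather than code from the commit:

// Hypothetical helper: verify a seal against the light cache.
fn seal_is_valid(light: &Light, header_hash: &H256, nonce: u64,
                 mix_hash: &H256, boundary: &H256) -> bool {
    let pow = light_compute(light, header_hash, nonce);
    // The recomputed mix hash must match the seal, and the proof-of-work
    // value must not exceed the boundary (byte-wise big-endian comparison).
    pow.mix_hash == *mix_hash && pow.value[..] <= boundary[..]
}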

ethash/src/keccak.rs (new file, 52 lines)

@@ -0,0 +1,52 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate hash;
pub type H256 = [u8; 32];
pub mod keccak_512 {
use super::hash;
pub use self::hash::keccak_512 as unchecked;
pub fn write(input: &[u8], output: &mut [u8]) {
unsafe { hash::keccak_512(output.as_mut_ptr(), output.len(), input.as_ptr(), input.len()) };
}
pub fn inplace(input: &mut [u8]) {
// This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This
// means that we can reuse the input buffer for both input and output.
unsafe { hash::keccak_512(input.as_mut_ptr(), input.len(), input.as_ptr(), input.len()) };
}
}
pub mod keccak_256 {
use super::hash;
pub use self::hash::keccak_256 as unchecked;
#[allow(dead_code)]
pub fn write(input: &[u8], output: &mut [u8]) {
unsafe { hash::keccak_256(output.as_mut_ptr(), output.len(), input.as_ptr(), input.len()) };
}
pub fn inplace(input: &mut [u8]) {
// This is safe since `keccak_*` uses an internal buffer and copies the result to the output. This
// means that we can reuse the input buffer for both input and output.
unsafe { hash::keccak_256(input.as_mut_ptr(), input.len(), input.as_ptr(), input.len()) };
}
}
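
A quick illustration of the wrappers (an in-crate sketch; the byte values are arbitrary and `wrapper_demo` is a hypothetical name):

// Sketch: checked and in-place hashing via the wrapper modules.
fn wrapper_demo() {
    let input = [0u8; 32];

    // Checked variant: lengths are taken from the slices themselves.
    let mut wide = [0u8; 64];
    keccak_512::write(&input, &mut wide);

    // In-place variant: safe because the implementation buffers internally,
    // so the input and output may alias.
    let mut digest = [0u8; 32];
    keccak_256::write(&input, &mut digest);
    keccak_256::inplace(&mut digest);
}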


@@ -14,28 +14,35 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Ethash implementation
//! See https://github.com/ethereum/wiki/wiki/Ethash
#![cfg_attr(feature = "benches", feature(test))]
extern crate primal;
extern crate hash;
extern crate parking_lot;
extern crate either;
extern crate memmap;
#[macro_use]
extern crate crunchy;
#[macro_use]
extern crate log;
mod compute;
mod compute;
mod seed_compute;
mod cache;
mod keccak;
mod shared;
pub use cache::{NodeCacheBuilder, OptimizeFor};
pub use compute::{ProofOfWork, quick_get_difficulty, slow_hash_block_number};
use compute::Light;
use keccak::H256;
use parking_lot::Mutex;
pub use seed_compute::SeedHashCompute;
pub use shared::ETHASH_EPOCH_LENGTH;
use std::mem;
use std::path::{Path, PathBuf};
use compute::Light;
pub use compute::{ETHASH_EPOCH_LENGTH, H256, ProofOfWork, SeedHashCompute, quick_get_difficulty, slow_get_seedhash};
use std::sync::Arc;
use parking_lot::Mutex;
struct LightCache {
recent_epoch: Option<u64>,
@@ -46,15 +53,17 @@ struct LightCache {
/// Light/Full cache manager.
pub struct EthashManager {
nodecache_builder: NodeCacheBuilder,
cache: Mutex<LightCache>,
cache_dir: PathBuf,
}
impl EthashManager {
/// Create a new instance of the ethash manager
pub fn new<T: AsRef<Path>>(cache_dir: T) -> EthashManager {
pub fn new<T: Into<Option<OptimizeFor>>>(cache_dir: &Path, optimize_for: T) -> EthashManager {
EthashManager {
cache_dir: cache_dir.as_ref().to_path_buf(),
cache_dir: cache_dir.to_path_buf(),
nodecache_builder: NodeCacheBuilder::new(optimize_for.into().unwrap_or_default()),
cache: Mutex::new(LightCache {
recent_epoch: None,
recent: None,
@@ -96,11 +105,19 @@ impl EthashManager {
};
match light {
None => {
let light = match Light::from_file(&self.cache_dir, block_number) {
let light = match Light::from_file_with_builder(
&self.nodecache_builder,
&self.cache_dir,
block_number,
) {
Ok(light) => Arc::new(light),
Err(e) => {
debug!("Light cache file not found for {}:{}", block_number, e);
let light = Light::new(&self.cache_dir, block_number);
let mut light = Light::new_with_builder(
&self.nodecache_builder,
&self.cache_dir,
block_number,
);
if let Err(e) = light.to_file() {
warn!("Light cache file write error: {}", e);
}
@@ -120,7 +137,7 @@ impl EthashManager {
#[test]
fn test_lru() {
let ethash = EthashManager::new(&::std::env::temp_dir());
let ethash = EthashManager::new(&::std::env::temp_dir(), None);
let hash = [0u8; 32];
ethash.compute_light(1, &hash, 1);
ethash.compute_light(50000, &hash, 1);
@@ -138,24 +155,89 @@
mod benchmarks {
extern crate test;
use compute::{Light, light_compute, SeedHashCompute};
use self::test::Bencher;
use cache::{NodeCacheBuilder, OptimizeFor};
use compute::{Light, light_compute};
const HASH: [u8; 32] = [0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe,
0xe4, 0x0a, 0xb3, 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f,
0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72];
const NONCE: u64 = 0xd7b3ac70a301a249;
#[bench]
fn bench_light_compute(b: &mut Bencher) {
use ::std::env;
fn bench_light_compute_memmap(b: &mut Bencher) {
use std::env;
let hash = [0xf5, 0x7e, 0x6f, 0x3a, 0xcf, 0xc0, 0xdd, 0x4b, 0x5b, 0xf2, 0xbe, 0xe4, 0x0a, 0xb3, 0x35, 0x8a, 0xa6, 0x87, 0x73, 0xa8, 0xd0, 0x9f, 0x5e, 0x59, 0x5e, 0xab, 0x55, 0x94, 0x05, 0x52, 0x7d, 0x72];
let nonce = 0xd7b3ac70a301a249;
let light = Light::new(env::temp_dir(), 486382);
let builder = NodeCacheBuilder::new(OptimizeFor::Memory);
let light = Light::new_with_builder(&builder, &env::temp_dir(), 486382);
b.iter(|| light_compute(&light, &hash, nonce));
b.iter(|| light_compute(&light, &HASH, NONCE));
}
#[bench]
fn bench_seedhash(b: &mut Bencher) {
let seed_compute = SeedHashCompute::new();
fn bench_light_compute_memory(b: &mut Bencher) {
use std::env;
b.iter(|| seed_compute.get_seedhash(486382));
let light = Light::new(&env::temp_dir(), 486382);
b.iter(|| light_compute(&light, &HASH, NONCE));
}
#[bench]
#[ignore]
fn bench_light_new_round_trip_memmap(b: &mut Bencher) {
use std::env;
b.iter(|| {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory);
let light = Light::new_with_builder(&builder, &env::temp_dir(), 486382);
light_compute(&light, &HASH, NONCE);
});
}
#[bench]
#[ignore]
fn bench_light_new_round_trip_memory(b: &mut Bencher) {
use std::env;
b.iter(|| {
let light = Light::new(&env::temp_dir(), 486382);
light_compute(&light, &HASH, NONCE);
});
}
#[bench]
fn bench_light_from_file_round_trip_memory(b: &mut Bencher) {
use std::env;
let dir = env::temp_dir();
let height = 486382;
{
let mut dummy = Light::new(&dir, height);
dummy.to_file().unwrap();
}
b.iter(|| {
let light = Light::from_file(&dir, 486382).unwrap();
light_compute(&light, &HASH, NONCE);
});
}
#[bench]
fn bench_light_from_file_round_trip_memmap(b: &mut Bencher) {
use std::env;
let dir = env::temp_dir();
let height = 486382;
{
let builder = NodeCacheBuilder::new(OptimizeFor::Memory);
let mut dummy = Light::new_with_builder(&builder, &dir, height);
dummy.to_file().unwrap();
}
b.iter(|| {
let builder = NodeCacheBuilder::new(OptimizeFor::Memory);
let light = Light::from_file_with_builder(&builder, &dir, 486382).unwrap();
light_compute(&light, &HASH, NONCE);
});
}
}
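
At the crate boundary the new knob surfaces through `EthashManager::new`; a minimal consumer sketch under the API shown in this diff (the cache directory is an example value, not one the PR uses):

extern crate ethash;

use ethash::{EthashManager, OptimizeFor};
use std::path::Path;

fn main() {
    // Memory mode maps the DAG cache from disk, which is what the light
    // client flag added in this PR selects; pass `None` to keep the
    // RAM-backed default.
    let manager = EthashManager::new(Path::new("/tmp/parity-ethash"), OptimizeFor::Memory);
    let header_hash = [0u8; 32];
    let pow = manager.compute_light(1, &header_hash, 1);
    println!("mix: {:?} value: {:?}", &pow.mix_hash[..], &pow.value[..]);
}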

ethash/src/seed_compute.rs (new file, 109 lines)

@@ -0,0 +1,109 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use shared;
use keccak::{keccak_256, H256};
use std::cell::Cell;
pub struct SeedHashCompute {
prev_epoch: Cell<u64>,
prev_seedhash: Cell<H256>,
}
impl SeedHashCompute {
#[inline]
pub fn new() -> SeedHashCompute {
SeedHashCompute {
prev_epoch: Cell::new(0),
prev_seedhash: Cell::new([0u8; 32]),
}
}
#[inline]
fn reset_cache(&self) {
self.prev_epoch.set(0);
self.prev_seedhash.set([0u8; 32]);
}
#[inline]
pub fn hash_block_number(&self, block_number: u64) -> H256 {
self.hash_epoch(shared::epoch(block_number))
}
#[inline]
pub fn hash_epoch(&self, epoch: u64) -> H256 {
if epoch < self.prev_epoch.get() {
// can't build on previous hash if requesting an older block
self.reset_cache();
}
if epoch > self.prev_epoch.get() {
let seed_hash = SeedHashCompute::resume_compute_seedhash(
self.prev_seedhash.get(),
self.prev_epoch.get(),
epoch,
);
self.prev_seedhash.set(seed_hash);
self.prev_epoch.set(epoch);
}
self.prev_seedhash.get()
}
#[inline]
pub fn resume_compute_seedhash(mut hash: H256, start_epoch: u64, end_epoch: u64) -> H256 {
for _ in start_epoch..end_epoch {
keccak_256::inplace(&mut hash);
}
hash
}
}
#[cfg(test)]
mod tests {
use super::SeedHashCompute;
#[test]
fn test_seed_compute_once() {
let seed_compute = SeedHashCompute::new();
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.hash_block_number(486382), hash);
}
#[test]
fn test_seed_compute_zero() {
let seed_compute = SeedHashCompute::new();
assert_eq!(seed_compute.hash_block_number(0), [0u8; 32]);
}
#[test]
fn test_seed_compute_after_older() {
let seed_compute = SeedHashCompute::new();
// calculating an older value first shouldn't affect the result
let _ = seed_compute.hash_block_number(50000);
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.hash_block_number(486382), hash);
}
#[test]
fn test_seed_compute_after_newer() {
let seed_compute = SeedHashCompute::new();
// calculating a newer value first shouldn't affect the result
let _ = seed_compute.hash_block_number(972764);
let hash = [241, 175, 44, 134, 39, 121, 245, 239, 228, 236, 43, 160, 195, 152, 46, 7, 199, 5, 253, 147, 241, 206, 98, 43, 3, 104, 17, 40, 192, 79, 106, 162];
assert_eq!(seed_compute.hash_block_number(486382), hash);
}
}
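
The seed hash is just keccak-256 iterated once per epoch; the `Cell`s above only memoise that chain. A small in-crate sketch of the equivalence (the block number comes from the tests, and `seedhash_equivalence` is a hypothetical name):

// Sketch: the cached path and the from-scratch path agree.
fn seedhash_equivalence() {
    let compute = SeedHashCompute::new();
    // Block 486382 falls in epoch 16 (486382 / 30000 == 16).
    let cached = compute.hash_block_number(486_382);
    // Sixteen keccak-256 rounds over 32 zero bytes, with no caching.
    let from_scratch = SeedHashCompute::resume_compute_seedhash([0u8; 32], 0, 16);
    assert_eq!(cached, from_scratch);
}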

ethash/src/shared.rs (new file, 149 lines)

@@ -0,0 +1,149 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use primal::is_prime;
pub const DATASET_BYTES_INIT: u64 = 1 << 30;
pub const DATASET_BYTES_GROWTH: u64 = 1 << 23;
pub const CACHE_BYTES_INIT: u64 = 1 << 24;
pub const CACHE_BYTES_GROWTH: u64 = 1 << 17;
pub const ETHASH_EPOCH_LENGTH: u64 = 30000;
pub const ETHASH_CACHE_ROUNDS: usize = 3;
pub const ETHASH_MIX_BYTES: usize = 128;
pub const ETHASH_ACCESSES: usize = 64;
pub const ETHASH_DATASET_PARENTS: u32 = 256;
pub const NODE_DWORDS: usize = NODE_WORDS / 2;
pub const NODE_WORDS: usize = NODE_BYTES / 4;
pub const NODE_BYTES: usize = 64;
pub fn epoch(block_number: u64) -> u64 {
block_number / ETHASH_EPOCH_LENGTH
}
static CHARS: &'static [u8] = b"0123456789abcdef";
pub fn to_hex(bytes: &[u8]) -> String {
let mut v = Vec::with_capacity(bytes.len() * 2);
for &byte in bytes.iter() {
v.push(CHARS[(byte >> 4) as usize]);
v.push(CHARS[(byte & 0xf) as usize]);
}
unsafe { String::from_utf8_unchecked(v) }
}
pub fn get_cache_size(block_number: u64) -> usize {
// TODO: Memoise
let mut sz: u64 = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH);
sz = sz - NODE_BYTES as u64;
while !is_prime(sz / NODE_BYTES as u64) {
sz = sz - 2 * NODE_BYTES as u64;
}
sz as usize
}
pub fn get_data_size(block_number: u64) -> usize {
// TODO: Memoise
let mut sz: u64 = DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number / ETHASH_EPOCH_LENGTH);
sz = sz - ETHASH_MIX_BYTES as u64;
while !is_prime(sz / ETHASH_MIX_BYTES as u64) {
sz = sz - 2 * ETHASH_MIX_BYTES as u64;
}
sz as usize
}
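// Worked example (cf. `test_get_cache_size` in compute.rs): for block 0 the
// size starts at 2^24 - 64 = 16777152 bytes, i.e. 262143 nodes. 262143 is
// divisible by 3, so the loop steps down two nodes (128 bytes) at a time
// until the node count is prime, settling at 262139 nodes = 16776896 bytes.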
pub type NodeBytes = [u8; NODE_BYTES];
pub type NodeWords = [u32; NODE_WORDS];
pub type NodeDwords = [u64; NODE_DWORDS];
macro_rules! static_assert_size_eq {
(@inner $a:ty, $b:ty, $($rest:ty),*) => {
fn first() {
static_assert_size_eq!($a, $b);
}
fn second() {
static_assert_size_eq!($b, $($rest),*);
}
};
(@inner $a:ty, $b:ty) => {
unsafe {
let val: $b = ::std::mem::uninitialized();
let _: $a = ::std::mem::transmute(val);
}
};
($($rest:ty),*) => {
static_assert_size_eq!(size_eq: $($rest),*);
};
($name:ident : $($rest:ty),*) => {
#[allow(dead_code)]
fn $name() {
static_assert_size_eq!(@inner $($rest),*);
}
};
}
static_assert_size_eq!(Node, NodeBytes, NodeWords, NodeDwords);
#[repr(C)]
pub union Node {
pub dwords: NodeDwords,
pub words: NodeWords,
pub bytes: NodeBytes,
}
impl Clone for Node {
fn clone(&self) -> Self {
unsafe { Node { bytes: *&self.bytes } }
}
}
// We use `inline(always)` because I was experiencing an 100% slowdown and `perf` showed that these
// calls were taking up ~30% of the runtime. Adding these annotations fixes the issue. Remove at
// your peril, if and only if you have benchmarks to prove that this doesn't reintroduce the
// performance regression. It's not caused by the `debug_assert_eq!` either, your guess is as good
// as mine.
impl Node {
#[inline(always)]
pub fn as_bytes(&self) -> &NodeBytes {
unsafe { &self.bytes }
}
#[inline(always)]
pub fn as_bytes_mut(&mut self) -> &mut NodeBytes {
unsafe { &mut self.bytes }
}
#[inline(always)]
pub fn as_words(&self) -> &NodeWords {
unsafe { &self.words }
}
#[inline(always)]
pub fn as_words_mut(&mut self) -> &mut NodeWords {
unsafe { &mut self.words }
}
#[inline(always)]
pub fn as_dwords(&self) -> &NodeDwords {
unsafe { &self.dwords }
}
#[inline(always)]
pub fn as_dwords_mut(&mut self) -> &mut NodeDwords {
unsafe { &mut self.dwords }
}
}
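
For comparison with the accessors this union replaces (see the removed compute.rs code above, which used `mem::transmute`), a sketch of the two spellings; `as_words_transmute` and `as_words_union` are hypothetical names:

// Old spelling: the caller is responsible for the two sizes matching.
fn as_words_transmute(bytes: &[u8; NODE_BYTES]) -> &[u32; NODE_WORDS] {
    unsafe { ::std::mem::transmute(bytes) }
}

// New spelling: the size relationship is stated once in the union and
// pinned down at compile time by `static_assert_size_eq!`.
fn as_words_union(node: &Node) -> &NodeWords {
    node.as_words()
}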


@@ -133,7 +133,7 @@ mod test {
fn node_filter() {
let contract_addr = Address::from_str("0000000000000000000000000000000000000005").unwrap();
let data = include_bytes!("../res/node_filter.json");
let spec = Spec::load(::std::env::temp_dir(), &data[..]).unwrap();
let spec = Spec::load(&::std::env::temp_dir(), &data[..]).unwrap();
let client_db = Arc::new(::util::kvdb::in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0)));
let client = Client::new(


@@ -270,7 +270,7 @@ mod tests {
/// Create a new test chain spec with `BasicAuthority` consensus engine.
fn new_test_authority() -> Spec {
let bytes: &[u8] = include_bytes!("../../res/basic_authority.json");
Spec::load(::std::env::temp_dir(), bytes).expect("invalid chain spec")
Spec::load(&::std::env::temp_dir(), bytes).expect("invalid chain spec")
}
#[test]


@@ -19,7 +19,7 @@ use std::cmp;
use std::collections::{BTreeMap, HashMap};
use std::sync::{Arc, Weak};
use hash::{KECCAK_EMPTY_LIST_RLP};
use ethash::{quick_get_difficulty, slow_get_seedhash, EthashManager};
use ethash::{quick_get_difficulty, slow_hash_block_number, EthashManager, OptimizeFor};
use bigint::prelude::U256;
use bigint::hash::{H256, H64};
use util::*;
@@ -161,18 +161,19 @@ pub struct Ethash {
impl Ethash {
/// Create a new instance of Ethash engine
pub fn new<T: AsRef<Path>>(
cache_dir: T,
pub fn new<T: Into<Option<OptimizeFor>>>(
cache_dir: &Path,
params: CommonParams,
ethash_params: EthashParams,
builtins: BTreeMap<Address, Builtin>,
optimize_for: T,
) -> Arc<Self> {
Arc::new(Ethash {
tx_filter: TransactionFilter::from_params(&params),
params,
ethash_params,
builtins,
pow: EthashManager::new(cache_dir),
pow: EthashManager::new(cache_dir.as_ref(), optimize_for.into()),
})
}
}
@@ -416,7 +417,7 @@ impl Engine for Arc<Ethash> {
let result = self.pow.compute_light(header.number() as u64, &header.bare_hash().0, header.nonce().low_u64());
let mix = H256(result.mix_hash);
let difficulty = Ethash::boundary_to_difficulty(&H256(result.value));
trace!(target: "miner", "num: {}, seed: {}, h: {}, non: {}, mix: {}, res: {}" , header.number() as u64, H256(slow_get_seedhash(header.number() as u64)), header.bare_hash(), header.nonce().low_u64(), H256(result.mix_hash), H256(result.value));
trace!(target: "miner", "num: {}, seed: {}, h: {}, non: {}, mix: {}, res: {}" , header.number() as u64, H256(slow_hash_block_number(header.number() as u64)), header.bare_hash(), header.nonce().low_u64(), H256(result.mix_hash), H256(result.value));
if mix != header.mix_hash() {
return Err(From::from(BlockError::MismatchedH256SealElement(Mismatch { expected: mix, found: header.mix_hash() })));
}
@@ -849,7 +850,7 @@ mod tests {
fn difficulty_frontier() {
let spec = new_homestead_test();
let ethparams = get_default_ethash_params();
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new(), None);
let mut parent_header = Header::default();
parent_header.set_number(1000000);
@@ -867,7 +868,7 @@
fn difficulty_homestead() {
let spec = new_homestead_test();
let ethparams = get_default_ethash_params();
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new(), None);
let mut parent_header = Header::default();
parent_header.set_number(1500000);
@@ -920,7 +921,7 @@
ecip1010_pause_transition: 3000000,
..get_default_ethash_params()
};
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new(), None);
let mut parent_header = Header::default();
parent_header.set_number(3500000);
@@ -954,7 +955,7 @@
ecip1010_continue_transition: 5000000,
..get_default_ethash_params()
};
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new(), None);
let mut parent_header = Header::default();
parent_header.set_number(5000102);
@@ -1000,7 +1001,7 @@
fn gas_limit_is_multiple_of_determinant() {
let spec = new_homestead_test();
let ethparams = get_default_ethash_params();
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new(), None);
let mut parent = Header::new();
let mut header = Header::new();
header.set_number(1);
@@ -1044,7 +1045,7 @@
fn difficulty_max_timestamp() {
let spec = new_homestead_test();
let ethparams = get_default_ethash_params();
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new(), None);
let mut parent_header = Header::default();
parent_header.set_number(1000000);
@@ -1072,7 +1073,7 @@
header.set_number(parent_header.number() + 1);
header.set_gas_limit(100_001.into());
header.set_difficulty(ethparams.minimum_difficulty);
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new(), None);
assert!(ethash.verify_block_family(&header, &parent_header, None).is_ok());
parent_header.set_number(9);
@@ -1127,7 +1128,7 @@
nonce: U256::zero(),
}.sign(keypair.secret(), None).into();
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new());
let ethash = Ethash::new(&::std::env::temp_dir(), spec.params().clone(), ethparams, BTreeMap::new(), None);
assert!(ethash.verify_transaction_basic(&tx1, &header).is_ok());
assert!(ethash.verify_transaction_basic(&tx2, &header).is_ok());


@@ -27,7 +27,6 @@ pub mod denominations;
pub use self::ethash::{Ethash};
pub use self::denominations::*;
use std::path::Path;
use super::spec::*;
/// Most recent fork block that we support on Mainnet.
@@ -39,33 +38,47 @@ pub const FORK_SUPPORTED_ROPSTEN: u64 = 10;
/// Most recent fork block that we support on Kovan.
pub const FORK_SUPPORTED_KOVAN: u64 = 0;
fn load<'a, T: 'a + Into<Option<&'a Path>>>(cache_dir: T, b: &[u8]) -> Spec {
match cache_dir.into() {
Some(path) => Spec::load(path, b),
fn load<'a, T: Into<Option<SpecParams<'a>>>>(params: T, b: &[u8]) -> Spec {
match params.into() {
Some(params) => Spec::load(params, b),
None => Spec::load(&::std::env::temp_dir(), b)
}.expect("chain spec is invalid")
}
/// Create a new Foundation Olympic chain spec.
pub fn new_olympic(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/olympic.json")) }
pub fn new_olympic<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
load(params.into(), include_bytes!("../../res/ethereum/olympic.json"))
}
/// Create a new Foundation Mainnet chain spec.
pub fn new_foundation(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/foundation.json")) }
pub fn new_foundation<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
load(params.into(), include_bytes!("../../res/ethereum/foundation.json"))
}
/// Create a new Classic Mainnet chain spec without the DAO hardfork.
pub fn new_classic(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/classic.json")) }
pub fn new_classic<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
load(params.into(), include_bytes!("../../res/ethereum/classic.json"))
}
/// Create a new Expanse mainnet chain spec.
pub fn new_expanse(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/expanse.json")) }
pub fn new_expanse<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
load(params.into(), include_bytes!("../../res/ethereum/expanse.json"))
}
/// Create a new Kovan testnet chain spec.
pub fn new_kovan(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/kovan.json")) }
pub fn new_kovan<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
load(params.into(), include_bytes!("../../res/ethereum/kovan.json"))
}
/// Create a new Foundation Ropsten chain spec.
pub fn new_ropsten(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/ropsten.json")) }
pub fn new_ropsten<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
load(params.into(), include_bytes!("../../res/ethereum/ropsten.json"))
}
/// Create a new Morden chain spec.
pub fn new_morden(cache_dir: &Path) -> Spec { load(cache_dir, include_bytes!("../../res/ethereum/morden.json")) }
pub fn new_morden<'a, T: Into<SpecParams<'a>>>(params: T) -> Spec {
load(params.into(), include_bytes!("../../res/ethereum/morden.json"))
}
// For tests


@@ -52,7 +52,8 @@ pub fn run_test_file(path: &Path, runner: fn (json_data: &[u8]) -> Vec<String>)
let mut file = File::open(&path).expect("Error opening test file");
file.read_to_end(&mut data).expect("Error reading test file");
let results = runner(&data);
assert!(results.is_empty());
let empty: [String; 0] = [];
assert_eq!(results, empty);
}
macro_rules! test {


@@ -171,7 +171,7 @@ impl StratumJobDispatcher {
fn payload(&self, pow_hash: H256, difficulty: U256, number: u64) -> String {
// TODO: move this to engine
let target = Ethash::difficulty_to_boundary(&difficulty);
let seed_hash = &self.seed_compute.lock().get_seedhash(number);
let seed_hash = &self.seed_compute.lock().hash_block_number(number);
let seed_hash = H256::from_slice(&seed_hash[..]);
format!(
r#"["0x", "0x{}","0x{}","0x{}","0x{:x}"]"#,


@@ -72,7 +72,7 @@ impl NotifyWork for WorkPoster {
fn notify(&self, pow_hash: H256, difficulty: U256, number: u64) {
// TODO: move this to engine
let target = Ethash::difficulty_to_boundary(&difficulty);
-		let seed_hash = &self.seed_compute.lock().get_seedhash(number);
+		let seed_hash = &self.seed_compute.lock().hash_block_number(number);
let seed_hash = H256::from_slice(&seed_hash[..]);
let body = format!(
r#"{{ "result": ["0x{}","0x{}","0x{}","0x{:x}"] }}"#,

View File

@ -16,34 +16,37 @@
//! Parameters for a block chain.
-use std::io::Read;
-use std::collections::BTreeMap;
-use std::path::Path;
-use std::sync::Arc;
-use rustc_hex::FromHex;
-use hash::{KECCAK_NULL_RLP, keccak};
use super::genesis::Genesis;
use super::seal::Generic as GenericSeal;
+use bigint::hash::{H2048, H256};
+use bigint::prelude::U256;
use builtin::Builtin;
-use engines::{Engine, NullEngine, InstantSeal, BasicAuthority, AuthorityRound, Tendermint, DEFAULT_BLOCKHASH_CONTRACT};
-use vm::{EnvInfo, CallType, ActionValue, ActionParams};
+use bytes::Bytes;
+use engines::{AuthorityRound, BasicAuthority, DEFAULT_BLOCKHASH_CONTRACT, Engine, InstantSeal,
+              NullEngine, Tendermint};
use error::Error;
+pub use ethash::OptimizeFor;
use ethereum;
use ethjson;
use executive::Executive;
use factory::Factories;
+use hash::{KECCAK_NULL_RLP, keccak};
use header::{BlockNumber, Header};
+use parking_lot::RwLock;
use pod_state::*;
use rlp::{Rlp, RlpStream};
+use rustc_hex::FromHex;
use state::{Backend, State, Substate};
use state::backend::Basic as BasicBackend;
+use std::collections::BTreeMap;
+use std::io::Read;
+use std::path::Path;
+use std::sync::Arc;
use trace::{NoopTracer, NoopVMTracer};
-use bigint::prelude::U256;
-use bigint::hash::{H256, H2048};
-use parking_lot::RwLock;
use util::*;
-use bytes::Bytes;
+use vm::{ActionParams, ActionValue, CallType, EnvInfo};
/// Parameters common to ethereum-like blockchains.
/// NOTE: when adding bugfix hard-fork parameters,
@ -88,7 +91,8 @@ pub struct CommonParams {
pub eip210_contract_code: Bytes,
/// Gas allocated for EIP-210 blockhash update.
pub eip210_contract_gas: U256,
-	/// Number of first block where EIP-211 (Metropolis: RETURNDATASIZE/RETURNDATACOPY) rules begin.
+	/// Number of first block where EIP-211 (Metropolis: RETURNDATASIZE/RETURNDATACOPY) rules
+	/// begin.
pub eip211_transition: BlockNumber,
/// Number of first block where EIP-214 rules begin.
pub eip214_transition: BlockNumber,
@ -121,7 +125,7 @@ impl CommonParams {
}
/// Apply common spec config parameters to the schedule.
	pub fn update_schedule(&self, block_number: u64, schedule: &mut ::vm::Schedule) {
schedule.have_create2 = block_number >= self.eip86_transition;
schedule.have_revert = block_number >= self.eip140_transition;
schedule.have_static_call = block_number >= self.eip214_transition;
@ -139,14 +143,10 @@ impl CommonParams {
/// Whether these params contain any bug-fix hard forks.
pub fn contains_bugfix_hard_fork(&self) -> bool {
-		self.eip98_transition != 0 &&
-			self.eip155_transition != 0 &&
-			self.validate_receipts_transition != 0 &&
-			self.eip86_transition != 0 &&
-			self.eip140_transition != 0 &&
-			self.eip210_transition != 0 &&
-			self.eip211_transition != 0 &&
-			self.eip214_transition != 0 &&
+		self.eip98_transition != 0 && self.eip155_transition != 0 &&
+			self.validate_receipts_transition != 0 && self.eip86_transition != 0 &&
+			self.eip140_transition != 0 && self.eip210_transition != 0 &&
+			self.eip211_transition != 0 && self.eip214_transition != 0 &&
self.dust_protection_transition != 0
}
}
@ -157,25 +157,59 @@ impl From<ethjson::spec::Params> for CommonParams {
account_start_nonce: p.account_start_nonce.map_or_else(U256::zero, Into::into),
maximum_extra_data_size: p.maximum_extra_data_size.into(),
network_id: p.network_id.into(),
-			chain_id: if let Some(n) = p.chain_id { n.into() } else { p.network_id.into() },
+			chain_id: if let Some(n) = p.chain_id {
+				n.into()
+			} else {
+				p.network_id.into()
+			},
			subprotocol_name: p.subprotocol_name.unwrap_or_else(|| "eth".to_owned()),
			min_gas_limit: p.min_gas_limit.into(),
-			fork_block: if let (Some(n), Some(h)) = (p.fork_block, p.fork_hash) { Some((n.into(), h.into())) } else { None },
+			fork_block: if let (Some(n), Some(h)) = (p.fork_block, p.fork_hash) {
+				Some((n.into(), h.into()))
+			} else {
+				None
+			},
eip98_transition: p.eip98_transition.map_or(0, Into::into),
eip155_transition: p.eip155_transition.map_or(0, Into::into),
validate_receipts_transition: p.validate_receipts_transition.map_or(0, Into::into),
-			eip86_transition: p.eip86_transition.map_or(BlockNumber::max_value(), Into::into),
-			eip140_transition: p.eip140_transition.map_or(BlockNumber::max_value(), Into::into),
-			eip210_transition: p.eip210_transition.map_or(BlockNumber::max_value(), Into::into),
+			eip86_transition: p.eip86_transition.map_or(
+				BlockNumber::max_value(),
+				Into::into,
+			),
+			eip140_transition: p.eip140_transition.map_or(
+				BlockNumber::max_value(),
+				Into::into,
+			),
+			eip210_transition: p.eip210_transition.map_or(
+				BlockNumber::max_value(),
+				Into::into,
+			),
eip210_contract_address: p.eip210_contract_address.map_or(0xf0.into(), Into::into),
eip210_contract_code: p.eip210_contract_code.map_or_else(
-				|| DEFAULT_BLOCKHASH_CONTRACT.from_hex().expect("Default BLOCKHASH contract is valid"),
-				Into::into),
+				|| {
+					DEFAULT_BLOCKHASH_CONTRACT.from_hex().expect(
+						"Default BLOCKHASH contract is valid",
+					)
+				},
+				Into::into,
+			),
eip210_contract_gas: p.eip210_contract_gas.map_or(1000000.into(), Into::into),
-			eip211_transition: p.eip211_transition.map_or(BlockNumber::max_value(), Into::into),
-			eip214_transition: p.eip214_transition.map_or(BlockNumber::max_value(), Into::into),
-			eip658_transition: p.eip658_transition.map_or(BlockNumber::max_value(), Into::into),
-			dust_protection_transition: p.dust_protection_transition.map_or(BlockNumber::max_value(), Into::into),
+			eip211_transition: p.eip211_transition.map_or(
+				BlockNumber::max_value(),
+				Into::into,
+			),
+			eip214_transition: p.eip214_transition.map_or(
+				BlockNumber::max_value(),
+				Into::into,
+			),
+			eip658_transition: p.eip658_transition.map_or(
+				BlockNumber::max_value(),
+				Into::into,
+			),
+			dust_protection_transition: p.dust_protection_transition.map_or(
+				BlockNumber::max_value(),
+				Into::into,
+			),
nonce_cap_increment: p.nonce_cap_increment.map_or(64, Into::into),
remove_dust_contracts: p.remove_dust_contracts.unwrap_or(false),
wasm: p.wasm.unwrap_or(false),
@ -188,6 +222,44 @@ impl From<ethjson::spec::Params> for CommonParams {
}
}
/// Runtime parameters for the spec that are related to how the software should run the chain,
/// rather than integral properties of the chain itself.
#[derive(Debug, Clone, Copy)]
pub struct SpecParams<'a> {
	/// The path to the folder used to cache nodes. This is typically /tmp/ on Unix-like systems.
	pub cache_dir: &'a Path,
	/// Whether to run slower at the expense of better memory usage, or run faster while using
	/// more memory. This may get more fine-grained in the future but for now is simply a binary
	/// option.
pub optimization_setting: Option<OptimizeFor>,
}
impl<'a> SpecParams<'a> {
/// Create from a cache path, with null values for the other fields
pub fn from_path(path: &'a Path) -> Self {
SpecParams {
cache_dir: path,
optimization_setting: None,
}
}
/// Create from a cache path and an optimization setting
pub fn new(path: &'a Path, optimization: OptimizeFor) -> Self {
SpecParams {
cache_dir: path,
optimization_setting: Some(optimization),
}
}
}
impl<'a, T: AsRef<Path>> From<&'a T> for SpecParams<'a> {
fn from(path: &'a T) -> Self {
Self::from_path(path.as_ref())
}
}
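The blanket `From<&T>` impl is what keeps old call sites terse: any borrowed `AsRef<Path>` coerces into a `SpecParams` whose `optimization_setting` is `None`, i.e. no explicit preference. A small sketch of the conversion, written as if inside this module:

	use std::path::PathBuf;

	fn demo(cache: &PathBuf) {
		let params: SpecParams = cache.into();
		assert_eq!(params.optimization_setting, None);
		assert_eq!(params.cache_dir, cache.as_path());
	}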
/// Parameters for a block chain; includes both those intrinsic to the design of the
/// chain and those to be interpreted by the active chain engine.
pub struct Spec {
@ -258,15 +330,19 @@ impl Clone for Spec {
}
/// Load from JSON object.
-	pub fn load_from<T: AsRef<Path>>(cache_dir: T, s: ethjson::spec::Spec) -> Result<Spec, Error> {
-		let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), From::from(p.1))).collect();
+	fn load_from(spec_params: SpecParams, s: ethjson::spec::Spec) -> Result<Spec, Error> {
+		let builtins = s.accounts
+			.builtins()
+			.into_iter()
+			.map(|p| (p.0.into(), From::from(p.1)))
+			.collect();
let g = Genesis::from(s.genesis);
let GenericSeal(seal_rlp) = g.seal.into();
let params = CommonParams::from(s.params);
let mut s = Spec {
name: s.name.clone().into(),
-		engine: Spec::engine(cache_dir, s.engine, params, builtins),
+		engine: Spec::engine(spec_params, s.engine, params, builtins),
data_dir: s.data_dir.unwrap_or(s.name).into(),
nodes: s.nodes.unwrap_or_else(Vec::new),
parent_hash: g.parent_hash,
@ -279,7 +355,11 @@ pub fn load_from<T: AsRef<Path>>(cache_dir: T, s: ethjson::spec::Spec) -> Result
timestamp: g.timestamp,
extra_data: g.extra_data,
seal_rlp: seal_rlp,
-		constructors: s.accounts.constructors().into_iter().map(|(a, c)| (a.into(), c.into())).collect(),
+		constructors: s.accounts
+			.constructors()
+			.into_iter()
+			.map(|(a, c)| (a.into(), c.into()))
+			.collect(),
state_root_memo: RwLock::new(Default::default()), // will be overwritten right after.
genesis_state: s.accounts.into(),
};
@ -287,7 +367,12 @@ pub fn load_from<T: AsRef<Path>>(cache_dir: T, s: ethjson::spec::Spec) -> Result
// use memoized state root if provided.
match g.state_root {
Some(root) => *s.state_root_memo.get_mut() = root,
-		None => { let _ = s.run_constructors(&Default::default(), BasicBackend(MemoryDB::new()))?; },
+		None => {
+			let _ = s.run_constructors(
+				&Default::default(),
+				BasicBackend(MemoryDB::new()),
+			)?;
+		}
}
Ok(s)
@ -305,8 +390,8 @@ macro_rules! load_bundled {
impl Spec {
	/// Convert engine spec into an arc'd Engine of the right underlying type.
/// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead.
-	fn engine<T: AsRef<Path>>(
-		cache_dir: T,
+	fn engine(
+		spec_params: SpecParams,
engine_spec: ethjson::spec::Engine,
params: CommonParams,
builtins: BTreeMap<Address, Builtin>,
@ -314,14 +399,43 @@ impl Spec {
match engine_spec {
ethjson::spec::Engine::Null => Arc::new(NullEngine::new(params, builtins)),
ethjson::spec::Engine::InstantSeal => Arc::new(InstantSeal::new(params, builtins)),
-			ethjson::spec::Engine::Ethash(ethash) => Arc::new(ethereum::Ethash::new(cache_dir, params, From::from(ethash.params), builtins)),
-			ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(BasicAuthority::new(params, From::from(basic_authority.params), builtins)),
-			ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(params, From::from(authority_round.params), builtins).expect("Failed to start AuthorityRound consensus engine."),
-			ethjson::spec::Engine::Tendermint(tendermint) => Tendermint::new(params, From::from(tendermint.params), builtins).expect("Failed to start the Tendermint consensus engine."),
+			ethjson::spec::Engine::Ethash(ethash) => Arc::new(ethereum::Ethash::new(
+				spec_params.cache_dir,
+				params,
+				From::from(ethash.params),
+				builtins,
+				spec_params.optimization_setting,
+			)),
+			ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(
+				BasicAuthority::new(
+					params,
+					From::from(
+						basic_authority.params,
+					),
+					builtins,
+				),
+			),
+			ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(
+				params,
+				From::from(
+					authority_round.params,
+				),
+				builtins,
+			).expect(
+				"Failed to start AuthorityRound consensus engine.",
+			),
+			ethjson::spec::Engine::Tendermint(tendermint) => Tendermint::new(
+				params,
+				From::from(tendermint.params),
+				builtins,
+			).expect(
+				"Failed to start the Tendermint consensus engine.",
+			),
}
}
-	// given a pre-constructor state, run all the given constructors and produce a new state and state root.
+	// given a pre-constructor state, run all the given constructors and produce a new state and
+	// state root.
fn run_constructors<T: Backend>(&self, factories: &Factories, mut db: T) -> Result<T, Error> {
let mut root = KECCAK_NULL_RLP;
@ -337,20 +451,18 @@ impl Spec {
for (address, account) in self.genesis_state.get().iter() {
db.note_non_null_account(address);
account.insert_additional(
-				&mut *factories.accountdb.create(db.as_hashdb_mut(), keccak(address)),
-				&factories.trie
+				&mut *factories.accountdb.create(
+					db.as_hashdb_mut(),
+					keccak(address),
+				),
+				&factories.trie,
}
let start_nonce = self.engine.account_start_nonce(0);
let (root, db) = {
-			let mut state = State::from_existing(
-				db,
-				root,
-				start_nonce,
-				factories.clone(),
-			)?;
+			let mut state = State::from_existing(db, root, start_nonce, factories.clone())?;
// Execute contract constructors.
let env_info = EnvInfo {
@ -385,7 +497,14 @@ impl Spec {
{
let mut exec = Executive::new(&mut state, &env_info, self.engine.as_ref());
-				if let Err(e) = exec.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer) {
+				if let Err(e) = exec.create(
+					params,
+					&mut substate,
+					&mut None,
+					&mut NoopTracer,
+					&mut NoopVMTracer,
+				)
+				{
warn!(target: "spec", "Genesis constructor execution at {} failed: {}.", address, e);
}
}
@ -410,22 +529,34 @@ impl Spec {
}
/// Get common blockchain parameters.
-	pub fn params(&self) -> &CommonParams { &self.engine.params() }
+	pub fn params(&self) -> &CommonParams {
+		&self.engine.params()
+	}
	/// Get the known nodes of the network in enode format.
-	pub fn nodes(&self) -> &[String] { &self.nodes }
+	pub fn nodes(&self) -> &[String] {
+		&self.nodes
+	}
	/// Get the configured Network ID.
-	pub fn network_id(&self) -> u64 { self.params().network_id }
+	pub fn network_id(&self) -> u64 {
+		self.params().network_id
+	}
	/// Get the chain ID used for signing.
-	pub fn chain_id(&self) -> u64 { self.params().chain_id }
+	pub fn chain_id(&self) -> u64 {
+		self.params().chain_id
+	}
	/// Get the configured subprotocol name.
-	pub fn subprotocol_name(&self) -> String { self.params().subprotocol_name.clone() }
+	pub fn subprotocol_name(&self) -> String {
+		self.params().subprotocol_name.clone()
+	}
	/// Get the configured network fork block.
-	pub fn fork_block(&self) -> Option<(BlockNumber, H256)> { self.params().fork_block }
+	pub fn fork_block(&self) -> Option<(BlockNumber, H256)> {
+		self.params().fork_block
+	}
/// Get the header of the genesis block.
pub fn genesis_header(&self) -> Header {
@ -480,7 +611,10 @@ impl Spec {
/// Alter the value of the genesis state.
pub fn set_genesis_state(&mut self, s: PodState) -> Result<(), Error> {
self.genesis_state = s;
-		let _ = self.run_constructors(&Default::default(), BasicBackend(MemoryDB::new()))?;
+		let _ = self.run_constructors(
+			&Default::default(),
+			BasicBackend(MemoryDB::new()),
+		)?;
Ok(())
}
@ -496,7 +630,7 @@ impl Spec {
	/// Ensure that the given state DB contains the trie nodes for the genesis state.
pub fn ensure_db_good<T: Backend>(&self, db: T, factories: &Factories) -> Result<T, Error> {
if db.as_hashdb().contains(&self.state_root()) {
-			return Ok(db)
+			return Ok(db);
}
// TODO: could optimize so we don't re-run, but `ensure_db_good` is barely ever
@ -507,13 +641,19 @@ impl Spec {
/// Loads spec from json file. Provide factories for executing contracts and ensuring
/// storage goes to the right place.
-	pub fn load<T: AsRef<Path>, R>(cache_dir: T, reader: R) -> Result<Self, String> where R: Read {
+	pub fn load<'a, T: Into<SpecParams<'a>>, R>(params: T, reader: R) -> Result<Self, String>
+	where
+		R: Read,
+	{
fn fmt<F: ::std::fmt::Display>(f: F) -> String {
format!("Spec json is invalid: {}", f)
}
-		ethjson::spec::Spec::load(reader).map_err(fmt)
-			.and_then(|x| load_from(cache_dir, x).map_err(fmt))
+		ethjson::spec::Spec::load(reader).map_err(fmt).and_then(
+			|x| {
+				load_from(params.into(), x).map_err(fmt)
+			},
+		)
}
/// initialize genesis epoch data, using in-memory database for
@ -543,7 +683,7 @@ impl Spec {
difficulty: *genesis.difficulty(),
gas_limit: *genesis.gas_limit(),
last_hashes: Arc::new(Vec::new()),
-			gas_used: 0.into()
+			gas_used: 0.into(),
};
let from = Address::default();
@ -566,86 +706,139 @@ impl Spec {
true,
);
-			res.map(|(out, proof)| (out, proof.into_iter().map(|x| x.into_vec()).collect()))
-				.ok_or_else(|| "Failed to prove call: insufficient state".into())
+			res.map(|(out, proof)| {
+				(out, proof.into_iter().map(|x| x.into_vec()).collect())
+			}).ok_or_else(|| "Failed to prove call: insufficient state".into())
};
self.engine.genesis_epoch_data(&genesis, &call)
}
-	/// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus.
-	pub fn new_test() -> Spec { load_bundled!("null_morden") }
+	/// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a
+	/// NullEngine consensus.
+	pub fn new_test() -> Spec {
+		load_bundled!("null_morden")
+	}
-	/// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus with applying reward on block close.
-	pub fn new_test_with_reward() -> Spec { load_bundled!("null_morden_with_reward") }
+	/// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a
+	/// NullEngine consensus with applying reward on block close.
+	pub fn new_test_with_reward() -> Spec {
+		load_bundled!("null_morden_with_reward")
+	}
-	/// Create a new Spec which is a NullEngine consensus with a premine of address whose secret is keccak('').
-	pub fn new_null() -> Spec { load_bundled!("null") }
+	/// Create a new Spec which is a NullEngine consensus with a premine of address whose
+	/// secret is keccak('').
+	pub fn new_null() -> Spec {
+		load_bundled!("null")
+	}
/// Create a new Spec which constructs a contract at address 5 with storage at 0 equal to 1.
-	pub fn new_test_constructor() -> Spec { load_bundled!("constructor") }
+	pub fn new_test_constructor() -> Spec {
+		load_bundled!("constructor")
+	}
-	/// Create a new Spec with InstantSeal consensus which does internal sealing (not requiring work).
-	pub fn new_instant() -> Spec { load_bundled!("instant_seal") }
+	/// Create a new Spec with InstantSeal consensus which does internal sealing (not requiring
+	/// work).
+	pub fn new_instant() -> Spec {
+		load_bundled!("instant_seal")
+	}
-	/// Create a new Spec with AuthorityRound consensus which does internal sealing (not requiring work).
+	/// Create a new Spec with AuthorityRound consensus which does internal sealing (not
+	/// requiring work).
	/// Accounts with secrets keccak("0") and keccak("1") are the validators.
-	pub fn new_test_round() -> Self { load_bundled!("authority_round") }
+	pub fn new_test_round() -> Self {
+		load_bundled!("authority_round")
+	}
-	/// Create a new Spec with Tendermint consensus which does internal sealing (not requiring work).
+	/// Create a new Spec with Tendermint consensus which does internal sealing (not requiring
+	/// work).
	/// Accounts keccak("0") and keccak("1") are the authorities.
-	pub fn new_test_tendermint() -> Self { load_bundled!("tendermint") }
+	pub fn new_test_tendermint() -> Self {
+		load_bundled!("tendermint")
+	}
/// TestList.sol used in both specs: https://github.com/paritytech/contracts/pull/30/files
/// Accounts with secrets keccak("0") and keccak("1") are initially the validators.
-	/// Create a new Spec with BasicAuthority which uses a contract at address 5 to determine the current validators using `getValidators`.
-	/// Second validator can be removed with "0xbfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1" and added back in using "0x4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".
-	pub fn new_validator_safe_contract() -> Self { load_bundled!("validator_safe_contract") }
+	/// Create a new Spec with BasicAuthority which uses a contract at address 5 to determine
+	/// the current validators using `getValidators`.
+	/// Second validator can be removed with
+	/// "0xbfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1" and added
+	/// back in using
+	/// "0x4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".
+	pub fn new_validator_safe_contract() -> Self {
+		load_bundled!("validator_safe_contract")
+	}
/// The same as the `safeContract`, but allows reporting and uses AuthorityRound.
/// Account is marked with `reportBenign` it can be checked as disliked with "0xd8f2e0bf".
/// Validator can be removed with `reportMalicious`.
-	pub fn new_validator_contract() -> Self { load_bundled!("validator_contract") }
+	pub fn new_validator_contract() -> Self {
+		load_bundled!("validator_contract")
+	}
-	/// Create a new Spec with BasicAuthority which uses multiple validator sets changing with height.
-	/// Account with secrets keccak("0") is the validator for block 1 and with keccak("1") onwards.
-	pub fn new_validator_multi() -> Self { load_bundled!("validator_multi") }
+	/// Create a new Spec with BasicAuthority which uses multiple validator sets changing with
+	/// height.
+	/// Account with secrets keccak("0") is the validator for block 1 and with keccak("1")
+	/// onwards.
+	pub fn new_validator_multi() -> Self {
+		load_bundled!("validator_multi")
+	}
/// Create a new spec for a PoW chain
-	pub fn new_pow_test_spec() -> Self { load_bundled!("ethereum/olympic") }
+	pub fn new_pow_test_spec() -> Self {
+		load_bundled!("ethereum/olympic")
+	}
}
#[cfg(test)]
mod tests {
+	use super::*;
+	use state::State;
	use std::str::FromStr;
+	use tests::helpers::get_temp_state_db;
	use util::*;
	use views::*;
-	use tests::helpers::get_temp_state_db;
-	use state::State;
-	use super::*;
// https://github.com/paritytech/parity/issues/1840
#[test]
fn test_load_empty() {
-		assert!(Spec::load(::std::env::temp_dir(), &[] as &[u8]).is_err());
+		assert!(Spec::load(&::std::env::temp_dir(), &[] as &[u8]).is_err());
}
#[test]
fn test_chain() {
let test_spec = Spec::new_test();
-		assert_eq!(test_spec.state_root(), H256::from_str("f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9").unwrap());
+		assert_eq!(
+			test_spec.state_root(),
+			H256::from_str(
+				"f3f4696bbf3b3b07775128eb7a3763279a394e382130f27c21e70233e04946a9",
+			).unwrap()
+		);
let genesis = test_spec.genesis_block();
-		assert_eq!(BlockView::new(&genesis).header_view().hash(), H256::from_str("0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303").unwrap());
+		assert_eq!(
+			BlockView::new(&genesis).header_view().hash(),
+			H256::from_str(
+				"0cd786a2425d16f152c658316c423e6ce1181e15c3295826d7c9904cba9ce303",
+			).unwrap()
+		);
}
#[test]
fn genesis_constructor() {
::ethcore_logger::init_log();
let spec = Spec::new_test_constructor();
-		let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap();
-		let state = State::from_existing(db.boxed_clone(), spec.state_root(), spec.engine.account_start_nonce(0), Default::default()).unwrap();
-		let expected = H256::from_str("0000000000000000000000000000000000000000000000000000000000000001").unwrap();
+		let db = spec.ensure_db_good(get_temp_state_db(), &Default::default())
+			.unwrap();
+		let state = State::from_existing(
+			db.boxed_clone(),
+			spec.state_root(),
+			spec.engine.account_start_nonce(0),
+			Default::default(),
+		).unwrap();
+		let expected = H256::from_str(
+			"0000000000000000000000000000000000000000000000000000000000000001",
+		).unwrap();
let address = Address::from_str("0000000000000000000000000000000000000005").unwrap();
assert_eq!(state.storage_at(&address, &H256::zero()).unwrap(), expected);

View File

@ -196,7 +196,7 @@ mod test {
}
"#;
-	let spec = Spec::load(::std::env::temp_dir(), spec_data.as_bytes()).unwrap();
+	let spec = Spec::load(&::std::env::temp_dir(), spec_data.as_bytes()).unwrap();
let client_db = Arc::new(::util::kvdb::in_memory(::db::NUM_COLUMNS.unwrap_or(0)));
let client = Client::new(

View File

@ -230,7 +230,7 @@ impl Args {
Ok(match self.flag_chain {
Some(ref filename) => {
let file = fs::File::open(filename).map_err(|e| format!("{}", e))?;
-				spec::Spec::load(::std::env::temp_dir(), file)?
+				spec::Spec::load(&::std::env::temp_dir(), file)?
},
None => {
ethcore::ethereum::new_foundation(&::std::env::temp_dir())

View File

@ -14,12 +14,12 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-use std::{str, fs, fmt, path};
+use std::{str, fs, fmt};
use std::time::Duration;
use bigint::prelude::U256;
use util::{Address, version_data};
use util::journaldb::Algorithm;
-use ethcore::spec::Spec;
+use ethcore::spec::{Spec, SpecParams};
use ethcore::ethereum;
use ethcore::client::Mode;
use ethcore::miner::{GasPricer, GasPriceCalibratorOptions};
@ -81,20 +81,20 @@ impl fmt::Display for SpecType {
}
impl SpecType {
-	pub fn spec<T: AsRef<path::Path>>(&self, cache_dir: T) -> Result<Spec, String> {
-		let cache_dir = cache_dir.as_ref();
+	pub fn spec<'a, T: Into<SpecParams<'a>>>(&self, params: T) -> Result<Spec, String> {
+		let params = params.into();
match *self {
-			SpecType::Foundation => Ok(ethereum::new_foundation(cache_dir)),
-			SpecType::Morden => Ok(ethereum::new_morden(cache_dir)),
-			SpecType::Ropsten => Ok(ethereum::new_ropsten(cache_dir)),
-			SpecType::Olympic => Ok(ethereum::new_olympic(cache_dir)),
-			SpecType::Classic => Ok(ethereum::new_classic(cache_dir)),
-			SpecType::Expanse => Ok(ethereum::new_expanse(cache_dir)),
-			SpecType::Kovan => Ok(ethereum::new_kovan(cache_dir)),
+			SpecType::Foundation => Ok(ethereum::new_foundation(params)),
+			SpecType::Morden => Ok(ethereum::new_morden(params)),
+			SpecType::Ropsten => Ok(ethereum::new_ropsten(params)),
+			SpecType::Olympic => Ok(ethereum::new_olympic(params)),
+			SpecType::Classic => Ok(ethereum::new_classic(params)),
+			SpecType::Expanse => Ok(ethereum::new_expanse(params)),
+			SpecType::Kovan => Ok(ethereum::new_kovan(params)),
SpecType::Dev => Ok(Spec::new_instant()),
SpecType::Custom(ref filename) => {
let file = fs::File::open(filename).map_err(|e| format!("Could not load specification file at {}: {}", filename, e))?;
-				Spec::load(cache_dir, file)
+				Spec::load(params, file)
}
}
}
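With `SpecType::spec` generic over `Into<SpecParams>` as well, a caller chooses the profile once and it flows through `Spec::load` into the engine. A hypothetical caller, assuming `SpecType` and `Spec` are in scope:

	use ethcore::spec::{OptimizeFor, SpecParams};

	fn spec_for_light_client(spec_type: &SpecType, cache_dir: &::std::path::Path) -> Result<Spec, String> {
		spec_type.spec(SpecParams::new(cache_dir, OptimizeFor::Memory))
	}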

View File

@ -29,6 +29,7 @@ use ethcore::service::ClientService;
use ethcore::snapshot;
use ethcore::verification::queue::VerifierSettings;
use ethsync::{self, SyncConfig};
use ethcore::spec::{SpecParams, OptimizeFor};
use fdlimit::raise_fd_limit;
use hash_fetch::fetch::{Fetch, Client as FetchClient};
use informant::{Informant, LightNodeInformantData, FullNodeInformantData};
@ -175,7 +176,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
use parking_lot::{Mutex, RwLock};
// load spec
-	let spec = cmd.spec.spec(&cmd.dirs.cache)?;
+	let spec = cmd.spec.spec(SpecParams::new(cmd.dirs.cache.as_ref(), OptimizeFor::Memory))?;
// load genesis hash
let genesis_hash = spec.genesis_header().hash();
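This is the one call site shown here that opts into `OptimizeFor::Memory`: the light client trades some ethash cache speed for a smaller resident footprint, while entry points that pass a bare cache directory keep the default profile.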

View File

@ -555,7 +555,7 @@ impl<C, SN: ?Sized, S: ?Sized, M, EM> Eth for EthClient<C, SN, S, M, EM> where
self.miner.map_sealing_work(&*self.client, |b| {
let pow_hash = b.hash();
let target = Ethash::difficulty_to_boundary(b.block().header().difficulty());
-			let seed_hash = self.seed_compute.lock().get_seedhash(b.block().header().number());
+			let seed_hash = self.seed_compute.lock().hash_block_number(b.block().header().number());
if no_new_work_timeout > 0 && b.block().header().timestamp() + no_new_work_timeout < get_time().sec as u64 {
Err(errors::no_new_work())

View File

@ -1,8 +1,8 @@
verbose=false
-max_width=1000
-comment_width=1000
+max_width=100
+comment_width=100
tab_spaces=4
-fn_call_width=1000
+fn_call_width=100
struct_lit_width=32
fn_call_style="Visual"
single_line_if_else_max_width=100
@ -13,3 +13,4 @@ reorder_imports=true
format_strings=false
hard_tabs=true
wrap_match_arms=false
error_on_line_overflow=false