force user to select chain on first run

This commit is contained in:
Svyatoslav Nikolsky 2017-09-01 16:57:12 +03:00
parent 5a20710066
commit 0fcf1fac5b
12 changed files with 141 additions and 43 deletions

View File

@ -23,7 +23,7 @@ use best_block::BestBlock;
use {
BlockRef, Error, BlockHeaderProvider, BlockProvider, BlockOrigin, TransactionMeta, IndexedBlockProvider,
TransactionMetaProvider, TransactionProvider, TransactionOutputProvider, BlockChain, Store,
SideChainOrigin, ForkChain, Forkable, CanonStore
SideChainOrigin, ForkChain, Forkable, CanonStore, ConfigStore
};
const KEY_BEST_BLOCK_NUMBER: &'static str = "best_block_number";
@ -567,3 +567,23 @@ impl<T> Store for BlockChainDatabase<T> where T: KeyValueDatabase {
self.best_header().bits.to_f64()
}
}
impl<T> ConfigStore for BlockChainDatabase<T> where T: KeyValueDatabase {
fn consensus_fork(&self) -> Result<Option<String>, Error> {
match self.db.get(&Key::Configuration("consensus_fork"))
.map(KeyState::into_option)
.map(|x| x.and_then(Value::as_configuration)) {
Ok(Some(consensus_fork)) => String::from_utf8(consensus_fork.into())
.map_err(|e| Error::DatabaseError(format!("{}", e)))
.map(Some),
Ok(None) => Ok(None),
Err(e) => Err(Error::DatabaseError(e.into())),
}
}
fn set_consensus_fork(&self, consensus_fork: &str) -> Result<(), Error> {
let mut update = DBTransaction::new();
update.insert(KeyValue::Configuration("consensus_fork", consensus_fork.as_bytes().into()));
self.db.write(update).map_err(Error::DatabaseError)
}
}

View File

@ -9,3 +9,14 @@ pub enum Error {
/// Ancient fork
AncientFork,
}
impl Into<String> for Error {
fn into(self) -> String {
match self {
Error::DatabaseError(s) => format!("Database error: {}", s),
Error::CannotCanonize => "Cannot canonize block".into(),
Error::UnknownParent => "Block parent is unknown".into(),
Error::AncientFork => "Fork is too long to proceed".into(),
}
}
}

View File

@ -18,6 +18,7 @@ struct InnerDatabase {
transaction: HashMap<H256, KeyState<ChainTransaction>>,
transaction_meta: HashMap<H256, KeyState<TransactionMeta>>,
block_number: HashMap<H256, KeyState<u32>>,
configuration: HashMap<&'static str, KeyState<Bytes>>,
}
#[derive(Default, Debug)]
@ -49,6 +50,9 @@ impl MemoryDatabase {
let block_number = replace(&mut db.block_number, HashMap::default()).into_iter()
.flat_map(|(key, state)| state.into_operation(key, KeyValue::BlockNumber, Key::BlockNumber));
let configuration = replace(&mut db.configuration, HashMap::default()).into_iter()
.flat_map(|(key, state)| state.into_operation(key, KeyValue::Configuration, Key::Configuration));
Transaction {
operations: meta
.chain(block_hash)
@ -57,6 +61,7 @@ impl MemoryDatabase {
.chain(transaction)
.chain(transaction_meta)
.chain(block_number)
.chain(configuration)
.collect()
}
}
@ -75,6 +80,7 @@ impl KeyValueDatabase for MemoryDatabase {
KeyValue::Transaction(key, value) => { db.transaction.insert(key, KeyState::Insert(value)); },
KeyValue::TransactionMeta(key, value) => { db.transaction_meta.insert(key, KeyState::Insert(value)); },
KeyValue::BlockNumber(key, value) => { db.block_number.insert(key, KeyState::Insert(value)); },
KeyValue::Configuration(key, value) => { db.configuration.insert(key, KeyState::Insert(value)); },
},
Operation::Delete(delete) => match delete {
Key::Meta(key) => { db.meta.insert(key, KeyState::Delete); }
@ -84,6 +90,7 @@ impl KeyValueDatabase for MemoryDatabase {
Key::Transaction(key) => { db.transaction.insert(key, KeyState::Delete); }
Key::TransactionMeta(key) => { db.transaction_meta.insert(key, KeyState::Delete); }
Key::BlockNumber(key) => { db.block_number.insert(key, KeyState::Delete); }
Key::Configuration(key) => { db.configuration.insert(key, KeyState::Delete); }
}
}
}
@ -100,6 +107,7 @@ impl KeyValueDatabase for MemoryDatabase {
Key::Transaction(ref key) => db.transaction.get(key).cloned().unwrap_or_default().map(Value::Transaction),
Key::TransactionMeta(ref key) => db.transaction_meta.get(key).cloned().unwrap_or_default().map(Value::TransactionMeta),
Key::BlockNumber(ref key) => db.block_number.get(key).cloned().unwrap_or_default().map(Value::BlockNumber),
Key::Configuration(ref key) => db.configuration.get(key).cloned().unwrap_or_default().map(Value::Configuration),
};
Ok(result)

View File

@ -12,6 +12,7 @@ pub const COL_BLOCK_TRANSACTIONS: u32 = 3;
pub const COL_TRANSACTIONS: u32 = 4;
pub const COL_TRANSACTIONS_META: u32 = 5;
pub const COL_BLOCK_NUMBERS: u32 = 6;
pub const COL_CONFIGURATION: u32 = 7;
#[derive(Debug)]
pub enum Operation {
@ -28,6 +29,7 @@ pub enum KeyValue {
Transaction(H256, ChainTransaction),
TransactionMeta(H256, TransactionMeta),
BlockNumber(H256, u32),
Configuration(&'static str, Bytes),
}
#[derive(Debug)]
@ -39,6 +41,7 @@ pub enum Key {
Transaction(H256),
TransactionMeta(H256),
BlockNumber(H256),
Configuration(&'static str),
}
#[derive(Debug, Clone)]
@ -50,6 +53,7 @@ pub enum Value {
Transaction(ChainTransaction),
TransactionMeta(TransactionMeta),
BlockNumber(u32),
Configuration(Bytes),
}
impl Value {
@ -62,6 +66,7 @@ impl Value {
Key::Transaction(_) => deserialize(bytes).map(Value::Transaction),
Key::TransactionMeta(_) => deserialize(bytes).map(Value::TransactionMeta),
Key::BlockNumber(_) => deserialize(bytes).map(Value::BlockNumber),
Key::Configuration(_) => deserialize(bytes).map(Value::Configuration),
}.map_err(|e| format!("{:?}", e))
}
@ -113,6 +118,13 @@ impl Value {
_ => None,
}
}
pub fn as_configuration(self) -> Option<Bytes> {
match self {
Value::Configuration(bytes) => Some(bytes),
_ => None,
}
}
}
#[derive(Debug, Clone)]
@ -215,6 +227,7 @@ impl<'a> From<&'a KeyValue> for RawKeyValue {
KeyValue::Transaction(ref key, ref value) => (COL_TRANSACTIONS, serialize(key), serialize(value)),
KeyValue::TransactionMeta(ref key, ref value) => (COL_TRANSACTIONS_META, serialize(key), serialize(value)),
KeyValue::BlockNumber(ref key, ref value) => (COL_BLOCK_NUMBERS, serialize(key), serialize(value)),
KeyValue::Configuration(ref key, ref value) => (COL_CONFIGURATION, serialize(key), serialize(value)),
};
RawKeyValue {
@ -249,6 +262,7 @@ impl<'a> From<&'a Key> for RawKey {
Key::Transaction(ref key) => (COL_TRANSACTIONS, serialize(key)),
Key::TransactionMeta(ref key) => (COL_TRANSACTIONS_META, serialize(key)),
Key::BlockNumber(ref key) => (COL_BLOCK_NUMBERS, serialize(key)),
Key::Configuration(ref key) => (COL_CONFIGURATION, serialize(key)),
};
RawKey {

View File

@ -36,7 +36,7 @@ pub use block_origin::{BlockOrigin, SideChainOrigin};
pub use block_provider::{BlockHeaderProvider, BlockProvider, IndexedBlockProvider};
pub use block_ref::BlockRef;
pub use error::Error;
pub use store::{AsSubstore, Store, SharedStore, CanonStore};
pub use store::{AsSubstore, Store, SharedStore, CanonStore, ConfigStore};
pub use transaction_meta::TransactionMeta;
pub use transaction_provider::{TransactionProvider, TransactionOutputProvider, TransactionMetaProvider};

View File

@ -2,13 +2,22 @@ use std::sync::Arc;
use chain::BlockHeader;
use {
BestBlock, BlockProvider, BlockHeaderProvider, TransactionProvider, TransactionMetaProvider,
TransactionOutputProvider, BlockChain, IndexedBlockProvider, Forkable
TransactionOutputProvider, BlockChain, IndexedBlockProvider, Forkable, Error
};
pub trait CanonStore: Store + Forkable {
pub trait CanonStore: Store + Forkable + ConfigStore {
fn as_store(&self) -> &Store;
}
/// Interface for reading and writing node configuration values persisted in the database.
pub trait ConfigStore {
	/// Returns the consensus fork this database is configured for,
	/// or `Ok(None)` if no fork has been selected yet (first run).
	fn consensus_fork(&self) -> Result<Option<String>, Error>;
	/// Persists the consensus fork this database is configured for.
	fn set_consensus_fork(&self, consensus_fork: &str) -> Result<(), Error>;
}
/// Blockchain storage interface
pub trait Store: AsSubstore {
/// get best block

View File

@ -9,6 +9,9 @@ args:
- regtest:
long: regtest
help: Use a private network for regression tests.
- segwit:
long: segwit
help: Enable SegWit verification rules.
- segwit2x:
long: segwit2x
help: Enable SegWit2x verification rules.

View File

@ -1,16 +1,14 @@
use clap::ArgMatches;
use sync::{create_sync_blocks_writer, Error};
use config::Config;
use util::{open_db, init_db};
use util::init_db;
pub fn import(cfg: Config, matches: &ArgMatches) -> Result<(), String> {
let db = open_db(&cfg);
// TODO: this might be unnecessary here!
try!(init_db(&cfg, &db));
try!(init_db(&cfg));
let blk_path = matches.value_of("PATH").expect("PATH is required in cli.yml; qed");
let mut writer = create_sync_blocks_writer(db, cfg.consensus, cfg.verification_params);
let mut writer = create_sync_blocks_writer(cfg.db, cfg.consensus, cfg.verification_params);
let blk_dir = try!(::import::open_blk_dir(blk_path).map_err(|_| "Import directory does not exist".to_owned()));
let mut counter = 0;

View File

@ -2,11 +2,10 @@ use clap::ArgMatches;
use db::BlockRef;
use config::Config;
use primitives::hash::H256;
use util::{open_db, init_db};
use util::init_db;
pub fn rollback(cfg: Config, matches: &ArgMatches) -> Result<(), String> {
let db = open_db(&cfg);
try!(init_db(&cfg, &db));
try!(init_db(&cfg));
let block_ref = matches.value_of("BLOCK").expect("BLOCK is required in cli.yml; qed");
let block_ref = if block_ref.len() == 64 {
@ -18,10 +17,10 @@ pub fn rollback(cfg: Config, matches: &ArgMatches) -> Result<(), String> {
BlockRef::Number(block_ref.parse().map_err(|e| format!("Invalid block hash: {}", e))?)
};
let required_block_hash = db.block_header(block_ref.clone()).ok_or(format!("Block {:?} is unknown", block_ref))?.hash();
let required_block_hash = cfg.db.block_header(block_ref.clone()).ok_or(format!("Block {:?} is unknown", block_ref))?.hash();
let genesis_hash = cfg.magic.genesis_block().hash();
let mut best_block_hash = db.best_block().hash;
let mut best_block_hash = cfg.db.best_block().hash;
debug_assert!(best_block_hash != H256::default()); // genesis inserted in init_db
loop {
@ -34,6 +33,6 @@ pub fn rollback(cfg: Config, matches: &ArgMatches) -> Result<(), String> {
return Err(format!("Failed to revert to block {:?}. Reverted to genesis", block_ref));
}
best_block_hash = db.rollback_best().map_err(|e| format!("{:?}", e))?;
best_block_hash = cfg.db.rollback_best().map_err(|e| format!("{:?}", e))?;
}
}

View File

@ -5,7 +5,7 @@ use std::sync::mpsc::{channel, Sender, Receiver};
use std::sync::atomic::{AtomicBool, Ordering};
use sync::{create_sync_peers, create_local_sync_node, create_sync_connection_factory, SyncListener};
use primitives::hash::H256;
use util::{open_db, init_db, node_table_path};
use util::{init_db, node_table_path};
use {config, p2p, PROTOCOL_VERSION, PROTOCOL_MINIMUM};
use super::super::rpc;
@ -84,8 +84,7 @@ impl Drop for BlockNotifier {
pub fn start(cfg: config::Config) -> Result<(), String> {
let mut el = p2p::event_loop();
let db = open_db(&cfg);
init_db(&cfg, &db)?;
init_db(&cfg)?;
let nodes_path = node_table_path(&cfg);
@ -111,7 +110,7 @@ pub fn start(cfg: config::Config) -> Result<(), String> {
};
let sync_peers = create_sync_peers();
let local_sync_node = create_local_sync_node(cfg.consensus, db.clone(), sync_peers.clone(), cfg.verification_params);
let local_sync_node = create_local_sync_node(cfg.consensus, cfg.db.clone(), sync_peers.clone(), cfg.verification_params);
let sync_connection_factory = create_sync_connection_factory(sync_peers.clone(), local_sync_node.clone());
if let Some(block_notify_command) = cfg.block_notify_command {
@ -121,7 +120,7 @@ pub fn start(cfg: config::Config) -> Result<(), String> {
let p2p = try!(p2p::P2P::new(p2p_cfg, sync_connection_factory, el.handle()).map_err(|x| x.to_string()));
let rpc_deps = rpc::Dependencies {
network: cfg.magic,
storage: db,
storage: cfg.db,
local_sync_node: local_sync_node,
p2p_context: p2p.context().clone(),
remote: el.remote(),

View File

@ -1,5 +1,6 @@
use std::net;
use clap;
use db;
use message::Services;
use network::{Magic, ConsensusParams, ConsensusFork, SEGWIT2X_FORK_BLOCK, BITCOIN_CASH_FORK_BLOCK};
use p2p::InternetProtocol;
@ -10,6 +11,7 @@ use primitives::hash::H256;
use rpc::HttpConfiguration as RpcHttpConfig;
use verification::VerificationLevel;
use sync::VerificationParameters;
use util::open_db;
pub struct Config {
pub magic: Magic,
@ -29,11 +31,24 @@ pub struct Config {
pub rpc_config: RpcHttpConfig,
pub block_notify_command: Option<String>,
pub verification_params: VerificationParameters,
pub db: db::SharedStore,
}
pub const DEFAULT_DB_CACHE: usize = 512;
pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
let db_cache = match matches.value_of("db-cache") {
Some(s) => s.parse().map_err(|_| "Invalid cache size - should be number in MB".to_owned())?,
None => DEFAULT_DB_CACHE,
};
let data_dir = match matches.value_of("data-dir") {
Some(s) => Some(s.parse().map_err(|_| "Invalid data-dir".to_owned())?),
None => None,
};
let db = open_db(&data_dir, db_cache);
let quiet = matches.is_present("quiet");
let magic = match (matches.is_present("testnet"), matches.is_present("regtest")) {
(true, false) => Magic::Testnet,
@ -42,12 +57,7 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
(true, true) => return Err("Only one testnet option can be used".into()),
};
let (consensus_fork, user_agent_suffix) = match (matches.is_present("segwit2x"), matches.is_present("bitcoin-cash")) {
(true, false) => (ConsensusFork::SegWit2x(SEGWIT2X_FORK_BLOCK), "/SegWit2x"),
(false, true) => (ConsensusFork::BitcoinCash(BITCOIN_CASH_FORK_BLOCK), "/UAHF"),
(false, false) => (ConsensusFork::NoFork, ""),
(true, true) => return Err("Only one fork can be used".into()),
};
let consensus_fork = parse_consensus_fork(&db, &matches)?;
let consensus = ConsensusParams::new(magic, consensus_fork);
let (in_connections, out_connections) = match magic {
@ -61,6 +71,11 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
};
// to skip idiotic 30 seconds delay in test-scripts
let user_agent_suffix = match consensus_fork {
ConsensusFork::NoFork => "",
ConsensusFork::SegWit2x(_) => "/SegWit2x",
ConsensusFork::BitcoinCash(_) => "/UAHF",
};
let user_agent = match magic {
Magic::Testnet | Magic::Mainnet | Magic::Unitest | Magic::Other(_) => format!("{}{}", USER_AGENT, user_agent_suffix),
Magic::Regtest => REGTEST_USER_AGENT.into(),
@ -94,16 +109,6 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
_ => (),
}
let db_cache = match matches.value_of("db-cache") {
Some(s) => s.parse().map_err(|_| "Invalid cache size - should be number in MB".to_owned())?,
None => DEFAULT_DB_CACHE,
};
let data_dir = match matches.value_of("data-dir") {
Some(s) => Some(s.parse().map_err(|_| "Invalid data-dir".to_owned())?),
None => None,
};
let only_net = match matches.value_of("only-net") {
Some(s) => s.parse()?,
None => InternetProtocol::default(),
@ -159,11 +164,43 @@ pub fn parse(matches: &clap::ArgMatches) -> Result<Config, String> {
verification_level: verification_level,
verification_edge: verification_edge,
},
db: db,
};
Ok(config)
}
/// Resolves the consensus fork to run with, reconciling the command-line
/// flags (`--segwit` / `--segwit2x` / `--bitcoin-cash`) with the fork that
/// was persisted in the database by a previous run.
///
/// Rules:
/// * first run (empty database): the user must pass exactly one fork flag,
///   and the selection is persisted;
/// * subsequent runs: passing no flag reuses the persisted fork; passing a
///   flag is only allowed if it matches the persisted fork — switching
///   forks over an existing database is refused.
fn parse_consensus_fork(db: &db::SharedStore, matches: &clap::ArgMatches) -> Result<ConsensusFork, String> {
	// fork stored by a previous run, if any
	let old_consensus_fork = match db.consensus_fork() {
		Ok(consensus_fork) => consensus_fork,
		Err(err) => return Err(err.into()),
	};

	// fork requested on the command line, if any (at most one flag allowed)
	let cli_consensus_fork = match (matches.is_present("segwit"), matches.is_present("segwit2x"), matches.is_present("bitcoin-cash")) {
		(false, false, false) => None,
		(true, false, false) => Some("segwit"),
		(false, true, false) => Some("segwit2x"),
		(false, false, true) => Some("bitcoin-cash"),
		_ => return Err("You can only pass single fork argument: --segwit, --segwit2x, --bitcoin-cash".into()),
	};

	let consensus_fork = match (cli_consensus_fork, old_consensus_fork) {
		// first run and nothing selected => refuse to start
		(None, None) =>
			return Err("You must select fork on first run: --segwit, --segwit2x, --bitcoin-cash".into()),
		// no flag, but a fork was selected on a previous run => keep using it
		// (the original fell into the "single fork argument" error here)
		(None, Some(old_consensus_fork)) => old_consensus_fork,
		// first run with a fork flag => persist the selection
		(Some(new_consensus_fork), None) => {
			if let Err(err) = db.set_consensus_fork(new_consensus_fork) {
				return Err(err.into());
			}
			new_consensus_fork.to_owned()
		},
		// flag passed over an existing database => must match the stored fork
		(Some(new_consensus_fork), Some(old_consensus_fork)) => {
			if old_consensus_fork != new_consensus_fork {
				return Err(format!("Cannot select '{}' fork with non-empty database of '{}' fork", new_consensus_fork, old_consensus_fork));
			}
			old_consensus_fork
		},
	};

	Ok(match consensus_fork.as_str() {
		// NOTE(review): --segwit currently maps to NoFork (SegWit rules appear
		// to be part of the base consensus here) — confirm this is intentional
		"segwit" => ConsensusFork::NoFork,
		"segwit2x" => ConsensusFork::SegWit2x(SEGWIT2X_FORK_BLOCK),
		"bitcoin-cash" => ConsensusFork::BitcoinCash(BITCOIN_CASH_FORK_BLOCK),
		// the stored value can come from the database, so report rather than panic
		fork => return Err(format!("Unknown consensus fork stored in database: {}", fork)),
	})
}
fn parse_rpc_config(magic: Magic, matches: &clap::ArgMatches) -> Result<RpcHttpConfig, String> {
let mut config = RpcHttpConfig::with_port(magic.rpc_port());
config.enabled = !matches.is_present("no-jsonrpc");

View File

@ -6,12 +6,12 @@ use {db, APP_INFO};
use config::Config;
use chain::IndexedBlock;
pub fn open_db(cfg: &Config) -> db::SharedStore {
let db_path = match cfg.data_dir {
pub fn open_db(data_dir: &Option<String>, db_cache: usize) -> db::SharedStore {
let db_path = match *data_dir {
Some(ref data_dir) => custom_path(&data_dir, "db"),
None => app_dir(AppDataType::UserData, &APP_INFO, "db").expect("Failed to get app dir"),
};
Arc::new(db::BlockChainDatabase::open_at_path(db_path, cfg.db_cache).expect("Failed to open database"))
Arc::new(db::BlockChainDatabase::open_at_path(db_path, db_cache).expect("Failed to open database"))
}
pub fn node_table_path(cfg: &Config) -> PathBuf {
@ -23,16 +23,16 @@ pub fn node_table_path(cfg: &Config) -> PathBuf {
node_table
}
pub fn init_db(cfg: &Config, db: &db::SharedStore) -> Result<(), String> {
pub fn init_db(cfg: &Config) -> Result<(), String> {
// insert genesis block if db is empty
let genesis_block: IndexedBlock = cfg.magic.genesis_block().into();
match db.block_hash(0) {
match cfg.db.block_hash(0) {
Some(ref db_genesis_block_hash) if db_genesis_block_hash != genesis_block.hash() => Err("Trying to open database with incompatible genesis block".into()),
Some(_) => Ok(()),
None => {
let hash = genesis_block.hash().clone();
db.insert(genesis_block).expect("Failed to insert genesis block to the database");
db.canonize(&hash).expect("Failed to canonize genesis block");
cfg.db.insert(genesis_block).expect("Failed to insert genesis block to the database");
cfg.db.canonize(&hash).expect("Failed to canonize genesis block");
Ok(())
}
}