explicit Indexed*::from_raw calls

Svyatoslav Nikolsky 2019-03-20 12:29:04 +03:00
parent 969c827429
commit eb43e22cd3
19 changed files with 89 additions and 58 deletions

View File

@@ -11,3 +11,6 @@ primitives = { path = "../primitives" }
serialization = { path = "../serialization" }
serialization_derive = { path = "../serialization_derive" }
[features]
default = []
test-helpers = []
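For context, the `test-helpers` feature added here gates the blanket `From` conversions shown below: production code now spells out the hashing step, while crates that enable the feature keep the shorter form. A minimal sketch of the two call styles in a downstream crate (helper names are hypothetical; the types are the `chain` ones changed in this commit):

use chain::{Block, IndexedBlock};

// Always available: hashes the header and every transaction explicitly.
fn index_block(block: Block) -> IndexedBlock {
    IndexedBlock::from_raw(block)
}

// Compiles only when the `chain` dependency is built with its `test-helpers`
// feature, as the dev-dependencies later in this commit do.
fn index_block_via_from(block: Block) -> IndexedBlock {
    block.into()
}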

View File

@@ -14,17 +14,12 @@ pub struct IndexedBlock {
pub transactions: Vec<IndexedTransaction>,
}
#[cfg(feature = "test-helpers")]
impl From<Block> for IndexedBlock {
fn from(block: Block) -> Self {
let Block { block_header, transactions } = block;
IndexedBlock {
header: block_header.into(),
transactions: transactions.into_iter().map(Into::into).collect(),
}
Self::from_raw(block)
}
}
impl cmp::PartialEq for IndexedBlock {
fn eq(&self, other: &Self) -> bool {
self.header.hash == other.header.hash
@@ -39,6 +34,17 @@ impl IndexedBlock {
}
}
/// Explicit conversion of the raw Block into IndexedBlock.
///
/// Hashes block header + transactions.
pub fn from_raw(block: Block) -> Self {
let Block { block_header, transactions } = block;
Self::new(
IndexedBlockHeader::from_raw(block_header),
transactions.into_iter().map(IndexedTransaction::from_raw).collect(),
)
}
pub fn hash(&self) -> &H256 {
&self.header.hash
}
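A short usage sketch of the new constructor from a caller's point of view, assuming the types above (`announce_block` is a hypothetical helper; `to_reversed_str` is the hash formatter already used elsewhere in this repository):

use chain::{Block, IndexedBlock};

// Hash the block exactly once at the edge, then reuse the cached hash.
fn announce_block(raw: Block) {
    let block = IndexedBlock::from_raw(raw);
    println!("relaying block {}", block.hash().to_reversed_str());
}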

View File

@@ -19,15 +19,12 @@ impl fmt::Debug for IndexedBlockHeader {
}
}
#[cfg(feature = "test-helpers")]
impl From<BlockHeader> for IndexedBlockHeader {
fn from(header: BlockHeader) -> Self {
IndexedBlockHeader {
hash: header.hash(),
raw: header,
}
Self::from_raw(header)
}
}
impl IndexedBlockHeader {
pub fn new(hash: H256, header: BlockHeader) -> Self {
IndexedBlockHeader {
@@ -35,6 +32,13 @@ impl IndexedBlockHeader {
raw: header,
}
}
/// Explicit conversion of the raw BlockHeader into IndexedBlockHeader.
///
/// Hashes the contents of block header.
pub fn from_raw(header: BlockHeader) -> Self {
IndexedBlockHeader::new(header.hash(), header)
}
}
impl cmp::PartialEq for IndexedBlockHeader {
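Headers follow the same pattern, and because `from_raw` is a plain associated function it slots directly into iterator chains, as the sync changes later in this commit show. A sketch, assuming the types above:

use chain::{BlockHeader, IndexedBlockHeader};

// Replaces the old `.map(Into::into)` / `IndexedBlockHeader::from` call sites.
fn index_headers(headers: Vec<BlockHeader>) -> Vec<IndexedBlockHeader> {
    headers.into_iter().map(IndexedBlockHeader::from_raw).collect()
}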

View File

@@ -1,5 +1,6 @@
use std::{cmp, io, fmt};
use hash::H256;
use heapsize::HeapSizeOf;
use ser::{Deserializable, Reader, Error as ReaderError};
use transaction::Transaction;
use read_and_hash::ReadAndHash;
@@ -19,13 +20,16 @@ impl fmt::Debug for IndexedTransaction {
}
}
#[cfg(feature = "test-helpers")]
impl<T> From<T> for IndexedTransaction where Transaction: From<T> {
fn from(other: T) -> Self {
let tx = Transaction::from(other);
IndexedTransaction {
hash: tx.hash(),
raw: tx,
}
Self::from_raw(other)
}
}
impl HeapSizeOf for IndexedTransaction {
fn heap_size_of_children(&self) -> usize {
self.raw.heap_size_of_children()
}
}
@@ -36,6 +40,14 @@ impl IndexedTransaction {
raw: transaction,
}
}
/// Explicit conversion of the raw Transaction into IndexedTransaction.
///
/// Hashes transaction contents.
pub fn from_raw<T>(transaction: T) -> Self where Transaction: From<T> {
let transaction = Transaction::from(transaction);
Self::new(transaction.hash(), transaction)
}
}
impl cmp::PartialEq for IndexedTransaction {
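The transaction constructor stays generic over anything convertible into `Transaction`, so call sites that used the blanket `From` impl only need to name the constructor. A sketch, assuming the types above:

use chain::{IndexedTransaction, Transaction};

// Accepts a `Transaction`, or any `T` with `Transaction: From<T>`;
// the transaction is hashed exactly once here.
fn index_tx(tx: Transaction) -> IndexedTransaction {
    IndexedTransaction::from_raw(tx)
}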

View File

@@ -338,7 +338,7 @@ impl<'a> BlockAssembler<'a> {
bits: bits,
height: height,
transactions: transactions,
coinbase_tx: coinbase_tx.into(),
coinbase_tx: IndexedTransaction::from_raw(coinbase_tx),
size_limit: self.max_block_size,
sigop_limit: self.max_block_sigops,
})

View File

@@ -671,8 +671,9 @@ impl MemoryPool {
/// Removes single transaction by its hash.
/// All descendants remain in the pool.
pub fn remove_by_hash(&mut self, h: &H256) -> Option<Transaction> {
self.storage.remove_by_hash(h).map(|entry| entry.transaction)
pub fn remove_by_hash(&mut self, h: &H256) -> Option<IndexedTransaction> {
self.storage.remove_by_hash(h)
.map(|entry| IndexedTransaction::new(entry.hash, entry.transaction))
}
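Pool entries are keyed by hash, so the removed entry can be returned as an `IndexedTransaction` without re-hashing. A small usage sketch (`pop_transaction` is hypothetical; `MemoryPool` is the pool type defined in this file and `H256` comes from the `primitives` crate):

use chain::IndexedTransaction;
use primitives::hash::H256;

fn pop_transaction(pool: &mut MemoryPool, hash: &H256) -> Option<IndexedTransaction> {
    let removed = pool.remove_by_hash(hash)?;
    // The stored hash is reused verbatim; no second hashing pass happens here.
    debug_assert_eq!(&removed.hash, hash);
    Some(removed)
}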
/// Checks if `transaction` spends some outputs, already spent by inpool transactions.
@@ -961,7 +962,7 @@ pub mod tests {
// remove and check remaining transactions
let removed = pool.remove_by_hash(&default_tx().hash());
assert!(removed.is_some());
assert_eq!(removed.unwrap(), default_tx());
assert_eq!(removed.unwrap(), default_tx().into());
assert_eq!(pool.get_transactions_ids().len(), 0);
// remove non-existant transaction

View File

@@ -26,7 +26,7 @@ pub fn node_table_path(cfg: &Config) -> PathBuf {
pub fn init_db(cfg: &Config) -> Result<(), String> {
// insert genesis block if db is empty
let genesis_block: IndexedBlock = cfg.network.genesis_block().into();
let genesis_block = IndexedBlock::from_raw(cfg.network.genesis_block());
match cfg.db.block_hash(0) {
Some(ref db_genesis_block_hash) if db_genesis_block_hash != genesis_block.hash() => Err("Trying to open database with incompatible genesis block".into()),
Some(_) => Ok(()),

View File

@@ -7,7 +7,7 @@ use keys::{self, Address};
use v1::helpers::errors::{block_not_found, block_at_height_not_found, transaction_not_found,
transaction_output_not_found, transaction_of_side_branch, invalid_params};
use jsonrpc_core::Error;
use {storage, chain};
use storage;
use global_script::Script;
use chain::OutPoint;
use verification;
@@ -78,9 +78,8 @@ impl BlockChainClientCoreApi for BlockChainClientCore {
}
fn verbose_block(&self, hash: GlobalH256) -> Option<VerboseBlock> {
self.storage.block(hash.into())
self.storage.indexed_block(hash.into())
.map(|block| {
let block: chain::IndexedBlock = block.into();
let height = self.storage.block_number(block.hash());
let confirmations = match height {
Some(block_number) => (self.storage.best_block().number - block_number + 1) as i64,

View File

@@ -4,7 +4,10 @@ use v1::traits::Raw;
use v1::types::{RawTransaction, TransactionInput, TransactionOutput, TransactionOutputs, Transaction, GetRawTransactionResponse};
use v1::types::H256;
use v1::helpers::errors::{execution, invalid_params};
use chain::{SAPLING_TX_VERSION, SAPLING_TX_VERSION_GROUP_ID, Transaction as GlobalTransaction};
use chain::{
SAPLING_TX_VERSION, SAPLING_TX_VERSION_GROUP_ID,
Transaction as GlobalTransaction, IndexedTransaction as GlobalIndexedTransaction,
};
use primitives::bytes::Bytes as GlobalBytes;
use primitives::hash::H256 as GlobalH256;
use sync;
@@ -118,7 +121,7 @@ impl RawClientCore {
impl RawClientCoreApi for RawClientCore {
fn accept_transaction(&self, transaction: GlobalTransaction) -> Result<GlobalH256, String> {
self.local_sync_node.accept_transaction(transaction)
self.local_sync_node.accept_transaction(GlobalIndexedTransaction::from_raw(transaction))
}
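The rule applied at the RPC and network edges is: index a raw transaction exactly once, where it enters the node, and pass the indexed form onwards. A hedged sketch of that boundary (`submit_to_sync` is a hypothetical stand-in for `local_sync_node.accept_transaction` above):

use chain::{IndexedTransaction, Transaction};
use primitives::hash::H256;

fn on_raw_transaction(
    raw: Transaction,
    submit_to_sync: impl Fn(IndexedTransaction) -> Result<H256, String>,
) -> Result<H256, String> {
    // Hash once at the edge; everything downstream reuses the cached hash.
    submit_to_sync(IndexedTransaction::from_raw(raw))
}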
fn create_raw_transaction(

View File

@@ -31,3 +31,4 @@ network = { path = "../network" }
[dev-dependencies]
test-data = { path = "../test-data" }
miner = { path = "../miner", features = ["test-helpers"] }
chain = { path = "../chain", features = ["test-helpers"] }

View File

@@ -77,13 +77,13 @@ impl InboundSyncConnection for InboundConnection {
}
fn on_transaction(&self, message: types::Tx) {
let tx: IndexedTransaction = message.transaction.into();
let tx = IndexedTransaction::from_raw(message.transaction);
self.peers.hash_known_as(self.peer_index, tx.hash.clone(), KnownHashType::Transaction);
self.node.on_transaction(self.peer_index, tx);
}
fn on_block(&self, message: types::Block) {
let block: IndexedBlock = message.block.into();
let block = IndexedBlock::from_raw(message.block);
self.peers.hash_known_as(self.peer_index, block.hash().clone(), KnownHashType::Block);
self.node.on_block(self.peer_index, block);
}

View File

@@ -2,7 +2,7 @@ use std::sync::Arc;
use parking_lot::{Mutex, Condvar};
use time;
use futures::{lazy, finished};
use chain::{Transaction, IndexedTransaction, IndexedBlock};
use chain::{IndexedTransaction, IndexedBlock};
use keys::Address;
use message::types;
use miner::BlockAssembler;
@@ -217,7 +217,7 @@ impl<T, U, V> LocalNode<T, U, V> where T: TaskExecutor, U: Server, V: Client {
}
/// Verify and then schedule new transaction
pub fn accept_transaction(&self, transaction: Transaction) -> Result<H256, String> {
pub fn accept_transaction(&self, transaction: IndexedTransaction) -> Result<H256, String> {
let sink_data = Arc::new(TransactionAcceptSinkData::default());
let sink = TransactionAcceptSink::new(sink_data.clone()).boxed();
{
@@ -384,7 +384,7 @@ pub mod tests {
let transaction: Transaction = test_data::TransactionBuilder::with_output(1).add_input(&genesis.transactions[0], 0).into();
let transaction_hash = transaction.hash();
let result = local_node.accept_transaction(transaction.clone());
let result = local_node.accept_transaction(transaction.clone().into());
assert_eq!(result, Ok(transaction_hash.clone()));
assert_eq!(executor.take_tasks(), vec![Task::RelayNewTransaction(transaction.into(), 0)]);
@@ -405,7 +405,7 @@
let peer_index1 = 0; local_node.on_connect(peer_index1, "test".into(), types::Version::default());
executor.take_tasks();
let result = local_node.accept_transaction(transaction);
let result = local_node.accept_transaction(transaction.into());
assert_eq!(result, Err("simulated".to_owned()));
assert_eq!(executor.take_tasks(), vec![]);

View File

@@ -246,7 +246,7 @@ impl Chain {
/// Get block header by number
pub fn block_header_by_number(&self, number: BlockHeight) -> Option<IndexedBlockHeader> {
if number <= self.best_storage_block.number {
self.storage.block_header(storage::BlockRef::Number(number)).map(Into::into)
self.storage.indexed_block_header(storage::BlockRef::Number(number))
} else {
self.headers_chain.at(number - self.best_storage_block.number)
}
@@ -254,8 +254,8 @@ impl Chain {
/// Get block header by hash
pub fn block_header_by_hash(&self, hash: &H256) -> Option<IndexedBlockHeader> {
if let Some(block) = self.storage.block(storage::BlockRef::Hash(hash.clone())) {
return Some(block.block_header.into());
if let Some(header) = self.storage.indexed_block_header(storage::BlockRef::Hash(hash.clone())) {
return Some(header);
}
self.headers_chain.by_hash(hash)
}
@@ -591,7 +591,9 @@ impl Chain {
/// Get transaction by hash (if it's in memory pool or verifying)
pub fn transaction_by_hash(&self, hash: &H256) -> Option<IndexedTransaction> {
self.verifying_transactions.get(hash).cloned()
.or_else(|| self.memory_pool.read().read_by_hash(hash).cloned().map(|t| t.into()))
.or_else(|| self.memory_pool.read().read_by_hash(hash)
.cloned()
.map(|tx| IndexedTransaction::new(hash.clone(), tx)))
}
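Taken together, the two constructors cover both cases in this function: `from_raw` when only raw data is available, `new` when the hash is already known (here it is the lookup key). A sketch of the distinction, assuming the `chain` types from this commit:

use chain::{IndexedTransaction, Transaction};
use primitives::hash::H256;

// Only the raw transaction is at hand: hash it now.
fn index_fresh(tx: Transaction) -> IndexedTransaction {
    IndexedTransaction::from_raw(tx)
}

// The hash is already known (e.g. it was the map key): skip re-hashing.
fn index_with_known_hash(hash: H256, tx: Transaction) -> IndexedTransaction {
    IndexedTransaction::new(hash, tx)
}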
/// Insert transaction to memory pool

View File

@@ -1,6 +1,6 @@
use std::sync::Arc;
use parking_lot::Mutex;
use chain::{IndexedTransaction, Transaction, IndexedBlock};
use chain::{IndexedTransaction, IndexedBlock};
use message::types;
use synchronization_executor::TaskExecutor;
use synchronization_verifier::{Verifier, TransactionVerificationSink};
@@ -129,7 +129,7 @@ pub trait Client : Send + Sync + 'static {
fn on_transaction(&self, peer_index: PeerIndex, transaction: IndexedTransaction);
fn on_notfound(&self, peer_index: PeerIndex, message: types::NotFound);
fn after_peer_nearly_blocks_verified(&self, peer_index: PeerIndex, future: EmptyBoxFuture);
fn accept_transaction(&self, transaction: Transaction, sink: Box<TransactionVerificationSink>) -> Result<(), String>;
fn accept_transaction(&self, transaction: IndexedTransaction, sink: Box<TransactionVerificationSink>) -> Result<(), String>;
fn install_sync_listener(&self, listener: SyncListenerRef);
}
@@ -214,7 +214,7 @@ impl<T, U> Client for SynchronizationClient<T, U> where T: TaskExecutor, U: Veri
self.core.lock().after_peer_nearly_blocks_verified(peer_index, future);
}
fn accept_transaction(&self, transaction: Transaction, sink: Box<TransactionVerificationSink>) -> Result<(), String> {
fn accept_transaction(&self, transaction: IndexedTransaction, sink: Box<TransactionVerificationSink>) -> Result<(), String> {
let mut transactions_to_verify = try!(self.core.lock().accept_transaction(transaction, sink));
let next_block_height = self.shared_state.best_storage_block_height() + 1;

View File

@@ -5,7 +5,7 @@ use std::sync::Arc;
use futures::Future;
use parking_lot::Mutex;
use time::precise_time_s;
use chain::{IndexedBlockHeader, IndexedTransaction, Transaction, IndexedBlock};
use chain::{IndexedBlockHeader, IndexedTransaction, IndexedBlock};
use message::types;
use message::common::{InventoryType, InventoryVector};
use miner::transaction_fee_rate;
@@ -72,7 +72,7 @@ pub trait ClientCore {
fn on_transaction(&mut self, peer_index: PeerIndex, transaction: IndexedTransaction) -> Option<VecDeque<IndexedTransaction>>;
fn on_notfound(&mut self, peer_index: PeerIndex, message: types::NotFound);
fn after_peer_nearly_blocks_verified(&mut self, peer_index: PeerIndex, future: EmptyBoxFuture);
fn accept_transaction(&mut self, transaction: Transaction, sink: Box<TransactionVerificationSink>) -> Result<VecDeque<IndexedTransaction>, String>;
fn accept_transaction(&mut self, transaction: IndexedTransaction, sink: Box<TransactionVerificationSink>) -> Result<VecDeque<IndexedTransaction>, String>;
fn install_sync_listener(&mut self, listener: SyncListenerRef);
fn execute_synchronization_tasks(&mut self, forced_blocks_requests: Option<Vec<H256>>, final_blocks_requests: Option<Vec<H256>>);
fn try_switch_to_saturated_state(&mut self) -> bool;
@@ -270,7 +270,7 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
assert!(!message.headers.is_empty(), "This must be checked in incoming connection");
// transform to indexed headers
let mut headers: Vec<_> = message.headers.into_iter().map(IndexedBlockHeader::from).collect();
let mut headers: Vec<_> = message.headers.into_iter().map(IndexedBlockHeader::from_raw).collect();
// update peers to select next tasks
self.peers_tasks.on_headers_received(peer_index);
@@ -514,9 +514,9 @@ impl<T> ClientCore for SynchronizationClientCore<T> where T: TaskExecutor {
}
}
fn accept_transaction(&mut self, transaction: Transaction, sink: Box<TransactionVerificationSink>) -> Result<VecDeque<IndexedTransaction>, String> {
let hash = transaction.hash();
match self.try_append_transaction(transaction.into(), true) {
fn accept_transaction(&mut self, transaction: IndexedTransaction, sink: Box<TransactionVerificationSink>) -> Result<VecDeque<IndexedTransaction>, String> {
let hash = transaction.hash.clone();
match self.try_append_transaction(transaction, true) {
Err(AppendTransactionError::Orphan(_)) => Err("Cannot append transaction as its inputs are unknown".to_owned()),
Err(AppendTransactionError::Synchronizing) => Err("Cannot append transaction as node is not yet fully synchronized".to_owned()),
Ok(transactions) => {
@@ -1086,8 +1086,8 @@ impl<T> SynchronizationClientCore<T> where T: TaskExecutor {
// relay block to our peers
if needs_relay && (self.state.is_saturated() || self.state.is_nearly_saturated()) {
for block_hash in insert_result.canonized_blocks_hashes {
if let Some(block) = self.chain.storage().block(block_hash.into()) {
self.executor.execute(Task::RelayNewBlock(block.into()));
if let Some(block) = self.chain.storage().indexed_block(block_hash.into()) {
self.executor.execute(Task::RelayNewBlock(block));
}
}
}

View File

@@ -269,16 +269,16 @@ impl<TExecutor> ServerTaskExecutor<TExecutor> where TExecutor: TaskExecutor {
}
},
common::InventoryType::MessageBlock => {
if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
if let Some(block) = self.storage.indexed_block(next_item.hash.clone().into()) {
trace!(target: "sync", "'getblocks' response to peer#{} is ready with block {}", peer_index, next_item.hash.to_reversed_str());
self.executor.execute(Task::Block(peer_index, block.into()));
self.executor.execute(Task::Block(peer_index, block));
} else {
notfound.inventory.push(next_item);
}
},
common::InventoryType::MessageFilteredBlock => {
if let Some(block) = self.storage.block(next_item.hash.clone().into()) {
let message_artefacts = self.peers.build_merkle_block(peer_index, &block.into());
if let Some(block) = self.storage.indexed_block(next_item.hash.clone().into()) {
let message_artefacts = self.peers.build_merkle_block(peer_index, &block);
if let Some(message_artefacts) = message_artefacts {
// send merkleblock first
trace!(target: "sync", "'getblocks' response to peer#{} is ready with merkleblock {}", peer_index, next_item.hash.to_reversed_str());

View File

@@ -170,7 +170,7 @@ impl AsyncVerifier {
},
Ok(tx_output_provider) => {
let time: u32 = get_time().sec as u32;
match verifier.verifier.verify_mempool_transaction(storage.as_block_header_provider(), &tx_output_provider, height, time, &transaction.raw) {
match verifier.verifier.verify_mempool_transaction(storage.as_block_header_provider(), &tx_output_provider, height, time, &transaction) {
Ok(_) => sink.on_transaction_verification_success(transaction.into()),
Err(e) => sink.on_transaction_verification_error(&format!("{:?}", e), &transaction.hash),
}

View File

@@ -24,3 +24,4 @@ rand = "0.4"
test-data = { path = "../test-data" }
db = { path = "../db" }
assert_matches = "1.3.0"
chain = { path = "../chain", features = ["test-helpers"] }

View File

@@ -1,7 +1,7 @@
//! Bitcoin chain verifier
use hash::H256;
use chain::{IndexedBlock, IndexedBlockHeader, BlockHeader, Transaction};
use chain::{IndexedBlock, IndexedBlockHeader, BlockHeader, IndexedTransaction};
use storage::{SharedStore, TransactionOutputProvider, BlockHeaderProvider, BlockOrigin,
DuplexTransactionOutputProvider, NoopStore};
use network::ConsensusParams;
@@ -103,15 +103,14 @@ impl BackwardsCompatibleChainVerifier {
prevout_provider: &T,
height: u32,
time: u32,
transaction: &Transaction,
transaction: &IndexedTransaction,
) -> Result<(), TransactionError> where T: TransactionOutputProvider {
let indexed_tx = transaction.clone().into();
// let's do preverification first
let deployments = BlockDeployments::new(&self.deployments, height, block_header_provider, &self.consensus);
let tx_verifier = MemoryPoolTransactionVerifier::new(&indexed_tx, &self.consensus);
let tx_verifier = MemoryPoolTransactionVerifier::new(&transaction, &self.consensus);
try!(tx_verifier.check());
let canon_tx = CanonTransaction::new(&indexed_tx);
let canon_tx = CanonTransaction::new(&transaction);
// now let's do full verification
let noop = NoopStore;
let output_store = DuplexTransactionOutputProvider::new(prevout_provider, &noop);