[validation] add JoinSplit-related and zk-specific checks

jc 2018-05-17 09:15:28 -04:00
parent 60bc87c6a8
commit 643ce1226c
16 changed files with 548 additions and 194 deletions

View File

@ -48,7 +48,7 @@ static void DeserializeAndCheckBlockTest(benchmark::State& state)
assert(stream.Rewind(sizeof(block_bench::block413567)));
CValidationState validationState;
assert(CheckBlock(block, validationState, chainParams->GetConsensus()));
assert(CheckBlock(block, validationState, *chainParams));
}
}

View File

@ -197,7 +197,7 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<
return READ_STATUS_INVALID;
CValidationState state;
if (!CheckBlock(block, state, Params().GetConsensus())) {
if (!CheckBlock(block, state, Params())) {
// TODO: We really want to just check merkle tree manually here,
// but that is expensive, and CheckBlock caches a block's
// "checked-status" (in the CBlock?). CBlock should be able to

View File

@ -124,7 +124,7 @@ public:
static_assert(Consensus::DEPLOYMENT_SEGWIT == Consensus::DEPLOYMENT_CSV, "segwit and csv deployed together");
// The best chain should have at least this much work.
consensus.nMinimumChainWork = uint256S("0x00000000000000000000000000000000000000000000000000000000ffffffff");
consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000000000000000ffffffffffff");
pchMessageStart[0] = 0xa8;
pchMessageStart[1] = 0xea;
@ -134,7 +134,6 @@ public:
nDefaultPort = 7933;
nPruneAfterHeight = 100000;
const size_t N = 200, K = 9;
nEquihashN = 200;
nEquihashK = 9;

View File

@ -16,9 +16,11 @@ static const int32_t MIN_TX_VERSION = 1;
/** The maximum allowed size for a serialized block, in bytes (only for buffer size limits) */
static const unsigned int MAX_BLOCK_SERIALIZED_SIZE = 4000000;
/** The maximum allowed weight for a block, see BIP 141 (network rule) */
static const unsigned int MAX_BLOCK_WEIGHT = 4000000;
static const unsigned int MAX_BLOCK_WEIGHT = 8000000;
/** The maximum allowed number of signature check operations in a block (network rule) */
static const int64_t MAX_BLOCK_SIGOPS_COST = 80000;
static const int64_t MAX_BLOCK_SIGOPS_COST = 200000;
/** The maximum size of a transaction excluding witnesses (network rule that is not particularly relevant any longer so not applied to witnesses) */
static const unsigned int MAX_TX_SIZE = 100000;
/** Coinbase transaction outputs can only be spent after this number of new blocks (network rule) */
static const int COINBASE_MATURITY = 100;

View File

@ -0,0 +1,42 @@
bool CheckTransactionJoinsplits(const CTransaction& tx, CValidationState &state)
{
if (tx.vjoinsplit.size() > 0) {
// Empty output script.
CScript scriptCode;
uint256 dataToBeSigned;
int hashtype = SIGHASH_ALL;
if(flags & SCRIPT_VERIFY_FORKID)
hashtype |= SIGHASH_FORKID;
try {
dataToBeSigned = SignatureHash(scriptCode, tx, NOT_AN_INPUT,
hashtype, (flags & SCRIPT_VERIFY_FORKID) ? FORKID_IN_USE : FORKID_NONE);
} catch (const std::logic_error& ex) {
return state.DoS(100, error("CheckTransaction(): error computing signature hash"),
REJECT_INVALID, "error-computing-signature-hash");
}
BOOST_STATIC_ASSERT(crypto_sign_PUBLICKEYBYTES == 32);
// We rely on libsodium to check that the signature is canonical.
// https://github.com/jedisct1/libsodium/commit/62911edb7ff2275cccd74bf1c8aefcc4d76924e0
if (crypto_sign_verify_detached(&tx.joinSplitSig[0],
dataToBeSigned.begin(), 32,
tx.joinSplitPubKey.begin()
) != 0) {
return state.DoS(100, error("CheckTransaction(): invalid joinsplit signature"),
REJECT_INVALID, "bad-txns-invalid-joinsplit-signature");
}
// Ensure that zk-SNARKs verify
auto verifier = libzcash::ProofVerifier::Strict();
for(const JSDescription &joinsplit : tx.vjoinsplit) {
if (!joinsplit.Verify(*pzcashParams, verifier, tx.joinSplitPubKey)) {
return state.DoS(100, error("CheckTransaction(): joinsplit does not verify"),
REJECT_INVALID, "bad-txns-joinsplit-verification-failed");
}
}
}
return true;
}
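
The signature check above treats joinSplitPubKey as a 32-byte Ed25519 public key and joinSplitSig as a 64-byte detached signature over the 32-byte sighash. A minimal standalone sketch of that libsodium call, outside any transaction context (the keypair, digest, and build line are illustrative only):

// Minimal sketch: verify a detached Ed25519 signature over a 32-byte digest
// with libsodium, mirroring the crypto_sign_verify_detached() call above.
// Build with: g++ sketch.cpp -lsodium
#include <sodium.h>
#include <cstdio>

int main()
{
    if (sodium_init() < 0) return 1;                // libsodium must be initialised once

    unsigned char pk[crypto_sign_PUBLICKEYBYTES];   // 32 bytes, like joinSplitPubKey
    unsigned char sk[crypto_sign_SECRETKEYBYTES];
    crypto_sign_keypair(pk, sk);

    unsigned char digest[32] = {0};                 // stand-in for dataToBeSigned
    unsigned char sig[crypto_sign_BYTES];           // 64 bytes, like joinSplitSig
    crypto_sign_detached(sig, nullptr, digest, sizeof(digest), sk);

    // Returns 0 only for a valid, canonical signature.
    int ok = crypto_sign_verify_detached(sig, digest, sizeof(digest), pk);
    std::printf("signature %s\n", ok == 0 ? "valid" : "invalid");
    return ok == 0 ? 0 : 1;
}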

10
src/consensus/joinsplit.h Normal file
View File

@ -0,0 +1,10 @@
// Copyright (c) 2018 The Bitcoin Private developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BTCP_CONSENSUS_JOINSPLIT_H
#define BTCP_CONSENSUS_JOINSPLIT_H
bool CheckTransactionJoinsplits(const CTransaction& tx, CValidationState &state);
#endif

View File

@ -7,12 +7,16 @@
#include <consensus/consensus.h>
#include <primitives/transaction.h>
#include <script/interpreter.h>
#include <consensus/joinsplit.h>
#include <consensus/validation.h>
// TODO remove the following dependencies
#include <chain.h>
#include <coins.h>
#include <util.h>
#include <utilmoneystr.h>
#include <zcash/Zcash.h>
#include <zcash/Proof.hpp>
bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
{
@ -158,17 +162,30 @@ int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& i
bool CheckTransaction(const CTransaction& tx, CValidationState &state, bool fCheckDuplicateInputs)
{
// Check transaction version
if (tx.nVersion < MIN_TX_VERSION) {
return state.DoS(100, error("CheckTransaction(): version too low"),
REJECT_INVALID, "bad-txns-version-too-low");
}
// Basic checks that don't depend on any context
if (tx.vin.empty())
return state.DoS(10, false, REJECT_INVALID, "bad-txns-vin-empty");
if (tx.vout.empty())
return state.DoS(10, false, REJECT_INVALID, "bad-txns-vout-empty");
// Transactions can contain empty `vin` and `vout` so long as
// `vjoinsplit` is non-empty.
if (tx.vin.empty() && tx.vjoinsplit.empty())
return state.DoS(10, error("CheckTransaction(): vin empty"),
REJECT_INVALID, "bad-txns-vin-empty");
if (tx.vout.empty() && tx.vjoinsplit.empty())
return state.DoS(10, error("CheckTransaction(): vout empty"),
REJECT_INVALID, "bad-txns-vout-empty");
// Size limits (this doesn't take the witness into account, as that hasn't been checked for malleability)
if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) * WITNESS_SCALE_FACTOR > MAX_BLOCK_WEIGHT)
if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS) > MAX_TX_SIZE)
return state.DoS(100, false, REJECT_INVALID, "bad-txns-oversize");
// Check for negative or overflow output values
CAmount nValueOut = 0;
CAmount nValueIn = 0;
for (const auto& txout : tx.vout)
{
if (txout.nValue < 0)
@ -180,6 +197,47 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state, bool fChe
return state.DoS(100, false, REJECT_INVALID, "bad-txns-txouttotal-toolarge");
}
// Ensure that joinsplit values are well-formed
for (const JSDescription& joinsplit : tx.vjoinsplit)
{
if (joinsplit.vpub_old < 0) {
return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_old negative"),
REJECT_INVALID, "bad-txns-vpub_old-negative");
}
if (joinsplit.vpub_new < 0) {
return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_new negative"),
REJECT_INVALID, "bad-txns-vpub_new-negative");
}
if (joinsplit.vpub_old > MAX_MONEY) {
return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_old too high"),
REJECT_INVALID, "bad-txns-vpub_old-toolarge");
}
if (joinsplit.vpub_new > MAX_MONEY) {
return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_new too high"),
REJECT_INVALID, "bad-txns-vpub_new-toolarge");
}
if (joinsplit.vpub_new != 0 && joinsplit.vpub_old != 0) {
return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_new and joinsplit.vpub_old both nonzero"),
REJECT_INVALID, "bad-txns-vpubs-both-nonzero");
}
nValueOut += joinsplit.vpub_old;
if (!MoneyRange(nValueOut)) {
return state.DoS(100, error("CheckTransaction(): txout total out of range"),
REJECT_INVALID, "bad-txns-txouttotal-toolarge");
}
nValueIn += joinsplit.vpub_new;
if (!MoneyRange(joinsplit.vpub_new) || !MoneyRange(nValueIn)) {
return state.DoS(100, error("CheckTransaction(): txin total out of range"),
REJECT_INVALID, "bad-txns-txintotal-toolarge");
}
}
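
A compact sketch of the per-JoinSplit value rules enforced above, assuming an illustrative MoneyRange()/MAX_MONEY (the chain's real cap lives in amount.h and may differ) and a hypothetical JSDescriptionLite stand-in for JSDescription:

// Sketch of the vpub_old / vpub_new rules above: each value in range,
// at most one of them nonzero, and the running totals kept in MoneyRange.
#include <cstdint>
#include <cstdio>

typedef int64_t CAmount;
static const CAmount COIN = 100000000;
static const CAmount MAX_MONEY = 21000000 * COIN;    // assumption for the sketch
static bool MoneyRange(CAmount v) { return v >= 0 && v <= MAX_MONEY; }

struct JSDescriptionLite { CAmount vpub_old; CAmount vpub_new; };  // hypothetical stand-in

bool CheckJoinSplitValues(const JSDescriptionLite* js, size_t n,
                          CAmount& nValueOut, CAmount& nValueIn)
{
    for (size_t i = 0; i < n; ++i) {
        if (!MoneyRange(js[i].vpub_old) || !MoneyRange(js[i].vpub_new)) return false;
        if (js[i].vpub_old != 0 && js[i].vpub_new != 0) return false;  // only one direction
        nValueOut += js[i].vpub_old;       // vpub_old acts like a transparent output
        if (!MoneyRange(nValueOut)) return false;
        nValueIn += js[i].vpub_new;        // vpub_new acts like a transparent input
        if (!MoneyRange(nValueIn)) return false;
    }
    return true;
}

int main()
{
    JSDescriptionLite js[1] = { {0, 5 * COIN} };     // 5 coins exiting the shielded pool
    CAmount out = 0, in = 0;
    std::printf("%s\n", CheckJoinSplitValues(js, 1, out, in) ? "ok" : "rejected");
    return 0;
}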
// Check for duplicate inputs - note that this check is slow so we skip it in CheckBlock
if (fCheckDuplicateInputs) {
std::set<COutPoint> vInOutPoints;
@ -188,10 +246,29 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state, bool fChe
if (!vInOutPoints.insert(txin.prevout).second)
return state.DoS(100, false, REJECT_INVALID, "bad-txns-inputs-duplicate");
}
// Check for duplicate joinsplit nullifiers in this transaction
std::set<uint256> vJoinSplitNullifiers;
for (const JSDescription& joinsplit : tx.vjoinsplit)
{
for (const uint256& nf : joinsplit.nullifiers)
{
if (vJoinSplitNullifiers.count(nf))
return state.DoS(100, error("CheckTransaction(): duplicate nullifiers"),
REJECT_INVALID, "bad-joinsplits-nullifiers-duplicate");
vJoinSplitNullifiers.insert(nf);
}
}
}
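
The duplicate-nullifier rule above can be exercised in isolation; a small sketch, using std::array as a stand-in for uint256 and a nested vector as a stand-in for vjoinsplit:

// Sketch of the intra-transaction double-spend rule: every nullifier revealed
// by any JoinSplit in the transaction must be unique.
#include <array>
#include <set>
#include <vector>
#include <cstdio>

using Nullifier = std::array<unsigned char, 32>;

bool HasDuplicateNullifiers(const std::vector<std::vector<Nullifier>>& joinsplits)
{
    std::set<Nullifier> seen;
    for (const auto& js : joinsplits)
        for (const auto& nf : js)
            if (!seen.insert(nf).second)   // insert() reports an existing element
                return true;
    return false;
}

int main()
{
    Nullifier a{}; a[0] = 1;
    std::vector<std::vector<Nullifier>> tx = { {a}, {a} };   // same nullifier twice
    std::printf("%s\n", HasDuplicateNullifiers(tx) ? "duplicate" : "ok");
    return 0;
}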
if (tx.IsCoinBase())
{
// There should be no joinsplits in a coinbase transaction
if (tx.vjoinsplit.size() > 0)
return state.DoS(100, error("CheckTransaction(): coinbase has joinsplits"),
REJECT_INVALID, "bad-cb-has-joinsplits");
if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100)
return state.DoS(100, false, REJECT_INVALID, "bad-cb-length");
}
@ -205,7 +282,7 @@ bool CheckTransaction(const CTransaction& tx, CValidationState &state, bool fChe
return true;
}
bool Consensus::CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee)
bool Consensus::CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee, bool fCoinbaseMustBeProtected, uint64_t forkStartHeight, uint64_t forkHeightRange)
{
// are the actual inputs available?
if (!inputs.HaveInputs(tx)) {
@ -213,6 +290,11 @@ bool Consensus::CheckTxInputs(const CTransaction& tx, CValidationState& state, c
strprintf("%s: inputs missing/spent", __func__));
}
// are the JoinSplit's requirements met?
if (!inputs.HaveJoinSplitRequirements(tx)) {
return state.Invalid(error("CheckInputs(): %s JoinSplit requirements not met", tx.GetHash().ToString()));
}
CAmount nValueIn = 0;
for (unsigned int i = 0; i < tx.vin.size(); ++i) {
const COutPoint &prevout = tx.vin[i].prevout;
@ -220,10 +302,21 @@ bool Consensus::CheckTxInputs(const CTransaction& tx, CValidationState& state, c
assert(!coin.IsSpent());
// If prev is coinbase, check that it's matured
if (coin.IsCoinBase() && nSpendHeight - coin.nHeight < COINBASE_MATURITY) {
return state.Invalid(false,
REJECT_INVALID, "bad-txns-premature-spend-of-coinbase",
strprintf("tried to spend coinbase at depth %d", nSpendHeight - coin.nHeight));
if (coin.IsCoinBase()) {
if(nSpendHeight - coin.nHeight < COINBASE_MATURITY) {
return state.Invalid(false,
REJECT_INVALID, "bad-txns-premature-spend-of-coinbase",
strprintf("tried to spend coinbase at depth %d", nSpendHeight - coin.nHeight));
}
// Ensure that coinbases cannot be spent to transparent outputs
// Disabled on regtest
if (fCoinbaseMustBeProtected && (coin.nHeight <= forkStartHeight || coin.nHeight > forkStartHeight + forkHeightRange) &&
!tx.vout.empty()) {
return state.Invalid(
error("CheckInputs(): tried to spend coinbase with transparent outputs"),
REJECT_INVALID, "bad-txns-coinbase-spend-has-transparent-outputs");
}
}
// Check for negative or overflow input values
@ -233,6 +326,11 @@ bool Consensus::CheckTxInputs(const CTransaction& tx, CValidationState& state, c
}
}
nValueIn += tx.GetJoinSplitValueIn();
if (!MoneyRange(tx.GetJoinSplitValueIn()) || !MoneyRange(nValueIn))
return state.DoS(100, error("CheckInputs(): vpub_old values out of range"),
REJECT_INVALID, "bad-txns-inputvalues-outofrange");
const CAmount value_out = tx.GetValueOut();
if (nValueIn < value_out) {
return state.DoS(100, false, REJECT_INVALID, "bad-txns-in-belowout", false,
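
The nValueIn < value_out comparison above relies on GetValueOut() counting each vpub_old alongside the transparent outputs (value leaving the transparent pool into the JoinSplit), while GetJoinSplitValueIn() sums the vpub_new values (value returning to it); both behaviours are inferred from the surrounding code. A worked sketch with illustrative numbers:

// Sketch of the transparent-value balance: inputs plus shielded value entering
// the transparent pool must cover outputs plus shielded value leaving it; the
// surplus is the fee.
#include <cstdint>
#include <cstdio>

int main()
{
    typedef int64_t CAmount;
    const CAmount COIN = 100000000;

    CAmount transparent_in  = 3 * COIN;
    CAmount vpub_new_total  = 2 * COIN;   // shielded -> transparent (counts as input)
    CAmount transparent_out = 4 * COIN;
    CAmount vpub_old_total  = 0;          // transparent -> shielded (counts as output)

    CAmount nValueIn  = transparent_in + vpub_new_total;
    CAmount nValueOut = transparent_out + vpub_old_total;
    if (nValueIn < nValueOut) { std::puts("bad-txns-in-belowout"); return 1; }

    std::printf("fee = %lld\n", (long long)(nValueIn - nValueOut));   // 1 coin here
    return 0;
}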

View File

@ -10,6 +10,9 @@
#include <stdint.h>
#include <vector>
#include <zcash/Zcash.h>
#include <zcash/Proof.hpp>
class CBlockIndex;
class CCoinsViewCache;
class CTransaction;
@ -27,7 +30,7 @@ namespace Consensus {
* @param[out] txfee Set to the transaction fee if successful.
* Preconditions: tx.IsCoinBase() is false.
*/
bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee);
bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee, bool fCoinbaseMustBeProtected, uint64_t start, uint64_t range);
} // namespace Consensus
/** Auxiliary functions for transaction validation (ideally should not be exposed) */
@ -41,7 +44,7 @@ unsigned int GetLegacySigOpCount(const CTransaction& tx);
/**
* Count ECDSA signature operations in pay-to-script-hash inputs.
*
*
* @param[in] mapInputs Map of previous transactions that have outputs we're spending
* @return maximum number of sigops required to validate this transaction's inputs
* @see CTransaction::FetchInputs

142
src/fork.cpp Normal file
View File

@ -0,0 +1,142 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <ios>
#include <sstream>
#include <utility>
#include <boost/tuple/tuple.hpp>
#include <boost/range/combine.hpp>
#include <chain.h>
#include <chainparams.h>
#include <checkpoints.h>
#include <script/script.h>
#include <fork.h>
#include <util.h>
#include <validation.h>
std::string GetUTXOFileName(int nHeight, const CChainParams& chainparams)
{
boost::filesystem::path utxo_path(GetDataDir() / "utxo_snapshot");
if (utxo_path.empty() || !utxo_path.has_filename())
{
LogPrintf("GetUTXOFileName(): UTXO path is not specified, add utxo-path=<path-to-utxop-files> to your btcprivate.conf and restart");
return "";
}
std::stringstream ss;
ss << strprintf("utxo-%05d.bin", nHeight - chainparams.ForkStartHeight());
boost::filesystem::path utxo_file = utxo_path;
utxo_file /= ss.str();
return utxo_file.generic_string();
}
inline uint64_t bytes2uint64(char *array)
{
uint64_t x =
static_cast<uint64_t>(array[0]) & 0x00000000000000ff |
static_cast<uint64_t>(array[1]) << 8 & 0x000000000000ff00 |
static_cast<uint64_t>(array[2]) << 16 & 0x0000000000ff0000 |
static_cast<uint64_t>(array[3]) << 24 & 0x00000000ff000000 |
static_cast<uint64_t>(array[4]) << 32 & 0x000000ff00000000 |
static_cast<uint64_t>(array[5]) << 40 & 0x0000ff0000000000 |
static_cast<uint64_t>(array[6]) << 48 & 0x00ff000000000000 |
static_cast<uint64_t>(array[7]) << 56 & 0xff00000000000000;
return x;
}
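
bytes2uint64() assembles a little-endian 64-bit value by hand; the mask after each shift is what keeps sign extension of the signed char bytes from leaking into higher bits. An equivalent decode sketched with unsigned bytes, where no masking is needed:

// Equivalent little-endian decode with unsigned bytes; behaviour matches
// bytes2uint64() above.
#include <cstdint>
#include <cstdio>

inline uint64_t le_bytes_to_uint64(const unsigned char* p)
{
    uint64_t x = 0;
    for (int i = 0; i < 8; ++i)
        x |= static_cast<uint64_t>(p[i]) << (8 * i);   // byte 0 is least significant
    return x;
}

int main()
{
    const unsigned char amount[8] = {0x00, 0xe1, 0xf5, 0x05, 0, 0, 0, 0};  // 1 coin
    std::printf("%llu\n", (unsigned long long)le_bytes_to_uint64(amount)); // 100000000
    return 0;
}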
bool ContextualCheckBlockFork(const CBlock& block, CValidationState& state,
const CChainParams& chainparams, const CBlockIndex * pindexprev)
{
bool fExpensiveChecks = true;
if (fCheckpointsEnabled) {
CBlockIndex * pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindexprev->nHeight) == pindexprev) {
// This block is an ancestor of a checkpoint: disable script checks
fExpensiveChecks = false;
}
}
int nHeight = pindexprev->nHeight + 1;
if (fExpensiveChecks && isForkBlock(nHeight, chainparams.ForkStartHeight(), chainparams.ForkHeightRange())) {
// if the block is in the forking region, validate it against the UTXO file records
std::string utxo_file_path = GetUTXOFileName(nHeight, chainparams);
std::ifstream if_utxo(utxo_file_path, std::ios::binary | std::ios::in);
if (if_utxo.is_open()) {
LogPrintf("AcceptBlock(): FORK Block - Validating block - %u / %s with UTXO file - %s\n",
nHeight, block.GetHash().ToString(), utxo_file_path);
std::vector<std::pair<uint64_t, CScript> > txFromFile;
txFromFile.reserve(FORK_CB_PER_BLOCK);
int recs = 0;
while (if_utxo && recs < FORK_CB_PER_BLOCK) {
char term = 0;
char coin[8] = {};
if (!if_utxo.read(coin, 8)) {
LogPrintf("AcceptBlock(): FORK Block - No more data in the file \n");
break;
}
uint64_t amount = bytes2uint64(coin);
char pubkeysize[8] = {};
if (!if_utxo.read(pubkeysize, 8)) {
LogPrintf("AcceptBlock(): FORK Block - UTXO file corrupted? - Not more data (PubKeyScript size)\n");
break;
}
int pbsize = bytes2uint64(pubkeysize);
if (pbsize == 0) {
LogPrintf("AcceptBlock(): FORK Block - UTXO file corrupted? - Warning! PubKeyScript size = 0\n");
//but proceed
}
std::unique_ptr<char[]> pubKeyScript(new char[pbsize]);
if (!if_utxo.read(&pubKeyScript[0], pbsize)) {
LogPrintf("AcceptBlock(): FORK Block - UTXO file corrupted? - Not more data (PubKeyScript)\n");
break;
}
unsigned char* pks = (unsigned char*)pubKeyScript.get();
CScript script = CScript(pks, pks+pbsize);
txFromFile.push_back(std::make_pair(amount, script));
if (!if_utxo.read(&term, 1)) {
LogPrintf("AcceptBlock(): FORK Block - UTXO file corrupted? - No more data (record separator)\n");
break;
}
if (term != '\n') {
//This may not be an error, but a warning nonetheless
LogPrintf("AcceptBlock(): FORK Block - UTXO file corrupted? - Warning! No record separator ('0xA') was found\n");
if_utxo.seekg(-1, std::ios_base::cur); //move one char back - if it is not a separator, maybe there are no separators at all
}
recs++;
}
LogPrintf("AcceptBlock(): FORK Block - %d records read from UTXO file\n", recs);
if (txFromFile.size() != block.vtx.size() || recs != block.vtx.size()) {
return state.DoS(100, error("AcceptBlock(): Number of file records - %d doesn't match number of transcations in block - %d\n", recs, block.vtx.size()), REJECT_INVALID, "bad-fork-block");
}
int txid = 0;
typedef boost::tuple<std::pair<uint64_t, CScript>&, const CTransaction&> fork_cmp_tuple;
for(int i = 0; i < recs; i++) {
std::pair<uint64_t, CScript>& rec = txFromFile[i];
const CTransaction& tx = *block.vtx[i];
if (rec.first != tx.vout[0].nValue ||
rec.second != tx.vout[0].scriptPubKey)
{
LogPrintf("AcceptBlock(): FORK Block - Error: Transaction (%d) mismatch\n", txid);
return state.DoS(100, error("AcceptBlock(): FORK Block - Transaction (%d) doesn't match record in the UTXO file", txid), REJECT_INVALID, "bad-fork-block");
}
txid++;
}
}
}
return true;
}
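
From the reader above, each record of the utxo-NNNNN.bin snapshot appears to be: an 8-byte little-endian amount, an 8-byte little-endian scriptPubKey length, the raw script bytes, and a single '\n' separator, with at most FORK_CB_PER_BLOCK records per file. A writer sketch for that inferred layout (treat the format as an assumption drawn from the parsing code):

// Writer sketch for one record of the inferred utxo-NNNNN.bin layout.
#include <cstdint>
#include <fstream>
#include <vector>

static void write_le64(std::ofstream& out, uint64_t v)
{
    char buf[8];
    for (int i = 0; i < 8; ++i) buf[i] = static_cast<char>((v >> (8 * i)) & 0xff);
    out.write(buf, 8);
}

static void write_record(std::ofstream& out, uint64_t amount,
                         const std::vector<unsigned char>& scriptPubKey)
{
    write_le64(out, amount);
    write_le64(out, scriptPubKey.size());
    out.write(reinterpret_cast<const char*>(scriptPubKey.data()), scriptPubKey.size());
    out.put('\n');                       // record separator checked by the reader
}

int main()
{
    std::ofstream out("utxo-00000.bin", std::ios::binary);
    std::vector<unsigned char> script = {0x76, 0xa9};   // truncated script, just for shape
    write_record(out, 100000000, script);               // 1 coin
    return 0;
}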

View File

@ -1,5 +1,3 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin Core developers
// Copyright (c) 2018 The Bitcoin Private developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@ -7,7 +5,11 @@
#ifndef BTCP_FORK_H
#define BTCP_FORK_H
#include <consensus/validation.h>
#include <primitives/block.h>
static const uint256 forkExtraHashSentinel = uint256S("f0f0f0f0fafafafaffffffffffffffffffffffffffffffffafafafaf0f0f0f0f");
static constexpr unsigned int FORK_CB_PER_BLOCK = 10000;
inline bool isForkBlock(int nHeight, int forkStartHeight, int forkHeightRange)
{
@ -24,4 +26,9 @@ inline bool isForkEnabled(int nHeight, int forkStartHeight)
return nHeight > forkStartHeight;
}
bool ContextualCheckBlockFork(const CBlock& block, CValidationState& state,
const CChainParams& chainparams, const CBlockIndex * pindex);
std::string GetUTXOFileName(int nHeight, const CChainParams& chainparams);
#endif
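
The isForkBlock() body is not visible in this hunk; given how the commit uses it (a bounded airdrop window of forkHeightRange blocks starting after forkStartHeight), a plausible sketch of both predicates is:

// Plausible shape of the two height predicates used throughout this commit;
// the exact bounds of isForkBlock() are an assumption.
#include <cstdio>

inline bool isForkBlockSketch(int nHeight, int forkStartHeight, int forkHeightRange)
{
    return nHeight > forkStartHeight && nHeight <= forkStartHeight + forkHeightRange;
}

inline bool isForkEnabledSketch(int nHeight, int forkStartHeight)
{
    return nHeight > forkStartHeight;   // fork rules stay on after the window
}

int main()
{
    const int start = 100000, range = 10000;   // illustrative parameters only
    std::printf("%d %d\n", isForkBlockSketch(start + 1, start, range),
                           isForkBlockSketch(start + range + 1, start, range));  // 1 0
    return 0;
}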

View File

@ -867,7 +867,7 @@ static uint256 most_recent_block_hash;
static bool fWitnessesPresentInMostRecentCompactBlock;
/**
* Maintain state about the best-seen block and fast-announce a compact block
* Maintain state about the best-seen block and fast-announce a compact block
* to compatible peers.
*/
void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) {
@ -912,7 +912,7 @@ void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std:
}
/**
* Update our best height and announce any block hashes which weren't previously
* Update our best height and announce any block hashes which weren't previously
* in chainActive to our peers.
*/
void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
@ -948,7 +948,7 @@ void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CB
}
/**
* Handle invalid block rejection and consequent peer banning, maintain which
* Handle invalid block rejection and consequent peer banning, maintain which
* peers announce compact blocks.
*/
void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) {
@ -1481,6 +1481,7 @@ bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::ve
// Download as much as possible, from earliest to latest.
for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
std::cout << "NOT REQUESTING MORE BLOCKS" << std::endl;
// Can't download any more from this peer
break;
}
@ -1631,17 +1632,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
return false;
}
if (nServices & ((1 << 7) | (1 << 5))) {
if (GetTime() < 1533096000) {
// Immediately disconnect peers that use service bits 6 or 8 until August 1st, 2018
// These bits have been used as a flag to indicate that a node is running incompatible
// consensus rules instead of changing the network magic, so we're stuck disconnecting
// based on these service bits, at least for a while.
pfrom->fDisconnect = true;
return false;
}
}
if (nVersion < MIN_PEER_PROTO_VERSION)
{
// disconnect from peers older than this proto version
@ -1654,8 +1644,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
return false;
}
if (nVersion == 10300)
nVersion = 300;
if (!vRecv.empty())
vRecv >> addrFrom >> nNonce;
if (!vRecv.empty()) {
@ -1762,12 +1750,6 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
pfrom->nTimeOffset = nTimeOffset;
AddTimeData(pfrom->addr, nTimeOffset);
// If the peer is old enough to have the old alert system, send it the final alert.
if (pfrom->nVersion <= 70012) {
CDataStream finalAlert(ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION);
connman->PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert));
}
// Feeler connections exist only to verify if address is online.
if (pfrom->fFeeler) {
assert(pfrom->fInbound == false);
@ -2201,7 +2183,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
if (!AlreadyHave(inv) &&
AcceptToMemoryPool(mempool, state, ptx, &fMissingInputs, &lRemovedTxn, false /* bypass_limits */, 0 /* nAbsurdFee */)) {
mempool.check(pcoinsTip.get());
mempool.check(pcoinsTip.get(), chainparams);
RelayTransaction(tx, connman);
for (unsigned int i = 0; i < tx.vout.size(); i++) {
vWorkQueue.emplace_back(inv.hash, i);
@ -2268,7 +2250,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
recentRejects->insert(orphanHash);
}
}
mempool.check(pcoinsTip.get());
mempool.check(pcoinsTip.get(), chainparams);
}
}

View File

@ -284,7 +284,7 @@ void CTxMemPool::UpdateForRemoveFromMempool(const setEntries &entriesToRemove, b
// should be a bit faster.
// However, if we happen to be in the middle of processing a reorg, then
// the mempool can be in an inconsistent state. In this case, the set
// of ancestors reachable via mapLinks will be the same as the set of
// of ancestors reachable via mapLinks will be the same as the set of
// ancestors whose packages include this transaction, because when we
// add a new transaction to the mempool in addUnchecked(), we assume it
// has no children, and in the case of a reorg where that assumption is
@ -607,18 +607,20 @@ void CTxMemPool::clear()
_clear();
}
static void CheckInputsAndUpdateCoins(const CTransaction& tx, CCoinsViewCache& mempoolDuplicate, const int64_t spendheight)
static void CheckInputsAndUpdateCoins(const CTransaction& tx, CCoinsViewCache& mempoolDuplicate, const int64_t spendheight, bool coinbaseProtection, uint64_t forkStartHeight, uint64_t forkHeightRange)
{
CValidationState state;
CAmount txfee = 0;
bool fCheckResult = tx.IsCoinBase() || Consensus::CheckTxInputs(tx, state, mempoolDuplicate, spendheight, txfee);
bool fCheckResult = tx.IsCoinBase() || Consensus::CheckTxInputs(tx, state, mempoolDuplicate, spendheight, txfee, coinbaseProtection, forkStartHeight, forkHeightRange);
assert(fCheckResult);
UpdateCoins(tx, mempoolDuplicate, 1000000);
}
void CTxMemPool::check(const CCoinsViewCache *pcoins) const
void CTxMemPool::check(const CCoinsViewCache *pcoins, const CChainParams& chainparams) const
{
LOCK(cs);
const Consensus::Params& params = chainparams.GetConsensus();
if (nCheckFrequency == 0)
return;
@ -709,7 +711,7 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const
if (fDependsWait)
waitingOnDependants.push_back(&(*it));
else {
CheckInputsAndUpdateCoins(tx, mempoolDuplicate, spendheight);
CheckInputsAndUpdateCoins(tx, mempoolDuplicate, spendheight, params.fCoinbaseMustBeProtected, chainparams.ForkStartHeight(), chainparams.ForkHeightRange());
}
}
unsigned int stepsSinceLastRemove = 0;
@ -722,7 +724,7 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const
stepsSinceLastRemove++;
assert(stepsSinceLastRemove < waitingOnDependants.size());
} else {
CheckInputsAndUpdateCoins(entry->GetTx(), mempoolDuplicate, spendheight);
CheckInputsAndUpdateCoins(entry->GetTx(), mempoolDuplicate, spendheight, params.fCoinbaseMustBeProtected, chainparams.ForkStartHeight(), chainparams.ForkHeightRange());
stepsSinceLastRemove = 0;
}
}

View File

@ -16,6 +16,8 @@
#include <amount.h>
#include <coins.h>
#include <indirectmap.h>
#include <consensus/params.h>
#include <chainparams.h>
#include <policy/feerate.h>
#include <primitives/transaction.h>
#include <sync.h>
@ -528,7 +530,7 @@ public:
* all inputs are in the mapNextTx array). If sanity-checking is turned off,
* check does nothing.
*/
void check(const CCoinsViewCache *pcoins) const;
void check(const CCoinsViewCache *pcoins, const CChainParams& chainparams) const;
void setSanityCheck(double dFrequency = 1.0) { LOCK(cs); nCheckFrequency = static_cast<uint32_t>(dFrequency * 4294967295.0); }
// addUnchecked must update state for all ancestors of a given transaction,
@ -688,7 +690,7 @@ private:
void removeUnchecked(txiter entry, MemPoolRemovalReason reason = MemPoolRemovalReason::UNKNOWN) EXCLUSIVE_LOCKS_REQUIRED(cs);
};
/**
/**
* CCoinsView that brings transactions from a mempool into view.
* It does not check for spendings by memory pool transactions.
* Instead, it provides access to all Coins which are either unspent in the

View File

@ -99,13 +99,15 @@ public:
class CBlockUndo
{
public:
std::vector<CTxUndo> vtxundo; // for all but the coinbase
std::vector<CTxUndo> vtxundo; // for all but the coinbase
uint256 old_tree_root;
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action) {
READWRITE(vtxundo);
READWRITE(old_tree_root);
}
};

View File

@ -15,6 +15,7 @@
#include <consensus/tx_verify.h>
#include <consensus/validation.h>
#include <cuckoocache.h>
#include <fork.h>
#include <hash.h>
#include <index/txindex.h>
#include <init.h>
@ -692,7 +693,7 @@ static bool AcceptToMemoryPoolWorker(const CChainParams& chainparams, CTxMemPool
return state.DoS(0, false, REJECT_NONSTANDARD, "non-BIP68-final");
CAmount nFees = 0;
if (!Consensus::CheckTxInputs(tx, state, view, GetSpendHeight(view), nFees)) {
if (!Consensus::CheckTxInputs(tx, state, view, GetSpendHeight(view), nFees, chainparams.GetConsensus().fCoinbaseMustBeProtected, chainparams.ForkStartHeight(), chainparams.ForkHeightRange())) {
return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
}
@ -1115,7 +1116,8 @@ bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus:
}
// Check the header
if (!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
if (!CheckEquihashSolution(&block, Params()) ||
!CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());
return true;
@ -1351,6 +1353,14 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txund
assert(is_spent);
}
}
// spend nullifiers
for(const JSDescription &joinsplit : tx.vjoinsplit) {
for(const uint256 &nf : joinsplit.nullifiers) {
inputs.SetNullifier(nf, true);
}
}
// add outputs
AddCoins(inputs, tx, nHeight);
}
@ -1364,7 +1374,9 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
bool CScriptCheck::operator()() {
const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
const CScriptWitness *witness = &ptxTo->vin[nIn].scriptWitness;
return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
bool ret = VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error);
return ret;
}
int GetSpendHeight(const CCoinsViewCache& inputs)
@ -1635,7 +1647,7 @@ DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockI
}
// restore inputs
if (i > 0) { // not coinbases
if (i > 0 && !isForkBlock(pindex->nHeight, Params().ForkStartHeight(), Params().ForkHeightRange())) { // not coinbases
CTxUndo &txundo = blockUndo.vtxundo[i-1];
if (txundo.vprevout.size() != tx.vin.size()) {
error("DisconnectBlock(): transaction and undo data inconsistent");
@ -1649,8 +1661,18 @@ DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockI
}
// At this point, all of txundo.vprevout should have been moved out.
}
// unspend nullifiers
for(const JSDescription &joinsplit : tx.vjoinsplit) {
for(const uint256 &nf : joinsplit.nullifiers) {
view.SetNullifier(nf, false);
}
}
}
// set the old best anchor back
view.PopAnchor(blockUndo.old_tree_root);
// move best block pointer to prevout block
view.SetBestBlock(pindex->pprev->GetBlockHash());
@ -1772,23 +1794,7 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consens
unsigned int flags = SCRIPT_VERIFY_NONE;
// BIP16 didn't become active until Apr 1 2012 (on mainnet, and
// retroactively applied to testnet)
// However, only one historical block violated the P2SH rules (on both
// mainnet and testnet), so for simplicity, always leave P2SH
// on except for the one violating block.
if (consensusparams.BIP16Exception.IsNull() || // no bip16 exception on this chain
pindex->phashBlock == nullptr || // this is a new candidate block, eg from TestBlockValidity()
*pindex->phashBlock != consensusparams.BIP16Exception) // this block isn't the historical exception
{
flags |= SCRIPT_VERIFY_P2SH;
}
// Enforce WITNESS rules whenever P2SH is in effect (and the segwit
// deployment is defined).
if (flags & SCRIPT_VERIFY_P2SH && IsScriptWitnessEnabled(consensusparams)) {
flags |= SCRIPT_VERIFY_WITNESS;
}
flags |= SCRIPT_VERIFY_P2SH;
// Start enforcing the DERSIG (BIP66) rule
if (pindex->nHeight >= consensusparams.BIP66Height) {
@ -1800,15 +1806,26 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consens
flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
}
// Start enforcing rules for first hard fork
if (isForkEnabled(pindex->nHeight)){
flags |= SCRIPT_VERIFY_FORKID;
flags |= SCRIPT_VERIFY_WITNESS_BOOTSTRAP;
}
// Start enforcing BIP68 (sequence locks) and BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic.
if (VersionBitsState(pindex->pprev, consensusparams, Consensus::DEPLOYMENT_CSV, versionbitscache) == ThresholdState::ACTIVE) {
assert(IsWitnessEnabled(pindex->pprev, consensusparams));
flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
flags |= SCRIPT_VERIFY_WITNESS;
}
if (IsNullDummyEnabled(pindex->pprev, consensusparams)) {
flags |= SCRIPT_VERIFY_NULLDUMMY;
}
// these are enforced from the genesis block - sanity check here to make sure chainparam heights are appropriately 0
unsigned int minimal_required = SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG | SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
assert((flags & minimal_required) == minimal_required);
return flags;
}
@ -1847,7 +1864,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
// is enforced in ContextualCheckBlockHeader(); we wouldn't want to
// re-enforce that rule here (at least until we make it impossible for
// GetAdjustedTime() to go backward).
if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck)) {
if (!CheckBlock(block, state, chainparams, !fJustCheck, !fJustCheck)) {
if (state.CorruptionPossible()) {
// We don't write down blocks to disk if they may have been
// corrupted, so this should be impossible unless we're having hardware
@ -1864,8 +1881,15 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
// Special case for the genesis block, skipping connection of its transactions
// (its coinbase is unspendable)
if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
if (!fJustCheck)
if (!fJustCheck) {
view.SetBestBlock(pindex->GetBlockHash());
// Before the genesis block, there was an empty tree
ZCIncrementalMerkleTree tree;
pindex->hashAnchor = tree.root();
// The genesis block contained no JoinSplits
pindex->hashAnchorEnd = pindex->hashAnchor;
}
return true;
}
@ -1900,92 +1924,12 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
int64_t nTime1 = GetTimeMicros(); nTimeCheck += nTime1 - nTimeStart;
LogPrint(BCLog::BENCH, " - Sanity checks: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime1 - nTimeStart), nTimeCheck * MICRO, nTimeCheck * MILLI / nBlocksTotal);
// Do not allow blocks that contain transactions which 'overwrite' older transactions,
// unless those are already completely spent.
// If such overwrites are allowed, coinbases and transactions depending upon those
// can be duplicated to remove the ability to spend the first instance -- even after
// being sent to another address.
// See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
// This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
// already refuses previously-known transaction ids entirely.
// This rule was originally applied to all blocks with a timestamp after March 15, 2012, 0:00 UTC.
// Now that the whole chain is irreversibly beyond that time it is applied to all blocks except the
// two in the chain that violate it. This prevents exploiting the issue against nodes during their
// initial block download.
bool fEnforceBIP30 = !((pindex->nHeight==91842 && pindex->GetBlockHash() == uint256S("0x00000000000a4d0a398161ffc163c503763b1f4360639393e0e4c8e300e0caec")) ||
(pindex->nHeight==91880 && pindex->GetBlockHash() == uint256S("0x00000000000743f190a18c5577a3c2d2a1f610ae9601ac046a38084ccb7cd721")));
// Once BIP34 activated it was not possible to create new duplicate coinbases and thus other than starting
// with the 2 existing duplicate coinbase pairs, not possible to create overwriting txs. But by the
// time BIP34 activated, in each of the existing pairs the duplicate coinbase had overwritten the first
// before the first had been spent. Since those coinbases are sufficiently buried it's no longer possible to create further
// duplicate transactions descending from the known pairs either.
// If we're on the known chain at height greater than where BIP34 activated, we can save the db accesses needed for the BIP30 check.
// BIP34 requires that a block at height X (block X) has its coinbase
// scriptSig start with a CScriptNum of X (indicated height X). The above
// logic of no longer requiring BIP30 once BIP34 activates is flawed in the
// case that there is a block X before the BIP34 height of 227,931 which has
// an indicated height Y where Y is greater than X. The coinbase for block
// X would also be a valid coinbase for block Y, which could be a BIP30
// violation. An exhaustive search of all mainnet coinbases before the
// BIP34 height which have an indicated height greater than the block height
// reveals many occurrences. The 3 lowest indicated heights found are
// 209,921, 490,897, and 1,983,702 and thus coinbases for blocks at these 3
// heights would be the first opportunity for BIP30 to be violated.
// The search reveals a great many blocks which have an indicated height
// greater than 1,983,702, so we simply remove the optimization to skip
// BIP30 checking for blocks at height 1,983,702 or higher. Before we reach
// that block in another 25 years or so, we should take advantage of a
// future consensus change to do a new and improved version of BIP34 that
// will actually prevent ever creating any duplicate coinbases in the
// future.
static constexpr int BIP34_IMPLIES_BIP30_LIMIT = 1983702;
// There is no potential to create a duplicate coinbase at block 209,921
// because this is still before the BIP34 height and so explicit BIP30
// checking is still active.
// The final case is block 176,684 which has an indicated height of
// 490,897. Unfortunately, this issue was not discovered until about 2 weeks
// before block 490,897 so there was not much opportunity to address this
// case other than to carefully analyze it and determine it would not be a
// problem. Block 490,897 was, in fact, mined with a different coinbase than
// block 176,684, but it is important to note that even if it hadn't been or
// is remined on an alternate fork with a duplicate coinbase, we would still
// not run into a BIP30 violation. This is because the coinbase for 176,684
// is spent in block 185,956 in transaction
// d4f7fbbf92f4a3014a230b2dc70b8058d02eb36ac06b4a0736d9d60eaa9e8781. This
// spending transaction can't be duplicated because it also spends coinbase
// 0328dd85c331237f18e781d692c92de57649529bd5edf1d01036daea32ffde29. This
// coinbase has an indicated height of over 4.2 billion, and wouldn't be
// duplicatable until that height, and it's currently impossible to create a
// chain that long. Nevertheless we may wish to consider a future soft fork
// which retroactively prevents block 490,897 from creating a duplicate
// coinbase. The two historical BIP30 violations often provide a confusing
// edge case when manipulating the UTXO and it would be simpler not to have
// another edge case to deal with.
// testnet3 has no blocks before the BIP34 height with indicated heights
// post BIP34 before approximately height 486,000,000 and presumably will
// be reset before it reaches block 1,983,702 and starts doing unnecessary
// BIP30 checking again.
assert(pindex->pprev);
CBlockIndex *pindexBIP34height = pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
//Only continue to enforce if we're below BIP34 activation height or the block hash at that height doesn't correspond.
fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height || !(pindexBIP34height->GetBlockHash() == chainparams.GetConsensus().BIP34Hash));
// TODO: Remove BIP30 checking from block height 1,983,702 on, once we have a
// consensus change that ensures coinbases at those heights can not
// duplicate earlier coinbases.
if (fEnforceBIP30 || pindex->nHeight >= BIP34_IMPLIES_BIP30_LIMIT) {
for (const auto& tx : block.vtx) {
for (size_t o = 0; o < tx->vout.size(); o++) {
if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
REJECT_INVALID, "bad-txns-BIP30");
}
for (const auto& tx : block.vtx) {
for (size_t o = 0; o < tx->vout.size(); o++) {
if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
REJECT_INVALID, "bad-txns-BIP30");
}
}
}
@ -2011,6 +1955,25 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
int nInputs = 0;
int64_t nSigOpsCost = 0;
blockundo.vtxundo.reserve(block.vtx.size() - 1);
// Construct the incremental merkle tree at the current
// block position,
auto old_tree_root = view.GetBestAnchor();
// saving the top anchor in the block index as we go.
if (!fJustCheck) {
pindex->hashAnchor = old_tree_root;
}
ZCIncrementalMerkleTree tree;
// This should never fail: we should always be able to get the root
// that is on the tip of our chain
assert(view.GetAnchorAt(old_tree_root, tree));
{
// Consistency check: the root of the tree we're given should
// match what we asked for.
assert(tree.root() == old_tree_root);
}
std::vector<PrecomputedTransactionData> txdata;
txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated
for (unsigned int i = 0; i < block.vtx.size(); i++)
@ -2022,7 +1985,7 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
if (!tx.IsCoinBase())
{
CAmount txfee = 0;
if (!Consensus::CheckTxInputs(tx, state, view, pindex->nHeight, txfee)) {
if (!Consensus::CheckTxInputs(tx, state, view, pindex->nHeight, txfee, chainparams.GetConsensus().fCoinbaseMustBeProtected, chainparams.ForkStartHeight(), chainparams.ForkHeightRange())) {
return error("%s: Consensus::CheckTxInputs: %s, %s", __func__, tx.GetHash().ToString(), FormatStateMessage(state));
}
nFees += txfee;
@ -2070,16 +2033,33 @@ bool CChainState::ConnectBlock(const CBlock& block, CValidationState& state, CBl
blockundo.vtxundo.push_back(CTxUndo());
}
UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
for(const JSDescription &joinsplit : tx.vjoinsplit) {
for(const uint256 &note_commitment : joinsplit.commitments) {
// Insert the note commitments into our temporary tree.
tree.append(note_commitment);
}
}
}
view.PushAnchor(tree);
if (!fJustCheck) {
pindex->hashAnchorEnd = tree.root();
}
blockundo.old_tree_root = old_tree_root;
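
The real ZCIncrementalMerkleTree computes a cryptographic Merkle root over note commitments; the toy sketch below replaces it with a plain vector whose "root" is just the element count, which is enough to show the bookkeeping above: save the tip anchor, append every commitment in block order, push the new anchor, and keep the old root so DisconnectBlock() can restore it via blockundo.old_tree_root.

// Toy sketch of the anchor bookkeeping; the tree and its root are stand-ins.
#include <cstdio>
#include <string>
#include <vector>

struct ToyTree {                                     // stand-in for ZCIncrementalMerkleTree
    std::vector<std::string> leaves;
    void append(const std::string& cm) { leaves.push_back(cm); }
    size_t root() const { return leaves.size(); }    // toy "root"
};

int main()
{
    ToyTree tree;                                    // tree at the current best anchor
    size_t old_tree_root = tree.root();              // -> pindex->hashAnchor, blockundo.old_tree_root

    const std::vector<std::vector<std::string>> joinsplits_in_block =
        { {"cm1", "cm2"}, {"cm3"} };                 // commitments per JoinSplit (illustrative)
    for (const auto& js : joinsplits_in_block)
        for (const auto& cm : js)
            tree.append(cm);                         // same order as the block's transactions

    size_t new_root = tree.root();                   // -> view.PushAnchor(tree), pindex->hashAnchorEnd
    std::printf("old=%zu new=%zu\n", old_tree_root, new_root);  // old=0 new=3
    return 0;
}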
int64_t nTime3 = GetTimeMicros(); nTimeConnect += nTime3 - nTime2;
LogPrint(BCLog::BENCH, " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs (%.2fms/blk)]\n", (unsigned)block.vtx.size(), MILLI * (nTime3 - nTime2), MILLI * (nTime3 - nTime2) / block.vtx.size(), nInputs <= 1 ? 0 : MILLI * (nTime3 - nTime2) / (nInputs-1), nTimeConnect * MICRO, nTimeConnect * MILLI / nBlocksTotal);
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
if (block.vtx[0]->GetValueOut() > blockReward)
return state.DoS(100,
error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
block.vtx[0]->GetValueOut(), blockReward),
REJECT_INVALID, "bad-cb-amount");
if (!isForkBlock(pindex->nHeight, chainparams.ForkStartHeight(), chainparams.ForkHeightRange())) {
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
if (block.vtx[0]->GetValueOut() > blockReward)
return state.DoS(100,
error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
block.vtx[0]->GetValueOut(), blockReward),
REJECT_INVALID, "bad-cb-amount");
}
if (!control.Wait())
return state.DoS(100, error("%s: CheckQueue failed", __func__), REJECT_INVALID, "block-validation-failed");
@ -2326,6 +2306,7 @@ bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& cha
if (!ReadBlockFromDisk(block, pindexDelete, chainparams.GetConsensus()))
return AbortNode(state, "Failed to read block");
// Apply the block atomically to the chain state.
uint256 anchorBeforeDisconnect = pcoinsTip->GetBestAnchor();
int64_t nStart = GetTimeMicros();
{
CCoinsViewCache view(pcoinsTip.get());
@ -2336,6 +2317,7 @@ bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& cha
assert(flushed);
}
LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * MILLI);
uint256 anchorAfterDisconnect = pcoinsTip->GetBestAnchor();
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(chainparams, state, FlushStateMode::IF_NEEDED))
return false;
@ -2356,6 +2338,11 @@ bool CChainState::DisconnectTip(CValidationState& state, const CChainParams& cha
chainActive.SetTip(pindexDelete->pprev);
UpdateTip(pindexDelete->pprev, chainparams);
// Get the current commitment tree
ZCIncrementalMerkleTree newTree;
assert(pcoinsTip->GetAnchorAt(pcoinsTip->GetBestAnchor(), newTree));
// Let wallets know transactions went from 1-confirmed to
// 0-confirmed or conflicted:
GetMainSignals().BlockDisconnected(pblock);
@ -2454,6 +2441,11 @@ bool CChainState::ConnectTip(CValidationState& state, const CChainParams& chainp
pthisBlock = pblock;
}
const CBlock& blockConnecting = *pthisBlock;
// Get the current commitment tree
ZCIncrementalMerkleTree oldTree;
assert(pcoinsTip->GetAnchorAt(pcoinsTip->GetBestAnchor(), oldTree));
// Apply the block atomically to the chain state.
int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
int64_t nTime3;
@ -2640,7 +2632,7 @@ bool CChainState::ActivateBestChainStep(CValidationState& state, const CChainPar
// any disconnected transactions back to the mempool.
UpdateMempoolForReorg(disconnectpool, true);
}
mempool.check(pcoinsTip.get());
mempool.check(pcoinsTip.get(), chainparams);
// Callbacks/notifications for a new best chain.
if (fInvalidFound)
@ -3092,25 +3084,44 @@ static bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos,
return true;
}
static bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
static bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& chainparams, bool fCheckPOW = true)
{
const Consensus::Params& consensusParams = chainparams.GetConsensus();
// Check block version
if (block.nVersion < MIN_BLOCK_VERSION)
return state.DoS(100, error("CheckBlockHeader(): block version too low"),
REJECT_INVALID, "version-too-low");
// Check Equihash solution is valid
if (fCheckPOW && !CheckEquihashSolution(&block, chainparams))
return state.DoS(100, error("CheckBlockHeader(): Equihash solution invalid"),
REJECT_INVALID, "invalid-solution");
// Check proof of work matches claimed amount
if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))
return state.DoS(50, false, REJECT_INVALID, "high-hash", false, "proof of work failed");
// Check timestamp
if (block.GetBlockTime() > GetAdjustedTime() + 2 * 60 * 60)
return state.Invalid(error("CheckBlockHeader(): block timestamp too far in the future"),
REJECT_INVALID, "time-too-new");
return true;
}
bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW, bool fCheckMerkleRoot)
bool CheckBlock(const CBlock& block, CValidationState& state,
const CChainParams& chainparams, bool fCheckPOW, bool fCheckMerkleRoot)
{
// These are checks that are independent of context.
const Consensus::Params& consensusParams = chainparams.GetConsensus();
if (block.fChecked)
return true;
// Check that the header is valid (particularly PoW). This is mostly
// redundant with the call in AcceptBlockHeader.
if (!CheckBlockHeader(block, state, consensusParams, fCheckPOW))
if (!CheckBlockHeader(block, state, chainparams, fCheckPOW))
return false;
// Check the merkle root.
@ -3140,9 +3151,23 @@ bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::P
// First transaction must be coinbase, the rest must not be
if (block.vtx.empty() || !block.vtx[0]->IsCoinBase())
return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false, "first tx is not coinbase");
for (unsigned int i = 1; i < block.vtx.size(); i++)
if (block.vtx[i]->IsCoinBase())
return state.DoS(100, false, REJECT_INVALID, "bad-cb-multiple", false, "more than one coinbase");
//fork blocks might have up to fork pre-defined value coinbases and nothing else
if (looksLikeForkBlockHeader(block)) {
if (block.vtx.size() > FORK_CB_PER_BLOCK)
return state.DoS(100, error("CheckBlock(): fork block: too many txns %d > %d coinbase txns", block.vtx.size(), FORK_CB_PER_BLOCK),
REJECT_INVALID, "bad-fork-too-many-tx");
for (unsigned int i = 1; i < block.vtx.size(); i++)
if (!block.vtx[i]->IsCoinBase())
return state.DoS(100, error("CheckBlock(): fork block: non-coinbase found"),
REJECT_INVALID, "bad-fork-non-cb");
} else {
for (unsigned int i = 1; i < block.vtx.size(); i++)
if (block.vtx[i]->IsCoinBase())
return state.DoS(100, error("CheckBlock(): more than one coinbase"),
REJECT_INVALID, "bad-cb-multiple");
}
// Check transactions
for (const auto& tx : block.vtx)
@ -3183,7 +3208,13 @@ static int GetWitnessCommitmentIndex(const CBlock& block)
int commitpos = -1;
if (!block.vtx.empty()) {
for (size_t o = 0; o < block.vtx[0]->vout.size(); o++) {
if (block.vtx[0]->vout[o].scriptPubKey.size() >= 38 && block.vtx[0]->vout[o].scriptPubKey[0] == OP_RETURN && block.vtx[0]->vout[o].scriptPubKey[1] == 0x24 && block.vtx[0]->vout[o].scriptPubKey[2] == 0xaa && block.vtx[0]->vout[o].scriptPubKey[3] == 0x21 && block.vtx[0]->vout[o].scriptPubKey[4] == 0xa9 && block.vtx[0]->vout[o].scriptPubKey[5] == 0xed) {
if (block.vtx[0]->vout[o].scriptPubKey.size() >= 38 &&
block.vtx[0]->vout[o].scriptPubKey[0] == OP_RETURN &&
block.vtx[0]->vout[o].scriptPubKey[1] == 0x24 &&
block.vtx[0]->vout[o].scriptPubKey[2] == 0xaa &&
block.vtx[0]->vout[o].scriptPubKey[3] == 0x21 &&
block.vtx[0]->vout[o].scriptPubKey[4] == 0xa9 &&
block.vtx[0]->vout[o].scriptPubKey[5] == 0xed) {
commitpos = o;
}
}
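
The reformatted condition above is the BIP141 witness-commitment pattern: OP_RETURN (0x6a), a 36-byte push (0x24), the 4-byte header aa21a9ed, then the 32-byte commitment hash. A standalone sketch of the same prefix match on a raw script:

// Sketch of the witness-commitment prefix match on a raw scriptPubKey.
#include <cstdio>
#include <vector>

bool IsWitnessCommitmentScript(const std::vector<unsigned char>& spk)
{
    static const unsigned char prefix[6] = {0x6a, 0x24, 0xaa, 0x21, 0xa9, 0xed};
    if (spk.size() < 38) return false;               // 2 + 4 + 32 bytes minimum
    for (int i = 0; i < 6; ++i)
        if (spk[i] != prefix[i]) return false;
    return true;
}

int main()
{
    std::vector<unsigned char> spk = {0x6a, 0x24, 0xaa, 0x21, 0xa9, 0xed};
    spk.resize(38, 0x00);                            // zero commitment hash, just for shape
    std::printf("%s\n", IsWitnessCommitmentScript(spk) ? "commitment" : "no");
    return 0;
}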
@ -3246,6 +3277,17 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationSta
assert(pindexPrev != nullptr);
const int nHeight = pindexPrev->nHeight + 1;
// Because some checks are bypassed based on the fork indicia in the header,
// we reject any block that looks like a fork block but, by height, is not
// a fork block (and, below, any fork-height block that lacks the indicia)
if(looksLikeForkBlockHeader(block) && !isForkBlock(nHeight, params.ForkStartHeight(), params.ForkHeightRange()))
return state.DoS(100, error("%s: non-fork block looks like fork block", __func__),
REJECT_INVALID, "bad-fork-hashreserved");
if(!looksLikeForkBlockHeader(block) && isForkBlock(nHeight, params.ForkStartHeight(), params.ForkHeightRange()))
return state.DoS(100, error("%s: fork block does not look like fork block", __func__),
REJECT_INVALID, "bad-fork-hashreserved");
// Check proof of work
const Consensus::Params& consensusParams = params.GetConsensus();
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, params))
@ -3271,11 +3313,9 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationSta
// Reject outdated version blocks when 95% (75% on testnet) of the network has upgraded:
// check for version 2, 3 and 4 upgrades
if((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
(block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
(block.nVersion < 4 && nHeight >= consensusParams.BIP65Height))
return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion),
strprintf("rejected nVersion=0x%08x block", block.nVersion));
if(block.nVersion < 4)
return state.Invalid(false, REJECT_OBSOLETE, strprintf("bad-version(0x%08x)", block.nVersion),
strprintf("rejected nVersion=0x%08x block", block.nVersion));
return true;
}
@ -3286,8 +3326,9 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationSta
* in ConnectBlock().
* Note that -reindex-chainstate skips the validation that happens here!
*/
static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const CChainParams& chainparams, const CBlockIndex* pindexPrev)
{
const Consensus::Params& consensusParams = chainparams.GetConsensus();
const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
// Start enforcing BIP113 (Median Time Past) using versionbits logic.
@ -3364,6 +3405,10 @@ static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, c
return state.DoS(100, false, REJECT_INVALID, "bad-blk-weight", false, strprintf("%s : weight limit failed", __func__));
}
if (!ContextualCheckBlockFork(block, state, chainparams, pindexPrev)) {
return false;
}
return true;
}
@ -3385,7 +3430,7 @@ bool CChainState::AcceptBlockHeader(const CBlockHeader& block, CValidationState&
return true;
}
if (!CheckBlockHeader(block, state, chainparams.GetConsensus()))
if (!CheckBlockHeader(block, state, chainparams))
return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__, hash.ToString(), FormatStateMessage(state));
// Get prev block index
@ -3515,8 +3560,8 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
}
if (fNewBlock) *fNewBlock = true;
if (!CheckBlock(block, state, chainparams.GetConsensus()) ||
!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev)) {
if (!CheckBlock(block, state, chainparams) ||
!ContextualCheckBlock(block, state, chainparams, pindex->pprev)) {
if (state.IsInvalid() && !state.CorruptionPossible()) {
pindex->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(pindex);
@ -3559,7 +3604,7 @@ bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<cons
CValidationState state;
// Ensure that CheckBlock() passes before calling AcceptBlock, as
// belt-and-suspenders.
bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus());
bool ret = CheckBlock(*pblock, state, chainparams);
LOCK(cs_main);
@ -3596,9 +3641,9 @@ bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams,
// NOTE: CheckBlockHeader is called by CheckBlock
if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime()))
return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, FormatStateMessage(state));
if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot))
if (!CheckBlock(block, state, chainparams, fCheckPOW, fCheckMerkleRoot))
return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev))
if (!ContextualCheckBlock(block, state, chainparams, pindexPrev))
return error("%s: Consensus::ContextualCheckBlock: %s", __func__, FormatStateMessage(state));
if (!g_chainstate.ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true))
return false;
@ -3932,6 +3977,20 @@ bool static LoadBlockIndexDB(const CChainParams& chainparams)
pblocktree->ReadReindexing(fReindexing);
if(fReindexing) fReindex = true;
// Fill in-memory data
for(const std::pair<uint256, CBlockIndex*>& item : mapBlockIndex)
{
CBlockIndex* pindex = item.second;
// - This relationship will always be true even if pprev has multiple
// children, because hashAnchor is technically a property of pprev,
// not its children.
// - This will miss chain tips; we handle the best tip below, and other
// tips will be handled by ConnectTip during a re-org.
if (pindex->pprev) {
pindex->pprev->hashAnchorEnd = pindex->hashAnchor;
}
}
return true;
}
@ -3958,6 +4017,8 @@ bool LoadChainTip(const CChainParams& chainparams)
return false;
}
chainActive.SetTip(pindex);
// Set hashAnchorEnd for the end of best chain
pindex->hashAnchorEnd = pcoinsTip->GetBestAnchor();
g_chainstate.PruneBlockIndexCandidates();
@ -3995,7 +4056,9 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
int nGoodTransactions = 0;
CValidationState state;
int reportDone = 0;
LogPrintf("[0%%]..."); /* Continued */
// No need to verify JoinSplits twice
for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev)
{
boost::this_thread::interruption_point();
@ -4018,7 +4081,7 @@ bool CVerifyDB::VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview,
if (!ReadBlockFromDisk(block, pindex, chainparams.GetConsensus()))
return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
// check level 1: verify block validity
if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams.GetConsensus()))
if (nCheckLevel >= 1 && !CheckBlock(block, state, chainparams))
return error("%s: *** found bad block at %d, hash=%s (%s)\n", __func__,
pindex->nHeight, pindex->GetBlockHash().ToString(), FormatStateMessage(state));
// check level 2: verify undo validity

View File

@ -87,7 +87,7 @@ static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
static const unsigned int BLOCK_STALLING_TIMEOUT = 2;
/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
* less than this number, we reached its tip. Changing this value is a protocol upgrade. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
static const unsigned int MAX_HEADERS_RESULTS = 160;
/** Maximum depth of blocks we're willing to serve as compact blocks to peers
* when requested. For older blocks, a regular BLOCK response will be sent. */
static const int MAX_CMPCTBLOCK_DEPTH = 5;
@ -221,7 +221,7 @@ static const unsigned int DEFAULT_CHECKLEVEL = 3;
// Setting the target to > than 550MB will make it likely we can respect the target.
static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * 1024 * 1024;
/**
/**
* Process an incoming block. This only returns after the best known valid
* block is made active. Note that it does not, however, guarantee that the
* specific block passed to it has been checked for validity!
@ -232,7 +232,7 @@ static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * 1024 * 1024;
*
* Note that we guarantee that either the proof-of-work is valid on pblock, or
* (and possibly also) BlockChecked will have been called.
*
*
* Call without cs_main held.
*
* @param[in] pblock The block we want to process.
@ -358,7 +358,7 @@ bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints* lp = null
/**
* Closure representing one script verification
* Note that this stores references to the spending transaction
* Note that this stores references to the spending transaction
*/
class CScriptCheck
{
@ -404,7 +404,7 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex
/** Functions for validating blocks and updating the block tree */
/** Context-independent validity checks */
bool CheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true, bool fCheckMerkleRoot = true);
bool CheckBlock(const CBlock& block, CValidationState& state, const CChainParams& consensusParams, bool fCheckPOW = true, bool fCheckMerkleRoot = true);
/** Check a block is completely valid from start to finish (only works on top of our current best block, with cs_main held) */
bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW = true, bool fCheckMerkleRoot = true);