zcash-patched-for-explorer/src/main.cpp


// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2014 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "main.h"
#include "sodium.h"
#include "addrman.h"
#include "alert.h"
#include "arith_uint256.h"
#include "chainparams.h"
#include "checkpoints.h"
#include "checkqueue.h"
#include "consensus/upgrades.h"
#include "consensus/validation.h"
#include "deprecation.h"
#include "init.h"
#include "merkleblock.h"
#include "metrics.h"
#include "net.h"
#include "pow.h"
#include "txdb.h"
#include "txmempool.h"
#include "ui_interface.h"
#include "undo.h"
#include "util.h"
#include "utilmoneystr.h"
#include "validationinterface.h"
#include "wallet/asyncrpcoperation_sendmany.h"
#include "wallet/asyncrpcoperation_shieldcoinbase.h"
#include <sstream>
#include <boost/algorithm/string/replace.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/math/distributions/poisson.hpp>
#include <boost/thread.hpp>
#include <boost/static_assert.hpp>
using namespace std;
#if defined(NDEBUG)
# error "Zcash cannot be compiled without assertions."
#endif
/**
* Global state
*/
CCriticalSection cs_main;
BlockMap mapBlockIndex;
CChain chainActive;
CBlockIndex *pindexBestHeader = NULL;
int64_t nTimeBestReceived = 0;
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
int nScriptCheckThreads = 0;
bool fExperimentalMode = false;
bool fImporting = false;
bool fReindex = false;
bool fTxIndex = false;
bool fHavePruned = false;
bool fPruneMode = false;
bool fIsBareMultisigStd = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = true;
bool fCoinbaseEnforcedProtectionEnabled = true;
size_t nCoinCacheUsage = 5000 * 300;
uint64_t nPruneTarget = 0;
bool fAlerts = DEFAULT_ALERTS;
unsigned int expiryDelta = DEFAULT_TX_EXPIRY_DELTA;
/** Fees smaller than this (in satoshi) are considered zero fee (for relaying and mining) */
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
CTxMemPool mempool(::minRelayTxFee);
struct COrphanTx {
CTransaction tx;
NodeId fromPeer;
};
map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
map<uint256, set<uint256> > mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/**
* Returns true if there are nRequired or more blocks of minVersion or above
* in the last Consensus::Params::nMajorityWindow blocks, starting at pstart and going backwards.
*/
static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams);
static void CheckBlockIndex();
/** Constant stuff for coinbase transactions we create: */
CScript COINBASE_FLAGS;
const string strMessageMagic = "Zcash Signed Message:\n";
// Internal stuff
namespace {
struct CBlockIndexWorkComparator
{
bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
// First sort by most total work, ...
if (pa->nChainWork > pb->nChainWork) return false;
if (pa->nChainWork < pb->nChainWork) return true;
// ... then by earliest time received, ...
if (pa->nSequenceId < pb->nSequenceId) return false;
if (pa->nSequenceId > pb->nSequenceId) return true;
// Use pointer address as tie breaker (should only happen with blocks
// loaded from disk, as those all have id 0).
if (pa < pb) return false;
if (pa > pb) return true;
// Identical blocks.
return false;
}
};
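// Note on ordering: with this comparator the best candidate (most chain
// work, then earliest sequence id) sorts *last*, so consumers of
// setBlockIndexCandidates below read the best tip via the set's reverse
// iterator (rbegin()).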
CBlockIndex *pindexBestInvalid;
/**
* The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and
* as good as our current tip or better. Entries may be failed, though, and pruning nodes may be
* missing the data for the block.
*/
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
/** Number of nodes with fSyncStarted. */
int nSyncStarted = 0;
/** All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
* Pruned nodes may have entries where B is missing data.
*/
multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;
CCriticalSection cs_LastBlockFile;
std::vector<CBlockFileInfo> vinfoBlockFile;
int nLastBlockFile = 0;
/** Global flag to indicate we should check to see if there are
* block/undo files that should be deleted. Set on startup
* or if we allocate more file space when we're in prune mode
*/
bool fCheckForPruning = false;
/**
* Every received block is assigned a unique and increasing identifier, so we
* know which one to give priority in case of a fork.
*/
CCriticalSection cs_nBlockSequenceId;
/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
uint32_t nBlockSequenceId = 1;
/**
* Sources of received blocks, saved to be able to send them reject
* messages or ban them when processing happens afterwards. Protected by
* cs_main.
*/
map<uint256, NodeId> mapBlockSource;
/**
* Filter for transactions that were recently rejected by
* AcceptToMemoryPool. These are not rerequested until the chain tip
* changes, at which point the entire filter is reset. Protected by
* cs_main.
*
* Without this filter we'd be re-requesting txs from each of our peers,
* increasing bandwidth consumption considerably. For instance, with 100
* peers, half of which relay a tx we don't accept, that might be a 50x
* bandwidth increase. A flooding attacker attempting to roll-over the
 * filter using minimum-sized, 60-byte transactions might manage to send
* 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
* two minute window to send invs to us.
*
* Decreasing the false positive rate is fairly cheap, so we pick one in a
* million to make it highly unlikely for users to have issues with this
* filter.
*
* Memory used: 1.7MB
*/
boost::scoped_ptr<CRollingBloomFilter> recentRejects;
uint256 hashRecentRejectsChainTip;
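// Illustrative sketch (a condensed form of the pattern used by the message
// handlers later in this file, not a separate API): the filter is allocated
// lazily, and because a tip change can make previously rejected
// transactions valid again, it is reset whenever the chain tip moves:
//
//     if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip) {
//         hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
//         recentRejects->reset();
//     }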
/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
uint256 hash;
CBlockIndex *pindex; //! Optional.
int64_t nTime; //! Time of "getdata" request in microseconds.
bool fValidatedHeaders; //! Whether this block has validated headers at the time of request.
int64_t nTimeDisconnect; //! The timeout for this block request (for disconnecting a slow peer)
};
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> > mapBlocksInFlight;
/** Number of blocks in flight with validated headers. */
int nQueuedValidatedHeaders = 0;
/** Number of preferable block download peers. */
int nPreferredDownload = 0;
/** Dirty block index entries. */
set<CBlockIndex*> setDirtyBlockIndex;
/** Dirty block file entries. */
set<int> setDirtyFileInfo;
} // anon namespace
//////////////////////////////////////////////////////////////////////////////
//
// Registration of network node signals.
//
namespace {
struct CBlockReject {
unsigned char chRejectCode;
string strRejectReason;
uint256 hashBlock;
};
/**
 * Maintain validation-specific state about nodes, protected by cs_main, instead of
* by CNode's own locks. This simplifies asynchronous operation, where
* processing of incoming data is done after the ProcessMessage call returns,
* and we're no longer holding the node's locks.
*/
struct CNodeState {
//! The peer's address
CService address;
//! Whether we have a fully established connection.
bool fCurrentlyConnected;
//! Accumulated misbehaviour score for this peer.
int nMisbehavior;
//! Whether this peer should be disconnected and banned (unless whitelisted).
bool fShouldBan;
//! String name of this peer (debugging/logging purposes).
std::string name;
//! List of asynchronously-determined block rejections to notify this peer about.
std::vector<CBlockReject> rejects;
//! The best known block we know this peer has announced.
CBlockIndex *pindexBestKnownBlock;
//! The hash of the last unknown block this peer has announced.
uint256 hashLastUnknownBlock;
//! The last full block we both have.
CBlockIndex *pindexLastCommonBlock;
//! Whether we've started headers synchronization with this peer.
bool fSyncStarted;
//! Since when we're stalling block download progress (in microseconds), or 0.
int64_t nStallingSince;
list<QueuedBlock> vBlocksInFlight;
int nBlocksInFlight;
int nBlocksInFlightValidHeaders;
//! Whether we consider this a preferred download peer.
bool fPreferredDownload;
CNodeState() {
fCurrentlyConnected = false;
nMisbehavior = 0;
fShouldBan = false;
pindexBestKnownBlock = NULL;
hashLastUnknownBlock.SetNull();
pindexLastCommonBlock = NULL;
fSyncStarted = false;
nStallingSince = 0;
nBlocksInFlight = 0;
nBlocksInFlightValidHeaders = 0;
fPreferredDownload = false;
}
};
/** Map maintaining per-node state. Requires cs_main. */
map<NodeId, CNodeState> mapNodeState;
// Requires cs_main.
CNodeState *State(NodeId pnode) {
map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
if (it == mapNodeState.end())
return NULL;
return &it->second;
}
int GetHeight()
{
LOCK(cs_main);
return chainActive.Height();
}
void UpdatePreferredDownload(CNode* node, CNodeState* state)
{
nPreferredDownload -= state->fPreferredDownload;
// Whether this node should be marked as a preferred download node.
state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;
nPreferredDownload += state->fPreferredDownload;
}
// Returns the time (in microseconds) at which to time out a block request made at nTime
int64_t GetBlockTimeout(int64_t nTime, int nValidatedQueuedBefore, const Consensus::Params &consensusParams)
{
return nTime + 500000 * consensusParams.nPowTargetSpacing * (4 + nValidatedQueuedBefore);
}
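// Worked example (assuming Zcash's 150-second nPowTargetSpacing): with no
// validated headers already queued, the timeout is
//     nTime + 500000 * 150 * (4 + 0) us = nTime + 300 seconds (5 minutes),
// and every validated block already in flight extends it by a further
//     500000 * 150 us = 75 seconds.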
void InitializeNode(NodeId nodeid, const CNode *pnode) {
LOCK(cs_main);
CNodeState &state = mapNodeState.insert(std::make_pair(nodeid, CNodeState())).first->second;
state.name = pnode->addrName;
state.address = pnode->addr;
}
void FinalizeNode(NodeId nodeid) {
LOCK(cs_main);
CNodeState *state = State(nodeid);
if (state->fSyncStarted)
nSyncStarted--;
if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
AddressCurrentlyConnected(state->address);
}
BOOST_FOREACH(const QueuedBlock& entry, state->vBlocksInFlight)
mapBlocksInFlight.erase(entry.hash);
EraseOrphansFor(nodeid);
nPreferredDownload -= state->fPreferredDownload;
mapNodeState.erase(nodeid);
}
// Requires cs_main.
// Returns a bool indicating whether we requested this block.
bool MarkBlockAsReceived(const uint256& hash) {
map<uint256, pair<NodeId, list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
if (itInFlight != mapBlocksInFlight.end()) {
CNodeState *state = State(itInFlight->second.first);
nQueuedValidatedHeaders -= itInFlight->second.second->fValidatedHeaders;
state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
state->vBlocksInFlight.erase(itInFlight->second.second);
state->nBlocksInFlight--;
state->nStallingSince = 0;
mapBlocksInFlight.erase(itInFlight);
return true;
}
return false;
}
// Requires cs_main.
void MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const Consensus::Params& consensusParams, CBlockIndex *pindex = NULL) {
CNodeState *state = State(nodeid);
assert(state != NULL);
// Make sure it's not listed somewhere already.
MarkBlockAsReceived(hash);
int64_t nNow = GetTimeMicros();
QueuedBlock newentry = {hash, pindex, nNow, pindex != NULL, GetBlockTimeout(nNow, nQueuedValidatedHeaders, consensusParams)};
nQueuedValidatedHeaders += newentry.fValidatedHeaders;
list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), newentry);
state->nBlocksInFlight++;
state->nBlocksInFlightValidHeaders += newentry.fValidatedHeaders;
mapBlocksInFlight[hash] = std::make_pair(nodeid, it);
}
/** Check whether the last unknown block a peer advertised is not yet known. */
void ProcessBlockAvailability(NodeId nodeid) {
CNodeState *state = State(nodeid);
assert(state != NULL);
if (!state->hashLastUnknownBlock.IsNull()) {
BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
if (state->pindexBestKnownBlock == NULL || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
state->pindexBestKnownBlock = itOld->second;
state->hashLastUnknownBlock.SetNull();
}
}
}
/** Update tracking information about which blocks a peer is assumed to have. */
void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
CNodeState *state = State(nodeid);
assert(state != NULL);
ProcessBlockAvailability(nodeid);
BlockMap::iterator it = mapBlockIndex.find(hash);
if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
// An actually better block was announced.
if (state->pindexBestKnownBlock == NULL || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
state->pindexBestKnownBlock = it->second;
} else {
// An unknown block was announced; just assume that the latest one is the best one.
state->hashLastUnknownBlock = hash;
}
}
/** Find the last common ancestor two blocks have.
* Both pa and pb must be non-NULL. */
CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
if (pa->nHeight > pb->nHeight) {
pa = pa->GetAncestor(pb->nHeight);
} else if (pb->nHeight > pa->nHeight) {
pb = pb->GetAncestor(pa->nHeight);
}
while (pa != pb && pa && pb) {
pa = pa->pprev;
pb = pb->pprev;
}
// Eventually all chain branches meet at the genesis block.
assert(pa == pb);
return pa;
}
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
* at most count entries. */
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller) {
if (count == 0)
return;
vBlocks.reserve(vBlocks.size() + count);
CNodeState *state = State(nodeid);
assert(state != NULL);
// Make sure pindexBestKnownBlock is up to date, we'll need it.
ProcessBlockAvailability(nodeid);
if (state->pindexBestKnownBlock == NULL || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) {
// This peer has nothing interesting.
return;
}
if (state->pindexLastCommonBlock == NULL) {
// Bootstrap quickly by guessing a parent of our best tip is the forking point.
// Guessing wrong in either direction is not a problem.
state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
}
// If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
// of its current tip anymore. Go back enough to fix that.
state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
return;
std::vector<CBlockIndex*> vToFetch;
CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
// Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
// linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
// download that next block if the window were 1 larger.
int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
NodeId waitingfor = -1;
while (pindexWalk->nHeight < nMaxHeight) {
// Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
// pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
// as iterating over ~100 CBlockIndex* entries anyway.
int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
vToFetch.resize(nToFetch);
pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
vToFetch[nToFetch - 1] = pindexWalk;
for (unsigned int i = nToFetch - 1; i > 0; i--) {
vToFetch[i - 1] = vToFetch[i]->pprev;
}
// Iterate over those blocks in vToFetch (in forward direction), adding the ones that
// are not yet downloaded and not in flight to vBlocks. In the meantime, update
// pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
// already part of our chain (and therefore don't need it even if pruned).
BOOST_FOREACH(CBlockIndex* pindex, vToFetch) {
if (!pindex->IsValid(BLOCK_VALID_TREE)) {
// We consider the chain that this peer is on invalid.
return;
}
if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
if (pindex->nChainTx)
state->pindexLastCommonBlock = pindex;
} else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
// The block is not already downloaded, and not yet in flight.
if (pindex->nHeight > nWindowEnd) {
// We reached the end of the window.
if (vBlocks.size() == 0 && waitingfor != nodeid) {
// We aren't able to fetch anything, but we would be if the download window were one larger.
nodeStaller = waitingfor;
}
return;
}
vBlocks.push_back(pindex);
if (vBlocks.size() == count) {
return;
}
} else if (waitingfor == -1) {
// This is the first already-in-flight block.
waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
}
}
}
}
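// Worked example of the window logic (assuming BLOCK_DOWNLOAD_WINDOW is
// 1024, its value in the Bitcoin Core code this file derives from): if the
// last common block with a peer is at height 100000, only blocks up to
// height 101024 are queued from it; height 101025 is probed solely so that,
// when nothing can be fetched at all, the peer clogging the window can be
// reported back through nodeStaller.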
} // anon namespace
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
LOCK(cs_main);
CNodeState *state = State(nodeid);
if (state == NULL)
return false;
stats.nMisbehavior = state->nMisbehavior;
stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
BOOST_FOREACH(const QueuedBlock& queue, state->vBlocksInFlight) {
if (queue.pindex)
stats.vHeightInFlight.push_back(queue.pindex->nHeight);
}
return true;
}
void RegisterNodeSignals(CNodeSignals& nodeSignals)
{
nodeSignals.GetHeight.connect(&GetHeight);
nodeSignals.ProcessMessages.connect(&ProcessMessages);
nodeSignals.SendMessages.connect(&SendMessages);
nodeSignals.InitializeNode.connect(&InitializeNode);
nodeSignals.FinalizeNode.connect(&FinalizeNode);
}
void UnregisterNodeSignals(CNodeSignals& nodeSignals)
{
nodeSignals.GetHeight.disconnect(&GetHeight);
nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
nodeSignals.SendMessages.disconnect(&SendMessages);
nodeSignals.InitializeNode.disconnect(&InitializeNode);
nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
}
CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator)
{
// Find the first block the caller has in the main chain
BOOST_FOREACH(const uint256& hash, locator.vHave) {
BlockMap::iterator mi = mapBlockIndex.find(hash);
if (mi != mapBlockIndex.end())
{
CBlockIndex* pindex = (*mi).second;
if (chain.Contains(pindex))
return pindex;
if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
return chain.Tip();
}
}
}
return chain.Genesis();
}
CCoinsViewCache *pcoinsTip = NULL;
CBlockTreeDB *pblocktree = NULL;
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//
bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
uint256 hash = tx.GetHash();
if (mapOrphanTransactions.count(hash))
return false;
// Ignore big transactions, to avoid a
// send-big-orphans memory exhaustion attack. If a peer has a legitimate
// large transaction with a missing parent then we assume
// it will rebroadcast it later, after the parent transaction(s)
// have been mined or received.
// 10,000 orphans, each of which is at most 5,000 bytes big is
// at most 50 megabytes of orphans:
unsigned int sz = tx.GetSerializeSize(SER_NETWORK, tx.nVersion);
if (sz > 5000)
{
LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
return false;
}
mapOrphanTransactions[hash].tx = tx;
mapOrphanTransactions[hash].fromPeer = peer;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
mapOrphanTransactionsByPrev[txin.prevout.hash].insert(hash);
LogPrint("mempool", "stored orphan tx %s (mapsz %u prevsz %u)\n", hash.ToString(),
mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
return true;
}
void static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
if (it == mapOrphanTransactions.end())
return;
BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
{
map<uint256, set<uint256> >::iterator itPrev = mapOrphanTransactionsByPrev.find(txin.prevout.hash);
if (itPrev == mapOrphanTransactionsByPrev.end())
continue;
itPrev->second.erase(hash);
if (itPrev->second.empty())
mapOrphanTransactionsByPrev.erase(itPrev);
}
mapOrphanTransactions.erase(it);
}
void EraseOrphansFor(NodeId peer)
{
int nErased = 0;
map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
while (iter != mapOrphanTransactions.end())
{
map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
if (maybeErase->second.fromPeer == peer)
{
EraseOrphanTx(maybeErase->second.tx.GetHash());
++nErased;
}
}
if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
}
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
unsigned int nEvicted = 0;
while (mapOrphanTransactions.size() > nMaxOrphans)
{
// Evict a random orphan:
uint256 randomhash = GetRandHash();
map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
if (it == mapOrphanTransactions.end())
it = mapOrphanTransactions.begin();
EraseOrphanTx(it->first);
++nEvicted;
}
return nEvicted;
}
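// Usage sketch (hypothetical call site mirroring how the message handler
// later in this file invokes it): note that taking lower_bound of a random
// hash approximates evicting a uniformly random orphan.
//
//     unsigned int nMaxOrphanTx = (unsigned int)std::max(
//         (int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
//     unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);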
bool IsStandardTx(const CTransaction& tx, string& reason, const int nHeight)
{
bool isOverwinter = NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_OVERWINTER);
if (isOverwinter) {
// Overwinter standard rules apply
if (tx.nVersion > CTransaction::OVERWINTER_MAX_CURRENT_VERSION || tx.nVersion < CTransaction::OVERWINTER_MIN_CURRENT_VERSION) {
reason = "overwinter-version";
return false;
}
} else {
// Sprout standard rules apply
if (tx.nVersion > CTransaction::SPROUT_MAX_CURRENT_VERSION || tx.nVersion < CTransaction::SPROUT_MIN_CURRENT_VERSION) {
reason = "version";
return false;
}
}
BOOST_FOREACH(const CTxIn& txin, tx.vin)
{
// Biggest 'standard' txin is a 15-of-15 P2SH multisig with compressed
// keys. (remember the 520 byte limit on redeemScript size) That works
// out to a (15*(33+1))+3=513 byte redeemScript, 513+1+15*(73+1)+3=1627
// bytes of scriptSig, which we round off to 1650 bytes for some minor
// future-proofing. That's also enough to spend a 20-of-20
// CHECKMULTISIG scriptPubKey, though such a scriptPubKey is not
// considered standard.
if (txin.scriptSig.size() > 1650) {
reason = "scriptsig-size";
return false;
}
if (!txin.scriptSig.IsPushOnly()) {
reason = "scriptsig-not-pushonly";
return false;
}
}
unsigned int nDataOut = 0;
txnouttype whichType;
BOOST_FOREACH(const CTxOut& txout, tx.vout) {
if (!::IsStandard(txout.scriptPubKey, whichType)) {
reason = "scriptpubkey";
return false;
}
if (whichType == TX_NULL_DATA)
nDataOut++;
else if ((whichType == TX_MULTISIG) && (!fIsBareMultisigStd)) {
reason = "bare-multisig";
return false;
} else if (txout.IsDust(::minRelayTxFee)) {
reason = "dust";
return false;
}
}
// only one OP_RETURN txout is permitted
if (nDataOut > 1) {
reason = "multi-op-return";
return false;
}
return true;
}
bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime)
{
if (tx.nLockTime == 0)
return true;
if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? (int64_t)nBlockHeight : nBlockTime))
return true;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
if (!txin.IsFinal())
return false;
return true;
}
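// Worked example: LOCKTIME_THRESHOLD is 500000000, so nLockTime = 250000 is
// read as a block height and the transaction becomes final once evaluated
// at nBlockHeight 250001 or later, while nLockTime = 1600000000 is read as
// a Unix timestamp and compared against nBlockTime instead. In either case,
// a transaction whose inputs all carry the final sequence number
// (0xffffffff) is final regardless of nLockTime.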
bool IsExpiredTx(const CTransaction &tx, int nBlockHeight)
{
if (tx.nExpiryHeight == 0 || tx.IsCoinBase()) {
return false;
}
return static_cast<uint32_t>(nBlockHeight) > tx.nExpiryHeight;
}
bool CheckFinalTx(const CTransaction &tx, int flags)
{
AssertLockHeld(cs_main);
// By convention a negative value for flags indicates that the
// current network-enforced consensus rules should be used. In
// a future soft-fork scenario that would mean checking which
// rules would be enforced for the next block and setting the
// appropriate flags. At the present time no soft-forks are
// scheduled, so no flags are set.
flags = std::max(flags, 0);
// CheckFinalTx() uses chainActive.Height()+1 to evaluate
// nLockTime because when IsFinalTx() is called within
// CBlock::AcceptBlock(), the height of the block *being*
// evaluated is what is used. Thus if we want to know if a
// transaction can be part of the *next* block, we need to call
// IsFinalTx() with one more than chainActive.Height().
const int nBlockHeight = chainActive.Height() + 1;
// Timestamps on the other hand don't get any special treatment,
// because we can't know what timestamp the next block will have,
// and there aren't timestamp applications where it matters.
// However this changes once median past time-locks are enforced:
const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST)
? chainActive.Tip()->GetMedianTimePast()
: GetAdjustedTime();
return IsFinalTx(tx, nBlockHeight, nBlockTime);
}
/**
* Check transaction inputs to mitigate two
* potential denial-of-service attacks:
*
* 1. scriptSigs with extra data stuffed into them,
* not consumed by scriptPubKey (or P2SH script)
* 2. P2SH scripts with a crazy number of expensive
* CHECKSIG/CHECKMULTISIG operations
*/
bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs, uint32_t consensusBranchId)
{
if (tx.IsCoinBase())
return true; // Coinbases don't use vin normally
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
const CTxOut& prev = mapInputs.GetOutputFor(tx.vin[i]);
vector<vector<unsigned char> > vSolutions;
txnouttype whichType;
// get the scriptPubKey corresponding to this input:
const CScript& prevScript = prev.scriptPubKey;
if (!Solver(prevScript, whichType, vSolutions))
return false;
int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions);
if (nArgsExpected < 0)
return false;
// Transactions with extra stuff in their scriptSigs are
// non-standard. Note that this EvalScript() call will
// be quick, because if there are any operations
// beside "push data" in the scriptSig
// IsStandardTx() will have already returned false
// and this method isn't called.
vector<vector<unsigned char> > stack;
if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), consensusBranchId))
return false;
if (whichType == TX_SCRIPTHASH)
{
if (stack.empty())
return false;
CScript subscript(stack.back().begin(), stack.back().end());
vector<vector<unsigned char> > vSolutions2;
txnouttype whichType2;
if (Solver(subscript, whichType2, vSolutions2))
{
int tmpExpected = ScriptSigArgsExpected(whichType2, vSolutions2);
if (tmpExpected < 0)
return false;
nArgsExpected += tmpExpected;
}
else
{
// Any other Script with less than 15 sigops OK:
unsigned int sigops = subscript.GetSigOpCount(true);
// ... extra data left on the stack after execution is OK, too:
return (sigops <= MAX_P2SH_SIGOPS);
}
}
if (stack.size() != (unsigned int)nArgsExpected)
return false;
}
return true;
}
unsigned int GetLegacySigOpCount(const CTransaction& tx)
{
unsigned int nSigOps = 0;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
{
nSigOps += txin.scriptSig.GetSigOpCount(false);
}
BOOST_FOREACH(const CTxOut& txout, tx.vout)
{
nSigOps += txout.scriptPubKey.GetSigOpCount(false);
}
return nSigOps;
}
unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs)
{
if (tx.IsCoinBase())
return 0;
unsigned int nSigOps = 0;
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]);
if (prevout.scriptPubKey.IsPayToScriptHash())
nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig);
}
return nSigOps;
}
/**
* Check a transaction contextually against a set of consensus rules valid at a given block height.
*
* Notes:
* 1. AcceptToMemoryPool calls CheckTransaction and this function.
* 2. ProcessNewBlock calls AcceptBlock, which calls CheckBlock (which calls CheckTransaction)
* and ContextualCheckBlock (which calls this function).
*/
bool ContextualCheckTransaction(const CTransaction& tx, CValidationState &state, const int nHeight, const int dosLevel)
{
bool isOverwinter = NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_OVERWINTER);
bool isSprout = !isOverwinter;
// If Sprout rules apply, reject transactions which are intended for Overwinter and beyond
if (isSprout && tx.fOverwintered) {
return state.DoS(dosLevel, error("ContextualCheckTransaction(): overwinter is not active yet"),
REJECT_INVALID, "tx-overwinter-not-active");
}
// If Overwinter rules apply:
if (isOverwinter) {
// Reject transactions with valid version but missing overwinter flag
if (tx.nVersion >= OVERWINTER_MIN_TX_VERSION && !tx.fOverwintered) {
return state.DoS(dosLevel, error("ContextualCheckTransaction(): overwinter flag must be set"),
REJECT_INVALID, "tx-overwinter-flag-not-set");
}
// Reject transactions with invalid version
if (tx.fOverwintered && tx.nVersion > OVERWINTER_MAX_TX_VERSION ) {
return state.DoS(100, error("CheckTransaction(): overwinter version too high"),
REJECT_INVALID, "bad-tx-overwinter-version-too-high");
}
// Reject transactions intended for Sprout
if (!tx.fOverwintered) {
return state.DoS(dosLevel, error("ContextualCheckTransaction: overwinter is active"),
REJECT_INVALID, "tx-overwinter-active");
}
// Check that all transactions are unexpired
if (IsExpiredTx(tx, nHeight)) {
return state.DoS(dosLevel, error("ContextualCheckTransaction(): transaction is expired"), REJECT_INVALID, "tx-overwinter-expired");
}
}
if (!(tx.IsCoinBase() || tx.vjoinsplit.empty())) {
auto consensusBranchId = CurrentEpochBranchId(nHeight, Params().GetConsensus());
// Empty output script.
CScript scriptCode;
uint256 dataToBeSigned;
try {
dataToBeSigned = SignatureHash(scriptCode, tx, NOT_AN_INPUT, SIGHASH_ALL, 0, consensusBranchId);
} catch (const std::logic_error& ex) {
return state.DoS(100, error("CheckTransaction(): error computing signature hash"),
REJECT_INVALID, "error-computing-signature-hash");
}
BOOST_STATIC_ASSERT(crypto_sign_PUBLICKEYBYTES == 32);
// We rely on libsodium to check that the signature is canonical.
// https://github.com/jedisct1/libsodium/commit/62911edb7ff2275cccd74bf1c8aefcc4d76924e0
if (crypto_sign_verify_detached(&tx.joinSplitSig[0],
dataToBeSigned.begin(), 32,
tx.joinSplitPubKey.begin()
) != 0) {
return state.DoS(100, error("CheckTransaction(): invalid joinsplit signature"),
REJECT_INVALID, "bad-txns-invalid-joinsplit-signature");
}
}
return true;
}
bool CheckTransaction(const CTransaction& tx, CValidationState &state,
libzcash::ProofVerifier& verifier)
{
// Don't count coinbase transactions because mining skews the count
if (!tx.IsCoinBase()) {
transactionsValidated.increment();
}
if (!CheckTransactionWithoutProofVerification(tx, state)) {
return false;
} else {
// Ensure that zk-SNARKs verify
BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
if (!joinsplit.Verify(*pzcashParams, verifier, tx.joinSplitPubKey)) {
return state.DoS(100, error("CheckTransaction(): joinsplit does not verify"),
REJECT_INVALID, "bad-txns-joinsplit-verification-failed");
}
}
return true;
}
}
bool CheckTransactionWithoutProofVerification(const CTransaction& tx, CValidationState &state)
{
// Basic checks that don't depend on any context
/**
* Previously:
* 1. The consensus rule below was:
* if (tx.nVersion < SPROUT_MIN_TX_VERSION) { ... }
* which checked if tx.nVersion fell within the range:
* INT32_MIN <= tx.nVersion < SPROUT_MIN_TX_VERSION
* 2. The parser allowed tx.nVersion to be negative
*
* Now:
 * 1. The consensus rule checks to see if tx.nVersion falls within the range:
* 0 <= tx.nVersion < SPROUT_MIN_TX_VERSION
* 2. The previous consensus rule checked for negative values within the range:
* INT32_MIN <= tx.nVersion < 0
* This is unnecessary for Overwinter transactions since the parser now
* interprets the sign bit as fOverwintered, so tx.nVersion is always >=0,
* and when Overwinter is not active ContextualCheckTransaction rejects
* transactions with fOverwintered set. When fOverwintered is set,
* this function and ContextualCheckTransaction will together check to
* ensure tx.nVersion avoids the following ranges:
* 0 <= tx.nVersion < OVERWINTER_MIN_TX_VERSION
* OVERWINTER_MAX_TX_VERSION < tx.nVersion <= INT32_MAX
*/
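// For reference, a sketch of the header decoding this relies on (the
// authoritative serialization lives in primitives/transaction.h; shown here
// under that assumption):
//
//     uint32_t header;                         // first four bytes on the wire
//     bool fOverwintered = header >> 31;       // sign bit repurposed as flag
//     int32_t nVersion = header & 0x7FFFFFFF;  // hence nVersion is always >= 0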
if (!tx.fOverwintered && tx.nVersion < SPROUT_MIN_TX_VERSION) {
return state.DoS(100, error("CheckTransaction(): version too low"),
REJECT_INVALID, "bad-txns-version-too-low");
}
else if (tx.fOverwintered) {
if (tx.nVersion < OVERWINTER_MIN_TX_VERSION) {
return state.DoS(100, error("CheckTransaction(): overwinter version too low"),
REJECT_INVALID, "bad-tx-overwinter-version-too-low");
}
if (tx.nVersionGroupId != OVERWINTER_VERSION_GROUP_ID) {
return state.DoS(100, error("CheckTransaction(): unknown tx version group id"),
REJECT_INVALID, "bad-tx-version-group-id");
}
if (tx.nExpiryHeight >= TX_EXPIRY_HEIGHT_THRESHOLD) {
return state.DoS(100, error("CheckTransaction(): expiry height is too high"),
REJECT_INVALID, "bad-tx-expiry-height-too-high");
}
}
// Transactions can contain empty `vin` and `vout` so long as
// `vjoinsplit` is non-empty.
if (tx.vin.empty() && tx.vjoinsplit.empty())
return state.DoS(10, error("CheckTransaction(): vin empty"),
REJECT_INVALID, "bad-txns-vin-empty");
if (tx.vout.empty() && tx.vjoinsplit.empty())
return state.DoS(10, error("CheckTransaction(): vout empty"),
REJECT_INVALID, "bad-txns-vout-empty");
// Size limits
BOOST_STATIC_ASSERT(MAX_BLOCK_SIZE > MAX_TX_SIZE); // sanity
if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE)
return state.DoS(100, error("CheckTransaction(): size limits failed"),
REJECT_INVALID, "bad-txns-oversize");
// Check for negative or overflow output values
CAmount nValueOut = 0;
BOOST_FOREACH(const CTxOut& txout, tx.vout)
{
if (txout.nValue < 0)
return state.DoS(100, error("CheckTransaction(): txout.nValue negative"),
REJECT_INVALID, "bad-txns-vout-negative");
if (txout.nValue > MAX_MONEY)
return state.DoS(100, error("CheckTransaction(): txout.nValue too high"),
REJECT_INVALID, "bad-txns-vout-toolarge");
nValueOut += txout.nValue;
if (!MoneyRange(nValueOut))
return state.DoS(100, error("CheckTransaction(): txout total out of range"),
REJECT_INVALID, "bad-txns-txouttotal-toolarge");
}
// Ensure that joinsplit values are well-formed
BOOST_FOREACH(const JSDescription& joinsplit, tx.vjoinsplit)
{
if (joinsplit.vpub_old < 0) {
return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_old negative"),
REJECT_INVALID, "bad-txns-vpub_old-negative");
}
if (joinsplit.vpub_new < 0) {
return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_new negative"),
REJECT_INVALID, "bad-txns-vpub_new-negative");
}
if (joinsplit.vpub_old > MAX_MONEY) {
return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_old too high"),
REJECT_INVALID, "bad-txns-vpub_old-toolarge");
}
if (joinsplit.vpub_new > MAX_MONEY) {
return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_new too high"),
REJECT_INVALID, "bad-txns-vpub_new-toolarge");
}
if (joinsplit.vpub_new != 0 && joinsplit.vpub_old != 0) {
return state.DoS(100, error("CheckTransaction(): joinsplit.vpub_new and joinsplit.vpub_old both nonzero"),
REJECT_INVALID, "bad-txns-vpubs-both-nonzero");
}
nValueOut += joinsplit.vpub_old;
if (!MoneyRange(nValueOut)) {
return state.DoS(100, error("CheckTransaction(): txout total out of range"),
REJECT_INVALID, "bad-txns-txouttotal-toolarge");
}
}
// Ensure input values do not exceed MAX_MONEY
// We have not resolved the txin values at this stage,
// but we do know what the joinsplits claim to add
// to the value pool.
{
CAmount nValueIn = 0;
for (std::vector<JSDescription>::const_iterator it(tx.vjoinsplit.begin()); it != tx.vjoinsplit.end(); ++it)
{
nValueIn += it->vpub_new;
if (!MoneyRange(it->vpub_new) || !MoneyRange(nValueIn)) {
return state.DoS(100, error("CheckTransaction(): txin total out of range"),
REJECT_INVALID, "bad-txns-txintotal-toolarge");
}
}
}
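    // Value-flow sketch: vpub_old moves value from the transparent pool into a
    // joinsplit, so it is tallied with the outputs above; vpub_new moves value
    // back out, so it is bounded here like an input. For example (illustrative),
    // a tx with a single 1 ZEC vout and a joinsplit with vpub_old = 2 ZEC must
    // be funded by transparent inputs and/or vpub_new totalling 3 ZEC plus fee.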
// Check for duplicate inputs
set<COutPoint> vInOutPoints;
BOOST_FOREACH(const CTxIn& txin, tx.vin)
{
if (vInOutPoints.count(txin.prevout))
return state.DoS(100, error("CheckTransaction(): duplicate inputs"),
REJECT_INVALID, "bad-txns-inputs-duplicate");
vInOutPoints.insert(txin.prevout);
}
// Check for duplicate joinsplit nullifiers in this transaction
set<uint256> vJoinSplitNullifiers;
BOOST_FOREACH(const JSDescription& joinsplit, tx.vjoinsplit)
{
BOOST_FOREACH(const uint256& nf, joinsplit.nullifiers)
{
if (vJoinSplitNullifiers.count(nf))
return state.DoS(100, error("CheckTransaction(): duplicate nullifiers"),
REJECT_INVALID, "bad-joinsplits-nullifiers-duplicate");
vJoinSplitNullifiers.insert(nf);
}
}
if (tx.IsCoinBase())
{
// There should be no joinsplits in a coinbase transaction
if (tx.vjoinsplit.size() > 0)
return state.DoS(100, error("CheckTransaction(): coinbase has joinsplits"),
REJECT_INVALID, "bad-cb-has-joinsplits");
if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100)
return state.DoS(100, error("CheckTransaction(): coinbase script size"),
REJECT_INVALID, "bad-cb-length");
}
else
{
BOOST_FOREACH(const CTxIn& txin, tx.vin)
if (txin.prevout.IsNull())
return state.DoS(10, error("CheckTransaction(): prevout is null"),
REJECT_INVALID, "bad-txns-prevout-null");
}
return true;
}
CAmount GetMinRelayFee(const CTransaction& tx, unsigned int nBytes, bool fAllowFree)
{
{
LOCK(mempool.cs);
uint256 hash = tx.GetHash();
double dPriorityDelta = 0;
CAmount nFeeDelta = 0;
mempool.ApplyDeltas(hash, dPriorityDelta, nFeeDelta);
if (dPriorityDelta > 0 || nFeeDelta > 0)
return 0;
}
CAmount nMinFee = ::minRelayTxFee.GetFee(nBytes);
if (fAllowFree)
{
// There is a free transaction area in blocks created by most miners.
// If we are relaying, we allow transactions up to DEFAULT_BLOCK_PRIORITY_SIZE - 1000
// bytes to be considered to fall into this category. We don't want to encourage
// splitting one big transaction into multiple smaller ones to avoid fees.
if (nBytes < (DEFAULT_BLOCK_PRIORITY_SIZE - 1000))
nMinFee = 0;
}
if (!MoneyRange(nMinFee))
nMinFee = MAX_MONEY;
return nMinFee;
}
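// Worked example (illustrative): with minRelayTxFee set to R zatoshis per 1000
// bytes, a 500-byte transaction pays a minimum relay fee of roughly R/2, since
// CFeeRate::GetFee scales linearly with size. The fee drops to zero if the tx
// fits the free area (fAllowFree) or if a positive fee/priority delta has been
// applied to its txid (e.g. via the prioritisetransaction RPC).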
bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,
bool* pfMissingInputs, bool fRejectAbsurdFee)
{
AssertLockHeld(cs_main);
if (pfMissingInputs)
*pfMissingInputs = false;
int nextBlockHeight = chainActive.Height() + 1;
auto consensusBranchId = CurrentEpochBranchId(nextBlockHeight, Params().GetConsensus());
// Node operator can choose to reject tx by number of transparent inputs
static_assert(std::numeric_limits<size_t>::max() >= std::numeric_limits<int64_t>::max(), "size_t too small");
size_t limit = (size_t) GetArg("-mempooltxinputlimit", 0);
if (limit > 0) {
size_t n = tx.vin.size();
if (n > limit) {
LogPrint("mempool", "Dropping txid %s : too many transparent inputs %zu > limit %zu\n", tx.GetHash().ToString(), n, limit );
return false;
}
}
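    // Usage sketch: a node started with -mempooltxinputlimit=50 refuses to
    // accept (and hence relay) any transaction spending more than 50
    // transparent inputs; the default value of 0 leaves the limit disabled.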
auto verifier = libzcash::ProofVerifier::Strict();
if (!CheckTransaction(tx, state, verifier))
return error("AcceptToMemoryPool: CheckTransaction failed");
// DoS level set to 10 to be more forgiving.
// Check transaction contextually against the set of consensus rules which apply in the next block to be mined.
if (!ContextualCheckTransaction(tx, state, nextBlockHeight, 10)) {
return error("AcceptToMemoryPool: ContextualCheckTransaction failed");
}
// Coinbase is only valid in a block, not as a loose transaction
if (tx.IsCoinBase())
return state.DoS(100, error("AcceptToMemoryPool: coinbase as individual tx"),
REJECT_INVALID, "coinbase");
// Rather not work on nonstandard transactions (unless -testnet/-regtest)
string reason;
if (Params().RequireStandard() && !IsStandardTx(tx, reason, nextBlockHeight))
return state.DoS(0,
error("AcceptToMemoryPool: nonstandard transaction: %s", reason),
REJECT_NONSTANDARD, reason);
// Only accept nLockTime-using transactions that can be mined in the next
// block; we don't want our mempool filled up with transactions that can't
// be mined yet.
if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS))
return state.DoS(0, false, REJECT_NONSTANDARD, "non-final");
// is it already in the memory pool?
uint256 hash = tx.GetHash();
if (pool.exists(hash))
return false;
// Check for conflicts with in-memory transactions
{
LOCK(pool.cs); // protect pool.mapNextTx
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
COutPoint outpoint = tx.vin[i].prevout;
if (pool.mapNextTx.count(outpoint))
{
// Disable replacement feature for now
return false;
}
}
BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) {
if (pool.mapNullifiers.count(nf))
{
return false;
}
}
}
}
{
CCoinsView dummy;
CCoinsViewCache view(&dummy);
CAmount nValueIn = 0;
{
LOCK(pool.cs);
CCoinsViewMemPool viewMemPool(pcoinsTip, pool);
view.SetBackend(viewMemPool);
// do we already have it?
if (view.HaveCoins(hash))
return false;
// do all inputs exist?
// Note that this does not check for the presence of actual outputs (see the next check for that),
// and only helps with filling in pfMissingInputs (to determine missing vs spent).
BOOST_FOREACH(const CTxIn& txin, tx.vin) {
if (!view.HaveCoins(txin.prevout.hash)) {
if (pfMissingInputs)
*pfMissingInputs = true;
return false;
}
}
// are the actual inputs available?
if (!view.HaveInputs(tx))
return state.Invalid(error("AcceptToMemoryPool: inputs already spent"),
REJECT_DUPLICATE, "bad-txns-inputs-spent");
// are the joinsplit's requirements met?
if (!view.HaveJoinSplitRequirements(tx))
return state.Invalid(error("AcceptToMemoryPool: joinsplit requirements not met"),
REJECT_DUPLICATE, "bad-txns-joinsplit-requirements-not-met");
// Bring the best block into scope
view.GetBestBlock();
nValueIn = view.GetValueIn(tx);
// We now have all inputs cached, so switch back to the dummy backend; this way we don't need to keep holding the mempool lock.
view.SetBackend(dummy);
}
// Check for non-standard pay-to-script-hash in inputs
if (Params().RequireStandard() && !AreInputsStandard(tx, view, consensusBranchId))
return error("AcceptToMemoryPool: nonstandard transaction input");
// Check that the transaction doesn't have an excessive number of
// sigops, making it impossible to mine. Since the coinbase transaction
// itself can contain sigops, MAX_STANDARD_TX_SIGOPS is less than
// MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
// merely non-standard transaction.
unsigned int nSigOps = GetLegacySigOpCount(tx);
nSigOps += GetP2SHSigOpCount(tx, view);
if (nSigOps > MAX_STANDARD_TX_SIGOPS)
return state.DoS(0,
error("AcceptToMemoryPool: too many sigops %s, %d > %d",
hash.ToString(), nSigOps, MAX_STANDARD_TX_SIGOPS),
REJECT_NONSTANDARD, "bad-txns-too-many-sigops");
CAmount nValueOut = tx.GetValueOut();
CAmount nFees = nValueIn-nValueOut;
double dPriority = view.GetPriority(tx, chainActive.Height());
// Keep track of transactions that spend a coinbase, which we re-scan
// during reorgs to ensure COINBASE_MATURITY is still met.
bool fSpendsCoinbase = false;
BOOST_FOREACH(const CTxIn &txin, tx.vin) {
const CCoins *coins = view.AccessCoins(txin.prevout.hash);
if (coins->IsCoinBase()) {
fSpendsCoinbase = true;
break;
}
}
// consensusBranchId, computed at the top of this function, is the branch ID we
// expect this transaction to commit to. We don't yet know if it does, but if
// the entry gets added to the mempool, then it has passed
// ContextualCheckInputs and therefore this is correct.
CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), mempool.HasNoInputsOf(tx), fSpendsCoinbase, consensusBranchId);
unsigned int nSize = entry.GetTxSize();
// Accept a tx if it contains joinsplits and has at least the default fee specified by z_sendmany.
if (tx.vjoinsplit.size() > 0 && nFees >= ASYNC_RPC_OPERATION_DEFAULT_MINERS_FEE) {
// In future we will have more accurate and dynamic computation of fees for tx with joinsplits.
} else {
// Don't accept it if it can't get into a block
CAmount txMinFee = GetMinRelayFee(tx, nSize, true);
if (fLimitFree && nFees < txMinFee)
return state.DoS(0, error("AcceptToMemoryPool: not enough fees %s, %d < %d",
hash.ToString(), nFees, txMinFee),
REJECT_INSUFFICIENTFEE, "insufficient fee");
}
// Require that free transactions have sufficient priority to be mined in the next block.
if (GetBoolArg("-relaypriority", false) && nFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(view.GetPriority(tx, chainActive.Height() + 1))) {
return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority");
}
// Continuously rate-limit free (really, very-low-fee) transactions
// This mitigates 'penny-flooding' -- sending thousands of free transactions just to
// be annoying or make others' transactions take longer to confirm.
if (fLimitFree && nFees < ::minRelayTxFee.GetFee(nSize))
{
static CCriticalSection csFreeLimiter;
static double dFreeCount;
static int64_t nLastTime;
int64_t nNow = GetTime();
LOCK(csFreeLimiter);
// Use an exponentially decaying ~10-minute window:
dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime));
nLastTime = nNow;
// -limitfreerelay unit is thousand-bytes-per-minute
// At default rate it would take over a month to fill 1GB
if (dFreeCount >= GetArg("-limitfreerelay", 15)*10*1000)
return state.DoS(0, error("AcceptToMemoryPool: free transaction rejected by rate limiter"),
REJECT_INSUFFICIENTFEE, "rate limited free transaction");
LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
dFreeCount += nSize;
}
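    // Decay math for the limiter above (illustrative): dFreeCount is scaled by
    // (1 - 1/600)^dt for dt elapsed seconds, so after 600 seconds it retains
    // about e^-1 (~37%) of its value. With the default -limitfreerelay=15 the
    // cap works out to 15 * 10 * 1000 = 150,000 bytes of recent free relay.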
if (fRejectAbsurdFee && nFees > ::minRelayTxFee.GetFee(nSize) * 10000)
return error("AcceptToMemoryPool: absurdly high fees %s, %d > %d",
hash.ToString(),
nFees, ::minRelayTxFee.GetFee(nSize) * 10000);
// Check against previous transactions
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
PrecomputedTransactionData txdata(tx);
if (!ContextualCheckInputs(tx, state, view, true, STANDARD_SCRIPT_VERIFY_FLAGS, true, txdata, Params().GetConsensus(), consensusBranchId))
{
return error("AcceptToMemoryPool: ConnectInputs failed %s", hash.ToString());
}
// Check again against just the consensus-critical mandatory script
// verification flags, in case of bugs in the standard flags that cause
// transactions to pass as valid when they're actually invalid. For
// instance the STRICTENC flag was incorrectly allowing certain
// CHECKSIG NOT scripts to pass, even though they were invalid.
//
// There is a similar check in CreateNewBlock() to prevent creating
// invalid blocks, however allowing such transactions into the mempool
// can be exploited as a DoS attack.
if (!ContextualCheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, txdata, Params().GetConsensus(), consensusBranchId))
{
return error("AcceptToMemoryPool: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s", hash.ToString());
}
// Store transaction in memory
pool.addUnchecked(hash, entry, !IsInitialBlockDownload());
}
SyncWithWallets(tx, NULL);
return true;
}
/** Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock */
bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock, bool fAllowSlow)
{
CBlockIndex *pindexSlow = NULL;
LOCK(cs_main);
if (mempool.lookup(hash, txOut))
{
return true;
}
if (fTxIndex) {
CDiskTxPos postx;
if (pblocktree->ReadTxIndex(hash, postx)) {
CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION);
if (file.IsNull())
return error("%s: OpenBlockFile failed", __func__);
CBlockHeader header;
try {
file >> header;
fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
file >> txOut;
} catch (const std::exception& e) {
return error("%s: Deserialize or I/O error - %s", __func__, e.what());
}
hashBlock = header.GetHash();
if (txOut.GetHash() != hash)
return error("%s: txid mismatch", __func__);
return true;
}
}
if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
int nHeight = -1;
{
CCoinsViewCache &view = *pcoinsTip;
const CCoins* coins = view.AccessCoins(hash);
if (coins)
nHeight = coins->nHeight;
}
if (nHeight > 0)
pindexSlow = chainActive[nHeight];
}
if (pindexSlow) {
CBlock block;
if (ReadBlockFromDisk(block, pindexSlow)) {
BOOST_FOREACH(const CTransaction &tx, block.vtx) {
if (tx.GetHash() == hash) {
txOut = tx;
hashBlock = pindexSlow->GetBlockHash();
return true;
}
}
}
}
return false;
}
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//
bool WriteBlockToDisk(CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
// Open history file to append
CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
if (fileout.IsNull())
return error("WriteBlockToDisk: OpenBlockFile failed");
// Write index header
unsigned int nSize = fileout.GetSerializeSize(block);
fileout << FLATDATA(messageStart) << nSize;
// Write block
long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0)
return error("WriteBlockToDisk: ftell failed");
pos.nPos = (unsigned int)fileOutPos;
fileout << block;
return true;
}
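// On-disk layout produced above (a sketch of the write path): the network magic
// bytes (messageStart), then the 4-byte serialized block size, then the block
// itself. pos.nPos records the offset where the block payload begins, which is
// what gets stored in the block index.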
bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos)
{
block.SetNull();
// Open history file to read
CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
if (filein.IsNull())
return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString());
// Read block
try {
filein >> block;
}
catch (const std::exception& e) {
return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString());
}
// Check the header
if (!(CheckEquihashSolution(&block, Params()) &&
CheckProofOfWork(block.GetHash(), block.nBits, Params().GetConsensus())))
return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());
return true;
}
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex)
{
if (!ReadBlockFromDisk(block, pindex->GetBlockPos()))
return false;
if (block.GetHash() != pindex->GetBlockHash())
return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
pindex->ToString(), pindex->GetBlockPos().ToString());
return true;
}
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams)
{
CAmount nSubsidy = 12.5 * COIN;
// Mining slow start
// The subsidy is ramped up linearly, skipping the middle payout of
// MAX_SUBSIDY/2 to keep the monetary curve consistent with no slow start.
if (nHeight < consensusParams.nSubsidySlowStartInterval / 2) {
nSubsidy /= consensusParams.nSubsidySlowStartInterval;
nSubsidy *= nHeight;
return nSubsidy;
} else if (nHeight < consensusParams.nSubsidySlowStartInterval) {
nSubsidy /= consensusParams.nSubsidySlowStartInterval;
nSubsidy *= (nHeight+1);
return nSubsidy;
}
assert(nHeight > consensusParams.SubsidySlowStartShift());
int halvings = (nHeight - consensusParams.SubsidySlowStartShift()) / consensusParams.nSubsidyHalvingInterval;
// Force block reward to zero when right shift is undefined.
if (halvings >= 64)
return 0;
// Subsidy is cut in half every 840,000 blocks which will occur approximately every 4 years.
nSubsidy >>= halvings;
return nSubsidy;
}
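// Worked example (illustrative, assuming the mainnet parameters
// nSubsidySlowStartInterval = 20000 and nSubsidyHalvingInterval = 840000):
// at height 1 the subsidy is 12.5 COIN / 20000 = 0.000625 ZEC, the full
// 12.5 ZEC is reached by height 20000, and the first halving to 6.25 ZEC
// occurs at height 840000 + 10000 = 850000.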
bool IsInitialBlockDownload()
{
const CChainParams& chainParams = Params();
LOCK(cs_main);
if (fImporting || fReindex)
return true;
if (fCheckpointsEnabled && chainActive.Height() < Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()))
return true;
static bool lockIBDState = false;
if (lockIBDState)
return false;
bool state = (chainActive.Height() < pindexBestHeader->nHeight - 24 * 6 ||
pindexBestHeader->GetBlockTime() < GetTime() - chainParams.MaxTipAge());
if (!state)
lockIBDState = true;
return state;
}
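// Heuristic sketch: at the 2.5-minute target block spacing, 24 * 6 = 144 blocks
// is roughly six hours of chain, so a node reports initial block download while
// its tip is more than ~144 blocks behind the best known header, or while that
// header is older than the chain's MaxTipAge.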
bool fLargeWorkForkFound = false;
bool fLargeWorkInvalidChainFound = false;
CBlockIndex *pindexBestForkTip = NULL, *pindexBestForkBase = NULL;
void CheckForkWarningConditions()
{
AssertLockHeld(cs_main);
// Before we get past initial download, we cannot reliably alert about forks
// (we assume we don't get stuck on a fork before the last checkpoint)
if (IsInitialBlockDownload())
return;
// If our best fork is no longer within 288 blocks (+/- 12 hours if no one mines it)
// of our head, drop it
if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->nHeight >= 288)
pindexBestForkTip = NULL;
if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->nChainWork > chainActive.Tip()->nChainWork + (GetBlockProof(*chainActive.Tip()) * 6)))
{
if (!fLargeWorkForkFound && pindexBestForkBase)
{
std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") +
pindexBestForkBase->phashBlock->ToString() + std::string("'");
CAlert::Notify(warning, true);
}
if (pindexBestForkTip && pindexBestForkBase)
{
LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__,
pindexBestForkBase->nHeight, pindexBestForkBase->phashBlock->ToString(),
pindexBestForkTip->nHeight, pindexBestForkTip->phashBlock->ToString());
fLargeWorkForkFound = true;
}
else
{
std::string warning = std::string("Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.");
LogPrintf("%s: %s\n", warning.c_str(), __func__);
CAlert::Notify(warning, true);
fLargeWorkInvalidChainFound = true;
}
}
else
{
fLargeWorkForkFound = false;
fLargeWorkInvalidChainFound = false;
}
}
void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip)
{
AssertLockHeld(cs_main);
// If we are on a fork that is sufficiently large, set a warning flag
CBlockIndex* pfork = pindexNewForkTip;
CBlockIndex* plonger = chainActive.Tip();
while (pfork && pfork != plonger)
{
while (plonger && plonger->nHeight > pfork->nHeight)
plonger = plonger->pprev;
if (pfork == plonger)
break;
pfork = pfork->pprev;
}
// We define a condition which we should warn the user about: a fork of at least 7 blocks
// with a tip within 72 blocks (+/- 3 hours if no one mines it) of ours,
// or a chain that is entirely longer than ours and invalid (note that this should be detected by both).
// We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network
// hash rate operating on the fork.
// We define it this way because it allows us to only store the highest fork tip (+ base) which meets
// the 7-block condition and from this always have the most-likely-to-cause-warning fork
if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
pindexNewForkTip->nChainWork - pfork->nChainWork > (GetBlockProof(*pfork) * 7) &&
chainActive.Height() - pindexNewForkTip->nHeight < 72)
{
pindexBestForkTip = pindexNewForkTip;
pindexBestForkBase = pfork;
}
CheckForkWarningConditions();
}
// Requires cs_main.
void Misbehaving(NodeId pnode, int howmuch)
{
if (howmuch == 0)
return;
CNodeState *state = State(pnode);
if (state == NULL)
return;
state->nMisbehavior += howmuch;
int banscore = GetArg("-banscore", 100);
if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
{
LogPrintf("%s: %s (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
state->fShouldBan = true;
} else
LogPrintf("%s: %s (%d -> %d)\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior);
}
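// Example (illustrative): with the default -banscore=100, two successive
// Misbehaving(node, 50) calls cross the threshold on the second call, logging
// "BAN THRESHOLD EXCEEDED" and setting fShouldBan; acting on the flag is left
// to the message-handling code.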
void static InvalidChainFound(CBlockIndex* pindexNew)
{
if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork)
pindexBestInvalid = pindexNew;
LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__,
pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
pindexNew->GetBlockTime()));
CBlockIndex *tip = chainActive.Tip();
assert (tip);
LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__,
tip->GetBlockHash().ToString(), chainActive.Height(), log(tip->nChainWork.getdouble())/log(2.0),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime()));
CheckForkWarningConditions();
}
void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) {
int nDoS = 0;
if (state.IsInvalid(nDoS)) {
std::map<uint256, NodeId>::iterator it = mapBlockSource.find(pindex->GetBlockHash());
if (it != mapBlockSource.end() && State(it->second)) {
CBlockReject reject = {state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), pindex->GetBlockHash()};
State(it->second)->rejects.push_back(reject);
if (nDoS > 0)
Misbehaving(it->second, nDoS);
}
}
if (!state.CorruptionPossible()) {
pindex->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(pindex);
setBlockIndexCandidates.erase(pindex);
InvalidChainFound(pindex);
}
}
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight)
{
// mark inputs spent
if (!tx.IsCoinBase()) {
txundo.vprevout.reserve(tx.vin.size());
BOOST_FOREACH(const CTxIn &txin, tx.vin) {
CCoinsModifier coins = inputs.ModifyCoins(txin.prevout.hash);
unsigned nPos = txin.prevout.n;
if (nPos >= coins->vout.size() || coins->vout[nPos].IsNull())
assert(false);
// mark an outpoint spent, and construct undo information
txundo.vprevout.push_back(CTxInUndo(coins->vout[nPos]));
coins->Spend(nPos);
if (coins->vout.size() == 0) {
CTxInUndo& undo = txundo.vprevout.back();
undo.nHeight = coins->nHeight;
undo.fCoinBase = coins->fCoinBase;
undo.nVersion = coins->nVersion;
}
}
}
// spend nullifiers
BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) {
inputs.SetNullifier(nf, true);
}
}
// add outputs
inputs.ModifyCoins(tx.GetHash())->FromTx(tx, nHeight);
}
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
{
CTxUndo txundo;
UpdateCoins(tx, inputs, txundo, nHeight);
}
bool CScriptCheck::operator()() {
const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
if (!VerifyScript(scriptSig, scriptPubKey, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore, *txdata), consensusBranchId, &error)) {
return ::error("CScriptCheck(): %s:%d VerifySignature failed: %s", ptxTo->GetHash().ToString(), nIn, ScriptErrorString(error));
}
return true;
}
int GetSpendHeight(const CCoinsViewCache& inputs)
{
LOCK(cs_main);
CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
return pindexPrev->nHeight + 1;
}
namespace Consensus {
bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, const Consensus::Params& consensusParams)
{
// This doesn't trigger the DoS code on purpose; if it did, it would make it easier
// for an attacker to attempt to split the network.
if (!inputs.HaveInputs(tx))
return state.Invalid(error("CheckInputs(): %s inputs unavailable", tx.GetHash().ToString()));
// are the JoinSplit's requirements met?
if (!inputs.HaveJoinSplitRequirements(tx))
return state.Invalid(error("CheckInputs(): %s JoinSplit requirements not met", tx.GetHash().ToString()));
CAmount nValueIn = 0;
CAmount nFees = 0;
for (unsigned int i = 0; i < tx.vin.size(); i++)
{
const COutPoint &prevout = tx.vin[i].prevout;
const CCoins *coins = inputs.AccessCoins(prevout.hash);
assert(coins);
if (coins->IsCoinBase()) {
// Ensure that coinbases are matured
if (nSpendHeight - coins->nHeight < COINBASE_MATURITY) {
return state.Invalid(
error("CheckInputs(): tried to spend coinbase at depth %d", nSpendHeight - coins->nHeight),
REJECT_INVALID, "bad-txns-premature-spend-of-coinbase");
}
// Ensure that coinbases cannot be spent to transparent outputs
// Disabled on regtest
if (fCoinbaseEnforcedProtectionEnabled &&
consensusParams.fCoinbaseMustBeProtected &&
!tx.vout.empty()) {
return state.Invalid(
error("CheckInputs(): tried to spend coinbase with transparent outputs"),
REJECT_INVALID, "bad-txns-coinbase-spend-has-transparent-outputs");
}
}
// Check for negative or overflow input values
nValueIn += coins->vout[prevout.n].nValue;
if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn))
return state.DoS(100, error("CheckInputs(): txin values out of range"),
REJECT_INVALID, "bad-txns-inputvalues-outofrange");
}
nValueIn += tx.GetJoinSplitValueIn();
if (!MoneyRange(nValueIn))
return state.DoS(100, error("CheckInputs(): vpub_old values out of range"),
REJECT_INVALID, "bad-txns-inputvalues-outofrange");
if (nValueIn < tx.GetValueOut())
return state.DoS(100, error("CheckInputs(): %s value in (%s) < value out (%s)",
tx.GetHash().ToString(), FormatMoney(nValueIn), FormatMoney(tx.GetValueOut())),
REJECT_INVALID, "bad-txns-in-belowout");
// Tally transaction fees
CAmount nTxFee = nValueIn - tx.GetValueOut();
if (nTxFee < 0)
return state.DoS(100, error("CheckInputs(): %s nTxFee < 0", tx.GetHash().ToString()),
REJECT_INVALID, "bad-txns-fee-negative");
nFees += nTxFee;
if (!MoneyRange(nFees))
return state.DoS(100, error("CheckInputs(): nFees out of range"),
REJECT_INVALID, "bad-txns-fee-outofrange");
return true;
}
} // namespace Consensus
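// Illustrative value-balance example (hypothetical numbers, not from the
// original source): GetJoinSplitValueIn() adds each JoinSplit's vpub_new,
// i.e. value leaving the shielded pool, to the transparent input total. A
// transaction spending two 3 ZEC transparent outputs plus a JoinSplit with
// vpub_new = 1 ZEC has nValueIn = 7 ZEC; if GetValueOut() is 6.9999 ZEC,
// then nTxFee = 0.0001 ZEC, which must be non-negative and pass MoneyRange().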
bool ContextualCheckInputs(
const CTransaction& tx,
CValidationState &state,
const CCoinsViewCache &inputs,
bool fScriptChecks,
unsigned int flags,
bool cacheStore,
PrecomputedTransactionData& txdata,
const Consensus::Params& consensusParams,
uint32_t consensusBranchId,
std::vector<CScriptCheck> *pvChecks)
{
if (!tx.IsCoinBase())
{
if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs), consensusParams)) {
return false;
}
if (pvChecks)
pvChecks->reserve(tx.vin.size());
// Consensus::CheckTxInputs above does all the inexpensive checks.
// Only if ALL inputs pass do we perform expensive ECDSA signature checks.
// Helps prevent CPU exhaustion attacks.
// Skip ECDSA signature verification when connecting blocks
// before the last block chain checkpoint. This is safe because block merkle hashes are
// still computed and checked, and any change will be caught at the next checkpoint.
if (fScriptChecks) {
for (unsigned int i = 0; i < tx.vin.size(); i++) {
const COutPoint &prevout = tx.vin[i].prevout;
const CCoins* coins = inputs.AccessCoins(prevout.hash);
assert(coins);
// Verify signature
CScriptCheck check(*coins, tx, i, flags, cacheStore, consensusBranchId, &txdata);
if (pvChecks) {
pvChecks->push_back(CScriptCheck());
check.swap(pvChecks->back());
} else if (!check()) {
if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
// Check whether the failure was caused by a
// non-mandatory script verification check, such as
// non-standard DER encodings or non-null dummy
// arguments; if so, don't trigger DoS protection to
// avoid splitting the network between upgraded and
// non-upgraded nodes.
CScriptCheck check2(*coins, tx, i,
flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore, consensusBranchId, &txdata);
if (check2())
return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
}
// Failures of other flags indicate a transaction that is
// invalid in new blocks, e.g. an invalid P2SH. We DoS ban
// such nodes as they are not following the protocol. That
// said, during an upgrade careful thought should be given
// to the correct behavior - we may want to continue
// peering with non-upgraded nodes even after a soft-fork
// super-majority vote has passed.
return state.DoS(100, false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
}
}
}
}
return true;
}
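// Caller sketch (an assumption modeled on the block-connect path, not a
// definitive excerpt): when pvChecks is non-NULL the expensive script checks
// are deferred rather than executed inline, so a caller can batch them onto
// the parallel script-check queue:
//
//   std::vector<CScriptCheck> vChecks;
//   CCheckQueueControl<CScriptCheck> control(nScriptCheckThreads ? &scriptcheckqueue : NULL);
//   if (!ContextualCheckInputs(tx, state, view, fExpensiveChecks, flags, false,
//                              txdata, consensusParams, consensusBranchId,
//                              nScriptCheckThreads ? &vChecks : NULL))
//       return false;
//   control.Add(vChecks);   // hand checks to ThreadScriptCheck workers
//   ...
//   if (!control.Wait())    // join; false if any deferred script check failed
//       return state.DoS(100, false, REJECT_INVALID, "block-validation-failed");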
namespace {
bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
{
// Open history file to append
CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
if (fileout.IsNull())
return error("%s: OpenUndoFile failed", __func__);
// Write index header
unsigned int nSize = fileout.GetSerializeSize(blockundo);
fileout << FLATDATA(messageStart) << nSize;
// Write undo data
long fileOutPos = ftell(fileout.Get());
if (fileOutPos < 0)
return error("%s: ftell failed", __func__);
pos.nPos = (unsigned int)fileOutPos;
fileout << blockundo;
// calculate & write checksum
CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
hasher << hashBlock;
hasher << blockundo;
fileout << hasher.GetHash();
return true;
}
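// On-disk undo record layout produced above (and checked by UndoReadFromDisk
// below):
//
//   [4-byte messageStart][4-byte nSize][serialized CBlockUndo][32-byte checksum]
//
// pos.nPos is rewritten to point at the CBlockUndo payload itself, and the
// checksum is the double-SHA256 of (hashBlock || blockundo), binding the undo
// data to its block so corruption or misplacement is detected on read.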
bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uint256& hashBlock)
{
// Open history file to read
CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
if (filein.IsNull())
return error("%s: OpenBlockFile failed", __func__);
// Read block
uint256 hashChecksum;
try {
filein >> blockundo;
filein >> hashChecksum;
}
catch (const std::exception& e) {
return error("%s: Deserialize or I/O error - %s", __func__, e.what());
}
// Verify checksum
CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
hasher << hashBlock;
hasher << blockundo;
if (hashChecksum != hasher.GetHash())
return error("%s: Checksum mismatch", __func__);
return true;
}
/** Abort with a message */
bool AbortNode(const std::string& strMessage, const std::string& userMessage="")
{
strMiscWarning = strMessage;
LogPrintf("*** %s\n", strMessage);
uiInterface.ThreadSafeMessageBox(
userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage,
"", CClientUIInterface::MSG_ERROR);
StartShutdown();
return false;
}
bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="")
{
AbortNode(strMessage, userMessage);
return state.Error(strMessage);
}
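// Usage sketch (an assumption mirroring typical disk-error handling): callers
// report unrecoverable I/O failures through the state-taking overload so the
// failure both shuts the node down and propagates as a validation error, e.g.:
//
//   if (!UndoWriteToDisk(blockundo, pos, pindex->pprev->GetBlockHash(),
//                        chainparams.MessageStart()))
//       return AbortNode(state, "Failed to write undo data");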
} // anon namespace
/**
* Apply the undo operation of a CTxInUndo to the given chain state.
* @param undo The undo object.
* @param view The coins view to which to apply the changes.
* @param out The out point that corresponds to the tx input.
* @return True on success.
*/
static bool ApplyTxInUndo(const CTxInUndo& undo, CCoinsViewCache& view, const COutPoint& out)
{
bool fClean = true;
CCoinsModifier coins = view.ModifyCoins(out.hash);
if (undo.nHeight != 0) {
// undo data contains height: this is the last output of the prevout tx being spent
if (!coins->IsPruned())
fClean = fClean && error("%s: undo data overwriting existing transaction", __func__);
coins->Clear();
coins->fCoinBase = undo.fCoinBase;
coins->nHeight = undo.nHeight;
coins->nVersion = undo.nVersion;
} else {
if (coins->IsPruned())
fClean = fClean && error("%s: undo data adding output to missing transaction", __func__);
}
if (coins->IsAvailable(out.n))
fClean = fClean && error("%s: undo data overwriting existing output", __func__);
if (coins->vout.size() < out.n+1)
coins->vout.resize(out.n+1);
coins->vout[out.n] = undo.txout;
return fClean;
}
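// Note on the nHeight branch above: only the undo entry for the last spent
// output of a transaction carries the per-transaction metadata (fCoinBase,
// nHeight, nVersion), signalled by undo.nHeight != 0; that entry recreates the
// whole CCoins record. Entries with nHeight == 0 just re-add a single output
// to a CCoins record that is expected to still exist.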
bool DisconnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean)
{
assert(pindex->GetBlockHash() == view.GetBestBlock());
if (pfClean)
*pfClean = false;
bool fClean = true;
CBlockUndo blockUndo;
CDiskBlockPos pos = pindex->GetUndoPos();
if (pos.IsNull())
return error("DisconnectBlock(): no undo data available");
if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash()))
return error("DisconnectBlock(): failure reading undo data");
if (blockUndo.vtxundo.size() + 1 != block.vtx.size())
return error("DisconnectBlock(): block and undo data inconsistent");
// undo transactions in reverse order
for (int i = block.vtx.size() - 1; i >= 0; i--) {
const CTransaction &tx = block.vtx[i];
uint256 hash = tx.GetHash();
// Check that all outputs are available and match the outputs in the block itself
// exactly.
{
CCoinsModifier outs = view.ModifyCoins(hash);
outs->ClearUnspendable();
CCoins outsBlock(tx, pindex->nHeight);
// The CCoins serialization does not serialize negative numbers.
// No network rules currently depend on the version here, so an inconsistency is harmless
// but it must be corrected before txout nversion ever influences a network rule.
if (outsBlock.nVersion < 0)
outs->nVersion = outsBlock.nVersion;
if (*outs != outsBlock)
fClean = fClean && error("DisconnectBlock(): added transaction mismatch? database corrupted");
// remove outputs
outs->Clear();
}
// unspend nullifiers
BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
BOOST_FOREACH(const uint256 &nf, joinsplit.nullifiers) {
view.SetNullifier(nf, false);
}
}
// restore inputs
if (i > 0) { // not coinbases
const CTxUndo &txundo = blockUndo.vtxundo[i-1];
if (txundo.vprevout.size() != tx.vin.size())
return error("DisconnectBlock(): transaction and undo data inconsistent");
for (unsigned int j = tx.vin.size(); j-- > 0;) {
const COutPoint &out = tx.vin[j].prevout;
const CTxInUndo &undo = txundo.vprevout[j];
if (!ApplyTxInUndo(undo, view, out))
fClean = false;
}
}
}
// set the old best anchor back
view.PopAnchor(blockUndo.old_tree_root);
// move best block pointer to prevout block
view.SetBestBlock(pindex->pprev->GetBlockHash());
if (pfClean) {
*pfClean = fClean;
return true;
}
return fClean;
}
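// Caller sketch (an assumption based on the pfClean contract above): reorg
// code passes NULL so that any inconsistency aborts the disconnect, while
// database verification passes a bool to record "unclean but tolerated":
//
//   if (!DisconnectBlock(block, state, pindexDelete, view, NULL))  // strict
//       return error("DisconnectTip(): DisconnectBlock %s failed",
//                    pindexDelete->GetBlockHash().ToString());
//
//   bool fClean = true;
//   DisconnectBlock(block, state, pindex, coins, &fClean);         // lenient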
void static FlushBlockFile(bool fFinalize = false)
{
LOCK(cs_LastBlockFile);
CDiskBlockPos posOld(nLastBlockFile, 0);
FILE *fileOld = OpenBlockFile(posOld);
if (fileOld) {
if (fFinalize)
TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
FileCommit(fileOld);
fclose(fileOld);
}
fileOld = OpenUndoFile(posOld);
if (fileOld) {
if (fFinalize)
TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
FileCommit(fileOld);
fclose(fileOld);
}
}
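// Descriptive note: the block file and its undo file are flushed together so
// the two never disagree on disk; with fFinalize, both are also truncated to
// their recorded sizes (vinfoBlockFile[nLastBlockFile].nSize / .nUndoSize)
// before a new file is started, reclaiming the preallocated tail.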
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
void ThreadScriptCheck() {
RenameThread("zcash-scriptch");
scriptcheckqueue.Thread();
}
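// Startup sketch (an assumption mirroring the usual init-time wiring): one
// worker per configured script-check thread, with the validating thread
// itself acting as the final worker inside CCheckQueueControl::Wait():
//
//   for (int i = 0; i < nScriptCheckThreads - 1; i++)
//       threadGroup.create_thread(&ThreadScriptCheck);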
//
// Called periodically asynchronously; alerts if it smells like
// we're being fed a bad chain (blocks being generated much
// too slowly or too quickly).
//
void PartitionCheck(bool (*initialDownloadCheck)(), CCriticalSection& cs, const CBlockIndex *const &bestHeader,
int64_t nPowTargetSpacing)
{
if (bestHeader == NULL || initialDownloadCheck()) return;
static int64_t lastAlertTime = 0;
int64_t now = GetAdjustedTime();
if (lastAlertTime > now-60*60*24) return; // Alert at most once per day
const int SPAN_HOURS=4;
const int SPAN_SECONDS=SPAN_HOURS*60*60;
int BLOCKS_EXPECTED = SPAN_SECONDS / nPowTargetSpacing;
boost::math::poisson_distribution<double> poisson(BLOCKS_EXPECTED);
std::string strWarning;
int64_t startTime = GetAdjustedTime()-SPAN_SECONDS;
LOCK(cs);
const CBlockIndex* i = bestHeader;
int nBlocks = 0;
while (i->GetBlockTime() >= startTime) {
++nBlocks;
i = i->pprev;
if (i == NULL) return; // Ran out of chain, we must not be fully synced
}
// How likely is it to find that many by chance?
double p = boost::math::pdf(poisson, nBlocks);
LogPrint("partitioncheck", "%s : Found %d blocks in the last %d hours\n", __func__, nBlocks, SPAN_HOURS);
LogPrint("partitioncheck", "%s : likelihood: %g\n", __func__, p);
// Aim for one false-positive about every fifty years of normal running:
const int FIFTY_YEARS = 50*365*24*60*60;
double alertThreshold = 1.0 / (FIFTY_YEARS / SPAN_SECONDS);
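    // i.e. accept a p-value small enough that, checking once per SPAN_SECONDS
    // window, we expect about one spurious alert every FIFTY_YEARS.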
if (p <= alertThreshold && nBlocks < BLOCKS_EXPECTED)
{
// Many fewer blocks than expected: alert!
strWarning = strprintf(_("WARNING: check your network connection, %d blocks received in the last %d hours (%d expected)"),
nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
}
else if (p <= alertThreshold && nBlocks > BLOCKS_EXPECTED)
{
// Many more blocks than expected: alert!
strWarning = strprintf(_("WARNING: abnormally high number of blocks generated, %d blocks received in the last %d hours (%d expected)"),
nBlocks, SPAN_HOURS, BLOCKS_EXPECTED);
}
if (!strWarning.empty())
{
strMiscWarning = strWarning;
CAlert::Notify(strWarning, true);
lastAlertTime = now;
}
}
static int64_t nTimeVerify = 0;
static int64_t nTimeConnect = 0;
static int64_t nTimeIndex = 0;
static int64_t nTimeCallbacks = 0;
static int64_t nTimeTotal = 0;
bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool fJustCheck)
{
const CChainParams& chainparams = Params();
AssertLockHeld(cs_main);
bool fExpensiveChecks = true;
if (fCheckpointsEnabled) {
CBlockIndex *pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->nHeight) == pindex) {
// This block is an ancestor of a checkpoint: disable script checks
fExpensiveChecks = false;
}
}
auto verifier = libzcash::ProofVerifier::Strict();
auto disabledVerifier = libzcash::ProofVerifier::Disabled();
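    // Using the disabled verifier for checkpointed ancestors skips the costly
    // JoinSplit proof verification, mirroring what fExpensiveChecks does for
    // script checks.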
// Check it again to verify JoinSplit proofs, and in case a previous version let a bad block in
if (!CheckBlock(block, state, fExpensiveChecks ? verifier : disabledVerifier, !fJustCheck, !fJustCheck))
return false;
// verify that the view's current state corresponds to the previous block
uint256 hashPrevBlock = pindex->pprev == NULL ? uint256() : pindex->pprev->GetBlockHash();
assert(hashPrevBlock == view.GetBestBlock());
// Special case for the genesis block, skipping connection of its transactions
// (its coinbase is unspendable)
if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
if (!fJustCheck) {
view.SetBestBlock(pindex->GetBlockHash());
// Before the genesis block, there was an empty tree
ZCIncrementalMerkleTree tree;
pindex->hashAnchor = tree.root();
// The genesis block contained no JoinSplits
pindex->hashAnchorEnd = pindex->hashAnchor;
}
return true;
}
// Do not allow blocks that contain transactions which 'overwrite' older transactions,
// unless those are already completely spent.
BOOST_FOREACH(const CTransaction& tx, block.vtx) {
const CCoins* coins = view.AccessCoins(tx.GetHash());
if (coins && !coins->IsPruned())
return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"),
REJECT_INVALID, "bad-txns-BIP30");
}
unsigned int flags = SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
// DERSIG (BIP66) is also always enforced, but does not have a flag.
CBlockUndo blockundo;
CCheckQueueControl<CScriptCheck> control(fExpensiveChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);
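    // Script checks are farmed out to the global scriptcheckqueue and verified
    // on nScriptCheckThreads worker threads; control.Wait() below collects the
    // results before the block can be accepted.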
int64_t nTimeStart = GetTimeMicros();
CAmount nFees = 0;
int nInputs = 0;
unsigned int nSigOps = 0;
CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
std::vector<std::pair<uint256, CDiskTxPos> > vPos;
vPos.reserve(block.vtx.size());
blockundo.vtxundo.reserve(block.vtx.size() - 1);
// Construct the incremental merkle tree at the current
// block position,
auto old_tree_root = view.GetBestAnchor();
// saving the top anchor in the block index as we go.
if (!fJustCheck) {
pindex->hashAnchor = old_tree_root;
}
ZCIncrementalMerkleTree tree;
// This should never fail: we should always be able to get the root
// that is on the tip of our chain
assert(view.GetAnchorAt(old_tree_root, tree));
{
// Consistency check: the root of the tree we're given should
// match what we asked for.
assert(tree.root() == old_tree_root);
}
// Grab the consensus branch ID for the block's height
    auto consensusBranchId = CurrentEpochBranchId(pindex->nHeight, chainparams.GetConsensus());
std::vector<PrecomputedTransactionData> txdata;
txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated
for (unsigned int i = 0; i < block.vtx.size(); i++)
{
const CTransaction &tx = block.vtx[i];
nInputs += tx.vin.size();
nSigOps += GetLegacySigOpCount(tx);
if (nSigOps > MAX_BLOCK_SIGOPS)
return state.DoS(100, error("ConnectBlock(): too many sigops"),
REJECT_INVALID, "bad-blk-sigops");
if (!tx.IsCoinBase())
{
if (!view.HaveInputs(tx))
return state.DoS(100, error("ConnectBlock(): inputs missing/spent"),
REJECT_INVALID, "bad-txns-inputs-missingorspent");
// are the JoinSplit's requirements met?
if (!view.HaveJoinSplitRequirements(tx))
return state.DoS(100, error("ConnectBlock(): JoinSplit requirements not met"),
REJECT_INVALID, "bad-txns-joinsplit-requirements-not-met");
// Add in sigops done by pay-to-script-hash inputs;
// this is to prevent a "rogue miner" from creating
// an incredibly-expensive-to-validate block.
nSigOps += GetP2SHSigOpCount(tx, view);
if (nSigOps > MAX_BLOCK_SIGOPS)
return state.DoS(100, error("ConnectBlock(): too many sigops"),
REJECT_INVALID, "bad-blk-sigops");
}
txdata.emplace_back(tx);
if (!tx.IsCoinBase())
{
nFees += view.GetValueIn(tx)-tx.GetValueOut();
std::vector<CScriptCheck> vChecks;
if (!ContextualCheckInputs(tx, state, view, fExpensiveChecks, flags, false, txdata[i], chainparams.GetConsensus(), consensusBranchId, nScriptCheckThreads ? &vChecks : NULL))
return false;
control.Add(vChecks);
}
CTxUndo undoDummy;
if (i > 0) {
blockundo.vtxundo.push_back(CTxUndo());
}
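        // UpdateCoins marks this transaction's inputs spent and adds its outputs
        // to the view; the coinbase (i == 0) has no inputs to restore on
        // disconnect, so its undo data goes into a throwaway CTxUndo.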
UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->nHeight);
BOOST_FOREACH(const JSDescription &joinsplit, tx.vjoinsplit) {
BOOST_FOREACH(const uint256 &note_commitment, joinsplit.commitments) {
// Insert the note commitments into our temporary tree.
tree.append(note_commitment);
}
}
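        // Record this transaction's on-disk position; vPos feeds the optional
        // -txindex database, written below once the block fully connects.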
vPos.push_back(std::make_pair(tx.GetHash(), pos));
pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
}
view.PushAnchor(tree);
if (!fJustCheck) {
pindex->hashAnchorEnd = tree.root();
}
blockundo.old_tree_root = old_tree_root;
int64_t nTime1 = GetTimeMicros(); nTimeConnect += nTime1 - nTimeStart;
LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime1 - nTimeStart), 0.001 * (nTime1 - nTimeStart) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime1 - nTimeStart) / (nInputs-1), nTimeConnect * 0.000001);
CAmount blockReward = nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
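    // A coinbase may claim at most the block subsidy plus all fees paid by the
    // block's transactions.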
if (block.vtx[0].GetValueOut() > blockReward)
return state.DoS(100,
error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)",
block.vtx[0].GetValueOut(), blockReward),
REJECT_INVALID, "bad-cb-amount");
if (!control.Wait())
return state.DoS(100, false);
int64_t nTime2 = GetTimeMicros(); nTimeVerify += nTime2 - nTimeStart;
LogPrint("bench", " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime2 - nTimeStart), nInputs <= 1 ? 0 : 0.001 * (nTime2 - nTimeStart) / (nInputs-1), nTimeVerify * 0.000001);
if (fJustCheck)
return true;
// Write undo information to disk
if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS))
{
if (pindex->GetUndoPos().IsNull()) {
CDiskBlockPos pos;
if (!FindUndoPos(state, pindex->nFile, pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
return error("ConnectBlock(): FindUndoPos failed");
if (!UndoWriteToDisk(blockundo, pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
return AbortNode(state, "Failed to write undo data");
// update nUndoPos in block index
pindex->nUndoPos = pos.nPos;
pindex->nStatus |= BLOCK_HAVE_UNDO;
}
// Now that all consensus rules have been validated, set nCachedBranchId.
// Move this if BLOCK_VALID_CONSENSUS is ever altered.
static_assert(BLOCK_VALID_CONSENSUS == BLOCK_VALID_SCRIPTS,
"nCachedBranchId must be set after all consensus rules have been validated.");
        if (IsActivationHeightForAnyUpgrade(pindex->nHeight, chainparams.GetConsensus())) {
pindex->nStatus |= BLOCK_ACTIVATES_UPGRADE;
pindex->nCachedBranchId = CurrentEpochBranchId(pindex->nHeight, chainparams.GetConsensus());
} else if (pindex->pprev) {
pindex->nCachedBranchId = pindex->pprev->nCachedBranchId;
}
pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
setDirtyBlockIndex.insert(pindex);
}
if (fTxIndex)
if (!pblocktree->WriteTxIndex(vPos))
return AbortNode(state, "Failed to write transaction index");
// add this block to the view's block chain
view.SetBestBlock(pindex->GetBlockHash());
int64_t nTime3 = GetTimeMicros(); nTimeIndex += nTime3 - nTime2;
LogPrint("bench", " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime3 - nTime2), nTimeIndex * 0.000001);
// Watch for changes to the previous coinbase transaction.
static uint256 hashPrevBestCoinBase;
GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase);
hashPrevBestCoinBase = block.vtx[0].GetHash();
int64_t nTime4 = GetTimeMicros(); nTimeCallbacks += nTime4 - nTime3;
LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime4 - nTime3), nTimeCallbacks * 0.000001);
return true;
}
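/**
 * How aggressively FlushStateToDisk should behave:
 * - FLUSH_STATE_NONE      never writes on its own; only a pending prune can
 *                         force a flush;
 * - FLUSH_STATE_IF_NEEDED writes only once the coins cache exceeds its limit;
 * - FLUSH_STATE_PERIODIC  writes/flushes on the DATABASE_WRITE_INTERVAL and
 *                         DATABASE_FLUSH_INTERVAL timers;
 * - FLUSH_STATE_ALWAYS    unconditionally writes and flushes everything.
 */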
enum FlushStateMode {
FLUSH_STATE_NONE,
FLUSH_STATE_IF_NEEDED,
FLUSH_STATE_PERIODIC,
FLUSH_STATE_ALWAYS
};
/**
* Update the on-disk chain state.
 * Depending on the mode we're called with, the caches and indexes are
 * flushed if they're too large, if it's been a while since the last
 * write, or unconditionally; in all cases a flush is forced if we're in
 * prune mode and are deleting files.
*/
bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
LOCK2(cs_main, cs_LastBlockFile);
static int64_t nLastWrite = 0;
static int64_t nLastFlush = 0;
static int64_t nLastSetChain = 0;
std::set<int> setFilesToPrune;
bool fFlushForPrune = false;
try {
if (fPruneMode && fCheckForPruning && !fReindex) {
FindFilesToPrune(setFilesToPrune);
fCheckForPruning = false;
if (!setFilesToPrune.empty()) {
fFlushForPrune = true;
if (!fHavePruned) {
pblocktree->WriteFlag("prunedblockfiles", true);
fHavePruned = true;
}
}
}
int64_t nNow = GetTimeMicros();
// Avoid writing/flushing immediately after startup.
if (nLastWrite == 0) {
nLastWrite = nNow;
}
if (nLastFlush == 0) {
nLastFlush = nNow;
}
if (nLastSetChain == 0) {
nLastSetChain = nNow;
}
size_t cacheSize = pcoinsTip->DynamicMemoryUsage();
// The cache is large and close to the limit, but we have time now (not in the middle of a block processing).
bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize * (10.0/9) > nCoinCacheUsage;
// The cache is over the limit, we have to write now.
bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nCoinCacheUsage;
// It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash.
bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
// It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage.
bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
// Combine all conditions that result in a full cache flush.
bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune;
// Write blocks and block index to disk.
if (fDoFullFlush || fPeriodicWrite) {
// Depend on nMinDiskSpace to ensure we can write block index
if (!CheckDiskSpace(0))
return state.Error("out of disk space");
// First make sure all block and undo data is flushed to disk.
FlushBlockFile();
// Then update all block file information (which may refer to block and undo files).
{
std::vector<std::pair<int, const CBlockFileInfo*> > vFiles;
vFiles.reserve(setDirtyFileInfo.size());
for (set<int>::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) {
vFiles.push_back(make_pair(*it, &vinfoBlockFile[*it]));
setDirtyFileInfo.erase(it++);
}
std::vector<const CBlockIndex*> vBlocks;
vBlocks.reserve(setDirtyBlockIndex.size());
for (set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
vBlocks.push_back(*it);
setDirtyBlockIndex.erase(it++);
}
if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
return AbortNode(state, "Files to write to block index database");
}
}
// Finally remove any pruned files
if (fFlushForPrune)
UnlinkPrunedFiles(setFilesToPrune);
nLastWrite = nNow;
}
// Flush best chain related state. This can only be done if the blocks / block index write was also done.
if (fDoFullFlush) {
// Typical CCoins structures on disk are around 128 bytes in size.
// Pushing a new one to the database can cause it to be written
// twice (once in the log, and once in the tables). This is already
// an overestimation, as most will delete an existing entry or
// overwrite one. Still, use a conservative safety factor of 2.
if (!CheckDiskSpace(128 * 2 * 2 * pcoinsTip->GetCacheSize()))
return state.Error("out of disk space");
// Flush the chainstate (which may refer to block index entries).
if (!pcoinsTip->Flush())
return AbortNode(state, "Failed to write to coin database");
nLastFlush = nNow;
}
if ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000) {
// Update best block in wallet (so we can detect restored wallets).
GetMainSignals().SetBestChain(chainActive.GetLocator());
nLastSetChain = nNow;
}
} catch (const std::runtime_error& e) {
return AbortNode(state, std::string("System error while flushing: ") + e.what());
}
return true;
}
void FlushStateToDisk() {
CValidationState state;
FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
}
void PruneAndFlush() {
CValidationState state;
fCheckForPruning = true;
FlushStateToDisk(state, FLUSH_STATE_NONE);
}
/** Update chainActive and related internal data structures. */
void static UpdateTip(CBlockIndex *pindexNew) {
const CChainParams& chainParams = Params();
chainActive.SetTip(pindexNew);
// New best block
nTimeBestReceived = GetTime();
mempool.AddTransactionsUpdated(1);
LogPrintf("%s: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f cache=%.1fMiB(%utx)\n", __func__,
chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(), log(chainActive.Tip()->nChainWork.getdouble())/log(2.0), (unsigned long)chainActive.Tip()->nChainTx,
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.Tip()), pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());
cvBlockChange.notify_all();
// Check the version of the last 100 blocks to see if we need to upgrade:
static bool fWarned = false;
if (!IsInitialBlockDownload() && !fWarned)
{
int nUpgraded = 0;
const CBlockIndex* pindex = chainActive.Tip();
for (int i = 0; i < 100 && pindex != NULL; i++)
{
if (pindex->nVersion > CBlock::CURRENT_VERSION)
++nUpgraded;
pindex = pindex->pprev;
}
if (nUpgraded > 0)
LogPrintf("%s: %d of last 100 blocks above version %d\n", __func__, nUpgraded, (int)CBlock::CURRENT_VERSION);
if (nUpgraded > 100/2)
{
// strMiscWarning is read by GetWarnings(), called by the JSON-RPC code to warn the user:
strMiscWarning = _("Warning: This version is obsolete; upgrade required!");
CAlert::Notify(strMiscWarning, true);
fWarned = true;
}
}
}
/**
* Disconnect chainActive's tip. You probably want to call mempool.removeForReorg and
* mempool.removeWithoutBranchId after this, with cs_main held.
*/
bool static DisconnectTip(CValidationState &state, bool fBare = false) {
CBlockIndex *pindexDelete = chainActive.Tip();
assert(pindexDelete);
// Read block from disk.
CBlock block;
if (!ReadBlockFromDisk(block, pindexDelete))
return AbortNode(state, "Failed to read block");
// Apply the block atomically to the chain state.
uint256 anchorBeforeDisconnect = pcoinsTip->GetBestAnchor();
int64_t nStart = GetTimeMicros();
{
CCoinsViewCache view(pcoinsTip);
if (!DisconnectBlock(block, state, pindexDelete, view))
return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
assert(view.Flush());
}
LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
uint256 anchorAfterDisconnect = pcoinsTip->GetBestAnchor();
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
return false;
if (!fBare) {
// Resurrect mempool transactions from the disconnected block.
BOOST_FOREACH(const CTransaction &tx, block.vtx) {
// ignore validation errors in resurrected transactions
list<CTransaction> removed;
CValidationState stateDummy;
if (tx.IsCoinBase() || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL))
mempool.remove(tx, removed, true);
}
if (anchorBeforeDisconnect != anchorAfterDisconnect) {
// The anchor may not change between block disconnects,
// in which case we don't want to evict from the mempool yet!
mempool.removeWithAnchor(anchorBeforeDisconnect);
}
}
// Update chainActive and related variables.
UpdateTip(pindexDelete->pprev);
// Get the current commitment tree
ZCIncrementalMerkleTree newTree;
assert(pcoinsTip->GetAnchorAt(pcoinsTip->GetBestAnchor(), newTree));
// Let wallets know transactions went from 1-confirmed to
// 0-confirmed or conflicted:
BOOST_FOREACH(const CTransaction &tx, block.vtx) {
SyncWithWallets(tx, NULL);
}
// Update cached incremental witnesses
GetMainSignals().ChainTip(pindexDelete, &block, newTree, false);
return true;
}
static int64_t nTimeReadFromDisk = 0;
static int64_t nTimeConnectTotal = 0;
static int64_t nTimeFlush = 0;
static int64_t nTimeChainState = 0;
static int64_t nTimePostConnect = 0;
/**
* Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
* corresponding to pindexNew, to bypass loading it again from disk.
* You probably want to call mempool.removeWithoutBranchId after this, with cs_main held.
*/
bool static ConnectTip(CValidationState &state, CBlockIndex *pindexNew, CBlock *pblock) {
assert(pindexNew->pprev == chainActive.Tip());
// Read block from disk.
int64_t nTime1 = GetTimeMicros();
CBlock block;
if (!pblock) {
if (!ReadBlockFromDisk(block, pindexNew))
return AbortNode(state, "Failed to read block");
pblock = &block;
}
// Get the current commitment tree
ZCIncrementalMerkleTree oldTree;
assert(pcoinsTip->GetAnchorAt(pcoinsTip->GetBestAnchor(), oldTree));
// Apply the block atomically to the chain state.
int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
int64_t nTime3;
LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
{
CCoinsViewCache view(pcoinsTip);
bool rv = ConnectBlock(*pblock, state, pindexNew, view);
GetMainSignals().BlockChecked(*pblock, state);
if (!rv) {
if (state.IsInvalid())
InvalidBlockFound(pindexNew, state);
return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
}
mapBlockSource.erase(pindexNew->GetBlockHash());
nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
assert(view.Flush());
}
int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
LogPrint("bench", " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
// Write the chain state to disk, if necessary.
if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
return false;
int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
// Remove conflicting transactions from the mempool.
list<CTransaction> txConflicted;
mempool.removeForBlock(pblock->vtx, pindexNew->nHeight, txConflicted, !IsInitialBlockDownload());
// Remove transactions that expire at new block height from mempool
mempool.removeExpired(pindexNew->nHeight);
// Update chainActive & related variables.
UpdateTip(pindexNew);
// Tell wallet about transactions that went from mempool
// to conflicted:
BOOST_FOREACH(const CTransaction &tx, txConflicted) {
SyncWithWallets(tx, NULL);
}
// ... and about transactions that got confirmed:
BOOST_FOREACH(const CTransaction &tx, pblock->vtx) {
SyncWithWallets(tx, pblock);
}
// Update cached incremental witnesses
GetMainSignals().ChainTip(pindexNew, pblock, oldTree, true);
EnforceNodeDeprecation(pindexNew->nHeight);
int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
return true;
}
/**
* Return the tip of the chain with the most work in it, that isn't
* known to be invalid (it's however far from certain to be valid).
*/
static CBlockIndex* FindMostWorkChain() {
do {
CBlockIndex *pindexNew = NULL;
// Find the best candidate header.
{
std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
if (it == setBlockIndexCandidates.rend())
return NULL;
pindexNew = *it;
}
// Check whether all blocks on the path between the currently active chain and the candidate are valid.
// Just going until the active chain is an optimization, as we know all blocks in it are valid already.
CBlockIndex *pindexTest = pindexNew;
bool fInvalidAncestor = false;
while (pindexTest && !chainActive.Contains(pindexTest)) {
assert(pindexTest->nChainTx || pindexTest->nHeight == 0);
// Pruned nodes may have entries in setBlockIndexCandidates for
// which block files have been deleted. Remove those as candidates
// for the most work chain if we come across them; we can't switch
// to a chain unless we have all the non-active-chain parent blocks.
bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
if (fFailedChain || fMissingData) {
// Candidate chain is not usable (either invalid or missing data)
if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->nChainWork > pindexBestInvalid->nChainWork))
pindexBestInvalid = pindexNew;
CBlockIndex *pindexFailed = pindexNew;
// Remove the entire chain from the set.
while (pindexTest != pindexFailed) {
if (fFailedChain) {
pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
} else if (fMissingData) {
// If we're missing data, then add back to mapBlocksUnlinked,
// so that if the block arrives in the future we can try adding
// to setBlockIndexCandidates again.
mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed));
}
setBlockIndexCandidates.erase(pindexFailed);
pindexFailed = pindexFailed->pprev;
}
setBlockIndexCandidates.erase(pindexTest);
fInvalidAncestor = true;
break;
}
pindexTest = pindexTest->pprev;
}
if (!fInvalidAncestor)
return pindexNew;
} while(true);
}
/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */
static void PruneBlockIndexCandidates() {
// Note that we can't delete the current block itself, as we may need to return to it later in case a
// reorganization to a better block fails.
std::set<CBlockIndex*, CBlockIndexWorkComparator>::iterator it = setBlockIndexCandidates.begin();
while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
setBlockIndexCandidates.erase(it++);
}
// Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates.
assert(!setBlockIndexCandidates.empty());
}
/**
* Try to make some progress towards making pindexMostWork the active block.
* pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork.
*/
static bool ActivateBestChainStep(CValidationState &state, CBlockIndex *pindexMostWork, CBlock *pblock) {
AssertLockHeld(cs_main);
bool fInvalidFound = false;
const CBlockIndex *pindexOldTip = chainActive.Tip();
const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);
// - On ChainDB initialization, pindexOldTip will be null, so there are no removable blocks.
// - If pindexMostWork is in a chain that doesn't have the same genesis block as our chain,
// then pindexFork will be null, and we would need to remove the entire chain including
// our genesis block. In practice this (probably) won't happen because of checks elsewhere.
auto reorgLength = pindexOldTip ? pindexOldTip->nHeight - (pindexFork ? pindexFork->nHeight : -1) : 0;
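    // e.g. a tip at height 120 with the fork point at height 100 gives
    // reorgLength 20; with no fork point at all it is the whole chain, nHeight + 1.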
static_assert(MAX_REORG_LENGTH > 0, "We must be able to reorg some distance");
if (reorgLength > MAX_REORG_LENGTH) {
auto msg = strprintf(_(
"A block chain reorganization has been detected that would roll back %d blocks! "
"This is larger than the maximum of %d blocks, and so the node is shutting down for your safety."
), reorgLength, MAX_REORG_LENGTH) + "\n\n" +
_("Reorganization details") + ":\n" +
"- " + strprintf(_("Current tip: %s, height %d, work %s"),
pindexOldTip->phashBlock->GetHex(), pindexOldTip->nHeight, pindexOldTip->nChainWork.GetHex()) + "\n" +
"- " + strprintf(_("New tip: %s, height %d, work %s"),
pindexMostWork->phashBlock->GetHex(), pindexMostWork->nHeight, pindexMostWork->nChainWork.GetHex()) + "\n" +
"- " + strprintf(_("Fork point: %s, height %d"),
pindexFork->phashBlock->GetHex(), pindexFork->nHeight) + "\n\n" +
_("Please help, human!");
LogPrintf("*** %s\n", msg);
uiInterface.ThreadSafeMessageBox(msg, "", CClientUIInterface::MSG_ERROR);
StartShutdown();
return false;
}
// Disconnect active blocks which are no longer in the best chain.
bool fBlocksDisconnected = false;
while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
if (!DisconnectTip(state))
return false;
fBlocksDisconnected = true;
}
// Build list of new blocks to connect.
std::vector<CBlockIndex*> vpindexToConnect;
bool fContinue = true;
int nHeight = pindexFork ? pindexFork->nHeight : -1;
while (fContinue && nHeight != pindexMostWork->nHeight) {
// Don't iterate the entire list of potential improvements toward the best tip, as we likely only need
// a few blocks along the way.
int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
vpindexToConnect.clear();
vpindexToConnect.reserve(nTargetHeight - nHeight);
CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
while (pindexIter && pindexIter->nHeight != nHeight) {
vpindexToConnect.push_back(pindexIter);
pindexIter = pindexIter->pprev;
}
nHeight = nTargetHeight;
// Connect new blocks.
BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) {
if (!ConnectTip(state, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL)) {
if (state.IsInvalid()) {
// The block violates a consensus rule.
if (!state.CorruptionPossible())
InvalidChainFound(vpindexToConnect.back());
state = CValidationState();
fInvalidFound = true;
fContinue = false;
break;
} else {
// A system error occurred (disk space, database error, ...).
return false;
}
} else {
PruneBlockIndexCandidates();
if (!pindexOldTip || chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
// We're in a better position than we were. Return temporarily to release the lock.
fContinue = false;
break;
}
}
}
}
if (fBlocksDisconnected) {
mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
}
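    // Evict mempool transactions committing to a consensus branch ID other than
    // the one in force at the next block height; they can no longer be mined.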
mempool.removeWithoutBranchId(
CurrentEpochBranchId(chainActive.Tip()->nHeight + 1, Params().GetConsensus()));
mempool.check(pcoinsTip);
// Callbacks/notifications for a new best chain.
if (fInvalidFound)
CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
else
CheckForkWarningConditions();
return true;
}
/**
* Make the best chain active, in multiple steps. The result is either failure
* or an activated best chain. pblock is either NULL or a pointer to a block
* that is already loaded (to avoid loading it again from disk).
*/
bool ActivateBestChain(CValidationState &state, CBlock *pblock) {
CBlockIndex *pindexNewTip = NULL;
CBlockIndex *pindexMostWork = NULL;
const CChainParams& chainParams = Params();
do {
boost::this_thread::interruption_point();
bool fInitialDownload;
{
LOCK(cs_main);
pindexMostWork = FindMostWorkChain();
// Whether we have anything to do at all.
if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip())
return true;
if (!ActivateBestChainStep(state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL))
return false;
pindexNewTip = chainActive.Tip();
fInitialDownload = IsInitialBlockDownload();
}
// When we reach this point, we switched to a new tip (stored in pindexNewTip).
// Notifications/callbacks that can run without cs_main
if (!fInitialDownload) {
uint256 hashNewTip = pindexNewTip->GetBlockHash();
// Relay inventory, but don't relay old inventory during initial block download.
int nBlockEstimate = 0;
if (fCheckpointsEnabled)
nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints());
// Don't relay blocks if pruning -- could cause a peer to try to download, resulting
// in a stalled download if the block file is pruned before the request.
if (nLocalServices & NODE_NETWORK) {
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate))
pnode->PushInventory(CInv(MSG_BLOCK, hashNewTip));
}
// Notify external listeners about the new tip.
GetMainSignals().UpdatedBlockTip(pindexNewTip);
uiInterface.NotifyBlockTip(hashNewTip);
}
} while(pindexMostWork != chainActive.Tip());
CheckBlockIndex();
// Write changes periodically to disk, after relay.
2014-11-14 09:19:26 -08:00
if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) {
return false;
}
return true;
}
bool InvalidateBlock(CValidationState& state, CBlockIndex *pindex) {
AssertLockHeld(cs_main);
// Mark the block itself as invalid.
pindex->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(pindex);
setBlockIndexCandidates.erase(pindex);
while (chainActive.Contains(pindex)) {
CBlockIndex *pindexWalk = chainActive.Tip();
pindexWalk->nStatus |= BLOCK_FAILED_CHILD;
setDirtyBlockIndex.insert(pindexWalk);
setBlockIndexCandidates.erase(pindexWalk);
// ActivateBestChain considers blocks already in chainActive
// unconditionally valid already, so force disconnect away from it.
if (!DisconnectTip(state)) {
mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
mempool.removeWithoutBranchId(
CurrentEpochBranchId(chainActive.Tip()->nHeight + 1, Params().GetConsensus()));
return false;
}
}
// The resulting new best tip may not be in setBlockIndexCandidates anymore, so
// add it again.
BlockMap::iterator it = mapBlockIndex.begin();
while (it != mapBlockIndex.end()) {
if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) {
setBlockIndexCandidates.insert(it->second);
}
it++;
}
InvalidChainFound(pindex);
mempool.removeForReorg(pcoinsTip, chainActive.Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS);
mempool.removeWithoutBranchId(
CurrentEpochBranchId(chainActive.Tip()->nHeight + 1, Params().GetConsensus()));
return true;
}
bool ReconsiderBlock(CValidationState& state, CBlockIndex *pindex) {
AssertLockHeld(cs_main);
int nHeight = pindex->nHeight;
// Remove the invalidity flag from this block and all its descendants.
BlockMap::iterator it = mapBlockIndex.begin();
while (it != mapBlockIndex.end()) {
if (!it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) {
it->second->nStatus &= ~BLOCK_FAILED_MASK;
setDirtyBlockIndex.insert(it->second);
if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) {
setBlockIndexCandidates.insert(it->second);
}
if (it->second == pindexBestInvalid) {
// Reset invalid block marker if it was pointing to one of those.
pindexBestInvalid = NULL;
}
}
it++;
}
// Remove the invalidity flag from all ancestors too.
while (pindex != NULL) {
if (pindex->nStatus & BLOCK_FAILED_MASK) {
pindex->nStatus &= ~BLOCK_FAILED_MASK;
setDirtyBlockIndex.insert(pindex);
}
pindex = pindex->pprev;
}
return true;
}
CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
{
// Check for duplicate
uint256 hash = block.GetHash();
BlockMap::iterator it = mapBlockIndex.find(hash);
if (it != mapBlockIndex.end())
return it->second;
// Construct new block index object
CBlockIndex* pindexNew = new CBlockIndex(block);
assert(pindexNew);
// We assign the sequence id to blocks only when the full data is available,
// to avoid miners withholding blocks but broadcasting headers, to get a
// competitive advantage.
pindexNew->nSequenceId = 0;
BlockMap::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
if (miPrev != mapBlockIndex.end())
{
pindexNew->pprev = (*miPrev).second;
pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
pindexNew->BuildSkip();
}
pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
pindexNew->RaiseValidity(BLOCK_VALID_TREE);
if (pindexBestHeader == NULL || pindexBestHeader->nChainWork < pindexNew->nChainWork)
pindexBestHeader = pindexNew;
setDirtyBlockIndex.insert(pindexNew);
return pindexNew;
}
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos)
{
pindexNew->nTx = block.vtx.size();
pindexNew->nChainTx = 0;
CAmount sproutValue = 0;
for (auto tx : block.vtx) {
for (auto js : tx.vjoinsplit) {
sproutValue += js.vpub_old;
sproutValue -= js.vpub_new;
}
}
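    // vpub_old moves value into the Sprout shielded pool and vpub_new moves it
    // back out, so sproutValue is this block's net change to the pool.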
pindexNew->nSproutValue = sproutValue;
pindexNew->nChainSproutValue = boost::none;
pindexNew->nFile = pos.nFile;
pindexNew->nDataPos = pos.nPos;
pindexNew->nUndoPos = 0;
pindexNew->nStatus |= BLOCK_HAVE_DATA;
pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
setDirtyBlockIndex.insert(pindexNew);
if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) {
// If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS.
deque<CBlockIndex*> queue;
queue.push_back(pindexNew);
// Recursively process any descendant blocks that now may be eligible to be connected.
while (!queue.empty()) {
CBlockIndex *pindex = queue.front();
queue.pop_front();
pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
if (pindex->pprev) {
if (pindex->pprev->nChainSproutValue && pindex->nSproutValue) {
pindex->nChainSproutValue = *pindex->pprev->nChainSproutValue + *pindex->nSproutValue;
} else {
pindex->nChainSproutValue = boost::none;
}
} else {
pindex->nChainSproutValue = pindex->nSproutValue;
}
{
LOCK(cs_nBlockSequenceId);
pindex->nSequenceId = nBlockSequenceId++;
}
if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) {
setBlockIndexCandidates.insert(pindex);
}
std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex);
while (range.first != range.second) {
std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first;
queue.push_back(it->second);
range.first++;
mapBlocksUnlinked.erase(it);
}
}
} else {
if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew));
}
}
return true;
}
bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false)
{
LOCK(cs_LastBlockFile);
unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
if (vinfoBlockFile.size() <= nFile) {
vinfoBlockFile.resize(nFile + 1);
}
if (!fKnown) {
while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
nFile++;
if (vinfoBlockFile.size() <= nFile) {
vinfoBlockFile.resize(nFile + 1);
}
}
pos.nFile = nFile;
pos.nPos = vinfoBlockFile[nFile].nSize;
}
if (nFile != nLastBlockFile) {
if (!fKnown) {
LogPrintf("Leaving block file %i: %s\n", nFile, vinfoBlockFile[nFile].ToString());
}
FlushBlockFile(!fKnown);
nLastBlockFile = nFile;
}
vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
if (fKnown)
vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
else
vinfoBlockFile[nFile].nSize += nAddSize;
if (!fKnown) {
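        // Block files grow in BLOCKFILE_CHUNK_SIZE increments; when this block
        // crosses a chunk boundary, pre-allocate the next chunk to limit
        // fragmentation.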
unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
unsigned int nNewChunks = (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
if (nNewChunks > nOldChunks) {
if (fPruneMode)
fCheckForPruning = true;
if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
FILE *file = OpenBlockFile(pos);
if (file) {
LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
fclose(file);
}
}
else
return state.Error("out of disk space");
}
}
setDirtyFileInfo.insert(nFile);
return true;
}
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
{
pos.nFile = nFile;
LOCK(cs_LastBlockFile);
unsigned int nNewSize;
pos.nPos = vinfoBlockFile[nFile].nUndoSize;
nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
setDirtyFileInfo.insert(nFile);
unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
if (nNewChunks > nOldChunks) {
if (fPruneMode)
fCheckForPruning = true;
if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
FILE *file = OpenUndoFile(pos);
if (file) {
LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
fclose(file);
}
}
else
return state.Error("out of disk space");
}
return true;
}
bool CheckBlockHeader(const CBlockHeader& block, CValidationState& state, bool fCheckPOW)
{
// Check block version
if (block.nVersion < MIN_BLOCK_VERSION)
return state.DoS(100, error("CheckBlockHeader(): block version too low"),
REJECT_INVALID, "version-too-low");
// Check Equihash solution is valid
if (fCheckPOW && !CheckEquihashSolution(&block, Params()))
return state.DoS(100, error("CheckBlockHeader(): Equihash solution invalid"),
REJECT_INVALID, "invalid-solution");
// Check proof of work matches claimed amount
if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, Params().GetConsensus()))
return state.DoS(50, error("CheckBlockHeader(): proof of work failed"),
REJECT_INVALID, "high-hash");
// Check timestamp
if (block.GetBlockTime() > GetAdjustedTime() + 2 * 60 * 60)
return state.Invalid(error("CheckBlockHeader(): block timestamp too far in the future"),
REJECT_INVALID, "time-too-new");
return true;
}
bool CheckBlock(const CBlock& block, CValidationState& state,
libzcash::ProofVerifier& verifier,
bool fCheckPOW, bool fCheckMerkleRoot)
{
// These are checks that are independent of context.
// Check that the header is valid (particularly PoW). This is mostly
// redundant with the call in AcceptBlockHeader.
if (!CheckBlockHeader(block, state, fCheckPOW))
return false;
// Check the merkle root.
if (fCheckMerkleRoot) {
bool mutated;
uint256 hashMerkleRoot2 = block.BuildMerkleTree(&mutated);
if (block.hashMerkleRoot != hashMerkleRoot2)
return state.DoS(100, error("CheckBlock(): hashMerkleRoot mismatch"),
REJECT_INVALID, "bad-txnmrklroot", true);
// Check for merkle tree malleability (CVE-2012-2459): repeating sequences
// of transactions in a block without affecting the merkle root of a block,
// while still invalidating it.
if (mutated)
return state.DoS(100, error("CheckBlock(): duplicate transaction"),
REJECT_INVALID, "bad-txns-duplicate", true);
}
// All potential-corruption validation must be done before we do any
// transaction validation, as otherwise we may mark the header as invalid
// because we receive the wrong transactions for it.
// Size limits
if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
return state.DoS(100, error("CheckBlock(): size limits failed"),
REJECT_INVALID, "bad-blk-length");
// First transaction must be coinbase, the rest must not be
if (block.vtx.empty() || !block.vtx[0].IsCoinBase())
return state.DoS(100, error("CheckBlock(): first tx is not coinbase"),
REJECT_INVALID, "bad-cb-missing");
for (unsigned int i = 1; i < block.vtx.size(); i++)
if (block.vtx[i].IsCoinBase())
return state.DoS(100, error("CheckBlock(): more than one coinbase"),
REJECT_INVALID, "bad-cb-multiple");
// Check transactions
BOOST_FOREACH(const CTransaction& tx, block.vtx)
if (!CheckTransaction(tx, state, verifier))
return error("CheckBlock(): CheckTransaction failed");
unsigned int nSigOps = 0;
BOOST_FOREACH(const CTransaction& tx, block.vtx)
{
nSigOps += GetLegacySigOpCount(tx);
}
if (nSigOps > MAX_BLOCK_SIGOPS)
return state.DoS(100, error("CheckBlock(): out-of-bounds SigOpCount"),
REJECT_INVALID, "bad-blk-sigops", true);
return true;
}
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex * const pindexPrev)
{
const CChainParams& chainParams = Params();
const Consensus::Params& consensusParams = chainParams.GetConsensus();
uint256 hash = block.GetHash();
if (hash == consensusParams.hashGenesisBlock)
return true;
assert(pindexPrev);
int nHeight = pindexPrev->nHeight+1;
// Check proof of work
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
return state.DoS(100, error("%s: incorrect proof of work", __func__),
REJECT_INVALID, "bad-diffbits");
// Check timestamp against prev
if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
return state.Invalid(error("%s: block's timestamp is too early", __func__),
REJECT_INVALID, "time-too-old");
if (fCheckpointsEnabled)
{
// Don't accept any forks from the main chain prior to last checkpoint
CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainParams.Checkpoints());
if (pcheckpoint && nHeight < pcheckpoint->nHeight)
return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight));
}
// Reject block.nVersion < 4 blocks
if (block.nVersion < 4)
return state.Invalid(error("%s : rejected nVersion<4 block", __func__),
REJECT_OBSOLETE, "bad-version");
return true;
}
bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIndex * const pindexPrev)
{
const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->nHeight + 1;
const Consensus::Params& consensusParams = Params().GetConsensus();
// Check that all transactions are finalized
BOOST_FOREACH(const CTransaction& tx, block.vtx) {
// Check transaction contextually against consensus rules at block height
if (!ContextualCheckTransaction(tx, state, nHeight, 100)) {
return false; // Failure reason has been set in validation state object
}
int nLockTimeFlags = 0;
int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
? pindexPrev->GetMedianTimePast()
: block.GetBlockTime();
if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) {
return state.DoS(10, error("%s: contains a non-final transaction", __func__), REJECT_INVALID, "bad-txns-nonfinal");
}
}
// Enforce BIP 34 rule that the coinbase starts with serialized block height.
// In Zcash this has been enforced since launch, except that the genesis
// block didn't include the height in the coinbase (see Zcash protocol spec
// section '6.8 Bitcoin Improvement Proposals').
if (nHeight > 0)
{
CScript expect = CScript() << nHeight;
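        // e.g. at height 500000 the coinbase scriptSig must begin with the bytes
        // 0x03 0x20 0xa1 0x07 (a minimal little-endian push of 500000).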
if (block.vtx[0].vin[0].scriptSig.size() < expect.size() ||
!std::equal(expect.begin(), expect.end(), block.vtx[0].vin[0].scriptSig.begin())) {
return state.DoS(100, error("%s: block height mismatch in coinbase", __func__), REJECT_INVALID, "bad-cb-height");
}
}
    // Coinbase transaction must include an output sending 20% of
    // the block reward to a founders reward script, until the last founders
    // reward block is reached, with the exception of the genesis block.
    // The last founders reward block is defined as the block just before the
    // first subsidy halving block, which occurs at halving_interval + slow_start_shift.
if ((nHeight > 0) && (nHeight <= consensusParams.GetLastFoundersRewardBlockHeight())) {
bool found = false;
BOOST_FOREACH(const CTxOut& output, block.vtx[0].vout) {
if (output.scriptPubKey == Params().GetFoundersRewardScriptAtHeight(nHeight)) {
if (output.nValue == (GetBlockSubsidy(nHeight, consensusParams) / 5)) {
found = true;
break;
}
}
}
if (!found) {
return state.DoS(100, error("%s: founders reward missing", __func__), REJECT_INVALID, "cb-no-founders-reward");
}
}
return true;
}
bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex** ppindex)
{
const CChainParams& chainparams = Params();
AssertLockHeld(cs_main);
// Check for duplicate
uint256 hash = block.GetHash();
BlockMap::iterator miSelf = mapBlockIndex.find(hash);
CBlockIndex *pindex = NULL;
if (miSelf != mapBlockIndex.end()) {
// Block header is already known.
pindex = miSelf->second;
if (ppindex)
*ppindex = pindex;
if (pindex->nStatus & BLOCK_FAILED_MASK)
return state.Invalid(error("%s: block is marked invalid", __func__), 0, "duplicate");
return true;
}
if (!CheckBlockHeader(block, state))
return false;
// Get prev block index
CBlockIndex* pindexPrev = NULL;
if (hash != chainparams.GetConsensus().hashGenesisBlock) {
BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
if (mi == mapBlockIndex.end())
return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk");
pindexPrev = (*mi).second;
if (pindexPrev->nStatus & BLOCK_FAILED_MASK)
return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk");
}
if (!ContextualCheckBlockHeader(block, state, pindexPrev))
return false;
if (pindex == NULL)
pindex = AddToBlockIndex(block);
if (ppindex)
*ppindex = pindex;
return true;
}
bool AcceptBlock(CBlock& block, CValidationState& state, CBlockIndex** ppindex, bool fRequested, CDiskBlockPos* dbp)
{
const CChainParams& chainparams = Params();
AssertLockHeld(cs_main);
CBlockIndex *&pindex = *ppindex;
if (!AcceptBlockHeader(block, state, &pindex))
return false;
// Try to process all requested blocks that we don't have, but only
// process an unrequested block if it's new and has enough work to
// advance our tip, and isn't too many blocks ahead.
bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
bool fHasMoreWork = (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork : true);
// Blocks that are too out-of-order needlessly limit the effectiveness of
// pruning, because pruning will not delete block files that contain any
// blocks which are too close in height to the tip. Apply this test
// regardless of whether pruning is enabled; it should generally be safe to
// not process unrequested blocks.
bool fTooFarAhead = (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));
// TODO: deal better with return value and error conditions for duplicate
// and unrequested blocks.
if (fAlreadyHave) return true;
if (!fRequested) { // If we didn't ask for it:
if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned
if (!fHasMoreWork) return true; // Don't process less-work chains
if (fTooFarAhead) return true; // Block height is too high
}
// See method docstring for why this is always disabled
auto verifier = libzcash::ProofVerifier::Disabled();
if ((!CheckBlock(block, state, verifier)) || !ContextualCheckBlock(block, state, pindex->pprev)) {
if (state.IsInvalid() && !state.CorruptionPossible()) {
pindex->nStatus |= BLOCK_FAILED_VALID;
setDirtyBlockIndex.insert(pindex);
}
return false;
}
int nHeight = pindex->nHeight;
// Write block to history file
try {
unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
CDiskBlockPos blockPos;
if (dbp != NULL)
blockPos = *dbp;
if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL))
return error("AcceptBlock(): FindBlockPos failed");
if (dbp == NULL)
if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
AbortNode(state, "Failed to write block");
if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
return error("AcceptBlock(): ReceivedBlockTransactions failed");
} catch (const std::runtime_error& e) {
return AbortNode(state, std::string("System error: ") + e.what());
}
if (fCheckForPruning)
FlushStateToDisk(state, FLUSH_STATE_NONE); // we just allocated more disk space for block files
return true;
}
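// Editor's sketch (hypothetical helper) restating the unrequested-block policy
// enforced in AcceptBlock above: a block we did not ask for is processed only
// if it is genuinely new, would advance the tip, and is not too far ahead of it.
static bool ShouldProcessUnrequestedBlockSketch(bool fAlreadyHave, bool fHasMoreWork, bool fTooFarAhead)
{
    if (fAlreadyHave) return false;  // previously processed (data may be pruned)
    if (!fHasMoreWork) return false; // would not advance the current tip
    if (fTooFarAhead) return false;  // keeps pruning effective near the tip
    return true;
}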
static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams)
{
unsigned int nFound = 0;
for (int i = 0; i < consensusParams.nMajorityWindow && nFound < nRequired && pstart != NULL; i++)
{
if (pstart->nVersion >= minVersion)
++nFound;
pstart = pstart->pprev;
}
return (nFound >= nRequired);
}
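// Editor's note: IsSuperMajority implements rolling-window version voting.
// For illustration (hypothetical numbers): with nMajorityWindow = 1000 and
// nRequired = 750, the check passes once at least 750 of the 1000 blocks
// ending at pstart report nVersion >= minVersion; the scan stops early as
// soon as the threshold is reached.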
bool ProcessNewBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, bool fForceProcessing, CDiskBlockPos *dbp)
{
// Preliminary checks
auto verifier = libzcash::ProofVerifier::Disabled();
bool checked = CheckBlock(*pblock, state, verifier);
{
LOCK(cs_main);
bool fRequested = MarkBlockAsReceived(pblock->GetHash());
fRequested |= fForceProcessing;
if (!checked) {
return error("%s: CheckBlock FAILED", __func__);
}
// Store to disk
CBlockIndex *pindex = NULL;
bool ret = AcceptBlock(*pblock, state, &pindex, fRequested, dbp);
if (pindex && pfrom) {
mapBlockSource[pindex->GetBlockHash()] = pfrom->GetId();
}
CheckBlockIndex();
if (!ret)
return error("%s: AcceptBlock FAILED", __func__);
}
if (!ActivateBestChain(state, pblock))
return error("%s: ActivateBestChain failed", __func__);
return true;
}
bool TestBlockValidity(CValidationState &state, const CBlock& block, CBlockIndex * const pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot)
{
AssertLockHeld(cs_main);
assert(pindexPrev == chainActive.Tip());
CCoinsViewCache viewNew(pcoinsTip);
CBlockIndex indexDummy(block);
indexDummy.pprev = pindexPrev;
indexDummy.nHeight = pindexPrev->nHeight + 1;
// JoinSplit proofs are verified in ConnectBlock
auto verifier = libzcash::ProofVerifier::Disabled();
// NOTE: CheckBlockHeader is called by CheckBlock
if (!ContextualCheckBlockHeader(block, state, pindexPrev))
return false;
if (!CheckBlock(block, state, verifier, fCheckPOW, fCheckMerkleRoot))
return false;
if (!ContextualCheckBlock(block, state, pindexPrev))
return false;
if (!ConnectBlock(block, state, &indexDummy, viewNew, true))
return false;
assert(state.IsValid());
return true;
}
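// Editor's note: TestBlockValidity runs the full connect logic against a
// throwaway state (indexDummy plus the viewNew cache layered on pcoinsTip),
// so a candidate block can be checked without mutating the chainstate;
// block template creation is the typical caller.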
/**
* BLOCK PRUNING CODE
*/
/* Calculate the amount of disk space the block & undo files currently use */
uint64_t CalculateCurrentUsage()
{
uint64_t retval = 0;
BOOST_FOREACH(const CBlockFileInfo &file, vinfoBlockFile) {
retval += file.nSize + file.nUndoSize;
}
return retval;
}
/* Prune a block file (modify associated database entries)*/
void PruneOneBlockFile(const int fileNumber)
{
for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) {
CBlockIndex* pindex = it->second;
if (pindex->nFile == fileNumber) {
pindex->nStatus &= ~BLOCK_HAVE_DATA;
pindex->nStatus &= ~BLOCK_HAVE_UNDO;
pindex->nFile = 0;
pindex->nDataPos = 0;
pindex->nUndoPos = 0;
setDirtyBlockIndex.insert(pindex);
// Prune from mapBlocksUnlinked -- any block we prune would have
// to be downloaded again in order to consider its chain, at which
// point it would be considered as a candidate for
// mapBlocksUnlinked or setBlockIndexCandidates.
std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev);
while (range.first != range.second) {
std::multimap<CBlockIndex *, CBlockIndex *>::iterator it = range.first;
range.first++;
if (it->second == pindex) {
mapBlocksUnlinked.erase(it);
}
}
}
}
vinfoBlockFile[fileNumber].SetNull();
setDirtyFileInfo.insert(fileNumber);
}
void UnlinkPrunedFiles(std::set<int>& setFilesToPrune)
{
for (set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
CDiskBlockPos pos(*it, 0);
boost::filesystem::remove(GetBlockPosFilename(pos, "blk"));
boost::filesystem::remove(GetBlockPosFilename(pos, "rev"));
LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
}
}
/* Calculate the block/rev files that should be deleted to remain under target*/
void FindFilesToPrune(std::set<int>& setFilesToPrune)
{
LOCK2(cs_main, cs_LastBlockFile);
if (chainActive.Tip() == NULL || nPruneTarget == 0) {
return;
}
if (chainActive.Tip()->nHeight <= Params().PruneAfterHeight()) {
return;
}
unsigned int nLastBlockWeCanPrune = chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
uint64_t nCurrentUsage = CalculateCurrentUsage();
// We don't check to prune until after we've allocated new space for files
// So we should leave a buffer under our target to account for another allocation
// before the next pruning.
uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
uint64_t nBytesToPrune;
int count=0;
if (nCurrentUsage + nBuffer >= nPruneTarget) {
for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
if (vinfoBlockFile[fileNumber].nSize == 0)
continue;
if (nCurrentUsage + nBuffer < nPruneTarget) // are we below our target?
break;
// don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune)
continue;
PruneOneBlockFile(fileNumber);
// Queue up the files for removal
setFilesToPrune.insert(fileNumber);
nCurrentUsage -= nBytesToPrune;
count++;
}
}
LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
nLastBlockWeCanPrune, count);
}
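// Editor's sketch (hypothetical numbers; assumes the usual chunk sizes of
// 16 MiB for blk files and 1 MiB for rev files): the trigger above leaves one
// blk chunk plus one rev chunk of headroom, so with nPruneTarget = 2 GiB
// pruning starts once usage reaches roughly 2048 MiB - 17 MiB = 2031 MiB,
// and deletes old file pairs until usage plus the 17 MiB buffer drops back
// below the target.
static bool ShouldPruneSketch(uint64_t nCurrentUsage, uint64_t nPruneTarget, uint64_t nBuffer)
{
    // Mirrors the condition above: keep nBuffer of headroom so the next file
    // allocation does not immediately push usage over the target again.
    return nCurrentUsage + nBuffer >= nPruneTarget;
}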
bool CheckDiskSpace(uint64_t nAdditionalBytes)
{
uint64_t nFreeBytesAvailable = boost::filesystem::space(GetDataDir()).available;
// Check for nMinDiskSpace bytes (currently 50MB)
if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
return AbortNode("Disk space is low!", _("Error: Disk space is low!"));
return true;
}
FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
{
if (pos.IsNull())
return NULL;
boost::filesystem::path path = GetBlockPosFilename(pos, prefix);
boost::filesystem::create_directories(path.parent_path());
FILE* file = fopen(path.string().c_str(), "rb+");
if (!file && !fReadOnly)
file = fopen(path.string().c_str(), "wb+");
if (!file) {
LogPrintf("Unable to open file %s\n", path.string());
return NULL;
}
if (pos.nPos) {
if (fseek(file, pos.nPos, SEEK_SET)) {
LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
fclose(file);
return NULL;
}
}
return file;
}
FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
return OpenDiskFile(pos, "blk", fReadOnly);
}
FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
return OpenDiskFile(pos, "rev", fReadOnly);
}
boost::filesystem::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix)
{
return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
}
CBlockIndex * InsertBlockIndex(uint256 hash)
{
if (hash.IsNull())
return NULL;
// Return existing
BlockMap::iterator mi = mapBlockIndex.find(hash);
if (mi != mapBlockIndex.end())
return (*mi).second;
// Create new
CBlockIndex* pindexNew = new CBlockIndex();
if (!pindexNew)
throw runtime_error("LoadBlockIndex(): new CBlockIndex failed");
mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
return pindexNew;
}
bool static LoadBlockIndexDB()
{
const CChainParams& chainparams = Params();
if (!pblocktree->LoadBlockIndexGuts())
return false;
boost::this_thread::interruption_point();
// Calculate nChainWork
vector<pair<int, CBlockIndex*> > vSortedByHeight;
vSortedByHeight.reserve(mapBlockIndex.size());
BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
{
CBlockIndex* pindex = item.second;
vSortedByHeight.push_back(make_pair(pindex->nHeight, pindex));
}
sort(vSortedByHeight.begin(), vSortedByHeight.end());
BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight)
{
CBlockIndex* pindex = item.second;
pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
// We can link the chain of blocks for which we've received transactions at some point.
// Pruned nodes may have deleted the block.
if (pindex->nTx > 0) {
if (pindex->pprev) {
if (pindex->pprev->nChainTx) {
pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
if (pindex->pprev->nChainSproutValue && pindex->nSproutValue) {
pindex->nChainSproutValue = *pindex->pprev->nChainSproutValue + *pindex->nSproutValue;
} else {
pindex->nChainSproutValue = boost::none;
}
} else {
pindex->nChainTx = 0;
pindex->nChainSproutValue = boost::none;
mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex));
}
} else {
pindex->nChainTx = pindex->nTx;
pindex->nChainSproutValue = pindex->nSproutValue;
}
}
// Construct in-memory chain of branch IDs.
// Relies on invariant: a block that does not activate a network upgrade
// will always be valid under the same consensus rules as its parent.
// Genesis block has a branch ID of zero by definition, but has no
// validity status because it is side-loaded into a fresh chain.
// Activation blocks will have branch IDs set (read from disk).
if (pindex->pprev) {
if (pindex->IsValid(BLOCK_VALID_CONSENSUS) && !pindex->nCachedBranchId) {
pindex->nCachedBranchId = pindex->pprev->nCachedBranchId;
}
} else {
pindex->nCachedBranchId = SPROUT_BRANCH_ID;
}
if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL))
setBlockIndexCandidates.insert(pindex);
if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->nChainWork > pindexBestInvalid->nChainWork))
pindexBestInvalid = pindex;
if (pindex->pprev)
pindex->BuildSkip();
if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
pindexBestHeader = pindex;
}
// Load block file info
pblocktree->ReadLastBlockFile(nLastBlockFile);
vinfoBlockFile.resize(nLastBlockFile + 1);
LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
}
LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
for (int nFile = nLastBlockFile + 1; true; nFile++) {
CBlockFileInfo info;
if (pblocktree->ReadBlockFileInfo(nFile, info)) {
vinfoBlockFile.push_back(info);
} else {
break;
}
}
// Check presence of blk files
LogPrintf("Checking all blk files are present...\n");
set<int> setBlkDataFiles;
BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
{
CBlockIndex* pindex = item.second;
if (pindex->nStatus & BLOCK_HAVE_DATA) {
setBlkDataFiles.insert(pindex->nFile);
}
}
for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
{
CDiskBlockPos pos(*it, 0);
if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
return false;
}
}
// Check whether we have ever pruned block & undo files
pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
if (fHavePruned)
LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
// Check whether we need to continue reindexing
bool fReindexing = false;
pblocktree->ReadReindexing(fReindexing);
fReindex |= fReindexing;
// Check whether we have a transaction index
pblocktree->ReadFlag("txindex", fTxIndex);
LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled");
// Fill in-memory data
BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
{
CBlockIndex* pindex = item.second;
// - This relationship will always be true even if pprev has multiple
// children, because hashAnchor is technically a property of pprev,
// not its children.
// - This will miss chain tips; we handle the best tip below, and other
// tips will be handled by ConnectTip during a re-org.
if (pindex->pprev) {
pindex->pprev->hashAnchorEnd = pindex->hashAnchor;
}
}
// Load pointer to end of best chain
BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
if (it == mapBlockIndex.end())
return true;
chainActive.SetTip(it->second);
// Set hashAnchorEnd for the end of best chain
it->second->hashAnchorEnd = pcoinsTip->GetBestAnchor();
PruneBlockIndexCandidates();
LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__,
chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.Tip()->GetBlockTime()),
Checkpoints::GuessVerificationProgress(chainparams.Checkpoints(), chainActive.Tip()));
EnforceNodeDeprecation(chainActive.Height(), true);
return true;
}
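// Editor's sketch (hypothetical helper): the height-sorted pass in
// LoadBlockIndexDB lets each index inherit cumulative totals from its
// already-processed parent in a single sweep, e.g.
//   nChainWork(h) = nChainWork(h-1) + GetBlockProof(block at h).
static arith_uint256 AccumulateChainWorkSketch(const std::vector<arith_uint256>& vBlockProofs)
{
    arith_uint256 total = 0; // genesis starts from zero accumulated work
    BOOST_FOREACH(const arith_uint256& proof, vBlockProofs)
        total += proof;
    return total;
}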
CVerifyDB::CVerifyDB()
{
uiInterface.ShowProgress(_("Verifying blocks..."), 0);
}
CVerifyDB::~CVerifyDB()
{
uiInterface.ShowProgress("", 100);
}
bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth)
{
LOCK(cs_main);
if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL)
return true;
// Verify blocks in the best chain
if (nCheckDepth <= 0)
nCheckDepth = 1000000000; // suffices until the year 19000
if (nCheckDepth > chainActive.Height())
nCheckDepth = chainActive.Height();
nCheckLevel = std::max(0, std::min(4, nCheckLevel));
LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
CCoinsViewCache coins(coinsview);
CBlockIndex* pindexState = chainActive.Tip();
CBlockIndex* pindexFailure = NULL;
int nGoodTransactions = 0;
CValidationState state;
// No need to verify JoinSplits twice
auto verifier = libzcash::ProofVerifier::Disabled();
for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev)
{
boost::this_thread::interruption_point();
uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100)))));
if (pindex->nHeight < chainActive.Height()-nCheckDepth)
break;
CBlock block;
// check level 0: read from disk
if (!ReadBlockFromDisk(block, pindex))
return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
// check level 1: verify block validity
if (nCheckLevel >= 1 && !CheckBlock(block, state, verifier))
return error("VerifyDB(): *** found bad block at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
// check level 2: verify undo validity
if (nCheckLevel >= 2 && pindex) {
CBlockUndo undo;
CDiskBlockPos pos = pindex->GetUndoPos();
if (!pos.IsNull()) {
if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash()))
return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
}
}
// check level 3: check for inconsistencies during memory-only disconnect of tip blocks
if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) {
bool fClean = true;
if (!DisconnectBlock(block, state, pindex, coins, &fClean))
return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
pindexState = pindex->pprev;
if (!fClean) {
nGoodTransactions = 0;
pindexFailure = pindex;
} else
nGoodTransactions += block.vtx.size();
}
if (ShutdownRequested())
return true;
}
if (pindexFailure)
return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
// check level 4: try reconnecting blocks
if (nCheckLevel >= 4) {
CBlockIndex *pindex = pindexState;
while (pindex != chainActive.Tip()) {
boost::this_thread::interruption_point();
uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->nHeight)) / (double)nCheckDepth * 50))));
pindex = chainActive.Next(pindex);
CBlock block;
if (!ReadBlockFromDisk(block, pindex))
return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
if (!ConnectBlock(block, state, pindex, coins))
return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
}
}
LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->nHeight, nGoodTransactions);
return true;
}
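// Editor's note: the check levels above nest, each implying the ones below it
// (illustrative summary of the checks performed in VerifyDB):
//   0 - read blocks back from disk
//   1 - re-run CheckBlock on each block
//   2 - read and verify the undo data
//   3 - disconnect tip blocks in memory and look for coin db inconsistencies
//   4 - reconnect the disconnected blocks via ConnectBlock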
bool RewindBlockIndex(const CChainParams& params)
{
LOCK(cs_main);
// RewindBlockIndex is called after LoadBlockIndex, so at this point every block
// index will have nCachedBranchId set based on the values previously persisted
// to disk. By definition, a set nCachedBranchId means that the block was
// fully-validated under the corresponding consensus rules. Thus we can quickly
// identify whether the current active chain matches our expected sequence of
// consensus rule changes, with two checks:
//
// - BLOCK_ACTIVATES_UPGRADE is set only on blocks that activate upgrades.
// - nCachedBranchId for each block matches what we expect.
auto sufficientlyValidated = [&params](const CBlockIndex* pindex) {
auto consensus = params.GetConsensus();
bool fFlagSet = pindex->nStatus & BLOCK_ACTIVATES_UPGRADE;
bool fFlagExpected = IsActivationHeightForAnyUpgrade(pindex->nHeight, consensus);
return fFlagSet == fFlagExpected &&
pindex->nCachedBranchId &&
*pindex->nCachedBranchId == CurrentEpochBranchId(pindex->nHeight, consensus);
};
int nHeight = 1;
while (nHeight <= chainActive.Height()) {
if (!sufficientlyValidated(chainActive[nHeight])) {
break;
}
nHeight++;
}
// nHeight is now the height of the first insufficiently-validated block, or tipheight + 1
auto rewindLength = chainActive.Height() - nHeight;
if (rewindLength > 0 && rewindLength > MAX_REORG_LENGTH) {
auto pindexOldTip = chainActive.Tip();
auto pindexRewind = chainActive[nHeight - 1];
auto msg = strprintf(_(
"A block chain rewind has been detected that would roll back %d blocks! "
"This is larger than the maximum of %d blocks, and so the node is shutting down for your safety."
), rewindLength, MAX_REORG_LENGTH) + "\n\n" +
_("Rewind details") + ":\n" +
"- " + strprintf(_("Current tip: %s, height %d"),
pindexOldTip->phashBlock->GetHex(), pindexOldTip->nHeight) + "\n" +
"- " + strprintf(_("Rewinding to: %s, height %d"),
pindexRewind->phashBlock->GetHex(), pindexRewind->nHeight) + "\n\n" +
_("Please help, human!");
LogPrintf("*** %s\n", msg);
uiInterface.ThreadSafeMessageBox(msg, "", CClientUIInterface::MSG_ERROR);
StartShutdown();
return false;
}
CValidationState state;
CBlockIndex* pindex = chainActive.Tip();
while (chainActive.Height() >= nHeight) {
if (fPruneMode && !(chainActive.Tip()->nStatus & BLOCK_HAVE_DATA)) {
// If pruning, don't try rewinding past the HAVE_DATA point;
// since older blocks can't be served anyway, there's
// no need to walk further, and trying to DisconnectTip()
// will fail (and require a needless reindex/redownload
// of the blockchain).
break;
}
if (!DisconnectTip(state, true)) {
return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->nHeight);
}
// Occasionally flush state to disk.
if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC))
return false;
}
// Reduce validity flag and have-data flags.
// We do this after actual disconnecting, otherwise we'll end up writing the lack of data
// to disk before writing the chainstate, resulting in a failure to continue if interrupted.
for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
CBlockIndex* pindexIter = it->second;
// Note: If we encounter an insufficiently validated block that
// is on chainActive, it must be because we are a pruning node, and
// this block or some successor doesn't HAVE_DATA, so we were unable to
// rewind all the way. Blocks remaining on chainActive at this point
// must not have their validity reduced.
if (!sufficientlyValidated(pindexIter) && !chainActive.Contains(pindexIter)) {
// Reduce validity
pindexIter->nStatus =
std::min<unsigned int>(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) |
(pindexIter->nStatus & ~BLOCK_VALID_MASK);
// Remove have-data flags
pindexIter->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
// Remove branch ID
pindexIter->nStatus &= ~BLOCK_ACTIVATES_UPGRADE;
pindexIter->nCachedBranchId = boost::none;
// Remove storage location
pindexIter->nFile = 0;
pindexIter->nDataPos = 0;
pindexIter->nUndoPos = 0;
// Remove various other things
pindexIter->nTx = 0;
pindexIter->nChainTx = 0;
pindexIter->nSproutValue = boost::none;
pindexIter->nChainSproutValue = boost::none;
pindexIter->nSequenceId = 0;
// Make sure it gets written
setDirtyBlockIndex.insert(pindexIter);
// Update indices
setBlockIndexCandidates.erase(pindexIter);
auto ret = mapBlocksUnlinked.equal_range(pindexIter->pprev);
while (ret.first != ret.second) {
if (ret.first->second == pindexIter) {
mapBlocksUnlinked.erase(ret.first++);
} else {
++ret.first;
}
}
} else if (pindexIter->IsValid(BLOCK_VALID_TRANSACTIONS) && pindexIter->nChainTx) {
setBlockIndexCandidates.insert(pindexIter);
}
}
PruneBlockIndexCandidates();
CheckBlockIndex();
if (!FlushStateToDisk(state, FLUSH_STATE_ALWAYS)) {
return false;
}
return true;
}
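// Editor's sketch (hypothetical helper) of the validity reduction applied in
// RewindBlockIndex above: keep all non-validity status bits, but cap the
// BLOCK_VALID_* level at BLOCK_VALID_TREE so the block must be re-validated
// under the correct consensus branch.
static unsigned int ReduceValidityToTreeSketch(unsigned int nStatus)
{
    return std::min<unsigned int>(nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) |
           (nStatus & ~BLOCK_VALID_MASK);
}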
void UnloadBlockIndex()
{
LOCK(cs_main);
setBlockIndexCandidates.clear();
chainActive.SetTip(NULL);
pindexBestInvalid = NULL;
pindexBestHeader = NULL;
mempool.clear();
mapOrphanTransactions.clear();
mapOrphanTransactionsByPrev.clear();
nSyncStarted = 0;
mapBlocksUnlinked.clear();
vinfoBlockFile.clear();
nLastBlockFile = 0;
nBlockSequenceId = 1;
mapBlockSource.clear();
mapBlocksInFlight.clear();
nQueuedValidatedHeaders = 0;
nPreferredDownload = 0;
setDirtyBlockIndex.clear();
setDirtyFileInfo.clear();
mapNodeState.clear();
recentRejects.reset(NULL);
BOOST_FOREACH(BlockMap::value_type& entry, mapBlockIndex) {
delete entry.second;
}
mapBlockIndex.clear();
fHavePruned = false;
}
bool LoadBlockIndex()
{
// Load block index from databases
if (!fReindex && !LoadBlockIndexDB())
return false;
return true;
}
bool InitBlockIndex() {
const CChainParams& chainparams = Params();
LOCK(cs_main);
// Initialize global variables that cannot be constructed at startup.
recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
// Check whether we're already initialized
if (chainActive.Genesis() != NULL)
return true;
// Use the provided setting for -txindex in the new database
fTxIndex = GetBoolArg("-txindex", false);
pblocktree->WriteFlag("txindex", fTxIndex);
LogPrintf("Initializing databases...\n");
// Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
if (!fReindex) {
try {
CBlock &block = const_cast<CBlock&>(Params().GenesisBlock());
// Start new block file
unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
CDiskBlockPos blockPos;
CValidationState state;
if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.GetBlockTime()))
return error("LoadBlockIndex(): FindBlockPos failed");
if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
return error("LoadBlockIndex(): writing genesis block to disk failed");
CBlockIndex *pindex = AddToBlockIndex(block);
if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
return error("LoadBlockIndex(): genesis block not accepted");
if (!ActivateBestChain(state, &block))
return error("LoadBlockIndex(): genesis block cannot be activated");
// Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
return FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
} catch (const std::runtime_error& e) {
return error("LoadBlockIndex(): failed to initialize block database: %s", e.what());
}
}
return true;
}
bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
{
const CChainParams& chainparams = Params();
// Map of disk positions for blocks with unknown parent (only used for reindex)
static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
int64_t nStart = GetTimeMillis();
int nLoaded = 0;
try {
// This takes over fileIn and calls fclose() on it in the CBufferedFile destructor
CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION);
uint64_t nRewind = blkdat.GetPos();
while (!blkdat.eof()) {
boost::this_thread::interruption_point();
blkdat.SetPos(nRewind);
nRewind++; // start one byte further next time, in case of failure
blkdat.SetLimit(); // remove former limit
unsigned int nSize = 0;
try {
// locate a header
unsigned char buf[MESSAGE_START_SIZE];
blkdat.FindByte(Params().MessageStart()[0]);
nRewind = blkdat.GetPos()+1;
blkdat >> FLATDATA(buf);
if (memcmp(buf, Params().MessageStart(), MESSAGE_START_SIZE))
continue;
// read size
blkdat >> nSize;
if (nSize < 80 || nSize > MAX_BLOCK_SIZE)
continue;
} catch (const std::exception&) {
// no valid block header found; don't complain
break;
}
try {
// read block
uint64_t nBlockPos = blkdat.GetPos();
if (dbp)
dbp->nPos = nBlockPos;
blkdat.SetLimit(nBlockPos + nSize);
blkdat.SetPos(nBlockPos);
CBlock block;
blkdat >> block;
nRewind = blkdat.GetPos();
// detect out of order blocks, and store them for later
uint256 hash = block.GetHash();
if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) {
LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(),
block.hashPrevBlock.ToString());
if (dbp)
mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp));
continue;
}
// process in case the block isn't known yet
if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
CValidationState state;
if (ProcessNewBlock(state, NULL, &block, true, dbp))
nLoaded++;
if (state.IsError())
break;
} else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) {
LogPrintf("Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
}
// Recursively process earlier encountered successors of this block
deque<uint256> queue;
queue.push_back(hash);
while (!queue.empty()) {
uint256 head = queue.front();
queue.pop_front();
std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
while (range.first != range.second) {
std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
if (ReadBlockFromDisk(block, it->second))
{
LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
head.ToString());
CValidationState dummy;
if (ProcessNewBlock(dummy, NULL, &block, true, &it->second))
{
nLoaded++;
queue.push_back(block.GetHash());
}
}
range.first++;
mapBlocksUnknownParent.erase(it);
}
}
} catch (const std::exception& e) {
LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what());
}
}
} catch (const std::runtime_error& e) {
AbortNode(std::string("System error: ") + e.what());
}
if (nLoaded > 0)
LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart);
return nLoaded > 0;
}
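// Editor's sketch (hypothetical, simplified): out-of-order import in
// LoadExternalBlockFile is a breadth-first replay. Blocks whose parent is
// unknown are parked in a multimap keyed by the parent hash; whenever a block
// is accepted, its parked children are looked up, processed, and enqueued in
// turn. Unused; for illustration only.
static void ReplayOrphanChildrenSketch(std::multimap<uint256, CDiskBlockPos>& parked,
                                       const uint256& acceptedHash)
{
    std::deque<uint256> queue;
    queue.push_back(acceptedHash);
    while (!queue.empty()) {
        uint256 head = queue.front();
        queue.pop_front();
        std::pair<std::multimap<uint256, CDiskBlockPos>::iterator,
                  std::multimap<uint256, CDiskBlockPos>::iterator> range = parked.equal_range(head);
        while (range.first != range.second) {
            std::multimap<uint256, CDiskBlockPos>::iterator it = range.first++;
            // A real implementation would read and process the block stored at
            // it->second here, then push its hash onto the queue on success.
            parked.erase(it);
        }
    }
}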
void static CheckBlockIndex()
{
const Consensus::Params& consensusParams = Params().GetConsensus();
if (!fCheckBlockIndex) {
return;
}
LOCK(cs_main);
// During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain,
// so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when
// iterating the block tree require that chainActive has been initialized.)
if (chainActive.Height() < 0) {
assert(mapBlockIndex.size() <= 1);
return;
}
// Build forward-pointing map of the entire block tree.
std::multimap<CBlockIndex*,CBlockIndex*> forward;
for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
forward.insert(std::make_pair(it->second->pprev, it->second));
}
assert(forward.size() == mapBlockIndex.size());
std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(NULL);
CBlockIndex *pindex = rangeGenesis.first->second;
rangeGenesis.first++;
assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent NULL.
// Iterate over the entire block tree, using depth-first search.
// Along the way, remember whether there are blocks on the path from genesis
// block being explored which are the first to have certain properties.
size_t nNodes = 0;
int nHeight = 0;
CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid.
CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0.
CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not).
CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not).
CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not).
CBlockIndex* pindexFirstNotScriptsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not).
while (pindex != NULL) {
nNodes++;
if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex;
if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex;
if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex;
if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex;
if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex;
if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex;
if (pindex->pprev != NULL && pindexFirstNotScriptsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex;
// Begin: actual consistency checks.
if (pindex->pprev == NULL) {
// Genesis block checks.
assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match.
assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block.
}
if (pindex->nChainTx == 0) assert(pindex->nSequenceId == 0); // nSequenceId can't be set for blocks that aren't linked
// VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred).
// HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred.
if (!fHavePruned) {
// If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0
assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
assert(pindexFirstMissing == pindexFirstNeverProcessed);
} else {
// If we have pruned, then we can only say that HAVE_DATA implies nTx > 0
if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
}
if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA);
assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent.
// All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set.
assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned).
assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0));
assert(pindex->nHeight == nHeight); // nHeight must be consistent.
assert(pindex->pprev == NULL || pindex->nChainWork >= pindex->pprev->nChainWork); // For every block except the genesis block, the chainwork must be larger than the parent's.
assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->nHeight < nHeight))); // The pskip pointer must point back for all but the first 2 blocks.
assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid
if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL); // TREE valid implies all parents are TREE valid
if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL); // CHAIN valid implies all parents are CHAIN valid
if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == NULL); // SCRIPTS valid implies all parents are SCRIPTS valid
if (pindexFirstInvalid == NULL) {
// Checks for not-invalid blocks.
assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents.
}
if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) {
if (pindexFirstInvalid == NULL) {
// If this block sorts at least as good as the current tip and
// is valid and we have all data for its parents, it must be in
// setBlockIndexCandidates. chainActive.Tip() must also be there
// even if some data has been pruned.
if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) {
assert(setBlockIndexCandidates.count(pindex));
}
// If some parent is missing, then it could be that this block was in
// setBlockIndexCandidates but had to be removed because of the missing data.
// In this case it must be in mapBlocksUnlinked -- see test below.
}
} else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates.
assert(setBlockIndexCandidates.count(pindex) == 0);
}
// Check whether this block is in mapBlocksUnlinked.
std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
bool foundInUnlinked = false;
while (rangeUnlinked.first != rangeUnlinked.second) {
assert(rangeUnlinked.first->first == pindex->pprev);
if (rangeUnlinked.first->second == pindex) {
foundInUnlinked = true;
break;
}
rangeUnlinked.first++;
}
if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) {
// If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
assert(foundInUnlinked);
}
if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) {
// We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
assert(fHavePruned); // We must have pruned.
// This block may have entered mapBlocksUnlinked if:
// - it has a descendant that at some point had more work than the
// tip, and
// - we tried switching to that descendant but were missing
// data for some intermediate block between chainActive and the
// tip.
// So if this block is itself better than chainActive.Tip() and it wasn't in
// setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
if (pindexFirstInvalid == NULL) {
assert(foundInUnlinked);
}
}
}
// assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
// End: actual consistency checks.
// Try descending into the first subnode.
std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
if (range.first != range.second) {
// A subnode was found.
pindex = range.first->second;
nHeight++;
continue;
}
// This is a leaf node.
// Move upwards until we reach a node of which we have not yet visited the last child.
while (pindex) {
// We are going to either move to a parent or a sibling of pindex.
// If pindex was the first with a certain property, unset the corresponding variable.
if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL;
if (pindex == pindexFirstMissing) pindexFirstMissing = NULL;
if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL;
if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL;
if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL;
if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL;
if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = NULL;
// Find our parent.
CBlockIndex* pindexPar = pindex->pprev;
// Find which child we just visited.
std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
while (rangePar.first->second != pindex) {
assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
rangePar.first++;
}
// Proceed to the next one.
rangePar.first++;
if (rangePar.first != rangePar.second) {
// Move to the sibling.
pindex = rangePar.first->second;
break;
} else {
// Move up further.
pindex = pindexPar;
nHeight--;
continue;
}
}
}
// Check that we actually traversed the entire map.
assert(nNodes == forward.size());
}
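// Editor's note: CheckBlockIndex walks the whole block tree depth-first using
// only the forward multimap built from pprev pointers, so the traversal itself
// doubles as a reachability check — every index entry must be visited exactly
// once from genesis (nNodes == forward.size() at the end).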
//////////////////////////////////////////////////////////////////////////////
//
// CAlert
//
std::string GetWarnings(const std::string& strFor)
{
int nPriority = 0;
string strStatusBar;
string strRPC;
if (!CLIENT_VERSION_IS_RELEASE)
strStatusBar = _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications");
if (GetBoolArg("-testsafemode", false))
strStatusBar = strRPC = "testsafemode enabled";
// Misc warnings like out of disk space and clock is wrong
if (strMiscWarning != "")
{
nPriority = 1000;
strStatusBar = strMiscWarning;
}
if (fLargeWorkForkFound)
{
nPriority = 2000;
strStatusBar = strRPC = _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
}
else if (fLargeWorkInvalidChainFound)
{
nPriority = 2000;
strStatusBar = strRPC = _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");
}
// Alerts
{
LOCK(cs_mapAlerts);
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
{
const CAlert& alert = item.second;
if (alert.AppliesToMe() && alert.nPriority > nPriority)
{
nPriority = alert.nPriority;
strStatusBar = alert.strStatusBar;
if (alert.nPriority >= ALERT_PRIORITY_SAFE_MODE) {
strRPC = alert.strRPCError;
}
}
}
}
if (strFor == "statusbar")
return strStatusBar;
else if (strFor == "rpc")
return strRPC;
assert(!"GetWarnings(): invalid parameter");
return "error";
}
//////////////////////////////////////////////////////////////////////////////
//
// Messages
//
bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
switch (inv.type)
{
case MSG_TX:
{
assert(recentRejects);
if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
{
// If the chain tip has changed previously rejected transactions
// might be now valid, e.g. due to a nLockTime'd tx becoming valid,
// or a double-spend. Reset the rejects filter and give those
// txs a second chance.
hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
recentRejects->reset();
}
return recentRejects->contains(inv.hash) ||
mempool.exists(inv.hash) ||
mapOrphanTransactions.count(inv.hash) ||
pcoinsTip->HaveCoins(inv.hash);
}
case MSG_BLOCK:
return mapBlockIndex.count(inv.hash);
}
// Don't know what it is, just say we already got one
return true;
}
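// Editor's note: recentRejects is a rolling bloom filter, so membership tests
// are probabilistic; a false positive merely suppresses a re-download until
// the filter is cleared, which is why it is reset above whenever the chain
// tip changes and previously rejected transactions might have become valid.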
void static ProcessGetData(CNode* pfrom)
{
std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
vector<CInv> vNotFound;
LOCK(cs_main);
while (it != pfrom->vRecvGetData.end()) {
// Don't bother if send buffer is too full to respond anyway
if (pfrom->nSendSize >= SendBufferSize())
break;
const CInv &inv = *it;
{
boost::this_thread::interruption_point();
it++;
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
{
bool send = false;
BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
if (mi != mapBlockIndex.end())
{
if (chainActive.Contains(mi->second)) {
send = true;
} else {
static const int nOneMonth = 30 * 24 * 60 * 60;
// To prevent fingerprinting attacks, only send blocks outside of the active
// chain if they are valid, and no more than a month older (both in time, and in
// best equivalent proof of work) than the best header chain we know about.
send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != NULL) &&
(pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
(GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, Params().GetConsensus()) < nOneMonth);
if (!send) {
LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
}
}
}
// Pruned nodes may have deleted the block, so check whether
// it's available before trying to send.
if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
{
// Send block from disk
CBlock block;
if (!ReadBlockFromDisk(block, (*mi).second))
assert(!"cannot load block from disk");
if (inv.type == MSG_BLOCK)
pfrom->PushMessage("block", block);
else // MSG_FILTERED_BLOCK)
{
LOCK(pfrom->cs_filter);
if (pfrom->pfilter)
{
CMerkleBlock merkleBlock(block, *pfrom->pfilter);
pfrom->PushMessage("merkleblock", merkleBlock);
// CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
// This avoids hurting performance by pointlessly requiring a round-trip
// Note that there is currently no way for a node to request any single
// transactions we didn't send here - they must either disconnect and retry
// or request the full block.
// Thus, the protocol spec allows us to provide duplicate txns here;
// however, we MUST always provide at least what the remote peer needs.
typedef std::pair<unsigned int, uint256> PairType;
BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
if (!pfrom->setInventoryKnown.count(CInv(MSG_TX, pair.second)))
pfrom->PushMessage("tx", block.vtx[pair.first]);
}
// else
// no response
}
// Trigger the peer node to send a getblocks request for the next batch of inventory
if (inv.hash == pfrom->hashContinue)
{
// Bypass PushInventory, this must send even if redundant,
// and we want it right after the last block so they don't
// wait for other stuff first.
vector<CInv> vInv;
vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
pfrom->PushMessage("inv", vInv);
pfrom->hashContinue.SetNull();
}
}
}
else if (inv.IsKnownType())
{
// Send stream from relay memory
bool pushed = false;
{
LOCK(cs_mapRelay);
map<CInv, CDataStream>::iterator mi = mapRelay.find(inv);
if (mi != mapRelay.end()) {
pfrom->PushMessage(inv.GetCommand(), (*mi).second);
pushed = true;
}
}
if (!pushed && inv.type == MSG_TX) {
CTransaction tx;
if (mempool.lookup(inv.hash, tx)) {
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss.reserve(1000);
ss << tx;
pfrom->PushMessage("tx", ss);
pushed = true;
}
}
if (!pushed) {
vNotFound.push_back(inv);
}
}
// Track requests for our stuff.
GetMainSignals().Inventory(inv.hash);
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
break;
}
}
pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
if (!vNotFound.empty()) {
// Let the peer know that we didn't find what it asked for, so it doesn't
// have to wait around forever. Currently only SPV clients actually care
// about this message: it's needed when they are recursively walking the
// dependencies of relevant unconfirmed transactions. SPV clients want to
// do that because they want to know about (and store and rebroadcast and
// risk analyze) the dependencies of transactions relevant to them, without
// having to download the entire memory pool.
pfrom->PushMessage("notfound", vNotFound);
}
}
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv, int64_t nTimeReceived)
{
const CChainParams& chainparams = Params();
LogPrint("net", "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id);
if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
{
LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
return true;
}
if (strCommand == "version")
{
// Each connection can only send one version message
if (pfrom->nVersion != 0)
{
pfrom->PushMessage("reject", strCommand, REJECT_DUPLICATE, string("Duplicate version message"));
Misbehaving(pfrom->GetId(), 1);
return false;
}
int64_t nTime;
CAddress addrMe;
CAddress addrFrom;
uint64_t nNonce = 1;
vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe;
if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
{
// disconnect from peers older than this proto version
LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
pfrom->PushMessage("reject", strCommand, REJECT_OBSOLETE,
strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION));
pfrom->fDisconnect = true;
return false;
}
// When Overwinter is active, reject incoming connections from non-Overwinter nodes
const Consensus::Params& params = Params().GetConsensus();
if (NetworkUpgradeActive(GetHeight(), params, Consensus::UPGRADE_OVERWINTER)
&& pfrom->nVersion < params.vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion)
{
LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
pfrom->PushMessage("reject", strCommand, REJECT_OBSOLETE,
strprintf("Version must be %d or greater",
params.vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion));
pfrom->fDisconnect = true;
return false;
}
if (pfrom->nVersion == 10300)
pfrom->nVersion = 300;
if (!vRecv.empty())
vRecv >> addrFrom >> nNonce;
if (!vRecv.empty()) {
vRecv >> LIMITED_STRING(pfrom->strSubVer, 256);
pfrom->cleanSubVer = SanitizeString(pfrom->strSubVer);
}
if (!vRecv.empty())
vRecv >> pfrom->nStartingHeight;
if (!vRecv.empty())
vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
else
pfrom->fRelayTxes = true;
// Disconnect if we connected to ourself
if (nNonce == nLocalHostNonce && nNonce > 1)
{
LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
pfrom->fDisconnect = true;
return true;
}
pfrom->addrLocal = addrMe;
if (pfrom->fInbound && addrMe.IsRoutable())
{
SeenLocal(addrMe);
}
// Be shy and don't send version until we hear
if (pfrom->fInbound)
pfrom->PushVersion();
pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);
// Potentially mark this peer as a preferred download peer.
UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
// Change version
pfrom->PushMessage("verack");
pfrom->ssSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
if (!pfrom->fInbound)
{
// Advertise our address
if (fListen && !IsInitialBlockDownload())
{
CAddress addr = GetLocalAddress(&pfrom->addr);
if (addr.IsRoutable())
{
LogPrintf("ProcessMessages: advertizing address %s\n", addr.ToString());
pfrom->PushAddress(addr);
} else if (IsPeerAddrLocalGood(pfrom)) {
addr.SetIP(pfrom->addrLocal);
LogPrintf("ProcessMessages: advertizing address %s\n", addr.ToString());
pfrom->PushAddress(addr);
}
}
// Get recent addresses
if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || addrman.size() < 1000)
{
pfrom->PushMessage("getaddr");
pfrom->fGetAddr = true;
}
addrman.Good(pfrom->addr);
} else {
if (((CNetAddr)pfrom->addr) == (CNetAddr)addrFrom)
{
addrman.Add(addrFrom, addrFrom);
addrman.Good(addrFrom);
}
}
// Relay alerts
{
LOCK(cs_mapAlerts);
BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
item.second.RelayTo(pfrom);
}
pfrom->fSuccessfullyConnected = true;
string remoteAddr;
if (fLogIPs)
remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
pfrom->cleanSubVer, pfrom->nVersion,
pfrom->nStartingHeight, addrMe.ToString(), pfrom->id,
remoteAddr);
int64_t nTimeOffset = nTime - GetTime();
pfrom->nTimeOffset = nTimeOffset;
AddTimeData(pfrom->addr, nTimeOffset);
}
else if (pfrom->nVersion == 0)
{
// Must have a version message before anything else
Misbehaving(pfrom->GetId(), 1);
return false;
}
else if (strCommand == "verack")
{
pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
// Mark this node as currently connected, so we update its timestamp later.
if (pfrom->fNetworkNode) {
LOCK(cs_main);
State(pfrom->GetId())->fCurrentlyConnected = true;
}
}
// Disconnect existing peer connection when:
// 1. The version message has been received
// 2. Overwinter is active
// 3. Peer version is pre-Overwinter
else if (NetworkUpgradeActive(GetHeight(), chainparams.GetConsensus(), Consensus::UPGRADE_OVERWINTER)
&& (pfrom->nVersion < chainparams.GetConsensus().vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion))
{
LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, pfrom->nVersion);
pfrom->PushMessage("reject", strCommand, REJECT_OBSOLETE,
strprintf("Version must be %d or greater",
chainparams.GetConsensus().vUpgrades[Consensus::UPGRADE_OVERWINTER].nProtocolVersion));
pfrom->fDisconnect = true;
return false;
}
else if (strCommand == "addr")
{
vector<CAddress> vAddr;
vRecv >> vAddr;
// Don't want addr from older versions unless seeding
if (pfrom->nVersion < CADDR_TIME_VERSION && addrman.size() > 1000)
return true;
if (vAddr.size() > 1000)
{
Misbehaving(pfrom->GetId(), 20);
return error("message addr size() = %u", vAddr.size());
}
// Store the new addresses
vector<CAddress> vAddrOk;
int64_t nNow = GetAdjustedTime();
int64_t nSince = nNow - 10 * 60;
BOOST_FOREACH(CAddress& addr, vAddr)
{
boost::this_thread::interruption_point();
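// Clamp absurd timestamps (before ~1973, or more than 10 minutes in the
// future) to "5 days old": old enough that the relay check below skips the
// address, without discarding it entirely.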
if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
addr.nTime = nNow - 5 * 24 * 60 * 60;
pfrom->AddAddressKnown(addr);
bool fReachable = IsReachable(addr);
if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
{
// Relay to a limited number of other nodes
{
LOCK(cs_vNodes);
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the addrKnowns of the chosen nodes prevent repeats
static uint256 hashSalt;
if (hashSalt.IsNull())
hashSalt = GetRandHash();
uint64_t hashAddr = addr.GetHash();
uint256 hashRand = ArithToUint256(UintToArith256(hashSalt) ^ (hashAddr<<32) ^ ((GetTime()+hashAddr)/(24*60*60)));
hashRand = Hash(BEGIN(hashRand), END(hashRand));
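// The day-bucket term above, (GetTime()+hashAddr)/(24*60*60), changes once
// per day, so a given address hashes to the same relay targets for 24 hours
// before a fresh set is selected.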
multimap<uint256, CNode*> mapMix;
BOOST_FOREACH(CNode* pnode, vNodes)
{
if (pnode->nVersion < CADDR_TIME_VERSION)
continue;
unsigned int nPointer;
memcpy(&nPointer, &pnode, sizeof(nPointer));
uint256 hashKey = ArithToUint256(UintToArith256(hashRand) ^ nPointer);
hashKey = Hash(BEGIN(hashKey), END(hashKey));
mapMix.insert(make_pair(hashKey, pnode));
}
int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
for (multimap<uint256, CNode*>::iterator mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
((*mi).second)->PushAddress(addr);
}
}
// Do not store addresses outside our network
if (fReachable)
vAddrOk.push_back(addr);
}
addrman.Add(vAddrOk, pfrom->addr, 2 * 60 * 60);
if (vAddr.size() < 1000)
pfrom->fGetAddr = false;
if (pfrom->fOneShot)
pfrom->fDisconnect = true;
}
else if (strCommand == "inv")
{
vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
Misbehaving(pfrom->GetId(), 20);
return error("message inv size() = %u", vInv.size());
}
LOCK(cs_main);
std::vector<CInv> vToFetch;
for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
{
const CInv &inv = vInv[nInv];
boost::this_thread::interruption_point();
pfrom->AddInventoryKnown(inv);
bool fAlreadyHave = AlreadyHave(inv);
LogPrint("net", "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->id);
if (!fAlreadyHave && !fImporting && !fReindex && inv.type != MSG_BLOCK)
pfrom->AskFor(inv);
if (inv.type == MSG_BLOCK) {
UpdateBlockAvailability(pfrom->GetId(), inv.hash);
if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
// First request the headers preceding the announced block. In the normal fully-synced
// case where a new block is announced that succeeds the current tip (no reorganization),
// there are no such headers.
// Secondly, and only when we are close to being synced, we request the announced block directly,
// to avoid an extra round-trip. Note that we must *first* ask for the headers, so by the
// time the block arrives, the header chain leading up to it is already validated. Not
// doing this will result in the received block being rejected as an orphan in case it is
// not a direct successor.
pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexBestHeader), inv.hash);
CNodeState *nodestate = State(pfrom->GetId());
if (chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - chainparams.GetConsensus().nPowTargetSpacing * 20 &&
nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
vToFetch.push_back(inv);
// Mark block as in flight already, even though the actual "getdata" message only goes out
// later (within the same cs_main lock, though).
MarkBlockAsInFlight(pfrom->GetId(), inv.hash, chainparams.GetConsensus());
}
LogPrint("net", "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id);
}
}
// Track requests for our stuff
GetMainSignals().Inventory(inv.hash);
if (pfrom->nSendSize > (SendBufferSize() * 2)) {
Misbehaving(pfrom->GetId(), 50);
return error("send buffer size() = %u", pfrom->nSendSize);
}
}
if (!vToFetch.empty())
pfrom->PushMessage("getdata", vToFetch);
}
else if (strCommand == "getdata")
{
vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
Misbehaving(pfrom->GetId(), 20);
return error("message getdata size() = %u", vInv.size());
}
if (fDebug || (vInv.size() != 1))
LogPrint("net", "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id);
if ((fDebug && vInv.size() > 0) || (vInv.size() == 1))
LogPrint("net", "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id);
pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
ProcessGetData(pfrom);
}
else if (strCommand == "getblocks")
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
LOCK(cs_main);
// Find the last block the caller has in the main chain
CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
// Send the rest of the chain
if (pindex)
pindex = chainActive.Next(pindex);
int nLimit = 500;
LogPrint("net", "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id);
for (; pindex; pindex = chainActive.Next(pindex))
{
if (pindex->GetBlockHash() == hashStop)
{
LogPrint("net", " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
break;
}
pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
if (--nLimit <= 0)
{
// When this block is requested, we'll send an inv that'll
// trigger the peer to getblocks the next batch of inventory.
LogPrint("net", " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
pfrom->hashContinue = pindex->GetBlockHash();
break;
}
}
}
else if (strCommand == "getheaders")
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
LOCK(cs_main);
if (IsInitialBlockDownload())
return true;
CBlockIndex* pindex = NULL;
if (locator.IsNull())
{
// If locator is null, return the hashStop block
BlockMap::iterator mi = mapBlockIndex.find(hashStop);
if (mi == mapBlockIndex.end())
return true;
pindex = (*mi).second;
}
else
{
// Find the last block the caller has in the main chain
pindex = FindForkInGlobalIndex(chainActive, locator);
if (pindex)
pindex = chainActive.Next(pindex);
}
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
vector<CBlock> vHeaders;
int nLimit = MAX_HEADERS_RESULTS;
LogPrint("net", "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString(), pfrom->id);
for (; pindex; pindex = chainActive.Next(pindex))
{
vHeaders.push_back(pindex->GetBlockHeader());
if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
break;
}
pfrom->PushMessage("headers", vHeaders);
}
else if (strCommand == "tx")
{
vector<uint256> vWorkQueue;
vector<uint256> vEraseQueue;
CTransaction tx;
vRecv >> tx;
CInv inv(MSG_TX, tx.GetHash());
pfrom->AddInventoryKnown(inv);
LOCK(cs_main);
bool fMissingInputs = false;
CValidationState state;
pfrom->setAskFor.erase(inv.hash);
mapAlreadyAskedFor.erase(inv);
if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, tx, true, &fMissingInputs))
{
mempool.check(pcoinsTip);
RelayTransaction(tx);
vWorkQueue.push_back(inv.hash);
LogPrint("mempool", "AcceptToMemoryPool: peer=%d %s: accepted %s (poolsz %u)\n",
pfrom->id, pfrom->cleanSubVer,
tx.GetHash().ToString(),
mempool.mapTx.size());
// Recursively process any orphan transactions that depended on this one
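// vWorkQueue acts as a breadth-first work list: every accepted transaction
// (including orphans accepted below) is appended to it, so whole chains of
// dependent orphans unwind iteratively rather than recursively.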
set<NodeId> setMisbehaving;
for (unsigned int i = 0; i < vWorkQueue.size(); i++)
{
map<uint256, set<uint256> >::iterator itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue[i]);
if (itByPrev == mapOrphanTransactionsByPrev.end())
continue;
for (set<uint256>::iterator mi = itByPrev->second.begin();
mi != itByPrev->second.end();
++mi)
{
const uint256& orphanHash = *mi;
const CTransaction& orphanTx = mapOrphanTransactions[orphanHash].tx;
NodeId fromPeer = mapOrphanTransactions[orphanHash].fromPeer;
bool fMissingInputs2 = false;
// Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan
// resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
// anyone relaying LegitTxX banned)
CValidationState stateDummy;
if (setMisbehaving.count(fromPeer))
continue;
if (AcceptToMemoryPool(mempool, stateDummy, orphanTx, true, &fMissingInputs2))
{
LogPrint("mempool", " accepted orphan tx %s\n", orphanHash.ToString());
RelayTransaction(orphanTx);
vWorkQueue.push_back(orphanHash);
vEraseQueue.push_back(orphanHash);
}
else if (!fMissingInputs2)
{
int nDos = 0;
if (stateDummy.IsInvalid(nDos) && nDos > 0)
{
// Punish peer that gave us an invalid orphan tx
Misbehaving(fromPeer, nDos);
setMisbehaving.insert(fromPeer);
LogPrint("mempool", " invalid orphan tx %s\n", orphanHash.ToString());
}
// Has inputs but not accepted to mempool
// Probably non-standard or insufficient fee/priority
LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
vEraseQueue.push_back(orphanHash);
assert(recentRejects);
recentRejects->insert(orphanHash);
}
mempool.check(pcoinsTip);
}
}
BOOST_FOREACH(uint256 hash, vEraseQueue)
EraseOrphanTx(hash);
}
// TODO: currently, prohibit joinsplits from entering mapOrphans
else if (fMissingInputs && tx.vjoinsplit.size() == 0)
{
AddOrphanTx(tx, pfrom->GetId());
// DoS prevention: do not allow mapOrphanTransactions to grow unbounded
unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
if (nEvicted > 0)
LogPrint("mempool", "mapOrphan overflow, removed %u tx\n", nEvicted);
} else {
assert(recentRejects);
recentRejects->insert(tx.GetHash());
if (pfrom->fWhitelisted) {
// Always relay transactions received from whitelisted peers, even
// if they were already in the mempool or rejected from it due
// to policy, allowing the node to function as a gateway for
// nodes hidden behind it.
//
// Never relay transactions that we would assign a non-zero DoS
// score for, as we expect peers to do the same with us in that
// case.
int nDoS = 0;
if (!state.IsInvalid(nDoS) || nDoS == 0) {
LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->id);
RelayTransaction(tx);
} else {
LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s (code %d))\n",
tx.GetHash().ToString(), pfrom->id, state.GetRejectReason(), state.GetRejectCode());
}
}
}
int nDoS = 0;
if (state.IsInvalid(nDoS))
{
LogPrint("mempool", "%s from peer=%d %s was not accepted into the memory pool: %s\n", tx.GetHash().ToString(),
pfrom->id, pfrom->cleanSubVer,
state.GetRejectReason());
pfrom->PushMessage("reject", strCommand, state.GetRejectCode(),
state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
if (nDoS > 0)
Misbehaving(pfrom->GetId(), nDoS);
}
}
else if (strCommand == "headers" && !fImporting && !fReindex) // Ignore headers received while importing
{
std::vector<CBlockHeader> headers;
// Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
unsigned int nCount = ReadCompactSize(vRecv);
if (nCount > MAX_HEADERS_RESULTS) {
Misbehaving(pfrom->GetId(), 20);
return error("headers message size = %u", nCount);
}
headers.resize(nCount);
for (unsigned int n = 0; n < nCount; n++) {
vRecv >> headers[n];
ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
}
LOCK(cs_main);
if (nCount == 0) {
// Nothing interesting. Stop asking this peer for more headers.
return true;
}
CBlockIndex *pindexLast = NULL;
BOOST_FOREACH(const CBlockHeader& header, headers) {
CValidationState state;
if (pindexLast != NULL && header.hashPrevBlock != pindexLast->GetBlockHash()) {
Misbehaving(pfrom->GetId(), 20);
return error("non-continuous headers sequence");
}
if (!AcceptBlockHeader(header, state, &pindexLast)) {
int nDoS;
if (state.IsInvalid(nDoS)) {
if (nDoS > 0)
Misbehaving(pfrom->GetId(), nDoS);
return error("invalid header received");
}
}
}
if (pindexLast)
UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
if (nCount == MAX_HEADERS_RESULTS && pindexLast) {
// Headers message had its maximum size; the peer may have more headers.
// TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
// from there instead.
LogPrint("net", "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
pfrom->PushMessage("getheaders", chainActive.GetLocator(pindexLast), uint256());
}
CheckBlockIndex();
}
else if (strCommand == "block" && !fImporting && !fReindex) // Ignore blocks received while importing
{
CBlock block;
vRecv >> block;
CInv inv(MSG_BLOCK, block.GetHash());
LogPrint("net", "received block %s peer=%d\n", inv.hash.ToString(), pfrom->id);
pfrom->AddInventoryKnown(inv);
CValidationState state;
// Process all blocks from whitelisted peers, even if not requested,
// unless we're still syncing with the network.
// Such an unrequested block may still be processed, subject to the
// conditions in AcceptBlock().
bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
ProcessNewBlock(state, pfrom, &block, forceProcessing, NULL);
int nDoS;
if (state.IsInvalid(nDoS)) {
pfrom->PushMessage("reject", strCommand, state.GetRejectCode(),
state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
if (nDoS > 0) {
LOCK(cs_main);
Misbehaving(pfrom->GetId(), nDoS);
}
}
}
// This asymmetric behavior for inbound and outbound connections was introduced
// to prevent a fingerprinting attack: an attacker can send specific fake addresses
// to users' AddrMan and later request them by sending getaddr messages.
// Making nodes which are behind NAT and can only make outgoing connections ignore
// the getaddr message mitigates the attack.
else if ((strCommand == "getaddr") && (pfrom->fInbound))
{
// Only send one GetAddr response per connection to reduce resource waste
// and discourage addr stamping of INV announcements.
if (pfrom->fSentAddr) {
LogPrint("net", "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->id);
return true;
}
pfrom->fSentAddr = true;
pfrom->vAddrToSend.clear();
vector<CAddress> vAddr = addrman.GetAddr();
BOOST_FOREACH(const CAddress &addr, vAddr)
pfrom->PushAddress(addr);
}
else if (strCommand == "mempool")
{
LOCK2(cs_main, pfrom->cs_filter);
std::vector<uint256> vtxid;
mempool.queryHashes(vtxid);
vector<CInv> vInv;
BOOST_FOREACH(uint256& hash, vtxid) {
CInv inv(MSG_TX, hash);
CTransaction tx;
bool fInMemPool = mempool.lookup(hash, tx);
if (!fInMemPool) continue; // another thread removed since queryHashes, maybe...
if ((pfrom->pfilter && pfrom->pfilter->IsRelevantAndUpdate(tx)) ||
(!pfrom->pfilter))
vInv.push_back(inv);
if (vInv.size() == MAX_INV_SZ) {
pfrom->PushMessage("inv", vInv);
vInv.clear();
}
}
if (vInv.size() > 0)
pfrom->PushMessage("inv", vInv);
}
else if (strCommand == "ping")
{
if (pfrom->nVersion > BIP0031_VERSION)
{
uint64_t nonce = 0;
vRecv >> nonce;
// Echo the message back with the nonce. This allows for two useful features:
//
// 1) A remote node can quickly check if the connection is operational
// 2) Remote nodes can measure the latency of the network thread. If this node
// is overloaded it won't respond to pings quickly and the remote node can
// avoid sending us more work, like chain download requests.
//
// The nonce stops the remote getting confused between different pings: without
// it, if the remote node sends a ping once per second and this node takes 5
// seconds to respond to each, the 5th ping the remote sends would appear to
// return very quickly.
pfrom->PushMessage("pong", nonce);
}
}
else if (strCommand == "pong")
{
int64_t pingUsecEnd = nTimeReceived;
uint64_t nonce = 0;
size_t nAvail = vRecv.in_avail();
bool bPingFinished = false;
std::string sProblem;
if (nAvail >= sizeof(nonce)) {
vRecv >> nonce;
// Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
if (pfrom->nPingNonceSent != 0) {
if (nonce == pfrom->nPingNonceSent) {
// Matching pong received, this ping is no longer outstanding
bPingFinished = true;
int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
if (pingUsecTime > 0) {
// Successful ping time measurement, replace previous
pfrom->nPingUsecTime = pingUsecTime;
pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime, pingUsecTime);
} else {
// This should never happen
sProblem = "Timing mishap";
}
} else {
// Nonce mismatches are normal when pings are overlapping
sProblem = "Nonce mismatch";
if (nonce == 0) {
// This is most likely a bug in another implementation somewhere; cancel this ping
bPingFinished = true;
sProblem = "Nonce zero";
}
}
} else {
sProblem = "Unsolicited pong without ping";
}
} else {
// This is most likely a bug in another implementation somewhere; cancel this ping
bPingFinished = true;
sProblem = "Short payload";
}
if (!(sProblem.empty())) {
LogPrint("net", "pong peer=%d %s: %s, %x expected, %x received, %u bytes\n",
pfrom->id,
pfrom->cleanSubVer,
sProblem,
pfrom->nPingNonceSent,
nonce,
nAvail);
}
if (bPingFinished) {
pfrom->nPingNonceSent = 0;
}
}
else if (fAlerts && strCommand == "alert")
{
CAlert alert;
vRecv >> alert;
uint256 alertHash = alert.GetHash();
if (pfrom->setKnown.count(alertHash) == 0)
{
if (alert.ProcessAlert(Params().AlertKey()))
{
// Relay
pfrom->setKnown.insert(alertHash);
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
alert.RelayTo(pnode);
}
}
else {
// Small DoS penalty so peers that send us lots of
// duplicate/expired/invalid-signature/whatever alerts
// eventually get banned.
// This isn't a Misbehaving(100) (immediate ban) because the
// peer might be an older or different implementation with
// a different signature key, etc.
Misbehaving(pfrom->GetId(), 10);
}
}
}
else if (strCommand == "filterload")
{
CBloomFilter filter;
vRecv >> filter;
if (!filter.IsWithinSizeConstraints())
// There is no excuse for sending a too-large filter
Misbehaving(pfrom->GetId(), 100);
else
{
LOCK(pfrom->cs_filter);
delete pfrom->pfilter;
pfrom->pfilter = new CBloomFilter(filter);
pfrom->pfilter->UpdateEmptyFull();
}
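// Loading a filter implicitly opts the peer back into transaction relay;
// fRelayTxes may have been false since the version handshake ("set to true
// after we get the first filter* message").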
pfrom->fRelayTxes = true;
}
else if (strCommand == "filteradd")
{
vector<unsigned char> vData;
vRecv >> vData;
// Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
// and thus, the maximum size any matched object can have) in a filteradd message
if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE)
{
Misbehaving(pfrom->GetId(), 100);
} else {
LOCK(pfrom->cs_filter);
if (pfrom->pfilter)
pfrom->pfilter->insert(vData);
else
Misbehaving(pfrom->GetId(), 100);
}
}
else if (strCommand == "filterclear")
{
LOCK(pfrom->cs_filter);
delete pfrom->pfilter;
pfrom->pfilter = new CBloomFilter();
pfrom->fRelayTxes = true;
}
else if (strCommand == "reject")
{
if (fDebug) {
try {
string strMsg; unsigned char ccode; string strReason;
vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
ostringstream ss;
ss << strMsg << " code " << itostr(ccode) << ": " << strReason;
if (strMsg == "block" || strMsg == "tx")
{
uint256 hash;
vRecv >> hash;
ss << ": hash " << hash.ToString();
}
LogPrint("net", "Reject %s\n", SanitizeString(ss.str()));
} catch (const std::ios_base::failure&) {
// Avoid feedback loops by preventing reject messages from triggering a new reject message.
LogPrint("net", "Unparseable reject message received\n");
}
}
}
else if (strCommand == "notfound") {
// We do not care about the NOTFOUND message, but logging an Unknown Command
// message would be undesirable as we transmit it ourselves.
}
else {
// Ignore unknown commands for extensibility
LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
}
return true;
}
// requires LOCK(cs_vRecvMsg)
bool ProcessMessages(CNode* pfrom)
{
//if (fDebug)
// LogPrintf("%s(%u messages)\n", __func__, pfrom->vRecvMsg.size());
//
// Message format
// (4) message start
// (12) command
// (4) size
// (4) checksum
// (x) data
//
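// This 24-byte framing corresponds to CMessageHeader; nMessageSize counts
// only the (x) data portion, and the checksum is compared below against the
// first four bytes of the double-SHA256 of that data.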
bool fOk = true;
if (!pfrom->vRecvGetData.empty())
ProcessGetData(pfrom);
// this maintains the order of responses
if (!pfrom->vRecvGetData.empty()) return fOk;
std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin();
while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) {
// Don't bother if send buffer is too full to respond anyway
if (pfrom->nSendSize >= SendBufferSize())
break;
// get next message
CNetMessage& msg = *it;
//if (fDebug)
// LogPrintf("%s(message %u msgsz, %u bytes, complete:%s)\n", __func__,
// msg.hdr.nMessageSize, msg.vRecv.size(),
// msg.complete() ? "Y" : "N");
// end, if an incomplete message is found
if (!msg.complete())
break;
// at this point, any failure means we can delete the current message
it++;
// Scan for message start
if (memcmp(msg.hdr.pchMessageStart, Params().MessageStart(), MESSAGE_START_SIZE) != 0) {
LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id);
fOk = false;
break;
}
// Read header
CMessageHeader& hdr = msg.hdr;
if (!hdr.IsValid(Params().MessageStart()))
{
LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id);
continue;
}
string strCommand = hdr.GetCommand();
// Message size
unsigned int nMessageSize = hdr.nMessageSize;
// Checksum
CDataStream& vRecv = msg.vRecv;
uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize);
unsigned int nChecksum = ReadLE32((unsigned char*)&hash);
if (nChecksum != hdr.nChecksum)
{
LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n", __func__,
SanitizeString(strCommand), nMessageSize, nChecksum, hdr.nChecksum);
continue;
}
// Process message
bool fRet = false;
try
{
fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime);
boost::this_thread::interruption_point();
}
catch (const std::ios_base::failure& e)
{
pfrom->PushMessage("reject", strCommand, REJECT_MALFORMED, string("error parsing message"));
if (strstr(e.what(), "end of data"))
{
// Allow exceptions from under-length message on vRecv
LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
}
else if (strstr(e.what(), "size too large"))
{
// Allow exceptions from over-long size
LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
}
else
{
PrintExceptionContinue(&e, "ProcessMessages()");
}
}
catch (const boost::thread_interrupted&) {
throw;
}
catch (const std::exception& e) {
PrintExceptionContinue(&e, "ProcessMessages()");
} catch (...) {
PrintExceptionContinue(NULL, "ProcessMessages()");
}
if (!fRet)
LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id);
break;
}
// In case the connection got shut down, its receive buffer was wiped
if (!pfrom->fDisconnect)
pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it);
return fOk;
}
bool SendMessages(CNode* pto, bool fSendTrickle)
{
const Consensus::Params& consensusParams = Params().GetConsensus();
{
// Don't send anything until we get its version message
if (pto->nVersion == 0)
return true;
//
// Message: ping
//
bool pingSend = false;
if (pto->fPingQueued) {
// RPC ping request by user
pingSend = true;
}
if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
// Ping automatically sent as a latency probe & keepalive.
pingSend = true;
}
if (pingSend) {
uint64_t nonce = 0;
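// A nonce of zero is reserved: the "pong" handler treats a zero nonce as a
// cancelled ping, so draw random bytes until the value is nonzero.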
while (nonce == 0) {
GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
}
pto->fPingQueued = false;
pto->nPingUsecStart = GetTimeMicros();
if (pto->nVersion > BIP0031_VERSION) {
pto->nPingNonceSent = nonce;
pto->PushMessage("ping", nonce);
} else {
// Peer is too old to support ping command with nonce, pong will never arrive.
pto->nPingNonceSent = 0;
pto->PushMessage("ping");
}
}
TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
if (!lockMain)
return true;
// Address refresh broadcast
static int64_t nLastRebroadcast;
if (!IsInitialBlockDownload() && (GetTime() - nLastRebroadcast > 24 * 60 * 60))
{
LOCK(cs_vNodes);
BOOST_FOREACH(CNode* pnode, vNodes)
{
// Periodically clear addrKnown to allow refresh broadcasts
if (nLastRebroadcast)
pnode->addrKnown.reset();
// Rebroadcast our address
AdvertizeLocal(pnode);
}
if (!vNodes.empty())
nLastRebroadcast = GetTime();
}
//
// Message: addr
//
if (fSendTrickle)
{
vector<CAddress> vAddr;
vAddr.reserve(pto->vAddrToSend.size());
BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
{
if (!pto->addrKnown.contains(addr.GetKey()))
{
pto->addrKnown.insert(addr.GetKey());
vAddr.push_back(addr);
// receiver rejects addr messages larger than 1000
if (vAddr.size() >= 1000)
{
pto->PushMessage("addr", vAddr);
vAddr.clear();
}
}
}
pto->vAddrToSend.clear();
if (!vAddr.empty())
pto->PushMessage("addr", vAddr);
}
CNodeState &state = *State(pto->GetId());
if (state.fShouldBan) {
if (pto->fWhitelisted)
LogPrintf("Warning: not punishing whitelisted peer %s!\n", pto->addr.ToString());
else {
pto->fDisconnect = true;
if (pto->addr.IsLocal())
LogPrintf("Warning: not banning local peer %s!\n", pto->addr.ToString());
else
{
CNode::Ban(pto->addr);
}
}
state.fShouldBan = false;
}
BOOST_FOREACH(const CBlockReject& reject, state.rejects)
pto->PushMessage("reject", (string)"block", reject.chRejectCode, reject.strRejectReason, reject.hashBlock);
state.rejects.clear();
// Start block sync
if (pindexBestHeader == NULL)
pindexBestHeader = chainActive.Tip();
bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
state.fSyncStarted = true;
nSyncStarted++;
CBlockIndex *pindexStart = pindexBestHeader->pprev ? pindexBestHeader->pprev : pindexBestHeader;
LogPrint("net", "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight);
pto->PushMessage("getheaders", chainActive.GetLocator(pindexStart), uint256());
}
}
// Resend wallet transactions that haven't gotten in a block yet
// Except during reindex, importing and IBD, when old wallet
// transactions become unconfirmed and spam other nodes.
if (!fReindex && !fImporting && !IsInitialBlockDownload())
{
GetMainSignals().Broadcast(nTimeBestReceived);
}
//
// Message: inventory
//
vector<CInv> vInv;
vector<CInv> vInvWait;
{
LOCK(pto->cs_inventory);
vInv.reserve(pto->vInventoryToSend.size());
vInvWait.reserve(pto->vInventoryToSend.size());
BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
{
if (pto->setInventoryKnown.count(inv))
continue;
// trickle out tx inv to protect privacy
if (inv.type == MSG_TX && !fSendTrickle)
{
// 1/4 of tx invs blast to all immediately
static uint256 hashSalt;
if (hashSalt.IsNull())
hashSalt = GetRandHash();
uint256 hashRand = ArithToUint256(UintToArith256(inv.hash) ^ UintToArith256(hashSalt));
hashRand = Hash(BEGIN(hashRand), END(hashRand));
bool fTrickleWait = ((UintToArith256(hashRand) & 3) != 0);
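// The mask above defers invs for roughly three quarters of hashes
// ((hashRand & 3) != 0) to the next trickle cycle.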
if (fTrickleWait)
{
vInvWait.push_back(inv);
continue;
}
}
// returns true if wasn't already contained in the set
if (pto->setInventoryKnown.insert(inv).second)
{
vInv.push_back(inv);
if (vInv.size() >= 1000)
{
pto->PushMessage("inv", vInv);
vInv.clear();
}
}
}
pto->vInventoryToSend = vInvWait;
}
if (!vInv.empty())
pto->PushMessage("inv", vInv);
// Detect whether we're stalling
int64_t nNow = GetTimeMicros();
if (!pto->fDisconnect && state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
// should only happen during initial block download.
LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id);
pto->fDisconnect = true;
}
// In case there is a block that has been in flight from this peer for (2 + 0.5 * N) times the block interval
// (with N the number of validated blocks that were in flight at the time it was requested), disconnect due to
// timeout. We compensate for in-flight blocks to prevent killing off peers due to our own downstream link
// being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
// to unreasonably increase our timeout.
// We also compare the block download timeout originally calculated against the time at which we'd disconnect
// if we assumed the block were being requested now (ignoring blocks we've requested from this peer, since we're
// only looking at this peer's oldest request). This way a large queue in the past doesn't result in a
// permanently large window for this block to be delivered (ie if the number of blocks in flight is decreasing
// more quickly than once every 5 minutes, then we'll shorten the download window for this block).
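// Worked example, assuming the 2.5-minute Zcash target spacing: with N = 4
// validated blocks already in flight when the request was made, the timeout
// is (2 + 0.5 * 4) * 150 s = 600 s, i.e. 10 minutes from the request.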
if (!pto->fDisconnect && state.vBlocksInFlight.size() > 0) {
QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
int64_t nTimeoutIfRequestedNow = GetBlockTimeout(nNow, nQueuedValidatedHeaders - state.nBlocksInFlightValidHeaders, consensusParams);
if (queuedBlock.nTimeDisconnect > nTimeoutIfRequestedNow) {
LogPrint("net", "Reducing block download timeout for peer=%d block=%s, orig=%d new=%d\n", pto->id, queuedBlock.hash.ToString(), queuedBlock.nTimeDisconnect, nTimeoutIfRequestedNow);
queuedBlock.nTimeDisconnect = nTimeoutIfRequestedNow;
}
if (queuedBlock.nTimeDisconnect < nNow) {
LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->id);
pto->fDisconnect = true;
}
}
//
// Message: getdata (blocks)
//
vector<CInv> vGetData;
if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
vector<CBlockIndex*> vToDownload;
NodeId staller = -1;
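// FindNextBlocksToDownload (sketch of its contract): fills vToDownload with
// up to the requested number of blocks from our best header chain that this
// peer is known to have, and sets `staller` to the peer whose unfinished
// downloads are pinning the download window in place, if any.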
FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
pindex->nHeight, pto->id);
}
if (state.nBlocksInFlight == 0 && staller != -1) {
if (State(staller)->nStallingSince == 0) {
State(staller)->nStallingSince = nNow;
LogPrint("net", "Stall started peer=%d\n", staller);
}
}
}
//
// Message: getdata (non-blocks)
//
while (!pto->fDisconnect && !pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
{
const CInv& inv = (*pto->mapAskFor.begin()).second;
if (!AlreadyHave(inv))
{
if (fDebug)
LogPrint("net", "Requesting %s peer=%d\n", inv.ToString(), pto->id);
vGetData.push_back(inv);
if (vGetData.size() >= 1000)
{
pto->PushMessage("getdata", vGetData);
vGetData.clear();
}
} else {
//If we're not going to ask, don't expect a response.
pto->setAskFor.erase(inv.hash);
}
pto->mapAskFor.erase(pto->mapAskFor.begin());
}
if (!vGetData.empty())
pto->PushMessage("getdata", vGetData);
}
return true;
}
std::string CBlockFileInfo::ToString() const {
return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, DateTimeStrFormat("%Y-%m-%d", nTimeFirst), DateTimeStrFormat("%Y-%m-%d", nTimeLast));
}
class CMainCleanup
{
public:
CMainCleanup() {}
~CMainCleanup() {
// block headers
BlockMap::iterator it1 = mapBlockIndex.begin();
for (; it1 != mapBlockIndex.end(); it1++)
delete (*it1).second;
mapBlockIndex.clear();
// orphan transactions
mapOrphanTransactions.clear();
mapOrphanTransactionsByPrev.clear();
}
} instance_of_cmaincleanup;
// Set default values of new CMutableTransaction based on consensus rules at given height.
CMutableTransaction CreateNewContextualCMutableTransaction(const Consensus::Params& consensusParams, int nHeight)
{
CMutableTransaction mtx;
bool isOverwintered = NetworkUpgradeActive(nHeight, consensusParams, Consensus::UPGRADE_OVERWINTER);
if (isOverwintered) {
mtx.fOverwintered = true;
mtx.nVersionGroupId = OVERWINTER_VERSION_GROUP_ID;
mtx.nVersion = 3;
// Expiry height is not set. Only fields required for a parser to treat as a valid Overwinter V3 tx.
// TODO: In future, when moving from Overwinter to Sapling, it will be useful
// to set the expiry height to: min(activation_height - 1, default_expiry_height)
}
return mtx;
}
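// Usage sketch (hypothetical caller, not part of this file): code building a
// transaction for the next block height would typically do
//     CMutableTransaction mtx = CreateNewContextualCMutableTransaction(
//         Params().GetConsensus(), chainActive.Height() + 1);
// yielding an Overwinter v3 template once UPGRADE_OVERWINTER is active.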