Merge #11824: Block ActivateBestChain to empty validationinterface queue

97d2b09c12 Add helper to wait for validation interface queue to catch up (Matt Corallo)
36137497f1 Block ActivateBestChain to empty validationinterface queue (Matt Corallo)
5a933cefcc Add an interface to get the queue depth out of CValidationInterface (Matt Corallo)
a99b76f269 Require no cs_main lock for ProcessNewBlock/ActivateBestChain (Matt Corallo)
a734896038 Avoid cs_main in net_processing ActivateBestChain calls (Matt Corallo)
66aa1d58a1 Refactor ProcessGetData in anticipation of avoiding cs_main for ABC (Matt Corallo)
818075adac Create new mutex for orphans, no cs_main in PLV::BlockConnected (Matt Corallo)

Pull request description:

  This should fix #11822.

  It ended up bigger than I hoped for, but it's not too gnarly. Note that "Require no cs_main lock for ProcessNewBlock/ActivateBestChain" is mostly pure code movement.

Tree-SHA512: 1127688545926f6099449dca6a4e6609eefc3abbd72f1c66e03d32bd8c7b31e82097d8307822cfd1dec0321703579cfdd82069cab6e17b1024e75eac694122cb
Committed by Pieter Wuille, 2017-12-29 01:45:17 -08:00
commit d9fdac130a
GPG Key ID: A636E97631F767E0
11 changed files with 293 additions and 214 deletions

src/net_processing.cpp

@@ -51,12 +51,13 @@ struct COrphanTx {
    NodeId fromPeer;
    int64_t nTimeExpire;
};
-std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
-std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
-void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+static CCriticalSection g_cs_orphans;
+std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
+std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
+void EraseOrphansFor(NodeId peer);

-static size_t vExtraTxnForCompactIt = 0;
-static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(cs_main);
+static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
+static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8]
@@ -127,7 +128,7 @@ namespace {
    int g_outbound_peers_with_protect_from_disconnect = 0;

    /** When our tip was last updated. */
-   int64_t g_last_tip_update = 0;
+   std::atomic<int64_t> g_last_tip_update(0);

    /** Relay map, protected by cs_main. */
    typedef std::map<uint256, CTransactionRef> MapRelay;
@@ -631,7 +632,7 @@ bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
// mapOrphanTransactions
//

-void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
    if (max_extra_txn <= 0)
@@ -642,7 +643,7 @@ void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_RE
    vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
}

-bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    const uint256& hash = tx->GetHash();
    if (mapOrphanTransactions.count(hash))
@@ -675,7 +676,7 @@ bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRE
    return true;
}

-int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans)
{
    std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
@@ -695,6 +696,7 @@ int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)

void EraseOrphansFor(NodeId peer)
{
+   LOCK(g_cs_orphans);
    int nErased = 0;
    std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
    while (iter != mapOrphanTransactions.end())
@@ -709,8 +711,10 @@ void EraseOrphansFor(NodeId peer)
}

-unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
{
+   LOCK(g_cs_orphans);
+
    unsigned int nEvicted = 0;
    static int64_t nNextSweep;
    int64_t nNow = GetTime();
@@ -804,7 +808,7 @@ PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, CScheduler &schedu
}

void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) {
-   LOCK(cs_main);
+   LOCK(g_cs_orphans);

    std::vector<uint256> vOrphanErase;
@@ -971,9 +975,13 @@ bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
            recentRejects->reset();
        }

+       {
+           LOCK(g_cs_orphans);
+           if (mapOrphanTransactions.count(inv.hash)) return true;
+       }
+
        return recentRejects->contains(inv.hash) ||
               mempool.exists(inv.hash) ||
-              mapOrphanTransactions.count(inv.hash) ||
               pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1
               pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1));
    }
@@ -1030,180 +1038,198 @@ static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connma
    connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
}

void static ProcessGetBlockData(CNode* pfrom, const Consensus::Params& consensusParams, const CInv& inv, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
{
bool send = false;
std::shared_ptr<const CBlock> a_recent_block;
std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
bool fWitnessesPresentInARecentCompactBlock;
{
LOCK(cs_most_recent_block);
a_recent_block = most_recent_block;
a_recent_compact_block = most_recent_compact_block;
fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
}
bool need_activate_chain = false;
{
LOCK(cs_main);
BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
if (mi != mapBlockIndex.end())
{
if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
mi->second->IsValid(BLOCK_VALID_TREE)) {
// If we have the block and all of its parents, but have not yet validated it,
// we might be in the middle of connecting it (ie in the unlock of cs_main
// before ActivateBestChain but after AcceptBlock).
// In this case, we need to run ActivateBestChain prior to checking the relay
// conditions below.
need_activate_chain = true;
}
}
} // release cs_main before calling ActivateBestChain
if (need_activate_chain) {
CValidationState dummy;
ActivateBestChain(dummy, Params(), a_recent_block);
}
LOCK(cs_main);
BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
if (mi != mapBlockIndex.end()) {
send = BlockRequestAllowed(mi->second, consensusParams);
if (!send) {
LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
}
}
const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
// disconnect node in case we have reached the outbound limit for serving historical blocks
// never disconnect whitelisted nodes
if (send && connman->OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
{
LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
//disconnect node
pfrom->fDisconnect = true;
send = false;
}
// Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold
if (send && !pfrom->fWhitelisted && (
(((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (chainActive.Tip()->nHeight - mi->second->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) )
)) {
LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold from peer=%d\n", pfrom->GetId());
//disconnect node and prevent it from stalling (would otherwise wait for the missing block)
pfrom->fDisconnect = true;
send = false;
}
// Pruned nodes may have deleted the block, so check whether
// it's available before trying to send.
if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
{
std::shared_ptr<const CBlock> pblock;
if (a_recent_block && a_recent_block->GetHash() == (*mi).second->GetBlockHash()) {
pblock = a_recent_block;
} else {
// Send block from disk
std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
if (!ReadBlockFromDisk(*pblockRead, (*mi).second, consensusParams))
assert(!"cannot load block from disk");
pblock = pblockRead;
}
if (inv.type == MSG_BLOCK)
connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
else if (inv.type == MSG_WITNESS_BLOCK)
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
else if (inv.type == MSG_FILTERED_BLOCK)
{
bool sendMerkleBlock = false;
CMerkleBlock merkleBlock;
{
LOCK(pfrom->cs_filter);
if (pfrom->pfilter) {
sendMerkleBlock = true;
merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter);
}
}
if (sendMerkleBlock) {
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
// CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
// This avoids hurting performance by pointlessly requiring a round-trip
// Note that there is currently no way for a node to request any single transactions we didn't send here -
// they must either disconnect and retry or request the full block.
// Thus, the protocol spec specified allows for us to provide duplicate txn here,
// however we MUST always provide at least what the remote peer needs
typedef std::pair<unsigned int, uint256> PairType;
for (PairType& pair : merkleBlock.vMatchedTxn)
connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
}
// else
// no response
}
else if (inv.type == MSG_CMPCT_BLOCK)
{
// If a peer is asking for old blocks, we're almost guaranteed
// they won't have a useful mempool to match against a compact block,
// and we don't feel like constructing the object for them, so
// instead we respond with the full, non-compact block.
bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == mi->second->GetBlockHash()) {
connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
} else {
CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
}
} else {
connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
}
}
// Trigger the peer node to send a getblocks request for the next batch of inventory
if (inv.hash == pfrom->hashContinue)
{
// Bypass PushInventory, this must send even if redundant,
// and we want it right after the last block so they don't
// wait for other stuff first.
std::vector<CInv> vInv;
vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
pfrom->hashContinue.SetNull();
}
}
}
void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
{
+   AssertLockNotHeld(cs_main);
+
    std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
    std::vector<CInv> vNotFound;
    const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
-   LOCK(cs_main);
-
-   while (it != pfrom->vRecvGetData.end()) {
-       // Don't bother if send buffer is too full to respond anyway
-       if (pfrom->fPauseSend)
-           break;
-
-       const CInv &inv = *it;
-       {
-           if (interruptMsgProc)
-               return;
-           it++;
-
-           if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
-           {
-               [inline block-serving code for block-type invs, moved into ProcessGetBlockData above]
-           }
-           else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX)
-           {
-               // Send stream from relay memory
-               bool push = false;
-               auto mi = mapRelay.find(inv.hash);
-               int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
-               if (mi != mapRelay.end()) {
-                   connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
-                   push = true;
-               } else if (pfrom->timeLastMempoolReq) {
-                   auto txinfo = mempool.info(inv.hash);
-                   // To protect privacy, do not answer getdata using the mempool when
-                   // that TX couldn't have been INVed in reply to a MEMPOOL request.
-                   if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
-                       connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
-                       push = true;
-                   }
-               }
-               if (!push) {
-                   vNotFound.push_back(inv);
-               }
-           }
-
-           // Track requests for our stuff.
-           GetMainSignals().Inventory(inv.hash);
-
-           if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
-               break;
+   {
+       LOCK(cs_main);
+
+       while (it != pfrom->vRecvGetData.end() && (it->type == MSG_TX || it->type == MSG_WITNESS_TX)) {
+           if (interruptMsgProc)
+               return;
+           // Don't bother if send buffer is too full to respond anyway
+           if (pfrom->fPauseSend)
+               break;
+
+           const CInv &inv = *it;
+           it++;
+
+           // Send stream from relay memory
+           bool push = false;
+           auto mi = mapRelay.find(inv.hash);
+           int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
+           if (mi != mapRelay.end()) {
+               connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
+               push = true;
+           } else if (pfrom->timeLastMempoolReq) {
+               auto txinfo = mempool.info(inv.hash);
+               // To protect privacy, do not answer getdata using the mempool when
+               // that TX couldn't have been INVed in reply to a MEMPOOL request.
+               if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
+                   connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
+                   push = true;
+               }
+           }
+           if (!push) {
+               vNotFound.push_back(inv);
+           }
+
+           // Track requests for our stuff.
+           GetMainSignals().Inventory(inv.hash);
+       }
+   } // release cs_main
+
+   if (it != pfrom->vRecvGetData.end()) {
+       const CInv &inv = *it;
+       it++;
+       if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK) {
+           ProcessGetBlockData(pfrom, consensusParams, inv, connman, interruptMsgProc);
        }
    }
@@ -2008,7 +2034,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
        inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
        inv.hash = req.blockhash;
        pfrom->vRecvGetData.push_back(inv);
-       ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
+       // The message processing loop will go around again (without pausing) and we'll respond then (without cs_main)
        return true;
    }
@@ -2101,7 +2127,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
        CInv inv(MSG_TX, tx.GetHash());
        pfrom->AddInventoryKnown(inv);

-       LOCK(cs_main);
+       LOCK2(cs_main, g_cs_orphans);

        bool fMissingInputs = false;
        CValidationState state;
@@ -2324,7 +2350,7 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
        bool fBlockReconstructed = false;
        {
-           LOCK(cs_main);
+           LOCK2(cs_main, g_cs_orphans);
            // If AcceptBlockHeader returned true, it set pindex
            assert(pindex);
            UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());

src/scheduler.cpp

@@ -206,3 +206,8 @@ void SingleThreadedSchedulerClient::EmptyQueue() {
        should_continue = !m_callbacks_pending.empty();
    }
}
+
+size_t SingleThreadedSchedulerClient::CallbacksPending() {
+    LOCK(m_cs_callbacks_pending);
+    return m_callbacks_pending.size();
+}
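For context: the new CallbacksPending() accessor is just a locked size query on the pending-callback list, which a producer can poll to decide whether to throttle itself. A minimal standalone sketch of the same idea (illustrative only, not the Bitcoin Core class; all names here are made up for the example):

#include <functional>
#include <list>
#include <mutex>

// Toy single-threaded callback queue that exposes its depth, analogous in
// spirit to SingleThreadedSchedulerClient::CallbacksPending() above.
class CallbackQueue {
    std::mutex m_cs;                              // guards m_pending
    std::list<std::function<void()>> m_pending;   // queued callbacks
public:
    void Add(std::function<void()> f) {
        std::lock_guard<std::mutex> lock(m_cs);
        m_pending.push_back(std::move(f));
    }
    // Drain and run everything on the calling thread.
    void ProcessAll() {
        std::list<std::function<void()>> work;
        {
            std::lock_guard<std::mutex> lock(m_cs);
            work.swap(m_pending);
        }
        for (auto& f : work) f();
    }
    // Depth query a producer can use to decide whether to back off.
    size_t CallbacksPending() {
        std::lock_guard<std::mutex> lock(m_cs);
        return m_pending.size();
    }
};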

src/scheduler.h

@@ -108,6 +108,8 @@ public:
    // Processes all remaining queue members on the calling thread, blocking until queue is empty
    // Must be called after the CScheduler has no remaining processing threads!
    void EmptyQueue();
+
+   size_t CallbacksPending();
};

#endif

src/test/miner_tests.cpp

@@ -216,7 +216,6 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
    entry.nFee = 11;
    entry.nHeight = 11;

-   LOCK(cs_main);
    fCheckpointsEnabled = false;

    // Simple block creation, nothing special yet:
@ -229,27 +228,32 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
    for (unsigned int i = 0; i < sizeof(blockinfo)/sizeof(*blockinfo); ++i)
    {
        CBlock *pblock = &pblocktemplate->block; // pointer for convenience
-       pblock->nVersion = 1;
-       pblock->nTime = chainActive.Tip()->GetMedianTimePast()+1;
-       CMutableTransaction txCoinbase(*pblock->vtx[0]);
-       txCoinbase.nVersion = 1;
-       txCoinbase.vin[0].scriptSig = CScript();
-       txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce);
-       txCoinbase.vin[0].scriptSig.push_back(chainActive.Height());
-       txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this)
-       txCoinbase.vout[0].scriptPubKey = CScript();
-       pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
-       if (txFirst.size() == 0)
-           baseheight = chainActive.Height();
-       if (txFirst.size() < 4)
-           txFirst.push_back(pblock->vtx[0]);
-       pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
-       pblock->nNonce = blockinfo[i].nonce;
+       {
+           LOCK(cs_main);
+           pblock->nVersion = 1;
+           pblock->nTime = chainActive.Tip()->GetMedianTimePast()+1;
+           CMutableTransaction txCoinbase(*pblock->vtx[0]);
+           txCoinbase.nVersion = 1;
+           txCoinbase.vin[0].scriptSig = CScript();
+           txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce);
+           txCoinbase.vin[0].scriptSig.push_back(chainActive.Height());
+           txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this)
+           txCoinbase.vout[0].scriptPubKey = CScript();
+           pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase));
+           if (txFirst.size() == 0)
+               baseheight = chainActive.Height();
+           if (txFirst.size() < 4)
+               txFirst.push_back(pblock->vtx[0]);
+           pblock->hashMerkleRoot = BlockMerkleRoot(*pblock);
+           pblock->nNonce = blockinfo[i].nonce;
+       }
        std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock);
        BOOST_CHECK(ProcessNewBlock(chainparams, shared_pblock, true, nullptr));
        pblock->hashPrevBlock = pblock->GetHash();
    }

+   LOCK(cs_main);
    // Just to make sure we can still make simple blocks
    BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey));

src/test/test_bitcoin.cpp

@@ -69,9 +69,9 @@ TestingSetup::TestingSetup(const std::string& chainName) : BasicTestingSetup(cha
        fs::create_directories(pathTemp);
        gArgs.ForceSetArg("-datadir", pathTemp.string());

-       // Note that because we don't bother running a scheduler thread here,
-       // callbacks via CValidationInterface are unreliable, but that's OK,
-       // our unit tests aren't testing multiple parts of the code at once.
+       // We have to run a scheduler thread to prevent ActivateBestChain
+       // from blocking due to queue overrun.
+       threadGroup.create_thread(boost::bind(&CScheduler::serviceQueue, &scheduler));
        GetMainSignals().RegisterBackgroundSignalScheduler(scheduler);

        mempool.setSanityCheck(1.0);

src/test/txvalidationcache_tests.cpp

@@ -66,7 +66,6 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup)
    // Test 1: block with both of those transactions should be rejected.
    block = CreateAndProcessBlock(spends, scriptPubKey);
-   LOCK(cs_main);
    BOOST_CHECK(chainActive.Tip()->GetBlockHash() != block.GetHash());

    // Test 2: ... and should be rejected if spend1 is in the memory pool
@@ -190,12 +189,12 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup)
        spend_tx.vin[0].scriptSig << vchSig;
    }

-   LOCK(cs_main);
-
    // Test that invalidity under a set of flags doesn't preclude validity
    // under other (eg consensus) flags.
    // spend_tx is invalid according to DERSIG
    {
+       LOCK(cs_main);
+
        CValidationState state;
        PrecomputedTransactionData ptd_spend_tx(spend_tx);
@ -213,16 +212,18 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup)
        // test later that block validation works fine in the absence of cached
        // successes.
        ValidateCheckInputsForAllFlags(spend_tx, SCRIPT_VERIFY_DERSIG | SCRIPT_VERIFY_LOW_S | SCRIPT_VERIFY_STRICTENC, false);
-
-       // And if we produce a block with this tx, it should be valid (DERSIG not
-       // enabled yet), even though there's no cache entry.
-       CBlock block;
-
-       block = CreateAndProcessBlock({spend_tx}, p2pk_scriptPubKey);
-       BOOST_CHECK(chainActive.Tip()->GetBlockHash() == block.GetHash());
-       BOOST_CHECK(pcoinsTip->GetBestBlock() == block.GetHash());
    }

+   // And if we produce a block with this tx, it should be valid (DERSIG not
+   // enabled yet), even though there's no cache entry.
+   CBlock block;
+
+   block = CreateAndProcessBlock({spend_tx}, p2pk_scriptPubKey);
+   BOOST_CHECK(chainActive.Tip()->GetBlockHash() == block.GetHash());
+   BOOST_CHECK(pcoinsTip->GetBestBlock() == block.GetHash());
+
+   LOCK(cs_main);
+
    // Test P2SH: construct a transaction that is valid without P2SH, and
    // then test validity with P2SH.
    {

src/validation.cpp

@@ -40,6 +40,7 @@
#include <validationinterface.h>
#include <warnings.h>

+#include <future>
#include <sstream>

#include <boost/algorithm/string/replace.hpp>
@@ -2559,12 +2560,21 @@ bool CChainState::ActivateBestChain(CValidationState &state, const CChainParams&
    // far from a guarantee. Things in the P2P/RPC will often end up calling
    // us in the middle of ProcessNewBlock - do not assume pblock is set
    // sanely for performance or correctness!
+   AssertLockNotHeld(cs_main);

    CBlockIndex *pindexMostWork = nullptr;
    CBlockIndex *pindexNewTip = nullptr;
    int nStopAtHeight = gArgs.GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
    do {
        boost::this_thread::interruption_point();

+       if (GetMainSignals().CallbacksPending() > 10) {
+           // Block until the validation queue drains. This should largely
+           // never happen in normal operation, however may happen during
+           // reindex, causing memory blowup if we run too far ahead.
+           SyncWithValidationInterfaceQueue();
+       }
+
        if (ShutdownRequested())
            break;
@@ -3383,6 +3393,8 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CVali
bool ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool fForceProcessing, bool *fNewBlock)
{
+   AssertLockNotHeld(cs_main);
+
    {
        CBlockIndex *pindex = nullptr;
        if (fNewBlock) *fNewBlock = false;
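Taken together, the changes above implement simple back-pressure: each iteration of the ActivateBestChain loop polls the validation-interface queue depth and, past a small threshold, blocks until the queue has drained by posting a no-op callback and waiting for it to run. A self-contained sketch of that pattern (illustrative only; BackgroundQueue, SyncWithQueue and the threshold here are assumptions of this sketch, not Bitcoin Core APIs):

#include <condition_variable>
#include <functional>
#include <future>
#include <mutex>
#include <queue>
#include <thread>

// Toy background callback queue with a worker thread and a depth query.
class BackgroundQueue {
    std::mutex m_cs;
    std::condition_variable m_cv;
    std::queue<std::function<void()>> m_q;
    bool m_stop = false;
    std::thread m_worker;
    void Run() {
        for (;;) {
            std::function<void()> f;
            {
                std::unique_lock<std::mutex> lock(m_cs);
                m_cv.wait(lock, [&] { return m_stop || !m_q.empty(); });
                if (m_q.empty()) return;   // only exits once stopped and drained
                f = std::move(m_q.front());
                m_q.pop();
            }
            f();
        }
    }
public:
    BackgroundQueue() : m_worker(&BackgroundQueue::Run, this) {}
    ~BackgroundQueue() {
        { std::lock_guard<std::mutex> lock(m_cs); m_stop = true; }
        m_cv.notify_all();
        m_worker.join();
    }
    void Add(std::function<void()> f) {
        { std::lock_guard<std::mutex> lock(m_cs); m_q.push(std::move(f)); }
        m_cv.notify_one();
    }
    size_t CallbacksPending() {
        std::lock_guard<std::mutex> lock(m_cs);
        return m_q.size();
    }
};

// Equivalent in spirit to SyncWithValidationInterfaceQueue(): block until
// everything queued before this call has been processed.
void SyncWithQueue(BackgroundQueue& q) {
    std::promise<void> promise;
    q.Add([&promise] { promise.set_value(); });   // no-op marker callback
    promise.get_future().wait();
}

int main() {
    BackgroundQueue q;
    for (int i = 0; i < 100; ++i) {
        q.Add([] { /* simulated validation callback */ });
        if (q.CallbacksPending() > 10) {   // same threshold as the diff above
            SyncWithQueue(q);              // throttle the producer
        }
    }
}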

src/validationinterface.cpp

@@ -11,9 +11,11 @@
#include <sync.h>
#include <txmempool.h>
#include <util.h>
+#include <validation.h>

#include <list>
#include <atomic>
+#include <future>

#include <boost/signals2/signal.hpp>
@@ -54,6 +56,11 @@ void CMainSignals::FlushBackgroundCallbacks() {
    }
}

+size_t CMainSignals::CallbacksPending() {
+    if (!m_internals) return 0;
+    return m_internals->m_schedulerClient.CallbacksPending();
+}
+
void CMainSignals::RegisterWithMempoolSignals(CTxMemPool& pool) {
    pool.NotifyEntryRemoved.connect(boost::bind(&CMainSignals::MempoolEntryRemoved, this, _1, _2));
}
@@ -113,6 +120,16 @@ void CallFunctionInValidationInterfaceQueue(std::function<void ()> func) {
    g_signals.m_internals->m_schedulerClient.AddToProcessQueue(std::move(func));
}

+void SyncWithValidationInterfaceQueue() {
+    AssertLockNotHeld(cs_main);
+    // Block until the validation queue drains
+    std::promise<void> promise;
+    CallFunctionInValidationInterfaceQueue([&promise] {
+        promise.set_value();
+    });
+    promise.get_future().wait();
+}
+
void CMainSignals::MempoolEntryRemoved(CTransactionRef ptx, MemPoolRemovalReason reason) {
    if (reason != MemPoolRemovalReason::BLOCK && reason != MemPoolRemovalReason::CONFLICT) {
        m_internals->m_schedulerClient.AddToProcessQueue([ptx, this] {

src/validationinterface.h

@@ -42,6 +42,16 @@ void UnregisterAllValidationInterfaces();
 * will result in a deadlock (that DEBUG_LOCKORDER will miss).
 */
void CallFunctionInValidationInterfaceQueue(std::function<void ()> func);
+/**
+ * This is a synonym for the following, which asserts certain locks are not
+ * held:
+ *     std::promise<void> promise;
+ *     CallFunctionInValidationInterfaceQueue([&promise] {
+ *         promise.set_value();
+ *     });
+ *     promise.get_future().wait();
+ */
+void SyncWithValidationInterfaceQueue();

class CValidationInterface {
protected:
@ -131,6 +141,8 @@ public:
    /** Call any remaining callbacks on the calling thread */
    void FlushBackgroundCallbacks();

+   size_t CallbacksPending();
+
    /** Register with mempool to call TransactionRemovedFromMempool callbacks */
    void RegisterWithMempoolSignals(CTxMemPool& pool);
    /** Unregister with mempool */

src/wallet/test/wallet_tests.cpp

@@ -370,8 +370,6 @@ static void AddKey(CWallet& wallet, const CKey& key)
BOOST_FIXTURE_TEST_CASE(rescan, TestChain100Setup)
{
-   LOCK(cs_main);
-
    // Cap last block file size, and mine new block in a new block file.
    CBlockIndex* const nullBlock = nullptr;
    CBlockIndex* oldTip = chainActive.Tip();
@ -379,6 +377,8 @@ BOOST_FIXTURE_TEST_CASE(rescan, TestChain100Setup)
    CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
    CBlockIndex* newTip = chainActive.Tip();

+   LOCK(cs_main);
+
    // Verify ScanForWalletTransactions picks up transactions in both the old
    // and new block files.
    {
@ -447,8 +447,6 @@ BOOST_FIXTURE_TEST_CASE(rescan, TestChain100Setup)
// than or equal to key birthday.
BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
{
-   LOCK(cs_main);
-
    // Create two blocks with same timestamp to verify that importwallet rescan
    // will pick up both blocks, not just the first.
    const int64_t BLOCK_TIME = chainActive.Tip()->GetBlockTimeMax() + 5;
@ -462,6 +460,8 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
    SetMockTime(KEY_TIME);
    coinbaseTxns.emplace_back(*CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);

+   LOCK(cs_main);
+
    // Import key into wallet and call dumpwallet to create backup file.
    {
        CWallet wallet;
@ -627,10 +627,15 @@ public:
        BOOST_CHECK(wallet->CreateTransaction({recipient}, wtx, reservekey, fee, changePos, error, dummy));
        CValidationState state;
        BOOST_CHECK(wallet->CommitTransaction(wtx, reservekey, nullptr, state));
+       CMutableTransaction blocktx;
+       {
+           LOCK(wallet->cs_wallet);
+           blocktx = CMutableTransaction(*wallet->mapWallet.at(wtx.GetHash()).tx);
+       }
+       CreateAndProcessBlock({CMutableTransaction(blocktx)}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
        LOCK(wallet->cs_wallet);
        auto it = wallet->mapWallet.find(wtx.GetHash());
        BOOST_CHECK(it != wallet->mapWallet.end());
-       CreateAndProcessBlock({CMutableTransaction(*it->second.tx)}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
        it->second.SetMerkleBranch(chainActive.Tip(), 1);
        return it->second;
    }
@ -641,7 +646,6 @@ public:
BOOST_FIXTURE_TEST_CASE(ListCoins, ListCoinsTestingSetup)
{
    std::string coinbaseAddress = coinbaseKey.GetPubKey().GetID().ToString();
-   LOCK2(cs_main, wallet->cs_wallet);

    // Confirm ListCoins initially returns 1 coin grouped under coinbaseKey
    // address.
@ -669,6 +673,7 @@ BOOST_FIXTURE_TEST_CASE(ListCoins, ListCoinsTestingSetup)
    BOOST_CHECK_EQUAL(available.size(), 2);
    for (const auto& group : list) {
        for (const auto& coin : group.second) {
+           LOCK(wallet->cs_wallet);
            wallet->LockCoin(COutPoint(coin.tx->GetHash(), coin.i));
        }
    }

src/wallet/wallet.cpp

@@ -1292,12 +1292,7 @@ void CWallet::BlockUntilSyncedToCurrentChain() {
    // ...otherwise put a callback in the validation interface queue and wait
    // for the queue to drain enough to execute it (indicating we are caught up
    // at least with the time we entered this function).
+   SyncWithValidationInterfaceQueue();
-   std::promise<void> promise;
-   CallFunctionInValidationInterfaceQueue([&promise] {
-       promise.set_value();
-   });
-   promise.get_future().wait();
} }