Handle mempool requests in send loop, subject to trickle

By eliminating queued entries from the mempool response and responding only at
trickle time, this change prevents the mempool message from leaking transaction
arrival-order information (since the mempool itself is sorted) — at least, it
leaks no more than transaction relay itself does.

(cherry picked from commit ed7068302c7490e8061cb3a558a0f83a465beeea)
This commit is contained in:
Pieter Wuille 2016-04-08 16:26:41 +02:00 committed by Jack Grigg
parent 709c5163d4
commit 86f8b51096
3 changed files with 43 additions and 26 deletions

View File

@@ -6923,31 +6923,8 @@ bool static ProcessMessage(const CChainParams& chainparams, CNode* pfrom, string
return true;
}
LOCK2(cs_main, pfrom->cs_filter);
std::vector<uint256> vtxid;
mempool.queryHashes(vtxid);
vector<CInv> vInv;
for (uint256& hash : vtxid) {
CTransaction tx;
bool fInMemPool = mempool.lookup(hash, tx);
if (fInMemPool && IsExpiringSoonTx(tx, currentHeight + 1)) {
continue;
}
CInv inv(MSG_TX, hash);
if (pfrom->pfilter) {
if (!fInMemPool) continue; // another thread removed since queryHashes, maybe...
if (!pfrom->pfilter->IsRelevantAndUpdate(tx)) continue;
}
vInv.push_back(inv);
if (vInv.size() == MAX_INV_SZ) {
pfrom->PushMessage("inv", vInv);
vInv.clear();
}
}
if (vInv.size() > 0)
pfrom->PushMessage("inv", vInv);
LOCK(pfrom->cs_inventory);
pfrom->fSendMempool = true;
}
@@ -7454,13 +7431,46 @@ bool SendMessages(const Consensus::Params& params, CNode* pto)
}
pto->vInventoryBlockToSend.clear();
// Determine transactions to relay
// Check whether periodic sends should happen
bool fSendTrickle = pto->fWhitelisted;
if (pto->nNextInvSend < nNow) {
fSendTrickle = true;
// Use half the delay for outbound peers, as there is less privacy concern for them.
pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
}
// Respond to BIP35 mempool requests
if (fSendTrickle && pto->fSendMempool) {
std::vector<uint256> vtxid;
mempool.queryHashes(vtxid);
pto->fSendMempool = false;
LOCK(pto->cs_filter);
int currentHeight = GetHeight();
for (const uint256& hash : vtxid) {
CTransaction tx;
bool fInMemPool = mempool.lookup(hash, tx);
if (fInMemPool && IsExpiringSoonTx(tx, currentHeight + 1)) {
continue;
}
CInv inv(MSG_TX, hash);
pto->setInventoryTxToSend.erase(hash);
if (pto->pfilter) {
if (!fInMemPool) continue; // another thread removed since queryHashes, maybe...
if (!pto->pfilter->IsRelevantAndUpdate(tx)) continue;
}
pto->filterInventoryKnown.insert(hash);
vInv.push_back(inv);
if (vInv.size() == MAX_INV_SZ) {
pto->PushMessage("inv", vInv);
vInv.clear();
}
}
}
// Determine transactions to relay
if (fSendTrickle) {
// Produce a vector with all candidates for sending
vector<std::set<uint256>::iterator> vInvTx;
@@ -7490,6 +7500,10 @@ bool SendMessages(const Consensus::Params& params, CNode* pto)
// Send
vInv.push_back(CInv(MSG_TX, hash));
nRelayedTransactions++;
if (vInv.size() == MAX_INV_SZ) {
pto->PushMessage("inv", vInv);
vInv.clear();
}
pto->filterInventoryKnown.insert(hash);
}
}

View File

@@ -2245,6 +2245,7 @@ CNode::CNode(SOCKET hSocketIn, const CAddress& addrIn, const std::string& addrNa
hashContinue = uint256();
nStartingHeight = -1;
filterInventoryKnown.reset();
fSendMempool = false;
fGetAddr = false;
nNextLocalAddrSend = 0;
nNextAddrSend = 0;

View File

@@ -352,6 +352,8 @@ public:
std::set<uint256> setAskFor;
std::multimap<int64_t, CInv> mapAskFor;
int64_t nNextInvSend;
// Used for BIP35 mempool sending, also protected by cs_inventory
bool fSendMempool;
// Ping time measurement:
// The pong reply we're expecting, or 0 if no pong expected.