When rewinding, remove insufficiently-validated blocks

If a block is insufficiently validated against a particular branch ID, then we
cannot guarantee that even its block header will be valid under the actual
consensus rules the node will want to apply. Instead, require that such blocks
be completely re-validated by removing them from the block index (which is
equivalent to reducing their validity to BLOCK_VALID_UNKNOWN).
Jack Grigg, 2018-03-15 11:49:30 +01:00
commit f5007d8912 (parent fe87fd2b36)
GPG Key ID: 665DBCD284F7DAFF
3 changed files with 31 additions and 22 deletions
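In this codebase, "insufficiently validated" means the block's cached network-upgrade state no longer matches what the current consensus parameters expect at its height. Below is a minimal sketch of such a predicate, assuming helpers along the lines of IsActivationHeightForAnyUpgrade and CurrentEpochBranchId; the actual sufficientlyValidated check used in the hunks below lives inside RewindBlockIndex and may differ in detail.

// Sketch only: an approximation of the sufficientlyValidated() predicate the
// hunks below rely on. Helper names are assumptions based on the surrounding
// codebase, not part of this commit.
auto sufficientlyValidated = [&params](const CBlockIndex* pindex) {
    const Consensus::Params& consensus = params.GetConsensus();
    // The activation flag must be set exactly on upgrade-activation heights...
    bool fFlagSet = (pindex->nStatus & BLOCK_ACTIVATES_UPGRADE) != 0;
    bool fFlagExpected = IsActivationHeightForAnyUpgrade(pindex->nHeight, consensus);
    // ...and the branch ID cached when the block was validated must match the
    // branch ID the node's current rules assign to that height.
    return fFlagSet == fFlagExpected &&
           pindex->nCachedBranchId &&
           *pindex->nCachedBranchId == CurrentEpochBranchId(pindex->nHeight, consensus);
};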

@@ -4041,9 +4041,10 @@ bool RewindBlockIndex(const CChainParams& params)
return false;
}
// Reduce validity flag and have-data flags.
// Collect blocks to be removed (blocks in mapBlockIndex must be at least BLOCK_VALID_TREE).
// We do this after actual disconnecting, otherwise we'll end up writing the lack of data
// to disk before writing the chainstate, resulting in a failure to continue if interrupted.
std::vector<const CBlockIndex*> vBlocks;
for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
CBlockIndex* pindexIter = it->second;
@@ -4053,27 +4054,8 @@ bool RewindBlockIndex(const CChainParams& params)
// rewind all the way. Blocks remaining on chainActive at this point
// must not have their validity reduced.
if (!sufficientlyValidated(pindexIter) && !chainActive.Contains(pindexIter)) {
// Reduce validity
pindexIter->nStatus =
std::min<unsigned int>(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) |
(pindexIter->nStatus & ~BLOCK_VALID_MASK);
// Remove have-data flags
pindexIter->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
// Remove branch ID
pindexIter->nStatus &= ~BLOCK_ACTIVATES_UPGRADE;
pindexIter->nCachedBranchId = boost::none;
// Remove storage location
pindexIter->nFile = 0;
pindexIter->nDataPos = 0;
pindexIter->nUndoPos = 0;
// Remove various other things
pindexIter->nTx = 0;
pindexIter->nChainTx = 0;
pindexIter->nSproutValue = boost::none;
pindexIter->nChainSproutValue = boost::none;
pindexIter->nSequenceId = 0;
// Make sure it gets written
setDirtyBlockIndex.insert(pindexIter);
// Add to the list of blocks to remove
vBlocks.push_back(pindexIter);
// Update indices
setBlockIndexCandidates.erase(pindexIter);
auto ret = mapBlocksUnlinked.equal_range(pindexIter->pprev);
@@ -4089,6 +4071,24 @@ bool RewindBlockIndex(const CChainParams& params)
}
}
// Set pindexBestHeader to the current chain tip
// (since we are about to delete the block it is pointing to)
pindexBestHeader = chainActive.Tip();
// Erase block indices on-disk
if (!pblocktree->EraseBatchSync(vBlocks)) {
return AbortNode(state, "Failed to erase from block index database");
}
// Erase block indices in-memory
for (auto pindex : vBlocks) {
auto ret = mapBlockIndex.find(*pindex->phashBlock);
if (ret != mapBlockIndex.end()) {
mapBlockIndex.erase(ret);
delete pindex;
}
}
PruneBlockIndexCandidates();
CheckBlockIndex();
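The code this commit removes downgraded nStatus in place using the block-validity bit mask; the replacement comment instead relies on everything remaining in mapBlockIndex being at least BLOCK_VALID_TREE. For reference, here is a condensed excerpt of the validity levels and availability flags involved, roughly as defined in chain.h upstream; values are shown for illustration, and the Zcash-specific BLOCK_ACTIVATES_UPGRADE bit cleared above sits alongside these (its exact value is not shown here).

enum BlockStatus {
    BLOCK_VALID_UNKNOWN      = 0, // no validation performed yet
    BLOCK_VALID_HEADER       = 1, // header parsed and proof of work checked
    BLOCK_VALID_TREE         = 2, // all parent headers found, difficulty and timestamp checked
    BLOCK_VALID_TRANSACTIONS = 3, // block data checked for internal consistency
    BLOCK_VALID_CHAIN        = 4, // outputs do not overspend inputs, no double spends
    BLOCK_VALID_SCRIPTS      = 5, // scripts and signatures verified
    BLOCK_VALID_MASK         = BLOCK_VALID_HEADER | BLOCK_VALID_TREE |
                               BLOCK_VALID_TRANSACTIONS | BLOCK_VALID_CHAIN |
                               BLOCK_VALID_SCRIPTS,

    BLOCK_HAVE_DATA          = 8,  // full block available in blk*.dat
    BLOCK_HAVE_UNDO          = 16, // undo data available in rev*.dat
};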

@@ -249,6 +249,14 @@ bool CBlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockF
return WriteBatch(batch, true);
}
bool CBlockTreeDB::EraseBatchSync(const std::vector<const CBlockIndex*>& blockinfo) {
CLevelDBBatch batch;
for (std::vector<const CBlockIndex*>::const_iterator it=blockinfo.begin(); it != blockinfo.end(); it++) {
batch.Erase(make_pair(DB_BLOCK_INDEX, (*it)->GetBlockHash()));
}
return WriteBatch(batch, true);
}
bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) {
return Read(make_pair(DB_TXINDEX, txid), pos);
}

@ -59,6 +59,7 @@ private:
void operator=(const CBlockTreeDB&);
public:
bool WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*> >& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo);
bool EraseBatchSync(const std::vector<const CBlockIndex*>& blockinfo);
bool ReadBlockFileInfo(int nFile, CBlockFileInfo &fileinfo);
bool ReadLastBlockFile(int &nFile);
bool WriteReindexing(bool fReindex);