Auto merge of #3132 - str4d:2898-rewind-block-index, r=daira
When rewinding, remove insufficiently-validated blocks. If a block is insufficiently validated against a particular branch ID, then we cannot guarantee that even the block header will be valid under the actual consensus rules the node will want to apply. Instead, require that the blocks are completely re-validated by removing them from the block index (which is equivalent to reducing their validity to BLOCK_VALID_UNKNOWN). Closes #3100.
This commit is contained in:
commit
3a47b9bcfd
|
@ -54,6 +54,7 @@ testScripts=(
|
||||||
'bip65-cltv-p2p.py'
|
'bip65-cltv-p2p.py'
|
||||||
'bipdersig-p2p.py'
|
'bipdersig-p2p.py'
|
||||||
'overwinter_peer_management.py'
|
'overwinter_peer_management.py'
|
||||||
|
'rewind_index.py'
|
||||||
);
|
);
|
||||||
testScriptsExt=(
|
testScriptsExt=(
|
||||||
'getblocktemplate_longpoll.py'
|
'getblocktemplate_longpoll.py'
|
||||||
|
|
|
@ -0,0 +1,85 @@
|
||||||
|
#!/usr/bin/env python2
# Copyright (c) 2018 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, initialize_chain_clean, \
    start_nodes, start_node, connect_nodes_bi, bitcoind_processes

import time


class RewindBlockIndexTest (BitcoinTestFramework):
    """Regression test for block-index rewinding across consensus branches.

    Node roles:
      * node 0 - Overwinter, then restarted on Sprout, then Overwinter again
      * node 1 - Sprout throughout
      * node 2 - Overwinter throughout
    """

    def setup_chain(self):
        # Start from an empty datadir with three fresh nodes.
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 3)

    def setup_network(self, split=False):
        # Node 0 - Overwinter, then Sprout, then Overwinter again
        # Node 1 - Sprout
        # Node 2 - Overwinter
        self.nodes = start_nodes(3, self.options.tmpdir, extra_args=[['-nuparams=5ba81b19:10'], [], ['-nuparams=5ba81b19:10']])
        self._connect_all()
        self.is_network_split=False
        self.sync_all()

    def _connect_all(self):
        # Wire the three nodes into a fully connected triangle.
        for left, right in ((0, 1), (1, 2), (0, 2)):
            connect_nodes_bi(self.nodes, left, right)

    def _restart_node0(self, extra_args=None):
        # Stop node 0, wait for its process to exit, relaunch it with the
        # requested arguments, and re-establish all peer connections.
        self.nodes[0].stop()
        bitcoind_processes[0].wait()
        if extra_args is None:
            self.nodes[0] = start_node(0, self.options.tmpdir)
        else:
            self.nodes[0] = start_node(0, self.options.tmpdir, extra_args=extra_args)
        self._connect_all()

    def run_test (self):
        # Bring all nodes to just before the activation block
        print("Mining blocks...")
        self.nodes[0].generate(8)
        block9 = self.nodes[0].generate(1)[0]
        self.sync_all()

        assert_equal(self.nodes[0].getbestblockhash(), block9)
        assert_equal(self.nodes[1].getbestblockhash(), block9)

        # Height 10 is where the branches diverge: node 1 mines a Sprout
        # block while node 2 mines an Overwinter block.
        print("Mining diverging blocks")
        block10s = self.nodes[1].generate(1)[0]
        block10o = self.nodes[2].generate(1)[0]
        self.sync_all()

        assert_equal(self.nodes[0].getbestblockhash(), block10o)
        assert_equal(self.nodes[1].getbestblockhash(), block10s)
        assert_equal(self.nodes[2].getbestblockhash(), block10o)

        # Restart node 0 using Sprout instead of Overwinter
        print("Switching node 0 from Overwinter to Sprout")
        self._restart_node0()

        # Assume node 1 will send block10s to node 0 quickly
        # (if we used self.sync_all() here and there was a bug, the test would hang)
        time.sleep(2)

        # Node 0 has rewound and is now on the Sprout chain
        assert_equal(self.nodes[0].getblockcount(), 10)
        assert_equal(self.nodes[0].getbestblockhash(), block10s)

        # Restart node 0 using Overwinter instead of Sprout
        print("Switching node 0 from Sprout to Overwinter")
        self._restart_node0(extra_args=['-nuparams=5ba81b19:10'])

        # Assume node 2 will send block10o to node 0 quickly
        # (if we used self.sync_all() here and there was a bug, the test would hang)
        time.sleep(2)

        # Node 0 has rewound and is now on the Overwinter chain again
        assert_equal(self.nodes[0].getblockcount(), 10)
        assert_equal(self.nodes[0].getbestblockhash(), block10o)


if __name__ == '__main__':
    RewindBlockIndexTest().main()
|
44
src/main.cpp
44
src/main.cpp
|
@ -4041,9 +4041,10 @@ bool RewindBlockIndex(const CChainParams& params)
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reduce validity flag and have-data flags.
|
// Collect blocks to be removed (blocks in mapBlockIndex must be at least BLOCK_VALID_TREE).
|
||||||
// We do this after actual disconnecting, otherwise we'll end up writing the lack of data
|
// We do this after actual disconnecting, otherwise we'll end up writing the lack of data
|
||||||
// to disk before writing the chainstate, resulting in a failure to continue if interrupted.
|
// to disk before writing the chainstate, resulting in a failure to continue if interrupted.
|
||||||
|
std::vector<const CBlockIndex*> vBlocks;
|
||||||
for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
|
for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
|
||||||
CBlockIndex* pindexIter = it->second;
|
CBlockIndex* pindexIter = it->second;
|
||||||
|
|
||||||
|
@ -4053,27 +4054,8 @@ bool RewindBlockIndex(const CChainParams& params)
|
||||||
// rewind all the way. Blocks remaining on chainActive at this point
|
// rewind all the way. Blocks remaining on chainActive at this point
|
||||||
// must not have their validity reduced.
|
// must not have their validity reduced.
|
||||||
if (!sufficientlyValidated(pindexIter) && !chainActive.Contains(pindexIter)) {
|
if (!sufficientlyValidated(pindexIter) && !chainActive.Contains(pindexIter)) {
|
||||||
// Reduce validity
|
// Add to the list of blocks to remove
|
||||||
pindexIter->nStatus =
|
vBlocks.push_back(pindexIter);
|
||||||
std::min<unsigned int>(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) |
|
|
||||||
(pindexIter->nStatus & ~BLOCK_VALID_MASK);
|
|
||||||
// Remove have-data flags
|
|
||||||
pindexIter->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO);
|
|
||||||
// Remove branch ID
|
|
||||||
pindexIter->nStatus &= ~BLOCK_ACTIVATES_UPGRADE;
|
|
||||||
pindexIter->nCachedBranchId = boost::none;
|
|
||||||
// Remove storage location
|
|
||||||
pindexIter->nFile = 0;
|
|
||||||
pindexIter->nDataPos = 0;
|
|
||||||
pindexIter->nUndoPos = 0;
|
|
||||||
// Remove various other things
|
|
||||||
pindexIter->nTx = 0;
|
|
||||||
pindexIter->nChainTx = 0;
|
|
||||||
pindexIter->nSproutValue = boost::none;
|
|
||||||
pindexIter->nChainSproutValue = boost::none;
|
|
||||||
pindexIter->nSequenceId = 0;
|
|
||||||
// Make sure it gets written
|
|
||||||
setDirtyBlockIndex.insert(pindexIter);
|
|
||||||
// Update indices
|
// Update indices
|
||||||
setBlockIndexCandidates.erase(pindexIter);
|
setBlockIndexCandidates.erase(pindexIter);
|
||||||
auto ret = mapBlocksUnlinked.equal_range(pindexIter->pprev);
|
auto ret = mapBlocksUnlinked.equal_range(pindexIter->pprev);
|
||||||
|
@ -4089,6 +4071,24 @@ bool RewindBlockIndex(const CChainParams& params)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Set pindexBestHeader to the current chain tip
|
||||||
|
// (since we are about to delete the block it is pointing to)
|
||||||
|
pindexBestHeader = chainActive.Tip();
|
||||||
|
|
||||||
|
// Erase block indices on-disk
|
||||||
|
if (!pblocktree->EraseBatchSync(vBlocks)) {
|
||||||
|
return AbortNode(state, "Failed to erase from block index database");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Erase block indices in-memory
|
||||||
|
for (auto pindex : vBlocks) {
|
||||||
|
auto ret = mapBlockIndex.find(*pindex->phashBlock);
|
||||||
|
if (ret != mapBlockIndex.end()) {
|
||||||
|
mapBlockIndex.erase(ret);
|
||||||
|
delete pindex;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
PruneBlockIndexCandidates();
|
PruneBlockIndexCandidates();
|
||||||
|
|
||||||
CheckBlockIndex();
|
CheckBlockIndex();
|
||||||
|
|
|
@ -249,6 +249,14 @@ bool CBlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockF
|
||||||
return WriteBatch(batch, true);
|
return WriteBatch(batch, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool CBlockTreeDB::EraseBatchSync(const std::vector<const CBlockIndex*>& blockinfo) {
    // Remove the given block-index entries from the database in a single
    // batch, so that either all of them or none of them are erased.
    CLevelDBBatch batch;
    for (const CBlockIndex* pindex : blockinfo) {
        batch.Erase(make_pair(DB_BLOCK_INDEX, pindex->GetBlockHash()));
    }
    // Second argument requests a synchronous (flushed) write.
    return WriteBatch(batch, true);
}
|
||||||
|
|
||||||
bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) {
|
bool CBlockTreeDB::ReadTxIndex(const uint256 &txid, CDiskTxPos &pos) {
|
||||||
return Read(make_pair(DB_TXINDEX, txid), pos);
|
return Read(make_pair(DB_TXINDEX, txid), pos);
|
||||||
}
|
}
|
||||||
|
|
|
@ -59,6 +59,7 @@ private:
|
||||||
void operator=(const CBlockTreeDB&);
|
void operator=(const CBlockTreeDB&);
|
||||||
public:
|
public:
|
||||||
bool WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*> >& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo);
|
bool WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*> >& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo);
|
||||||
|
bool EraseBatchSync(const std::vector<const CBlockIndex*>& blockinfo);
|
||||||
bool ReadBlockFileInfo(int nFile, CBlockFileInfo &fileinfo);
|
bool ReadBlockFileInfo(int nFile, CBlockFileInfo &fileinfo);
|
||||||
bool ReadLastBlockFile(int &nFile);
|
bool ReadLastBlockFile(int &nFile);
|
||||||
bool WriteReindexing(bool fReindex);
|
bool WriteReindexing(bool fReindex);
|
||||||
|
|
Loading…
Reference in New Issue