Merge pull request #5624 from str4d/fix-nu5-testnet-bug

Test and fix for chain forks on NU5 testnet
This commit is contained in:
Kris Nuttycombe 2022-03-03 08:29:40 -07:00 committed by GitHub
commit 1732a50957
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 212 additions and 16 deletions

View File

@@ -60,6 +60,7 @@ BASE_SCRIPTS= [
'wallet_persistence.py',
'wallet_listnotes.py',
# vv Tests less than 60s vv
'orchard_reorg.py',
'fundrawtransaction.py',
'reorg_limit.py',
'mempool_limit.py',

146
qa/rpc-tests/orchard_reorg.py Executable file
View File

@@ -0,0 +1,146 @@
#!/usr/bin/env python3
# Copyright (c) 2022 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
#
# Test the effect of reorgs on the Orchard commitment tree.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
BLOSSOM_BRANCH_ID,
HEARTWOOD_BRANCH_ID,
CANOPY_BRANCH_ID,
NU5_BRANCH_ID,
assert_equal,
connect_nodes_bi,
get_coinbase_address,
nuparams,
start_nodes,
stop_nodes,
sync_blocks,
wait_and_assert_operationid_status,
wait_bitcoinds,
)
from finalsaplingroot import ORCHARD_TREE_EMPTY_ROOT
from decimal import Decimal
class OrchardReorgTest(BitcoinTestFramework):
    """
    Test the effect of chain reorgs on the Orchard note commitment tree.

    Verifies that the `finalorchardroot` field reported by `getblock` tracks
    the Orchard commitment tree correctly: it stays at the empty root until an
    Orchard note is mined, changes when one is, and is correctly rewound when
    the node reorgs onto a chain that lacks the later Orchard transaction.
    """

    def __init__(self):
        super().__init__()
        # Four nodes so split_network() can partition them into (0,1) | (2,3).
        self.num_nodes = 4
        self.setup_clean_chain = True

    def setup_nodes(self):
        # Schedule the upgrades so that NU5 (and therefore Orchard) activates
        # at height 10 on this regtest chain.
        return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=[[
            nuparams(BLOSSOM_BRANCH_ID, 1),
            nuparams(HEARTWOOD_BRANCH_ID, 5),
            nuparams(CANOPY_BRANCH_ID, 5),
            nuparams(NU5_BRANCH_ID, 10),
            '-nurejectoldversions=false',
            '-experimentalfeatures',
            '-orchardwallet',
            # '-debug',
        ]] * self.num_nodes)

    def run_test(self):
        # Activate NU5 so we can test Orchard.
        self.nodes[0].generate(10)
        self.sync_all()

        # Generate a UA with only an Orchard receiver.
        account = self.nodes[0].z_getnewaccount()['account']
        addr = self.nodes[0].z_getaddressforaccount(account, ['orchard'])
        assert_equal(addr['account'], account)
        assert_equal(set(addr['pools']), set(['orchard']))
        ua = addr['unifiedaddress']

        # Before mining any Orchard notes, finalorchardroot should be the empty Orchard root.
        assert_equal(
            ORCHARD_TREE_EMPTY_ROOT,
            self.nodes[0].getblock(self.nodes[0].getbestblockhash())['finalorchardroot'],
        )

        # finalorchardroot should not change if we mine additional blocks without Orchard notes.
        self.nodes[0].generate(100)
        self.sync_all()
        assert_equal(
            ORCHARD_TREE_EMPTY_ROOT,
            self.nodes[0].getblock(self.nodes[0].getbestblockhash())['finalorchardroot'],
        )

        # Create an Orchard note (funded from a coinbase output, zero fee).
        recipients = [{'address': ua, 'amount': Decimal('12.5')}]
        opid = self.nodes[0].z_sendmany(get_coinbase_address(self.nodes[0]), recipients, 1, 0)
        wait_and_assert_operationid_status(self.nodes[0], opid)

        # After mining a block, finalorchardroot should have changed.
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        orchardroot_oneleaf = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['finalorchardroot']
        print("Root of Orchard commitment tree with one leaf:", orchardroot_oneleaf)
        assert(orchardroot_oneleaf != ORCHARD_TREE_EMPTY_ROOT)

        # finalorchardroot should not change if we mine additional blocks without Orchard notes.
        self.nodes[0].generate(4)
        self.sync_all()
        assert_equal(
            orchardroot_oneleaf,
            self.nodes[0].getblock(self.nodes[0].getbestblockhash())['finalorchardroot'],
        )

        # Split the network so we can test the effect of a reorg.
        print("Splitting the network")
        self.split_network()

        # Create another Orchard note on node 0.
        recipients = [{'address': ua, 'amount': Decimal('12.5')}]
        opid = self.nodes[0].z_sendmany(get_coinbase_address(self.nodes[0]), recipients, 1, 0)
        wait_and_assert_operationid_status(self.nodes[0], opid)

        # Mine two blocks on node 0.
        print("Mining 2 blocks on node 0")
        self.nodes[0].generate(2)
        self.sync_all()
        orchardroot_twoleaf = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['finalorchardroot']
        print("Root of Orchard commitment tree with two leaves:", orchardroot_twoleaf)
        assert(orchardroot_twoleaf != ORCHARD_TREE_EMPTY_ROOT)
        assert(orchardroot_twoleaf != orchardroot_oneleaf)

        # Generate 10 blocks on node 2. This longer chain lacks the second
        # Orchard note, so its tip should still have the one-leaf root.
        print("Mining alternate chain on node 2")
        self.nodes[2].generate(10)
        self.sync_all()
        assert_equal(
            orchardroot_oneleaf,
            self.nodes[2].getblock(self.nodes[2].getbestblockhash())['finalorchardroot'],
        )

        # Reconnect the nodes; node 0 will re-org to node 2's chain.
        print("Re-joining the network so that node 0 reorgs")
        # We can't use `self.join_network()` because the coinbase-spending second Orchard
        # transaction doesn't propagate from node 1's mempool to node 2 on restart. Inline
        # the block-syncing parts here.
        assert self.is_network_split
        stop_nodes(self.nodes)
        wait_bitcoinds()
        self.nodes = self.setup_nodes()
        connect_nodes_bi(self.nodes, 1, 2)
        sync_blocks(self.nodes[1:3])
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 2, 3)
        self.is_network_split = False
        sync_blocks(self.nodes)

        # Verify that node 0's latest Orchard root matches what we expect:
        # after the reorg it must equal the one-leaf root, not the two-leaf root.
        orchardroot_postreorg = self.nodes[0].getblock(self.nodes[2].getbestblockhash())['finalorchardroot']
        print("Root of Orchard commitment tree after reorg:", orchardroot_postreorg)
        assert_equal(orchardroot_postreorg, orchardroot_oneleaf)
# Standard test entry point: run the framework's main() when invoked directly.
if __name__ == '__main__':
    OrchardReorgTest().main()

View File

@@ -683,6 +683,14 @@ void CCoinsViewCache::PopAnchor(const uint256 &newrt, ShieldedType type) {
hashSaplingAnchor
);
break;
case ORCHARD:
AbstractPopAnchor<OrchardMerkleFrontier, CAnchorsOrchardMap, CAnchorsOrchardCacheEntry>(
newrt,
ORCHARD,
cacheOrchardAnchors,
hashOrchardAnchor
);
break;
default:
throw std::runtime_error("Unknown shielded type");
}

View File

@@ -305,7 +305,7 @@ bool static Bind(const CService &addr, unsigned int flags) {
void OnRPCStopped()
{
    // Wake every thread blocked waiting for a tip change (e.g. a
    // getblocktemplate longpoll) so it can observe that RPC is shutting
    // down instead of sleeping until its timeout.
    cvBlockChange.notify_all();
    g_best_block_cv.notify_all();
    LogPrint("rpc", "RPC stopped.\n");
}

View File

@@ -66,8 +66,10 @@ BlockMap mapBlockIndex;
CChain chainActive;
CBlockIndex *pindexBestHeader = NULL;
static std::atomic<int64_t> nTimeBestReceived(0); // Used only to inform the wallet of when we last received a block
CWaitableCriticalSection csBestBlock;
CConditionVariable cvBlockChange;
CWaitableCriticalSection g_best_block_mutex;
CConditionVariable g_best_block_cv;
uint256 g_best_block;
int g_best_block_height;
int nScriptCheckThreads = 0;
std::atomic_bool fImporting(false);
std::atomic_bool fReindex(false);
@@ -2907,6 +2909,17 @@ static DisconnectResult DisconnectBlock(const CBlock& block, CValidationState& s
view.PopAnchor(SaplingMerkleTree::empty_root(), SAPLING);
}
// Set the old best Orchard anchor back. We can get this from the
// `hashFinalOrchardRoot` of the last block. However, if the last
// block was not on or after the Orchard activation height, this
// will be set to `null`. For logical consistency, in this case we
// set the last anchor to the empty root.
if (chainparams.GetConsensus().NetworkUpgradeActive(pindex->pprev->nHeight, Consensus::UPGRADE_NU5)) {
view.PopAnchor(pindex->pprev->hashFinalOrchardRoot, ORCHARD);
} else {
view.PopAnchor(OrchardMerkleFrontier::empty_root(), ORCHARD);
}
// This is guaranteed to be filled by LoadBlockIndex.
assert(pindex->nCachedBranchId);
auto consensusBranchId = pindex->nCachedBranchId.value();
@@ -3134,7 +3147,19 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
assert(view.GetSaplingAnchorAt(view.GetBestAnchor(SAPLING), sapling_tree));
OrchardMerkleFrontier orchard_tree;
assert(view.GetOrchardAnchorAt(view.GetBestAnchor(ORCHARD), orchard_tree));
if (pindex->pprev && chainparams.GetConsensus().NetworkUpgradeActive(pindex->pprev->nHeight, Consensus::UPGRADE_NU5)) {
// Verify that the view's current state corresponds to the previous block.
assert(pindex->pprev->hashFinalOrchardRoot == view.GetBestAnchor(ORCHARD));
// We only call ConnectBlock() on top of the active chain's tip.
assert(!pindex->pprev->hashFinalOrchardRoot.IsNull());
assert(view.GetOrchardAnchorAt(pindex->pprev->hashFinalOrchardRoot, orchard_tree));
} else {
if (pindex->pprev) {
assert(pindex->pprev->hashFinalOrchardRoot.IsNull());
}
assert(view.GetOrchardAnchorAt(OrchardMerkleFrontier::empty_root(), orchard_tree));
}
// Grab the consensus branch ID for this block and its parent
auto consensusBranchId = CurrentEpochBranchId(pindex->nHeight, chainparams.GetConsensus());
@@ -3757,7 +3782,12 @@ void static UpdateTip(CBlockIndex *pindexNew, const CChainParams& chainParams) {
RenderPoolMetrics("sapling", saplingPool);
RenderPoolMetrics("transparent", transparentPool);
cvBlockChange.notify_all();
{
boost::unique_lock<boost::mutex> lock(g_best_block_mutex);
g_best_block = pindexNew->GetBlockHash();
g_best_block_height = pindexNew->nHeight;
g_best_block_cv.notify_all();
}
}
/**

View File

@@ -160,8 +160,19 @@ extern BlockMap mapBlockIndex;
extern std::optional<uint64_t> last_block_num_txs;
extern std::optional<uint64_t> last_block_size;
extern const std::string strMessageMagic;
extern CWaitableCriticalSection csBestBlock;
extern CConditionVariable cvBlockChange;
//! These four variables are used to notify getblocktemplate RPC of new tips.
//! When UpdateTip() establishes a new tip (best block), it must awaken a
//! waiting getblocktemplate RPC (if there is one) immediately. But upon waking
//! up, getblocktemplate cannot call chainActive->Tip() because it does not
//! (and cannot) hold cs_main. So the g_best_block_height and g_best_block variables
//! (protected by g_best_block_mutex) provide the needed height and block
//! hash respectively to getblocktemplate without it requiring cs_main.
extern CWaitableCriticalSection g_best_block_mutex;
extern CConditionVariable g_best_block_cv;
extern int g_best_block_height;
extern uint256 g_best_block;
extern std::atomic_bool fImporting;
extern std::atomic_bool fReindex;
extern int nScriptCheckThreads;

View File

@@ -607,15 +607,15 @@ UniValue getblocktemplate(const UniValue& params, bool fHelp)
nTransactionsUpdatedLastLP = nTransactionsUpdatedLast;
}
// Release the main lock while waiting
// Don't call chainActive->Tip() without holding cs_main
LEAVE_CRITICAL_SECTION(cs_main);
{
checktxtime = boost::get_system_time() + boost::posix_time::seconds(10);
boost::unique_lock<boost::mutex> lock(csBestBlock);
while (chainActive.Tip()->GetBlockHash() == hashWatchedChain && IsRPCRunning())
boost::unique_lock<boost::mutex> lock(g_best_block_mutex);
while (g_best_block == hashWatchedChain && IsRPCRunning())
{
// Release the main lock while waiting
LEAVE_CRITICAL_SECTION(cs_main);
// Before waiting, generate the coinbase for the block following the next
// block (since this is cpu-intensive), so that when next block arrives,
// we can quickly respond with a template for following block.
@@ -628,12 +628,11 @@ UniValue getblocktemplate(const UniValue& params, bool fHelp)
Params(), CAmount{0}, minerAddress, cached_next_cb_height);
next_cb_mtx = cached_next_cb_mtx;
}
bool timedout = !cvBlockChange.timed_wait(lock, checktxtime);
ENTER_CRITICAL_SECTION(cs_main);
bool timedout = !g_best_block_cv.timed_wait(lock, checktxtime);
// Optimization: even if timed out, a new block may have arrived
// while waiting for cs_main; if so, don't discard next_cb_mtx.
if (chainActive.Tip()->GetBlockHash() != hashWatchedChain) break;
if (g_best_block != hashWatchedChain) break;
// Timeout: Check transactions for update
if (timedout && mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP) {
@@ -643,11 +642,12 @@ UniValue getblocktemplate(const UniValue& params, bool fHelp)
}
checktxtime += boost::posix_time::seconds(10);
}
if (chainActive.Tip()->nHeight != nHeight + 1) {
if (g_best_block_height != nHeight + 1) {
// Unexpected height (reorg or >1 blocks arrived while waiting) invalidates coinbase tx.
next_cb_mtx = nullopt;
}
}
ENTER_CRITICAL_SECTION(cs_main);
if (!IsRPCRunning())
throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down");