Auto merge of #4264 - NikVolf:mmr, r=str4d

[ZIP 221] FlyClient - Consensus-Layer Changes

Implements https://zips.z.cash/zip-0221
Merged by Homu on 2020-04-14 20:09:34 +00:00 (commit 084303d8c2)
24 changed files with 1318 additions and 46 deletions


@ -83,6 +83,7 @@ testScripts=(
'turnstile.py'
'mining_shielded_coinbase.py'
'framework.py'
'feature_zip221.py'
);
testScriptsExt=(
'getblocktemplate_longpoll.py'

qa/rpc-tests/feature_zip221.py (new executable file, 134 lines)

@ -0,0 +1,134 @@
#!/usr/bin/env python3
# Copyright (c) 2020 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from test_framework.flyclient import (ZcashMMRNode, append, delete, make_root_commitment)
from test_framework.mininode import (HEARTWOOD_BRANCH_ID, CBlockHeader)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
bytes_to_hex_str,
hex_str_to_bytes,
initialize_chain_clean,
start_nodes,
)
from io import BytesIO
NULL_FIELD = "00" * 32
CHAIN_HISTORY_ROOT_VERSION = 2010200
# Verify block header field 'hashLightClientRoot' is set correctly for Heartwood blocks.
class Zip221Test(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir, extra_args=[[
'-nuparams=2bb40e60:1', # Blossom
'-nuparams=f5b9230b:10', # Heartwood
'-nurejectoldversions=false',
]] * 4)
def node_for_block(self, height):
block_header = CBlockHeader()
block_header.deserialize(BytesIO(hex_str_to_bytes(
self.nodes[0].getblock(str(height), 0))))
sapling_root = hex_str_to_bytes(
self.nodes[0].getblock(str(height))["finalsaplingroot"])[::-1]
return ZcashMMRNode.from_block(
block_header, height, sapling_root, 0, HEARTWOOD_BRANCH_ID)
def run_test(self):
self.nodes[0].generate(10)
self.sync_all()
# Verify all blocks up to and including Heartwood activation set
# hashChainHistoryRoot to null.
print("Verifying blocks up to and including Heartwood activation")
blockcount = self.nodes[0].getblockcount()
assert_equal(blockcount, 10)
for height in range(0, blockcount + 1):
blk = self.nodes[0].getblock(str(height))
assert_equal(blk["chainhistoryroot"], NULL_FIELD)
# Create the initial history tree, containing a single node.
root = self.node_for_block(10)
# Generate the first block that contains a non-null chain history root.
print("Verifying first non-null chain history root")
self.nodes[0].generate(1)
self.sync_all()
# Verify that hashChainHistoryRoot is set correctly.
assert_equal(
self.nodes[0].getblock('11')["chainhistoryroot"],
bytes_to_hex_str(make_root_commitment(root)[::-1]))
# Generate 9 more blocks on node 0, and verify their chain history roots.
print("Mining 9 blocks on node 0")
self.nodes[0].generate(9)
self.sync_all()
print("Verifying node 0's chain history")
for height in range(12, 21):
leaf = self.node_for_block(height - 1)
root = append(root, leaf)
assert_equal(
self.nodes[0].getblock(str(height))["chainhistoryroot"],
bytes_to_hex_str(make_root_commitment(root)[::-1]))
# The rest of the test only applies to Heartwood-aware node versions.
# Earlier versions won't serialize chain history roots in the block
# index, and splitting the network below requires restarting the nodes.
if self.nodes[0].getnetworkinfo()["version"] < CHAIN_HISTORY_ROOT_VERSION:
print("Node's block index is not Heartwood-aware, skipping reorg test")
return
# Split the network so we can test the effect of a reorg.
print("Splitting the network")
self.split_network()
# Generate 10 more blocks on node 0, and verify their chain history roots.
print("Mining 10 more blocks on node 0")
self.nodes[0].generate(10)
self.sync_all()
print("Verifying node 0's chain history")
for height in range(21, 31):
leaf = self.node_for_block(height - 1)
root = append(root, leaf)
assert_equal(
self.nodes[0].getblock(str(height))["chainhistoryroot"],
bytes_to_hex_str(make_root_commitment(root)[::-1]))
# Generate 11 blocks on node 2.
print("Mining alternate chain on node 2")
self.nodes[2].generate(11)
self.sync_all()
# Reconnect the nodes; node 0 will re-org to node 2's chain.
print("Re-joining the network so that node 0 reorgs")
self.join_network()
# Verify that node 0's chain history was correctly updated.
print("Deleting orphaned blocks from the expected chain history")
for _ in range(10):
root = delete(root)
print("Verifying that node 0 is now on node 1's chain history")
for height in range(21, 32):
leaf = self.node_for_block(height - 1)
root = append(root, leaf)
assert_equal(
self.nodes[2].getblock(str(height))["chainhistoryroot"],
bytes_to_hex_str(make_root_commitment(root)[::-1]))
if __name__ == '__main__':
Zip221Test().main()


@ -0,0 +1,176 @@
from pyblake2 import blake2b
import struct
from typing import (List, Optional)
from .mininode import (CBlockHeader, block_work_from_compact, ser_compactsize, ser_uint256)
def H(msg: bytes, consensusBranchId: int) -> bytes:
digest = blake2b(
digest_size=32,
person=b'ZcashHistory' + struct.pack("<I", consensusBranchId))
digest.update(msg)
return digest.digest()
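A quick illustrative check (not part of the shipped test code): the ZIP 221 personalization is the 12-byte tag b'ZcashHistory' followed by the 4-byte little-endian consensus branch ID, which together fill BLAKE2b's 16-byte personalization field exactly.

import struct
person = b'ZcashHistory' + struct.pack("<I", 0xF5B9230B)  # Heartwood branch ID
assert len(person) == 16  # BLAKE2b accepts at most 16 bytes of personalization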
class ZcashMMRNode():
# leaf nodes have no children
left_child: Optional['ZcashMMRNode']
right_child: Optional['ZcashMMRNode']
# commitments
hashSubtreeCommitment: bytes
nEarliestTimestamp: int
nLatestTimestamp: int
nEarliestTargetBits: int
nLatestTargetBits: int
hashEarliestSaplingRoot: bytes # left child's sapling root
hashLatestSaplingRoot: bytes # right child's sapling root
nSubTreeTotalWork: int # total work accumulated within the subtree
nEarliestHeight: int
nLatestHeight: int
nSaplingTxCount: int # number of Sapling transactions in the subtree's blocks
consensusBranchId: bytes
@classmethod
def from_block(Z, block: CBlockHeader, height, sapling_root, sapling_tx_count, consensusBranchId) -> 'ZcashMMRNode':
'''Create a leaf node from a block'''
node = Z()
node.left_child = None
node.right_child = None
node.hashSubtreeCommitment = ser_uint256(block.rehash())
node.nEarliestTimestamp = block.nTime
node.nLatestTimestamp = block.nTime
node.nEarliestTargetBits = block.nBits
node.nLatestTargetBits = block.nBits
node.hashEarliestSaplingRoot = sapling_root
node.hashLatestSaplingRoot = sapling_root
node.nSubTreeTotalWork = block_work_from_compact(block.nBits)
node.nEarliestHeight = height
node.nLatestHeight = height
node.nSaplingTxCount = sapling_tx_count
node.consensusBranchId = consensusBranchId
return node
def serialize(self) -> bytes:
'''serializes a node'''
buf = b''
buf += self.hashSubtreeCommitment
buf += struct.pack("<I", self.nEarliestTimestamp)
buf += struct.pack("<I", self.nLatestTimestamp)
buf += struct.pack("<I", self.nEarliestTargetBits)
buf += struct.pack("<I", self.nLatestTargetBits)
buf += self.hashEarliestSaplingRoot
buf += self.hashLatestSaplingRoot
buf += ser_uint256(self.nSubTreeTotalWork)
buf += ser_compactsize(self.nEarliestHeight)
buf += ser_compactsize(self.nLatestHeight)
buf += ser_compactsize(self.nSaplingTxCount)
return buf
def make_parent(
left_child: ZcashMMRNode,
right_child: ZcashMMRNode) -> ZcashMMRNode:
parent = ZcashMMRNode()
parent.left_child = left_child
parent.right_child = right_child
parent.hashSubtreeCommitment = H(
left_child.serialize() + right_child.serialize(),
left_child.consensusBranchId,
)
parent.nEarliestTimestamp = left_child.nEarliestTimestamp
parent.nLatestTimestamp = right_child.nLatestTimestamp
parent.nEarliestTargetBits = left_child.nEarliestTargetBits
parent.nLatestTargetBits = right_child.nLatestTargetBits
parent.hashEarliestSaplingRoot = left_child.hashEarliestSaplingRoot
parent.hashLatestSaplingRoot = right_child.hashLatestSaplingRoot
parent.nSubTreeTotalWork = left_child.nSubTreeTotalWork + right_child.nSubTreeTotalWork
parent.nEarliestHeight = left_child.nEarliestHeight
parent.nLatestHeight = right_child.nLatestHeight
parent.nSaplingTxCount = left_child.nSaplingTxCount + right_child.nSaplingTxCount
parent.consensusBranchId = left_child.consensusBranchId
return parent
def make_root_commitment(root: ZcashMMRNode) -> bytes:
'''Makes the root commitment for a blockheader'''
return H(root.serialize(), root.consensusBranchId)
def get_peaks(node: ZcashMMRNode) -> List[ZcashMMRNode]:
peaks: List[ZcashMMRNode] = []
# Get number of leaves.
leaves = node.nLatestHeight - (node.nEarliestHeight - 1)
assert(leaves > 0)
# Check if the number of leaves in this subtree is a power of two.
if (leaves & (leaves - 1)) == 0:
# This subtree is full, and therefore a single peak. This also covers
# the case of a single isolated leaf.
peaks.append(node)
else:
# This is one of the generated nodes; search within its children.
peaks.extend(get_peaks(node.left_child))
peaks.extend(get_peaks(node.right_child))
return peaks
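The recursion bottoms out on perfect subtrees, so the peak decomposition mirrors the binary representation of the leaf count: each set bit contributes one perfect subtree. A standalone sketch (hypothetical helper, not part of flyclient.py):

def peak_sizes(leaves):
    # One perfect subtree per set bit of the leaf count, largest first.
    return [1 << i for i in range(leaves.bit_length() - 1, -1, -1)
            if leaves & (1 << i)]

assert peak_sizes(14) == [8, 4, 2]  # 14 = 0b1110
assert peak_sizes(8) == [8]         # a full tree is a single peak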
def bag_peaks(peaks: List[ZcashMMRNode]) -> ZcashMMRNode:
'''
"Bag" a list of peaks, and return the final root
'''
root = peaks[0]
for i in range(1, len(peaks)):
root = make_parent(root, peaks[i])
return root
def append(root: ZcashMMRNode, leaf: ZcashMMRNode) -> ZcashMMRNode:
'''Append a leaf to an existing tree, return the new tree root'''
# recursively find a list of peaks in the current tree
peaks: List[ZcashMMRNode] = get_peaks(root)
merged: List[ZcashMMRNode] = []
# Merge peaks from right to left.
# This will produce a list of peaks in reverse order
current = leaf
for peak in peaks[::-1]:
current_leaves = current.nLatestHeight - (current.nEarliestHeight - 1)
peak_leaves = peak.nLatestHeight - (peak.nEarliestHeight - 1)
if current_leaves == peak_leaves:
current = make_parent(peak, current)
else:
merged.append(current)
current = peak
merged.append(current)
# finally, bag the merged peaks
return bag_peaks(merged[::-1])
def delete(root: ZcashMMRNode) -> ZcashMMRNode:
'''
Delete the rightmost leaf node from an existing MMR
Return the new tree root
'''
n_leaves = root.nLatestHeight - (root.nEarliestHeight - 1)
# if there were an odd number of leaves,
# simply replace root with left_child
if n_leaves & 1:
return root.left_child
# otherwise, we need to re-bag the peaks.
else:
# first peak
peaks = [root.left_child]
# we do this traversing the right (unbalanced) side of the tree
# we keep the left side (balanced subtree or leaf) of each subtree
# until we reach a leaf
subtree_root = root.right_child
while subtree_root.left_child:
peaks.append(subtree_root.left_child)
subtree_root = subtree_root.right_child
new_root = bag_peaks(peaks)
return new_root
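Note that append and delete are inverses: deleting the leaf that was just appended restores the previous root commitment (the C++ "Smoky" gtest below checks the same property). A minimal sketch, with root and leaf built as in feature_zip221.py above:

root_before = make_root_commitment(root)
assert make_root_commitment(delete(append(root, leaf))) == root_before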


@ -57,6 +57,7 @@ SPROUT_BRANCH_ID = 0x00000000
OVERWINTER_BRANCH_ID = 0x5BA81B19
SAPLING_BRANCH_ID = 0x76B809BB
BLOSSOM_BRANCH_ID = 0x2BB40E60
HEARTWOOD_BRANCH_ID = 0xF5B9230B
MAX_INV_SZ = 50000
@ -85,6 +86,15 @@ def hash256(s):
return sha256(sha256(s))
def ser_compactsize(n):
if n < 253:
return struct.pack("B", n)
elif n < 0x10000:
return struct.pack("<BH", 253, n)
elif n < 0x100000000:
return struct.pack("<BI", 254, n)
return struct.pack("<BQ", 255, n)
def deser_string(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
@ -134,6 +144,11 @@ def uint256_from_compact(c):
return v
def block_work_from_compact(c):
target = uint256_from_compact(c)
return 2**256 // (target + 1)
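A quick sanity check of the work formula, using the well-known nBits value 0x1d00ffff from the Bitcoin genesis block (illustrative only; Zcash uses different parameters):

target = 0xffff << (8 * (0x1d - 3))  # what uint256_from_compact(0x1d00ffff) yields
assert 2**256 // (target + 1) == 0x100010001  # ~2^32 expected hashes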
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:


@ -140,6 +140,7 @@ LIBZCASH_H = \
zcash/address/sapling.hpp \
zcash/address/sprout.hpp \
zcash/address/zip32.h \
zcash/History.hpp \
zcash/JoinSplit.hpp \
zcash/Note.hpp \
zcash/prf.h \
@ -569,6 +570,7 @@ libzcash_a_SOURCES = \
zcash/address/sapling.cpp \
zcash/address/sprout.cpp \
zcash/address/zip32.cpp \
zcash/History.cpp \
zcash/JoinSplit.cpp \
zcash/Proof.cpp \
zcash/Note.cpp \


@ -28,6 +28,7 @@ zcash_gtest_SOURCES += \
gtest/test_deprecation.cpp \
gtest/test_dynamicusage.cpp \
gtest/test_equihash.cpp \
gtest/test_history.cpp \
gtest/test_httprpc.cpp \
gtest/test_joinsplit.cpp \
gtest/test_keys.cpp \


@ -16,6 +16,7 @@
static const int SPROUT_VALUE_VERSION = 1001400;
static const int SAPLING_VALUE_VERSION = 1010100;
static const int CHAIN_HISTORY_ROOT_VERSION = 2010200;
/**
* Maximum amount of time that a block timestamp is allowed to be ahead of the
@ -253,10 +254,26 @@ public:
//! Will be boost::none if nChainTx is zero.
boost::optional<CAmount> nChainSaplingValue;
//! Root of the Sapling commitment tree as of the end of this block. This is only set
//! once a block has been connected to the main chain, and will be null otherwise.
//!
//! For blocks prior to (not including) the Heartwood activation block, this is
//! always equal to hashLightClientRoot.
uint256 hashFinalSaplingRoot;
//! Root of the ZIP 221 history tree as of the end of the previous block. This is only
//! set once a block has been connected to the main chain, and will be null otherwise.
//!
//! - For blocks prior to and including the Heartwood activation block, this is
//! always null.
//! - For blocks after (not including) the Heartwood activation block, this is
//! always equal to hashLightClientRoot.
uint256 hashChainHistoryRoot;
//! block header
int nVersion;
uint256 hashMerkleRoot;
uint256 hashFinalSaplingRoot;
uint256 hashLightClientRoot;
unsigned int nTime;
unsigned int nBits;
uint256 nNonce;
@ -289,7 +306,7 @@ public:
nVersion = 0;
hashMerkleRoot = uint256();
hashFinalSaplingRoot = uint256();
hashLightClientRoot = uint256();
nTime = 0;
nBits = 0;
nNonce = uint256();
@ -307,7 +324,7 @@ public:
nVersion = block.nVersion;
hashMerkleRoot = block.hashMerkleRoot;
hashFinalSaplingRoot = block.hashFinalSaplingRoot;
hashLightClientRoot = block.hashLightClientRoot;
nTime = block.nTime;
nBits = block.nBits;
nNonce = block.nNonce;
@ -339,7 +356,7 @@ public:
if (pprev)
block.hashPrevBlock = pprev->GetBlockHash();
block.hashMerkleRoot = hashMerkleRoot;
block.hashFinalSaplingRoot = hashFinalSaplingRoot;
block.hashLightClientRoot = hashLightClientRoot;
block.nTime = nTime;
block.nBits = nBits;
block.nNonce = nNonce;
@ -461,7 +478,7 @@ public:
READWRITE(this->nVersion);
READWRITE(hashPrev);
READWRITE(hashMerkleRoot);
READWRITE(hashFinalSaplingRoot);
READWRITE(hashLightClientRoot);
READWRITE(nTime);
READWRITE(nBits);
READWRITE(nNonce);
@ -479,6 +496,17 @@ public:
READWRITE(nSaplingValue);
}
// Only read/write hashFinalSaplingRoot and hashChainHistoryRoot if the
// client version used to create this index was storing them.
if ((s.GetType() & SER_DISK) && (nVersion >= CHAIN_HISTORY_ROOT_VERSION)) {
READWRITE(hashFinalSaplingRoot);
READWRITE(hashChainHistoryRoot);
} else if (ser_action.ForRead()) {
// For block indices written before the client was Heartwood-aware,
// these are always identical.
hashFinalSaplingRoot = hashLightClientRoot;
}
// If you have just added new serialized fields above, remember to add
// them to CBlockTreeDB::LoadBlockIndexGuts() in txdb.cpp :)
}
@ -489,7 +517,7 @@ public:
block.nVersion = nVersion;
block.hashPrevBlock = hashPrev;
block.hashMerkleRoot = hashMerkleRoot;
block.hashFinalSaplingRoot = hashFinalSaplingRoot;
block.hashLightClientRoot = hashLightClientRoot;
block.nTime = nTime;
block.nBits = nBits;
block.nNonce = nNonce;


@ -49,6 +49,10 @@ bool CCoinsView::GetCoins(const uint256 &txid, CCoins &coins) const { return fal
bool CCoinsView::HaveCoins(const uint256 &txid) const { return false; }
uint256 CCoinsView::GetBestBlock() const { return uint256(); }
uint256 CCoinsView::GetBestAnchor(ShieldedType type) const { return uint256(); };
HistoryIndex CCoinsView::GetHistoryLength(uint32_t epochId) const { return 0; }
HistoryNode CCoinsView::GetHistoryAt(uint32_t epochId, HistoryIndex index) const { return HistoryNode(); }
uint256 CCoinsView::GetHistoryRoot(uint32_t epochId) const { return uint256(); }
bool CCoinsView::BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock,
const uint256 &hashSproutAnchor,
@ -56,7 +60,8 @@ bool CCoinsView::BatchWrite(CCoinsMap &mapCoins,
CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers) { return false; }
CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap) { return false; }
bool CCoinsView::GetStats(CCoinsStats &stats) const { return false; }
@ -69,6 +74,9 @@ bool CCoinsViewBacked::GetCoins(const uint256 &txid, CCoins &coins) const { retu
bool CCoinsViewBacked::HaveCoins(const uint256 &txid) const { return base->HaveCoins(txid); }
uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); }
uint256 CCoinsViewBacked::GetBestAnchor(ShieldedType type) const { return base->GetBestAnchor(type); }
HistoryIndex CCoinsViewBacked::GetHistoryLength(uint32_t epochId) const { return base->GetHistoryLength(epochId); }
HistoryNode CCoinsViewBacked::GetHistoryAt(uint32_t epochId, HistoryIndex index) const { return base->GetHistoryAt(epochId, index); }
uint256 CCoinsViewBacked::GetHistoryRoot(uint32_t epochId) const { return base->GetHistoryRoot(epochId); }
void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; }
bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock,
@ -77,7 +85,12 @@ bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins,
CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers) { return base->BatchWrite(mapCoins, hashBlock, hashSproutAnchor, hashSaplingAnchor, mapSproutAnchors, mapSaplingAnchors, mapSproutNullifiers, mapSaplingNullifiers); }
CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap) {
return base->BatchWrite(mapCoins, hashBlock, hashSproutAnchor, hashSaplingAnchor,
mapSproutAnchors, mapSaplingAnchors, mapSproutNullifiers, mapSaplingNullifiers,
historyCacheMap);
}
bool CCoinsViewBacked::GetStats(CCoinsStats &stats) const { return base->GetStats(stats); }
CCoinsKeyHasher::CCoinsKeyHasher() : salt(GetRandHash()) {}
@ -95,6 +108,7 @@ size_t CCoinsViewCache::DynamicMemoryUsage() const {
memusage::DynamicUsage(cacheSaplingAnchors) +
memusage::DynamicUsage(cacheSproutNullifiers) +
memusage::DynamicUsage(cacheSaplingNullifiers) +
memusage::DynamicUsage(historyCacheMap) +
cachedCoinsUsage;
}
@ -188,6 +202,31 @@ bool CCoinsViewCache::GetNullifier(const uint256 &nullifier, ShieldedType type)
return tmp;
}
HistoryIndex CCoinsViewCache::GetHistoryLength(uint32_t epochId) const {
HistoryCache& historyCache = SelectHistoryCache(epochId);
return historyCache.length;
}
HistoryNode CCoinsViewCache::GetHistoryAt(uint32_t epochId, HistoryIndex index) const {
HistoryCache& historyCache = SelectHistoryCache(epochId);
if (index >= historyCache.length) {
// Callers must limit history requests to the
// range 0..GetHistoryLength(epochId)-1.
throw std::runtime_error("Invalid history request");
}
if (index >= historyCache.updateDepth) {
return historyCache.appends[index];
}
return base->GetHistoryAt(epochId, index);
}
uint256 CCoinsViewCache::GetHistoryRoot(uint32_t epochId) const {
return SelectHistoryCache(epochId).root;
}
template<typename Tree, typename Cache, typename CacheIterator, typename CacheEntry>
void CCoinsViewCache::AbstractPushAnchor(
const Tree &tree,
@ -260,6 +299,262 @@ void CCoinsViewCache::BringBestAnchorIntoCache(
assert(GetSaplingAnchorAt(currentRoot, tree));
}
void draftMMRNode(std::vector<uint32_t> &indices,
std::vector<HistoryEntry> &entries,
HistoryNode nodeData,
uint32_t alt,
uint32_t peak_pos)
{
HistoryEntry newEntry = alt == 0
? libzcash::LeafToEntry(nodeData)
// peak_pos - (1 << alt) is the array position of left child.
// peak_pos - 1 is the array position of right child.
: libzcash::NodeToEntry(nodeData, peak_pos - (1 << alt), peak_pos - 1);
indices.push_back(peak_pos);
entries.push_back(newEntry);
}
// Computes floor(log2(x)).
static inline uint32_t floor_log2(uint32_t x) {
assert(x > 0);
int log = 0;
while (x >>= 1) { ++log; }
return log;
}
// Computes the altitude of the largest subtree for an MMR with n nodes,
// which is floor(log2(n + 1)) - 1.
static inline uint32_t altitude(uint32_t n) {
return floor_log2(n + 1) - 1;
}
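The same computation in Python, checked against the example below (a sketch for exposition): a 25-node MMR has its highest peak at altitude 3, i.e. a perfect subtree of 2^(3+1) - 1 = 15 nodes.

def altitude(n):
    # floor(log2(n + 1)) - 1
    return (n + 1).bit_length() - 2

assert altitude(25) == 3
assert altitude(1) == 0  # a single leaf is its own peak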
uint32_t CCoinsViewCache::PreloadHistoryTree(uint32_t epochId, bool extra, std::vector<HistoryEntry> &entries, std::vector<uint32_t> &entry_indices) {
auto treeLength = GetHistoryLength(epochId);
if (treeLength <= 0) {
throw std::runtime_error("Invalid PreloadHistoryTree state called - tree should exist");
} else if (treeLength == 1) {
entries.push_back(libzcash::LeafToEntry(GetHistoryAt(epochId, 0)));
entry_indices.push_back(0);
return 1;
}
uint32_t last_peak_pos = 0;
uint32_t last_peak_alt = 0;
uint32_t alt = 0;
uint32_t peak_pos = 0;
uint32_t total_peaks = 0;
// Assume the following example peak layout with 14 leaves, and 25 stored nodes in
// total (the "tree length"):
//
// P
// /\
// / \
// / \ \
// / \ \ Altitude
// _A_ \ \ 3
// _/ \_ B \ 2
// / \ / \ / \ C 1
// /\ /\ /\ /\ /\ /\ /\ 0
//
// We start by determining the altitude of the highest peak (A).
alt = altitude(treeLength);
// We determine the position of the highest peak (A) by pretending it is the right
// sibling in a tree, and its left-most leaf has position 0. Then the left sibling
// of (A) has position -1, and so we can "jump" to the peak's position by computing
// -1 + 2^(alt + 1) - 1.
peak_pos = (1 << (alt + 1)) - 2;
// Now that we have the position and altitude of the highest peak (A), we collect
// the remaining peaks (B, C). We navigate the peaks as if they were nodes in this
// Merkle tree (with additional imaginary nodes 1 and 2, that have positions beyond
// the MMR's length):
//
// / \
// / \
// / \
// / \
// A ==========> 1
// / \ // \
// _/ \_ B ==> 2
// /\ /\ /\ //
// / \ / \ / \ C
// /\ /\ /\ /\ /\ /\ /\
//
while (alt != 0) {
// If peak_pos is out of bounds of the tree, we compute the position of its left
// child, and drop down one level in the tree.
if (peak_pos >= treeLength) {
// left child, -2^alt
peak_pos = peak_pos - (1 << alt);
alt = alt - 1;
}
// If the peak exists, we take it and then continue with its right sibling.
if (peak_pos < treeLength) {
draftMMRNode(entry_indices, entries, GetHistoryAt(epochId, peak_pos), alt, peak_pos);
last_peak_pos = peak_pos;
last_peak_alt = alt;
// right sibling
peak_pos = peak_pos + (1 << (alt + 1)) - 1;
}
}
total_peaks = entries.size();
// Return early if we don't require extra nodes.
if (!extra) return total_peaks;
alt = last_peak_alt;
peak_pos = last_peak_pos;
// P
// /\
// / \
// / \ \
// / \ \
// _A_ \ \
// _/ \_ B \
// / \ / \ / \ C
// /\ /\ /\ /\ /\ /\ /\
// D E
//
// For the extra peaks needed for deletion, we do an extra pass along the right slope
// of the last peak and add those nodes plus their siblings. For the picture above,
// the extra nodes would be (D, E).
while (alt > 0) {
uint32_t left_pos = peak_pos - (1 << alt);
uint32_t right_pos = peak_pos - 1;
alt = alt - 1;
// drafting left child
draftMMRNode(entry_indices, entries, GetHistoryAt(epochId, left_pos), alt, left_pos);
// drafting right child
draftMMRNode(entry_indices, entries, GetHistoryAt(epochId, right_pos), alt, right_pos);
// continuing on right slope
peak_pos = right_pos;
}
return total_peaks;
}
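The peak walk can be prototyped in a few lines of Python (a sketch using the altitude helper above, not the shipped code): for the 25-node example it visits positions 14 (A, altitude 3), 21 (B, altitude 2) and 24 (C, altitude 1).

def scan_peaks(tree_length):
    assert tree_length > 1  # the single-node case is handled separately above
    alt = altitude(tree_length)
    peak_pos = (1 << (alt + 1)) - 2
    peaks = []
    while alt != 0:
        if peak_pos >= tree_length:
            peak_pos -= 1 << alt              # descend to the left child
            alt -= 1
        if peak_pos < tree_length:
            peaks.append((peak_pos, alt))
            peak_pos += (1 << (alt + 1)) - 1  # jump to the right sibling
    return peaks

assert scan_peaks(25) == [(14, 3), (21, 2), (24, 1)]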
HistoryCache& CCoinsViewCache::SelectHistoryCache(uint32_t epochId) const {
auto entry = historyCacheMap.find(epochId);
if (entry != historyCacheMap.end()) {
return entry->second;
} else {
auto cache = HistoryCache(
base->GetHistoryLength(epochId),
base->GetHistoryRoot(epochId),
epochId
);
return historyCacheMap.insert({epochId, cache}).first->second;
}
}
void CCoinsViewCache::PushHistoryNode(uint32_t epochId, const HistoryNode node) {
HistoryCache& historyCache = SelectHistoryCache(epochId);
if (historyCache.length == 0) {
// special case, it just goes into the cache right away
historyCache.Extend(node);
if (librustzcash_mmr_hash_node(epochId, node.data(), historyCache.root.begin()) != 0) {
throw std::runtime_error("hashing node failed");
};
return;
}
std::vector<HistoryEntry> entries;
std::vector<uint32_t> entry_indices;
PreloadHistoryTree(epochId, false, entries, entry_indices);
uint256 newRoot;
std::array<HistoryNode, 32> appendBuf;
uint32_t appends = librustzcash_mmr_append(
epochId,
historyCache.length,
entry_indices.data(),
entries.data(),
entry_indices.size(),
node.data(),
newRoot.begin(),
appendBuf.data()->data()
);
for (size_t i = 0; i < appends; i++) {
historyCache.Extend(appendBuf[i]);
}
historyCache.root = newRoot;
}
void CCoinsViewCache::PopHistoryNode(uint32_t epochId) {
HistoryCache& historyCache = SelectHistoryCache(epochId);
uint256 newRoot;
switch (historyCache.length) {
case 0:
// Caller is not expected to pop from empty tree! Caller should
// switch to previous epoch and pop history from there.
throw std::runtime_error("popping history node from empty history");
case 1:
// Just resetting tree to empty
historyCache.Truncate(0);
historyCache.root = uint256();
return;
case 2:
// - A tree with one leaf has length 1.
// - A tree with two leaves has length 3.
throw std::runtime_error("a history tree cannot have two nodes");
case 3:
// After removing a leaf from a tree with two leaves, we are left
// with a single-node tree, whose root is just the hash of that
// node.
if (librustzcash_mmr_hash_node(
epochId,
GetHistoryAt(epochId, 0).data(),
newRoot.begin()
) != 0) {
throw std::runtime_error("hashing node failed");
}
historyCache.Truncate(1);
historyCache.root = newRoot;
return;
default:
// This is a non-elementary pop, so use the full tree logic.
std::vector<HistoryEntry> entries;
std::vector<uint32_t> entry_indices;
uint32_t peak_count = PreloadHistoryTree(epochId, true, entries, entry_indices);
uint32_t numberOfDeletes = librustzcash_mmr_delete(
epochId,
historyCache.length,
entry_indices.data(),
entries.data(),
peak_count,
entries.size() - peak_count,
newRoot.begin()
);
historyCache.Truncate(historyCache.length - numberOfDeletes);
historyCache.root = newRoot;
return;
}
}
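The case analysis above leans on the basic MMR size relation: a tree with n leaves stores 2n - popcount(n) nodes in total, because each merge of two subtrees adds exactly one internal node; a length of 2 therefore never occurs. A quick check, which also matches the lengths asserted in the gtest below:

def mmr_length(leaves):
    # every leaf, plus one internal node per merge
    return 2 * leaves - bin(leaves).count("1")

assert [mmr_length(n) for n in (1, 2, 3, 4, 5)] == [1, 3, 4, 7, 8]
assert mmr_length(14) == 25  # the 14-leaf example pictured earlier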
template<typename Tree, typename Cache, typename CacheEntry>
void CCoinsViewCache::AbstractPopAnchor(
const uint256 &newrt,
@ -470,6 +765,35 @@ void BatchWriteAnchors(
}
}
void BatchWriteHistory(CHistoryCacheMap& historyCacheMap, CHistoryCacheMap& historyCacheMapIn) {
for (auto nextHistoryCache = historyCacheMapIn.begin(); nextHistoryCache != historyCacheMapIn.end(); nextHistoryCache++) {
auto historyCacheIn = nextHistoryCache->second;
auto epochId = nextHistoryCache->first;
auto historyCache = historyCacheMap.find(epochId);
if (historyCache != historyCacheMap.end()) {
// delete old entries from updateDepth onwards
historyCache->second.Truncate(historyCacheIn.updateDepth);
// Replace/append new/updated entries. HistoryCache.Extend
// auto-indexes the nodes, so we need to extend in the same order as
// this cache is indexed.
for (size_t i = historyCacheIn.updateDepth; i < historyCacheIn.length; i++) {
historyCache->second.Extend(historyCacheIn.appends[i]);
}
// the lengths should now match
assert(historyCache->second.length == historyCacheIn.length);
// write current root
historyCache->second.root = historyCacheIn.root;
} else {
// Just insert the history cache into its parent
historyCacheMap.insert({epochId, historyCacheIn});
}
}
}
bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlockIn,
const uint256 &hashSproutAnchorIn,
@ -477,7 +801,8 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins,
CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers) {
CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMapIn) {
assert(!hasModifier);
for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) {
if (it->second.flags & CCoinsCacheEntry::DIRTY) { // Ignore non-dirty entries (optimization).
@ -520,6 +845,8 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins,
::BatchWriteNullifiers(mapSproutNullifiers, cacheSproutNullifiers);
::BatchWriteNullifiers(mapSaplingNullifiers, cacheSaplingNullifiers);
::BatchWriteHistory(historyCacheMap, historyCacheMapIn);
hashSproutAnchor = hashSproutAnchorIn;
hashSaplingAnchor = hashSaplingAnchorIn;
hashBlock = hashBlockIn;
@ -527,12 +854,21 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins,
}
bool CCoinsViewCache::Flush() {
bool fOk = base->BatchWrite(cacheCoins, hashBlock, hashSproutAnchor, hashSaplingAnchor, cacheSproutAnchors, cacheSaplingAnchors, cacheSproutNullifiers, cacheSaplingNullifiers);
bool fOk = base->BatchWrite(cacheCoins,
hashBlock,
hashSproutAnchor,
hashSaplingAnchor,
cacheSproutAnchors,
cacheSaplingAnchors,
cacheSproutNullifiers,
cacheSaplingNullifiers,
historyCacheMap);
cacheCoins.clear();
cacheSproutAnchors.clear();
cacheSaplingAnchors.clear();
cacheSproutNullifiers.clear();
cacheSaplingNullifiers.clear();
historyCacheMap.clear();
cachedCoinsUsage = 0;
return fOk;
}


@ -17,6 +17,7 @@
#include <boost/foreach.hpp>
#include <boost/unordered_map.hpp>
#include "zcash/History.hpp"
#include "zcash/IncrementalMerkleTree.hpp"
/**
@ -321,6 +322,7 @@ typedef boost::unordered_map<uint256, CCoinsCacheEntry, CCoinsKeyHasher> CCoinsM
typedef boost::unordered_map<uint256, CAnchorsSproutCacheEntry, CCoinsKeyHasher> CAnchorsSproutMap;
typedef boost::unordered_map<uint256, CAnchorsSaplingCacheEntry, CCoinsKeyHasher> CAnchorsSaplingMap;
typedef boost::unordered_map<uint256, CNullifiersCacheEntry, CCoinsKeyHasher> CNullifiersMap;
typedef boost::unordered_map<uint32_t, HistoryCache> CHistoryCacheMap;
struct CCoinsStats
{
@ -362,6 +364,15 @@ public:
//! Get the current "tip" or the latest anchored tree root in the chain
virtual uint256 GetBestAnchor(ShieldedType type) const;
//! Get the current chain history length (roughly twice the number of blocks in the epoch)
virtual HistoryIndex GetHistoryLength(uint32_t epochId) const;
//! Get history node at specified index
virtual HistoryNode GetHistoryAt(uint32_t epochId, HistoryIndex index) const;
//! Get current history root
virtual uint256 GetHistoryRoot(uint32_t epochId) const;
//! Do a bulk modification (multiple CCoins changes + BestBlock change).
//! The passed mapCoins can be modified.
virtual bool BatchWrite(CCoinsMap &mapCoins,
@ -371,7 +382,8 @@ public:
CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers);
CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap);
//! Calculate statistics about the unspent transaction output set
virtual bool GetStats(CCoinsStats &stats) const;
@ -396,6 +408,9 @@ public:
bool HaveCoins(const uint256 &txid) const;
uint256 GetBestBlock() const;
uint256 GetBestAnchor(ShieldedType type) const;
HistoryIndex GetHistoryLength(uint32_t epochId) const;
HistoryNode GetHistoryAt(uint32_t epochId, HistoryIndex index) const;
uint256 GetHistoryRoot(uint32_t epochId) const;
void SetBackend(CCoinsView &viewIn);
bool BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock,
@ -404,7 +419,8 @@ public:
CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers);
CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap);
bool GetStats(CCoinsStats &stats) const;
};
@ -451,6 +467,7 @@ protected:
mutable CAnchorsSaplingMap cacheSaplingAnchors;
mutable CNullifiersMap cacheSproutNullifiers;
mutable CNullifiersMap cacheSaplingNullifiers;
mutable CHistoryCacheMap historyCacheMap;
/* Cached dynamic memory usage for the inner CCoins objects. */
mutable size_t cachedCoinsUsage;
@ -467,6 +484,9 @@ public:
bool HaveCoins(const uint256 &txid) const;
uint256 GetBestBlock() const;
uint256 GetBestAnchor(ShieldedType type) const;
HistoryIndex GetHistoryLength(uint32_t epochId) const;
HistoryNode GetHistoryAt(uint32_t epochId, HistoryIndex index) const;
uint256 GetHistoryRoot(uint32_t epochId) const;
void SetBestBlock(const uint256 &hashBlock);
bool BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock,
@ -475,8 +495,8 @@ public:
CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers);
CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap);
// Adds the tree to mapSproutAnchors (or mapSaplingAnchors based on the type of tree)
// and sets the current commitment root to this root.
@ -489,6 +509,12 @@ public:
// Marks nullifiers for a given transaction as spent or not.
void SetNullifiers(const CTransaction& tx, bool spent);
// Push MMR node history at the end of the history tree
void PushHistoryNode(uint32_t epochId, const HistoryNode node);
// Pop MMR node history from the end of the history tree
void PopHistoryNode(uint32_t epochId);
/**
* Return a pointer to CCoins in the cache, or NULL if not found. This is
* more efficient than GetCoins. Modifications to other cache entries are
@ -582,6 +608,18 @@ private:
const uint256 &currentRoot,
Tree &tree
);
//! Preload history tree for further update.
//!
//! If extra = true, the extra nodes needed for deletion are also preloaded.
//! This allows tail entries to be deleted from the preloaded tree without
//! any further database lookups.
//!
//! Returns number of peaks, not total number of loaded nodes.
uint32_t PreloadHistoryTree(uint32_t epochId, bool extra, std::vector<HistoryEntry> &entries, std::vector<uint32_t> &entry_indices);
//! Selects history cache for specified epoch.
HistoryCache& SelectHistoryCache(uint32_t epochId) const;
};
#endif // BITCOIN_COINS_H

src/gtest/test_history.cpp (new file, 171 lines)

@ -0,0 +1,171 @@
#include <gtest/gtest.h>
#include "main.h"
#include "utiltest.h"
#include "zcash/History.hpp"
// Fake an empty view
class FakeCoinsViewDB : public CCoinsView {
public:
FakeCoinsViewDB() {}
bool GetSproutAnchorAt(const uint256 &rt, SproutMerkleTree &tree) const {
return false;
}
bool GetSaplingAnchorAt(const uint256 &rt, SaplingMerkleTree &tree) const {
return false;
}
bool GetNullifier(const uint256 &nf, ShieldedType type) const {
return false;
}
bool GetCoins(const uint256 &txid, CCoins &coins) const {
return false;
}
bool HaveCoins(const uint256 &txid) const {
return false;
}
uint256 GetBestBlock() const {
uint256 a;
return a;
}
uint256 GetBestAnchor(ShieldedType type) const {
uint256 a;
return a;
}
bool BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock,
const uint256 &hashSproutAnchor,
const uint256 &hashSaplingAnchor,
CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap) {
return false;
}
bool GetStats(CCoinsStats &stats) const {
return false;
}
HistoryIndex GetHistoryLength(uint32_t branchId) const {
return 0;
}
HistoryNode GetHistoryAt(uint32_t branchId, HistoryIndex index) const {
return HistoryNode();
}
};
HistoryNode getLeafN(uint64_t block_num) {
HistoryNode node = libzcash::NewLeaf(
uint256(),
block_num*10,
block_num*13,
uint256(),
uint256(),
block_num,
3
);
return node;
}
TEST(History, Smoky) {
// Fake an empty view
FakeCoinsViewDB fakeDB;
CCoinsViewCache view(&fakeDB);
// Test initial value
EXPECT_EQ(view.GetHistoryLength(0), 0);
view.PushHistoryNode(1, getLeafN(1));
EXPECT_EQ(view.GetHistoryLength(1), 1);
view.PushHistoryNode(1, getLeafN(2));
EXPECT_EQ(view.GetHistoryLength(1), 3);
view.PushHistoryNode(1, getLeafN(3));
EXPECT_EQ(view.GetHistoryLength(1), 4);
view.PushHistoryNode(1, getLeafN(4));
uint256 h4Root = view.GetHistoryRoot(1);
EXPECT_EQ(view.GetHistoryLength(1), 7);
view.PushHistoryNode(1, getLeafN(5));
EXPECT_EQ(view.GetHistoryLength(1), 8);
view.PopHistoryNode(1);
EXPECT_EQ(view.GetHistoryLength(1), 7);
EXPECT_EQ(h4Root, view.GetHistoryRoot(1));
}
TEST(History, EpochBoundaries) {
// Fake an empty view
FakeCoinsViewDB fakeDB;
CCoinsViewCache view(&fakeDB);
view.PushHistoryNode(1, getLeafN(1));
EXPECT_EQ(view.GetHistoryLength(1), 1);
view.PushHistoryNode(1, getLeafN(2));
EXPECT_EQ(view.GetHistoryLength(1), 3);
view.PushHistoryNode(1, getLeafN(3));
EXPECT_EQ(view.GetHistoryLength(1), 4);
view.PushHistoryNode(1, getLeafN(4));
uint256 h4Root = view.GetHistoryRoot(1);
EXPECT_EQ(view.GetHistoryLength(1), 7);
view.PushHistoryNode(1, getLeafN(5));
EXPECT_EQ(view.GetHistoryLength(1), 8);
// New Epoch(2)
view.PushHistoryNode(2, getLeafN(6));
EXPECT_EQ(view.GetHistoryLength(1), 8);
EXPECT_EQ(view.GetHistoryLength(2), 1);
view.PushHistoryNode(2, getLeafN(7));
EXPECT_EQ(view.GetHistoryLength(1), 8);
EXPECT_EQ(view.GetHistoryLength(2), 3);
view.PushHistoryNode(2, getLeafN(8));
EXPECT_EQ(view.GetHistoryLength(1), 8);
EXPECT_EQ(view.GetHistoryLength(2), 4);
// Rolling epoch back to 1
view.PopHistoryNode(2);
EXPECT_EQ(view.GetHistoryLength(2), 3);
view.PopHistoryNode(2);
EXPECT_EQ(view.GetHistoryLength(2), 1);
EXPECT_EQ(view.GetHistoryLength(1), 8);
// And even rolling epoch 1 back a bit
view.PopHistoryNode(1);
EXPECT_EQ(view.GetHistoryLength(1), 7);
// And also rolling epoch 2 back to 0
view.PopHistoryNode(2);
EXPECT_EQ(view.GetHistoryLength(2), 0);
}


@ -2433,6 +2433,12 @@ static DisconnectResult DisconnectBlock(const CBlock& block, CValidationState& s
view.PopAnchor(SaplingMerkleTree::empty_root(), SAPLING);
}
auto consensusBranchId = CurrentEpochBranchId(pindex->nHeight, chainparams.GetConsensus());
if (chainparams.GetConsensus().NetworkUpgradeActive(pindex->nHeight, Consensus::UPGRADE_HEARTWOOD)) {
view.PopHistoryNode(consensusBranchId);
}
// move best block pointer to prevout block
view.SetBestBlock(pindex->pprev->GetBlockHash());
@ -2690,8 +2696,11 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
SaplingMerkleTree sapling_tree;
assert(view.GetSaplingAnchorAt(view.GetBestAnchor(SAPLING), sapling_tree));
// Grab the consensus branch ID for the block's height
// Grab the consensus branch ID for this block and its parent
auto consensusBranchId = CurrentEpochBranchId(pindex->nHeight, chainparams.GetConsensus());
auto prevConsensusBranchId = CurrentEpochBranchId(pindex->nHeight - 1, chainparams.GetConsensus());
size_t total_sapling_tx = 0;
std::vector<PrecomputedTransactionData> txdata;
txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated
@ -2811,6 +2820,10 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
sapling_tree.append(outputDescription.cmu);
}
if (!(tx.vShieldedSpend.empty() && tx.vShieldedOutput.empty())) {
total_sapling_tx += 1;
}
vPos.push_back(std::make_pair(tx.GetHash(), pos));
pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
}
@ -2819,17 +2832,61 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
view.PushAnchor(sapling_tree);
if (!fJustCheck) {
pindex->hashFinalSproutRoot = sprout_tree.root();
pindex->hashFinalSaplingRoot = sapling_tree.root();
if (IsActivationHeight(pindex->nHeight, chainparams.GetConsensus(), Consensus::UPGRADE_HEARTWOOD)) {
// The default is null, but let's make it explicit.
pindex->hashChainHistoryRoot.SetNull();
} else if (chainparams.GetConsensus().NetworkUpgradeActive(pindex->nHeight, Consensus::UPGRADE_HEARTWOOD)) {
pindex->hashChainHistoryRoot = view.GetHistoryRoot(prevConsensusBranchId);
}
}
blockundo.old_sprout_tree_root = old_sprout_tree_root;
// If Sapling is active, block.hashFinalSaplingRoot must be the
// same as the root of the Sapling tree
if (chainparams.GetConsensus().NetworkUpgradeActive(pindex->nHeight, Consensus::UPGRADE_SAPLING)) {
if (block.hashFinalSaplingRoot != sapling_tree.root()) {
if (IsActivationHeight(pindex->nHeight, chainparams.GetConsensus(), Consensus::UPGRADE_HEARTWOOD)) {
// In the block that activates ZIP 221, block.hashLightClientRoot MUST
// be set to all zero bytes.
if (!block.hashLightClientRoot.IsNull()) {
return state.DoS(100,
error("ConnectBlock(): block's hashFinalSaplingRoot is incorrect"),
REJECT_INVALID, "bad-sapling-root-in-block");
error("ConnectBlock(): block's hashLightClientRoot is incorrect (should be null)"),
REJECT_INVALID, "bad-heartwood-root-in-block");
}
} else if (chainparams.GetConsensus().NetworkUpgradeActive(pindex->nHeight, Consensus::UPGRADE_HEARTWOOD)) {
// If Heartwood is active, block.hashLightClientRoot must be the same as
// the root of the history tree for the previous block. We only store
// one tree per epoch, so we have two possible cases:
// - If the previous block is in the previous epoch, this block won't
// affect that epoch's tree root.
// - If the previous block is in this epoch, this block would affect
// this epoch's tree root, but as we haven't updated the tree for this
// block yet, view.GetHistoryRoot() returns the root we need.
if (block.hashLightClientRoot != view.GetHistoryRoot(prevConsensusBranchId)) {
return state.DoS(100,
error("ConnectBlock(): block's hashLightClientRoot is incorrect (should be history tree root)"),
REJECT_INVALID, "bad-heartwood-root-in-block");
}
} else if (chainparams.GetConsensus().NetworkUpgradeActive(pindex->nHeight, Consensus::UPGRADE_SAPLING)) {
// If Sapling is active, block.hashLightClientRoot must be the
// same as the root of the Sapling tree
if (block.hashLightClientRoot != sapling_tree.root()) {
return state.DoS(100,
error("ConnectBlock(): block's hashLightClientRoot is incorrect (should be Sapling tree root)"),
REJECT_INVALID, "bad-sapling-root-in-block");
}
}
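Condensed into a pure function (an expository sketch with hypothetical names, not the shipped validation code), the header rule reads:

def expected_light_client_root(height, heartwood_height, sapling_active,
                               history_root, sapling_root):
    # Hypothetical helper mirroring the three branches above.
    if height == heartwood_height:
        return b"\x00" * 32  # must be null at the activation block
    if heartwood_height is not None and height > heartwood_height:
        return history_root  # ZIP 221 history root as of the previous block
    if sapling_active:
        return sapling_root  # pre-Heartwood: final Sapling tree root
    return None              # not constrained here before Sapling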
// History reads/writes begin with the Heartwood upgrade.
if (chainparams.GetConsensus().NetworkUpgradeActive(pindex->nHeight, Consensus::UPGRADE_HEARTWOOD)) {
auto historyNode = libzcash::NewLeaf(
block.GetHash(),
block.nTime,
block.nBits,
pindex->hashFinalSaplingRoot,
ArithToUint256(GetBlockProof(*pindex)),
pindex->nHeight,
total_sapling_tx
);
view.PushHistoryNode(consensusBranchId, historyNode);
}
int64_t nTime1 = GetTimeMicros(); nTimeConnect += nTime1 - nTimeStart;
@ -3564,6 +3621,7 @@ CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
{
pindexNew->pprev = (*miPrev).second;
pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
// hashFinalSaplingRoot and hashChainHistoryRoot are set in ConnectBlock()
pindexNew->BuildSkip();
}
pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
@ -4369,7 +4427,7 @@ CBlockIndex * InsertBlockIndex(uint256 hash)
bool static LoadBlockIndexDB()
{
const CChainParams& chainparams = Params();
if (!pblocktree->LoadBlockIndexGuts(InsertBlockIndex))
if (!pblocktree->LoadBlockIndexGuts(InsertBlockIndex, chainparams))
return false;
boost::this_thread::interruption_point();


@ -520,7 +520,13 @@ CBlockTemplate* CreateNewBlock(const CChainParams& chainparams, const MinerAddre
// Fill in header
pblock->hashPrevBlock = pindexPrev->GetBlockHash();
pblock->hashFinalSaplingRoot = sapling_tree.root();
if (IsActivationHeight(nHeight, chainparams.GetConsensus(), Consensus::UPGRADE_HEARTWOOD)) {
pblock->hashLightClientRoot.SetNull();
} else if (chainparams.GetConsensus().NetworkUpgradeActive(nHeight, Consensus::UPGRADE_HEARTWOOD)) {
pblock->hashLightClientRoot = view.GetHistoryRoot(consensusBranchId);
} else {
pblock->hashLightClientRoot = sapling_tree.root();
}
UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev);
pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus());
pblock->nSolution.clear();


@ -112,12 +112,12 @@ uint256 CBlock::CheckMerkleBranch(uint256 hash, const std::vector<uint256>& vMer
std::string CBlock::ToString() const
{
std::stringstream s;
s << strprintf("CBlock(hash=%s, ver=%d, hashPrevBlock=%s, hashMerkleRoot=%s, hashFinalSaplingRoot=%s, nTime=%u, nBits=%08x, nNonce=%s, vtx=%u)\n",
s << strprintf("CBlock(hash=%s, ver=%d, hashPrevBlock=%s, hashMerkleRoot=%s, hashLightClientRoot=%s, nTime=%u, nBits=%08x, nNonce=%s, vtx=%u)\n",
GetHash().ToString(),
nVersion,
hashPrevBlock.ToString(),
hashMerkleRoot.ToString(),
hashFinalSaplingRoot.ToString(),
hashLightClientRoot.ToString(),
nTime, nBits, nNonce.ToString(),
vtx.size());
for (unsigned int i = 0; i < vtx.size(); i++)


@ -26,7 +26,7 @@ public:
int32_t nVersion;
uint256 hashPrevBlock;
uint256 hashMerkleRoot;
uint256 hashFinalSaplingRoot;
uint256 hashLightClientRoot;
uint32_t nTime;
uint32_t nBits;
uint256 nNonce;
@ -44,7 +44,7 @@ public:
READWRITE(this->nVersion);
READWRITE(hashPrevBlock);
READWRITE(hashMerkleRoot);
READWRITE(hashFinalSaplingRoot);
READWRITE(hashLightClientRoot);
READWRITE(nTime);
READWRITE(nBits);
READWRITE(nNonce);
@ -56,7 +56,7 @@ public:
nVersion = CBlockHeader::CURRENT_VERSION;
hashPrevBlock.SetNull();
hashMerkleRoot.SetNull();
hashFinalSaplingRoot.SetNull();
hashLightClientRoot.SetNull();
nTime = 0;
nBits = 0;
nNonce = uint256();
@ -118,7 +118,7 @@ public:
block.nVersion = nVersion;
block.hashPrevBlock = hashPrevBlock;
block.hashMerkleRoot = hashMerkleRoot;
block.hashFinalSaplingRoot = hashFinalSaplingRoot;
block.hashLightClientRoot = hashLightClientRoot;
block.nTime = nTime;
block.nBits = nBits;
block.nNonce = nNonce;
@ -158,7 +158,7 @@ public:
READWRITE(this->nVersion);
READWRITE(hashPrevBlock);
READWRITE(hashMerkleRoot);
READWRITE(hashFinalSaplingRoot);
READWRITE(hashLightClientRoot);
READWRITE(nTime);
READWRITE(nBits);
}


@ -229,7 +229,8 @@ UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool tx
result.push_back(Pair("height", blockindex->nHeight));
result.push_back(Pair("version", block.nVersion));
result.push_back(Pair("merkleroot", block.hashMerkleRoot.GetHex()));
result.push_back(Pair("finalsaplingroot", block.hashFinalSaplingRoot.GetHex()));
result.push_back(Pair("finalsaplingroot", blockindex->hashFinalSaplingRoot.GetHex()));
result.push_back(Pair("chainhistoryroot", blockindex->hashChainHistoryRoot.GetHex()));
UniValue txs(UniValue::VARR);
BOOST_FOREACH(const CTransaction&tx, block.vtx)
{


@ -431,7 +431,8 @@ UniValue getblocktemplate(const UniValue& params, bool fHelp)
"{\n"
" \"version\" : n, (numeric) The block version\n"
" \"previousblockhash\" : \"xxxx\", (string) The hash of current highest block\n"
" \"finalsaplingroothash\" : \"xxxx\", (string) The hash of the final sapling root\n"
" \"lightclientroothash\" : \"xxxx\", (string) The hash of the light client root field in the block header\n"
" \"finalsaplingroothash\" : \"xxxx\", (string) (DEPRECATED) The hash of the light client root field in the block header\n"
" \"transactions\" : [ (array) contents of non-coinbase transactions that should be included in the next block\n"
" {\n"
" \"data\" : \"xxxx\", (string) transaction data encoded in hexadecimal (byte-for-byte)\n"
@ -696,7 +697,9 @@ UniValue getblocktemplate(const UniValue& params, bool fHelp)
result.push_back(Pair("capabilities", aCaps));
result.push_back(Pair("version", pblock->nVersion));
result.push_back(Pair("previousblockhash", pblock->hashPrevBlock.GetHex()));
result.push_back(Pair("finalsaplingroothash", pblock->hashFinalSaplingRoot.GetHex()));
result.push_back(Pair("lightclientroothash", pblock->hashLightClientRoot.GetHex()));
// Deprecated; remove in a future release.
result.push_back(Pair("finalsaplingroothash", pblock->hashLightClientRoot.GetHex()));
result.push_back(Pair("transactions", transactions));
if (coinbasetxn) {
assert(txCoinbase.isObject());


@ -3,6 +3,15 @@
#include <stdint.h>
const int ENTRY_SERIALIZED_LENGTH = 180;
typedef struct HistoryEntry {
unsigned char bytes[ENTRY_SERIALIZED_LENGTH];
} HistoryEntry;
static_assert(
sizeof(HistoryEntry) == ENTRY_SERIALIZED_LENGTH,
"HistoryEntry struct is not the same size as the underlying byte array");
static_assert(alignof(HistoryEntry) == 1, "HistoryEntry struct alignment is not 1");
extern "C" {
#ifdef WIN32
typedef uint16_t codeunit;
@ -312,7 +321,7 @@ extern "C" {
uint32_t cbranch,
uint32_t t_len,
const uint32_t *ni_ptr,
const unsigned char *n_ptr,
const HistoryEntry *n_ptr,
size_t p_len,
const unsigned char *nn_ptr,
unsigned char *rt_ret,
@ -323,7 +332,7 @@ extern "C" {
uint32_t cbranch,
uint32_t t_len,
const uint32_t *ni_ptr,
const unsigned char *n_ptr,
const HistoryEntry *n_ptr,
size_t p_len,
size_t e_len,
unsigned char *rt_ret


@ -1257,7 +1257,7 @@ pub extern "system" fn librustzcash_mmr_append(
.root_node()
.expect("Just added, should resolve always; qed");
unsafe {
*rt_ret = root_node.data().subtree_commitment;
*rt_ret = root_node.data().hash();
for (idx, next_buf) in slice::from_raw_parts_mut(buf_ret, return_count as usize)
.iter_mut()
@ -1310,7 +1310,7 @@ pub extern "system" fn librustzcash_mmr_delete(
.root_node()
.expect("Just generated without errors, root should be resolving")
.data()
.subtree_commitment;
.hash();
}
truncate_len


@ -170,7 +170,8 @@ public:
CAnchorsSproutMap& mapSproutAnchors,
CAnchorsSaplingMap& mapSaplingAnchors,
CNullifiersMap& mapSproutNullifiers,
CNullifiersMap& mapSaplingNullifiers)
CNullifiersMap& mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap)
{
for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end(); ) {
if (it->second.flags & CCoinsCacheEntry::DIRTY) {
@ -214,7 +215,8 @@ public:
memusage::DynamicUsage(cacheSproutAnchors) +
memusage::DynamicUsage(cacheSaplingAnchors) +
memusage::DynamicUsage(cacheSproutNullifiers) +
memusage::DynamicUsage(cacheSaplingNullifiers);
memusage::DynamicUsage(cacheSaplingNullifiers) +
memusage::DynamicUsage(historyCacheMap);
for (CCoinsMap::iterator it = cacheCoins.begin(); it != cacheCoins.end(); it++) {
ret += it->second.coins.DynamicMemoryUsage();
}


@ -273,8 +273,8 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
}
*/
// These tests assume null hashFinalSaplingRoot (before Sapling)
pblock->hashFinalSaplingRoot = uint256();
// These tests assume null hashLightClientRoot (before Sapling)
pblock->hashLightClientRoot = uint256();
CValidationState state;
BOOST_CHECK(ProcessNewBlock(state, chainparams, NULL, pblock, true, NULL));


@ -35,6 +35,10 @@ static const char DB_FLAG = 'F';
static const char DB_REINDEX_FLAG = 'R';
static const char DB_LAST_BLOCK = 'l';
static const char DB_MMR_LENGTH = 'M';
static const char DB_MMR_NODE = 'm';
static const char DB_MMR_ROOT = 'r';
// insightexplorer
static const char DB_ADDRESSINDEX = 'd';
static const char DB_ADDRESSUNSPENTINDEX = 'u';
@ -124,6 +128,39 @@ uint256 CCoinsViewDB::GetBestAnchor(ShieldedType type) const {
return hashBestAnchor;
}
HistoryIndex CCoinsViewDB::GetHistoryLength(uint32_t epochId) const {
HistoryIndex historyLength;
if (!db.Read(make_pair(DB_MMR_LENGTH, epochId), historyLength)) {
// Starting new history
historyLength = 0;
}
return historyLength;
}
HistoryNode CCoinsViewDB::GetHistoryAt(uint32_t epochId, HistoryIndex index) const {
HistoryNode mmrNode;
if (index >= GetHistoryLength(epochId)) {
throw runtime_error("History data inconsistent - reindex?");
}
if (!db.Read(make_pair(DB_MMR_NODE, make_pair(epochId, index)), mmrNode)) {
throw runtime_error("History data inconsistent (expected node not found) - reindex?");
}
return mmrNode;
}
uint256 CCoinsViewDB::GetHistoryRoot(uint32_t epochId) const {
uint256 root;
if (!db.Read(make_pair(DB_MMR_ROOT, epochId), root))
{
root = uint256();
}
return root;
}
void BatchWriteNullifiers(CDBBatch& batch, CNullifiersMap& mapToUse, const char& dbChar)
{
for (CNullifiersMap::iterator it = mapToUse.begin(); it != mapToUse.end();) {
@ -158,6 +195,29 @@ void BatchWriteAnchors(CDBBatch& batch, Map& mapToUse, const char& dbChar)
}
}
void BatchWriteHistory(CDBBatch& batch, CHistoryCacheMap& historyCacheMap) {
for (auto nextHistoryCache = historyCacheMap.begin(); nextHistoryCache != historyCacheMap.end(); nextHistoryCache++) {
auto historyCache = nextHistoryCache->second;
auto epochId = nextHistoryCache->first;
// delete old entries from updateDepth onwards
for (int i = historyCache.updateDepth + 1; i <= historyCache.length; i++) {
batch.Erase(make_pair(DB_MMR_NODE, make_pair(epochId, i)));
}
// replace/append new/updated entries
for (auto it = historyCache.appends.begin(); it != historyCache.appends.end(); it++) {
batch.Write(make_pair(DB_MMR_NODE, make_pair(epochId, it->first)), it->second);
}
// write new length
batch.Write(make_pair(DB_MMR_LENGTH, epochId), historyCache.length);
// write current root
batch.Write(make_pair(DB_MMR_ROOT, epochId), historyCache.root);
}
}
bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock,
const uint256 &hashSproutAnchor,
@ -165,7 +225,8 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins,
CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers) {
CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap) {
CDBBatch batch(db);
size_t count = 0;
size_t changed = 0;
@ -188,6 +249,8 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins,
::BatchWriteNullifiers(batch, mapSproutNullifiers, DB_NULLIFIER);
::BatchWriteNullifiers(batch, mapSaplingNullifiers, DB_SAPLING_NULLIFIER);
::BatchWriteHistory(batch, historyCacheMap);
if (!hashBlock.IsNull())
batch.Write(DB_BEST_BLOCK, hashBlock);
if (!hashSproutAnchor.IsNull())
@ -456,7 +519,9 @@ bool CBlockTreeDB::ReadFlag(const std::string &name, bool &fValue) {
return true;
}
bool CBlockTreeDB::LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)> insertBlockIndex)
bool CBlockTreeDB::LoadBlockIndexGuts(
std::function<CBlockIndex*(const uint256&)> insertBlockIndex,
const CChainParams& chainParams)
{
boost::scoped_ptr<CDBIterator> pcursor(NewIterator());
@ -479,7 +544,7 @@ bool CBlockTreeDB::LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)
pindexNew->hashSproutAnchor = diskindex.hashSproutAnchor;
pindexNew->nVersion = diskindex.nVersion;
pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
pindexNew->hashFinalSaplingRoot = diskindex.hashFinalSaplingRoot;
pindexNew->hashLightClientRoot = diskindex.hashLightClientRoot;
pindexNew->nTime = diskindex.nTime;
pindexNew->nBits = diskindex.nBits;
pindexNew->nNonce = diskindex.nNonce;
@ -489,6 +554,8 @@ bool CBlockTreeDB::LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)
pindexNew->nTx = diskindex.nTx;
pindexNew->nSproutValue = diskindex.nSproutValue;
pindexNew->nSaplingValue = diskindex.nSaplingValue;
pindexNew->hashFinalSaplingRoot = diskindex.hashFinalSaplingRoot;
pindexNew->hashChainHistoryRoot = diskindex.hashChainHistoryRoot;
// Consistency checks
auto header = pindexNew->GetBlockHeader();
@ -498,6 +565,21 @@ bool CBlockTreeDB::LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)
if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, Params().GetConsensus()))
return error("LoadBlockIndex(): CheckProofOfWork failed: %s", pindexNew->ToString());
// ZIP 221 consistency checks
if (chainParams.GetConsensus().NetworkUpgradeActive(pindexNew->nHeight, Consensus::UPGRADE_HEARTWOOD)) {
if (pindexNew->hashLightClientRoot != pindexNew->hashChainHistoryRoot) {
return error(
"LoadBlockIndex(): block index inconsistency detected (hashLightClientRoot != hashChainHistoryRoot): %s",
pindexNew->ToString());
}
} else {
if (pindexNew->hashLightClientRoot != pindexNew->hashFinalSaplingRoot) {
return error(
"LoadBlockIndex(): block index inconsistency detected (hashLightClientRoot != hashFinalSaplingRoot): %s",
pindexNew->ToString());
}
}
pcursor->Next();
} else {
return error("LoadBlockIndex() : failed to read value");


@ -15,6 +15,9 @@
#include <utility>
#include <vector>
#include <boost/function.hpp>
#include "zcash/History.hpp"
class CBlockIndex;
// START insightexplorer
@ -85,6 +88,9 @@ public:
bool HaveCoins(const uint256 &txid) const;
uint256 GetBestBlock() const;
uint256 GetBestAnchor(ShieldedType type) const;
HistoryIndex GetHistoryLength(uint32_t epochId) const;
HistoryNode GetHistoryAt(uint32_t epochId, HistoryIndex index) const;
uint256 GetHistoryRoot(uint32_t epochId) const;
bool BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock,
const uint256 &hashSproutAnchor,
@ -92,7 +98,8 @@ public:
CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers);
CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap);
bool GetStats(CCoinsStats &stats) const;
};
@ -132,7 +139,9 @@ public:
bool WriteFlag(const std::string &name, bool fValue);
bool ReadFlag(const std::string &name, bool &fValue);
bool LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)> insertBlockIndex);
bool LoadBlockIndexGuts(
std::function<CBlockIndex*(const uint256&)> insertBlockIndex,
const CChainParams& chainParams);
};
#endif // BITCOIN_TXDB_H

src/zcash/History.cpp (new file, 129 lines)

@ -0,0 +1,129 @@
#include "zcash/History.hpp"
#include <stdexcept>
#include <boost/foreach.hpp>
#include "serialize.h"
#include "streams.h"
#include "uint256.h"
#include "librustzcash.h"
namespace libzcash {
void HistoryCache::Extend(const HistoryNode &leaf) {
appends[length++] = leaf;
}
void HistoryCache::Truncate(HistoryIndex newLength) {
// Remove any to-be-appended nodes beyond the new length. The array representation is
// zero-indexed, and HistoryIndex is unsigned, so we handle the truncate-to-zero case
// separately.
if (newLength > 0) {
for (HistoryIndex idx = length; idx >= newLength; idx--) {
appends.erase(idx);
}
} else {
appends.clear();
}
length = newLength;
// We track how far back the current update reaches into the old state, so that we
// can later update everything starting from `updateDepth`.
//
// Imagine we rolled two blocks back and then put another 3 blocks on top of the
// rolled-back state. In that case `updateDepth` will be H-3, while `length` will be
// H (where H is the final chain height after the operation). So we know that the
// history entries in the range H-3..H are expected to be pushed into the database
// to replace/append to the persistent nodes there.
if (updateDepth > length) updateDepth = length;
}
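The rollback example in the comment above can be made concrete with a toy model of the cache (a Python sketch of the same semantics, not the shipped class):

class ToyHistoryCache:
    def __init__(self, persisted_length):
        self.appends = {}                     # dirty nodes, keyed by index
        self.length = persisted_length
        self.update_depth = persisted_length  # nothing dirty yet

    def extend(self, node):
        self.appends[self.length] = node
        self.length += 1

    def truncate(self, new_length):
        for idx in range(new_length, self.length):
            self.appends.pop(idx, None)
        self.length = new_length
        self.update_depth = min(self.update_depth, new_length)

cache = ToyHistoryCache(persisted_length=10)
cache.truncate(8)             # roll two entries back
for node in ("a", "b", "c"):
    cache.extend(node)        # then append three new ones
assert (cache.length, cache.update_depth) == (11, 8)
# on flush, entries 8..10 replace/extend the persisted nodes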
HistoryNode NewNode(
uint256 subtreeCommitment,
uint32_t startTime,
uint32_t endTime,
uint32_t startTarget,
uint32_t endTarget,
uint256 startSaplingRoot,
uint256 endSaplingRoot,
uint256 subtreeTotalWork,
uint64_t startHeight,
uint64_t endHeight,
uint64_t saplingTxCount
)
{
CDataStream buf(SER_DISK, 0);
HistoryNode result;
buf << subtreeCommitment;
buf << startTime;
buf << endTime;
buf << startTarget;
buf << endTarget;
buf << startSaplingRoot;
buf << endSaplingRoot;
buf << subtreeTotalWork;
buf << COMPACTSIZE(startHeight);
buf << COMPACTSIZE(endHeight);
buf << COMPACTSIZE(saplingTxCount);
std::copy(buf.begin(), buf.end(), result.begin());
return result;
}
HistoryNode NewLeaf(
uint256 commitment,
uint32_t time,
uint32_t target,
uint256 saplingRoot,
uint256 totalWork,
uint64_t height,
uint64_t saplingTxCount
) {
return NewNode(
commitment,
time,
time,
target,
target,
saplingRoot,
saplingRoot,
totalWork,
height,
height,
saplingTxCount
);
}
HistoryEntry NodeToEntry(const HistoryNode node, uint32_t left, uint32_t right) {
CDataStream buf(SER_DISK, 0);
HistoryEntry result;
uint8_t code = 0;
buf << code;
buf << left;
buf << right;
buf << node;
assert(buf.size() <= ENTRY_SERIALIZED_LENGTH);
std::copy(std::begin(buf), std::end(buf), result.bytes);
return result;
}
HistoryEntry LeafToEntry(const HistoryNode node) {
CDataStream buf(SER_DISK, 0);
HistoryEntry result;
uint8_t code = 1;
buf << code;
buf << node;
assert(buf.size() <= ENTRY_SERIALIZED_LENGTH);
std::copy(std::begin(buf), std::end(buf), result.bytes);
return result;
}
}
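The fixed lengths work out as follows: a serialized HistoryNode is 32+4+4+4+4+32+32+32 = 144 bytes of fixed-width fields plus three CompactSize heights of at most 9 bytes each, giving NODE_SERIALIZED_LENGTH = 171; an inner entry prepends a 1-byte code and two 4-byte child indices for ENTRY_SERIALIZED_LENGTH = 180, while a leaf entry is only 1 + 171 = 172 bytes, which is why both functions assert <= (the remaining bytes of the fixed-size array are unused).

assert 32 + 4 + 4 + 4 + 4 + 32 + 32 + 32 + 3 * 9 == 171  # node
assert 1 + 4 + 4 + 171 == 180                            # inner entry: code + left + right + node
assert 1 + 171 == 172                                    # leaf entry: code + node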

src/zcash/History.hpp (new file, 71 lines)

@ -0,0 +1,71 @@
#ifndef ZC_HISTORY_H_
#define ZC_HISTORY_H_
#include <stdexcept>
#include <unordered_map>
#include <boost/foreach.hpp>
#include "serialize.h"
#include "streams.h"
#include "uint256.h"
#include "librustzcash.h"
namespace libzcash {
const int NODE_SERIALIZED_LENGTH = 171;
typedef std::array<unsigned char, NODE_SERIALIZED_LENGTH> HistoryNode;
typedef uint64_t HistoryIndex;
class HistoryCache {
public:
// updates to the persistent(db) layer
std::unordered_map<HistoryIndex, HistoryNode> appends;
// current length of the history
HistoryIndex length;
// how far back into the previously persisted state
// the current update reaches
HistoryIndex updateDepth;
// current root of the history
uint256 root;
// current epoch of this history state
uint32_t epoch;
HistoryCache(HistoryIndex initialLength, uint256 initialRoot, uint32_t initialEpoch) :
length(initialLength), updateDepth(initialLength), root(initialRoot), epoch(initialEpoch) { };
HistoryCache() { }
// Extends current history update by one history node.
void Extend(const HistoryNode &leaf);
// Truncates current history to the new length.
void Truncate(HistoryIndex newLength);
};
// New history node with metadata based on block state.
HistoryNode NewLeaf(
uint256 commitment,
uint32_t time,
uint32_t target,
uint256 saplingRoot,
uint256 totalWork,
uint64_t height,
uint64_t saplingTxCount
);
// Convert a history node to an inner tree entry (with child references)
HistoryEntry NodeToEntry(const HistoryNode node, uint32_t left, uint32_t right);
// Convert a history node to a leaf entry (an end node without children)
HistoryEntry LeafToEntry(const HistoryNode node);
}
typedef libzcash::HistoryCache HistoryCache;
typedef libzcash::HistoryIndex HistoryIndex;
typedef libzcash::HistoryNode HistoryNode;
#endif /* ZC_HISTORY_H_ */