Merge remote-tracking branch 'upstream/master' into bug/4119-rewind_detection

Commit 5a8f6f8058 by Kris Nuttycombe, 2020-04-22 14:58:11 -06:00
56 changed files with 2413 additions and 119 deletions


@ -155,7 +155,9 @@ endif
dist_bin_SCRIPTS = zcutil/fetch-params.sh
dist_noinst_SCRIPTS = autogen.sh zcutil/build-debian-package.sh zcutil/build.sh
EXTRA_DIST = $(top_srcdir)/share/genbuild.sh qa/pull-tester/rpc-tests.sh qa/rpc-tests qa/zcash $(DIST_DOCS) $(BIN_CHECKS)
RUST_DIST = $(top_srcdir)/.cargo $(top_srcdir)/Cargo.toml $(top_srcdir)/Cargo.lock rust-toolchain
EXTRA_DIST = $(top_srcdir)/share/genbuild.sh qa/pull-tester/rpc-tests.sh qa/rpc-tests qa/zcash $(DIST_DOCS) $(BIN_CHECKS) $(RUST_DIST)
install-exec-hook:
mv $(DESTDIR)$(bindir)/fetch-params.sh $(DESTDIR)$(bindir)/zcash-fetch-params


@ -1,4 +1,4 @@
Zcash 2.1.1-1
Zcash 2.1.2-rc1
<img align="right" width="120" height="80" src="doc/imgs/logo.png">
===========


@ -2,8 +2,8 @@ dnl require autoconf 2.60 (AS_ECHO/AS_ECHO_N)
AC_PREREQ([2.60])
define(_CLIENT_VERSION_MAJOR, 2)
define(_CLIENT_VERSION_MINOR, 1)
define(_CLIENT_VERSION_REVISION, 1)
define(_CLIENT_VERSION_REVISION, 2)
define(_CLIENT_VERSION_BUILD, 51)
define(_CLIENT_VERSION_BUILD, 25)
define(_ZC_BUILD_VAL, m4_if(m4_eval(_CLIENT_VERSION_BUILD < 25), 1, m4_incr(_CLIENT_VERSION_BUILD), m4_eval(_CLIENT_VERSION_BUILD < 50), 1, m4_eval(_CLIENT_VERSION_BUILD - 24), m4_eval(_CLIENT_VERSION_BUILD == 50), 1, , m4_eval(_CLIENT_VERSION_BUILD - 50)))
define(_CLIENT_VERSION_SUFFIX, m4_if(m4_eval(_CLIENT_VERSION_BUILD < 25), 1, _CLIENT_VERSION_REVISION-beta$1, m4_eval(_CLIENT_VERSION_BUILD < 50), 1, _CLIENT_VERSION_REVISION-rc$1, m4_eval(_CLIENT_VERSION_BUILD == 50), 1, _CLIENT_VERSION_REVISION, _CLIENT_VERSION_REVISION-$1)))
define(_CLIENT_VERSION_IS_RELEASE, true)


@ -1,3 +1,9 @@
zcash (2.1.2~rc1) stable; urgency=medium
* 2.1.2-rc1 release.
-- Electric Coin Company <team@electriccoin.co> Wed, 15 Apr 2020 17:47:45 -0600
zcash (2.1.1+1) stable; urgency=critical
* 2.1.1-1 release.


@ -1,5 +1,5 @@
---
name: "zcash-2.1.1-1"
name: "zcash-2.1.2-rc1"
enable_cache: true
distro: "debian"
suites:


@ -1,9 +1,9 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.11. .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.13.
.TH ZCASH-CLI "1" "February 2020" "zcash-cli v2.1.1-1" "User Commands" .TH ZCASH-CLI "1" "April 2020" "zcash-cli v2.1.2-rc1" "User Commands"
.SH NAME .SH NAME
zcash-cli \- manual page for zcash-cli v2.1.1-1 zcash-cli \- manual page for zcash-cli v2.1.2-rc1
.SH DESCRIPTION .SH DESCRIPTION
Zcash RPC client version v2.1.1\-1 Zcash RPC client version v2.1.2\-rc1
.PP .PP
In order to ensure you are adequately protecting your privacy when using Zcash, In order to ensure you are adequately protecting your privacy when using Zcash,
please see <https://z.cash/support/security/>. please see <https://z.cash/support/security/>.


@ -1,9 +1,9 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.11. .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.13.
.TH ZCASH-TX "1" "February 2020" "zcash-tx v2.1.1-1" "User Commands" .TH ZCASH-TX "1" "April 2020" "zcash-tx v2.1.2-rc1" "User Commands"
.SH NAME .SH NAME
zcash-tx \- manual page for zcash-tx v2.1.1-1 zcash-tx \- manual page for zcash-tx v2.1.2-rc1
.SH DESCRIPTION .SH DESCRIPTION
Zcash zcash\-tx utility version v2.1.1\-1 Zcash zcash\-tx utility version v2.1.2\-rc1
.SS "Usage:" .SS "Usage:"
.TP .TP
zcash\-tx [options] <hex\-tx> [commands] zcash\-tx [options] <hex\-tx> [commands]


@ -1,9 +1,9 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.11. .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.13.
.TH ZCASHD "1" "February 2020" "zcashd v2.1.1-1" "User Commands" .TH ZCASHD "1" "April 2020" "zcashd v2.1.2-rc1" "User Commands"
.SH NAME .SH NAME
zcashd \- manual page for zcashd v2.1.1-1 zcashd \- manual page for zcashd v2.1.2-rc1
.SH DESCRIPTION .SH DESCRIPTION
Zcash Daemon version v2.1.1\-1 Zcash Daemon version v2.1.2\-rc1
.PP .PP
In order to ensure you are adequately protecting your privacy when using Zcash, In order to ensure you are adequately protecting your privacy when using Zcash,
please see <https://z.cash/support/security/>. please see <https://z.cash/support/security/>.
@ -67,12 +67,6 @@ Imports blocks from external blk000??.dat file on startup
.IP
Keep at most <n> unconnectable transactions in memory (default: 100)
.HP
\fB\-maxtimeadjustment=\fR<n>
.IP
Maximum allowed median peer time offset adjustment, in seconds. Local
perspective of time may be influenced by peers forward or backward by
this amount. (default: 0 seconds, maximum: 1500 seconds)
.HP
\fB\-par=\fR<n>
.IP
Set the number of script verification threads (\fB\-16\fR to 16, 0 = auto, <0 =
@ -99,6 +93,11 @@ Rebuild block chain index from current blk000??.dat files on startup
Create new files with system default permissions, instead of umask 077
(only effective with disabled wallet functionality)
.HP
\fB\-txexpirynotify=\fR<cmd>
.IP
Execute command when transaction expires (%s in cmd is replaced by
transaction id)
.HP
\fB\-txindex\fR
.IP
Maintain a full transaction index, used by the getrawtransaction rpc
@ -346,7 +345,8 @@ optional). If <category> is not supplied or if <category> = 1, output
all debugging information. <category> can be: addrman, alert, bench,
coindb, db, estimatefee, http, libevent, lock, mempool, net,
partitioncheck, pow, proxy, prune, rand, reindex, rpc, selectcoins, tor,
zmq, zrpc, zrpcunsafe (implies zrpc).
zmq, zrpc, zrpcunsafe (implies zrpc). For multiple specific categories
use \fB\-debug=\fR<category> multiple times.
.HP
\fB\-experimentalfeatures\fR
.IP


@ -4,6 +4,69 @@ release-notes at release time)
Notable changes
===============
Network Upgrade 3: Heartwood
----------------------------
The code preparations for the Heartwood network upgrade are finished and
included in this release. The following ZIPs are being deployed:
- [ZIP 213: Shielded Coinbase](https://zips.z.cash/zip-0213)
- [ZIP 221: FlyClient - Consensus-Layer Changes](https://zips.z.cash/zip-0221)
Heartwood will activate on testnet at height 903800, and can also be activated
at a specific height in regtest mode by setting the config option
`-nuparams=f5b9230b:HEIGHT`.
As a reminder, because the Heartwood activation height is not yet specified for
mainnet, version 2.1.2 will behave similarly to other pre-Heartwood releases
even after a future activation of Heartwood on the network. Upgrading from 2.1.2
will be required in order to follow the Heartwood network upgrade on mainnet.
See [ZIP 250](https://zips.z.cash/zip-0250) for additional information about the
deployment process for Heartwood.
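As a rough illustration (not something shipped in this release), a regtest node can be brought up with Heartwood already active by passing the same `-nuparams` flags that the new `feature_zip221.py` test uses; the class name and activation heights below are examples only:
```
#!/usr/bin/env python3
# Illustrative sketch: activate Heartwood at a chosen regtest height using the
# existing qa/rpc-tests test_framework helpers. Heights here are examples.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import initialize_chain_clean, start_nodes

class HeartwoodRegtestExample(BitcoinTestFramework):
    def setup_chain(self):
        initialize_chain_clean(self.options.tmpdir, 2)

    def setup_nodes(self):
        # Blossom at height 1, Heartwood at height 10 (illustrative heights).
        return start_nodes(2, self.options.tmpdir, extra_args=[[
            '-nuparams=2bb40e60:1',   # Blossom
            '-nuparams=f5b9230b:10',  # Heartwood
        ]] * 2)

    def run_test(self):
        # Mine past the Heartwood activation height on regtest.
        self.nodes[0].generate(10)
        self.sync_all()

if __name__ == '__main__':
    HeartwoodRegtestExample().main()
```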
### Mining to Sapling addresses
Miners and mining pools that wish to test the new "shielded coinbase" support on
the Heartwood testnet can generate a new Sapling address with `z_getnewaddress`,
add the config option `mineraddress=SAPLING_ADDRESS` to their `zcash.conf` file,
and then restart their `zcashd` node. `getblocktemplate` will then return
coinbase transactions containing a shielded miner output.
Note that `mineraddress` should only be set to a Sapling address after the
Heartwood network upgrade has activated; setting a Sapling address prior to
Heartwood activation will cause `getblocktemplate` to return block templates
that cannot be mined.
Sapling viewing keys support
----------------------------
Support for Sapling viewing keys (specifically, Sapling extended full viewing
keys, as described in [ZIP 32](https://zips.z.cash/zip-0032)), has been added to
the wallet. Nodes will track both sent and received transactions for any Sapling
addresses associated with the imported Sapling viewing keys.
- Use the `z_exportviewingkey` RPC method to obtain the viewing key for a
shielded address in a node's wallet. For Sapling addresses, these always begin
with "zxviews" (or "zxviewtestsapling" for testnet addresses).
- Use `z_importviewingkey` to import a viewing key into another node. Imported
Sapling viewing keys will be stored in the wallet, and remembered across
restarts.
- `z_getbalance` will show the balance of a Sapling address associated with an
imported Sapling viewing key. Balances for Sapling viewing keys will be
included in the output of `z_gettotalbalance` when the `includeWatchonly`
parameter is set to `true`.
- RPC methods for viewing shielded transaction information (such as
`z_listreceivedbyaddress`) will return information for Sapling addresses
associated with imported Sapling viewing keys.
Details about what information can be viewed with these Sapling viewing keys,
and what guarantees you have about that information, can be found in
[ZIP 310](https://zips.z.cash/zip-0310).
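For example, the flow described above might look like this from the RPC test framework (a sketch only; the `z_gettotalbalance` argument order is assumed to be the usual `(minconf, includeWatchonly)`):
```
# Illustrative sketch: export a Sapling viewing key from one node's wallet and
# import it into another as a watch-only address, inside a run_test() with at
# least two nodes.
def demo_viewing_keys(nodes):
    # Node 0 owns the spending key; generate a Sapling address there.
    addr = nodes[0].z_getnewaddress('sapling')

    # Export the extended full viewing key ("zxviews..." on mainnet,
    # "zxviewtestsapling..." on testnet).
    vkey = nodes[0].z_exportviewingkey(addr)

    # Import it into node 1; the call reports the key type and the
    # corresponding payment address.
    result = nodes[1].z_importviewingkey(vkey)
    assert result['type'] == 'sapling'
    print(result['address'])

    # Node 1 can now see the balance of the watched address, and it is
    # counted by z_gettotalbalance when includeWatchonly is true
    # (positional arguments assumed to be (minconf, includeWatchonly)).
    print(nodes[1].z_getbalance(addr))
    print(nodes[1].z_gettotalbalance(1, True))
```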
Removal of time adjustment and the -maxtimeadjustment= option
-------------------------------------------------------------
@ -19,8 +82,8 @@ This effectively disabled time adjustment; however, a `-maxtimeadjustment=`
option was provided to override this default.
As a simplification the time adjustment code has now been completely removed,
together with `-maxtimeadjustment=`. Node operators should instead simply
together with `-maxtimeadjustment=`. Node operators should instead ensure that
ensure that local time is set reasonably accurately.
their local time is set reasonably accurately.
If it appears that the node has a significantly different time than its peers,
a warning will still be logged and indicated on the metrics screen if enabled.
@ -53,6 +116,62 @@ this includes watch-only addresses linked to viewing keys imported with
`z_importviewingkey`, as well as addresses with spending keys (both generated
with `z_getnewaddress` and imported with `z_importkey`).
Better error messages for rejected transactions after network upgrades
----------------------------------------------------------------------
The Zcash network upgrade process includes several features designed to protect
users. One of these is the "consensus branch ID", which prevents transactions
created after a network upgrade has activated from being replayed on another
chain (that might have occurred due to, for example, a
[friendly fork](https://electriccoin.co/blog/future-friendly-fork/)). This is
known as "two-way replay protection", and is a core requirement by
[various](https://blog.bitgo.com/bitgos-approach-to-handling-a-hard-fork-71e572506d7d?gi=3b80c02e027e)
[members](https://trezor.io/support/general/hard-forks/) of the cryptocurrency
ecosystem for supporting "hard fork"-style changes like our network upgrades.
One downside of the way replay protection is implemented in Zcash is that there
is no visible difference between a transaction being rejected by a `zcashd` node
due to targeting a different branch, and being rejected due to an invalid
signature. This has caused issues in the past when a user had not upgraded their
wallet software, or when a wallet lacked support for the new network upgrade's
consensus branch ID; the resulting error messages when users tried to create
transactions were non-intuitive, and particularly cryptic for transparent
transactions.
Starting from this release, `zcashd` nodes will re-verify invalid transparent
and Sprout signatures against the consensus branch ID from before the most
recent network upgrade. If the signature then becomes valid, the transaction
will be rejected with the error message `old-consensus-branch-id`. This error
can be handled specifically by wallet providers to inform the user that they
need to upgrade their wallet software.
Wallet software can also automatically obtain the latest consensus branch ID
from their (up-to-date) `zcashd` node, by calling `getblockchaininfo` and
looking at `{'consensus': {'nextblock': BRANCH_ID, ...}, ...}` in the JSON
output.
Expired transactions notifications
----------------------------------
A new config option `-txexpirynotify` has been added that will cause `zcashd` to
execute a command when a transaction in the mempool expires. This can be used to
notify external systems about transaction expiry, similar to the existing
`-blocknotify` config option that notifies when the chain tip changes.
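For instance, a hypothetical notification handler (`expiry_notify.py` below is not part of this release) could be wired up with `-txexpirynotify="/path/to/expiry_notify.py %s"`, where `%s` is replaced by the expired transaction id:
```
#!/usr/bin/env python3
# Hypothetical -txexpirynotify handler: zcashd invokes this command with the
# expired txid substituted for %s. The log path is illustrative only.
import sys
import time

def main():
    txid = sys.argv[1] if len(sys.argv) > 1 else ''
    # Forward the expired txid to whatever external system needs it;
    # here we simply append it to a log file.
    with open('/tmp/expired-txids.log', 'a') as f:
        f.write('%s %s\n' % (time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), txid))

if __name__ == '__main__':
    main()
```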
RPC methods
-----------
- The `z_importkey` and `z_importviewingkey` RPC methods now return the type of
the imported spending or viewing key (`sprout` or `sapling`), and the
corresponding payment address.
- Negative heights are now permitted in `getblock` and `getblockhash`, to select
blocks backwards from the chain tip. A height of `-1` corresponds to the last
known valid block on the main chain.
- A new RPC method `getexperimentalfeatures` returns the list of enabled
experimental features.
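A short sketch of the new behaviour, written against a test-framework node handle (illustrative only):
```
# Illustrative sketch: exercising negative heights and getexperimentalfeatures.
def demo_new_rpcs(node):
    tip_hash = node.getbestblockhash()

    # A height of -1 selects the last known valid block on the main chain,
    # so both calls should agree with the current tip.
    assert node.getblockhash(-1) == tip_hash
    assert node.getblock("-1")["hash"] == tip_hash

    # Lists whichever experimental features the node was started with
    # (empty unless -experimentalfeatures options were supplied).
    print(node.getexperimentalfeatures())
```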
Build system
------------


@ -0,0 +1,615 @@
Notable changes
===============
Network Upgrade 3: Heartwood
----------------------------
The code preparations for the Heartwood network upgrade are finished and
included in this release. The following ZIPs are being deployed:
- [ZIP 213: Shielded Coinbase](https://zips.z.cash/zip-0213)
- [ZIP 221: FlyClient - Consensus-Layer Changes](https://zips.z.cash/zip-0221)
Heartwood will activate on testnet at height XXXXXX, and can also be activated
at a specific height in regtest mode by setting the config option
`-nuparams=f5b9230b:HEIGHT`.
As a reminder, because the Heartwood activation height is not yet specified for
mainnet, version 2.1.2 will behave similarly to other pre-Heartwood releases
even after a future activation of Heartwood on the network. Upgrading from 2.1.2
will be required in order to follow the Heartwood network upgrade on mainnet.
See [ZIP 250](https://zips.z.cash/zip-0250) for additional information about the
deployment process for Heartwood.
### Mining to Sapling addresses
Miners and mining pools that wish to test the new "shielded coinbase" support on
the Heartwood testnet can generate a new Sapling address with `z_getnewaddress`,
add the config option `mineraddress=SAPLING_ADDRESS` to their `zcash.conf` file,
and then restart their `zcashd` node. `getblocktemplate` will then return
coinbase transactions containing a shielded miner output.
Note that `mineraddress` should only be set to a Sapling address after the
Heartwood network upgrade has activated; setting a Sapling address prior to
Heartwood activation will cause `getblocktemplate` to return block templates
that cannot be mined.
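As a sketch of the workflow described above (the helper function and the data directory path are hypothetical, not part of this release):
```
# Hypothetical helper: generate a Sapling address via RPC and append the
# corresponding mineraddress= line to zcash.conf, to be picked up on the next
# zcashd restart. Paths are illustrative.
import os

def configure_shielded_coinbase(rpc, datadir=os.path.expanduser('~/.zcash')):
    # Only do this once Heartwood is active on the chain this node follows;
    # before activation, a Sapling mineraddress makes getblocktemplate return
    # unminable templates (see the note above).
    sapling_addr = rpc.z_getnewaddress('sapling')
    with open(os.path.join(datadir, 'zcash.conf'), 'a') as f:
        f.write('mineraddress=%s\n' % sapling_addr)
    return sapling_addr
```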
Sapling viewing keys support
----------------------------
Support for Sapling viewing keys (specifically, Sapling extended full viewing
keys, as described in [ZIP 32](https://zips.z.cash/zip-0032)), has been added to
the wallet. Nodes will track both sent and received transactions for any Sapling
addresses associated with the imported Sapling viewing keys.
- Use the `z_exportviewingkey` RPC method to obtain the viewing key for a
shielded address in a node's wallet. For Sapling addresses, these always begin
with "zxviews" (or "zxviewtestsapling" for testnet addresses).
- Use `z_importviewingkey` to import a viewing key into another node. Imported
Sapling viewing keys will be stored in the wallet, and remembered across
restarts.
- `z_getbalance` will show the balance of a Sapling address associated with an
imported Sapling viewing key. Balances for Sapling viewing keys will be
included in the output of `z_gettotalbalance` when the `includeWatchonly`
parameter is set to `true`.
- RPC methods for viewing shielded transaction information (such as
`z_listreceivedbyaddress`) will return information for Sapling addresses
associated with imported Sapling viewing keys.
Details about what information can be viewed with these Sapling viewing keys,
and what guarantees you have about that information, can be found in
[ZIP 310](https://zips.z.cash/zip-0310).
Removal of time adjustment and the -maxtimeadjustment= option
-------------------------------------------------------------
Prior to v2.1.1-1, `zcashd` would adjust the local time that it used by up
to 70 minutes, according to a median of the times sent by the first 200 peers
to connect to it. This mechanism was inherently insecure, since an adversary
making multiple connections to the node could effectively control its time
within that +/- 70 minute window (this is called a "timejacking attack").
In the v2.1.1-1 security release, in addition to other mitigations for
timejacking attacks, the maximum time adjustment was set to zero by default.
This effectively disabled time adjustment; however, a `-maxtimeadjustment=`
option was provided to override this default.
As a simplification the time adjustment code has now been completely removed,
together with `-maxtimeadjustment=`. Node operators should instead ensure that
their local time is set reasonably accurately.
If it appears that the node has a significantly different time than its peers,
a warning will still be logged and indicated on the metrics screen if enabled.
View shielded information in wallet transactions
------------------------------------------------
In previous `zcashd` versions, to obtain information about shielded transactions
you would use either the `z_listreceivedbyaddress` RPC method (which returns all
notes received by an address) or `z_listunspent` (which returns unspent notes,
optionally filtered by addresses). There were no RPC methods that directly
returned details about spends, or anything equivalent to the `gettransaction`
method (which returns transparent information about in-wallet transactions).
This release introduces a new RPC method `z_viewtransaction` to fill that gap.
Given the ID of a transaction in the wallet, it decrypts the transaction and
returns detailed shielded information for all decryptable new and spent notes,
including:
- The address that each note belongs to.
- Values in both decimal ZEC and zatoshis.
- The ID of the transaction that each spent note was received in.
- An `outgoing` flag on each new note, which will be `true` if the output is not
for an address in the wallet.
- A `memoStr` field for each new note, containing its text memo (if its memo
field contains a valid UTF-8 string).
Information will be shown for any address that appears in `z_listaddresses`;
this includes watch-only addresses linked to viewing keys imported with
`z_importviewingkey`, as well as addresses with spending keys (both generated
with `z_getnewaddress` and imported with `z_importkey`).
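For example (a sketch only; the per-note result layout is assumed from the description above, with only the `outgoing` and `memoStr` field names taken directly from these notes):
```
# Illustrative sketch: decrypting an in-wallet shielded transaction with the
# new z_viewtransaction RPC and printing what the wallet can see.
def show_shielded_tx(node, txid):
    tx = node.z_viewtransaction(txid)
    for note in tx.get('outputs', []):
        print('to %s: %s ZEC (outgoing=%s, memo=%r)' % (
            note.get('address'),
            note.get('value'),
            note.get('outgoing'),
            note.get('memoStr', ''),
        ))
    for spend in tx.get('spends', []):
        # Field names for spends are assumed; the notes only say that the id
        # of the transaction each spent note was received in is included.
        print('spend: %r' % (spend,))
```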
Better error messages for rejected transactions after network upgrades
----------------------------------------------------------------------
The Zcash network upgrade process includes several features designed to protect
users. One of these is the "consensus branch ID", which prevents transactions
created after a network upgrade has activated from being replayed on another
chain (that might have occurred due to, for example, a
[friendly fork](https://electriccoin.co/blog/future-friendly-fork/)). This is
known as "two-way replay protection", and is a core requirement by
[various](https://blog.bitgo.com/bitgos-approach-to-handling-a-hard-fork-71e572506d7d?gi=3b80c02e027e)
[members](https://trezor.io/support/general/hard-forks/) of the cryptocurrency
ecosystem for supporting "hard fork"-style changes like our network upgrades.
One downside of the way replay protection is implemented in Zcash is that there
is no visible difference between a transaction being rejected by a `zcashd` node
due to targeting a different branch, and being rejected due to an invalid
signature. This has caused issues in the past when a user had not upgraded their
wallet software, or when a wallet lacked support for the new network upgrade's
consensus branch ID; the resulting error messages when users tried to create
transactions were non-intuitive, and particularly cryptic for transparent
transactions.
Starting from this release, `zcashd` nodes will re-verify invalid transparent
and Sprout signatures against the consensus branch ID from before the most
recent network upgrade. If the signature then becomes valid, the transaction
will be rejected with the error message `old-consensus-branch-id`. This error
can be handled specifically by wallet providers to inform the user that they
need to upgrade their wallet software.
Wallet software can also automatically obtain the latest consensus branch ID
from their (up-to-date) `zcashd` node, by calling `getblockchaininfo` and
looking at `{'consensus': {'nextblock': BRANCH_ID, ...}, ...}` in the JSON
output.
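A wallet backend might use this as follows (illustrative sketch; the error-string matching is an assumption based on the rejection reason named above):
```
# Illustrative sketch: read the current consensus branch ID from an up-to-date
# zcashd, and recognise the new rejection reason when broadcasting a stale
# transaction.
def next_branch_id(node):
    info = node.getblockchaininfo()
    return info['consensus']['nextblock']

def broadcast(node, rawtx_hex):
    try:
        return node.sendrawtransaction(rawtx_hex)
    except Exception as e:
        if 'old-consensus-branch-id' in str(e):
            # The transaction was signed for the pre-upgrade branch; prompt
            # the user to upgrade their wallet software and re-sign.
            raise RuntimeError('wallet is out of date: re-sign for branch %s'
                               % next_branch_id(node))
        raise
```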
Expired transactions notifications
----------------------------------
A new config option `-txexpirynotify` has been added that will cause `zcashd` to
execute a command when a transaction in the mempool expires. This can be used to
notify external systems about transaction expiry, similar to the existing
`-blocknotify` config option that notifies when the chain tip changes.
RPC methods
-----------
- The `z_importkey` and `z_importviewingkey` RPC methods now return the type of
the imported spending or viewing key (`sprout` or `sapling`), and the
corresponding payment address.
- Negative heights are now permitted in `getblock` and `getblockhash`, to select
blocks backwards from the chain tip. A height of `-1` corresponds to the last
known valid block on the main chain.
- A new RPC method `getexperimentalfeatures` returns the list of enabled
experimental features.
Build system
------------
- The `--enable-lcov`, `--disable-tests`, and `--disable-mining` flags for
`zcutil/build.sh` have been removed. You can pass these flags instead by using
the `CONFIGURE_FLAGS` environment variable. For example, to enable coverage
instrumentation (thus enabling "make cov" to work), call:
```
CONFIGURE_FLAGS="--enable-lcov --disable-hardening" ./zcutil/build.sh
```
- The build system no longer defaults to verbose output. You can re-enable
verbose output with `./zcutil/build.sh V=1`
Changelog
=========
Alfredo Garcia (40):
remove SignatureHash from python rpc tests
add negative height to getblock
allow negative index to getblockhash
update docs
add additional tests to rpc_wallet_z_getnewaddress
change convention
change regex
Return address and type of imported key in z_importkey
Delete travis file
dedup decode keys and addresses
remove unused imports
add txexpirynotify
fix rpx_wallet_tests
remove debug noise from 2 gtests
make type and size a pair in DecodeAny arguments
add missing calls to DecodeAny
add destination wrappers
change tuples to classes
change cm() to cmu() in SaplingNote class
change the cm member of OutputDescription to cmu
change maybe_cm to maybe_cmu
add getexperimentalfeatures rpc call
refactor experimental features
make fInsightExplorer a local
add check_node_log utility function
remove space after new line
move check_node_log framework test to a new file
use check_node_log in turnstile.py
add stop_node argument to check_node_log, use it in shieldingcoinbase
change constructors
minor comment fix
preserve test semantics
remove unused import
multiple debug categories documentation
return address info in z_importviewingkey
add expected address check to tests
change unclear wording in z_import calls address returned
Lock with cs_main inside gtests that call chainActive.Height()
add -lightwalletd experimental option
compute more structures in mempool DynamicMemoryUsage
Carl Dong (1):
autoconf: Sane --enable-debug defaults.
Chun Kuan Lee (1):
Reset default -g -O2 flags when enable debug
Cory Fields (3):
bench: switch to std::chrono for time measurements
bench: prefer a steady clock if the resolution is no worse
build: Split hardening/fPIE options out
Dagur Valberg Johannsson (1):
Improve z_getnewaddress tests
Daira Hopwood (24):
Add missing cases for Blossom in ContextualCheckBlock tests.
Revert "Add -maxtimeadjustment with default of 0 instead of the 4200 seconds used in Bitcoin Core."
Remove uses of GetTimeOffset().
Replace time adjustment with warning only.
Update GetAdjustedTime() to GetTime().
Sort entries in zcash_gtest_SOURCES (other than test_tautology which is deliberately first).
Add release notes for removal of -maxtimeadjustment.
Resolve a race condition on `chainActive.Tip()` in initialization (introduced in #4379).
Setting a std::atomic variable in a signal handler only has defined behaviour if it is lock-free.
Add comment to `MilliSleep` documenting that it is an interruption point.
Exit init early if we request shutdown before having loaded the genesis block.
Fix typos/minor errors in comments, and wrap some lines.
Avoid a theoretical possibility of division-by-zero introduced in #4368.
Make the memo a mandatory argument for SendManyRecipient
Add a `zcutil/clean.sh` script that works (unlike `make clean`).
Split into clean.sh and distclean.sh.
Minor refactoring.
Executables end with .exe on Windows.
Avoid spurious error messages when cleaning up directories.
Address review comments.
Use `SA_RESTART` in `sa_flags` when setting up signal handlers.
Remove a redundant `rm -f` command.
Refer to altitude instead of height for history tree peaks
Address review comments: `target` and `depends/work` should be cleaned by clean.sh.
Dimitris Apostolou (8):
Fix Boost compilation on macOS
Remove libsnark preprocessor flags
Fix typo
End diff with LF character
Remove stale comment
Point at support community on Discord
Update documentation info
Fix typos
Eirik Ogilvie-Wigley (2):
Include shielded transaction data when calculating RecursiveDynamicUsage of transactions
Account for malloc overhead
Evan Klitzke (2):
Add --with-sanitizers option to configure
Make --enable-debug to pick better options
Gavin Andresen (2):
Simple benchmarking framework
Support very-fast-running benchmarks
Gregory Maxwell (4):
Avoid integer division in the benchmark inner-most loop.
Move GetWarnings and related globals to util.
Eliminate data races for strMiscWarning and fLargeWork*Found.
Move GetWarnings() into its own file.
Jack Grigg (94):
Revert "Add configure flags for enabling ASan/UBSan and TSan"
configure: Re-introduce additional sanitizer flags
RPC: z_viewtransaction
depends: Add utfcpp to dependencies
RPC: Display valid UTF-8 memos in z_viewtransaction
RPC: Use OutgoingViewingKeys to recover non-wallet Sapling outputs
test: Check z_viewtransaction output in wallet_listreceived RPC test
Benchmark Zcash verification operations
Simulate worst-case block verification
zcutil/build.sh: Remove lcov and mining flags
configure: Change default Proton to match build.sh
zcutil/build.sh: Turn off verbosity by default
Make -fwrapv conditional on --enable-debug=no
Move default -g flag into configure.ac behind --enable-debug=no
Add build system changes to release notes
test: Hard-code hex memo in wallet_listreceived for Python3 compatibility
test: Fix pyflakes warnings
bench: "Use" result of crypto_sign_verify_detached
Add test vectors for small-order Ed25519 pubkeys
Patch libsodium 1.0.15 pubkey validation onto 1.0.18
Patch libsodium 1.0.15 signature validation onto 1.0.18
Add release notes for z_viewtransaction
Deduplicate some wallet keystore logic
Move Sprout and Sapling address logic into separate files
Move ZIP 32 classes inside zcash/Address.hpp
SaplingFullViewingKey -> SaplingExtendedFullViewingKey in keystore maps
Remove default address parameter from Sapling keystore methods
test: Add test for CBasicKeyStore::AddSaplingFullViewingKey
Add encoding and decoding for Sapling extended full viewing keys
Add Sapling ExtFVK support to z_exportviewingkey
Add in-memory Sapling ExtFVK support to z_importviewingkey
Store imported Sapling ExtFVKs in wallet database
OutputDescriptionInfo::Build()
ZIP 213 consensus rules
Add support for Sapling addresses in -mineraddress
wallet: Include coinbase txs in Sapling note selection
Add regtest-only -nurejectoldversions option
test: Minor tweaks to comments in LibsodiumPubkeyValidation
test: RPC test for shielded coinbase
Adjust comments on ZIP 213 logic
Use DoS level constants and parameters for ZIP 213 rejections
test: Check that shielded coinbase can be spent to a t-address
init: Inform on error that -mineraddress must be Sapling or transparent
test: Explicitly check Sapling consensus rules apply to shielded coinbase
Migrate GitHub issue template to new format
Add GitHub issue templates for feature requests and UX reports
depends: Remove comments from libsodium signature validation patch
Bring in librustzcash crate
Bring in Cargo.lock from librustzcash repo
rust: Pin toolchain to 1.36.0, matching depends system
rust: Adjust Cargo.toml so that it compiles
Update .gitignore for Rust code
Replace librustzcash from depends system with src/rust
Move root of Rust crate into repo root
depends: Remove unused vendored crates
Fix Rust static library linking for Windows builds
test: Rename FakeCoinsViewDB -> ValidationFakeCoinsViewDB
test: Modify ValidationFakeCoinsViewDB to optionally contain a coin
test: Add missing parameter selection to Validation.ReceivedBlockTransactions
mempool: Check transparent signatures against the previous network upgrade
mempool: Remove duplicate consensusBranchId from AcceptToMemoryPool
test: Add Overwinter and Sapling support to GetValidTransaction() helper
consensus: Check JoinSplit signatures against the previous network upgrade
depends: Use Rust 1.42.0 toolchain
Bring in updates to librustzcash crate
depends: Define Rust target in a single location
depends: Hard-code Rust target for all Darwin hosts
Add ZIP 221 logic to block index
Add ZIP 221 support to miner and getblocktemplate
Implement ZIP 221 consensus rules
Return the correct root from librustzcash_mmr_{append, delete}
Use a C array for HistoryEntry instead of std::array
test: Verify ZIP 221 logic against reference implementation
build: Move cargo arguments into RUST_BUILD_OPTS
build: Correctly remove generated files from .cargo
test: Build Rust tests as part of qa/zcash/full_test_suite.py
build: Connect cargo verbosity to make verbosity
test: Assert that GetValidTransaction supports the given branch ID
Comment tweaks and cleanups
test: Add an extra assertion to feature_zip221.py
Remove unnecessary else case in CCoinsViewCache::PreloadHistoryTree
Improve documentation of CCoinsViewCache::PreloadHistoryTree
Truncate HistoryCache.appends correctly for zero-indexed entries
Comment clarifications and fixes
Make peak_pos zero-indexed in CCoinsViewCache::PreloadHistoryTree
test: Ignore timestamps in addressindex checks
test: Add a second Sapling note to WalletTests.ClearNoteWitnessCache
test: Run Equihash test vectors on both C++ and Rust validators
Pass the block height through to CheckEquihashSolution()
consensus: From Heartwood activation, use Rust Equihash validator
zcutil/make-release.py: Fix to run with Python 3
zcutil/make-release.py: Check for release dependencies
Update release notes for v2.1.2
zcutil/release-notes.py: Add Python 3 execution header
James O'Beirne (1):
Add basic coverage reporting for RPC tests
Jeremy Rubin (3):
Add Basic CheckQueue Benchmark
Address ryanofsky feedback on CCheckQueue benchmarks. Eliminated magic numbers, fixed scoping of vectors (and memory movement component of benchmark).
Add prevector destructor benchmark
Karl-Johan Alm (1):
Refactoring: Removed using namespace <xxx> from bench/ and test/ source files.
Larry Ruane (2):
zcutil/fetch-params.sh unneeded --testnet arg should warn user
util: CBufferedFile fixes
LitecoinZ (1):
Fix issue #3772
Marshall Gaucher (1):
Update qa/rpc-tests/addressindex.py
Matt Corallo (2):
Remove countMaskInv caching in bench framework
Require a steady clock for bench with at least micro precision
MeshCollider (3):
Fix race for mapBlockIndex in AppInitMain
Make fReindex atomic to avoid race
Consistent parameter names in txdb.h
NicolasDorier (1):
[qa] assert_start_raises_init_error
NikVolf (3):
push/pop history with tests
update chain history in ConnectBlock and DisconnectBlock
use iterative platform-independent log2i
Patrick Strateman (1):
Acquire lock to check for genesis block.
Pavel Janík (3):
Rewrite help texts for features enabled by default.
Ignore bench_bitcoin binary.
Prevent warning: variable 'x' is uninitialized
Philip Kaufmann (1):
[Trivial] ensure minimal header conventions
Pieter Wuille (3):
Benchmark rolling bloom filter
Introduce FastRandomContext::randbool()
FastRandom benchmark
Sean Bowe (9):
Initialize ThreadNotifyWallets before additional blocks are imported.
Handle case of fresh wallets in ThreadNotifyWallets.
Clarify comment
Add librustzcash tests to the full test suite.
Add release profile optimizations and turn off panic unwinding in librustzcash.
Minor typo fixes.
Simplification for MacOS in rust-test.
make-release.py: Versioning changes for 2.1.2-rc1.
make-release.py: Updated manpages for 2.1.2-rc1.
Taylor Hornby (15):
Make the equihash validator macro set its output to false when throwing an exception.
Add test for unused bits in the Equihash solution encoding.
Add Python script for checking if dependencies have updates.
Add GitHub API credential
Update list of dependencies to check
Wrap long lines
Cache releases to reduce network usage and improve performance
Make updatecheck.py compatible with python2
Have make clean delete temporary AFL build directory
Add AFL build directory to .gitignore
Have make clean delete AFL output directories.
Fix bug in updatecheck.py and add utfcpp to its dependency list
Fix typo in updatecheck.py
Update updatecheck.py with the new Rust dependencies and improve the error message in case the untracked dependency list becomes out of date.
Fix undefined behavior in CScriptNum
Wladimir J. van der Laan (7):
bench: Add crypto hash benchmarks
Kill insecure_random and associated global state
bench: Fix subtle counting issue when rescaling iteration count
bench: Add support for measuring CPU cycles
bench: Fix initialization order in registration
util: Don't set strMiscWarning on every exception
test_framework: detect failure of bitcoind startup
Yuri Zhykin (1):
bench: Added base58 encoding/decoding benchmarks
avnish (14):
changed block_test to BlockTests
changed test names from _ to CamelCase
changed header_size_is_expected to HeaderSizeIsExpected
changed "equihash_tests" to EquihashTests
changed founders_reward_test to FoundersRewardTest
changes tests to camelcase
chnged keystore_tests to KeystoreTests
changed libzcash_utils to LibzcashUtils
changed test to CamelCase
changed test to CamelCase
changed test to CamelCase
changed seven_eq_seven to SevenEqSeven
changed txid_tests to TxidTests
changed wallet_zkeys_test to WalletZkeysTest
avnish98 (1):
requested changes are rectified
ca333 (2):
update libsodium to v1.0.18
fix dead openssl download path
gladcow (4):
Show reindex state in metrics
Use processed file size as progress in metrics during reindex
Byte sizes format
Move reindex progress globals to metrics.h/cpp
Marshall Gaucher (74):
update /usr/bin/env; fix print conventions
update test_framework modules
Update rpc-test/test_framework to Py3 convention,modules,encoding
Update ignored testScriptsExt to Python3
Update python3 env path, remove python 2.7 assert
Update hexlify for encoding, update to py3 io module
Update py3 env path, remove py2 assert
Update py2 conventions to py3, remove py2 env and assert
Update py2 conventions to py3, update Decimal calls
Update py2 env path, remove py2 assert
Update py2 env path, remove py2 assert
Update py2 env path, remove py2 assert, update filter to return list for py3
Update py2 env path, remove py2 assert, update http module and assert encoding
Update cmp to py3 functions, update map return to list for py3
Standard py2 to py3 updates
Update py2 modules to py3, update encoding to be py3 compatible
Update to py3 conventions, update decimal calls to be consistent
Update to py3 conventions, update filter to return list
update to py3 conventions, update range to return list for py3
update to py3 convention, update execfile to py3 call
update to py3 conventions, update cmp to be py3 compatible, update map to return list for py3
update to py3 conventions, preserve ipv6 patch
update str cast to prevent address assert issues
clean up binascii call
Add keyerror execption
update to py3 env path
update to py3 conventions, update functions to be upstream consistent
update to py3 conventions, clean up code to be upstream consistent
update to py3 encodings
update encoding, decoding, serialize funcs for py3
Update type to be decimal
update to py3 conventions, BUG with last assert_equal
Update io modules for py3, ISSUE with create_transaction function
Update to py3, ISSUE with encoding
Update to py3, ISSUE with encoding
Update to py3, ISSUE with encoding in create_block
Update to py3, ISSUE with encoding in create_block
Clean up code not needed from upstream
update io module, fix py3 division, and string encoding
update remaining encoding issues, add pyblake2
Use more meaningful assert_equal from our original codebase
Clean up code from upstream we dont use
fix except bug for undefined url
Remove semi colons
make import urlparse module consistent,httplib update to py3
correct update to python3
clean-up imports, keep string notation consistent, remove spacing
clean up
Use upstream encoding for encodeDecimal
fix type issue
fix initialize statements for imports
clean up initiliaze statements from imports
update type for decimal 0
remove debug lines from prior commits
clean up to minimize diff
remove u encoding
Fix decimal 0 issues
Clean up import calls
clean up
clean up
clean up
fix url and port issue
cleanups and fixing odd casting
Update json to simplejson to remove unicode and str issue from py2 to py3
Update py3 division
fix pyflakes errors
clean up conventions and whitespace
fix string pattern issue on byte object
update comment regarding prior py2 exception
Fix remaining python3 conventions
Update remaining Python3 conventions
Updating remaining python3 conventions
Update #! env for python3
Update RPCs to support cross platform paths and libs
murrayn (1):
Add build support for 'gprof' profiling.
practicalswift (8):
build: Show enabled sanitizers in configure output
Add -ftrapv to DEBUG_CXXFLAGS when --enable-debug is used
Assert that what might look like a possible division by zero is actually unreachable
Replace boost::function with std::function (C++11)
Avoid static analyzer warnings regarding uninitialized arguments
Restore default format state of cout after printing with std::fixed/setprecision
Initialize recently introduced non-static class member lastCycles to zero in constructor
Replace boost::function with std::function (C++11)
ptschip (1):
Enable python tests for Native Windows
zancas (3):
update comment, to correctly specify number of methods injected
replace "virtual" with "override" in subclasses
Remove remaining instances of boost::function


@ -84,6 +84,7 @@ testScripts=(
'mining_shielded_coinbase.py'
'framework.py'
'sapling_rewind_check.py'
'feature_zip221.py'
);
testScriptsExt=(
'getblocktemplate_longpoll.py'

qa/rpc-tests/feature_zip221.py (new executable file, 134 lines)

@ -0,0 +1,134 @@
#!/usr/bin/env python3
# Copyright (c) 2020 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from test_framework.flyclient import (ZcashMMRNode, append, delete, make_root_commitment)
from test_framework.mininode import (HEARTWOOD_BRANCH_ID, CBlockHeader)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
bytes_to_hex_str,
hex_str_to_bytes,
initialize_chain_clean,
start_nodes,
)
from io import BytesIO
NULL_FIELD = "00" * 32
CHAIN_HISTORY_ROOT_VERSION = 2010200
# Verify block header field 'hashLightClientRoot' is set correctly for Heartwood blocks.
class Zip221Test(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir, extra_args=[[
'-nuparams=2bb40e60:1', # Blossom
'-nuparams=f5b9230b:10', # Heartwood
'-nurejectoldversions=false',
]] * 4)
def node_for_block(self, height):
block_header = CBlockHeader()
block_header.deserialize(BytesIO(hex_str_to_bytes(
self.nodes[0].getblock(str(height), 0))))
sapling_root = hex_str_to_bytes(
self.nodes[0].getblock(str(height))["finalsaplingroot"])[::-1]
return ZcashMMRNode.from_block(
block_header, height, sapling_root, 0, HEARTWOOD_BRANCH_ID)
def run_test(self):
self.nodes[0].generate(10)
self.sync_all()
# Verify all blocks up to and including Heartwood activation set
# hashChainHistoryRoot to null.
print("Verifying blocks up to and including Heartwood activation")
blockcount = self.nodes[0].getblockcount()
assert_equal(blockcount, 10)
for height in range(0, blockcount + 1):
blk = self.nodes[0].getblock(str(height))
assert_equal(blk["chainhistoryroot"], NULL_FIELD)
# Create the initial history tree, containing a single node.
root = self.node_for_block(10)
# Generate the first block that contains a non-null chain history root.
print("Verifying first non-null chain history root")
self.nodes[0].generate(1)
self.sync_all()
# Verify that hashChainHistoryRoot is set correctly.
assert_equal(
self.nodes[0].getblock('11')["chainhistoryroot"],
bytes_to_hex_str(make_root_commitment(root)[::-1]))
# Generate 9 more blocks on node 0, and verify their chain history roots.
print("Mining 9 blocks on node 0")
self.nodes[0].generate(9)
self.sync_all()
print("Verifying node 0's chain history")
for height in range(12, 21):
leaf = self.node_for_block(height - 1)
root = append(root, leaf)
assert_equal(
self.nodes[0].getblock(str(height))["chainhistoryroot"],
bytes_to_hex_str(make_root_commitment(root)[::-1]))
# The rest of the test only applies to Heartwood-aware node versions.
# Earlier versions won't serialize chain history roots in the block
# index, and splitting the network below requires restarting the nodes.
if self.nodes[0].getnetworkinfo()["version"] < CHAIN_HISTORY_ROOT_VERSION:
print("Node's block index is not Heartwood-aware, skipping reorg test")
return
# Split the network so we can test the effect of a reorg.
print("Splitting the network")
self.split_network()
# Generate 10 more blocks on node 0, and verify their chain history roots.
print("Mining 10 more blocks on node 0")
self.nodes[0].generate(10)
self.sync_all()
print("Verifying node 0's chain history")
for height in range(21, 31):
leaf = self.node_for_block(height - 1)
root = append(root, leaf)
assert_equal(
self.nodes[0].getblock(str(height))["chainhistoryroot"],
bytes_to_hex_str(make_root_commitment(root)[::-1]))
# Generate 11 blocks on node 2.
print("Mining alternate chain on node 2")
self.nodes[2].generate(11)
self.sync_all()
# Reconnect the nodes; node 0 will re-org to node 2's chain.
print("Re-joining the network so that node 0 reorgs")
self.join_network()
# Verify that node 0's chain history was correctly updated.
print("Deleting orphaned blocks from the expected chain history")
for _ in range(10):
root = delete(root)
print("Verifying that node 0 is now on node 1's chain history")
for height in range(21, 32):
leaf = self.node_for_block(height - 1)
root = append(root, leaf)
assert_equal(
self.nodes[2].getblock(str(height))["chainhistoryroot"],
bytes_to_hex_str(make_root_commitment(root)[::-1]))
if __name__ == '__main__':
Zip221Test().main()


@ -0,0 +1,176 @@
from pyblake2 import blake2b
import struct
from typing import (List, Optional)
from .mininode import (CBlockHeader, block_work_from_compact, ser_compactsize, ser_uint256)
def H(msg: bytes, consensusBranchId: int) -> bytes:
digest = blake2b(
digest_size=32,
person=b'ZcashHistory' + struct.pack("<I", consensusBranchId))
digest.update(msg)
return digest.digest()
class ZcashMMRNode():
# leaf nodes have no children
left_child: Optional['ZcashMMRNode']
right_child: Optional['ZcashMMRNode']
# commitments
hashSubtreeCommitment: bytes
nEarliestTimestamp: int
nLatestTimestamp: int
nEarliestTargetBits: int
nLatestTargetBits: int
hashEarliestSaplingRoot: bytes # left child's sapling root
hashLatestSaplingRoot: bytes # right child's sapling root
nSubTreeTotalWork: int # total difficulty accumulated within each subtree
nEarliestHeight: int
nLatestHeight: int
nSaplingTxCount: int # number of Sapling transactions in block
consensusBranchId: bytes
@classmethod
def from_block(Z, block: CBlockHeader, height, sapling_root, sapling_tx_count, consensusBranchId) -> 'ZcashMMRNode':
'''Create a leaf node from a block'''
node = Z()
node.left_child = None
node.right_child = None
node.hashSubtreeCommitment = ser_uint256(block.rehash())
node.nEarliestTimestamp = block.nTime
node.nLatestTimestamp = block.nTime
node.nEarliestTargetBits = block.nBits
node.nLatestTargetBits = block.nBits
node.hashEarliestSaplingRoot = sapling_root
node.hashLatestSaplingRoot = sapling_root
node.nSubTreeTotalWork = block_work_from_compact(block.nBits)
node.nEarliestHeight = height
node.nLatestHeight = height
node.nSaplingTxCount = sapling_tx_count
node.consensusBranchId = consensusBranchId
return node
def serialize(self) -> bytes:
'''serializes a node'''
buf = b''
buf += self.hashSubtreeCommitment
buf += struct.pack("<I", self.nEarliestTimestamp)
buf += struct.pack("<I", self.nLatestTimestamp)
buf += struct.pack("<I", self.nEarliestTargetBits)
buf += struct.pack("<I", self.nLatestTargetBits)
buf += self.hashEarliestSaplingRoot
buf += self.hashLatestSaplingRoot
buf += ser_uint256(self.nSubTreeTotalWork)
buf += ser_compactsize(self.nEarliestHeight)
buf += ser_compactsize(self.nLatestHeight)
buf += ser_compactsize(self.nSaplingTxCount)
return buf
def make_parent(
left_child: ZcashMMRNode,
right_child: ZcashMMRNode) -> ZcashMMRNode:
parent = ZcashMMRNode()
parent.left_child = left_child
parent.right_child = right_child
parent.hashSubtreeCommitment = H(
left_child.serialize() + right_child.serialize(),
left_child.consensusBranchId,
)
parent.nEarliestTimestamp = left_child.nEarliestTimestamp
parent.nLatestTimestamp = right_child.nLatestTimestamp
parent.nEarliestTargetBits = left_child.nEarliestTargetBits
parent.nLatestTargetBits = right_child.nLatestTargetBits
parent.hashEarliestSaplingRoot = left_child.hashEarliestSaplingRoot
parent.hashLatestSaplingRoot = right_child.hashLatestSaplingRoot
parent.nSubTreeTotalWork = left_child.nSubTreeTotalWork + right_child.nSubTreeTotalWork
parent.nEarliestHeight = left_child.nEarliestHeight
parent.nLatestHeight = right_child.nLatestHeight
parent.nSaplingTxCount = left_child.nSaplingTxCount + right_child.nSaplingTxCount
parent.consensusBranchId = left_child.consensusBranchId
return parent
def make_root_commitment(root: ZcashMMRNode) -> bytes:
'''Makes the root commitment for a blockheader'''
return H(root.serialize(), root.consensusBranchId)
def get_peaks(node: ZcashMMRNode) -> List[ZcashMMRNode]:
peaks: List[ZcashMMRNode] = []
# Get number of leaves.
leaves = node.nLatestHeight - (node.nEarliestHeight - 1)
assert(leaves > 0)
# Check if the number of leaves in this subtree is a power of two.
if (leaves & (leaves - 1)) == 0:
# This subtree is full, and therefore a single peak. This also covers
# the case of a single isolated leaf.
peaks.append(node)
else:
# This is one of the generated nodes; search within its children.
peaks.extend(get_peaks(node.left_child))
peaks.extend(get_peaks(node.right_child))
return peaks
def bag_peaks(peaks: List[ZcashMMRNode]) -> ZcashMMRNode:
'''
"Bag" a list of peaks, and return the final root
'''
root = peaks[0]
for i in range(1, len(peaks)):
root = make_parent(root, peaks[i])
return root
def append(root: ZcashMMRNode, leaf: ZcashMMRNode) -> ZcashMMRNode:
'''Append a leaf to an existing tree, return the new tree root'''
# recursively find a list of peaks in the current tree
peaks: List[ZcashMMRNode] = get_peaks(root)
merged: List[ZcashMMRNode] = []
# Merge peaks from right to left.
# This will produce a list of peaks in reverse order
current = leaf
for peak in peaks[::-1]:
current_leaves = current.nLatestHeight - (current.nEarliestHeight - 1)
peak_leaves = peak.nLatestHeight - (peak.nEarliestHeight - 1)
if current_leaves == peak_leaves:
current = make_parent(peak, current)
else:
merged.append(current)
current = peak
merged.append(current)
# finally, bag the merged peaks
return bag_peaks(merged[::-1])
def delete(root: ZcashMMRNode) -> ZcashMMRNode:
'''
Delete the rightmost leaf node from an existing MMR
Return the new tree root
'''
n_leaves = root.nLatestHeight - (root.nEarliestHeight - 1)
# if there were an odd number of leaves,
# simply replace root with left_child
if n_leaves & 1:
return root.left_child
# otherwise, we need to re-bag the peaks.
else:
# first peak
peaks = [root.left_child]
# we do this traversing the right (unbalanced) side of the tree
# we keep the left side (balanced subtree or leaf) of each subtree
# until we reach a leaf
subtree_root = root.right_child
while subtree_root.left_child:
peaks.append(subtree_root.left_child)
subtree_root = subtree_root.right_child
new_root = bag_peaks(peaks)
return new_root
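# Small aside (not part of this diff): get_peaks() above splits the tree into
# perfect binary subtrees, one per set bit of the leaf count, and append()
# merges peaks right-to-left exactly like binary addition with carries. This
# standalone helper illustrates the expected peak sizes for a given leaf count.
def expected_peak_sizes(n_leaves: int) -> list:
    '''Return the leaf counts of the peaks for an n-leaf MMR, largest first.'''
    sizes = []
    bit = 1 << (n_leaves.bit_length() - 1)
    while bit:
        if n_leaves & bit:
            sizes.append(bit)
        bit >>= 1
    return sizes

assert expected_peak_sizes(11) == [8, 2, 1]   # 11 = 0b1011 -> three peaks
assert expected_peak_sizes(16) == [16]        # a full tree is a single peak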


@ -83,6 +83,15 @@ def hash256(s):
return sha256(sha256(s))
def ser_compactsize(n):
if n < 253:
return struct.pack("B", n)
elif n < 0x10000:
return struct.pack("<BH", 253, n)
elif n < 0x100000000:
return struct.pack("<BI", 254, n)
return struct.pack("<BQ", 255, n)
def deser_string(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
@ -132,6 +141,11 @@ def uint256_from_compact(c):
return v
def block_work_from_compact(c):
target = uint256_from_compact(c)
return 2**256 // (target + 1)
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
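# Brief aside (not part of this diff): the new ser_compactsize() helper follows
# Bitcoin's CompactSize encoding (1, 3, 5 or 9 bytes depending on magnitude),
# and block_work_from_compact() mirrors the node's work definition
# 2**256 // (target + 1). Expected encodings at the thresholds, for illustration:
import struct
assert struct.pack("B", 252) == b'\xfc'                        # n < 253: one byte
assert struct.pack("<BH", 253, 253) == b'\xfd\xfd\x00'         # 253 <= n < 0x10000
assert struct.pack("<BI", 254, 0x10000) == b'\xfe\x00\x00\x01\x00'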


@ -167,7 +167,7 @@ def wait_for_bitcoind_start(process, url, i):
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unkown JSON RPC exception
raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir):
@ -541,7 +541,7 @@ def wait_and_assert_operationid_status_result(node, myopid, in_status='success',
break
time.sleep(1)
assert_true(result is not None, "timeout occured")
assert_true(result is not None, "timeout occurred")
status = result['status']
debug = os.getenv("PYTHON_DEBUG", "")


@ -140,6 +140,7 @@ LIBZCASH_H = \
zcash/address/sapling.hpp \
zcash/address/sprout.hpp \
zcash/address/zip32.h \
zcash/History.hpp \
zcash/JoinSplit.hpp \
zcash/Note.hpp \
zcash/prf.h \
@ -569,6 +570,7 @@ libzcash_a_SOURCES = \
zcash/address/sapling.cpp \
zcash/address/sprout.cpp \
zcash/address/zip32.cpp \
zcash/History.cpp \
zcash/JoinSplit.cpp \
zcash/Proof.cpp \
zcash/Note.cpp \
@ -614,7 +616,7 @@ CLEANFILES = leveldb/libleveldb.a leveldb/libmemenv.a *.gcda *.gcno */*.gcno wal
DISTCLEANFILES = obj/build.h
EXTRA_DIST = leveldb
EXTRA_DIST = leveldb rust
clean-local:
rm -f $(top_srcdir)/.cargo/config $(top_srcdir)/.cargo/.configured-for-*


@ -28,6 +28,7 @@ zcash_gtest_SOURCES += \
gtest/test_deprecation.cpp \
gtest/test_dynamicusage.cpp \
gtest/test_equihash.cpp \
gtest/test_history.cpp \
gtest/test_httprpc.cpp \
gtest/test_joinsplit.cpp \
gtest/test_keys.cpp \


@ -99,6 +99,7 @@ BITCOIN_TESTS =\
test/streams_tests.cpp \
test/test_bitcoin.cpp \
test/test_bitcoin.h \
test/test_random.h \
test/torcontrol_tests.cpp \
test/transaction_tests.cpp \
test/txvalidationcache_tests.cpp \


@ -16,6 +16,7 @@
static const int SPROUT_VALUE_VERSION = 1001400;
static const int SAPLING_VALUE_VERSION = 1010100;
static const int CHAIN_HISTORY_ROOT_VERSION = 2010200;
/**
* Maximum amount of time that a block timestamp is allowed to be ahead of the
@ -253,10 +254,26 @@ public:
//! Will be boost::none if nChainTx is zero.
boost::optional<CAmount> nChainSaplingValue;
//! Root of the Sapling commitment tree as of the end of this block.
//!
//! - For blocks prior to (not including) the Heartwood activation block, this is
//! always equal to hashLightClientRoot.
//! - For blocks including and after the Heartwood activation block, this is only set
//! once a block has been connected to the main chain, and will be null otherwise.
uint256 hashFinalSaplingRoot;
//! Root of the ZIP 221 history tree as of the end of the previous block.
//!
//! - For blocks prior to and including the Heartwood activation block, this is
//! always null.
//! - For blocks after (not including) the Heartwood activation block, this is
//! always equal to hashLightClientRoot.
uint256 hashChainHistoryRoot;
//! block header
int nVersion;
uint256 hashMerkleRoot;
uint256 hashFinalSaplingRoot;
uint256 hashLightClientRoot;
unsigned int nTime;
unsigned int nBits;
uint256 nNonce;
@ -289,7 +306,7 @@ public:
nVersion = 0;
hashMerkleRoot = uint256();
hashFinalSaplingRoot = uint256();
hashLightClientRoot = uint256();
nTime = 0;
nBits = 0;
nNonce = uint256();
@ -307,7 +324,7 @@ public:
nVersion = block.nVersion;
hashMerkleRoot = block.hashMerkleRoot;
hashFinalSaplingRoot = block.hashFinalSaplingRoot;
hashLightClientRoot = block.hashLightClientRoot;
nTime = block.nTime;
nBits = block.nBits;
nNonce = block.nNonce;
@ -339,7 +356,7 @@ public:
if (pprev) if (pprev)
block.hashPrevBlock = pprev->GetBlockHash(); block.hashPrevBlock = pprev->GetBlockHash();
block.hashMerkleRoot = hashMerkleRoot; block.hashMerkleRoot = hashMerkleRoot;
block.hashFinalSaplingRoot = hashFinalSaplingRoot; block.hashLightClientRoot = hashLightClientRoot;
block.nTime = nTime; block.nTime = nTime;
block.nBits = nBits; block.nBits = nBits;
block.nNonce = nNonce; block.nNonce = nNonce;
@ -461,7 +478,7 @@ public:
READWRITE(this->nVersion); READWRITE(this->nVersion);
READWRITE(hashPrev); READWRITE(hashPrev);
READWRITE(hashMerkleRoot); READWRITE(hashMerkleRoot);
READWRITE(hashFinalSaplingRoot); READWRITE(hashLightClientRoot);
READWRITE(nTime); READWRITE(nTime);
READWRITE(nBits); READWRITE(nBits);
READWRITE(nNonce); READWRITE(nNonce);
@ -479,6 +496,17 @@ public:
READWRITE(nSaplingValue); READWRITE(nSaplingValue);
} }
// Only read/write hashFinalSaplingRoot and hashChainHistoryRoot if the
// client version used to create this index was storing them.
if ((s.GetType() & SER_DISK) && (nVersion >= CHAIN_HISTORY_ROOT_VERSION)) {
READWRITE(hashFinalSaplingRoot);
READWRITE(hashChainHistoryRoot);
} else if (ser_action.ForRead()) {
// For block indices written before the client was Heartwood-aware,
// these are always identical.
hashFinalSaplingRoot = hashLightClientRoot;
}
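Concretely: a block index written by a client older than CHAIN_HISTORY_ROOT_VERSION has only hashLightClientRoot on disk, so on load the else branch copies that value into hashFinalSaplingRoot and leaves hashChainHistoryRoot null, matching the pre-Heartwood invariant documented in CBlockIndex above.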
// If you have just added new serialized fields above, remember to add // If you have just added new serialized fields above, remember to add
// them to CBlockTreeDB::LoadBlockIndexGuts() in txdb.cpp :) // them to CBlockTreeDB::LoadBlockIndexGuts() in txdb.cpp :)
} }
@ -489,7 +517,7 @@ public:
block.nVersion = nVersion; block.nVersion = nVersion;
block.hashPrevBlock = hashPrev; block.hashPrevBlock = hashPrev;
block.hashMerkleRoot = hashMerkleRoot; block.hashMerkleRoot = hashMerkleRoot;
block.hashFinalSaplingRoot = hashFinalSaplingRoot; block.hashLightClientRoot = hashLightClientRoot;
block.nTime = nTime; block.nTime = nTime;
block.nBits = nBits; block.nBits = nBits;
block.nNonce = nNonce; block.nNonce = nNonce;

View File

@ -321,8 +321,7 @@ public:
consensus.vUpgrades[Consensus::UPGRADE_BLOSSOM].hashActivationBlock = consensus.vUpgrades[Consensus::UPGRADE_BLOSSOM].hashActivationBlock =
uint256S("00367515ef2e781b8c9358b443b6329572599edd02c59e8af67db9785122f298"); uint256S("00367515ef2e781b8c9358b443b6329572599edd02c59e8af67db9785122f298");
consensus.vUpgrades[Consensus::UPGRADE_HEARTWOOD].nProtocolVersion = 170010; consensus.vUpgrades[Consensus::UPGRADE_HEARTWOOD].nProtocolVersion = 170010;
consensus.vUpgrades[Consensus::UPGRADE_HEARTWOOD].nActivationHeight = consensus.vUpgrades[Consensus::UPGRADE_HEARTWOOD].nActivationHeight = 903800;
Consensus::NetworkUpgrade::NO_ACTIVATION_HEIGHT;
// On testnet we activate this rule 6 blocks after Blossom activation. From block 299188 and // On testnet we activate this rule 6 blocks after Blossom activation. From block 299188 and
// prior to Blossom activation, the testnet minimum-difficulty threshold was 15 minutes (i.e. // prior to Blossom activation, the testnet minimum-difficulty threshold was 15 minutes (i.e.

View File

@ -17,8 +17,8 @@
//! These need to be macros, as clientversion.cpp's and bitcoin*-res.rc's voodoo requires it //! These need to be macros, as clientversion.cpp's and bitcoin*-res.rc's voodoo requires it
#define CLIENT_VERSION_MAJOR 2 #define CLIENT_VERSION_MAJOR 2
#define CLIENT_VERSION_MINOR 1 #define CLIENT_VERSION_MINOR 1
#define CLIENT_VERSION_REVISION 1 #define CLIENT_VERSION_REVISION 2
#define CLIENT_VERSION_BUILD 51 #define CLIENT_VERSION_BUILD 25
//! Set to true for release, false for prerelease or test build //! Set to true for release, false for prerelease or test build
#define CLIENT_VERSION_IS_RELEASE true #define CLIENT_VERSION_IS_RELEASE true

View File

@ -49,6 +49,10 @@ bool CCoinsView::GetCoins(const uint256 &txid, CCoins &coins) const { return fal
bool CCoinsView::HaveCoins(const uint256 &txid) const { return false; } bool CCoinsView::HaveCoins(const uint256 &txid) const { return false; }
uint256 CCoinsView::GetBestBlock() const { return uint256(); } uint256 CCoinsView::GetBestBlock() const { return uint256(); }
uint256 CCoinsView::GetBestAnchor(ShieldedType type) const { return uint256(); }; uint256 CCoinsView::GetBestAnchor(ShieldedType type) const { return uint256(); };
HistoryIndex CCoinsView::GetHistoryLength(uint32_t epochId) const { return 0; }
HistoryNode CCoinsView::GetHistoryAt(uint32_t epochId, HistoryIndex index) const { return HistoryNode(); }
uint256 CCoinsView::GetHistoryRoot(uint32_t epochId) const { return uint256(); }
bool CCoinsView::BatchWrite(CCoinsMap &mapCoins, bool CCoinsView::BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock, const uint256 &hashBlock,
const uint256 &hashSproutAnchor, const uint256 &hashSproutAnchor,
@ -56,7 +60,8 @@ bool CCoinsView::BatchWrite(CCoinsMap &mapCoins,
CAnchorsSproutMap &mapSproutAnchors, CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors, CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers, CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers) { return false; } CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap) { return false; }
bool CCoinsView::GetStats(CCoinsStats &stats) const { return false; } bool CCoinsView::GetStats(CCoinsStats &stats) const { return false; }
@ -69,6 +74,9 @@ bool CCoinsViewBacked::GetCoins(const uint256 &txid, CCoins &coins) const { retu
bool CCoinsViewBacked::HaveCoins(const uint256 &txid) const { return base->HaveCoins(txid); } bool CCoinsViewBacked::HaveCoins(const uint256 &txid) const { return base->HaveCoins(txid); }
uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); } uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); }
uint256 CCoinsViewBacked::GetBestAnchor(ShieldedType type) const { return base->GetBestAnchor(type); } uint256 CCoinsViewBacked::GetBestAnchor(ShieldedType type) const { return base->GetBestAnchor(type); }
HistoryIndex CCoinsViewBacked::GetHistoryLength(uint32_t epochId) const { return base->GetHistoryLength(epochId); }
HistoryNode CCoinsViewBacked::GetHistoryAt(uint32_t epochId, HistoryIndex index) const { return base->GetHistoryAt(epochId, index); }
uint256 CCoinsViewBacked::GetHistoryRoot(uint32_t epochId) const { return base->GetHistoryRoot(epochId); }
void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; } void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; }
bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock, const uint256 &hashBlock,
@ -77,7 +85,12 @@ bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins,
CAnchorsSproutMap &mapSproutAnchors, CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors, CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers, CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers) { return base->BatchWrite(mapCoins, hashBlock, hashSproutAnchor, hashSaplingAnchor, mapSproutAnchors, mapSaplingAnchors, mapSproutNullifiers, mapSaplingNullifiers); } CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap) {
return base->BatchWrite(mapCoins, hashBlock, hashSproutAnchor, hashSaplingAnchor,
mapSproutAnchors, mapSaplingAnchors, mapSproutNullifiers, mapSaplingNullifiers,
historyCacheMap);
}
bool CCoinsViewBacked::GetStats(CCoinsStats &stats) const { return base->GetStats(stats); } bool CCoinsViewBacked::GetStats(CCoinsStats &stats) const { return base->GetStats(stats); }
CCoinsKeyHasher::CCoinsKeyHasher() : salt(GetRandHash()) {} CCoinsKeyHasher::CCoinsKeyHasher() : salt(GetRandHash()) {}
@ -95,6 +108,7 @@ size_t CCoinsViewCache::DynamicMemoryUsage() const {
memusage::DynamicUsage(cacheSaplingAnchors) + memusage::DynamicUsage(cacheSaplingAnchors) +
memusage::DynamicUsage(cacheSproutNullifiers) + memusage::DynamicUsage(cacheSproutNullifiers) +
memusage::DynamicUsage(cacheSaplingNullifiers) + memusage::DynamicUsage(cacheSaplingNullifiers) +
memusage::DynamicUsage(historyCacheMap) +
cachedCoinsUsage; cachedCoinsUsage;
} }
@ -188,6 +202,31 @@ bool CCoinsViewCache::GetNullifier(const uint256 &nullifier, ShieldedType type)
return tmp; return tmp;
} }
HistoryIndex CCoinsViewCache::GetHistoryLength(uint32_t epochId) const {
HistoryCache& historyCache = SelectHistoryCache(epochId);
return historyCache.length;
}
HistoryNode CCoinsViewCache::GetHistoryAt(uint32_t epochId, HistoryIndex index) const {
HistoryCache& historyCache = SelectHistoryCache(epochId);
if (index >= historyCache.length) {
// Callers must limit history requests to the range
// 0..GetHistoryLength(epochId)-1.
throw std::runtime_error("Invalid history request");
}
if (index >= historyCache.updateDepth) {
return historyCache.appends[index];
}
return base->GetHistoryAt(epochId, index);
}
uint256 CCoinsViewCache::GetHistoryRoot(uint32_t epochId) const {
return SelectHistoryCache(epochId).root;
}
template<typename Tree, typename Cache, typename CacheIterator, typename CacheEntry> template<typename Tree, typename Cache, typename CacheIterator, typename CacheEntry>
void CCoinsViewCache::AbstractPushAnchor( void CCoinsViewCache::AbstractPushAnchor(
const Tree &tree, const Tree &tree,
@ -260,6 +299,262 @@ void CCoinsViewCache::BringBestAnchorIntoCache(
assert(GetSaplingAnchorAt(currentRoot, tree)); assert(GetSaplingAnchorAt(currentRoot, tree));
} }
void draftMMRNode(std::vector<uint32_t> &indices,
std::vector<HistoryEntry> &entries,
HistoryNode nodeData,
uint32_t alt,
uint32_t peak_pos)
{
HistoryEntry newEntry = alt == 0
? libzcash::LeafToEntry(nodeData)
// peak_pos - (1 << alt) is the array position of left child.
// peak_pos - 1 is the array position of right child.
: libzcash::NodeToEntry(nodeData, peak_pos - (1 << alt), peak_pos - 1);
indices.push_back(peak_pos);
entries.push_back(newEntry);
}
// Computes floor(log2(x)).
static inline uint32_t floor_log2(uint32_t x) {
assert(x > 0);
int log = 0;
while (x >>= 1) { ++log; }
return log;
}
// Computes the altitude of the largest subtree for an MMR with n nodes,
// which is floor(log2(n + 1)) - 1.
static inline uint32_t altitude(uint32_t n) {
return floor_log2(n + 1) - 1;
}
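A quick standalone sanity check of this arithmetic (an illustrative sketch, not part of the change; the _chk helpers simply duplicate the functions above so the sketch compiles on its own):

#include <cassert>
#include <cstdint>

// Illustrative copies of the helpers above, renamed so this sketch is self-contained.
static uint32_t floor_log2_chk(uint32_t x) { uint32_t log = 0; while (x >>= 1) { ++log; } return log; }
static uint32_t altitude_chk(uint32_t n) { return floor_log2_chk(n + 1) - 1; }

int main() {
    assert(altitude_chk(1) == 0);   // a single leaf is its own peak, at altitude 0
    assert(altitude_chk(7) == 2);   // 4 leaves form one perfect subtree of 7 nodes, peak at altitude 2
    assert(altitude_chk(25) == 3);  // 14 leaves store 25 nodes, so the highest peak sits at altitude 3
    assert(((1u << (altitude_chk(25) + 1)) - 2) == 14); // and that peak's array position is 14
    return 0;
}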
uint32_t CCoinsViewCache::PreloadHistoryTree(uint32_t epochId, bool extra, std::vector<HistoryEntry> &entries, std::vector<uint32_t> &entry_indices) {
auto treeLength = GetHistoryLength(epochId);
if (treeLength <= 0) {
throw std::runtime_error("Invalid PreloadHistoryTree state called - tree should exist");
} else if (treeLength == 1) {
entries.push_back(libzcash::LeafToEntry(GetHistoryAt(epochId, 0)));
entry_indices.push_back(0);
return 1;
}
uint32_t last_peak_pos = 0;
uint32_t last_peak_alt = 0;
uint32_t alt = 0;
uint32_t peak_pos = 0;
uint32_t total_peaks = 0;
// Assume the following example peak layout with 14 leaves, and 25 stored nodes in
// total (the "tree length"):
//
// P
// /\
// / \
// / \ \
// / \ \ Altitude
// _A_ \ \ 3
// _/ \_ B \ 2
// / \ / \ / \ C 1
// /\ /\ /\ /\ /\ /\ /\ 0
//
// We start by determining the altitude of the highest peak (A).
alt = altitude(treeLength);
// We determine the position of the highest peak (A) by pretending it is the right
// sibling in a tree, and its left-most leaf has position 0. Then the left sibling
// of (A) has position -1, and so we can "jump" to the peak's position by computing
// -1 + 2^(alt + 1) - 1.
peak_pos = (1 << (alt + 1)) - 2;
// Now that we have the position and altitude of the highest peak (A), we collect
// the remaining peaks (B, C). We navigate the peaks as if they were nodes in this
// Merkle tree (with additional imaginary nodes 1 and 2, that have positions beyond
// the MMR's length):
//
// / \
// / \
// / \
// / \
// A ==========> 1
// / \ // \
// _/ \_ B ==> 2
// /\ /\ /\ //
// / \ / \ / \ C
// /\ /\ /\ /\ /\ /\ /\
//
while (alt != 0) {
// If peak_pos is out of bounds of the tree, we compute the position of its left
// child, and drop down one level in the tree.
if (peak_pos >= treeLength) {
// left child, -2^alt
peak_pos = peak_pos - (1 << alt);
alt = alt - 1;
}
// If the peak exists, we take it and then continue with its right sibling.
if (peak_pos < treeLength) {
draftMMRNode(entry_indices, entries, GetHistoryAt(epochId, peak_pos), alt, peak_pos);
last_peak_pos = peak_pos;
last_peak_alt = alt;
// right sibling
peak_pos = peak_pos + (1 << (alt + 1)) - 1;
}
}
total_peaks = entries.size();
// Return early if we don't require extra nodes.
if (!extra) return total_peaks;
alt = last_peak_alt;
peak_pos = last_peak_pos;
// P
// /\
// / \
// / \ \
// / \ \
// _A_ \ \
// _/ \_ B \
// / \ / \ / \ C
// /\ /\ /\ /\ /\ /\ /\
// D E
//
// For the extra nodes needed for deletion, we do an extra pass down the right slope of the
// last peak and add those nodes plus their siblings. The extras would be (D, E) in the picture above.
while (alt > 0) {
uint32_t left_pos = peak_pos - (1 << alt);
uint32_t right_pos = peak_pos - 1;
alt = alt - 1;
// drafting left child
draftMMRNode(entry_indices, entries, GetHistoryAt(epochId, left_pos), alt, left_pos);
// drafting right child
draftMMRNode(entry_indices, entries, GetHistoryAt(epochId, right_pos), alt, right_pos);
// continuing on right slope
peak_pos = right_pos;
}
return total_peaks;
}
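Tracing the main loop by hand for the 14-leaf example above (treeLength = 25): alt starts at 3 and peak_pos at 14; since 14 < 25, peak A is drafted at 14 and peak_pos jumps to 14 + 2^4 - 1 = 29. 29 is out of bounds, so we drop to its left child 29 - 2^3 = 21 at alt = 2 and draft peak B there, jumping to 21 + 2^3 - 1 = 28. Dropping again gives 28 - 2^2 = 24 at alt = 1, where peak C is drafted, and the final drop from 27 lands at 25 with alt = 0, ending the loop with the three peaks (14, 21, 24) collected.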
HistoryCache& CCoinsViewCache::SelectHistoryCache(uint32_t epochId) const {
auto entry = historyCacheMap.find(epochId);
if (entry != historyCacheMap.end()) {
return entry->second;
} else {
auto cache = HistoryCache(
base->GetHistoryLength(epochId),
base->GetHistoryRoot(epochId),
epochId
);
return historyCacheMap.insert({epochId, cache}).first->second;
}
}
void CCoinsViewCache::PushHistoryNode(uint32_t epochId, const HistoryNode node) {
HistoryCache& historyCache = SelectHistoryCache(epochId);
if (historyCache.length == 0) {
// Special case: the first node goes straight into the cache, and its hash becomes the root.
historyCache.Extend(node);
if (librustzcash_mmr_hash_node(epochId, node.data(), historyCache.root.begin()) != 0) {
throw std::runtime_error("hashing node failed");
};
return;
}
std::vector<HistoryEntry> entries;
std::vector<uint32_t> entry_indices;
PreloadHistoryTree(epochId, false, entries, entry_indices);
uint256 newRoot;
std::array<HistoryNode, 32> appendBuf;
uint32_t appends = librustzcash_mmr_append(
epochId,
historyCache.length,
entry_indices.data(),
entries.data(),
entry_indices.size(),
node.data(),
newRoot.begin(),
appendBuf.data()->data()
);
for (size_t i = 0; i < appends; i++) {
historyCache.Extend(appendBuf[i]);
}
historyCache.root = newRoot;
}
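For intuition about how many nodes a single append can produce: an MMR over k leaves stores 2k - popcount(k) nodes in total, so pushing leaves one at a time grows the cache length as 1, 3, 4, 7, 8, ..., which is exactly the progression asserted in the new src/gtest/test_history.cpp further down in this diff.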
void CCoinsViewCache::PopHistoryNode(uint32_t epochId) {
HistoryCache& historyCache = SelectHistoryCache(epochId);
uint256 newRoot;
switch (historyCache.length) {
case 0:
// Callers are not expected to pop from an empty tree; they should
// switch to the previous epoch and pop history from there.
throw std::runtime_error("popping history node from empty history");
case 1:
// Just resetting tree to empty
historyCache.Truncate(0);
historyCache.root = uint256();
return;
case 2:
// - A tree with one leaf has length 1.
// - A tree with two leaves has length 3.
throw std::runtime_error("a history tree cannot have two nodes");
case 3:
// After removing a leaf from a tree with two leaves, we are left
// with a single-node tree, whose root is just the hash of that
// node.
if (librustzcash_mmr_hash_node(
epochId,
GetHistoryAt(epochId, 0).data(),
newRoot.begin()
) != 0) {
throw std::runtime_error("hashing node failed");
}
historyCache.Truncate(1);
historyCache.root = newRoot;
return;
default:
// This is a non-elementary pop, so use the full tree logic.
std::vector<HistoryEntry> entries;
std::vector<uint32_t> entry_indices;
uint32_t peak_count = PreloadHistoryTree(epochId, true, entries, entry_indices);
uint32_t numberOfDeletes = librustzcash_mmr_delete(
epochId,
historyCache.length,
entry_indices.data(),
entries.data(),
peak_count,
entries.size() - peak_count,
newRoot.begin()
);
historyCache.Truncate(historyCache.length - numberOfDeletes);
historyCache.root = newRoot;
return;
}
}
template<typename Tree, typename Cache, typename CacheEntry> template<typename Tree, typename Cache, typename CacheEntry>
void CCoinsViewCache::AbstractPopAnchor( void CCoinsViewCache::AbstractPopAnchor(
const uint256 &newrt, const uint256 &newrt,
@ -470,6 +765,35 @@ void BatchWriteAnchors(
} }
} }
void BatchWriteHistory(CHistoryCacheMap& historyCacheMap, CHistoryCacheMap& historyCacheMapIn) {
for (auto nextHistoryCache = historyCacheMapIn.begin(); nextHistoryCache != historyCacheMapIn.end(); nextHistoryCache++) {
auto historyCacheIn = nextHistoryCache->second;
auto epochId = nextHistoryCache->first;
auto historyCache = historyCacheMap.find(epochId);
if (historyCache != historyCacheMap.end()) {
// Delete the parent's entries from updateDepth onward; they are replaced below.
historyCache->second.Truncate(historyCacheIn.updateDepth);
// Replace/append new/updated entries. HistoryCache.Extend
// auto-indexes the nodes, so we need to extend in the same order as
// this cache is indexed.
for (size_t i = historyCacheIn.updateDepth; i < historyCacheIn.length; i++) {
historyCache->second.Extend(historyCacheIn.appends[i]);
}
// the lengths should now match
assert(historyCache->second.length == historyCacheIn.length);
// write current root
historyCache->second.root = historyCacheIn.root;
} else {
// Just insert the history cache into its parent
historyCacheMap.insert({epochId, historyCacheIn});
}
}
}
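As a concrete example of this merge: if the parent cache holds a tree of length 7 for some epoch and a child cache created from it (updateDepth = 7) was extended to length 8, the parent is truncated back to 7, the single new node at index 7 is re-applied via Extend, the lengths then match, and the child's root overwrites the parent's.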
bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins, bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlockIn, const uint256 &hashBlockIn,
const uint256 &hashSproutAnchorIn, const uint256 &hashSproutAnchorIn,
@ -477,7 +801,8 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins,
CAnchorsSproutMap &mapSproutAnchors, CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors, CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers, CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers) { CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMapIn) {
assert(!hasModifier); assert(!hasModifier);
for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) { for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end();) {
if (it->second.flags & CCoinsCacheEntry::DIRTY) { // Ignore non-dirty entries (optimization). if (it->second.flags & CCoinsCacheEntry::DIRTY) { // Ignore non-dirty entries (optimization).
@ -520,6 +845,8 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins,
::BatchWriteNullifiers(mapSproutNullifiers, cacheSproutNullifiers); ::BatchWriteNullifiers(mapSproutNullifiers, cacheSproutNullifiers);
::BatchWriteNullifiers(mapSaplingNullifiers, cacheSaplingNullifiers); ::BatchWriteNullifiers(mapSaplingNullifiers, cacheSaplingNullifiers);
::BatchWriteHistory(historyCacheMap, historyCacheMapIn);
hashSproutAnchor = hashSproutAnchorIn; hashSproutAnchor = hashSproutAnchorIn;
hashSaplingAnchor = hashSaplingAnchorIn; hashSaplingAnchor = hashSaplingAnchorIn;
hashBlock = hashBlockIn; hashBlock = hashBlockIn;
@ -527,12 +854,21 @@ bool CCoinsViewCache::BatchWrite(CCoinsMap &mapCoins,
} }
bool CCoinsViewCache::Flush() { bool CCoinsViewCache::Flush() {
bool fOk = base->BatchWrite(cacheCoins, hashBlock, hashSproutAnchor, hashSaplingAnchor, cacheSproutAnchors, cacheSaplingAnchors, cacheSproutNullifiers, cacheSaplingNullifiers); bool fOk = base->BatchWrite(cacheCoins,
hashBlock,
hashSproutAnchor,
hashSaplingAnchor,
cacheSproutAnchors,
cacheSaplingAnchors,
cacheSproutNullifiers,
cacheSaplingNullifiers,
historyCacheMap);
cacheCoins.clear(); cacheCoins.clear();
cacheSproutAnchors.clear(); cacheSproutAnchors.clear();
cacheSaplingAnchors.clear(); cacheSaplingAnchors.clear();
cacheSproutNullifiers.clear(); cacheSproutNullifiers.clear();
cacheSaplingNullifiers.clear(); cacheSaplingNullifiers.clear();
historyCacheMap.clear();
cachedCoinsUsage = 0; cachedCoinsUsage = 0;
return fOk; return fOk;
} }

View File

@ -17,6 +17,7 @@
#include <boost/foreach.hpp> #include <boost/foreach.hpp>
#include <boost/unordered_map.hpp> #include <boost/unordered_map.hpp>
#include "zcash/History.hpp"
#include "zcash/IncrementalMerkleTree.hpp" #include "zcash/IncrementalMerkleTree.hpp"
/** /**
@ -321,6 +322,7 @@ typedef boost::unordered_map<uint256, CCoinsCacheEntry, CCoinsKeyHasher> CCoinsM
typedef boost::unordered_map<uint256, CAnchorsSproutCacheEntry, CCoinsKeyHasher> CAnchorsSproutMap; typedef boost::unordered_map<uint256, CAnchorsSproutCacheEntry, CCoinsKeyHasher> CAnchorsSproutMap;
typedef boost::unordered_map<uint256, CAnchorsSaplingCacheEntry, CCoinsKeyHasher> CAnchorsSaplingMap; typedef boost::unordered_map<uint256, CAnchorsSaplingCacheEntry, CCoinsKeyHasher> CAnchorsSaplingMap;
typedef boost::unordered_map<uint256, CNullifiersCacheEntry, CCoinsKeyHasher> CNullifiersMap; typedef boost::unordered_map<uint256, CNullifiersCacheEntry, CCoinsKeyHasher> CNullifiersMap;
typedef boost::unordered_map<uint32_t, HistoryCache> CHistoryCacheMap;
struct CCoinsStats struct CCoinsStats
{ {
@ -362,6 +364,15 @@ public:
//! Get the current "tip" or the latest anchored tree root in the chain //! Get the current "tip" or the latest anchored tree root in the chain
virtual uint256 GetBestAnchor(ShieldedType type) const; virtual uint256 GetBestAnchor(ShieldedType type) const;
//! Get the current chain history length (which should be roughly chain height x2)
virtual HistoryIndex GetHistoryLength(uint32_t epochId) const;
//! Get history node at specified index
virtual HistoryNode GetHistoryAt(uint32_t epochId, HistoryIndex index) const;
//! Get current history root
virtual uint256 GetHistoryRoot(uint32_t epochId) const;
//! Do a bulk modification (multiple CCoins changes + BestBlock change). //! Do a bulk modification (multiple CCoins changes + BestBlock change).
//! The passed mapCoins can be modified. //! The passed mapCoins can be modified.
virtual bool BatchWrite(CCoinsMap &mapCoins, virtual bool BatchWrite(CCoinsMap &mapCoins,
@ -371,7 +382,8 @@ public:
CAnchorsSproutMap &mapSproutAnchors, CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors, CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers, CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers); CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap);
//! Calculate statistics about the unspent transaction output set //! Calculate statistics about the unspent transaction output set
virtual bool GetStats(CCoinsStats &stats) const; virtual bool GetStats(CCoinsStats &stats) const;
@ -396,6 +408,9 @@ public:
bool HaveCoins(const uint256 &txid) const; bool HaveCoins(const uint256 &txid) const;
uint256 GetBestBlock() const; uint256 GetBestBlock() const;
uint256 GetBestAnchor(ShieldedType type) const; uint256 GetBestAnchor(ShieldedType type) const;
HistoryIndex GetHistoryLength(uint32_t epochId) const;
HistoryNode GetHistoryAt(uint32_t epochId, HistoryIndex index) const;
uint256 GetHistoryRoot(uint32_t epochId) const;
void SetBackend(CCoinsView &viewIn); void SetBackend(CCoinsView &viewIn);
bool BatchWrite(CCoinsMap &mapCoins, bool BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock, const uint256 &hashBlock,
@ -404,7 +419,8 @@ public:
CAnchorsSproutMap &mapSproutAnchors, CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors, CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers, CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers); CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap);
bool GetStats(CCoinsStats &stats) const; bool GetStats(CCoinsStats &stats) const;
}; };
@ -451,6 +467,7 @@ protected:
mutable CAnchorsSaplingMap cacheSaplingAnchors; mutable CAnchorsSaplingMap cacheSaplingAnchors;
mutable CNullifiersMap cacheSproutNullifiers; mutable CNullifiersMap cacheSproutNullifiers;
mutable CNullifiersMap cacheSaplingNullifiers; mutable CNullifiersMap cacheSaplingNullifiers;
mutable CHistoryCacheMap historyCacheMap;
/* Cached dynamic memory usage for the inner CCoins objects. */ /* Cached dynamic memory usage for the inner CCoins objects. */
mutable size_t cachedCoinsUsage; mutable size_t cachedCoinsUsage;
@ -467,6 +484,9 @@ public:
bool HaveCoins(const uint256 &txid) const; bool HaveCoins(const uint256 &txid) const;
uint256 GetBestBlock() const; uint256 GetBestBlock() const;
uint256 GetBestAnchor(ShieldedType type) const; uint256 GetBestAnchor(ShieldedType type) const;
HistoryIndex GetHistoryLength(uint32_t epochId) const;
HistoryNode GetHistoryAt(uint32_t epochId, HistoryIndex index) const;
uint256 GetHistoryRoot(uint32_t epochId) const;
void SetBestBlock(const uint256 &hashBlock); void SetBestBlock(const uint256 &hashBlock);
bool BatchWrite(CCoinsMap &mapCoins, bool BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock, const uint256 &hashBlock,
@ -475,8 +495,8 @@ public:
CAnchorsSproutMap &mapSproutAnchors, CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors, CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers, CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers); CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap);
// Adds the tree to mapSproutAnchors (or mapSaplingAnchors based on the type of tree) // Adds the tree to mapSproutAnchors (or mapSaplingAnchors based on the type of tree)
// and sets the current commitment root to this root. // and sets the current commitment root to this root.
@ -489,6 +509,12 @@ public:
// Marks nullifiers for a given transaction as spent or not. // Marks nullifiers for a given transaction as spent or not.
void SetNullifiers(const CTransaction& tx, bool spent); void SetNullifiers(const CTransaction& tx, bool spent);
// Push MMR node history at the end of the history tree
void PushHistoryNode(uint32_t epochId, const HistoryNode node);
// Pop MMR node history from the end of the history tree
void PopHistoryNode(uint32_t epochId);
/** /**
* Return a pointer to CCoins in the cache, or NULL if not found. This is * Return a pointer to CCoins in the cache, or NULL if not found. This is
* more efficient than GetCoins. Modifications to other cache entries are * more efficient than GetCoins. Modifications to other cache entries are
@ -582,6 +608,18 @@ private:
const uint256 &currentRoot, const uint256 &currentRoot,
Tree &tree Tree &tree
); );
//! Preload history tree for further update.
//!
//! If extra = true, extra nodes for deletion are also preloaded.
//! This allows tail entries to be deleted from the preloaded tree without
//! any further database lookups.
//!
//! Returns number of peaks, not total number of loaded nodes.
uint32_t PreloadHistoryTree(uint32_t epochId, bool extra, std::vector<HistoryEntry> &entries, std::vector<uint32_t> &entry_indices);
//! Selects history cache for specified epoch.
HistoryCache& SelectHistoryCache(uint32_t epochId) const;
}; };
#endif // BITCOIN_COINS_H #endif // BITCOIN_COINS_H

View File

@ -8,7 +8,7 @@
// Deprecation policy: // Deprecation policy:
// * Shut down 16 weeks' worth of blocks after the estimated release block height. // * Shut down 16 weeks' worth of blocks after the estimated release block height.
// * A warning is shown during the 2 weeks' worth of blocks prior to shut down. // * A warning is shown during the 2 weeks' worth of blocks prior to shut down.
static const int APPROX_RELEASE_HEIGHT = 719034; static const int APPROX_RELEASE_HEIGHT = 798142;
static const int WEEKS_UNTIL_DEPRECATION = 16; static const int WEEKS_UNTIL_DEPRECATION = 16;
static const int DEPRECATION_HEIGHT = APPROX_RELEASE_HEIGHT + (WEEKS_UNTIL_DEPRECATION * 7 * 24 * 48); static const int DEPRECATION_HEIGHT = APPROX_RELEASE_HEIGHT + (WEEKS_UNTIL_DEPRECATION * 7 * 24 * 48);
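At the post-Blossom target spacing of 75 seconds there are 3600 / 75 = 48 blocks per hour, so the updated shut-off height works out to 798142 + 16 * 7 * 24 * 48 = 798142 + 129024 = 927166.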

src/gtest/test_history.cpp (new file, 171 lines)
View File

@ -0,0 +1,171 @@
#include <gtest/gtest.h>
#include "main.h"
#include "utiltest.h"
#include "zcash/History.hpp"
// Fake an empty view
class FakeCoinsViewDB : public CCoinsView {
public:
FakeCoinsViewDB() {}
bool GetSproutAnchorAt(const uint256 &rt, SproutMerkleTree &tree) const {
return false;
}
bool GetSaplingAnchorAt(const uint256 &rt, SaplingMerkleTree &tree) const {
return false;
}
bool GetNullifier(const uint256 &nf, ShieldedType type) const {
return false;
}
bool GetCoins(const uint256 &txid, CCoins &coins) const {
return false;
}
bool HaveCoins(const uint256 &txid) const {
return false;
}
uint256 GetBestBlock() const {
uint256 a;
return a;
}
uint256 GetBestAnchor(ShieldedType type) const {
uint256 a;
return a;
}
bool BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock,
const uint256 &hashSproutAnchor,
const uint256 &hashSaplingAnchor,
CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers,
CNullifiersMap saplingNullifiersMap) {
return false;
}
bool GetStats(CCoinsStats &stats) const {
return false;
}
HistoryIndex GetHistoryLength(uint32_t branchId) const {
return 0;
}
HistoryNode GetHistoryAt(uint32_t branchId, HistoryIndex index) const {
return HistoryNode();
}
};
HistoryNode getLeafN(uint64_t block_num) {
HistoryNode node = libzcash::NewLeaf(
uint256(),
block_num*10,
block_num*13,
uint256(),
uint256(),
block_num,
3
);
return node;
}
TEST(History, Smoky) {
// Fake an empty view
FakeCoinsViewDB fakeDB;
CCoinsViewCache view(&fakeDB);
// Test initial value
EXPECT_EQ(view.GetHistoryLength(0), 0);
view.PushHistoryNode(1, getLeafN(1));
EXPECT_EQ(view.GetHistoryLength(1), 1);
view.PushHistoryNode(1, getLeafN(2));
EXPECT_EQ(view.GetHistoryLength(1), 3);
view.PushHistoryNode(1, getLeafN(3));
EXPECT_EQ(view.GetHistoryLength(1), 4);
view.PushHistoryNode(1, getLeafN(4));
uint256 h4Root = view.GetHistoryRoot(1);
EXPECT_EQ(view.GetHistoryLength(1), 7);
view.PushHistoryNode(1, getLeafN(5));
EXPECT_EQ(view.GetHistoryLength(1), 8);
view.PopHistoryNode(1);
EXPECT_EQ(view.GetHistoryLength(1), 7);
EXPECT_EQ(h4Root, view.GetHistoryRoot(1));
}
TEST(History, EpochBoundaries) {
// Fake an empty view
FakeCoinsViewDB fakeDB;
CCoinsViewCache view(&fakeDB);
view.PushHistoryNode(1, getLeafN(1));
EXPECT_EQ(view.GetHistoryLength(1), 1);
view.PushHistoryNode(1, getLeafN(2));
EXPECT_EQ(view.GetHistoryLength(1), 3);
view.PushHistoryNode(1, getLeafN(3));
EXPECT_EQ(view.GetHistoryLength(1), 4);
view.PushHistoryNode(1, getLeafN(4));
uint256 h4Root = view.GetHistoryRoot(1);
EXPECT_EQ(view.GetHistoryLength(1), 7);
view.PushHistoryNode(1, getLeafN(5));
EXPECT_EQ(view.GetHistoryLength(1), 8);
// New Epoch(2)
view.PushHistoryNode(2, getLeafN(6));
EXPECT_EQ(view.GetHistoryLength(1), 8);
EXPECT_EQ(view.GetHistoryLength(2), 1);
view.PushHistoryNode(2, getLeafN(7));
EXPECT_EQ(view.GetHistoryLength(1), 8);
EXPECT_EQ(view.GetHistoryLength(2), 3);
view.PushHistoryNode(2, getLeafN(8));
EXPECT_EQ(view.GetHistoryLength(1), 8);
EXPECT_EQ(view.GetHistoryLength(2), 4);
// Rolling epoch back to 1
view.PopHistoryNode(2);
EXPECT_EQ(view.GetHistoryLength(2), 3);
view.PopHistoryNode(2);
EXPECT_EQ(view.GetHistoryLength(2), 1);
EXPECT_EQ(view.GetHistoryLength(1), 8);
// And even rolling epoch 1 back a bit
view.PopHistoryNode(1);
EXPECT_EQ(view.GetHistoryLength(1), 7);
// And also rolling epoch 2 back to 0
view.PopHistoryNode(2);
EXPECT_EQ(view.GetHistoryLength(2), 0);
}

View File

@ -874,7 +874,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
struct sigaction sa; struct sigaction sa;
sa.sa_handler = HandleSIGTERM; sa.sa_handler = HandleSIGTERM;
sigemptyset(&sa.sa_mask); sigemptyset(&sa.sa_mask);
sa.sa_flags = 0; sa.sa_flags = SA_RESTART;
sigaction(SIGTERM, &sa, NULL); sigaction(SIGTERM, &sa, NULL);
sigaction(SIGINT, &sa, NULL); sigaction(SIGINT, &sa, NULL);
@ -883,7 +883,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
struct sigaction sa_hup; struct sigaction sa_hup;
sa_hup.sa_handler = HandleSIGHUP; sa_hup.sa_handler = HandleSIGHUP;
sigemptyset(&sa_hup.sa_mask); sigemptyset(&sa_hup.sa_mask);
sa_hup.sa_flags = 0; sa_hup.sa_flags = SA_RESTART;
sigaction(SIGHUP, &sa_hup, NULL); sigaction(SIGHUP, &sa_hup, NULL);
// Ignore SIGPIPE, otherwise it will bring the daemon down if the client closes unexpectedly // Ignore SIGPIPE, otherwise it will bring the daemon down if the client closes unexpectedly
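The sa_flags changes above switch both handlers from 0 to SA_RESTART, so slow system calls interrupted by SIGTERM, SIGINT or SIGHUP are automatically restarted after the handler runs instead of failing with EINTR.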

View File

@ -1740,7 +1740,7 @@ bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHea
return true; return true;
} }
bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams) bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, int nHeight, const Consensus::Params& consensusParams)
{ {
block.SetNull(); block.SetNull();
@ -1758,7 +1758,7 @@ bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus:
} }
// Check the header // Check the header
if (!(CheckEquihashSolution(&block, consensusParams) && if (!(CheckEquihashSolution(&block, nHeight, consensusParams) &&
CheckProofOfWork(block.GetHash(), block.nBits, consensusParams))) CheckProofOfWork(block.GetHash(), block.nBits, consensusParams)))
return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString()); return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString());
@ -1767,7 +1767,7 @@ bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus:
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams) bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{ {
if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), consensusParams)) if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), pindex->nHeight, consensusParams))
return false; return false;
if (block.GetHash() != pindex->GetBlockHash()) if (block.GetHash() != pindex->GetBlockHash())
return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s", return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
@ -2433,6 +2433,12 @@ static DisconnectResult DisconnectBlock(const CBlock& block, CValidationState& s
view.PopAnchor(SaplingMerkleTree::empty_root(), SAPLING); view.PopAnchor(SaplingMerkleTree::empty_root(), SAPLING);
} }
auto consensusBranchId = CurrentEpochBranchId(pindex->nHeight, chainparams.GetConsensus());
if (chainparams.GetConsensus().NetworkUpgradeActive(pindex->nHeight, Consensus::UPGRADE_HEARTWOOD)) {
view.PopHistoryNode(consensusBranchId);
}
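Disconnecting a post-Heartwood block therefore pops exactly one leaf (plus whatever internal nodes the MMR logic trims) from that block's epoch tree, mirroring the PushHistoryNode call added to ConnectBlock further down.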
// move best block pointer to prevout block // move best block pointer to prevout block
view.SetBestBlock(pindex->pprev->GetBlockHash()); view.SetBestBlock(pindex->pprev->GetBlockHash());
@ -2690,8 +2696,11 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
SaplingMerkleTree sapling_tree; SaplingMerkleTree sapling_tree;
assert(view.GetSaplingAnchorAt(view.GetBestAnchor(SAPLING), sapling_tree)); assert(view.GetSaplingAnchorAt(view.GetBestAnchor(SAPLING), sapling_tree));
// Grab the consensus branch ID for the block's height // Grab the consensus branch ID for this block and its parent
auto consensusBranchId = CurrentEpochBranchId(pindex->nHeight, chainparams.GetConsensus()); auto consensusBranchId = CurrentEpochBranchId(pindex->nHeight, chainparams.GetConsensus());
auto prevConsensusBranchId = CurrentEpochBranchId(pindex->nHeight - 1, chainparams.GetConsensus());
size_t total_sapling_tx = 0;
std::vector<PrecomputedTransactionData> txdata; std::vector<PrecomputedTransactionData> txdata;
txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated
@ -2811,6 +2820,10 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
sapling_tree.append(outputDescription.cmu); sapling_tree.append(outputDescription.cmu);
} }
if (!(tx.vShieldedSpend.empty() && tx.vShieldedOutput.empty())) {
total_sapling_tx += 1;
}
vPos.push_back(std::make_pair(tx.GetHash(), pos)); vPos.push_back(std::make_pair(tx.GetHash(), pos));
pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION); pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
} }
@ -2819,19 +2832,66 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
view.PushAnchor(sapling_tree); view.PushAnchor(sapling_tree);
if (!fJustCheck) { if (!fJustCheck) {
pindex->hashFinalSproutRoot = sprout_tree.root(); pindex->hashFinalSproutRoot = sprout_tree.root();
// - If this block is before Heartwood activation, then we don't set
// hashFinalSaplingRoot here to maintain the invariant documented in
// CBlockIndex (which was ensured in AddToBlockIndex).
// - If this block is on or after Heartwood activation, this is where we
// set the correct value of hashFinalSaplingRoot; in particular,
// blocks that are never passed to ConnectBlock() (and thus never on
// the main chain) will stay with hashFinalSaplingRoot set to null.
if (chainparams.GetConsensus().NetworkUpgradeActive(pindex->nHeight, Consensus::UPGRADE_HEARTWOOD)) {
pindex->hashFinalSaplingRoot = sapling_tree.root();
}
} }
blockundo.old_sprout_tree_root = old_sprout_tree_root; blockundo.old_sprout_tree_root = old_sprout_tree_root;
// If Sapling is active, block.hashFinalSaplingRoot must be the if (IsActivationHeight(pindex->nHeight, chainparams.GetConsensus(), Consensus::UPGRADE_HEARTWOOD)) {
// same as the root of the Sapling tree // In the block that activates ZIP 221, block.hashLightClientRoot MUST
if (chainparams.GetConsensus().NetworkUpgradeActive(pindex->nHeight, Consensus::UPGRADE_SAPLING)) { // be set to all zero bytes.
if (block.hashFinalSaplingRoot != sapling_tree.root()) { if (!block.hashLightClientRoot.IsNull()) {
return state.DoS(100, return state.DoS(100,
error("ConnectBlock(): block's hashFinalSaplingRoot is incorrect"), error("ConnectBlock(): block's hashLightClientRoot is incorrect (should be null)"),
REJECT_INVALID, "bad-heartwood-root-in-block");
}
} else if (chainparams.GetConsensus().NetworkUpgradeActive(pindex->nHeight, Consensus::UPGRADE_HEARTWOOD)) {
// If Heartwood is active, block.hashLightClientRoot must be the same as
// the root of the history tree for the previous block. We only store
// one tree per epoch, so we have two possible cases:
// - If the previous block is in the previous epoch, this block won't
// affect that epoch's tree root.
// - If the previous block is in this epoch, this block would affect
// this epoch's tree root, but as we haven't updated the tree for this
// block yet, view.GetHistoryRoot() returns the root we need.
if (block.hashLightClientRoot != view.GetHistoryRoot(prevConsensusBranchId)) {
return state.DoS(100,
error("ConnectBlock(): block's hashLightClientRoot is incorrect (should be history tree root)"),
REJECT_INVALID, "bad-heartwood-root-in-block");
}
} else if (chainparams.GetConsensus().NetworkUpgradeActive(pindex->nHeight, Consensus::UPGRADE_SAPLING)) {
// If Sapling is active, block.hashLightClientRoot must be the
// same as the root of the Sapling tree
if (block.hashLightClientRoot != sapling_tree.root()) {
return state.DoS(100,
error("ConnectBlock(): block's hashLightClientRoot is incorrect (should be Sapling tree root)"),
REJECT_INVALID, "bad-sapling-root-in-block"); REJECT_INVALID, "bad-sapling-root-in-block");
} }
} }
// History read/write is started with Heartwood update.
if (chainparams.GetConsensus().NetworkUpgradeActive(pindex->nHeight, Consensus::UPGRADE_HEARTWOOD)) {
auto historyNode = libzcash::NewLeaf(
block.GetHash(),
block.nTime,
block.nBits,
pindex->hashFinalSaplingRoot,
ArithToUint256(GetBlockProof(*pindex)),
pindex->nHeight,
total_sapling_tx
);
view.PushHistoryNode(consensusBranchId, historyNode);
}
int64_t nTime1 = GetTimeMicros(); nTimeConnect += nTime1 - nTimeStart; int64_t nTime1 = GetTimeMicros(); nTimeConnect += nTime1 - nTimeStart;
LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime1 - nTimeStart), 0.001 * (nTime1 - nTimeStart) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime1 - nTimeStart) / (nInputs-1), nTimeConnect * 0.000001); LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime1 - nTimeStart), 0.001 * (nTime1 - nTimeStart) / block.vtx.size(), nInputs <= 1 ? 0 : 0.001 * (nTime1 - nTimeStart) / (nInputs-1), nTimeConnect * 0.000001);
@ -3542,7 +3602,7 @@ bool ReconsiderBlock(CValidationState& state, CBlockIndex *pindex) {
return true; return true;
} }
CBlockIndex* AddToBlockIndex(const CBlockHeader& block) CBlockIndex* AddToBlockIndex(const CBlockHeader& block, const Consensus::Params& consensusParams)
{ {
// Check for duplicate // Check for duplicate
uint256 hash = block.GetHash(); uint256 hash = block.GetHash();
@ -3564,6 +3624,18 @@ CBlockIndex* AddToBlockIndex(const CBlockHeader& block)
{ {
pindexNew->pprev = (*miPrev).second; pindexNew->pprev = (*miPrev).second;
pindexNew->nHeight = pindexNew->pprev->nHeight + 1; pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
if (IsActivationHeight(pindexNew->nHeight, consensusParams, Consensus::UPGRADE_HEARTWOOD)) {
// hashFinalSaplingRoot is currently null, and will be set correctly in ConnectBlock.
// hashChainHistoryRoot is null.
} else if (consensusParams.NetworkUpgradeActive(pindexNew->nHeight, Consensus::UPGRADE_HEARTWOOD)) {
// hashFinalSaplingRoot is currently null, and will be set correctly in ConnectBlock.
pindexNew->hashChainHistoryRoot = pindexNew->hashLightClientRoot;
} else {
// hashChainHistoryRoot is null.
pindexNew->hashFinalSaplingRoot = pindexNew->hashLightClientRoot;
}
pindexNew->BuildSkip(); pindexNew->BuildSkip();
} }
pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew); pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
@ -3797,13 +3869,19 @@ bool CheckBlockHeader(
const CChainParams& chainparams, const CChainParams& chainparams,
bool fCheckPOW) bool fCheckPOW)
{ {
auto consensusParams = chainparams.GetConsensus();
// Check block version // Check block version
if (block.nVersion < MIN_BLOCK_VERSION) if (block.nVersion < MIN_BLOCK_VERSION)
return state.DoS(100, error("CheckBlockHeader(): block version too low"), return state.DoS(100, error("CheckBlockHeader(): block version too low"),
REJECT_INVALID, "version-too-low"); REJECT_INVALID, "version-too-low");
// Check Equihash solution is valid // Check Equihash solution is valid. The main check is in ContextualCheckBlockHeader,
if (fCheckPOW && !CheckEquihashSolution(&block, chainparams.GetConsensus())) // because we currently need to know the block height. That function skips the genesis
// block because it has no previous block, so we check it specifically here.
if (fCheckPOW &&
block.GetHash() == consensusParams.hashGenesisBlock &&
!CheckEquihashSolution(&block, 0, consensusParams))
return state.DoS(100, error("CheckBlockHeader(): Equihash solution invalid"), return state.DoS(100, error("CheckBlockHeader(): Equihash solution invalid"),
REJECT_INVALID, "invalid-solution"); REJECT_INVALID, "invalid-solution");
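The height is needed because CheckEquihashSolution now selects the Rust validator from Heartwood activation onward (see the pow.cpp hunk below), so the full solution check moves to ContextualCheckBlockHeader where pindexPrev provides the height; only the genesis block is still checked here.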
@ -3880,7 +3958,8 @@ bool CheckBlock(const CBlock& block, CValidationState& state,
bool ContextualCheckBlockHeader( bool ContextualCheckBlockHeader(
const CBlockHeader& block, CValidationState& state, const CBlockHeader& block, CValidationState& state,
const CChainParams& chainParams, CBlockIndex * const pindexPrev) const CChainParams& chainParams, CBlockIndex * const pindexPrev,
bool fCheckPOW)
{ {
const Consensus::Params& consensusParams = chainParams.GetConsensus(); const Consensus::Params& consensusParams = chainParams.GetConsensus();
uint256 hash = block.GetHash(); uint256 hash = block.GetHash();
@ -3892,6 +3971,11 @@ bool ContextualCheckBlockHeader(
int nHeight = pindexPrev->nHeight+1; int nHeight = pindexPrev->nHeight+1;
// Check Equihash solution is valid
if (fCheckPOW && !CheckEquihashSolution(&block, nHeight, consensusParams))
return state.DoS(100, error("CheckBlockHeader(): Equihash solution invalid"),
REJECT_INVALID, "invalid-solution");
// Check proof of work // Check proof of work
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams)) { if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams)) {
return state.DoS(100, error("%s: incorrect proof of work", __func__), return state.DoS(100, error("%s: incorrect proof of work", __func__),
@ -4044,7 +4128,7 @@ static bool AcceptBlockHeader(const CBlockHeader& block, CValidationState& state
return false; return false;
if (pindex == NULL) if (pindex == NULL)
pindex = AddToBlockIndex(block); pindex = AddToBlockIndex(block, chainparams.GetConsensus());
if (ppindex) if (ppindex)
*ppindex = pindex; *ppindex = pindex;
@ -4181,7 +4265,7 @@ bool TestBlockValidity(CValidationState& state, const CChainParams& chainparams,
auto verifier = libzcash::ProofVerifier::Disabled(); auto verifier = libzcash::ProofVerifier::Disabled();
// NOTE: CheckBlockHeader is called by CheckBlock // NOTE: CheckBlockHeader is called by CheckBlock
if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev)) if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, fCheckPOW))
return false; return false;
if (!CheckBlock(block, state, chainparams, verifier, fCheckPOW, fCheckMerkleRoot)) if (!CheckBlock(block, state, chainparams, verifier, fCheckPOW, fCheckMerkleRoot))
return false; return false;
@ -4369,7 +4453,7 @@ CBlockIndex * InsertBlockIndex(uint256 hash)
bool static LoadBlockIndexDB() bool static LoadBlockIndexDB()
{ {
const CChainParams& chainparams = Params(); const CChainParams& chainparams = Params();
if (!pblocktree->LoadBlockIndexGuts(InsertBlockIndex)) if (!pblocktree->LoadBlockIndexGuts(InsertBlockIndex, chainparams))
return false; return false;
boost::this_thread::interruption_point(); boost::this_thread::interruption_point();
@ -4881,7 +4965,7 @@ bool InitBlockIndex(const CChainParams& chainparams)
return error("LoadBlockIndex(): FindBlockPos failed"); return error("LoadBlockIndex(): FindBlockPos failed");
if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart()))
return error("LoadBlockIndex(): writing genesis block to disk failed"); return error("LoadBlockIndex(): writing genesis block to disk failed");
CBlockIndex *pindex = AddToBlockIndex(block); CBlockIndex *pindex = AddToBlockIndex(block, chainparams.GetConsensus());
if (!ReceivedBlockTransactions(block, state, chainparams, pindex, blockPos)) if (!ReceivedBlockTransactions(block, state, chainparams, pindex, blockPos))
return error("LoadBlockIndex(): genesis block not accepted"); return error("LoadBlockIndex(): genesis block not accepted");
if (!ActivateBestChain(state, chainparams, &block)) if (!ActivateBestChain(state, chainparams, &block))
@ -4975,7 +5059,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head); std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head);
while (range.first != range.second) { while (range.first != range.second) {
std::multimap<uint256, CDiskBlockPos>::iterator it = range.first; std::multimap<uint256, CDiskBlockPos>::iterator it = range.first;
if (ReadBlockFromDisk(block, it->second, chainparams.GetConsensus())) if (ReadBlockFromDisk(block, it->second, mapBlockIndex[head]->nHeight, chainparams.GetConsensus()))
{ {
LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(), LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(),
head.ToString()); head.ToString());

View File

@ -436,7 +436,7 @@ bool GetTimestampIndex(unsigned int high, unsigned int low, bool fActiveOnly,
/** Functions for disk access for blocks */ /** Functions for disk access for blocks */
bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart); bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart);
bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, const Consensus::Params& consensusParams); bool ReadBlockFromDisk(CBlock& block, const CDiskBlockPos& pos, int nHeight, const Consensus::Params& consensusParams);
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams); bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams);
/** Functions for validating blocks and updating the block tree */ /** Functions for validating blocks and updating the block tree */
@ -454,7 +454,8 @@ bool CheckBlock(const CBlock& block, CValidationState& state,
* By "context", we mean only the previous block headers, but not the UTXO * By "context", we mean only the previous block headers, but not the UTXO
* set; UTXO-related validity checks are done in ConnectBlock(). */ * set; UTXO-related validity checks are done in ConnectBlock(). */
bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state,
const CChainParams& chainparams, CBlockIndex *pindexPrev); const CChainParams& chainparams, CBlockIndex *pindexPrev,
bool fCheckPOW = true);
bool ContextualCheckBlock(const CBlock& block, CValidationState& state, bool ContextualCheckBlock(const CBlock& block, CValidationState& state,
const CChainParams& chainparams, CBlockIndex *pindexPrev); const CChainParams& chainparams, CBlockIndex *pindexPrev);

View File

@ -88,8 +88,8 @@ static inline size_t DynamicUsage(const std::set<X>& s)
return MallocUsage(sizeof(stl_tree_node<X>)) * s.size(); return MallocUsage(sizeof(stl_tree_node<X>)) * s.size();
} }
template<typename X, typename Y> template<typename X, typename Y, typename C>
static inline size_t DynamicUsage(const std::map<X, Y>& m) static inline size_t DynamicUsage(const std::map<X, Y, C>& m)
{ {
return MallocUsage(sizeof(stl_tree_node<std::pair<const X, Y> >)) * m.size(); return MallocUsage(sizeof(stl_tree_node<std::pair<const X, Y> >)) * m.size();
} }

View File

@ -520,7 +520,13 @@ CBlockTemplate* CreateNewBlock(const CChainParams& chainparams, const MinerAddre
// Fill in header // Fill in header
pblock->hashPrevBlock = pindexPrev->GetBlockHash(); pblock->hashPrevBlock = pindexPrev->GetBlockHash();
pblock->hashFinalSaplingRoot = sapling_tree.root(); if (IsActivationHeight(nHeight, chainparams.GetConsensus(), Consensus::UPGRADE_HEARTWOOD)) {
pblock->hashLightClientRoot.SetNull();
} else if (chainparams.GetConsensus().NetworkUpgradeActive(nHeight, Consensus::UPGRADE_HEARTWOOD)) {
pblock->hashLightClientRoot = view.GetHistoryRoot(consensusBranchId);
} else {
pblock->hashLightClientRoot = sapling_tree.root();
}
UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev); UpdateTime(pblock, chainparams.GetConsensus(), pindexPrev);
pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus()); pblock->nBits = GetNextWorkRequired(pindexPrev, pblock, chainparams.GetConsensus());
pblock->nSolution.clear(); pblock->nSolution.clear();
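The hashLightClientRoot selection above mirrors the three-way consensus check added to ConnectBlock: all-zero at the Heartwood activation block, the history tree root afterwards, and the Sapling commitment tree root before Heartwood.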

View File

@ -59,8 +59,11 @@ static const size_t MAPASKFOR_MAX_SZ = MAX_INV_SZ;
static const size_t SETASKFOR_MAX_SZ = 2 * MAX_INV_SZ; static const size_t SETASKFOR_MAX_SZ = 2 * MAX_INV_SZ;
/** The maximum number of peer connections to maintain. */ /** The maximum number of peer connections to maintain. */
static const unsigned int DEFAULT_MAX_PEER_CONNECTIONS = 125; static const unsigned int DEFAULT_MAX_PEER_CONNECTIONS = 125;
/** The period before a network upgrade activates, where connections to upgrading peers are preferred (in blocks). */ /**
static const int NETWORK_UPGRADE_PEER_PREFERENCE_BLOCK_PERIOD = 24 * 24 * 3; * The period before a network upgrade activates, where connections to upgrading peers are preferred (in blocks).
* This was three days for upgrades up to and including Blossom, and is 1.5 days from Heartwood onward.
*/
static const int NETWORK_UPGRADE_PEER_PREFERENCE_BLOCK_PERIOD = 1728;
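Note that the numeric value is unchanged (24 * 24 * 3 = 1728); what changed is its wall-clock meaning: at the pre-Blossom 150-second target spacing 1728 blocks covered 3 days, while at the post-Blossom 75-second spacing the same 1728 blocks cover 1728 * 75 = 129600 seconds, i.e. 1.5 days.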
static const bool DEFAULT_FORCEDNSSEED = false; static const bool DEFAULT_FORCEDNSSEED = false;
static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000; static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000;

View File

@ -13,6 +13,7 @@
#include "streams.h" #include "streams.h"
#include "uint256.h" #include "uint256.h"
#include <librustzcash.h>
#include "sodium.h" #include "sodium.h"
unsigned int GetNextWorkRequired(const CBlockIndex* pindexLast, const CBlockHeader *pblock, const Consensus::Params& params) unsigned int GetNextWorkRequired(const CBlockIndex* pindexLast, const CBlockHeader *pblock, const Consensus::Params& params)
@ -92,7 +93,7 @@ unsigned int CalculateNextWorkRequired(arith_uint256 bnAvg,
return bnNew.GetCompact(); return bnNew.GetCompact();
} }
bool CheckEquihashSolution(const CBlockHeader *pblock, const Consensus::Params& params) bool CheckEquihashSolution(const CBlockHeader *pblock, int nHeight, const Consensus::Params& params)
{ {
unsigned int n = params.nEquihashN; unsigned int n = params.nEquihashN;
unsigned int k = params.nEquihashK; unsigned int k = params.nEquihashK;
@ -106,6 +107,17 @@ bool CheckEquihashSolution(const CBlockHeader *pblock, const Consensus::Params&
// I||V // I||V
CDataStream ss(SER_NETWORK, PROTOCOL_VERSION); CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
ss << I; ss << I;
// From Heartwood activation, check with the Rust validator
if (params.NetworkUpgradeActive(nHeight, Consensus::UPGRADE_HEARTWOOD)) {
return librustzcash_eh_isvalid(
n, k,
(unsigned char*)&ss[0], ss.size(),
pblock->nNonce.begin(), pblock->nNonce.size(),
pblock->nSolution.data(), pblock->nSolution.size());
}
// Before Heartwood activation, check with the C++ validator
ss << pblock->nNonce; ss << pblock->nNonce;
// H(I||V||... // H(I||V||...

View File

@ -23,7 +23,7 @@ unsigned int CalculateNextWorkRequired(arith_uint256 bnAvg,
int nextHeight); int nextHeight);
/** Check whether the Equihash solution in a block header is valid */ /** Check whether the Equihash solution in a block header is valid */
bool CheckEquihashSolution(const CBlockHeader *pblock, const Consensus::Params&); bool CheckEquihashSolution(const CBlockHeader *pblock, int nHeight, const Consensus::Params&);
/** Check whether a block hash satisfies the proof-of-work requirement specified by nBits */ /** Check whether a block hash satisfies the proof-of-work requirement specified by nBits */
bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params&); bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params&);

View File

@ -112,12 +112,12 @@ uint256 CBlock::CheckMerkleBranch(uint256 hash, const std::vector<uint256>& vMer
std::string CBlock::ToString() const std::string CBlock::ToString() const
{ {
std::stringstream s; std::stringstream s;
s << strprintf("CBlock(hash=%s, ver=%d, hashPrevBlock=%s, hashMerkleRoot=%s, hashFinalSaplingRoot=%s, nTime=%u, nBits=%08x, nNonce=%s, vtx=%u)\n", s << strprintf("CBlock(hash=%s, ver=%d, hashPrevBlock=%s, hashMerkleRoot=%s, hashLightClientRoot=%s, nTime=%u, nBits=%08x, nNonce=%s, vtx=%u)\n",
GetHash().ToString(), GetHash().ToString(),
nVersion, nVersion,
hashPrevBlock.ToString(), hashPrevBlock.ToString(),
hashMerkleRoot.ToString(), hashMerkleRoot.ToString(),
hashFinalSaplingRoot.ToString(), hashLightClientRoot.ToString(),
nTime, nBits, nNonce.ToString(), nTime, nBits, nNonce.ToString(),
vtx.size()); vtx.size());
for (unsigned int i = 0; i < vtx.size(); i++) for (unsigned int i = 0; i < vtx.size(); i++)

View File

@ -26,7 +26,7 @@ public:
int32_t nVersion; int32_t nVersion;
uint256 hashPrevBlock; uint256 hashPrevBlock;
uint256 hashMerkleRoot; uint256 hashMerkleRoot;
uint256 hashFinalSaplingRoot; uint256 hashLightClientRoot;
uint32_t nTime; uint32_t nTime;
uint32_t nBits; uint32_t nBits;
uint256 nNonce; uint256 nNonce;
@ -44,7 +44,7 @@ public:
READWRITE(this->nVersion); READWRITE(this->nVersion);
READWRITE(hashPrevBlock); READWRITE(hashPrevBlock);
READWRITE(hashMerkleRoot); READWRITE(hashMerkleRoot);
READWRITE(hashFinalSaplingRoot); READWRITE(hashLightClientRoot);
READWRITE(nTime); READWRITE(nTime);
READWRITE(nBits); READWRITE(nBits);
READWRITE(nNonce); READWRITE(nNonce);
@ -56,7 +56,7 @@ public:
nVersion = CBlockHeader::CURRENT_VERSION; nVersion = CBlockHeader::CURRENT_VERSION;
hashPrevBlock.SetNull(); hashPrevBlock.SetNull();
hashMerkleRoot.SetNull(); hashMerkleRoot.SetNull();
hashFinalSaplingRoot.SetNull(); hashLightClientRoot.SetNull();
nTime = 0; nTime = 0;
nBits = 0; nBits = 0;
nNonce = uint256(); nNonce = uint256();
@ -118,7 +118,7 @@ public:
block.nVersion = nVersion; block.nVersion = nVersion;
block.hashPrevBlock = hashPrevBlock; block.hashPrevBlock = hashPrevBlock;
block.hashMerkleRoot = hashMerkleRoot; block.hashMerkleRoot = hashMerkleRoot;
block.hashFinalSaplingRoot = hashFinalSaplingRoot; block.hashLightClientRoot = hashLightClientRoot;
block.nTime = nTime; block.nTime = nTime;
block.nBits = nBits; block.nBits = nBits;
block.nNonce = nNonce; block.nNonce = nNonce;
@ -158,7 +158,7 @@ public:
READWRITE(this->nVersion); READWRITE(this->nVersion);
READWRITE(hashPrevBlock); READWRITE(hashPrevBlock);
READWRITE(hashMerkleRoot); READWRITE(hashMerkleRoot);
READWRITE(hashFinalSaplingRoot); READWRITE(hashLightClientRoot);
READWRITE(nTime); READWRITE(nTime);
READWRITE(nBits); READWRITE(nBits);
} }

View File

@ -229,7 +229,8 @@ UniValue blockToJSON(const CBlock& block, const CBlockIndex* blockindex, bool tx
result.push_back(Pair("height", blockindex->nHeight)); result.push_back(Pair("height", blockindex->nHeight));
result.push_back(Pair("version", block.nVersion)); result.push_back(Pair("version", block.nVersion));
result.push_back(Pair("merkleroot", block.hashMerkleRoot.GetHex())); result.push_back(Pair("merkleroot", block.hashMerkleRoot.GetHex()));
result.push_back(Pair("finalsaplingroot", block.hashFinalSaplingRoot.GetHex())); result.push_back(Pair("finalsaplingroot", blockindex->hashFinalSaplingRoot.GetHex()));
result.push_back(Pair("chainhistoryroot", blockindex->hashChainHistoryRoot.GetHex()));
UniValue txs(UniValue::VARR); UniValue txs(UniValue::VARR);
BOOST_FOREACH(const CTransaction&tx, block.vtx) BOOST_FOREACH(const CTransaction&tx, block.vtx)
{ {

View File

@ -431,7 +431,8 @@ UniValue getblocktemplate(const UniValue& params, bool fHelp)
"{\n" "{\n"
" \"version\" : n, (numeric) The block version\n" " \"version\" : n, (numeric) The block version\n"
" \"previousblockhash\" : \"xxxx\", (string) The hash of current highest block\n" " \"previousblockhash\" : \"xxxx\", (string) The hash of current highest block\n"
" \"finalsaplingroothash\" : \"xxxx\", (string) The hash of the final sapling root\n" " \"lightclientroothash\" : \"xxxx\", (string) The hash of the light client root field in the block header\n"
" \"finalsaplingroothash\" : \"xxxx\", (string) (DEPRECATED) The hash of the light client root field in the block header\n"
" \"transactions\" : [ (array) contents of non-coinbase transactions that should be included in the next block\n" " \"transactions\" : [ (array) contents of non-coinbase transactions that should be included in the next block\n"
" {\n" " {\n"
" \"data\" : \"xxxx\", (string) transaction data encoded in hexadecimal (byte-for-byte)\n" " \"data\" : \"xxxx\", (string) transaction data encoded in hexadecimal (byte-for-byte)\n"
@ -696,7 +697,9 @@ UniValue getblocktemplate(const UniValue& params, bool fHelp)
result.push_back(Pair("capabilities", aCaps)); result.push_back(Pair("capabilities", aCaps));
result.push_back(Pair("version", pblock->nVersion)); result.push_back(Pair("version", pblock->nVersion));
result.push_back(Pair("previousblockhash", pblock->hashPrevBlock.GetHex())); result.push_back(Pair("previousblockhash", pblock->hashPrevBlock.GetHex()));
result.push_back(Pair("finalsaplingroothash", pblock->hashFinalSaplingRoot.GetHex())); result.push_back(Pair("lightclientroothash", pblock->hashLightClientRoot.GetHex()));
// Deprecated; remove in a future release.
result.push_back(Pair("finalsaplingroothash", pblock->hashLightClientRoot.GetHex()));
result.push_back(Pair("transactions", transactions)); result.push_back(Pair("transactions", transactions));
if (coinbasetxn) { if (coinbasetxn) {
assert(txCoinbase.isObject()); assert(txCoinbase.isObject());
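Downstream consumers of getblocktemplate can migrate incrementally while the deprecated alias is still emitted. A hypothetical compatibility helper (name and fallback policy are illustrative, not part of this change) might read the field as follows:

#include <univalue.h>
#include <string>

// Prefer the new key; fall back to the deprecated alias when talking to older nodes.
static std::string GetLightClientRootHex(const UniValue& gbtResult)
{
    const UniValue& v = find_value(gbtResult, "lightclientroothash");
    if (v.isStr()) {
        return v.get_str();
    }
    return find_value(gbtResult, "finalsaplingroothash").get_str();
}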

View File

@ -3,6 +3,15 @@
#include <stdint.h> #include <stdint.h>
const int ENTRY_SERIALIZED_LENGTH = 180;
typedef struct HistoryEntry {
unsigned char bytes[ENTRY_SERIALIZED_LENGTH];
} HistoryEntry;
static_assert(
sizeof(HistoryEntry) == ENTRY_SERIALIZED_LENGTH,
"HistoryEntry struct is not the same size as the underlying byte array");
static_assert(alignof(HistoryEntry) == 1, "HistoryEntry struct alignment is not 1");
extern "C" { extern "C" {
#ifdef WIN32 #ifdef WIN32
typedef uint16_t codeunit; typedef uint16_t codeunit;
@ -312,7 +321,7 @@ extern "C" {
uint32_t cbranch, uint32_t cbranch,
uint32_t t_len, uint32_t t_len,
const uint32_t *ni_ptr, const uint32_t *ni_ptr,
const unsigned char *n_ptr, const HistoryEntry *n_ptr,
size_t p_len, size_t p_len,
const unsigned char *nn_ptr, const unsigned char *nn_ptr,
unsigned char *rt_ret, unsigned char *rt_ret,
@ -323,7 +332,7 @@ extern "C" {
uint32_t cbranch, uint32_t cbranch,
uint32_t t_len, uint32_t t_len,
const uint32_t *ni_ptr, const uint32_t *ni_ptr,
const unsigned char *n_ptr, const HistoryEntry *n_ptr,
size_t p_len, size_t p_len,
size_t e_len, size_t e_len,
unsigned char *rt_ret unsigned char *rt_ret

View File

@ -1257,7 +1257,7 @@ pub extern "system" fn librustzcash_mmr_append(
.root_node() .root_node()
.expect("Just added, should resolve always; qed"); .expect("Just added, should resolve always; qed");
unsafe { unsafe {
*rt_ret = root_node.data().subtree_commitment; *rt_ret = root_node.data().hash();
for (idx, next_buf) in slice::from_raw_parts_mut(buf_ret, return_count as usize) for (idx, next_buf) in slice::from_raw_parts_mut(buf_ret, return_count as usize)
.iter_mut() .iter_mut()
@ -1310,7 +1310,7 @@ pub extern "system" fn librustzcash_mmr_delete(
.root_node() .root_node()
.expect("Just generated without errors, root should be resolving") .expect("Just generated without errors, root should be resolving")
.data() .data()
.subtree_commitment; .hash();
} }
truncate_len truncate_len

View File

@ -297,6 +297,14 @@ public:
if(value == 0) if(value == 0)
return std::vector<unsigned char>(); return std::vector<unsigned char>();
if (value == INT64_MIN) {
// The code below is buggy, and produces the "wrong" result for
// INT64_MIN. To avoid undefined behavior while attempting to
// negate a value of INT64_MIN, we intentionally return the result
// that the code below would produce on an x86_64 system.
return {0,0,0,0,0,0,0,128,128};
}
std::vector<unsigned char> result; std::vector<unsigned char> result;
const bool neg = value < 0; const bool neg = value < 0;
uint64_t absvalue = neg ? -value : value; uint64_t absvalue = neg ? -value : value;
@ -326,11 +334,25 @@ public:
} }
private: private:
static int64_t set_vch(const std::vector<unsigned char>& vch) static int64_t set_vch(const std::vector<unsigned char>& vch)
{ {
if (vch.empty()) if (vch.empty())
return 0; return 0;
if (vch == std::vector<unsigned char>({0,0,0,0,0,0,0,128,128})) {
// On an x86_64 system, the code below would actually decode the buggy
// INT64_MIN encoding correctly. However in this case, it would be
// performing left shifts of a signed type by 64, which has undefined
// behavior.
return INT64_MIN;
}
// Guard against undefined behavior. INT64_MIN is the only allowed 9-byte encoding.
if (vch.size() > 8) {
throw scriptnum_error("script number overflow");
}
int64_t result = 0; int64_t result = 0;
for (size_t i = 0; i != vch.size(); ++i) for (size_t i = 0; i != vch.size(); ++i)
result |= static_cast<int64_t>(vch[i]) << 8*i; result |= static_cast<int64_t>(vch[i]) << 8*i;
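The 9-byte constant {0,0,0,0,0,0,0,128,128} is not arbitrary: |INT64_MIN| = 2^63, whose little-endian bytes are 00 00 00 00 00 00 00 80; because the most significant byte already has its high bit set, the serializer appends an extra sign byte, which is 0x80 for a negative value. The following standalone sketch (not zcashd code) reproduces that encoding without performing the undefined negation:

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
    // |INT64_MIN| == 2^63, computed without negating INT64_MIN (which would be UB).
    uint64_t absvalue = uint64_t{1} << 63;
    std::vector<unsigned char> result;
    while (absvalue) {
        result.push_back(absvalue & 0xff);
        absvalue >>= 8;
    }
    // The top byte (0x80) already has its sign bit set, so the serializer appends an
    // extra sign byte; for a negative number that byte is 0x80.
    if (result.back() & 0x80) {
        result.push_back(0x80);
    }
    for (unsigned char b : result) {
        std::printf("%d ", b); // prints: 0 0 0 0 0 0 0 128 128
    }
    std::printf("\n");
    return 0;
}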

View File

@ -170,7 +170,8 @@ public:
CAnchorsSproutMap& mapSproutAnchors, CAnchorsSproutMap& mapSproutAnchors,
CAnchorsSaplingMap& mapSaplingAnchors, CAnchorsSaplingMap& mapSaplingAnchors,
CNullifiersMap& mapSproutNullifiers, CNullifiersMap& mapSproutNullifiers,
CNullifiersMap& mapSaplingNullifiers) CNullifiersMap& mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap)
{ {
for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end(); ) { for (CCoinsMap::iterator it = mapCoins.begin(); it != mapCoins.end(); ) {
if (it->second.flags & CCoinsCacheEntry::DIRTY) { if (it->second.flags & CCoinsCacheEntry::DIRTY) {
@ -214,7 +215,8 @@ public:
memusage::DynamicUsage(cacheSproutAnchors) + memusage::DynamicUsage(cacheSproutAnchors) +
memusage::DynamicUsage(cacheSaplingAnchors) + memusage::DynamicUsage(cacheSaplingAnchors) +
memusage::DynamicUsage(cacheSproutNullifiers) + memusage::DynamicUsage(cacheSproutNullifiers) +
memusage::DynamicUsage(cacheSaplingNullifiers); memusage::DynamicUsage(cacheSaplingNullifiers) +
memusage::DynamicUsage(historyCacheMap);
for (CCoinsMap::iterator it = cacheCoins.begin(); it != cacheCoins.end(); it++) { for (CCoinsMap::iterator it = cacheCoins.begin(); it != cacheCoins.end(); it++) {
ret += it->second.coins.DynamicMemoryUsage(); ret += it->second.coins.DynamicMemoryUsage();
} }

View File

@ -15,6 +15,8 @@
#include "sodium.h" #include "sodium.h"
#include "librustzcash.h"
#include <sstream> #include <sstream>
#include <set> #include <set>
#include <vector> #include <vector>
@ -87,6 +89,9 @@ void TestEquihashSolvers(unsigned int n, unsigned int k, const std::string &I, c
void TestEquihashValidator(unsigned int n, unsigned int k, const std::string &I, const arith_uint256 &nonce, std::vector<uint32_t> soln, bool expected) { void TestEquihashValidator(unsigned int n, unsigned int k, const std::string &I, const arith_uint256 &nonce, std::vector<uint32_t> soln, bool expected) {
size_t cBitLen { n/(k+1) }; size_t cBitLen { n/(k+1) };
auto minimal = GetMinimalFromIndices(soln, cBitLen);
// First test the C++ validator
crypto_generichash_blake2b_state state; crypto_generichash_blake2b_state state;
EhInitialiseState(n, k, state); EhInitialiseState(n, k, state);
uint256 V = ArithToUint256(nonce); uint256 V = ArithToUint256(nonce);
@ -97,7 +102,15 @@ void TestEquihashValidator(unsigned int n, unsigned int k, const std::string &I,
PrintSolution(strm, soln); PrintSolution(strm, soln);
BOOST_TEST_MESSAGE(strm.str()); BOOST_TEST_MESSAGE(strm.str());
bool isValid; bool isValid;
EhIsValidSolution(n, k, state, GetMinimalFromIndices(soln, cBitLen), isValid); EhIsValidSolution(n, k, state, minimal, isValid);
BOOST_CHECK(isValid == expected);
// The Rust validator should have the exact same result
isValid = librustzcash_eh_isvalid(
n, k,
(unsigned char*)&I[0], I.size(),
V.begin(), V.size(),
minimal.data(), minimal.size());
BOOST_CHECK(isValid == expected); BOOST_CHECK(isValid == expected);
} }
@ -219,6 +232,11 @@ BOOST_AUTO_TEST_CASE(validator_allbitsmatter) {
bool isValid; bool isValid;
EhIsValidSolution(n, k, state, sol_char, isValid); EhIsValidSolution(n, k, state, sol_char, isValid);
BOOST_CHECK(isValid == true); BOOST_CHECK(isValid == true);
BOOST_CHECK(librustzcash_eh_isvalid(
n, k,
(unsigned char*)&I[0], I.size(),
V.begin(), V.size(),
sol_char.data(), sol_char.size()));
// Changing any single bit of the encoded solution should make it invalid. // Changing any single bit of the encoded solution should make it invalid.
for (size_t i = 0; i < sol_char.size() * 8; i++) { for (size_t i = 0; i < sol_char.size() * 8; i++) {
@ -226,6 +244,11 @@ BOOST_AUTO_TEST_CASE(validator_allbitsmatter) {
mutated.at(i/8) ^= (1 << (i % 8)); mutated.at(i/8) ^= (1 << (i % 8));
EhIsValidSolution(n, k, state, mutated, isValid); EhIsValidSolution(n, k, state, mutated, isValid);
BOOST_CHECK(isValid == false); BOOST_CHECK(isValid == false);
BOOST_CHECK(!librustzcash_eh_isvalid(
n, k,
(unsigned char*)&I[0], I.size(),
V.begin(), V.size(),
mutated.data(), mutated.size()));
} }
} }

View File

@ -273,8 +273,8 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
} }
*/ */
// These tests assume null hashFinalSaplingRoot (before Sapling) // These tests assume null hashLightClientRoot (before Sapling)
pblock->hashFinalSaplingRoot = uint256(); pblock->hashLightClientRoot = uint256();
CValidationState state; CValidationState state;
BOOST_CHECK(ProcessNewBlock(state, chainparams, NULL, pblock, true, NULL)); BOOST_CHECK(ProcessNewBlock(state, chainparams, NULL, pblock, true, NULL));

View File

@ -196,4 +196,23 @@ BOOST_AUTO_TEST_CASE(operators)
} }
} }
BOOST_AUTO_TEST_CASE(intmin)
{
// INT64_MIN encodes to the buggy encoding.
const CScriptNum sn(INT64_MIN);
std::vector<unsigned char> buggy_int64_min_encoding = {0, 0, 0, 0, 0, 0, 0, 128, 128};
BOOST_CHECK(sn.getvch() == buggy_int64_min_encoding);
// The buggy INT64_MIN encoding decodes correctly.
const CScriptNum sn2(buggy_int64_min_encoding, true, 9);
BOOST_CHECK(sn2 == INT64_MIN);
BOOST_CHECK(sn2.getvch() == buggy_int64_min_encoding);
// getint() saturates at the min/max value of the int type
BOOST_CHECK((sn2.getint()) == std::numeric_limits<int>::min());
// Should throw for any other 9+ byte encoding.
std::vector<unsigned char> invalid_nine_bytes = {0, 0, 0, 0, 0, 0, 0, 0, 0};
BOOST_CHECK_THROW (CScriptNum sn3(invalid_nine_bytes, false, 9), scriptnum_error);
}
BOOST_AUTO_TEST_SUITE_END() BOOST_AUTO_TEST_SUITE_END()

View File

@ -35,6 +35,10 @@ static const char DB_FLAG = 'F';
static const char DB_REINDEX_FLAG = 'R'; static const char DB_REINDEX_FLAG = 'R';
static const char DB_LAST_BLOCK = 'l'; static const char DB_LAST_BLOCK = 'l';
static const char DB_MMR_LENGTH = 'M';
static const char DB_MMR_NODE = 'm';
static const char DB_MMR_ROOT = 'r';
// insightexplorer // insightexplorer
static const char DB_ADDRESSINDEX = 'd'; static const char DB_ADDRESSINDEX = 'd';
static const char DB_ADDRESSUNSPENTINDEX = 'u'; static const char DB_ADDRESSUNSPENTINDEX = 'u';
@ -124,6 +128,39 @@ uint256 CCoinsViewDB::GetBestAnchor(ShieldedType type) const {
return hashBestAnchor; return hashBestAnchor;
} }
HistoryIndex CCoinsViewDB::GetHistoryLength(uint32_t epochId) const {
HistoryIndex historyLength;
if (!db.Read(make_pair(DB_MMR_LENGTH, epochId), historyLength)) {
// Starting new history
historyLength = 0;
}
return historyLength;
}
HistoryNode CCoinsViewDB::GetHistoryAt(uint32_t epochId, HistoryIndex index) const {
HistoryNode mmrNode;
if (index >= GetHistoryLength(epochId)) {
throw runtime_error("History data inconsistent - reindex?");
}
if (!db.Read(make_pair(DB_MMR_NODE, make_pair(epochId, index)), mmrNode)) {
throw runtime_error("History data inconsistent (expected node not found) - reindex?");
}
return mmrNode;
}
uint256 CCoinsViewDB::GetHistoryRoot(uint32_t epochId) const {
uint256 root;
if (!db.Read(make_pair(DB_MMR_ROOT, epochId), root))
{
root = uint256();
}
return root;
}
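Together with the DB_MMR_* key prefixes above, these accessors expose the per-epoch MMR state for reading. A hypothetical caller (the function below is illustrative and only assumes the declarations added to txdb.h in this change) could walk one epoch's persisted history like this:

#include "txdb.h"

// Sketch: enumerate the persisted history nodes for one epoch.
void WalkHistorySketch(const CCoinsViewDB& view, uint32_t epochId)
{
    uint256 root = view.GetHistoryRoot(epochId);            // null uint256 if no history yet
    HistoryIndex length = view.GetHistoryLength(epochId);   // 0 if no history yet
    for (HistoryIndex i = 0; i < length; i++) {
        HistoryNode node = view.GetHistoryAt(epochId, i);   // throws if the DB is inconsistent
        (void)node; // inspect the 171-byte serialized node as needed
    }
    (void)root;
}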
void BatchWriteNullifiers(CDBBatch& batch, CNullifiersMap& mapToUse, const char& dbChar) void BatchWriteNullifiers(CDBBatch& batch, CNullifiersMap& mapToUse, const char& dbChar)
{ {
for (CNullifiersMap::iterator it = mapToUse.begin(); it != mapToUse.end();) { for (CNullifiersMap::iterator it = mapToUse.begin(); it != mapToUse.end();) {
@ -158,6 +195,29 @@ void BatchWriteAnchors(CDBBatch& batch, Map& mapToUse, const char& dbChar)
} }
} }
void BatchWriteHistory(CDBBatch& batch, CHistoryCacheMap& historyCacheMap) {
for (auto nextHistoryCache = historyCacheMap.begin(); nextHistoryCache != historyCacheMap.end(); nextHistoryCache++) {
auto historyCache = nextHistoryCache->second;
auto epochId = nextHistoryCache->first;
// delete old entries since updateDepth
for (int i = historyCache.updateDepth + 1; i <= historyCache.length; i++) {
batch.Erase(make_pair(DB_MMR_NODE, make_pair(epochId, i)));
}
// replace/append new/updated entries
for (auto it = historyCache.appends.begin(); it != historyCache.appends.end(); it++) {
batch.Write(make_pair(DB_MMR_NODE, make_pair(epochId, it->first)), it->second);
}
// write new length
batch.Write(make_pair(DB_MMR_LENGTH, epochId), historyCache.length);
// write current root
batch.Write(make_pair(DB_MMR_ROOT, epochId), historyCache.root);
}
}
bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins, bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock, const uint256 &hashBlock,
const uint256 &hashSproutAnchor, const uint256 &hashSproutAnchor,
@ -165,7 +225,8 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins,
CAnchorsSproutMap &mapSproutAnchors, CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors, CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers, CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers) { CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap) {
CDBBatch batch(db); CDBBatch batch(db);
size_t count = 0; size_t count = 0;
size_t changed = 0; size_t changed = 0;
@ -188,6 +249,8 @@ bool CCoinsViewDB::BatchWrite(CCoinsMap &mapCoins,
::BatchWriteNullifiers(batch, mapSproutNullifiers, DB_NULLIFIER); ::BatchWriteNullifiers(batch, mapSproutNullifiers, DB_NULLIFIER);
::BatchWriteNullifiers(batch, mapSaplingNullifiers, DB_SAPLING_NULLIFIER); ::BatchWriteNullifiers(batch, mapSaplingNullifiers, DB_SAPLING_NULLIFIER);
::BatchWriteHistory(batch, historyCacheMap);
if (!hashBlock.IsNull()) if (!hashBlock.IsNull())
batch.Write(DB_BEST_BLOCK, hashBlock); batch.Write(DB_BEST_BLOCK, hashBlock);
if (!hashSproutAnchor.IsNull()) if (!hashSproutAnchor.IsNull())
@ -456,7 +519,9 @@ bool CBlockTreeDB::ReadFlag(const std::string &name, bool &fValue) {
return true; return true;
} }
bool CBlockTreeDB::LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)> insertBlockIndex) bool CBlockTreeDB::LoadBlockIndexGuts(
std::function<CBlockIndex*(const uint256&)> insertBlockIndex,
const CChainParams& chainParams)
{ {
boost::scoped_ptr<CDBIterator> pcursor(NewIterator()); boost::scoped_ptr<CDBIterator> pcursor(NewIterator());
@ -479,7 +544,7 @@ bool CBlockTreeDB::LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)
pindexNew->hashSproutAnchor = diskindex.hashSproutAnchor; pindexNew->hashSproutAnchor = diskindex.hashSproutAnchor;
pindexNew->nVersion = diskindex.nVersion; pindexNew->nVersion = diskindex.nVersion;
pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot; pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
pindexNew->hashFinalSaplingRoot = diskindex.hashFinalSaplingRoot; pindexNew->hashLightClientRoot = diskindex.hashLightClientRoot;
pindexNew->nTime = diskindex.nTime; pindexNew->nTime = diskindex.nTime;
pindexNew->nBits = diskindex.nBits; pindexNew->nBits = diskindex.nBits;
pindexNew->nNonce = diskindex.nNonce; pindexNew->nNonce = diskindex.nNonce;
@ -489,6 +554,8 @@ bool CBlockTreeDB::LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)
pindexNew->nTx = diskindex.nTx; pindexNew->nTx = diskindex.nTx;
pindexNew->nSproutValue = diskindex.nSproutValue; pindexNew->nSproutValue = diskindex.nSproutValue;
pindexNew->nSaplingValue = diskindex.nSaplingValue; pindexNew->nSaplingValue = diskindex.nSaplingValue;
pindexNew->hashFinalSaplingRoot = diskindex.hashFinalSaplingRoot;
pindexNew->hashChainHistoryRoot = diskindex.hashChainHistoryRoot;
// Consistency checks // Consistency checks
auto header = pindexNew->GetBlockHeader(); auto header = pindexNew->GetBlockHeader();
@ -498,6 +565,21 @@ bool CBlockTreeDB::LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)
if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, Params().GetConsensus())) if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, Params().GetConsensus()))
return error("LoadBlockIndex(): CheckProofOfWork failed: %s", pindexNew->ToString()); return error("LoadBlockIndex(): CheckProofOfWork failed: %s", pindexNew->ToString());
// ZIP 221 consistency checks
if (chainParams.GetConsensus().NetworkUpgradeActive(pindexNew->nHeight, Consensus::UPGRADE_HEARTWOOD)) {
if (pindexNew->hashLightClientRoot != pindexNew->hashChainHistoryRoot) {
return error(
"LoadBlockIndex(): block index inconsistency detected (hashLightClientRoot != hashChainHistoryRoot): %s",
pindexNew->ToString());
}
} else {
if (pindexNew->hashLightClientRoot != pindexNew->hashFinalSaplingRoot) {
return error(
"LoadBlockIndex(): block index inconsistency detected (hashLightClientRoot != hashFinalSaplingRoot): %s",
pindexNew->ToString());
}
}
pcursor->Next(); pcursor->Next();
} else { } else {
return error("LoadBlockIndex() : failed to read value"); return error("LoadBlockIndex() : failed to read value");

View File

@ -15,6 +15,9 @@
#include <utility> #include <utility>
#include <vector> #include <vector>
#include <boost/function.hpp>
#include "zcash/History.hpp"
class CBlockIndex; class CBlockIndex;
// START insightexplorer // START insightexplorer
@ -85,6 +88,9 @@ public:
bool HaveCoins(const uint256 &txid) const; bool HaveCoins(const uint256 &txid) const;
uint256 GetBestBlock() const; uint256 GetBestBlock() const;
uint256 GetBestAnchor(ShieldedType type) const; uint256 GetBestAnchor(ShieldedType type) const;
HistoryIndex GetHistoryLength(uint32_t epochId) const;
HistoryNode GetHistoryAt(uint32_t epochId, HistoryIndex index) const;
uint256 GetHistoryRoot(uint32_t epochId) const;
bool BatchWrite(CCoinsMap &mapCoins, bool BatchWrite(CCoinsMap &mapCoins,
const uint256 &hashBlock, const uint256 &hashBlock,
const uint256 &hashSproutAnchor, const uint256 &hashSproutAnchor,
@ -92,7 +98,8 @@ public:
CAnchorsSproutMap &mapSproutAnchors, CAnchorsSproutMap &mapSproutAnchors,
CAnchorsSaplingMap &mapSaplingAnchors, CAnchorsSaplingMap &mapSaplingAnchors,
CNullifiersMap &mapSproutNullifiers, CNullifiersMap &mapSproutNullifiers,
CNullifiersMap &mapSaplingNullifiers); CNullifiersMap &mapSaplingNullifiers,
CHistoryCacheMap &historyCacheMap);
bool GetStats(CCoinsStats &stats) const; bool GetStats(CCoinsStats &stats) const;
}; };
@ -132,7 +139,9 @@ public:
bool WriteFlag(const std::string &name, bool fValue); bool WriteFlag(const std::string &name, bool fValue);
bool ReadFlag(const std::string &name, bool &fValue); bool ReadFlag(const std::string &name, bool &fValue);
bool LoadBlockIndexGuts(std::function<CBlockIndex*(const uint256&)> insertBlockIndex); bool LoadBlockIndexGuts(
std::function<CBlockIndex*(const uint256&)> insertBlockIndex,
const CChainParams& chainParams);
}; };
#endif // BITCOIN_TXDB_H #endif // BITCOIN_TXDB_H

View File

@ -790,8 +790,37 @@ bool CCoinsViewMemPool::HaveCoins(const uint256 &txid) const {
size_t CTxMemPool::DynamicMemoryUsage() const { size_t CTxMemPool::DynamicMemoryUsage() const {
LOCK(cs); LOCK(cs);
// Estimate the overhead of mapTx to be 6 pointers + an allocation, as no exact formula for boost::multi_index_contained is implemented.
return memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 6 * sizeof(void*)) * mapTx.size() + memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas) + cachedInnerUsage; size_t total = 0;
// Estimate the overhead of mapTx to be 6 pointers + an allocation, as no exact formula for
// boost::multi_index_contained is implemented.
total += memusage::MallocUsage(sizeof(CTxMemPoolEntry) + 6 * sizeof(void*)) * mapTx.size();
// Two metadata maps inherited from Bitcoin Core
total += memusage::DynamicUsage(mapNextTx) + memusage::DynamicUsage(mapDeltas);
// Saves iterating over the full map
total += cachedInnerUsage;
// Wallet notification
total += memusage::DynamicUsage(mapRecentlyAddedTx);
// Nullifier set tracking
total += memusage::DynamicUsage(mapSproutNullifiers) + memusage::DynamicUsage(mapSaplingNullifiers);
// DoS mitigation
total += memusage::DynamicUsage(recentlyEvicted) + memusage::DynamicUsage(weightedTxTree);
// Insight-related structures
size_t insight = 0;
insight += memusage::DynamicUsage(mapAddress);
insight += memusage::DynamicUsage(mapAddressInserted);
insight += memusage::DynamicUsage(mapSpent);
insight += memusage::DynamicUsage(mapSpentInserted);
total += insight;
return total;
} }
void CTxMemPool::SetMempoolCostLimit(int64_t totalCostLimit, int64_t evictionMemorySeconds) { void CTxMemPool::SetMempoolCostLimit(int64_t totalCostLimit, int64_t evictionMemorySeconds) {

View File

@ -9,7 +9,7 @@
* network protocol versioning * network protocol versioning
*/ */
static const int PROTOCOL_VERSION = 170009; static const int PROTOCOL_VERSION = 170010;
//! initial proto version, to be increased after version/verack negotiation //! initial proto version, to be increased after version/verack negotiation
static const int INIT_PROTO_VERSION = 209; static const int INIT_PROTO_VERSION = 209;

View File

@ -185,7 +185,7 @@ TEST(WalletTests, FindUnspentSproutNotes) {
auto consensusParams = RegtestActivateSapling(); auto consensusParams = RegtestActivateSapling();
CWallet wallet; CWallet wallet;
LOCK(wallet.cs_wallet); LOCK2(cs_main, wallet.cs_wallet);
auto sk = libzcash::SproutSpendingKey::random(); auto sk = libzcash::SproutSpendingKey::random();
wallet.AddSproutSpendingKey(sk); wallet.AddSproutSpendingKey(sk);
@ -643,7 +643,7 @@ TEST(WalletTests, GetConflictedSaplingNotes) {
auto consensusParams = RegtestActivateSapling(); auto consensusParams = RegtestActivateSapling();
TestWallet wallet; TestWallet wallet;
LOCK(wallet.cs_wallet); LOCK2(cs_main, wallet.cs_wallet);
// Generate Sapling address // Generate Sapling address
auto sk = GetTestMasterSaplingSpendingKey(); auto sk = GetTestMasterSaplingSpendingKey();
@ -759,7 +759,7 @@ TEST(WalletTests, GetConflictedSaplingNotes) {
TEST(WalletTests, SproutNullifierIsSpent) { TEST(WalletTests, SproutNullifierIsSpent) {
CWallet wallet; CWallet wallet;
LOCK(wallet.cs_wallet); LOCK2(cs_main, wallet.cs_wallet);
auto sk = libzcash::SproutSpendingKey::random(); auto sk = libzcash::SproutSpendingKey::random();
wallet.AddSproutSpendingKey(sk); wallet.AddSproutSpendingKey(sk);
@ -802,7 +802,7 @@ TEST(WalletTests, SaplingNullifierIsSpent) {
auto consensusParams = RegtestActivateSapling(); auto consensusParams = RegtestActivateSapling();
TestWallet wallet; TestWallet wallet;
LOCK(wallet.cs_wallet); LOCK2(cs_main, wallet.cs_wallet);
// Generate dummy Sapling address // Generate dummy Sapling address
auto sk = GetTestMasterSaplingSpendingKey(); auto sk = GetTestMasterSaplingSpendingKey();
@ -887,7 +887,7 @@ TEST(WalletTests, NavigateFromSaplingNullifierToNote) {
auto consensusParams = RegtestActivateSapling(); auto consensusParams = RegtestActivateSapling();
TestWallet wallet; TestWallet wallet;
LOCK(wallet.cs_wallet); LOCK2(cs_main, wallet.cs_wallet);
// Generate dummy Sapling address // Generate dummy Sapling address
auto sk = GetTestMasterSaplingSpendingKey(); auto sk = GetTestMasterSaplingSpendingKey();
@ -1010,7 +1010,7 @@ TEST(WalletTests, SpentSaplingNoteIsFromMe) {
auto consensusParams = RegtestActivateSapling(); auto consensusParams = RegtestActivateSapling();
TestWallet wallet; TestWallet wallet;
LOCK(wallet.cs_wallet); LOCK2(cs_main, wallet.cs_wallet);
// Generate Sapling address // Generate Sapling address
auto sk = GetTestMasterSaplingSpendingKey(); auto sk = GetTestMasterSaplingSpendingKey();
@ -1801,7 +1801,7 @@ TEST(WalletTests, UpdatedSaplingNoteData) {
auto consensusParams = RegtestActivateSapling(); auto consensusParams = RegtestActivateSapling();
TestWallet wallet; TestWallet wallet;
LOCK(wallet.cs_wallet); LOCK2(cs_main, wallet.cs_wallet);
auto m = GetTestMasterSaplingSpendingKey(); auto m = GetTestMasterSaplingSpendingKey();
@ -1944,7 +1944,7 @@ TEST(WalletTests, MarkAffectedSaplingTransactionsDirty) {
auto consensusParams = RegtestActivateSapling(); auto consensusParams = RegtestActivateSapling();
TestWallet wallet; TestWallet wallet;
LOCK(wallet.cs_wallet); LOCK2(cs_main, wallet.cs_wallet);
// Generate Sapling address // Generate Sapling address
auto sk = GetTestMasterSaplingSpendingKey(); auto sk = GetTestMasterSaplingSpendingKey();

129
src/zcash/History.cpp Normal file
View File

@ -0,0 +1,129 @@
#include "zcash/History.hpp"
#include <stdexcept>
#include <boost/foreach.hpp>
#include "serialize.h"
#include "streams.h"
#include "uint256.h"
#include "librustzcash.h"
namespace libzcash {
void HistoryCache::Extend(const HistoryNode &leaf) {
appends[length++] = leaf;
}
void HistoryCache::Truncate(HistoryIndex newLength) {
// Remove any to-be-appended nodes beyond the new length. The array representation is
// zero-indexed, and HistoryIndex is unsigned, so we handle the truncate-to-zero case
// separately.
if (newLength > 0) {
for (HistoryIndex idx = length; idx >= newLength; idx--) {
appends.erase(idx);
}
} else {
appends.clear();
}
length = newLength;
// We track how far back the update reaches into the tree, so that we can later
// update everything starting from `updateDepth`.
//
// Imagine we rolled two blocks back and then put another three blocks on top of
// the rolled-back state. In that case `updateDepth` will be H-3, while `length`
// will be H (where H is the final chain height after the operation). So we know
// that history entries in the range H-3..H need to be pushed into the database,
// replacing or appending to the persistent nodes there.
if (updateDepth > length) updateDepth = length;
}
HistoryNode NewNode(
uint256 subtreeCommitment,
uint32_t startTime,
uint32_t endTime,
uint32_t startTarget,
uint32_t endTarget,
uint256 startSaplingRoot,
uint256 endSaplingRoot,
uint256 subtreeTotalWork,
uint64_t startHeight,
uint64_t endHeight,
uint64_t saplingTxCount
)
{
CDataStream buf(SER_DISK, 0);
HistoryNode result;
buf << subtreeCommitment;
buf << startTime;
buf << endTime;
buf << startTarget;
buf << endTarget;
buf << startSaplingRoot;
buf << endSaplingRoot;
buf << subtreeTotalWork;
buf << COMPACTSIZE(startHeight);
buf << COMPACTSIZE(endHeight);
buf << COMPACTSIZE(saplingTxCount);
std::copy(buf.begin(), buf.end(), result.begin());
return result;
}
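The fixed sizes used here and in librustzcash.h follow directly from this layout: the fixed-width fields contribute 32+4+4+4+4+32+32+32 = 144 bytes, and each of the three CompactSize-encoded integers occupies at most 9 bytes, for the 171-byte NODE_SERIALIZED_LENGTH; NodeToEntry below additionally prepends a one-byte code and two uint32 child references, giving 1+4+4+171 = 180 = ENTRY_SERIALIZED_LENGTH. As an illustrative compile-time check (not part of this change):

static_assert(32 + 4 + 4 + 4 + 4 + 32 + 32 + 32 + 3 * 9 == NODE_SERIALIZED_LENGTH,
              "fixed fields plus three maximal CompactSizes bound a serialized history node");
static_assert(1 + 4 + 4 + NODE_SERIALIZED_LENGTH == ENTRY_SERIALIZED_LENGTH,
              "a history entry is a code byte, two child indices, and a node");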
HistoryNode NewLeaf(
uint256 commitment,
uint32_t time,
uint32_t target,
uint256 saplingRoot,
uint256 totalWork,
uint64_t height,
uint64_t saplingTxCount
) {
return NewNode(
commitment,
time,
time,
target,
target,
saplingRoot,
saplingRoot,
totalWork,
height,
height,
saplingTxCount
);
}
HistoryEntry NodeToEntry(const HistoryNode node, uint32_t left, uint32_t right) {
CDataStream buf(SER_DISK, 0);
HistoryEntry result;
uint8_t code = 0;
buf << code;
buf << left;
buf << right;
buf << node;
assert(buf.size() <= ENTRY_SERIALIZED_LENGTH);
std::copy(std::begin(buf), std::end(buf), result.bytes);
return result;
}
HistoryEntry LeafToEntry(const HistoryNode node) {
CDataStream buf(SER_DISK, 0);
HistoryEntry result;
uint8_t code = 1;
buf << code;
buf << node;
assert(buf.size() <= ENTRY_SERIALIZED_LENGTH);
std::copy(std::begin(buf), std::end(buf), result.bytes);
return result;
}
}

71
src/zcash/History.hpp Normal file
View File

@ -0,0 +1,71 @@
#ifndef ZC_HISTORY_H_
#define ZC_HISTORY_H_
#include <stdexcept>
#include <unordered_map>
#include <boost/foreach.hpp>
#include "serialize.h"
#include "streams.h"
#include "uint256.h"
#include "librustzcash.h"
namespace libzcash {
const int NODE_SERIALIZED_LENGTH = 171;
typedef std::array<unsigned char, NODE_SERIALIZED_LENGTH> HistoryNode;
typedef uint64_t HistoryIndex;
class HistoryCache {
public:
// updates to the persistent(db) layer
std::unordered_map<HistoryIndex, HistoryNode> appends;
// current length of the history
HistoryIndex length;
// how far back into the previously persisted state
// this update reaches
HistoryIndex updateDepth;
// current root of the history
uint256 root;
// current epoch of this history state
uint32_t epoch;
HistoryCache(HistoryIndex initialLength, uint256 initialRoot, uint32_t initialEpoch) :
length(initialLength), updateDepth(initialLength), root(initialRoot), epoch(initialEpoch) { };
HistoryCache() { }
// Extends current history update by one history node.
void Extend(const HistoryNode &leaf);
// Truncates current history to the new length.
void Truncate(HistoryIndex newLength);
};
// New history node with metadata based on block state.
HistoryNode NewLeaf(
uint256 commitment,
uint32_t time,
uint32_t target,
uint256 saplingRoot,
uint256 totalWork,
uint64_t height,
uint64_t saplingTxCount
);
// Convert a history node to a tree entry (with references to its child nodes)
HistoryEntry NodeToEntry(const HistoryNode node, uint32_t left, uint32_t right);
// Convert a history node to a leaf entry (an end node without children)
HistoryEntry LeafToEntry(const HistoryNode node);
}
typedef libzcash::HistoryCache HistoryCache;
typedef libzcash::HistoryIndex HistoryIndex;
typedef libzcash::HistoryNode HistoryNode;
#endif /* ZC_HISTORY_H_ */

View File

@ -204,7 +204,7 @@ double benchmark_verify_equihash()
CBlockHeader genesis_header = genesis.GetBlockHeader(); CBlockHeader genesis_header = genesis.GetBlockHeader();
struct timeval tv_start; struct timeval tv_start;
timer_start(tv_start); timer_start(tv_start);
CheckEquihashSolution(&genesis_header, params.GetConsensus()); assert(CheckEquihashSolution(&genesis_header, 1, params.GetConsensus()));
return timer_stop(tv_start); return timer_stop(tv_start);
} }

107
zcutil/clean.sh Executable file
View File

@ -0,0 +1,107 @@
#!/bin/sh
# Copyright (c) 2020 The Zcash developers
rm -f src/Makefile
rm -f src/Makefile.in
rm -f doc/man/Makefile
rm -f doc/man/Makefile.in
rm -f .cargo/config
rm -f .cargo/.configured-for-online
rm -f .cargo/.configured-for-offline
rm -f src/config/stamp-h1
rm -f src/config/bitcoin-config.h
rm -f src/obj/build.h
rm -f src/leveldb/build_config.mk
rm -f src/test/buildenv.py
rm -f src/test/data/*.json.h
rm -f src/test/data/*.raw.h
rm -f qa/pull-tester/run-bitcoind-for-test.sh
rm -f qa/pull-tester/tests-config.sh
rm -f src/fuzz.cpp
rm -rf test_bitcoin.coverage/ zcash-gtest.coverage/ total.coverage/
rm -rf cache
rm -rf target
rm -rf depends/work
find src -type f -and \( -name '*.Po' -or -name '*.Plo' -or -name '*.o' -or -name '*.a' -or -name '*.la' -or -name '*.lo' -or -name '*.lai' -or -name '*.pc' -or -name '.dirstamp' -or -name '*.gcda' -or -name '*.gcno' -or -name '*.sage.py' -or -name '*.trs' \) -delete
clean_dirs()
{
find . -depth -path "*/$1/*" -delete
find . -type d -name "$1" -delete
}
clean_exe()
{
rm -f "$1" "$1.exe"
}
clean_dep()
{
rm -rf "$1/autom4te.cache"
rm -f "$1/build-aux/compile"
rm -f "$1/build-aux/config.guess"
rm -f "$1/build-aux/config.sub"
rm -f "$1/build-aux/depcomp"
rm -f "$1/build-aux/install-sh"
rm -f "$1/build-aux/ltmain.sh"
rm -f "$1/build-aux/missing"
rm -f "$1/build-aux/test-driver"
rm -f "$1/build-aux/m4/libtool.m4"
rm -f "$1/build-aux/m4/lt~obsolete.m4"
rm -f "$1/build-aux/m4/ltoptions.m4"
rm -f "$1/build-aux/m4/ltsugar.m4"
rm -f "$1/build-aux/m4/ltversion.m4"
rm -f "$1/aclocal.m4"
rm -f "$1/config.log"
rm -f "$1/config.status"
rm -f "$1/gen_context"
rm -f "$1/configure"
rm -f "$1/libtool"
rm -f "$1/Makefile"
rm -f "$1/Makefile.in"
rm -f "$1/$2"
rm -f "$1/$2~"
}
clean_dirs .deps
clean_dirs .libs
clean_dirs __pycache__
clean_exe src/bench/bench_bitcoin
clean_exe src/zcash-cli
clean_exe src/zcashd
clean_exe src/zcash-gtest
clean_exe src/zcash-tx
clean_exe src/test/test_bitcoin
clean_exe src/leveldb/db_bench
clean_exe src/leveldb/leveldbutil
rm -f src/leveldb/*_test src/leveldb/*_test.exe
rm -f src/leveldb/*.so src/leveldb/*.so.*
clean_dep . src/config/bitcoin-config.h.in
clean_dep src/secp256k1 src/libsecp256k1-config.h.in
rm -f src/secp256k1/src/ecmult_static_context.h
rm -f src/secp256k1/src/libsecp256k1-config.h
rm -f src/secp256k1/src/stamp-h1
rm -f src/secp256k1/.so_locations
clean_exe src/secp256k1/tests
clean_exe src/secp256k1/exhaustive_tests
rm -f src/secp256k1/tests.log src/secp256k1/exhaustive-tests.log src/secp256k1/test-suite.log
clean_dep src/univalue univalue-config.h.in
rm -f src/univalue/univalue-config.h
rm -f src/univalue/stamp-h1
clean_exe src/univalue/test_json
clean_exe src/univalue/unitester
clean_exe src/univalue/no_nul
rm -f src/univalue/test/*.log

12
zcutil/distclean.sh Executable file
View File

@ -0,0 +1,12 @@
#!/bin/sh
# Copyright (c) 2020 The Zcash developers
zcutil/clean.sh
rm -rf depends/*-*-*
rm -rf depends/built
rm -rf depends/sources
rm -rf afl-temp
rm -rf src/fuzzing/*/output

View File

@ -1,4 +1,4 @@
#! /usr/bin/env python2 #! /usr/bin/env python3
import os import os
import re import re
@ -9,7 +9,7 @@ import subprocess
import traceback import traceback
import unittest import unittest
import random import random
from cStringIO import StringIO from io import StringIO
from functools import wraps from functools import wraps
@ -77,6 +77,11 @@ def parse_args(args):
# Top-level flow: # Top-level flow:
def main_logged(release, releaseprev, releasefrom, releaseheight, hotfix): def main_logged(release, releaseprev, releasefrom, releaseheight, hotfix):
verify_dependencies([
('help2man', None),
('debchange', 'devscripts'),
])
verify_tags(releaseprev, releasefrom) verify_tags(releaseprev, releasefrom)
verify_version(release, releaseprev, hotfix) verify_version(release, releaseprev, hotfix)
initialize_git(release, hotfix) initialize_git(release, hotfix)
@ -107,6 +112,20 @@ def phase(message):
return deco return deco
@phase('Checking release script dependencies.')
def verify_dependencies(dependencies):
for (dependency, pkg) in dependencies:
try:
sh_log(dependency, '--version')
except OSError:
raise SystemExit(
"Missing dependency {}{}".format(
dependency,
" (part of {} Debian package)".format(pkg) if pkg else "",
),
)
@phase('Checking tags.') @phase('Checking tags.')
def verify_tags(releaseprev, releasefrom): def verify_tags(releaseprev, releasefrom):
candidates = [] candidates = []
@ -403,7 +422,7 @@ def initialize_logging():
def sh_out(*args): def sh_out(*args):
logging.debug('Run (out): %r', args) logging.debug('Run (out): %r', args)
return subprocess.check_output(args) return subprocess.check_output(args).decode()
def sh_log(*args): def sh_log(*args):
@ -417,7 +436,7 @@ def sh_log(*args):
logging.debug('Run (log PID %r): %r', p.pid, args) logging.debug('Run (log PID %r): %r', p.pid, args)
for line in p.stdout: for line in p.stdout:
logging.debug('> %s', line.rstrip()) logging.debug('> %s', line.decode().rstrip())
status = p.wait() status = p.wait()
if status != 0: if status != 0:
raise SystemExit('Nonzero exit status: {!r}'.format(status)) raise SystemExit('Nonzero exit status: {!r}'.format(status))
@ -443,6 +462,7 @@ def sh_progress(markers, *args):
pbar.update(marker) pbar.update(marker)
logging.debug('Run (log PID %r): %r', p.pid, args) logging.debug('Run (log PID %r): %r', p.pid, args)
for line in p.stdout: for line in p.stdout:
line = line.decode()
logging.debug('> %s', line.rstrip()) logging.debug('> %s', line.rstrip())
for idx, val in enumerate(markers[marker:]): for idx, val in enumerate(markers[marker:]):
if val in line: if val in line:
@ -557,6 +577,12 @@ class Version (object):
self.hotfix, self.hotfix,
) )
def __lt__(self, other):
return self._sort_tup() < other._sort_tup()
def __eq__(self, other):
return self._sort_tup() == other._sort_tup()
class PathPatcher (object): class PathPatcher (object):
def __init__(self, path): def __init__(self, path):

View File

@ -1,3 +1,4 @@
#! /usr/bin/env python3
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import re, os, os.path import re, os, os.path
@ -110,7 +111,7 @@ def generate_release_note(version, prev, clear):
latest_tag = subprocess.Popen(['git describe --abbrev=0'], shell=True, stdout=subprocess.PIPE).communicate()[0].strip() latest_tag = subprocess.Popen(['git describe --abbrev=0'], shell=True, stdout=subprocess.PIPE).communicate()[0].strip()
print("Previous release tag: ", latest_tag) print("Previous release tag: ", latest_tag)
notes = subprocess.Popen(['git shortlog --no-merges {0}..HEAD'.format(latest_tag)], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE).communicate()[0] notes = subprocess.Popen(['git shortlog --no-merges {0}..HEAD'.format(latest_tag)], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE).communicate()[0]
lines = notes.split('\n') lines = notes.decode().split('\n')
lines = [alias_authors_in_release_notes(line) for line in lines] lines = [alias_authors_in_release_notes(line) for line in lines]
temp_release_note = os.path.join(doc_dir, 'release-notes.md') temp_release_note = os.path.join(doc_dir, 'release-notes.md')
with open(temp_release_note, 'r') as f: with open(temp_release_note, 'r') as f: