Auto merge of #4331 - zcash:hotfix-v2.1.1-1, r=ebfull

Hotfix v2.1.1-1

The commits in this PR have been reviewed internally by our team.
Homu 2020-02-07 04:50:43 +00:00
commit e93586a0c4
28 changed files with 378 additions and 98 deletions

View File

@ -1,4 +1,4 @@
Zcash 2.1.1
Zcash 2.1.1-1
<img align="right" width="120" height="80" src="doc/imgs/logo.png">
===========

View File

@ -3,7 +3,7 @@ AC_PREREQ([2.60])
define(_CLIENT_VERSION_MAJOR, 2)
define(_CLIENT_VERSION_MINOR, 1)
define(_CLIENT_VERSION_REVISION, 1)
define(_CLIENT_VERSION_BUILD, 50)
define(_CLIENT_VERSION_BUILD, 51)
define(_ZC_BUILD_VAL, m4_if(m4_eval(_CLIENT_VERSION_BUILD < 25), 1, m4_incr(_CLIENT_VERSION_BUILD), m4_eval(_CLIENT_VERSION_BUILD < 50), 1, m4_eval(_CLIENT_VERSION_BUILD - 24), m4_eval(_CLIENT_VERSION_BUILD == 50), 1, , m4_eval(_CLIENT_VERSION_BUILD - 50)))
define(_CLIENT_VERSION_SUFFIX, m4_if(m4_eval(_CLIENT_VERSION_BUILD < 25), 1, _CLIENT_VERSION_REVISION-beta$1, m4_eval(_CLIENT_VERSION_BUILD < 50), 1, _CLIENT_VERSION_REVISION-rc$1, m4_eval(_CLIENT_VERSION_BUILD == 50), 1, _CLIENT_VERSION_REVISION, _CLIENT_VERSION_REVISION-$1)))
define(_CLIENT_VERSION_IS_RELEASE, true)
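For readers not fluent in the m4 above: the build number maps to the version suffix as follows. A minimal standalone C++ sketch of the same rule (illustrative only, not zcashd code):

```cpp
// Sketch of the _ZC_BUILD_VAL / _CLIENT_VERSION_SUFFIX logic in configure.ac,
// assuming MAJOR=2, MINOR=1, REVISION=1 as in this release.
#include <iostream>
#include <string>

std::string VersionString(int major, int minor, int revision, int build) {
    std::string base = std::to_string(major) + "." + std::to_string(minor)
                     + "." + std::to_string(revision);
    if (build < 25)  return base + "-beta" + std::to_string(build + 1);
    if (build < 50)  return base + "-rc" + std::to_string(build - 24);
    if (build == 50) return base;                     // plain release
    return base + "-" + std::to_string(build - 50);   // hotfix: build 51 -> "-1"
}

int main() {
    std::cout << VersionString(2, 1, 1, 51) << std::endl;  // prints 2.1.1-1
}
```

With _CLIENT_VERSION_BUILD bumped from 50 to 51, the suffix therefore changes from the plain 2.1.1 release to 2.1.1-1.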

View File

@ -1,3 +1,9 @@
zcash (2.1.1+1) stable; urgency=critical
* 2.1.1-1 release.
-- Electric Coin Company <team@electriccoin.co> Thu, 06 Feb 2020 16:53:19 -0700
zcash (2.1.1) stable; urgency=medium
* 2.1.1 release.

View File

@ -1,5 +1,5 @@
---
name: "zcash-2.1.1"
name: "zcash-2.1.1-1"
enable_cache: true
distro: "debian"
suites:

View File

@ -1,11 +1,11 @@
Zcash Contributors
==================
Jack Grigg (923)
Jack Grigg (925)
Simon Liu (460)
Sean Bowe (288)
Sean Bowe (291)
Eirik Ogilvie-Wigley (212)
Daira Hopwood (145)
Daira Hopwood (155)
Wladimir J. van der Laan (89)
Jay Graber (89)
Taylor Hornby (84)

View File

@ -1,9 +1,9 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.11.
.TH ZCASH-CLI "1" "January 2020" "zcash-cli v2.1.1" "User Commands"
.TH ZCASH-CLI "1" "February 2020" "zcash-cli v2.1.1-1" "User Commands"
.SH NAME
zcash-cli \- manual page for zcash-cli v2.1.1
zcash-cli \- manual page for zcash-cli v2.1.1-1
.SH DESCRIPTION
Zcash RPC client version v2.1.1
Zcash RPC client version v2.1.1\-1
.PP
In order to ensure you are adequately protecting your privacy when using Zcash,
please see <https://z.cash/support/security/>.

View File

@ -1,9 +1,9 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.11.
.TH ZCASH-TX "1" "January 2020" "zcash-tx v2.1.1" "User Commands"
.TH ZCASH-TX "1" "February 2020" "zcash-tx v2.1.1-1" "User Commands"
.SH NAME
zcash-tx \- manual page for zcash-tx v2.1.1
zcash-tx \- manual page for zcash-tx v2.1.1-1
.SH DESCRIPTION
Zcash zcash\-tx utility version v2.1.1
Zcash zcash\-tx utility version v2.1.1\-1
.SS "Usage:"
.TP
zcash\-tx [options] <hex\-tx> [commands]

View File

@ -1,9 +1,9 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.47.11.
.TH ZCASHD "1" "January 2020" "zcashd v2.1.1" "User Commands"
.TH ZCASHD "1" "February 2020" "zcashd v2.1.1-1" "User Commands"
.SH NAME
zcashd \- manual page for zcashd v2.1.1
zcashd \- manual page for zcashd v2.1.1-1
.SH DESCRIPTION
Zcash Daemon version v2.1.1
Zcash Daemon version v2.1.1\-1
.PP
In order to ensure you are adequately protecting your privacy when using Zcash,
please see <https://z.cash/support/security/>.
@ -67,6 +67,12 @@ Imports blocks from external blk000??.dat file on startup
.IP
Keep at most <n> unconnectable transactions in memory (default: 100)
.HP
\fB\-maxtimeadjustment=\fR<n>
.IP
Maximum allowed median peer time offset adjustment, in seconds. Local
perspective of time may be influenced by peers forward or backward by
this amount. (default: 0 seconds, maximum: 1500 seconds)
.HP
\fB\-par=\fR<n>
.IP
Set the number of script verification threads (\fB\-16\fR to 16, 0 = auto, <0 =

View File

@ -0,0 +1,43 @@
Notable changes
===============
This release fixes a security issue described at
https://z.cash/support/security/announcements/security-announcement-2020-02-06/ .
This release also adds a `-maxtimeadjustment` option to set the maximum time, in
seconds, by which the node's clock can be adjusted based on the clocks of its
peer nodes. This option defaults to 0, meaning that no such adjustment is performed.
This is a change from the previous behaviour, which was to adjust the clock by up
to 70 minutes forward or backward. The maximum setting for this option is now
25 minutes (1500 seconds).
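As a rough sketch of the behaviour this option controls (based on the src/timedata.cpp change later in this commit; the helper function below is illustrative, not actual zcashd code):

```cpp
// How a -maxtimeadjustment style cap interacts with the median peer time offset.
#include <cstdint>
#include <cstdlib>

int64_t ClampedTimeOffset(int64_t medianPeerOffset, int64_t maxTimeAdjustment) {
    // With the new default of 0 the local clock is never adjusted; with a
    // nonzero setting (at most 1500 seconds) only offsets within the bound
    // are accepted, otherwise no adjustment is applied.
    if (std::llabs(medianPeerOffset) <= maxTimeAdjustment) {
        return medianPeerOffset;
    }
    return 0;
}
```

For example, `zcashd -maxtimeadjustment=900` would allow the local view of network time to be shifted by at most 15 minutes.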
Fix for incorrect banning of nodes during syncing
-------------------------------------------------
After activation of the Blossom network upgrade, a node that is syncing the
block chain from before Blossom would incorrectly ban peers that send it a
Blossom transaction. This resulted in slower and less reliable syncing (#4283).
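In outline, the new ban policy (a standalone illustration derived from the ContextualCheckTransaction changes further down in this commit, where DOS_LEVEL_BLOCK = 100 and DOS_LEVEL_MEMPOOL = 10) is:

```cpp
// Ban score applied when a "potentially relaxing" consensus rule fails,
// i.e. a rule under which a currently-invalid transaction could become
// valid at a later height (such as a pre-Blossom node seeing Blossom
// transactions while syncing).
int BanScoreForPotentiallyRelaxingRule(bool isMined, bool isInitialBlockDownload) {
    if (isMined) return 100;              // transaction was mined in a block
    if (isInitialBlockDownload) return 0; // syncing: don't penalise relaying peers
    return 10;                            // normal mempool acceptance
}
```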
Changelog
=========
Daira Hopwood (10):
Move check for block times that are too far ahead of adjusted time, to ContextualCheckBlock.
Improve messages for timestamp rules.
Add constant for how far a block timestamp can be ahead of adjusted time. Loosely based on https://github.com/bitcoin/bitcoin/commit/e57a1fd8999800b3fc744d45bb96354cae294032
Soft fork: restrict block timestamps to be no more than 90 minutes after the MTP of the previous block.
Adjust the miner to satisfy consensus regarding future timestamps relative to median-time-past.
Enable future timestamp soft fork at varying heights according to network.
Cosmetic: brace style in ContextualCheckBlockHeader.
Add -maxtimeadjustment with default of 0 instead of the 4200 seconds used in Bitcoin Core.
Fix ContextualCheckBlock test (the ban score should be 100 since these are mined transactions).
Add string argument to static_asserts to satisfy C++11.
Jack Grigg (2):
test: Update RPC test cache generation to handle new consensus rule
Apply a consistent ban policy within ContextualCheckTransaction
Sean Bowe (3):
Release notes for vulnerability and -maxtimeadjustment option.
make-release.py: Versioning changes for 2.1.1-1.
make-release.py: Updated manpages for 2.1.1-1.

View File

@ -23,6 +23,9 @@ import re
from authproxy import AuthServiceProxy
PRE_BLOSSOM_BLOCK_TARGET_SPACING = 150
POST_BLOSSOM_BLOCK_TARGET_SPACING = 75
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
@ -109,6 +112,27 @@ def initialize_chain(test_dir):
bitcoind and bitcoin-cli must be in search path.
"""
# Due to the consensus change fix for the timejacking attack, we need to
# ensure that the cache is pretty fresh. Specifically, we need the median
# time past of the chain tip of the cache to be no more than 90 minutes
# behind the current local time, or else mined blocks will be rejected by
# all nodes, halting the test. With Sapling active by default, this requires
# the chain tip itself to be no more than 75 minutes behind the current
# local time.
#
# We address this here, by regenerating the cache if it is more than 60
# minutes old. This gives 15 minutes of slack initially that an RPC test has
# to complete in, if it is started right at the oldest cache time. Within an
# individual test, the first five calls to `generate` will each advance the
# median time past of the chain tip by 2.5 minutes (with Sapling active by
# default). Therefore, if the logic between the completion of any two
# adjacent calls to `generate` within a test takes longer than 2.5 minutes,
# the excess will subtract from the slack.
if os.path.isdir(os.path.join("cache", "node0")):
if os.stat("cache").st_mtime + (60 * 60) < time.time():
print("initialize_chain(): Removing stale cache")
shutil.rmtree("cache")
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run bitcoinds:
@ -140,17 +164,20 @@ def initialize_chain(test_dir):
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
# Blocks are created with timestamps 2.5 minutes apart (matching the
# chain defaulting above to Sapling active), starting 200 * 2.5 minutes
# before the current time.
block_time = int(time.time()) - (200 * PRE_BLOSSOM_BLOCK_TARGET_SPACING)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 10*60
block_time += PRE_BLOSSOM_BLOCK_TARGET_SPACING
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Check that local time isn't going backwards
assert_greater_than(time.time() + 1, block_time)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)

View File

@ -17,6 +17,31 @@
static const int SPROUT_VALUE_VERSION = 1001400;
static const int SAPLING_VALUE_VERSION = 1010100;
/**
* Maximum amount of time that a block timestamp is allowed to be ahead of the
* median-time-past of the previous block.
*/
static const int64_t MAX_FUTURE_BLOCK_TIME_MTP = 90 * 60;
/**
* Maximum amount of time that a block timestamp is allowed to be ahead of the
* current network-adjusted time.
*/
static const int64_t MAX_FUTURE_BLOCK_TIME_ADJUSTED = 2 * 60 * 60;
/**
* Timestamp window used as a grace period by code that compares external
* timestamps (such as timestamps passed to RPCs, or wallet key creation times)
* to block timestamps.
*/
static const int64_t TIMESTAMP_WINDOW = MAX_FUTURE_BLOCK_TIME_ADJUSTED + 60;
static_assert(MAX_FUTURE_BLOCK_TIME_ADJUSTED > MAX_FUTURE_BLOCK_TIME_MTP,
"MAX_FUTURE_BLOCK_TIME_ADJUSTED must be greater than MAX_FUTURE_BLOCK_TIME_MTP");
static_assert(TIMESTAMP_WINDOW > MAX_FUTURE_BLOCK_TIME_ADJUSTED,
"TIMESTAMP_WINDOW must be greater than MAX_FUTURE_BLOCK_TIME_ADJUSTED");
class CBlockFileInfo
{
public:
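For illustration, the two limits introduced above are applied to a candidate block header roughly as follows (a standalone sketch mirroring the ContextualCheckBlockHeader() changes later in this commit, not zcashd code; softForkActive stands in for Params::FutureTimestampSoftForkActive()):

```cpp
#include <cstdint>

// Returns false when the header timestamp violates either limit.
bool TimestampWithinLimits(int64_t blockTime, int64_t medianTimePast,
                           int64_t adjustedTime, bool softForkActive) {
    const int64_t MAX_FUTURE_BLOCK_TIME_MTP = 90 * 60;           // 5400 s
    const int64_t MAX_FUTURE_BLOCK_TIME_ADJUSTED = 2 * 60 * 60;  // 7200 s
    if (softForkActive && blockTime > medianTimePast + MAX_FUTURE_BLOCK_TIME_MTP) {
        return false;  // rejected as "time-too-far-ahead-of-mtp"
    }
    return blockTime <= adjustedTime + MAX_FUTURE_BLOCK_TIME_ADJUSTED;  // else "time-too-new"
}
```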

View File

@ -323,6 +323,23 @@ public:
consensus.vUpgrades[Consensus::UPGRADE_HEARTWOOD].nActivationHeight =
Consensus::NetworkUpgrade::NO_ACTIVATION_HEIGHT;
// On testnet we activate this rule 6 blocks after Blossom activation. From block 299188 and
// prior to Blossom activation, the testnet minimum-difficulty threshold was 15 minutes (i.e.
// a minimum difficulty block can be mined if no block is mined normally within 15 minutes):
// <https://zips.z.cash/zip-0205#change-to-difficulty-adjustment-on-testnet>
// However, the median-time-past is 6 blocks behind, and the worst-case time for 7 blocks at a
// 15-minute spacing is ~105 minutes, which exceeds the limit imposed by the soft fork of
// 90 minutes.
//
// After Blossom, the minimum difficulty threshold time is changed to 6 times the block target
// spacing, which is 7.5 minutes:
// <https://zips.z.cash/zip-0208#minimum-difficulty-blocks-on-the-test-network>
// 7 times that is 52.5 minutes, which is well within the limit imposed by the soft fork.
static_assert(6 * Consensus::POST_BLOSSOM_POW_TARGET_SPACING * 7 < MAX_FUTURE_BLOCK_TIME_MTP - 60,
"MAX_FUTURE_BLOCK_TIME_MTP is too low given block target spacing");
consensus.nFutureTimestampSoftForkHeight = consensus.vUpgrades[Consensus::UPGRADE_BLOSSOM].nActivationHeight + 6;
// The best chain should have at least this much work.
consensus.nMinimumChainWork = uint256S("0x0000000000000000000000000000000000000000000000000000001dbb4c4224");
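The arithmetic behind the comment and static_assert above, spelled out (standalone compile-time checks, not part of the diff):

```cpp
// Pre-Blossom testnet: worst case of 7 blocks at the 15-minute min-difficulty
// threshold is 7 * 15 * 60 = 6300 s (~105 min), which exceeds the 5400 s limit.
static_assert(7 * 15 * 60 > 90 * 60, "pre-Blossom worst case exceeds MAX_FUTURE_BLOCK_TIME_MTP");
// Post-Blossom: the threshold is 6 * 75 s = 450 s (7.5 min) per block, so
// 7 * 450 = 3150 s (52.5 min), comfortably under 90 min - 60 s = 5340 s.
static_assert(7 * 6 * 75 < 90 * 60 - 60, "post-Blossom worst case fits within the limit");
```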

View File

@ -18,7 +18,7 @@
#define CLIENT_VERSION_MAJOR 2
#define CLIENT_VERSION_MINOR 1
#define CLIENT_VERSION_REVISION 1
#define CLIENT_VERSION_BUILD 50
#define CLIENT_VERSION_BUILD 51
//! Set to true for release, false for prerelease or test build
#define CLIENT_VERSION_IS_RELEASE true

View File

@ -11,6 +11,10 @@ namespace Consensus {
return NetworkUpgradeState(nHeight, *this, idx) == UPGRADE_ACTIVE;
}
bool Params::FutureTimestampSoftForkActive(int nHeight) const {
return nHeight >= nFutureTimestampSoftForkHeight;
}
int Params::Halving(int nHeight) const {
// zip208
// Halving(height) :=

View File

@ -96,6 +96,8 @@ struct Params {
*/
bool NetworkUpgradeActive(int nHeight, Consensus::UpgradeIndex idx) const;
bool FutureTimestampSoftForkActive(int nHeight) const;
uint256 hashGenesisBlock;
bool fCoinbaseMustBeShielded;
@ -126,6 +128,47 @@ struct Params {
int nMajorityRejectBlockOutdated;
int nMajorityWindow;
NetworkUpgrade vUpgrades[MAX_NETWORK_UPGRADES];
/**
* Default block height at which the future timestamp soft fork rule activates.
*
* Genesis blocks are hard-coded into the binary for all networks
* (mainnet, testnet, regtest), and have now-ancient timestamps. So we need to
* handle the case where we might use the genesis block's timestamp as the
* median-time-past.
*
* GetMedianTimePast() is implemented such that the chosen block is the
* median of however many blocks we are able to select up to
* nMedianTimeSpan = 11. For example, if nHeight == 6:
*
* ,-<pmedian ,-<pbegin ,-<pend
* [-, -, -, -, 0, 1, 2, 3, 4, 5, 6] -
*
* and thus pbegin[(pend - pbegin)/2] will select block height 3, assuming
* that the block timestamps are all greater than the genesis block's
* timestamp. For regtest mode, this is a valid assumption; we generate blocks
* deterministically and in-order. For mainnet it was true in practice, and
* we aren't going to be starting a new chain linked directly from the mainnet
* genesis block.
*
* Therefore, for regtest and mainnet we only risk using the regtest genesis
* block's timestamp for nHeight < 2 (as GetMedianTimePast() uses floor division).
*
* Separately, for mainnet this is also necessary because there was a long time
* between starting to find the mainnet genesis block (which was mined with a
* single laptop) and mining the block at height 1. For any new mainnet chain
* using Zcash code, the soft fork rule would be enabled from the start so that
* miners would limit their timestamps accordingly.
*
* For testnet, the future timestamp soft fork rule was violated for many
* blocks prior to Blossom activation. At Blossom, the time threshold for the
* (testnet-specific) minimum difficulty rule was changed in such a way that
* starting from shortly after the Blossom activation, no further blocks
* violate the soft fork rule. So for testnet we override the soft fork
* activation height in chainparams.cpp.
*/
int nFutureTimestampSoftForkHeight = 2;
/** Proof of work parameters */
unsigned int nEquihashN = 0;
unsigned int nEquihashK = 0;
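To make the selection described in the long comment above concrete, here is a simplified standalone sketch of the median-time-past computation (nMedianTimeSpan = 11 as in CBlockIndex::GetMedianTimePast(); an illustration, not the zcashd method):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// chainTimes[h] is the timestamp of the block at height h.
int64_t MedianTimePast(const std::vector<int64_t>& chainTimes, int nHeight) {
    const int nMedianTimeSpan = 11;
    std::vector<int64_t> window;
    // Take the block at nHeight and up to 10 of its ancestors.
    for (int h = nHeight; h >= 0 && (int)window.size() < nMedianTimeSpan; --h) {
        window.push_back(chainTimes[h]);
    }
    std::sort(window.begin(), window.end());
    // Floor division: for nHeight == 6 there are 7 entries and index 3 is
    // chosen, i.e. the block at height 3 when timestamps are monotone.
    return window[window.size() / 2];
}
```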

View File

@ -8,7 +8,7 @@
// Deprecation policy:
// * Shut down 16 weeks' worth of blocks after the estimated release block height.
// * A warning is shown during the 2 weeks' worth of blocks prior to shut down.
static const int APPROX_RELEASE_HEIGHT = 698112;
static const int APPROX_RELEASE_HEIGHT = 719034;
static const int WEEKS_UNTIL_DEPRECATION = 16;
static const int DEPRECATION_HEIGHT = APPROX_RELEASE_HEIGHT + (WEEKS_UNTIL_DEPRECATION * 7 * 24 * 48);
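For reference, the arithmetic implied by the constants above (assuming the post-Blossom 75-second target spacing, i.e. 48 blocks per hour):

```cpp
// 16 weeks * 7 days * 24 hours * 48 blocks/hour = 129024 blocks, so with
// APPROX_RELEASE_HEIGHT = 719034 this release deprecates at height 848058.
static_assert(16 * 7 * 24 * 48 == 129024, "blocks in 16 weeks at 75-second spacing");
static_assert(719034 + 16 * 7 * 24 * 48 == 848058, "deprecation height for this release");
```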

View File

@ -252,7 +252,7 @@ TEST_F(ContextualCheckBlockTest, BlockSproutRulesRejectOtherTx) {
{
SCOPED_TRACE("BlockSproutRulesRejectOverwinterTx");
ExpectInvalidBlockFromTx(CTransaction(mtx), 0, "tx-overwinter-not-active");
ExpectInvalidBlockFromTx(CTransaction(mtx), 100, "tx-overwinter-not-active");
}
// Make it a Sapling transaction
@ -262,7 +262,7 @@ TEST_F(ContextualCheckBlockTest, BlockSproutRulesRejectOtherTx) {
{
SCOPED_TRACE("BlockSproutRulesRejectSaplingTx");
ExpectInvalidBlockFromTx(CTransaction(mtx), 0, "tx-overwinter-not-active");
ExpectInvalidBlockFromTx(CTransaction(mtx), 100, "tx-overwinter-not-active");
}
};
@ -290,7 +290,7 @@ TEST_F(ContextualCheckBlockTest, BlockOverwinterRulesRejectOtherTx) {
{
SCOPED_TRACE("BlockOverwinterRulesRejectSaplingTx");
ExpectInvalidBlockFromTx(CTransaction(mtx), 0, "bad-overwinter-tx-version-group-id");
ExpectInvalidBlockFromTx(CTransaction(mtx), 100, "bad-overwinter-tx-version-group-id");
}
}
@ -318,6 +318,6 @@ TEST_F(ContextualCheckBlockTest, BlockSaplingRulesRejectOtherTx) {
{
SCOPED_TRACE("BlockSaplingRulesRejectOverwinterTx");
ExpectInvalidBlockFromTx(CTransaction(mtx), 0, "bad-sapling-tx-version-group-id");
ExpectInvalidBlockFromTx(CTransaction(mtx), 100, "bad-sapling-tx-version-group-id");
}
}

View File

@ -167,7 +167,7 @@ TEST(checktransaction_tests, BadTxnsOversize) {
// ... but fails contextual ones!
EXPECT_CALL(state, DoS(100, false, REJECT_INVALID, "bad-txns-oversize", false)).Times(1);
EXPECT_FALSE(ContextualCheckTransaction(tx, state, Params(), 1, 100));
EXPECT_FALSE(ContextualCheckTransaction(tx, state, Params(), 1, true));
}
{
@ -188,7 +188,7 @@ TEST(checktransaction_tests, BadTxnsOversize) {
MockCValidationState state;
EXPECT_TRUE(CheckTransactionWithoutProofVerification(tx, state));
EXPECT_TRUE(ContextualCheckTransaction(tx, state, Params(), 1, 100));
EXPECT_TRUE(ContextualCheckTransaction(tx, state, Params(), 1, true));
// Revert to default
RegtestDeactivateSapling();
@ -503,11 +503,18 @@ TEST(checktransaction_tests, bad_txns_invalid_joinsplit_signature) {
CTransaction tx(mtx);
MockCValidationState state;
// during initial block download, DoS ban score should be zero, else 100
// during initial block download, for transactions being accepted into the
// mempool (and thus not mined), DoS ban score should be zero, else 10
EXPECT_CALL(state, DoS(0, false, REJECT_INVALID, "bad-txns-invalid-joinsplit-signature", false)).Times(1);
ContextualCheckTransaction(tx, state, chainparams, 0, 100, [](const CChainParams&) { return true; });
ContextualCheckTransaction(tx, state, chainparams, 0, false, [](const CChainParams&) { return true; });
EXPECT_CALL(state, DoS(10, false, REJECT_INVALID, "bad-txns-invalid-joinsplit-signature", false)).Times(1);
ContextualCheckTransaction(tx, state, chainparams, 0, false, [](const CChainParams&) { return false; });
// for transactions that have been mined in a block, DoS ban score should
// always be 100.
EXPECT_CALL(state, DoS(100, false, REJECT_INVALID, "bad-txns-invalid-joinsplit-signature", false)).Times(1);
ContextualCheckTransaction(tx, state, chainparams, 0, 100, [](const CChainParams&) { return false; });
ContextualCheckTransaction(tx, state, chainparams, 0, true, [](const CChainParams&) { return true; });
EXPECT_CALL(state, DoS(100, false, REJECT_INVALID, "bad-txns-invalid-joinsplit-signature", false)).Times(1);
ContextualCheckTransaction(tx, state, chainparams, 0, true, [](const CChainParams&) { return false; });
}
TEST(checktransaction_tests, non_canonical_ed25519_signature) {
@ -520,7 +527,7 @@ TEST(checktransaction_tests, non_canonical_ed25519_signature) {
{
CTransaction tx(mtx);
MockCValidationState state;
EXPECT_TRUE(ContextualCheckTransaction(tx, state, chainparams, 0, 100));
EXPECT_TRUE(ContextualCheckTransaction(tx, state, chainparams, 0, true));
}
// Copied from libsodium/crypto_sign/ed25519/ref10/open.c
@ -540,11 +547,18 @@ TEST(checktransaction_tests, non_canonical_ed25519_signature) {
CTransaction tx(mtx);
MockCValidationState state;
// during initial block download, DoS ban score should be zero, else 100
// during initial block download, for transactions being accepted into the
// mempool (and thus not mined), DoS ban score should be zero, else 10
EXPECT_CALL(state, DoS(0, false, REJECT_INVALID, "bad-txns-invalid-joinsplit-signature", false)).Times(1);
ContextualCheckTransaction(tx, state, chainparams, 0, 100, [](const CChainParams&) { return true; });
ContextualCheckTransaction(tx, state, chainparams, 0, false, [](const CChainParams&) { return true; });
EXPECT_CALL(state, DoS(10, false, REJECT_INVALID, "bad-txns-invalid-joinsplit-signature", false)).Times(1);
ContextualCheckTransaction(tx, state, chainparams, 0, false, [](const CChainParams&) { return false; });
// for transactions that have been mined in a block, DoS ban score should
// always be 100.
EXPECT_CALL(state, DoS(100, false, REJECT_INVALID, "bad-txns-invalid-joinsplit-signature", false)).Times(1);
ContextualCheckTransaction(tx, state, chainparams, 0, 100, [](const CChainParams&) { return false; });
ContextualCheckTransaction(tx, state, chainparams, 0, true, [](const CChainParams&) { return true; });
EXPECT_CALL(state, DoS(100, false, REJECT_INVALID, "bad-txns-invalid-joinsplit-signature", false)).Times(1);
ContextualCheckTransaction(tx, state, chainparams, 0, true, [](const CChainParams&) { return false; });
}
TEST(checktransaction_tests, OverwinterConstructors) {
@ -806,7 +820,7 @@ TEST(checktransaction_tests, OverwinterVersionNumberHigh) {
UNSAFE_CTransaction tx(mtx);
MockCValidationState state;
EXPECT_CALL(state, DoS(100, false, REJECT_INVALID, "bad-tx-overwinter-version-too-high", false)).Times(1);
ContextualCheckTransaction(tx, state, Params(), 1, 100);
ContextualCheckTransaction(tx, state, Params(), 1, true);
// Revert to default
UpdateNetworkUpgradeParameters(Consensus::UPGRADE_OVERWINTER, Consensus::NetworkUpgrade::NO_ACTIVATION_HEIGHT);
@ -841,11 +855,18 @@ TEST(checktransaction_tests, OverwinterNotActive) {
CTransaction tx(mtx);
MockCValidationState state;
// during initial block download, DoS ban score should be zero, else 100
// during initial block download, for transactions being accepted into the
// mempool (and thus not mined), DoS ban score should be zero, else 10
EXPECT_CALL(state, DoS(0, false, REJECT_INVALID, "tx-overwinter-not-active", false)).Times(1);
ContextualCheckTransaction(tx, state, chainparams, 1, 100, [](const CChainParams&) { return true; });
ContextualCheckTransaction(tx, state, chainparams, 0, false, [](const CChainParams&) { return true; });
EXPECT_CALL(state, DoS(10, false, REJECT_INVALID, "tx-overwinter-not-active", false)).Times(1);
ContextualCheckTransaction(tx, state, chainparams, 0, false, [](const CChainParams&) { return false; });
// for transactions that have been mined in a block, DoS ban score should
// always be 100.
EXPECT_CALL(state, DoS(100, false, REJECT_INVALID, "tx-overwinter-not-active", false)).Times(1);
ContextualCheckTransaction(tx, state, chainparams, 1, 100, [](const CChainParams&) { return false; });
ContextualCheckTransaction(tx, state, chainparams, 0, true, [](const CChainParams&) { return true; });
EXPECT_CALL(state, DoS(100, false, REJECT_INVALID, "tx-overwinter-not-active", false)).Times(1);
ContextualCheckTransaction(tx, state, chainparams, 0, true, [](const CChainParams&) { return false; });
}
// This tests a transaction without the fOverwintered flag set, against the Overwinter consensus rule set.
@ -862,7 +883,7 @@ TEST(checktransaction_tests, OverwinterFlagNotSet) {
CTransaction tx(mtx);
MockCValidationState state;
EXPECT_CALL(state, DoS(100, false, REJECT_INVALID, "tx-overwinter-flag-not-set", false)).Times(1);
ContextualCheckTransaction(tx, state, Params(), 1, 100);
ContextualCheckTransaction(tx, state, Params(), 1, true);
// Revert to default
UpdateNetworkUpgradeParameters(Consensus::UPGRADE_OVERWINTER, Consensus::NetworkUpgrade::NO_ACTIVATION_HEIGHT);

View File

@ -106,7 +106,7 @@ TEST(TransactionBuilder, TransparentToSapling)
EXPECT_EQ(tx.valueBalance, -40000);
CValidationState state;
EXPECT_TRUE(ContextualCheckTransaction(tx, state, Params(), 2, 0));
EXPECT_TRUE(ContextualCheckTransaction(tx, state, Params(), 2, true));
EXPECT_EQ(state.GetRejectReason(), "");
// Revert to default
@ -143,7 +143,7 @@ TEST(TransactionBuilder, SaplingToSapling) {
EXPECT_EQ(tx.valueBalance, 10000);
CValidationState state;
EXPECT_TRUE(ContextualCheckTransaction(tx, state, Params(), 3, 0));
EXPECT_TRUE(ContextualCheckTransaction(tx, state, Params(), 3, true));
EXPECT_EQ(state.GetRejectReason(), "");
// Revert to default
@ -181,7 +181,7 @@ TEST(TransactionBuilder, SaplingToSprout) {
EXPECT_EQ(tx.valueBalance, 35000);
CValidationState state;
EXPECT_TRUE(ContextualCheckTransaction(tx, state, Params(), 3, 0));
EXPECT_TRUE(ContextualCheckTransaction(tx, state, Params(), 3, true));
EXPECT_EQ(state.GetRejectReason(), "");
// Revert to default
@ -242,7 +242,7 @@ TEST(TransactionBuilder, SproutToSproutAndSapling) {
EXPECT_EQ(tx.valueBalance, -5000);
CValidationState state;
EXPECT_TRUE(ContextualCheckTransaction(tx, state, Params(), 4, 0));
EXPECT_TRUE(ContextualCheckTransaction(tx, state, Params(), 4, true));
EXPECT_EQ(state.GetRejectReason(), "");
// Revert to default

View File

@ -32,6 +32,7 @@
#include "script/standard.h"
#include "script/sigcache.h"
#include "scheduler.h"
#include "timedata.h"
#include "txdb.h"
#include "torcontrol.h"
#include "ui_interface.h"
@ -356,6 +357,7 @@ std::string HelpMessage(HelpMessageMode mode)
strUsage += HelpMessageOpt("-dbcache=<n>", strprintf(_("Set database cache size in megabytes (%d to %d, default: %d)"), nMinDbCache, nMaxDbCache, nDefaultDbCache));
strUsage += HelpMessageOpt("-loadblock=<file>", _("Imports blocks from external blk000??.dat file on startup"));
strUsage += HelpMessageOpt("-maxorphantx=<n>", strprintf(_("Keep at most <n> unconnectable transactions in memory (default: %u)"), DEFAULT_MAX_ORPHAN_TRANSACTIONS));
strUsage += HelpMessageOpt("-maxtimeadjustment=<n>", strprintf(_("Maximum allowed median peer time offset adjustment, in seconds. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds, maximum: %u seconds)"), DEFAULT_MAX_TIME_ADJUSTMENT, LIMIT_MAX_TIME_ADJUSTMENT));
strUsage += HelpMessageOpt("-par=<n>", strprintf(_("Set the number of script verification threads (%u to %d, 0 = auto, <0 = leave that many cores free, default: %d)"),
-GetNumCores(), MAX_SCRIPTCHECK_THREADS, DEFAULT_SCRIPTCHECK_THREADS));
#ifndef WIN32
@ -982,6 +984,11 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
fPruneMode = true;
}
int64_t nMaxTimeAdjustment = GetArg("-maxtimeadjustment", DEFAULT_MAX_TIME_ADJUSTMENT);
if (nMaxTimeAdjustment < 0 || nMaxTimeAdjustment > LIMIT_MAX_TIME_ADJUSTMENT) {
return InitError(strprintf(_("-maxtimeadjustment must be in the range 0 to %u seconds"), LIMIT_MAX_TIME_ADJUSTMENT));
}
RegisterAllCoreRPCCommands(tableRPC);
#ifdef ENABLE_WALLET
bool fDisableWallet = GetBoolArg("-disablewallet", false);

View File

@ -763,69 +763,96 @@ unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& in
* 1. AcceptToMemoryPool calls CheckTransaction and this function.
* 2. ProcessNewBlock calls AcceptBlock, which calls CheckBlock (which calls CheckTransaction)
* and ContextualCheckBlock (which calls this function).
* 3. The isInitBlockDownload argument is only to assist with testing.
* 3. For consensus rules that relax restrictions (where a transaction that is invalid at
* nHeight can become valid at a later height), we make the bans conditional on not
* being in Initial Block Download mode.
* 4. The isInitBlockDownload argument is a function parameter to assist with testing.
*/
bool ContextualCheckTransaction(
const CTransaction& tx,
CValidationState &state,
const CChainParams& chainparams,
const int nHeight,
const int dosLevel,
const bool isMined,
bool (*isInitBlockDownload)(const CChainParams&))
{
const int DOS_LEVEL_BLOCK = 100;
// DoS level set to 10 to be more forgiving.
const int DOS_LEVEL_MEMPOOL = 10;
// For constricting rules, we don't need to account for IBD mode.
auto dosLevelConstricting = isMined ? DOS_LEVEL_BLOCK : DOS_LEVEL_MEMPOOL;
// For rules that are relaxing (or might become relaxing when a future
// network upgrade is implemented), we need to account for IBD mode.
auto dosLevelPotentiallyRelaxing = isMined ? DOS_LEVEL_BLOCK : (
isInitBlockDownload(chainparams) ? 0 : DOS_LEVEL_MEMPOOL);
bool overwinterActive = chainparams.GetConsensus().NetworkUpgradeActive(nHeight, Consensus::UPGRADE_OVERWINTER);
bool saplingActive = chainparams.GetConsensus().NetworkUpgradeActive(nHeight, Consensus::UPGRADE_SAPLING);
bool isSprout = !overwinterActive;
// If Sprout rules apply, reject transactions which are intended for Overwinter and beyond
if (isSprout && tx.fOverwintered) {
return state.DoS(isInitBlockDownload(chainparams) ? 0 : dosLevel,
error("ContextualCheckTransaction(): overwinter is not active yet"),
REJECT_INVALID, "tx-overwinter-not-active");
return state.DoS(
dosLevelPotentiallyRelaxing,
error("ContextualCheckTransaction(): overwinter is not active yet"),
REJECT_INVALID, "tx-overwinter-not-active");
}
if (saplingActive) {
// Reject transactions with valid version but missing overwintered flag
if (tx.nVersion >= SAPLING_MIN_TX_VERSION && !tx.fOverwintered) {
return state.DoS(dosLevel, error("ContextualCheckTransaction(): overwintered flag must be set"),
REJECT_INVALID, "tx-overwintered-flag-not-set");
return state.DoS(
dosLevelConstricting,
error("ContextualCheckTransaction(): overwintered flag must be set"),
REJECT_INVALID, "tx-overwintered-flag-not-set");
}
// Reject transactions with non-Sapling version group ID
if (tx.fOverwintered && tx.nVersionGroupId != SAPLING_VERSION_GROUP_ID) {
return state.DoS(isInitBlockDownload(chainparams) ? 0 : dosLevel,
error("CheckTransaction(): invalid Sapling tx version"),
REJECT_INVALID, "bad-sapling-tx-version-group-id");
return state.DoS(
dosLevelPotentiallyRelaxing,
error("CheckTransaction(): invalid Sapling tx version"),
REJECT_INVALID, "bad-sapling-tx-version-group-id");
}
// Reject transactions with invalid version
if (tx.fOverwintered && tx.nVersion < SAPLING_MIN_TX_VERSION ) {
return state.DoS(100, error("CheckTransaction(): Sapling version too low"),
return state.DoS(
dosLevelConstricting,
error("CheckTransaction(): Sapling version too low"),
REJECT_INVALID, "bad-tx-sapling-version-too-low");
}
// Reject transactions with invalid version
if (tx.fOverwintered && tx.nVersion > SAPLING_MAX_TX_VERSION ) {
return state.DoS(100, error("CheckTransaction(): Sapling version too high"),
return state.DoS(
dosLevelPotentiallyRelaxing,
error("CheckTransaction(): Sapling version too high"),
REJECT_INVALID, "bad-tx-sapling-version-too-high");
}
} else if (overwinterActive) {
// Reject transactions with valid version but missing overwinter flag
if (tx.nVersion >= OVERWINTER_MIN_TX_VERSION && !tx.fOverwintered) {
return state.DoS(dosLevel, error("ContextualCheckTransaction(): overwinter flag must be set"),
REJECT_INVALID, "tx-overwinter-flag-not-set");
return state.DoS(
dosLevelConstricting,
error("ContextualCheckTransaction(): overwinter flag must be set"),
REJECT_INVALID, "tx-overwinter-flag-not-set");
}
// Reject transactions with non-Overwinter version group ID
if (tx.fOverwintered && tx.nVersionGroupId != OVERWINTER_VERSION_GROUP_ID) {
return state.DoS(isInitBlockDownload(chainparams) ? 0 : dosLevel,
error("CheckTransaction(): invalid Overwinter tx version"),
REJECT_INVALID, "bad-overwinter-tx-version-group-id");
return state.DoS(
dosLevelPotentiallyRelaxing,
error("CheckTransaction(): invalid Overwinter tx version"),
REJECT_INVALID, "bad-overwinter-tx-version-group-id");
}
// Reject transactions with invalid version
if (tx.fOverwintered && tx.nVersion > OVERWINTER_MAX_TX_VERSION ) {
return state.DoS(100, error("CheckTransaction(): overwinter version too high"),
return state.DoS(
dosLevelPotentiallyRelaxing,
error("CheckTransaction(): overwinter version too high"),
REJECT_INVALID, "bad-tx-overwinter-version-too-high");
}
}
@ -834,14 +861,16 @@ bool ContextualCheckTransaction(
if (overwinterActive) {
// Reject transactions intended for Sprout
if (!tx.fOverwintered) {
return state.DoS(dosLevel, error("ContextualCheckTransaction: overwinter is active"),
REJECT_INVALID, "tx-overwinter-active");
return state.DoS(
dosLevelConstricting,
error("ContextualCheckTransaction: overwinter is active"),
REJECT_INVALID, "tx-overwinter-active");
}
// Check that all transactions are unexpired
if (IsExpiredTx(tx, nHeight)) {
// Don't increase banscore if the transaction only just expired
int expiredDosLevel = IsExpiredTx(tx, nHeight - 1) ? dosLevel : 0;
int expiredDosLevel = IsExpiredTx(tx, nHeight - 1) ? dosLevelConstricting : 0;
return state.DoS(expiredDosLevel, error("ContextualCheckTransaction(): transaction is expired"), REJECT_INVALID, "tx-overwinter-expired");
}
}
@ -851,8 +880,10 @@ bool ContextualCheckTransaction(
// Size limits
BOOST_STATIC_ASSERT(MAX_BLOCK_SIZE > MAX_TX_SIZE_BEFORE_SAPLING); // sanity
if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE_BEFORE_SAPLING)
return state.DoS(100, error("ContextualCheckTransaction(): size limits failed"),
REJECT_INVALID, "bad-txns-oversize");
return state.DoS(
dosLevelPotentiallyRelaxing,
error("ContextualCheckTransaction(): size limits failed"),
REJECT_INVALID, "bad-txns-oversize");
}
uint256 dataToBeSigned;
@ -867,6 +898,8 @@ bool ContextualCheckTransaction(
try {
dataToBeSigned = SignatureHash(scriptCode, tx, NOT_AN_INPUT, SIGHASH_ALL, 0, consensusBranchId);
} catch (std::logic_error ex) {
// A logic error should never occur because we pass NOT_AN_INPUT and
// SIGHASH_ALL to SignatureHash().
return state.DoS(100, error("CheckTransaction(): error computing signature hash"),
REJECT_INVALID, "error-computing-signature-hash");
}
@ -882,9 +915,10 @@ bool ContextualCheckTransaction(
dataToBeSigned.begin(), 32,
tx.joinSplitPubKey.begin()
) != 0) {
return state.DoS(isInitBlockDownload(chainparams) ? 0 : 100,
error("CheckTransaction(): invalid joinsplit signature"),
REJECT_INVALID, "bad-txns-invalid-joinsplit-signature");
return state.DoS(
dosLevelPotentiallyRelaxing,
error("CheckTransaction(): invalid joinsplit signature"),
REJECT_INVALID, "bad-txns-invalid-joinsplit-signature");
}
}
@ -906,8 +940,10 @@ bool ContextualCheckTransaction(
))
{
librustzcash_sapling_verification_ctx_free(ctx);
return state.DoS(100, error("ContextualCheckTransaction(): Sapling spend description invalid"),
REJECT_INVALID, "bad-txns-sapling-spend-description-invalid");
return state.DoS(
dosLevelPotentiallyRelaxing,
error("ContextualCheckTransaction(): Sapling spend description invalid"),
REJECT_INVALID, "bad-txns-sapling-spend-description-invalid");
}
}
@ -921,6 +957,9 @@ bool ContextualCheckTransaction(
))
{
librustzcash_sapling_verification_ctx_free(ctx);
// This should be a non-contextual check, but we check it here
// as we need to pass over the outputs anyway in order to then
// call librustzcash_sapling_final_check().
return state.DoS(100, error("ContextualCheckTransaction(): Sapling output description invalid"),
REJECT_INVALID, "bad-txns-sapling-output-description-invalid");
}
@ -934,8 +973,10 @@ bool ContextualCheckTransaction(
))
{
librustzcash_sapling_verification_ctx_free(ctx);
return state.DoS(100, error("ContextualCheckTransaction(): Sapling binding signature invalid"),
REJECT_INVALID, "bad-txns-sapling-binding-signature-invalid");
return state.DoS(
dosLevelPotentiallyRelaxing,
error("ContextualCheckTransaction(): Sapling binding signature invalid"),
REJECT_INVALID, "bad-txns-sapling-binding-signature-invalid");
}
librustzcash_sapling_verification_ctx_free(ctx);
@ -1249,9 +1290,8 @@ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransa
if (!CheckTransaction(tx, state, verifier))
return error("AcceptToMemoryPool: CheckTransaction failed");
// DoS level set to 10 to be more forgiving.
// Check transaction contextually against the set of consensus rules which apply in the next block to be mined.
if (!ContextualCheckTransaction(tx, state, Params(), nextBlockHeight, 10)) {
if (!ContextualCheckTransaction(tx, state, Params(), nextBlockHeight, false)) {
return error("AcceptToMemoryPool: ContextualCheckTransaction failed");
}
@ -3692,11 +3732,6 @@ bool CheckBlockHeader(
return state.DoS(50, error("CheckBlockHeader(): proof of work failed"),
REJECT_INVALID, "high-hash");
// Check timestamp
if (block.GetBlockTime() > GetAdjustedTime() + 2 * 60 * 60)
return state.Invalid(error("CheckBlockHeader(): block timestamp too far in the future"),
REJECT_INVALID, "time-too-new");
return true;
}
@ -3769,35 +3804,62 @@ bool ContextualCheckBlockHeader(
{
const Consensus::Params& consensusParams = chainParams.GetConsensus();
uint256 hash = block.GetHash();
if (hash == consensusParams.hashGenesisBlock)
if (hash == consensusParams.hashGenesisBlock) {
return true;
}
assert(pindexPrev);
int nHeight = pindexPrev->nHeight+1;
// Check proof of work
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams))
if (block.nBits != GetNextWorkRequired(pindexPrev, &block, consensusParams)) {
return state.DoS(100, error("%s: incorrect proof of work", __func__),
REJECT_INVALID, "bad-diffbits");
}
// Check timestamp against prev
if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast())
return state.Invalid(error("%s: block's timestamp is too early", __func__),
auto medianTimePast = pindexPrev->GetMedianTimePast();
if (block.GetBlockTime() <= medianTimePast) {
return state.Invalid(error("%s: block at height %d, timestamp %d is not later than median-time-past %d",
__func__, nHeight, block.GetBlockTime(), medianTimePast),
REJECT_INVALID, "time-too-old");
}
if (fCheckpointsEnabled)
{
// Check future timestamp soft fork rule introduced in v2.1.1-1.
// This retrospectively activates at block height 2 for mainnet and regtest,
// and 6 blocks after Blossom activation for testnet.
// Explanations of these activation heights are in src/consensus/params.h
// and chainparams.cpp.
//
if (consensusParams.FutureTimestampSoftForkActive(nHeight) &&
block.GetBlockTime() > medianTimePast + MAX_FUTURE_BLOCK_TIME_MTP) {
return state.Invalid(error("%s: block at height %d, timestamp %d is too far ahead of median-time-past, limit is %d",
__func__, nHeight, block.GetBlockTime(), medianTimePast + MAX_FUTURE_BLOCK_TIME_MTP),
REJECT_INVALID, "time-too-far-ahead-of-mtp");
}
// Check timestamp
auto nTimeLimit = GetAdjustedTime() + MAX_FUTURE_BLOCK_TIME_ADJUSTED;
if (block.GetBlockTime() > nTimeLimit) {
return state.Invalid(error("%s: block at height %d, timestamp %d is too far ahead of adjusted time, limit is %d",
__func__, nHeight, block.GetBlockTime(), nTimeLimit),
REJECT_INVALID, "time-too-new");
}
if (fCheckpointsEnabled) {
// Don't accept any forks from the main chain prior to last checkpoint
CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainParams.Checkpoints());
if (pcheckpoint && nHeight < pcheckpoint->nHeight)
if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
return state.DoS(100, error("%s: forked chain older than last checkpoint (height %d)", __func__, nHeight));
}
}
// Reject block.nVersion < 4 blocks
if (block.nVersion < 4)
if (block.nVersion < 4) {
return state.Invalid(error("%s : rejected nVersion<4 block", __func__),
REJECT_OBSOLETE, "bad-version");
}
return true;
}
@ -3813,7 +3875,7 @@ bool ContextualCheckBlock(
BOOST_FOREACH(const CTransaction& tx, block.vtx) {
// Check transaction contextually against consensus rules at block height
if (!ContextualCheckTransaction(tx, state, chainparams, nHeight, 100)) {
if (!ContextualCheckTransaction(tx, state, chainparams, nHeight, true)) {
return false; // Failure reason has been set in validation state object
}

View File

@ -341,7 +341,7 @@ bool ContextualCheckInputs(const CTransaction& tx, CValidationState &state, cons
/** Check a transaction contextually against a set of consensus rules */
bool ContextualCheckTransaction(const CTransaction& tx, CValidationState &state,
const CChainParams& chainparams, int nHeight, int dosLevel,
const CChainParams& chainparams, int nHeight, bool isMined,
bool (*isInitBlockDownload)(const CChainParams&) = IsInitialBlockDownload);
/** Apply the effects of this transaction on the UTXO set represented by view */

View File

@ -99,7 +99,13 @@ public:
void UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
pblock->nTime = std::max(pindexPrev->GetMedianTimePast()+1, GetAdjustedTime());
auto medianTimePast = pindexPrev->GetMedianTimePast();
auto nTime = std::max(medianTimePast + 1, GetAdjustedTime());
// See the comment in ContextualCheckBlockHeader() for background.
if (consensusParams.FutureTimestampSoftForkActive(pindexPrev->nHeight + 1)) {
nTime = std::min(nTime, medianTimePast + MAX_FUTURE_BLOCK_TIME_MTP);
}
pblock->nTime = nTime;
// Updating time can change work required on testnet:
if (consensusParams.nPowAllowMinDifficultyBlocksAfterHeight != boost::none) {

View File

@ -491,7 +491,7 @@ void test_simple_joinsplit_invalidity(uint32_t consensusBranchId, CMutableTransa
jsdesc->nullifiers[1] = GetRandHash();
BOOST_CHECK(CheckTransactionWithoutProofVerification(newTx, state));
BOOST_CHECK(!ContextualCheckTransaction(newTx, state, Params(), 0, 100));
BOOST_CHECK(!ContextualCheckTransaction(newTx, state, Params(), 0, true));
BOOST_CHECK(state.GetRejectReason() == "bad-txns-invalid-joinsplit-signature");
// Empty output script.
@ -505,7 +505,7 @@ void test_simple_joinsplit_invalidity(uint32_t consensusBranchId, CMutableTransa
) == 0);
BOOST_CHECK(CheckTransactionWithoutProofVerification(newTx, state));
BOOST_CHECK(ContextualCheckTransaction(newTx, state, Params(), 0, 100));
BOOST_CHECK(ContextualCheckTransaction(newTx, state, Params(), 0, true));
}
{
// Ensure that values within the joinsplit are well-formed.

View File

@ -79,7 +79,7 @@ void AddTimeData(const CNetAddr& ip, int64_t nOffsetSample)
int64_t nMedian = vTimeOffsets.median();
std::vector<int64_t> vSorted = vTimeOffsets.sorted();
// Only let other nodes change our time by so much
if (abs64(nMedian) < 70 * 60)
if (abs64(nMedian) <= GetArg("-maxtimeadjustment", DEFAULT_MAX_TIME_ADJUSTMENT))
{
nTimeOffset = nMedian;
}

View File

@ -10,6 +10,16 @@
#include <stdint.h>
#include <vector>
#include "chain.h"
static const int64_t DEFAULT_MAX_TIME_ADJUSTMENT = 0;
static const int64_t LIMIT_MAX_TIME_ADJUSTMENT = 25 * 60;
static_assert(LIMIT_MAX_TIME_ADJUSTMENT * 2 < MAX_FUTURE_BLOCK_TIME_MTP,
"LIMIT_MAX_TIME_ADJUSTMENT is too high given MAX_FUTURE_BLOCK_TIME_MTP");
static_assert(MAX_FUTURE_BLOCK_TIME_MTP + LIMIT_MAX_TIME_ADJUSTMENT < MAX_FUTURE_BLOCK_TIME_ADJUSTED,
"LIMIT_MAX_TIME_ADJUSTMENT is too high given MAX_FUTURE_BLOCK_TIME_MTP and MAX_FUTURE_BLOCK_TIME_ADJUSTED");
class CNetAddr;
/**

View File

@ -448,8 +448,9 @@ UniValue importwallet_impl(const UniValue& params, bool fHelp, bool fImportZKeys
pwalletMain->ShowProgress("", 100); // hide progress dialog in GUI
CBlockIndex *pindex = chainActive.Tip();
while (pindex && pindex->pprev && pindex->GetBlockTime() > nTimeBegin - 7200)
while (pindex && pindex->pprev && pindex->GetBlockTime() > nTimeBegin - TIMESTAMP_WINDOW) {
pindex = pindex->pprev;
}
if (!pwalletMain->nTimeFirstKey || nTimeBegin < pwalletMain->nTimeFirstKey)
pwalletMain->nTimeFirstKey = nTimeBegin;

View File

@ -2523,8 +2523,9 @@ int CWallet::ScanForWalletTransactions(CBlockIndex* pindexStart, bool fUpdate)
// no need to read and scan block, if block was created before
// our wallet birthday (as adjusted for block time variability)
while (pindex && nTimeFirstKey && (pindex->GetBlockTime() < (nTimeFirstKey - 7200)))
while (pindex && nTimeFirstKey && pindex->GetBlockTime() < nTimeFirstKey - TIMESTAMP_WINDOW) {
pindex = chainActive.Next(pindex);
}
ShowProgress(_("Rescanning..."), 0); // show rescan progress in GUI as dialog or on splashscreen, if -rescan on startup
double dProgressStart = Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), pindex, false);
@ -4347,8 +4348,9 @@ void CWallet::GetKeyBirthTimes(std::map<CKeyID, int64_t> &mapKeyBirth) const {
}
// Extract block timestamps for those keys
for (std::map<CKeyID, CBlockIndex*>::const_iterator it = mapKeyFirstBlock.begin(); it != mapKeyFirstBlock.end(); it++)
mapKeyBirth[it->first] = it->second->GetBlockTime() - 7200; // block times can be 2h off
for (std::map<CKeyID, CBlockIndex*>::const_iterator it = mapKeyFirstBlock.begin(); it != mapKeyFirstBlock.end(); it++) {
mapKeyBirth[it->first] = it->second->GetBlockTime() - TIMESTAMP_WINDOW; // block times can be off
}
}
bool CWallet::AddDestData(const CTxDestination &dest, const std::string &key, const std::string &value)