Repair some RPC tests.

Signed-off-by: Daira Emma Hopwood <daira@jacaranda.org>
This commit is contained in:
Daira Emma Hopwood 2023-04-18 12:05:42 +01:00
parent 3b255b8941
commit eb74c8e8b0
7 changed files with 139 additions and 189 deletions

View File

@@ -17,6 +17,7 @@ from test_framework.util import (
sync_mempools,
)
from test_framework.mininode import COIN
from test_framework.zip317 import conventional_fee
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
@@ -61,7 +62,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
vout = utxo[0]['vout']
value = utxo[0]['amount']
fee = Decimal("0.00005")
fee = conventional_fee(2)
# 100 transactions off a confirmed tx should be fine
chain = []
for i in range(100):

View File

@@ -9,8 +9,9 @@
from test_framework.authproxy import JSONRPCException
from test_framework.util import assert_equal, connect_nodes_bi, fail, \
initialize_chain_clean, start_node, sync_blocks, sync_mempools, \
initialize_chain_clean, start_node, \
wait_and_assert_operationid_status, DEFAULT_FEE
from test_framework.zip317 import conventional_fee
from decimal import Decimal
@@ -28,14 +29,10 @@ def assert_mergetoaddress_exception(expected_error_msg, merge_to_address_lambda)
class MergeToAddressHelper:
def __init__(self, addr_type, any_zaddr, utxos_to_generate, utxos_in_tx1, utxos_in_tx2):
def __init__(self, addr_type, any_zaddr):
self.addr_type = addr_type
self.any_zaddr = [any_zaddr]
self.any_zaddr_or_utxo = [any_zaddr, "ANY_TADDR"]
# utxos_to_generate, utxos_in_tx1, utxos_in_tx2 have to do with testing transaction size limits
self.utxos_to_generate = utxos_to_generate
self.utxos_in_tx1 = utxos_in_tx1
self.utxos_in_tx2 = utxos_in_tx2
def setup_chain(self, test):
print("Initializing test directory "+test.options.tmpdir)
@@ -45,7 +42,6 @@ class MergeToAddressHelper:
args = [
'-minrelaytxfee=0',
'-debug=zrpcunsafe',
'-limitancestorcount=%d' % self.utxos_to_generate,
'-allowdeprecated=getnewaddress',
'-allowdeprecated=z_getnewaddress',
'-allowdeprecated=z_getbalance',
@@ -230,6 +226,7 @@ class MergeToAddressHelper:
test.sync_all()
test.nodes[1].generate(1)
test.sync_all()
assert_equal(0, len(test.nodes[0].z_listunspent(0)))
assert_equal(test.nodes[0].getbalance(), 0)
assert_equal(test.nodes[0].z_getbalance(do_not_shield_taddr), 0)
@@ -239,57 +236,27 @@ class MergeToAddressHelper:
assert_equal(test.nodes[1].z_getbalance(n1taddr), Decimal('80.0') - DEFAULT_FEE)
assert_equal(test.nodes[2].getbalance(), 0)
# Generate self.utxos_to_generate regular UTXOs on node 0, and 20 regular UTXOs on node 2
# Generate 5 regular UTXOs on node 0, and 20 regular UTXOs on node 2.
mytaddr = test.nodes[0].getnewaddress()
n2taddr = test.nodes[2].getnewaddress()
test.nodes[1].generate(1000)
test.nodes[1].generate(20)
test.sync_all()
for i in range(self.utxos_to_generate):
for i in range(5):
test.nodes[1].sendtoaddress(mytaddr, 1)
for i in range(20):
test.nodes[1].sendtoaddress(n2taddr, 1)
test.nodes[1].generate(1)
test.sync_all()
# Merging the UTXOs will conditionally occur over two transactions, since max tx size is 100,000 bytes before Sapling and 2,000,000 after.
# We don't verify mergingTransparentValue as UTXOs are not selected in any specific order, so value can change on each test run.
# We set an unrealistically high limit parameter of 99999, to verify that max tx size will constrain the number of UTXOs.
result = test.nodes[0].z_mergetoaddress([mytaddr], myzaddr, 0, 99999)
assert_equal(result["mergingUTXOs"], self.utxos_in_tx1)
assert_equal(result["remainingUTXOs"], self.utxos_in_tx2)
assert_equal(result["mergingNotes"], Decimal('0'))
assert_equal(result["mergingShieldedValue"], Decimal('0'))
assert_equal(result["remainingNotes"], Decimal('0'))
assert_equal(result["remainingShieldedValue"], Decimal('0'))
remainingTransparentValue = result["remainingTransparentValue"]
# This z_mergetoaddress and the one below result in two notes in myzaddr.
result = test.nodes[0].z_mergetoaddress([mytaddr], myzaddr, DEFAULT_FEE)
assert_equal(result["mergingUTXOs"], 5)
assert_equal(result["remainingUTXOs"], 0)
assert_equal(result["mergingNotes"], 0)
assert_equal(result["remainingNotes"], 0)
wait_and_assert_operationid_status(test.nodes[0], result['opid'])
# For sapling we do not check that this occurs over two transactions because of the time that it would take
if self.utxos_in_tx2 > 0:
# Verify that UTXOs are locked (not available for selection) by queuing up another merging operation
result = test.nodes[0].z_mergetoaddress([mytaddr], myzaddr, 0, 0)
assert_equal(result["mergingUTXOs"], self.utxos_in_tx2)
assert_equal(result["mergingTransparentValue"], Decimal(remainingTransparentValue))
assert_equal(result["remainingUTXOs"], Decimal('0'))
assert_equal(result["remainingTransparentValue"], Decimal('0'))
assert_equal(result["mergingNotes"], Decimal('0'))
assert_equal(result["mergingShieldedValue"], Decimal('0'))
assert_equal(result["remainingNotes"], Decimal('0'))
assert_equal(result["remainingShieldedValue"], Decimal('0'))
wait_and_assert_operationid_status(test.nodes[0], result['opid'])
# sync_all() invokes sync_mempool() but node 2's mempool limit will cause tx1 and tx2 to be rejected.
# So instead, we sync on blocks and mempool for node 0 and node 1, and after a new block is generated
# which mines tx1 and tx2, all nodes will have an empty mempool which can then be synced.
sync_blocks(test.nodes[:2])
sync_mempools(test.nodes[:2])
# Generate enough blocks to ensure all transactions are mined
while test.nodes[1].getmempoolinfo()['size'] > 0:
test.nodes[1].generate(1)
test.sync_all()
# Verify maximum number of UTXOs which node 2 can shield is not limited
# when the limit parameter is set to 0.
# Verify maximum number of UTXOs is not limited when the limit parameter is set to 0.
expected_to_merge = 20
expected_remaining = 0
@@ -302,6 +269,7 @@ class MergeToAddressHelper:
test.sync_all()
test.nodes[1].generate(1)
test.sync_all()
assert_equal(2, len(test.nodes[0].z_listunspent()))
# Verify maximum number of UTXOs which node 0 can shield is set by default limit parameter of 50
mytaddr = test.nodes[0].getnewaddress()
@@ -309,7 +277,7 @@ class MergeToAddressHelper:
test.nodes[1].sendtoaddress(mytaddr, 1)
test.nodes[1].generate(1)
test.sync_all()
result = test.nodes[0].z_mergetoaddress([mytaddr], myzaddr, DEFAULT_FEE)
result = test.nodes[0].z_mergetoaddress([mytaddr], myzaddr, conventional_fee(52))
assert_equal(result["mergingUTXOs"], Decimal('50'))
assert_equal(result["remainingUTXOs"], Decimal('50'))
assert_equal(result["mergingNotes"], Decimal('0'))
@@ -317,40 +285,38 @@ class MergeToAddressHelper:
assert_equal(result["remainingNotes"], Decimal('0'))
wait_and_assert_operationid_status(test.nodes[0], result['opid'])
assert_equal(2, len(test.nodes[0].z_listunspent()))
assert_equal(3, len(test.nodes[0].z_listunspent(0)))
# Verify maximum number of UTXOs which node 0 can shield can be set by the limit parameter
result = test.nodes[0].z_mergetoaddress([mytaddr], myzaddr, DEFAULT_FEE, 33)
result = test.nodes[0].z_mergetoaddress([mytaddr], myzaddr, conventional_fee(35), 33)
assert_equal(result["mergingUTXOs"], Decimal('33'))
assert_equal(result["remainingUTXOs"], Decimal('17'))
assert_equal(result["mergingNotes"], Decimal('0'))
# Remaining notes are only counted if we are trying to merge any notes
assert_equal(result["remainingNotes"], Decimal('0'))
wait_and_assert_operationid_status(test.nodes[0], result['opid'])
# Don't sync node 2 which rejects the tx due to its mempooltxinputlimit
sync_blocks(test.nodes[:2])
sync_mempools(test.nodes[:2])
test.sync_all()
test.nodes[1].generate(1)
test.sync_all()
assert_equal(4, len(test.nodes[0].z_listunspent()))
# Verify maximum number of notes which node 0 can shield can be set by the limit parameter
# Also check that we can set off a second merge before the first one is complete
# myzaddr will have 5 notes if testing before to Sapling activation and 4 otherwise
num_notes = len(test.nodes[0].z_listunspent(0))
result1 = test.nodes[0].z_mergetoaddress([myzaddr], myzaddr, DEFAULT_FEE, 50, 2)
result2 = test.nodes[0].z_mergetoaddress([myzaddr], myzaddr, DEFAULT_FEE, 50, 2)
# First merge should select from all notes
assert_equal(result1["mergingUTXOs"], Decimal('0'))
# Remaining UTXOs are only counted if we are trying to merge any UTXOs
assert_equal(result1["remainingUTXOs"], Decimal('0'))
assert_equal(result1["mergingNotes"], Decimal('2'))
assert_equal(result1["remainingNotes"], num_notes - 2)
assert_equal(result1["remainingNotes"], 2)
# Second merge should ignore locked notes
result2 = test.nodes[0].z_mergetoaddress([myzaddr], myzaddr, DEFAULT_FEE, 50, 2)
assert_equal(result2["mergingUTXOs"], Decimal('0'))
assert_equal(result2["remainingUTXOs"], Decimal('0'))
assert_equal(result2["mergingNotes"], Decimal('2'))
assert_equal(result2["remainingNotes"], num_notes - 4)
assert_equal(result2["remainingNotes"], 0)
wait_and_assert_operationid_status(test.nodes[0], result1['opid'])
wait_and_assert_operationid_status(test.nodes[0], result2['opid'])
@@ -359,11 +325,11 @@ class MergeToAddressHelper:
test.sync_all()
# Shield both UTXOs and notes to a z-addr
result = test.nodes[0].z_mergetoaddress(self.any_zaddr_or_utxo, myzaddr, 0, 10, 2)
result = test.nodes[0].z_mergetoaddress(self.any_zaddr_or_utxo, myzaddr, DEFAULT_FEE, 10, 2)
assert_equal(result["mergingUTXOs"], Decimal('10'))
assert_equal(result["remainingUTXOs"], Decimal('7'))
assert_equal(result["mergingNotes"], Decimal('2'))
assert_equal(result["remainingNotes"], num_notes - 4)
assert_equal(result["remainingNotes"], 0)
wait_and_assert_operationid_status(test.nodes[0], result['opid'])
test.sync_all()
test.nodes[1].generate(1)

View File

@@ -8,9 +8,7 @@ from mergetoaddress_helper import MergeToAddressHelper
class MergeToAddressSapling (BitcoinTestFramework):
# 13505 would be the maximum number of utxos based on the transaction size limits for Sapling
# but testing this causes the test to take an indeterminately long time to run.
helper = MergeToAddressHelper('sapling', 'ANY_SAPLING', 800, 800, 0)
helper = MergeToAddressHelper('sapling', 'ANY_SAPLING')
def setup_chain(self):
self.helper.setup_chain(self)

View File

@@ -4,139 +4,118 @@
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
initialize_chain_clean,
start_node,
sync_blocks,
sync_mempools,
)
from test_framework.zip317 import conventional_fee_zats
from test_framework.util import assert_equal, start_nodes
from test_framework.mininode import COIN
from test_framework.zip317 import BLOCK_UNPAID_ACTION_LIMIT, MARGINAL_FEE, ZIP_317_FEE
import time
from decimal import Decimal
class PrioritiseTransactionTest (BitcoinTestFramework):
class PrioritiseTransactionTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.cache_behavior = 'clean'
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=[[
'-paytxfee=0.000001',
'-printpriority=1',
'-allowdeprecated=getnewaddress',
]] * self.num_nodes)
def setup_network(self, split=False):
self.nodes = []
# Start nodes with tiny block size of 11kb
args = [
"-blockmaxsize=11000",
"-maxorphantx=1000",
"-printpriority=1",
"-limitancestorcount=900",
"-allowdeprecated=getnewaddress",
]
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split=False
def run_test(self):
def in_template(block_template, txid):
return any([tx['hash'] == txid for tx in block_template['transactions']])
def eventually_in_template(node, txid):
for tries in range(2):
time.sleep(11)
block_template = node.getblocktemplate()
if in_template(block_template, txid): return True
return False
# Make sure we have enough mature funds on node 0.
# Slow start is switched off for regtest, and we're before Blossom,
# so each utxo will be 10 ZEC.
miner_subsidy = Decimal("10")
self.nodes[0].generate(100 + BLOCK_UNPAID_ACTION_LIMIT + 2)
self.sync_all()
def run_test (self):
print("Mining 11kb blocks...")
self.nodes[0].generate(501)
# Create a tx that will not be mined unless prioritised.
# We spend BLOCK_UNPAID_ACTION_LIMIT mining rewards, ensuring that
# tx has exactly (BLOCK_UNPAID_ACTION_LIMIT + 1) logical actions,
# because one extra input will be needed to pay the fee.
# Since we've set -paytxfee to pay only the relay fee rate, the fee
# will be less than the marginal fee, so these are all unpaid actions.
amount = miner_subsidy * BLOCK_UNPAID_ACTION_LIMIT
tx = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), amount)
# 11 kb blocks will only hold about 50 txs, so this will fill mempool with older txs
taddr = self.nodes[1].getnewaddress()
for _ in range(900):
self.nodes[0].sendtoaddress(taddr, 0.1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# With a rate of either 7tx/s or 14tx/s per peer (depending on whether
# the connection is inbound or outbound), syncing this many transactions
# could take up to 128s. So use a higher timeout on the mempool sync.
sync_mempools(self.nodes, timeout=200)
mempool = self.nodes[0].getrawmempool(True)
assert(tx in mempool)
fee_zats = int(mempool[tx]['fee'] * COIN)
assert(fee_zats < MARGINAL_FEE)
tx_verbose = self.nodes[0].getrawtransaction(tx, 1)
assert_equal(BLOCK_UNPAID_ACTION_LIMIT + 1, len(tx_verbose['vin']))
# Create tx to be prioritised on node 0
# This tx will not be mined without prioritisation.
priority_tx_0 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
# Check that priority_tx_0 is not in block_template() prior to prioritisation
# Check that tx is not in a new block template prior to prioritisation.
block_template = self.nodes[0].getblocktemplate()
in_block_template = False
for tx in block_template['transactions']:
if tx['hash'] == priority_tx_0:
in_block_template = True
break
assert_equal(in_template(block_template, tx), False)
# TODO: this doesn't work reliably now, but as part of #6403 it can be made
# to work reliably by using a transaction that has unpaid actions over the
# block_unpaid_action_limit before prioritisation.
print("in_block_template =", in_block_template)
#assert_equal(in_block_template, False)
def send_fully_paid_transaction():
# Sending a new transaction will make getblocktemplate refresh within 5s.
# We use z_sendmany so that it will pay the conventional fee (ignoring -paytxfee)
# and not take up an unpaid action.
recipients = [{'address': self.nodes[2].getnewaddress(), 'amount': Decimal("1")}]
self.nodes[0].z_sendmany('ANY_TADDR', recipients, 0, ZIP_317_FEE, 'AllowFullyTransparent')
priority_success = self.nodes[0].prioritisetransaction(priority_tx_0, 0, conventional_fee_zats(2))
# Prioritising it on node 1 has no effect on node 0.
self.sync_all()
priority_success = self.nodes[1].prioritisetransaction(tx, 0, MARGINAL_FEE)
assert(priority_success)
mempool = self.nodes[1].getrawmempool(True)
assert_equal(fee_zats + MARGINAL_FEE, mempool[tx]['modifiedfee'] * COIN)
self.sync_all()
send_fully_paid_transaction()
assert_equal(eventually_in_template(self.nodes[0], tx), False)
# Check that prioritised transaction is not in getblocktemplate()
# (not updated because no new txns)
in_block_template = False
# Now prioritise it on node 0, but short by one zatoshi.
priority_success = self.nodes[0].prioritisetransaction(tx, 0, MARGINAL_FEE - fee_zats - 1)
assert(priority_success)
mempool = self.nodes[0].getrawmempool(True)
assert_equal(MARGINAL_FEE - 1, mempool[tx]['modifiedfee'] * COIN)
send_fully_paid_transaction()
assert_equal(eventually_in_template(self.nodes[0], tx), False)
# Finally, prioritise it on node 0 by the one extra zatoshi (this also checks
# that prioritisation is cumulative).
priority_success = self.nodes[0].prioritisetransaction(tx, 0, 1)
assert(priority_success)
mempool = self.nodes[0].getrawmempool(True)
assert_equal(MARGINAL_FEE, mempool[tx]['modifiedfee'] * COIN)
# The block template will refresh after 1 minute, or after 5 seconds if a new
# transaction is added to the mempool. As long as there is less than a minute
# between the getblocktemplate() calls, it should not have been updated yet.
block_template = self.nodes[0].getblocktemplate()
for tx in block_template['transactions']:
if tx['hash'] == priority_tx_0:
in_block_template = True
break
assert_equal(in_template(block_template, tx), False)
# See TODO above.
print("in_block_template =", in_block_template)
#assert_equal(in_block_template, False)
# Check that the prioritised transaction eventually gets into a new block template.
send_fully_paid_transaction()
assert_equal(eventually_in_template(self.nodes[0], tx), True)
# Sending a new transaction will make getblocktemplate refresh within 10s
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
# Check that prioritised transaction is in getblocktemplate()
# getblocktemplate() will refresh after 1 min, or after 10 sec if new transaction is added to mempool
# Mempool is probed every 10 seconds. We'll give getblocktemplate() a maximum of 30 seconds to refresh
block_template = self.nodes[0].getblocktemplate()
start = time.time();
in_block_template = False
while in_block_template == False:
for tx in block_template['transactions']:
if tx['hash'] == priority_tx_0:
in_block_template = True
break
if time.time() - start > 30:
raise AssertionError("Test timed out because prioritised transaction was not returned by getblocktemplate within 30 seconds.")
time.sleep(1)
block_template = self.nodes[0].getblocktemplate()
assert(in_block_template)
# Node 1 doesn't get the next block, so this *shouldn't* be mined despite being prioritised on node 1
priority_tx_1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.1)
self.nodes[1].prioritisetransaction(priority_tx_1, 0, conventional_fee_zats(2))
# Mine block on node 0
# Mine a block on node 0.
blk_hash = self.nodes[0].generate(1)
block = self.nodes[0].getblock(blk_hash[0])
assert_equal(tx in block['tx'], True)
self.sync_all()
# Check that priority_tx_0 was mined
# Check that tx was mined and that node 1 received the funds.
mempool = self.nodes[0].getrawmempool()
assert_equal(priority_tx_0 in block['tx'], True)
assert_equal(priority_tx_0 in mempool, False)
# Check that priority_tx_1 was not mined
assert_equal(priority_tx_1 in mempool, True)
# TODO: again this is potentially unreliable.
#assert_equal(priority_tx_1 in block['tx'], False)
# Mine a block on node 1 and sync
blk_hash_1 = self.nodes[1].generate(1)
block_1 = self.nodes[1].getblock(blk_hash_1[0])
self.sync_all()
# Check to see if priority_tx_1 is now mined
mempool_1 = self.nodes[1].getrawmempool()
assert_equal(priority_tx_1 in mempool_1, False)
assert_equal(priority_tx_1 in block_1['tx'], True)
assert_equal(mempool, [])
assert_equal(self.nodes[1].getbalance(), amount)
# Check that all of the fully paid transactions were mined.
assert_equal(self.nodes[2].getbalance(), Decimal("3"))
if __name__ == '__main__':
PrioritiseTransactionTest().main()

View File

@@ -24,6 +24,10 @@ GRACE_ACTIONS = 2
# https://zips.z.cash/zip-0317#recommended-algorithm-for-block-template-construction
WEIGHT_RATIO_CAP = 4
# Limits the number of unpaid actions in a block. See
# https://zips.z.cash/zip-0317#recommended-algorithm-for-block-template-construction
BLOCK_UNPAID_ACTION_LIMIT = 50
# The zcashd RPC sentinel value to indicate the conventional_fee when a positional argument is
# required.
ZIP_317_FEE = None

View File

@@ -8,6 +8,7 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.authproxy import JSONRPCException
from test_framework.util import assert_equal, start_nodes, start_node, \
connect_nodes_bi, sync_blocks, sync_mempools
from test_framework.zip317 import conventional_fee
from decimal import Decimal
@@ -101,7 +102,9 @@ class WalletTest (BitcoinTestFramework):
# Catch an attempt to send a transaction with an absurdly high fee.
# Send 1.0 ZEC from an utxo of value 10.0 ZEC but don't specify a change output, so then
# the change of 9.0 ZEC becomes the fee, which is greater than estimated fee of 0.0021 ZEC.
# the change of 9.0 ZEC becomes the fee, which is considered to be absurdly high because
# the fee is more than 4 times the conventional fee.
assert(Decimal("9.0") > 4*conventional_fee(1))
inputs = []
outputs = {}
for utxo in node2utxos:

View File

@@ -127,6 +127,7 @@ class WalletShieldingCoinbaseTest (BitcoinTestFramework):
self.nodes[3].z_importviewingkey(myviewingkey, "no")
# This send will succeed. We send two coinbase utxos totalling 20.0 less a default fee, with no change.
# (This tx fits within the block unpaid action limit.)
shieldvalue = Decimal('20.0') - DEFAULT_FEE
recipients = []
recipients.append({"address":myzaddr, "amount": shieldvalue})
@@ -181,16 +182,17 @@ class WalletShieldingCoinbaseTest (BitcoinTestFramework):
# check balances (the z_sendmany consumes 3 coinbase utxos)
resp = self.nodes[0].z_gettotalbalance()
assert_equal(Decimal(resp["transparent"]), Decimal('20.0'))
assert_equal(Decimal(resp["private"]), Decimal('20.0') - DEFAULT_FEE)
assert_equal(Decimal(resp["total"]), Decimal('40.0') - DEFAULT_FEE)
assert_equal(Decimal(resp["private"]), shieldvalue)
assert_equal(Decimal(resp["total"]), Decimal('20.0') + shieldvalue)
# The Sprout value pool should reflect the send
# The Sapling value pool should reflect the send
saplingvalue = shieldvalue
check_value_pool(self.nodes[0], 'sapling', saplingvalue)
# A custom fee of 0 is okay. Here the node will send the note value back to itself.
# (This tx fits within the block unpaid action limit.)
recipients = []
recipients.append({"address":myzaddr, "amount": Decimal('20.0') - conventional_fee(2)})
recipients.append({"address":myzaddr, "amount": saplingvalue})
myopid = self.nodes[0].z_sendmany(myzaddr, recipients, 1, Decimal('0.0'))
mytxid = wait_and_assert_operationid_status(self.nodes[0], myopid)
self.sync_all()
@@ -198,8 +200,8 @@ class WalletShieldingCoinbaseTest (BitcoinTestFramework):
self.sync_all()
resp = self.nodes[0].z_gettotalbalance()
assert_equal(Decimal(resp["transparent"]), Decimal('20.0'))
assert_equal(Decimal(resp["private"]), Decimal('20.0') - DEFAULT_FEE)
assert_equal(Decimal(resp["total"]), Decimal('40.0') - DEFAULT_FEE)
assert_equal(Decimal(resp["private"]), saplingvalue)
assert_equal(Decimal(resp["total"]), Decimal('20.0') + saplingvalue)
# The Sapling value pool should be unchanged
check_value_pool(self.nodes[0], 'sapling', saplingvalue)
@@ -220,8 +222,8 @@ class WalletShieldingCoinbaseTest (BitcoinTestFramework):
saplingvalue -= unshieldvalue + DEFAULT_FEE
resp = self.nodes[0].z_gettotalbalance()
assert_equal(Decimal(resp["transparent"]), Decimal('30.0'))
assert_equal(Decimal(resp["private"]), Decimal('10.0') - 2*DEFAULT_FEE)
assert_equal(Decimal(resp["total"]), Decimal('40.0') - 2*DEFAULT_FEE)
assert_equal(Decimal(resp["private"]), saplingvalue)
assert_equal(Decimal(resp["total"]), Decimal('30.0') + saplingvalue)
check_value_pool(self.nodes[0], 'sapling', saplingvalue)
# z_sendmany will return an error if there is transparent change output considered dust.
@@ -256,14 +258,11 @@ class WalletShieldingCoinbaseTest (BitcoinTestFramework):
errorString = e.error['message']
assert_equal("Insufficient funds, coinbase funds can only be spent after they have been sent to a zaddr" in errorString, True)
# Verify that mempools accept tx with joinsplits which have at least the default z_sendmany fee.
# If this test passes, it confirms that issue #1851 has been resolved, where sending from
# a zaddr to 1385 taddr recipients fails because the default fee was considered too low
# given the tx size, resulting in mempool rejection.
# Verify that mempools accept tx with shielded outputs and that pay at least the conventional fee.
errorString = ''
recipients = []
num_t_recipients = 1998 # stay just under the absurdly-high-fee error
amount_per_recipient = Decimal('0.00000546') # dust threshold
amount_per_recipient = Decimal('0.00000054') # dust threshold
# Note that regtest chainparams does not require standard tx, so setting the amount to be
# less than the dust threshold, e.g. 0.00000001 will not result in mempool rejection.
start_time = timeit.default_timer()