# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .

#
# Helpful routines for regression testing
#

# Add python-bitcoinrpc to module search path:
import os
import sys

from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re

from .authproxy import AuthServiceProxy

PRE_BLOSSOM_BLOCK_TARGET_SPACING = 150
POST_BLOSSOM_BLOCK_TARGET_SPACING = 75

def p2p_port(n):
    return 11000 + n + os.getpid()%999

def rpc_port(n):
    return 12000 + n + os.getpid()%999

def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    n = Decimal("20000000.00000003")
    satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")

def bytes_to_hex_str(byte_str):
    return hexlify(byte_str).decode('ascii')

def hex_str_to_bytes(hex_str):
    return unhexlify(hex_str.encode('ascii'))

def str_to_b64str(string):
    return b64encode(string.encode('utf-8')).decode('ascii')

def sync_blocks(rpc_connections, wait=1):
    """
    Wait until everybody has the same block count, and has notified
    all internal listeners of them
    """
    while True:
        counts = [ x.getblockcount() for x in rpc_connections ]
        if counts == [ counts[0] ]*len(counts):
            break
        time.sleep(wait)

    # Now that the block counts are in sync, wait for the internal
    # notifications to finish
    while True:
        notified = [ x.getblockchaininfo()['fullyNotified'] for x in rpc_connections ]
        if notified == [ True ] * len(notified):
            break
        time.sleep(wait)

def sync_mempools(rpc_connections, wait=1):
    """
    Wait until everybody has the same transactions in their memory
    pools, and has notified all internal listeners of them
    """
    while True:
        pool = set(rpc_connections[0].getrawmempool())
        num_match = 1
        for i in range(1, len(rpc_connections)):
            if set(rpc_connections[i].getrawmempool()) == pool:
                num_match = num_match+1
        if num_match == len(rpc_connections):
            break
        time.sleep(wait)

    # Now that the mempools are in sync, wait for the internal
    # notifications to finish
    while True:
        notified = [ x.getmempoolinfo()['fullyNotified'] for x in rpc_connections ]
        if notified == [ True ] * len(notified):
            break
        time.sleep(wait)

bitcoind_processes = {}

def initialize_datadir(dirname, n):
    datadir = os.path.join(dirname, "node"+str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    with open(os.path.join(datadir, "zcash.conf"), 'w') as f:
        f.write("regtest=1\n")
        f.write("showmetrics=0\n")
        f.write("rpcuser=rt\n")
        f.write("rpcpassword=rt\n")
        f.write("port="+str(p2p_port(n))+"\n")
        f.write("rpcport="+str(rpc_port(n))+"\n")
        f.write("listenonion=0\n")
    return datadir
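
# For illustration, initialize_datadir(dirname, 0) writes a zcash.conf of the
# form below (port/rpcport depend on the current process id via p2p_port and
# rpc_port, so the concrete numbers vary per run):
#
#     regtest=1
#     showmetrics=0
#     rpcuser=rt
#     rpcpassword=rt
#     port=<p2p_port(0)>
#     rpcport=<rpc_port(0)>
#     listenonion=0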

def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    bitcoind and bitcoin-cli must be in search path.
    """

    # Due to the consensus change fix for the timejacking attack, we need to
    # ensure that the cache is pretty fresh. Specifically, we need the median
    # time past of the chain tip of the cache to be no more than 90 minutes
    # behind the current local time, or else mined blocks will be rejected by
    # all nodes, halting the test. With Sapling active by default, this requires
    # the chain tip itself to be no more than 75 minutes behind the current
    # local time.
    #
    # We address this here, by regenerating the cache if it is more than 60
    # minutes old. This gives 15 minutes of slack initially that an RPC test has
    # to complete in, if it is started right at the oldest cache time. Within an
    # individual test, the first five calls to `generate` will each advance the
    # median time past of the chain tip by 2.5 minutes (with Sapling active by
    # default). Therefore, if the logic between the completion of any two
    # adjacent calls to `generate` within a test takes longer than 2.5 minutes,
    # the excess will subtract from the slack.
    if os.path.isdir(os.path.join("cache", "node0")):
        if os.stat("cache").st_mtime + (60 * 60) < time.time():
            print("initialize_chain(): Removing stale cache")
            shutil.rmtree("cache")

    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run bitcoinds:
        for i in range(4):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("BITCOIND", "bitcoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            args.extend([
                '-nuparams=5ba81b19:1', # Overwinter
                '-nuparams=76b809bb:1', # Sapling
            ])
            if i > 0:
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print("initialize_chain: bitcoind started, calling bitcoin-cli -rpcwait getblockcount")
            subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir,
                                    "-rpcwait", "getblockcount"], stdout=devnull)
            if os.getenv("PYTHON_DEBUG", ""):
                print("initialize_chain: bitcoin-cli -rpcwait getblockcount completed")
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            except:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)

        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # Blocks are created with timestamps 2.5 minutes apart (matching the
        # chain defaulting above to Sapling active), starting 200 * 2.5 minutes
        # before the current time.
        block_time = int(time.time()) - (200 * PRE_BLOSSOM_BLOCK_TARGET_SPACING)
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].generate(1)
                    block_time += PRE_BLOSSOM_BLOCK_TARGET_SPACING
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Check that local time isn't going backwards
        assert_greater_than(time.time() + 1, block_time)

        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))

    for i in range(4):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in zcash.conf

def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for i in range(num_nodes):
        initialize_datadir(test_dir, i)

def _rpchost_to_args(rpchost):
    '''Convert optional IP:port spec to rpcconnect/rpcport args'''
    if rpchost is None:
        return []

    match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
    if not match:
        raise ValueError('Invalid RPC host spec ' + rpchost)

    rpcconnect = match.group(1)
    rpcport = match.group(2)

    if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
        rpcconnect = rpcconnect[1:-1]

    rv = ['-rpcconnect=' + rpcconnect]
    if rpcport:
        rv += ['-rpcport=' + rpcport]
    return rv
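
# For illustration (given the regex above), _rpchost_to_args maps host specs
# to CLI arguments roughly as follows:
#
#     _rpchost_to_args("127.0.0.1")       -> ['-rpcconnect=127.0.0.1']
#     _rpchost_to_args("127.0.0.1:18232") -> ['-rpcconnect=127.0.0.1', '-rpcport=18232']
#     _rpchost_to_args("[::1]:18232")     -> ['-rpcconnect=::1', '-rpcport=18232']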

def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a bitcoind and return RPC connection to it
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("BITCOIND", "bitcoind")
    args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
    args.extend([
        '-nuparams=5ba81b19:1', # Overwinter
        '-nuparams=76b809bb:1', # Sapling
    ])
    if extra_args is not None: args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args)
    devnull = open("/dev/null", "w+")
    if os.getenv("PYTHON_DEBUG", ""):
        print("start_node: bitcoind started, calling bitcoin-cli -rpcwait getblockcount")
    subprocess.check_call([ os.getenv("BITCOINCLI", "bitcoin-cli"), "-datadir="+datadir] +
                          _rpchost_to_args(rpchost) +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    if os.getenv("PYTHON_DEBUG", ""):
        print("start_node: calling bitcoin-cli -rpcwait getblockcount returned")
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    if timewait is not None:
        proxy = AuthServiceProxy(url, timeout=timewait)
    else:
        proxy = AuthServiceProxy(url)
    proxy.url = url # store URL on proxy for info
    return proxy

def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
    """
    Start multiple bitcoinds, return RPC connections to them
    """
    if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
    if binary is None: binary = [ None for i in range(num_nodes) ]
    return [ start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]) for i in range(num_nodes) ]

def log_filename(dirname, n_node, logname):
    return os.path.join(dirname, "node"+str(n_node), "regtest", logname)

def check_node(i):
    bitcoind_processes[i].poll()
    return bitcoind_processes[i].returncode

def stop_node(node, i):
    node.stop()
    bitcoind_processes[i].wait()
    del bitcoind_processes[i]

def stop_nodes(nodes):
    for node in nodes:
        node.stop()
    del nodes[:] # Emptying array closes connections as a side effect

def set_node_times(nodes, t):
    for node in nodes:
        node.setmocktime(t)

def wait_bitcoinds():
    # Wait for all bitcoinds to cleanly exit
    for bitcoind in bitcoind_processes.values():
        bitcoind.wait()
    bitcoind_processes.clear()
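
# Typical teardown sketch (mirroring initialize_chain above): ask every node
# to stop via RPC, then wait for the daemon processes to actually exit.
#
#     stop_nodes(rpcs)
#     wait_bitcoinds()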

def connect_nodes(from_connection, node_num):
    ip_port = "127.0.0.1:"+str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)

def connect_nodes_bi(nodes, a, b):
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
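
# Sketch of how a test's setup_network might wire four nodes into a line
# topology with the helper above (the `nodes` list here stands in for the
# RPC connections returned by start_nodes):
#
#     connect_nodes_bi(nodes, 0, 1)
#     connect_nodes_bi(nodes, 1, 2)
#     connect_nodes_bi(nodes, 2, 3)
#     sync_blocks(nodes)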

def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for i in range(len(txdata["vout"])):
        if txdata["vout"][i]["value"] == amount:
            return i
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))

def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >= 0)
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and len(utxo) > 0:
        t = utxo.pop()
        total_in += t["amount"]
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)

def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    amount = amount_out+fee
    change = amount_in - amount
    if change > amount*2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - amount - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs

def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create & broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
    """

    # Create a send-to-self with confirmed inputs:
    self_address = from_node.getnewaddress()
    (total_in, inputs) = gather_inputs(from_node, amount+fee*2)
    outputs = make_change(from_node, total_in, amount+fee, fee)
    outputs[self_address] = float(amount+fee)

    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)

    vout = find_output(from_node, self_txid, amount+fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [ { "txid" : self_txid, "vout" : vout } ]
    outputs = { to_node.getnewaddress() : float(amount) }

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"])

def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)
    (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
    return (txid, txhex, fee)

def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment*random.randint(0,fee_variants)

    (total_in, inputs) = gather_inputs(from_node, amount+fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"], fee)

def assert_equal(thing1, thing2, message=""):
    if thing1 != thing2:
        if message:
            message = "; %s" % message
        raise AssertionError("%s != %s%s"%(str(thing1),str(thing2),message))

def assert_true(condition, message = ""):
    if not condition:
        raise AssertionError(message)

def assert_false(condition, message = ""):
    assert_true(not condition, message)

def assert_greater_than(thing1, thing2):
    if thing1 <= thing2:
        raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))

def assert_raises(exc, fun, *args, **kwds):
    try:
        fun(*args, **kwds)
    except exc:
        pass
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    else:
        raise AssertionError("No exception raised")

def fail(message=""):
    raise AssertionError(message)

# Returns an async operation result
def wait_and_assert_operationid_status_result(node, myopid, in_status='success', in_errormsg=None, timeout=300):
    print('waiting for async operation {}'.format(myopid))
    result = None
    for _ in range(1, timeout):
        results = node.z_getoperationresult([myopid])
        if len(results) > 0:
            result = results[0]
            break
        time.sleep(1)

    assert_true(result is not None, "timeout occurred")
    status = result['status']

    debug = os.getenv("PYTHON_DEBUG", "")
    if debug:
        print('...returned status: {}'.format(status))

    errormsg = None
    if status == "failed":
        errormsg = result['error']['message']
        if debug:
            print('...returned error: {}'.format(errormsg))
        assert_equal(in_errormsg, errormsg)

    assert_equal(in_status, status, "Operation returned mismatched status. Error Message: {}".format(errormsg))

    return result

# Returns txid if operation was a success or None
def wait_and_assert_operationid_status(node, myopid, in_status='success', in_errormsg=None, timeout=300):
    result = wait_and_assert_operationid_status_result(node, myopid, in_status, in_errormsg, timeout)
    if result['status'] == "success":
        return result['result']['txid']
    else:
        return None
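
# Typical usage sketch with an asynchronous wallet RPC (the address names and
# amount below are placeholders):
#
#     recipients = [{"address": my_zaddr, "amount": Decimal('1.0')}]
#     myopid = node.z_sendmany(my_taddr, recipients)
#     mytxid = wait_and_assert_operationid_status(node, myopid)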

# Find a coinbase address on the node, filtering by the number of UTXOs it has.
# If no filter is provided, returns the coinbase address on the node containing
# the greatest number of spendable UTXOs.
# The default cached chain has one address per coinbase output.
def get_coinbase_address(node, expected_utxos=None):
    addrs = [utxo['address'] for utxo in node.listunspent() if utxo['generated']]
    assert(len(set(addrs)) > 0)

    if expected_utxos is None:
        addrs = [(addrs.count(a), a) for a in set(addrs)]
        return sorted(addrs, reverse=True)[0][1]

    addrs = [a for a in set(addrs) if addrs.count(a) == expected_utxos]
    assert(len(addrs) > 0)
    return addrs[0]