change some test constants

This commit is contained in:
Mariano Sorgente 2020-10-27 18:20:10 +09:00 committed by Yostra
parent 3c4cf2eac5
commit c83f2907d9
15 changed files with 213 additions and 592 deletions

View File

@ -16,8 +16,8 @@ class ConsensusConstants:
SIGNIFICANT_BITS: int # The number of bits to look at in difficulty and min iters. The rest are zeroed
DISCRIMINANT_SIZE_BITS: int # Max is 1024 (based on ClassGroupElement int size)
NUMBER_ZERO_BITS_CHALLENGE_SIG: int # H(plot signature of the challenge) must start with these many zeroes
NUMBER_ZERO_BITS_ICP_SIG: int # H(plot signature of the icp) must start with these many zeroes
NUMBER_ZERO_BITS_PLOT_FILTER: int # H(plot signature of the challenge) must start with these many zeroes
NUMBER_ZERO_BITS_ICP_FILTER: int # H(plot signature of the icp) must start with these many zeroes
SLOT_TIME_TARGET: int # The target number of seconds per block
EXTRA_ITERS_TIME_TARGET: float
MAX_FUTURE_TIME: int # The next block can have a timestamp of at most these many seconds more
@ -76,8 +76,8 @@ testnet_kwargs = {
"EPOCH_SUB_BLOCKS": 4096, # The number of sub-blocks per epoch, mainnet 32256
"SIGNIFICANT_BITS": 12, # The number of bits to look at in difficulty and min iters. The rest are zeroed
"DISCRIMINANT_SIZE_BITS": 1024, # Max is 1024 (based on ClassGroupElement int size)
"NUMBER_ZERO_BITS_CHALLENGE_SIG": 3, # H(plot signature of the challenge) must start with these many zeroes
"NUMBER_ZERO_BITS_ICP_SIG": 4, # H(plot signature of the challenge) must start with these many zeroes
"NUMBER_ZERO_BITS_PLOT_FILTER": 3, # H(plot signature of the challenge) must start with these many zeroes
"NUMBER_ZERO_BITS_ICP_FILTER": 4, # H(plot signature of the icp) must start with these many zeroes
"SLOT_TIME_TARGET": 300, # The target number of seconds per slot
"EXTRA_ITERS_TIME_TARGET": 37.5,
"MAX_FUTURE_TIME": 7200, # The next block can have a timestamp of at most these many seconds more

View File

@ -48,14 +48,14 @@ def calculate_iterations_quality(
def calculate_iterations(
constants: ConsensusConstants,
proof_of_space: ProofOfSpace,
difficulty: int,
num_zero_bits: int,
) -> uint64:
"""
Convenience function to calculate the number of iterations using the proof instead
of the quality. The quality must be retrieved from the proof.
"""
quality: bytes32 = proof_of_space.verify_and_get_quality_string(num_zero_bits)
quality: bytes32 = proof_of_space.verify_and_get_quality_string(constants)
assert quality is not None
return calculate_iterations_quality(quality, proof_of_space.size, difficulty)

View File

@ -59,10 +59,7 @@ class Farmer:
# This is the farmer configuration
self.wallet_target = decode_puzzle_hash(self.config["xch_target_address"])
self.pool_public_keys = [
G1Element.from_bytes(bytes.fromhex(pk))
for pk in self.config["pool_public_keys"]
]
self.pool_public_keys = [G1Element.from_bytes(bytes.fromhex(pk)) for pk in self.config["pool_public_keys"]]
# This is the pool configuration, which should be moved out to the pool once it exists
self.pool_target = decode_puzzle_hash(pool_config["xch_target_address"])
@ -87,12 +84,8 @@ class Farmer:
async def _on_connect(self):
# Sends a handshake to the harvester
msg = harvester_protocol.HarvesterHandshake(
self._get_public_keys(), self.pool_public_keys
)
yield OutboundMessage(
NodeType.HARVESTER, Message("harvester_handshake", msg), Delivery.RESPOND
)
msg = harvester_protocol.HarvesterHandshake(self._get_public_keys(), self.pool_public_keys)
yield OutboundMessage(NodeType.HARVESTER, Message("harvester_handshake", msg), Delivery.RESPOND)
if self.current_weight in self.challenges:
for posf in self.challenges[self.current_weight]:
message = harvester_protocol.NewChallenge(posf.challenge_hash)
@ -122,13 +115,9 @@ class Farmer:
def _get_private_keys(self):
all_sks = self.keychain.get_all_private_keys()
return [master_sk_to_farmer_sk(sk) for sk, _ in all_sks] + [
master_sk_to_pool_sk(sk) for sk, _ in all_sks
]
return [master_sk_to_farmer_sk(sk) for sk, _ in all_sks] + [master_sk_to_pool_sk(sk) for sk, _ in all_sks]
async def _get_required_iters(
self, challenge_hash: bytes32, quality_string: bytes32, plot_size: uint8
):
async def _get_required_iters(self, challenge_hash: bytes32, quality_string: bytes32, plot_size: uint8):
weight: uint128 = self.challenge_to_weight[challenge_hash]
difficulty: uint64 = uint64(0)
for posf in self.challenges[weight]:
@ -138,9 +127,7 @@ class Farmer:
raise RuntimeError("Did not find challenge")
estimate_min = (
self.proof_of_time_estimate_ips
* self.constants.BLOCK_TIME_TARGET
/ self.constants.MIN_ITERS_PROPORTION
self.proof_of_time_estimate_ips * self.constants.BLOCK_TIME_TARGET / self.constants.MIN_ITERS_PROPORTION
)
estimate_min = uint64(int(estimate_min))
number_iters: uint64 = calculate_iterations_quality(
@ -152,9 +139,7 @@ class Farmer:
return number_iters
@api_request
async def challenge_response(
self, challenge_response: harvester_protocol.ChallengeResponse
):
async def challenge_response(self, challenge_response: harvester_protocol.ChallengeResponse):
"""
This is a response from the harvester, for a NewChallenge. Here we check if the proof
of space is sufficiently good, and if so, we ask for the whole proof.
@ -167,31 +152,19 @@ class Farmer:
)
if height < 1000: # As the difficulty adjusts, don't fetch all qualities
if challenge_response.challenge_hash not in self.challenge_to_best_iters:
self.challenge_to_best_iters[
challenge_response.challenge_hash
] = number_iters
elif (
number_iters
< self.challenge_to_best_iters[challenge_response.challenge_hash]
):
self.challenge_to_best_iters[
challenge_response.challenge_hash
] = number_iters
self.challenge_to_best_iters[challenge_response.challenge_hash] = number_iters
elif number_iters < self.challenge_to_best_iters[challenge_response.challenge_hash]:
self.challenge_to_best_iters[challenge_response.challenge_hash] = number_iters
else:
return
estimate_secs: float = number_iters / self.proof_of_time_estimate_ips
if challenge_response.challenge_hash not in self.challenge_to_estimates:
self.challenge_to_estimates[challenge_response.challenge_hash] = []
self.challenge_to_estimates[challenge_response.challenge_hash].append(
estimate_secs
)
self.challenge_to_estimates[challenge_response.challenge_hash].append(estimate_secs)
log.info(f"Estimate: {estimate_secs}, rate: {self.proof_of_time_estimate_ips}")
if (
estimate_secs < self.config["pool_share_threshold"]
or estimate_secs < self.config["propagate_threshold"]
):
if estimate_secs < self.config["pool_share_threshold"] or estimate_secs < self.config["propagate_threshold"]:
request = harvester_protocol.RequestProofOfSpace(
challenge_response.challenge_hash,
@ -208,9 +181,7 @@ class Farmer:
self._state_changed("challenge")
@api_request
async def respond_proof_of_space(
self, response: harvester_protocol.RespondProofOfSpace
):
async def respond_proof_of_space(self, response: harvester_protocol.RespondProofOfSpace):
"""
This is a response from the harvester with a proof of space. We check its validity,
and request a pool partial, a header signature, or both, if the proof is good enough.
@ -226,7 +197,7 @@ class Farmer:
raise RuntimeError("Did not find challenge")
computed_quality_string = response.proof.verify_and_get_quality_string(
self.constants.NUMBER_ZERO_BITS_CHALLENGE_SIG
self.constants.NUMBER_ZERO_BITS_PLOT_FILTER
)
if computed_quality_string is None:
raise RuntimeError("Invalid proof of space")
@ -241,9 +212,7 @@ class Farmer:
)
estimate_min = (
self.proof_of_time_estimate_ips
* self.constants.BLOCK_TIME_TARGET
/ self.constants.MIN_ITERS_PROPORTION
self.proof_of_time_estimate_ips * self.constants.BLOCK_TIME_TARGET / self.constants.MIN_ITERS_PROPORTION
)
estimate_min = uint64(int(estimate_min))
number_iters: uint64 = calculate_iterations_quality(
@ -260,14 +229,10 @@ class Farmer:
if estimate_secs < self.config["propagate_threshold"]:
pool_pk = bytes(response.proof.pool_public_key)
if pool_pk not in self.pool_sks_map:
log.error(
f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
)
log.error(f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}")
return
pool_target: PoolTarget = PoolTarget(self.pool_target, uint32(0))
pool_target_signature: G2Element = AugSchemeMPL.sign(
self.pool_sks_map[pool_pk], bytes(pool_target)
)
pool_target_signature: G2Element = AugSchemeMPL.sign(self.pool_sks_map[pool_pk], bytes(pool_target))
request2 = farmer_protocol.RequestHeaderHash(
challenge_hash,
@ -298,9 +263,7 @@ class Farmer:
agg_pk = ProofOfSpace.generate_plot_public_key(response.local_pk, pk)
assert agg_pk == proof_of_space.plot_public_key
farmer_share = AugSchemeMPL.sign(sk, header_hash, agg_pk)
agg_sig = AugSchemeMPL.aggregate(
[response.message_signature, farmer_share]
)
agg_sig = AugSchemeMPL.aggregate([response.message_signature, farmer_share])
validates = AugSchemeMPL.verify(agg_pk, header_hash, agg_sig)
if validates:
@ -310,9 +273,7 @@ class Farmer:
pos_hash: bytes32 = proof_of_space.get_hash()
request = farmer_protocol.HeaderSignature(pos_hash, header_hash, agg_sig)
yield OutboundMessage(
NodeType.FULL_NODE, Message("header_signature", request), Delivery.BROADCAST
)
yield OutboundMessage(NodeType.FULL_NODE, Message("header_signature", request), Delivery.BROADCAST)
"""
FARMER PROTOCOL (FARMER <-> FULL NODE)
@ -342,9 +303,7 @@ class Farmer:
)
@api_request
async def proof_of_space_finalized(
self, proof_of_space_finalized: farmer_protocol.ProofOfSpaceFinalized
):
async def proof_of_space_finalized(self, proof_of_space_finalized: farmer_protocol.ProofOfSpaceFinalized):
"""
Full node notifies farmer that a proof of space has been completed. It gets added to the
challenges list at that weight, and weight is updated if necessary
@ -362,24 +321,14 @@ class Farmer:
log.info(f"\tCurrent weight set to {self.current_weight}")
self.seen_challenges.add(proof_of_space_finalized.challenge_hash)
if proof_of_space_finalized.weight not in self.challenges:
self.challenges[proof_of_space_finalized.weight] = [
proof_of_space_finalized
]
self.challenges[proof_of_space_finalized.weight] = [proof_of_space_finalized]
else:
self.challenges[proof_of_space_finalized.weight].append(
proof_of_space_finalized
)
self.challenge_to_weight[
proof_of_space_finalized.challenge_hash
] = proof_of_space_finalized.weight
self.challenge_to_height[
proof_of_space_finalized.challenge_hash
] = proof_of_space_finalized.height
self.challenges[proof_of_space_finalized.weight].append(proof_of_space_finalized)
self.challenge_to_weight[proof_of_space_finalized.challenge_hash] = proof_of_space_finalized.weight
self.challenge_to_height[proof_of_space_finalized.challenge_hash] = proof_of_space_finalized.height
if get_proofs:
message = harvester_protocol.NewChallenge(
proof_of_space_finalized.challenge_hash
)
message = harvester_protocol.NewChallenge(proof_of_space_finalized.challenge_hash)
yield OutboundMessage(
NodeType.HARVESTER,
Message("new_challenge", message),
@ -394,9 +343,7 @@ class Farmer:
self._state_changed("challenge")
@api_request
async def proof_of_space_arrived(
self, proof_of_space_arrived: farmer_protocol.ProofOfSpaceArrived
):
async def proof_of_space_arrived(self, proof_of_space_arrived: farmer_protocol.ProofOfSpaceArrived):
"""
Full node notifies the farmer that a new proof of space was created. The farmer can use this
information to decide whether to propagate a proof.
@ -404,14 +351,10 @@ class Farmer:
if proof_of_space_arrived.weight not in self.unfinished_challenges:
self.unfinished_challenges[proof_of_space_arrived.weight] = []
else:
self.unfinished_challenges[proof_of_space_arrived.weight].append(
proof_of_space_arrived.quality_string
)
self.unfinished_challenges[proof_of_space_arrived.weight].append(proof_of_space_arrived.quality_string)
@api_request
async def proof_of_time_rate(
self, proof_of_time_rate: farmer_protocol.ProofOfTimeRate
):
async def proof_of_time_rate(self, proof_of_time_rate: farmer_protocol.ProofOfTimeRate):
"""
Updates our internal estimate of the iterations per second for the fastest proof of time
in the network.

View File

@ -13,7 +13,7 @@ def full_block_to_sub_block_record(constants: ConsensusConstants, block: FullBlo
timestamp = block.foliage_block.timestamp if block.foliage_block is not None else None
q_str: Optional[bytes32] = block.reward_chain_sub_block.proof_of_space.verify_and_get_quality_string(
constants.NUMBER_ZERO_BITS_CHALLENGE_SIG
constants.NUMBER_ZERO_BITS_PLOT_FILTER
)
# TODO: remove redundant verification of PoSpace
required_iters: uint64 = calculate_iterations_quality(

View File

@ -1206,7 +1206,7 @@ class FullNode:
request.proof_of_space,
difficulty,
vdf_min_iters,
self.constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
self.constants.NUMBER_ZERO_BITS_PLOT_FILTER,
)
removal_merkle_set = MerkleSet()

View File

@ -69,9 +69,7 @@ class FullNodeRpcApi:
raise ValueError("No LCA block is set")
min_iters: uint64 = self.service.blockchain.get_next_min_iters(lca_block)
ips: uint64 = uint64(
min_iters
* self.service.constants.MIN_ITERS_PROPORTION
// self.service.constants.BLOCK_TIME_TARGET
min_iters * self.service.constants.MIN_ITERS_PROPORTION // self.service.constants.BLOCK_TIME_TARGET
)
tip_hashes = []
@ -86,9 +84,7 @@ class FullNodeRpcApi:
if lca.height > 1:
newer_block_hex = lca.header_hash.hex()
older_block_hex = self.service.blockchain.height_to_hash[
max(1, lca.height - 100)
].hex()
older_block_hex = self.service.blockchain.height_to_hash[max(1, lca.height - 100)].hex()
space = await self.get_network_space(
{
"newer_block_header_hash": newer_block_hex,
@ -122,9 +118,7 @@ class FullNodeRpcApi:
raise ValueError("No header_hash in request")
header_hash = hexstr_to_bytes(request["header_hash"])
block: Optional[FullBlock] = await self.service.block_store.get_block(
header_hash
)
block: Optional[FullBlock] = await self.service.block_store.get_block(header_hash)
if block is None:
raise ValueError(f"Block {header_hash.hex()} not found")
@ -135,9 +129,7 @@ class FullNodeRpcApi:
raise ValueError("No height in request")
height = request["height"]
header_height = uint32(int(height))
header_hash: Optional[bytes32] = self.service.blockchain.height_to_hash.get(
header_height, None
)
header_hash: Optional[bytes32] = self.service.blockchain.height_to_hash.get(header_height, None)
if header_hash is None:
raise ValueError(f"Height {height} not found in chain")
header: Header = self.service.blockchain.headers[header_hash]
@ -148,9 +140,7 @@ class FullNodeRpcApi:
raise ValueError("header_hash not in request")
header_hash_str = request["header_hash"]
header_hash = hexstr_to_bytes(header_hash_str)
header: Optional[Header] = self.service.blockchain.headers.get(
header_hash, None
)
header: Optional[Header] = self.service.blockchain.headers.get(header_hash, None)
return {"header": header}
async def get_unfinished_block_headers(self, request: Dict) -> Optional[Dict]:
@ -158,9 +148,7 @@ class FullNodeRpcApi:
raise ValueError("height not in request")
height = request["height"]
response_headers: List[Header] = []
for block in (
await self.service.full_node_store.get_unfinished_blocks()
).values():
for block in (await self.service.full_node_store.get_unfinished_blocks()).values():
if block.height == height:
response_headers.append(block.header)
return {"headers": response_headers}
@ -184,9 +172,7 @@ class FullNodeRpcApi:
break
if current.height == 0:
break
header: Optional[Header] = self.service.blockchain.headers.get(
current.prev_header_hash, None
)
header: Optional[Header] = self.service.blockchain.headers.get(current.prev_header_hash, None)
assert header is not None
headers[header.header_hash] = header
current = header
@ -200,18 +186,8 @@ class FullNodeRpcApi:
assert header is not None
all_unfinished[header.header_hash] = header
sorted_headers = [
v
for v in sorted(
headers.values(), key=lambda item: item.height, reverse=True
)
]
sorted_unfinished = [
v
for v in sorted(
all_unfinished.values(), key=lambda item: item.height, reverse=True
)
]
sorted_headers = [v for v in sorted(headers.values(), key=lambda item: item.height, reverse=True)]
sorted_unfinished = [v for v in sorted(all_unfinished.values(), key=lambda item: item.height, reverse=True)]
finished_with_meta = []
finished_header_hashes = set()
@ -252,9 +228,7 @@ class FullNodeRpcApi:
Calculates the sum of min_iters from all blocks starting from
old and up to and including new_block, but not including old_block.
"""
older_block_parent = await self.service.block_store.get_block(
older_block.prev_header_hash
)
older_block_parent = await self.service.block_store.get_block(older_block.prev_header_hash)
if older_block_parent is None:
raise ValueError("Older block not found")
older_diff = older_block.weight - older_block_parent.weight
@ -262,27 +236,19 @@ class FullNodeRpcApi:
older_block.proof_of_space,
older_diff,
older_block.proof_of_time.number_of_iterations,
self.service.constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
self.service.constants.NUMBER_ZERO_BITS_PLOT_FILTER,
)
# We do not count the min iters in the old block, since it's not included in the range
total_mi: uint64 = uint64(0)
for curr_h in range(older_block.height + 1, newer_block.height + 1):
if (
curr_h % self.service.constants.DIFFICULTY_EPOCH
) == self.service.constants.DIFFICULTY_DELAY:
curr_b_header_hash = self.service.blockchain.height_to_hash.get(
uint32(int(curr_h))
)
if (curr_h % self.service.constants.DIFFICULTY_EPOCH) == self.service.constants.DIFFICULTY_DELAY:
curr_b_header_hash = self.service.blockchain.height_to_hash.get(uint32(int(curr_h)))
if curr_b_header_hash is None:
raise ValueError(f"Curr header hash {curr_h} not found")
curr_b_block = await self.service.block_store.get_block(
curr_b_header_hash
)
curr_b_block = await self.service.block_store.get_block(curr_b_header_hash)
if curr_b_block is None or curr_b_block.proof_of_time is None:
raise ValueError("Block invalid")
curr_parent = await self.service.block_store.get_block(
curr_b_block.prev_header_hash
)
curr_parent = await self.service.block_store.get_block(curr_b_block.prev_header_hash)
if curr_parent is None:
raise ValueError("Curr parent block invalid")
curr_diff = curr_b_block.weight - curr_parent.weight
@ -290,7 +256,7 @@ class FullNodeRpcApi:
curr_b_block.proof_of_space,
uint64(curr_diff),
curr_b_block.proof_of_time.number_of_iterations,
self.service.constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
self.service.constants.NUMBER_ZERO_BITS_PLOT_FILTER,
)
if curr_mi is None:
raise ValueError("Curr_mi invalid")
@ -303,13 +269,8 @@ class FullNodeRpcApi:
Retrieves an estimate of total space validating the chain
between two block header hashes.
"""
if (
"newer_block_header_hash" not in request
or "older_block_header_hash" not in request
):
raise ValueError(
"Invalid request. newer_block_header_hash and older_block_header_hash required"
)
if "newer_block_header_hash" not in request or "older_block_header_hash" not in request:
raise ValueError("Invalid request. newer_block_header_hash and older_block_header_hash required")
newer_block_hex = request["newer_block_header_hash"]
older_block_hex = request["older_block_header_hash"]
@ -326,9 +287,7 @@ class FullNodeRpcApi:
if older_block is None:
raise ValueError("Newer block not found")
delta_weight = newer_block.header.data.weight - older_block.header.data.weight
delta_iters = (
newer_block.header.data.total_iters - older_block.header.data.total_iters
)
delta_iters = newer_block.header.data.total_iters - older_block.header.data.total_iters
total_min_inters = await self.get_total_miniters(newer_block, older_block)
if total_min_inters is None:
raise ValueError("Min iters invalid")
@ -336,14 +295,9 @@ class FullNodeRpcApi:
weight_div_iters = delta_weight / delta_iters
tips_adjustment_constant = 0.65
network_space_constant = 2 ** 32 # 2^32
eligible_plots_filter_mult = (
2 ** self.service.constants.NUMBER_ZERO_BITS_CHALLENGE_SIG
)
eligible_plots_filter_mult = 2 ** self.service.constants.NUMBER_ZERO_BITS_PLOT_FILTER
network_space_bytes_estimate = (
weight_div_iters
* network_space_constant
* tips_adjustment_constant
* eligible_plots_filter_mult
weight_div_iters * network_space_constant * tips_adjustment_constant * eligible_plots_filter_mult
)
return {"space": uint128(int(network_space_bytes_estimate))}
@ -362,11 +316,7 @@ class FullNodeRpcApi:
else:
header = None
coin_records = (
await self.service.blockchain.coin_store.get_coin_records_by_puzzle_hash(
puzzle_hash, header
)
)
coin_records = await self.service.blockchain.coin_store.get_coin_records_by_puzzle_hash(puzzle_hash, header)
return {"coin_records": coin_records}

View File

@ -1,7 +1,7 @@
from src.util.make_test_constants import make_test_constants_without_genesis
from src.consensus.constants import constants
test_constants = make_test_constants_without_genesis(
{
test_constants = constants.replace(
**{
"DIFFICULTY_STARTING": 1,
"DISCRIMINANT_SIZE_BITS": 8,
"BLOCK_TIME_TARGET": 10,
@ -12,10 +12,10 @@ test_constants = make_test_constants_without_genesis(
"TX_PER_SEC": 1,
"MEMPOOL_BLOCK_BUFFER": 10,
"MIN_ITERS_STARTING": 50 * 1,
"NUMBER_ZERO_BITS_CHALLENGE_SIG": 1,
"NUMBER_ZERO_BITS_PLOT_FILTER": 1, # H(plot signature of the challenge) must start with these many zeroes
"NUMBER_ZERO_BITS_ICP_FILTER": 1, # H(plot signature of the icp) must start with these many zeroes
"CLVM_COST_RATIO_CONSTANT": 108,
"COINBASE_FREEZE_PERIOD": 0,
"GENESIS_BLOCK": b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb6D\x9c,h\xdf\x97\xc1\x9e\x88D'\xe4.\xe75\t\x82\xd4\x02\x05q\xea\xd0\x872a_\xf3\x9b\xd2\x16\xbf\xd60\xb6F\x07\x84\x98+\xec\x98\xb4\x9f\xeay\xd0\x85\xee5\xc5\xa6\x9b[\xac5\xd1\xb2\x11|c\xc5\xe3\x01\xe2\xf7\xd2e\x11\xda\x91\xbaH\xa5'\xb1\xd1e5=\xc3O\xcdr\xbaR\x0bD\xfe;\x00\xc3)\xea\xd3\x12\x00\x00\x00\x90\xbf\xef\xd3MQ]\xd2\xa0Kg\xb0\x83\xad\xd3?\xf7<\xb6\xb3s\xb8|n\xc0\xa5m\xbf{\x0b\x0b\x9f<\xffLF\xd9w/\xf0\x98\xf8\x870x\x1a\xd1P\xf5\xa4\x925,\xde{\x86\x87\t\x1b\x1fo\xe4^\x0e-\xa8\x17}\xb7/O\xd4\x13\x1d\x9b\xe0\xb4\x10:\x03\xe6j\xfb\xd0\x99\x1d]\xff\xe7\xc5\xf5K\xb9\xf2\xe6\x7fM\x04\x01\xcf\x95L\xb2I\xd7\x8f\x9e\xcd\xc1'\xefL dn\xef\xbd\xbf\xf0x\x18\x8f\x06u\x92BOVQ\xb5\x83\x03\xf2\x10\x00\x7f\xf8\xaa$\xd7\xb5li]\xdd\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\xc4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00_\x1e\xe6K\xe01\xecA\xe2p:\xc1\x04h\x81\xe9`\x97\xd4q\xee\xc7\xf1\x0f\x17*#n%\x0f2^\xcf\xf0\x80-\xb0\x02D\xda\xb7\x88q\xa7\x17)`\xb3\xca\x7f&\xc1\xc9\xf6\xd6\xec\xffg\xeb\x8f\x1e\xa6\xba\x9a\x14vH\x8c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x01\x00\x00\x00\x00\x00\x00\x06\xc4>\x0b\xaee\xf4\x13\x8ebH\xdff\xe60lFc\xe0\xf5N\xbb\x8e\xe7\xf5G\x17\x14\x9a\xe6\x90\xc70T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X\xdf*`\xd9.^k\xa0\xff\x1e\x1c\xb6`\x1c\xfbl\x81\xf0\xf6(.j\xd4\xc2\xc1[_\xc7~Oe\x00\x00\x01\xd1\xa9J \x00\x8b\x0f&\x19<\x9ac\r\x1e\xbf\xc5\x06\x9b@J\xaes\xe1E\x83\xa97\xe7H\x9e\xa3\xf9\x96y\xea\xb5\xcb\x00\x00\x00\x00\x8aA\xd5\x1b\xb8\xb4*\"\x1ej$\x8eO\xf1*\x1f[\xa6\xf8\xb9=\xe8\xc1\x98\x95`W\xd2\xea\xb4\x1e\xceZ\x1eL\x8e\x19\xdeT\x7f]\xdb\xa3?\xcb\xad\x97C\x18\xff/\xe1T\x8fVK\xc38\x87\xa1\xa4e\x1d\xc0v\xf4\x15\xfb(\xb6!\xe1KF\xf6\x998\xd2P\x8e\x0en\x81r}\x9dq\x161\nX\xbf^}uB\x00\x00\x00\x00\x00\x00\x00\x00\x83\xb9l!2\x0fz\xc0o\x86\x19\xdd\xb2{\x8c\xa7`a42\xf5\xaa\x87\xa0\rQQB\x1ah*\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa6BWV\xaf\xdaf!%\x117\xe84tw\x87\x00\xc3\xfa\x1ah\xd8\xc4|\x9c\xae\xfb\xd4\x08\x93_In\xce7\xe4\xc9\xd8\xa3Z\xa5V =\xb6\xf2\t\xe8\x10\x18\xe8\\\x1c\xcc\xf4\xb6\x9f\xae\x96\xd7\x14<\x05w\xe6\x8a\x9d\xfaJ\xf8\"\xb0\xf0Q\x89\xd6b]&\xe2\xad'\x07c'\xacy\\\x8ezNw\xb3\x7f8f\x00\x00\x00\x00\x07\x02\x92E\x80A9\xa0", # noqa: E501
}
)
@ -23,6 +23,7 @@ if __name__ == "__main__":
from src.util.default_root import DEFAULT_ROOT_PATH
from src.util.block_tools import BlockTools
# TODO: mariano: fix this with new consensus
bt = BlockTools(root_path=DEFAULT_ROOT_PATH)
new_genesis_block = bt.create_genesis_block(test_constants, bytes([0] * 32), b"0")

View File

@ -50,12 +50,12 @@ class ProofOfSpace(Streamable):
# If icp_output is provided, both plot filters are checked. Otherwise only the first one is checked
plot_filter_1 = BitArray(std_hash(bytes(plot_id) + bytes(challenge_hash)))
if icp_output is None:
return plot_filter_1[: constants.NUMBER_ZERO_BITS_CHALLENGE_SIG].uint == 0
return plot_filter_1[: constants.NUMBER_ZERO_BITS_PLOT_FILTER].uint == 0
else:
plot_filter_2 = BitArray(std_hash(bytes(plot_id) + bytes(icp_output)))
return (
plot_filter_1[: constants.NUMBER_ZERO_BITS_CHALLENGE_SIG].uint == 0
and plot_filter_2[: constants.NUMBER_ZERO_BITS_ICP_SIG].uint == 0
plot_filter_1[: constants.NUMBER_ZERO_BITS_PLOT_FILTER].uint == 0
and plot_filter_2[: constants.NUMBER_ZERO_BITS_ICP_FILTER].uint == 0
)
@staticmethod

View File

@ -208,7 +208,7 @@ class BlockTools:
block_list[-1].proof_of_space,
curr_difficulty,
block_list[-1].proof_of_time.number_of_iterations,
test_constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
test_constants.NUMBER_ZERO_BITS_PLOT_FILTER,
)
starting_height = block_list[-1].height + 1
@ -422,7 +422,7 @@ class BlockTools:
ccp = ProofOfSpace.can_create_proof(
plot_id,
challenge_hash,
test_constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
test_constants.NUMBER_ZERO_BITS_PLOT_FILTER,
)
if not ccp:
continue
@ -439,7 +439,7 @@ class BlockTools:
ccp = ProofOfSpace.can_create_proof(
plot_id,
challenge_hash,
test_constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
test_constants.NUMBER_ZERO_BITS_PLOT_FILTER,
)
if not ccp:
continue
@ -475,7 +475,7 @@ class BlockTools:
proof_of_space,
difficulty,
min_iters,
test_constants.NUMBER_ZERO_BITS_CHALLENGE_SIG,
test_constants.NUMBER_ZERO_BITS_PLOT_FILTER,
)
if self.real_plots:
print(f"Performing {number_iters} VDF iterations")

View File

@ -1,25 +1,7 @@
from typing import Dict
from src.consensus.default_constants import DEFAULT_CONSTANTS
from src.util.block_tools import BlockTools
from src.consensus.constants import constants
def make_test_constants_with_genesis(test_constants_overrides: Dict):
test_constants = make_test_constants_without_genesis(test_constants_overrides)
bt = BlockTools()
new_genesis_block = bt.create_genesis_block(test_constants, bytes([0] * 32), b"0")
final_test_constants = test_constants.replace(
GENESIS_BLOCK=bytes(new_genesis_block)
)
return final_test_constants, bt
def make_test_constants_without_genesis(test_constants_overrides: Dict):
test_constants = DEFAULT_CONSTANTS.replace(**test_constants_overrides)
return test_constants
def make_test_constants(test_constants_overrides: Dict):
return constants.replace(**test_constants_overrides)

View File

@ -171,9 +171,7 @@ class WalletStateManager:
if len(self.block_records) > 0:
# Initializes the state based on the DB block records
# Header hash with the highest weight
self.lca = max(
(item[1].weight, item[0]) for item in self.block_records.items()
)[1]
self.lca = max((item[1].weight, item[0]) for item in self.block_records.items())[1]
for key, value in self.block_records.items():
self.height_to_hash[value.height] = value.header_hash
@ -184,10 +182,7 @@ class WalletStateManager:
# Loads the genesis block if there are no blocks
genesis_challenge = Challenge(
genesis.proof_of_space.challenge_hash,
std_hash(
genesis.proof_of_space.get_hash()
+ genesis.proof_of_time.output.get_hash()
),
std_hash(genesis.proof_of_space.get_hash() + genesis.proof_of_time.output.get_hash()),
None,
)
genesis_hb = HeaderBlock(
@ -271,9 +266,7 @@ class WalletStateManager:
for wallet_id in targets:
target_wallet = self.wallets[wallet_id]
last: Optional[
uint32
] = await self.puzzle_store.get_last_derivation_path_for_wallet(wallet_id)
last: Optional[uint32] = await self.puzzle_store.get_last_derivation_path_for_wallet(wallet_id)
start_index = 0
derivation_paths: List[DerivationRecord] = []
@ -291,13 +284,9 @@ class WalletStateManager:
break
type = target_wallet.rl_info.type
if type == "user":
rl_pubkey = G1Element.from_bytes(
target_wallet.rl_info.user_pubkey
)
rl_pubkey = G1Element.from_bytes(target_wallet.rl_info.user_pubkey)
else:
rl_pubkey = G1Element.from_bytes(
target_wallet.rl_info.admin_pubkey
)
rl_pubkey = G1Element.from_bytes(target_wallet.rl_info.admin_pubkey)
rl_puzzle: Program = target_wallet.puzzle_for_pk(rl_pubkey)
puzzle_hash: bytes32 = rl_puzzle.get_tree_hash()
@ -319,14 +308,10 @@ class WalletStateManager:
pubkey: G1Element = self.get_public_key(uint32(index))
puzzle: Program = target_wallet.puzzle_for_pk(bytes(pubkey))
if puzzle is None:
self.log.warning(
f"Unable to create puzzles with wallet {target_wallet}"
)
self.log.warning(f"Unable to create puzzles with wallet {target_wallet}")
break
puzzlehash: bytes32 = puzzle.get_tree_hash()
self.log.info(
f"Puzzle at index {index} wallet ID {wallet_id} puzzle hash {puzzlehash.hex()}"
)
self.log.info(f"Puzzle at index {index} wallet ID {wallet_id} puzzle hash {puzzlehash.hex()}")
derivation_paths.append(
DerivationRecord(
uint32(index),
@ -349,18 +334,14 @@ class WalletStateManager:
"""
async with self.puzzle_store.lock:
# If we have no unused public keys, we will create new ones
unused: Optional[
uint32
] = await self.puzzle_store.get_unused_derivation_path()
unused: Optional[uint32] = await self.puzzle_store.get_unused_derivation_path()
if unused is None:
await self.create_more_puzzle_hashes()
# Now we must have unused public keys
unused = await self.puzzle_store.get_unused_derivation_path()
assert unused is not None
record: Optional[
DerivationRecord
] = await self.puzzle_store.get_derivation_record(unused, wallet_id)
record: Optional[DerivationRecord] = await self.puzzle_store.get_derivation_record(unused, wallet_id)
assert record is not None
# Set this key to used so we never use it again
@ -418,15 +399,11 @@ class WalletStateManager:
self.sync_mode = mode
self.state_changed("sync_changed")
async def get_confirmed_spendable_balance_for_wallet(
self, wallet_id: int
) -> uint64:
async def get_confirmed_spendable_balance_for_wallet(self, wallet_id: int) -> uint64:
"""
Returns the balance amount of all coins that are spendable.
"""
spendable: Set[WalletCoinRecord] = await self.get_spendable_coins_for_wallet(
wallet_id
)
spendable: Set[WalletCoinRecord] = await self.get_spendable_coins_for_wallet(wallet_id)
amount: uint64 = uint64(0)
@ -454,9 +431,7 @@ class WalletStateManager:
"""
Returns the confirmed balance, including coinbase rewards that are not spendable.
"""
record_list: Set[
WalletCoinRecord
] = await self.wallet_store.get_unspent_coins_for_wallet(wallet_id)
record_list: Set[WalletCoinRecord] = await self.wallet_store.get_unspent_coins_for_wallet(wallet_id)
amount: uint64 = uint64(0)
for record in record_list:
@ -470,9 +445,7 @@ class WalletStateManager:
transactions.
"""
confirmed = await self.get_confirmed_balance_for_wallet(wallet_id)
unconfirmed_tx: List[
TransactionRecord
] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
unconfirmed_tx: List[TransactionRecord] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
removal_amount = 0
for record in unconfirmed_tx:
@ -490,23 +463,13 @@ class WalletStateManager:
valid_index = current_index - coinbase_freeze_period
not_frozen: Set[
WalletCoinRecord
] = await self.wallet_store.get_spendable_for_index(valid_index, wallet_id)
all_records: Set[
WalletCoinRecord
] = await self.wallet_store.get_spendable_for_index(current_index, wallet_id)
sum_not_frozen = sum(
record.coin.amount for record in not_frozen if record.coinbase
)
sum_all_records = sum(
record.coin.amount for record in all_records if record.coinbase
)
not_frozen: Set[WalletCoinRecord] = await self.wallet_store.get_spendable_for_index(valid_index, wallet_id)
all_records: Set[WalletCoinRecord] = await self.wallet_store.get_spendable_for_index(current_index, wallet_id)
sum_not_frozen = sum(record.coin.amount for record in not_frozen if record.coinbase)
sum_all_records = sum(record.coin.amount for record in all_records if record.coinbase)
return uint64(sum_all_records - sum_not_frozen)
async def unconfirmed_additions_for_wallet(
self, wallet_id: int
) -> Dict[bytes32, Coin]:
async def unconfirmed_additions_for_wallet(self, wallet_id: int) -> Dict[bytes32, Coin]:
"""
Returns change coins for the wallet_id.
(Unconfirmed addition transactions that have not been confirmed yet.)
@ -519,9 +482,7 @@ class WalletStateManager:
additions[coin.name()] = coin
return additions
async def unconfirmed_removals_for_wallet(
self, wallet_id: int
) -> Dict[bytes32, Coin]:
async def unconfirmed_removals_for_wallet(self, wallet_id: int) -> Dict[bytes32, Coin]:
"""
Returns new removals transactions that have not been confirmed yet.
"""
@ -532,21 +493,15 @@ class WalletStateManager:
removals[coin.name()] = coin
return removals
async def coins_of_interest_received(
self, removals: List[Coin], additions: List[Coin], height: uint32
):
async def coins_of_interest_received(self, removals: List[Coin], additions: List[Coin], height: uint32):
for coin in additions:
await self.puzzle_hash_created(coin)
trade_additions = await self.coins_of_interest_added(additions, height)
trade_removals = await self.coins_of_interest_removed(removals, height)
if len(trade_additions) > 0 or len(trade_removals) > 0:
await self.trade_manager.coins_of_interest_farmed(
trade_removals, trade_additions, height
)
await self.trade_manager.coins_of_interest_farmed(trade_removals, trade_additions, height)
async def coins_of_interest_added(
self, coins: List[Coin], height: uint32
) -> List[Coin]:
async def coins_of_interest_added(self, coins: List[Coin], height: uint32) -> List[Coin]:
(
trade_removals,
trade_additions,
@ -566,20 +521,11 @@ class WalletStateManager:
info = await self.puzzle_store.wallet_info_for_puzzle_hash(coin.puzzle_hash)
if info is not None:
wallet_id, wallet_type = info
await self.coin_added(
coin,
height,
is_coinbase,
is_fee_reward,
uint32(wallet_id),
wallet_type,
)
await self.coin_added(coin, height, is_coinbase, uint32(wallet_id), wallet_type)
return trade_adds
async def coins_of_interest_removed(
self, coins: List[Coin], height: uint32
) -> List[Coin]:
async def coins_of_interest_removed(self, coins: List[Coin], height: uint32) -> List[Coin]:
"This get's called when coins of our interest are spent on chain"
(
trade_removals,
@ -610,9 +556,7 @@ class WalletStateManager:
await self.wallet_store.set_spent(coin.name(), index)
unconfirmed_record: List[
TransactionRecord
] = await self.tx_store.unconfirmed_with_removal_coin(coin.name())
unconfirmed_record: List[TransactionRecord] = await self.tx_store.unconfirmed_with_removal_coin(coin.name())
for unconfirmed in unconfirmed_record:
await self.tx_store.set_confirmed(unconfirmed.name(), index)
@ -693,9 +637,7 @@ class WalletStateManager:
if wallet_type == WalletType.COLOURED_COIN:
wallet: CCWallet = self.wallets[wallet_id]
header_hash: bytes32 = self.height_to_hash[index]
block: Optional[BlockRecord] = await self.wallet_store.get_block_record(
header_hash
)
block: Optional[BlockRecord] = await self.wallet_store.get_block_record(header_hash)
assert block is not None
assert block.removals is not None
await wallet.coin_added(coin, index, header_hash, block.removals)
@ -729,9 +671,7 @@ class WalletStateManager:
"""
Full node received our transaction, no need to keep it in queue anymore
"""
updated = await self.tx_store.increment_sent(
spendbundle_id, name, send_status, error
)
updated = await self.tx_store.increment_sent(spendbundle_id, name, send_status, error)
if updated:
tx: Optional[TransactionRecord] = await self.get_transaction(spendbundle_id)
if tx is not None:
@ -811,14 +751,9 @@ class WalletStateManager:
if header_block is not None:
if not await self.validate_header_block(block, header_block):
return ReceiveBlockResult.INVALID_BLOCK
if (
(block.height + 1) % self.constants.DIFFICULTY_EPOCH
== self.constants.DIFFICULTY_DELAY
):
if (block.height + 1) % self.constants.DIFFICULTY_EPOCH == self.constants.DIFFICULTY_DELAY:
assert header_block.challenge.new_work_difficulty is not None
self.difficulty_resets_prev[
block.header_hash
] = header_block.challenge.new_work_difficulty
self.difficulty_resets_prev[block.header_hash] = header_block.challenge.new_work_difficulty
if (block.height + 1) % self.constants.DIFFICULTY_EPOCH == 0:
assert block.total_iters is not None
@ -829,9 +764,7 @@ class WalletStateManager:
# If one of these new additions is ours, generate more puzzle hashes
phs: List[bytes32] = [addition.puzzle_hash for addition in block.additions]
block_includes_our_tx: bool = (
await self.puzzle_store.one_of_puzzle_hashes_exists(phs)
)
block_includes_our_tx: bool = await self.puzzle_store.one_of_puzzle_hashes_exists(phs)
if block_includes_our_tx:
async with self.puzzle_store.lock:
for addition in block.additions:
@ -849,18 +782,14 @@ class WalletStateManager:
assert block.height == 0
await self.wallet_store.add_block_to_path(block.header_hash)
self.lca = block.header_hash
await self.coins_of_interest_received(
block.removals, block.additions, block.height
)
await self.coins_of_interest_received(block.removals, block.additions, block.height)
self.height_to_hash[uint32(0)] = block.header_hash
return ReceiveBlockResult.NEW_TIP
# Not genesis, updated LCA
if block.weight > self.block_records[self.lca].weight:
fork_h = find_fork_point_in_chain(
self.block_records, self.block_records[self.lca], block
)
fork_h = find_fork_point_in_chain(self.block_records, self.block_records[self.lca], block)
await self.reorg_rollback(fork_h)
# Add blocks between fork point and new lca
@ -878,13 +807,8 @@ class WalletStateManager:
for path_block in blocks_to_add:
self.height_to_hash[path_block.height] = path_block.header_hash
await self.wallet_store.add_block_to_path(path_block.header_hash)
assert (
path_block.additions is not None
and path_block.removals is not None
)
await self.coins_of_interest_received(
path_block.removals, path_block.additions, path_block.height
)
assert path_block.additions is not None and path_block.removals is not None
await self.coins_of_interest_received(path_block.removals, path_block.additions, path_block.height)
self.lca = block.header_hash
self.state_changed("new_block")
return ReceiveBlockResult.NEW_TIP
@ -898,21 +822,12 @@ class WalletStateManager:
epoch.
"""
curr = block_record
if (
curr.height
< self.constants.DIFFICULTY_EPOCH + self.constants.DIFFICULTY_DELAY
):
if curr.height < self.constants.DIFFICULTY_EPOCH + self.constants.DIFFICULTY_DELAY:
return self.constants.MIN_ITERS_STARTING
if (
curr.height % self.constants.DIFFICULTY_EPOCH
< self.constants.DIFFICULTY_DELAY
):
if curr.height % self.constants.DIFFICULTY_EPOCH < self.constants.DIFFICULTY_DELAY:
# First few blocks of epoch (using old difficulty and min_iters)
height2 = (
curr.height
- (curr.height % self.constants.DIFFICULTY_EPOCH)
- self.constants.DIFFICULTY_EPOCH
- 1
curr.height - (curr.height % self.constants.DIFFICULTY_EPOCH) - self.constants.DIFFICULTY_EPOCH - 1
)
else:
# The rest of the blocks of epoch (using new difficulty and min iters)
@ -931,50 +846,32 @@ class WalletStateManager:
assert iters1 is not None
assert iters2 is not None
min_iters_precise = uint64(
(iters2 - iters1)
// (self.constants.DIFFICULTY_EPOCH * self.constants.MIN_ITERS_PROPORTION)
(iters2 - iters1) // (self.constants.DIFFICULTY_EPOCH * self.constants.MIN_ITERS_PROPORTION)
)
# Truncates to only 12 bits plus 0s. This prevents grinding attacks.
return uint64(
truncate_to_significant_bits(
min_iters_precise, self.constants.SIGNIFICANT_BITS
)
)
return uint64(truncate_to_significant_bits(min_iters_precise, self.constants.SIGNIFICANT_BITS))
async def validate_header_block(
self, br: BlockRecord, header_block: HeaderBlock
) -> bool:
async def validate_header_block(self, br: BlockRecord, header_block: HeaderBlock) -> bool:
"""
Fully validates a header block. This requires the ancestors to be present in the blockchain.
This method also validates that the header block is consistent with the block record.
"""
# POS challenge hash == POT challenge hash == Challenge prev challenge hash
if (
header_block.proof_of_space.challenge_hash
!= header_block.proof_of_time.challenge_hash
):
if header_block.proof_of_space.challenge_hash != header_block.proof_of_time.challenge_hash:
return False
if (
header_block.proof_of_space.challenge_hash
!= header_block.challenge.prev_challenge_hash
):
if header_block.proof_of_space.challenge_hash != header_block.challenge.prev_challenge_hash:
return False
if br.height > 0:
prev_br = self.block_records[br.prev_header_hash]
# If prev header block, check prev header block hash matches
if prev_br.new_challenge_hash is not None:
if (
header_block.proof_of_space.challenge_hash
!= prev_br.new_challenge_hash
):
if header_block.proof_of_space.challenge_hash != prev_br.new_challenge_hash:
return False
# Validate PoS and get quality
quality_str: Optional[
bytes32
] = header_block.proof_of_space.verify_and_get_quality_string(
self.constants.NUMBER_ZERO_BITS_CHALLENGE_SIG
quality_str: Optional[bytes32] = header_block.proof_of_space.verify_and_get_quality_string(
self.constants.NUMBER_ZERO_BITS_PLOT_FILTER
)
if quality_str is None:
return False
@ -982,10 +879,7 @@ class WalletStateManager:
difficulty: uint64
min_iters: uint64 = self.get_min_iters(br)
prev_block: Optional[BlockRecord]
if (
br.height % self.constants.DIFFICULTY_EPOCH
!= self.constants.DIFFICULTY_DELAY
):
if br.height % self.constants.DIFFICULTY_EPOCH != self.constants.DIFFICULTY_DELAY:
# Only allow difficulty changes once per epoch
if br.height > 1:
prev_block = self.block_records[br.prev_header_hash]
@ -1047,16 +941,11 @@ class WalletStateManager:
return False
# Check PoT
if not header_block.proof_of_time.is_valid(
self.constants.DISCRIMINANT_SIZE_BITS
):
if not header_block.proof_of_time.is_valid(self.constants.DISCRIMINANT_SIZE_BITS):
return False
# Validate challenge
proofs_hash = std_hash(
header_block.proof_of_space.get_hash()
+ header_block.proof_of_time.output.get_hash()
)
proofs_hash = std_hash(header_block.proof_of_space.get_hash() + header_block.proof_of_time.output.get_hash())
if proofs_hash != header_block.challenge.proofs_hash:
return False
# Note that we are not validating the work difficulty reset (since we don't know the
@ -1087,17 +976,11 @@ class WalletStateManager:
return False
# Check that block is not far in the future
if (
header_block.header.data.timestamp
> time.time() + self.constants.MAX_FUTURE_TIME
):
if header_block.header.data.timestamp > time.time() + self.constants.MAX_FUTURE_TIME:
return False
# Check header pos hash
if (
header_block.proof_of_space.get_hash()
!= header_block.header.data.proof_of_space_hash
):
if header_block.proof_of_space.get_hash() != header_block.header.data.proof_of_space_hash:
return False
return True
@ -1126,33 +1009,20 @@ class WalletStateManager:
# Validate proof hash of previous header block
if (
std_hash(
prev_header_block.proof_of_space.get_hash()
+ prev_header_block.proof_of_time.output.get_hash()
prev_header_block.proof_of_space.get_hash() + prev_header_block.proof_of_time.output.get_hash()
)
!= all_proof_hashes[prev_height][0]
):
return False
# Calculate challenge hash (with difficulty)
if (
prev_header_block.challenge.prev_challenge_hash
!= prev_header_block.proof_of_space.challenge_hash
):
if prev_header_block.challenge.prev_challenge_hash != prev_header_block.proof_of_space.challenge_hash:
return False
if (
prev_header_block.challenge.prev_challenge_hash
!= prev_header_block.proof_of_time.challenge_hash
):
if prev_header_block.challenge.prev_challenge_hash != prev_header_block.proof_of_time.challenge_hash:
return False
if (
prev_header_block.challenge.proofs_hash
!= all_proof_hashes[prev_height][0]
):
if prev_header_block.challenge.proofs_hash != all_proof_hashes[prev_height][0]:
return False
if (
height % self.constants.DIFFICULTY_EPOCH
== self.constants.DIFFICULTY_DELAY
):
if height % self.constants.DIFFICULTY_EPOCH == self.constants.DIFFICULTY_DELAY:
diff_change = all_proof_hashes[height][1]
assert diff_change is not None
if prev_header_block.challenge.new_work_difficulty != diff_change:
@ -1174,59 +1044,35 @@ class WalletStateManager:
return False
# Validate proof hash
if (
std_hash(
header_block.proof_of_space.get_hash()
+ header_block.proof_of_time.output.get_hash()
)
std_hash(header_block.proof_of_space.get_hash() + header_block.proof_of_time.output.get_hash())
!= all_proof_hashes[height][0]
):
return False
# Get difficulty
if (
height % self.constants.DIFFICULTY_EPOCH
< self.constants.DIFFICULTY_DELAY
):
if height % self.constants.DIFFICULTY_EPOCH < self.constants.DIFFICULTY_DELAY:
diff_height = (
height
- (height % self.constants.DIFFICULTY_EPOCH)
- (
self.constants.DIFFICULTY_EPOCH
- self.constants.DIFFICULTY_DELAY
)
- (self.constants.DIFFICULTY_EPOCH - self.constants.DIFFICULTY_DELAY)
)
else:
diff_height = (
height
- (height % self.constants.DIFFICULTY_EPOCH)
+ self.constants.DIFFICULTY_DELAY
)
diff_height = height - (height % self.constants.DIFFICULTY_EPOCH) + self.constants.DIFFICULTY_DELAY
difficulty = all_proof_hashes[diff_height][1]
assert difficulty is not None
# Validate pospace to get iters
quality_str = header_block.proof_of_space.verify_and_get_quality_string(
self.constants.NUMBER_ZERO_BITS_CHALLENGE_SIG
self.constants.NUMBER_ZERO_BITS_PLOT_FILTER
)
assert quality_str is not None
if (
height
< self.constants.DIFFICULTY_EPOCH + self.constants.DIFFICULTY_DELAY
):
if height < self.constants.DIFFICULTY_EPOCH + self.constants.DIFFICULTY_DELAY:
min_iters = self.constants.MIN_ITERS_STARTING
else:
if (
height % self.constants.DIFFICULTY_EPOCH
< self.constants.DIFFICULTY_DELAY
):
height2 = (
height
- (height % self.constants.DIFFICULTY_EPOCH)
- self.constants.DIFFICULTY_EPOCH
- 1
)
if height % self.constants.DIFFICULTY_EPOCH < self.constants.DIFFICULTY_DELAY:
height2 = height - (height % self.constants.DIFFICULTY_EPOCH) - self.constants.DIFFICULTY_EPOCH - 1
else:
height2 = height - (height % self.constants.DIFFICULTY_EPOCH) - 1
@ -1240,16 +1086,10 @@ class WalletStateManager:
assert iters2 is not None
min_iters = uint64(
(iters2 - iters1)
// (
self.constants.DIFFICULTY_EPOCH
* self.constants.MIN_ITERS_PROPORTION
)
(iters2 - iters1) // (self.constants.DIFFICULTY_EPOCH * self.constants.MIN_ITERS_PROPORTION)
)
trunc = truncate_to_significant_bits(
min_iters, self.constants.SIGNIFICANT_BITS
)
trunc = truncate_to_significant_bits(min_iters, self.constants.SIGNIFICANT_BITS)
number_of_iters: uint64 = calculate_iterations_quality(
quality_str,
header_block.proof_of_space.size,
@ -1261,9 +1101,7 @@ class WalletStateManager:
if number_of_iters != header_block.proof_of_time.number_of_iterations:
return False
if not header_block.proof_of_time.is_valid(
self.constants.DISCRIMINANT_SIZE_BITS
):
if not header_block.proof_of_time.is_valid(self.constants.DISCRIMINANT_SIZE_BITS):
return False
return True
@ -1277,14 +1115,10 @@ class WalletStateManager:
tx_filter = PyBIP158([b for b in transactions_filter])
# Find fork point
fork_h: uint32 = find_fork_point_in_chain(
self.block_records, self.block_records[self.lca], new_block
)
fork_h: uint32 = find_fork_point_in_chain(self.block_records, self.block_records[self.lca], new_block)
# Get all unspent coins
my_coin_records_lca: Set[
WalletCoinRecord
] = await self.wallet_store.get_unspent_coins_at_height(uint32(fork_h))
my_coin_records_lca: Set[WalletCoinRecord] = await self.wallet_store.get_unspent_coins_at_height(uint32(fork_h))
# Filter coins up to and including fork point
unspent_coin_names: Set[bytes32] = set()
@ -1302,15 +1136,11 @@ class WalletStateManager:
# For each block, process additions to get all Coins, then process removals to get unspent coins
for reorg_block in reorg_blocks:
assert (
reorg_block.additions is not None and reorg_block.removals is not None
)
assert reorg_block.additions is not None and reorg_block.removals is not None
for addition in reorg_block.additions:
unspent_coin_names.add(addition.name())
for removal in reorg_block.removals:
record = await self.puzzle_store.get_derivation_record_for_puzzle_hash(
removal.puzzle_hash
)
record = await self.puzzle_store.get_derivation_record_for_puzzle_hash(removal.puzzle_hash)
if record is None:
continue
unspent_coin_names.remove(removal)
@ -1379,9 +1209,7 @@ class WalletStateManager:
result: List[Coin] = []
wallet_coin_records = await self.wallet_store.get_unspent_coins_at_height()
my_coins: Dict[bytes32, Coin] = {
r.coin.name(): r.coin for r in list(wallet_coin_records)
}
my_coins: Dict[bytes32, Coin] = {r.coin.name(): r.coin for r in list(wallet_coin_records)}
for coin in removals:
if coin.name() in my_coins:
@ -1396,9 +1224,7 @@ class WalletStateManager:
"""
await self.wallet_store.rollback_lca_to_block(index)
reorged: List[TransactionRecord] = await self.tx_store.get_transaction_above(
index
)
reorged: List[TransactionRecord] = await self.tx_store.get_transaction_above(index)
await self.tx_store.rollback_to_block(index)
await self.retry_sending_after_reorg(reorged)
@ -1476,11 +1302,7 @@ class WalletStateManager:
meta_data["pubkey"] = bytes(backup_pk.get_g1()).hex()
meta_data_bytes = json.dumps(meta_data).encode()
signature = bytes(
AugSchemeMPL.sign(
backup_pk, std_hash(encrypted) + std_hash(meta_data_bytes)
)
).hex()
signature = bytes(AugSchemeMPL.sign(backup_pk, std_hash(encrypted) + std_hash(meta_data_bytes))).hex()
backup["data"] = encrypted.decode()
backup["meta_data"] = meta_data
@ -1516,9 +1338,7 @@ class WalletStateManager:
self.wallets[uint32(id)] = wallet
await self.create_more_puzzle_hashes()
async def get_spendable_coins_for_wallet(
self, wallet_id: int
) -> Set[WalletCoinRecord]:
async def get_spendable_coins_for_wallet(self, wallet_id: int) -> Set[WalletCoinRecord]:
if self.lca is None:
return set()
@ -1531,14 +1351,10 @@ class WalletStateManager:
valid_index = current_index - coinbase_freeze_period
records = await self.wallet_store.get_spendable_for_index(
valid_index, wallet_id
)
records = await self.wallet_store.get_spendable_for_index(valid_index, wallet_id)
# Coins that are currently part of a transaction
unconfirmed_tx: List[
TransactionRecord
] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
unconfirmed_tx: List[TransactionRecord] = await self.tx_store.get_unconfirmed_for_wallet(wallet_id)
removal_dict: Dict[bytes32, Coin] = {}
for tx in unconfirmed_tx:
for coin in tx.removals:
@ -1547,9 +1363,7 @@ class WalletStateManager:
removal_dict[coin.name()] = coin
# Coins that are part of the trade
offer_locked_coins: Dict[
bytes32, WalletCoinRecord
] = await self.trade_manager.get_locked_coins()
offer_locked_coins: Dict[bytes32, WalletCoinRecord] = await self.trade_manager.get_locked_coins()
filtered = set()
for record in records:
@ -1570,26 +1384,20 @@ class WalletStateManager:
done: bool,
data: str,
):
await self.action_store.create_action(
name, wallet_id, type, callback, done, data
)
await self.action_store.create_action(name, wallet_id, type, callback, done, data)
self.tx_pending_changed()
async def set_action_done(self, action_id: int):
await self.action_store.action_done(action_id)
async def generator_received(
self, height: uint32, header_hash: uint32, program: Program
):
async def generator_received(self, height: uint32, header_hash: uint32, program: Program):
actions: List[WalletAction] = await self.action_store.get_all_pending_actions()
for action in actions:
data = json.loads(action.data)
action_data = data["data"]["action_data"]
if action.name == "request_generator":
stored_header_hash = bytes32(
hexstr_to_bytes(action_data["header_hash"])
)
stored_header_hash = bytes32(hexstr_to_bytes(action_data["header_hash"]))
stored_height = uint32(action_data["height"])
if stored_header_hash == header_hash and stored_height == height:
if action.done:

View File

@ -9,7 +9,6 @@ from blspy import AugSchemeMPL
from src.full_node.blockchain import Blockchain, ReceiveBlockResult
from src.types.full_block import FullBlock
from src.types.header import Header, HeaderData
from src.types.proof_of_space import ProofOfSpace
from src.util.ints import uint8, uint64, uint32
from src.util.errors import Err
@ -18,11 +17,11 @@ from src.types.pool_target import PoolTarget
from src.full_node.block_store import BlockStore
from src.full_node.coin_store import CoinStore
from src.consensus.find_fork_point import find_fork_point_in_chain
from src.util.make_test_constants import make_test_constants_with_genesis
from src.consensus.constants import constants
test_constants, bt = make_test_constants_with_genesis(
{
test_constants = constants.replace(
**{
"DIFFICULTY_STARTING": 1,
"DISCRIMINANT_SIZE_BITS": 8,
"BLOCK_TIME_TARGET": 10,
@ -30,9 +29,11 @@ test_constants, bt = make_test_constants_with_genesis(
"DIFFICULTY_WARP_FACTOR": 3,
"DIFFICULTY_DELAY": 2, # EPOCH / WARP_FACTOR
"MIN_ITERS_STARTING": 50 * 1,
"NUMBER_ZERO_BITS_CHALLENGE_SIG": 1,
"NUMBER_ZERO_BITS_PLOT_FILTER": 1, # H(plot signature of the challenge) must start with these many zeroes
"NUMBER_ZERO_BITS_ICP_FILTER": 1, # H(plot signature of the challenge) must start with these many zeroes
}
)
bt = None # TODO: almog
@pytest.fixture(scope="module")
@ -152,9 +153,7 @@ class TestBlockValidation:
blocks[9].proof_of_time,
Header(
new_header_data,
bt.get_plot_signature(
new_header_data, blocks[9].proof_of_space.plot_public_key
),
bt.get_plot_signature(new_header_data, blocks[9].proof_of_space.plot_public_key),
),
blocks[9].transactions_generator,
blocks[9].transactions_filter,
@ -187,9 +186,7 @@ class TestBlockValidation:
blocks[9].proof_of_time,
Header(
new_header_data,
bt.get_plot_signature(
new_header_data, blocks[9].proof_of_space.plot_public_key
),
bt.get_plot_signature(new_header_data, blocks[9].proof_of_space.plot_public_key),
),
blocks[9].transactions_generator,
blocks[9].transactions_filter,
@ -225,9 +222,7 @@ class TestBlockValidation:
blocks[9].proof_of_time,
Header(
new_header_data,
bt.get_plot_signature(
new_header_data, blocks[9].proof_of_space.plot_public_key
),
bt.get_plot_signature(new_header_data, blocks[9].proof_of_space.plot_public_key),
),
blocks[9].transactions_generator,
blocks[9].transactions_filter,
@ -245,9 +240,7 @@ class TestBlockValidation:
blocks[9].proof_of_time,
Header(
blocks[9].header.data,
AugSchemeMPL.sign(
AugSchemeMPL.key_gen(bytes([5] * 32)), token_bytes(32)
),
AugSchemeMPL.sign(AugSchemeMPL.key_gen(bytes([5] * 32)), token_bytes(32)),
),
blocks[9].transactions_generator,
blocks[9].transactions_filter,
@ -294,9 +287,7 @@ class TestBlockValidation:
blocks[9].proof_of_time,
Header(
new_header_data,
bt.get_plot_signature(
new_header_data, blocks[9].proof_of_space.plot_public_key
),
bt.get_plot_signature(new_header_data, blocks[9].proof_of_space.plot_public_key),
),
blocks[9].transactions_generator,
blocks[9].transactions_filter,
@ -343,9 +334,7 @@ class TestBlockValidation:
blocks[9].proof_of_time,
Header(
new_header_data,
bt.get_plot_signature(
new_header_data, blocks[9].proof_of_space.plot_public_key
),
bt.get_plot_signature(new_header_data, blocks[9].proof_of_space.plot_public_key),
),
blocks[9].transactions_generator,
blocks[9].transactions_filter,
@ -382,9 +371,7 @@ class TestBlockValidation:
blocks[9].proof_of_time,
Header(
new_header_data,
bt.get_plot_signature(
new_header_data, blocks[9].proof_of_space.plot_public_key
),
bt.get_plot_signature(new_header_data, blocks[9].proof_of_space.plot_public_key),
),
blocks[9].transactions_generator,
blocks[9].transactions_filter,
@ -397,12 +384,8 @@ class TestBlockValidation:
async def test_invalid_max_height(self, initial_blockchain):
blocks, b = initial_blockchain
print(blocks[9].header)
pool_target = PoolTarget(
blocks[9].header.data.pool_target.puzzle_hash, uint32(8)
)
agg_sig = bt.get_pool_key_signature(
pool_target, blocks[9].proof_of_space.pool_public_key
)
pool_target = PoolTarget(blocks[9].header.data.pool_target.puzzle_hash, uint32(8))
agg_sig = bt.get_pool_key_signature(pool_target, blocks[9].proof_of_space.pool_public_key)
assert agg_sig is not None
new_header_data = HeaderData(
@ -429,9 +412,7 @@ class TestBlockValidation:
blocks[9].proof_of_time,
Header(
new_header_data,
bt.get_plot_signature(
new_header_data, blocks[9].proof_of_space.plot_public_key
),
bt.get_plot_signature(new_header_data, blocks[9].proof_of_space.plot_public_key),
),
blocks[9].transactions_generator,
blocks[9].transactions_filter,
@ -443,12 +424,8 @@ class TestBlockValidation:
@pytest.mark.asyncio
async def test_invalid_pool_sig(self, initial_blockchain):
blocks, b = initial_blockchain
pool_target = PoolTarget(
blocks[9].header.data.pool_target.puzzle_hash, uint32(10)
)
agg_sig = bt.get_pool_key_signature(
pool_target, blocks[9].proof_of_space.pool_public_key
)
pool_target = PoolTarget(blocks[9].header.data.pool_target.puzzle_hash, uint32(10))
agg_sig = bt.get_pool_key_signature(pool_target, blocks[9].proof_of_space.pool_public_key)
assert agg_sig is not None
new_header_data = HeaderData(
@ -475,9 +452,7 @@ class TestBlockValidation:
blocks[9].proof_of_time,
Header(
new_header_data,
bt.get_plot_signature(
new_header_data, blocks[9].proof_of_space.plot_public_key
),
bt.get_plot_signature(new_header_data, blocks[9].proof_of_space.plot_public_key),
),
blocks[9].transactions_generator,
blocks[9].transactions_filter,
@ -515,9 +490,7 @@ class TestBlockValidation:
blocks[9].proof_of_time,
Header(
new_header_data,
bt.get_plot_signature(
new_header_data, blocks[9].proof_of_space.plot_public_key
),
bt.get_plot_signature(new_header_data, blocks[9].proof_of_space.plot_public_key),
),
blocks[9].transactions_generator,
blocks[9].transactions_filter,
@ -576,9 +549,7 @@ class TestReorgs:
await b.receive_block(blocks[i])
assert b.get_current_tips()[0].height == 15
blocks_reorg_chain = bt.get_consecutive_blocks(
test_constants, 7, blocks[:10], 9, b"2"
)
blocks_reorg_chain = bt.get_consecutive_blocks(test_constants, 7, blocks[:10], 9, b"2")
for i in range(1, len(blocks_reorg_chain)):
reorg_block = blocks_reorg_chain[i]
result, removed, error_code = await b.receive_block(reorg_block)
@ -609,9 +580,7 @@ class TestReorgs:
assert b.get_current_tips()[0].height == 20
# Reorg from genesis
blocks_reorg_chain = bt.get_consecutive_blocks(
test_constants, 21, [blocks[0]], 9, b"3"
)
blocks_reorg_chain = bt.get_consecutive_blocks(test_constants, 21, [blocks[0]], 9, b"3")
for i in range(1, len(blocks_reorg_chain)):
reorg_block = blocks_reorg_chain[i]
result, removed, error_code = await b.receive_block(reorg_block)
@ -624,9 +593,7 @@ class TestReorgs:
assert b.get_current_tips()[0].height == 21
# Reorg back to original branch
blocks_reorg_chain_2 = bt.get_consecutive_blocks(
test_constants, 3, blocks[:-1], 9, b"4"
)
blocks_reorg_chain_2 = bt.get_consecutive_blocks(test_constants, 3, blocks[:-1], 9, b"4")
result, _, error_code = await b.receive_block(blocks_reorg_chain_2[20])
assert result == ReceiveBlockResult.ADDED_AS_ORPHAN
@ -690,36 +657,20 @@ class TestReorgs:
for i in range(1, len(blocks_2)):
await b.receive_block(blocks_2[i])
assert (
find_fork_point_in_chain(b.headers, blocks[10].header, blocks_2[10].header)
== 4
)
assert find_fork_point_in_chain(b.headers, blocks[10].header, blocks_2[10].header) == 4
for i in range(1, len(blocks_3)):
await b.receive_block(blocks_3[i])
assert (
find_fork_point_in_chain(b.headers, blocks[10].header, blocks_3[10].header)
== 2
)
assert find_fork_point_in_chain(b.headers, blocks[10].header, blocks_3[10].header) == 2
assert b.lca_block.data == blocks[2].header.data
for i in range(1, len(blocks_reorg)):
await b.receive_block(blocks_reorg[i])
assert (
find_fork_point_in_chain(
b.headers, blocks[10].header, blocks_reorg[10].header
)
== 8
)
assert (
find_fork_point_in_chain(
b.headers, blocks_2[10].header, blocks_reorg[10].header
)
== 4
)
assert find_fork_point_in_chain(b.headers, blocks[10].header, blocks_reorg[10].header) == 8
assert find_fork_point_in_chain(b.headers, blocks_2[10].header, blocks_reorg[10].header) == 4
assert b.lca_block.data == blocks[4].header.data
await connection.close()
b.shut_down()

View File

@ -20,11 +20,12 @@ from src.server.start_service import Service
from src.util.ints import uint16, uint32
from src.util.make_test_constants import make_test_constants_with_genesis
from src.util.chech32 import encode_puzzle_hash
from src.consensus.constants import constants
from tests.time_out_assert import time_out_assert
test_constants, bt = make_test_constants_with_genesis(
{
test_constants = constants.replace(
**{
"DIFFICULTY_STARTING": 1,
"DISCRIMINANT_SIZE_BITS": 8,
"BLOCK_TIME_TARGET": 10,
@ -35,10 +36,12 @@ test_constants, bt = make_test_constants_with_genesis(
"TX_PER_SEC": 1,
"MEMPOOL_BLOCK_BUFFER": 10,
"MIN_ITERS_STARTING": 50 * 1,
"NUMBER_ZERO_BITS_CHALLENGE_SIG": 1,
"NUMBER_ZERO_BITS_PLOT_FILTER": 1,
"NUMBER_ZERO_BITS_ICP_FILTER": 1,
"CLVM_COST_RATIO_CONSTANT": 108,
}
)
bt = None # TODO: almog
self_hostname = bt.config["self_hostname"]
@ -251,12 +254,8 @@ async def setup_vdf_clients(port):
await kill_processes()
async def setup_timelord(
port, full_node_port, sanitizer, consensus_constants: ConsensusConstants
):
config = bt.config["timelord"]
config["port"] = port
config["full_node_peer"]["port"] = full_node_port
async def setup_timelord(port, full_node_port, sanitizer, consensus_constants: ConsensusConstants):
config = load_config(bt.root_path, "config.yaml", "timelord")
config["sanitizer_mode"] = sanitizer
if sanitizer:
config["vdf_server"]["port"] = 7999
@ -284,12 +283,8 @@ async def setup_two_nodes(consensus_constants: ConsensusConstants):
Setup and teardown of two full nodes, with blockchains and separate DBs.
"""
node_iters = [
setup_full_node(
consensus_constants, "blockchain_test.db", 21234, simulator=False
),
setup_full_node(
consensus_constants, "blockchain_test_2.db", 21235, simulator=False
),
setup_full_node(consensus_constants, "blockchain_test.db", 21234, simulator=False),
setup_full_node(consensus_constants, "blockchain_test_2.db", 21235, simulator=False),
]
fn1, s1 = await node_iters[0].__anext__()
@ -300,16 +295,10 @@ async def setup_two_nodes(consensus_constants: ConsensusConstants):
await _teardown_nodes(node_iters)
async def setup_node_and_wallet(
consensus_constants: ConsensusConstants, starting_height=None
):
async def setup_node_and_wallet(consensus_constants: ConsensusConstants, starting_height=None):
node_iters = [
setup_full_node(
consensus_constants, "blockchain_test.db", 21234, simulator=False
),
setup_wallet_node(
21235, consensus_constants, None, starting_height=starting_height
),
setup_full_node(consensus_constants, "blockchain_test.db", 21234, simulator=False),
setup_wallet_node(21235, consensus_constants, None, starting_height=starting_height),
]
full_node, s1 = await node_iters[0].__anext__()
@ -377,12 +366,8 @@ async def setup_full_system(consensus_constants: ConsensusConstants):
setup_farmer(21235, consensus_constants, uint16(21237)),
setup_vdf_clients(8000),
setup_timelord(21236, 21237, False, consensus_constants),
setup_full_node(
consensus_constants, "blockchain_test.db", 21237, 21232, False, 10
),
setup_full_node(
consensus_constants, "blockchain_test_2.db", 21238, 21232, False, 10
),
setup_full_node(consensus_constants, "blockchain_test.db", 21237, 21232, False, 10),
setup_full_node(consensus_constants, "blockchain_test_2.db", 21238, 21232, False, 10),
setup_vdf_clients(7999),
setup_timelord(21239, 21238, True, consensus_constants),
]

View File

@ -4,18 +4,21 @@ from typing import List
from tests.setup_nodes import setup_full_system
from src.util.ints import uint16, uint32
from src.types.full_block import FullBlock
from src.util.make_test_constants import make_test_constants_with_genesis
from tests.time_out_assert import time_out_assert, time_out_assert_custom_interval
from src.types.peer_info import PeerInfo
from src.consensus.constants import constants
test_constants, bt = make_test_constants_with_genesis(
{
test_constants = constants.replace(
**{
"DIFFICULTY_STARTING": 1000,
"MIN_ITERS_STARTING": 100000,
"NUMBER_ZERO_BITS_CHALLENGE_SIG": 1,
"NUMBER_ZERO_BITS_PLOT_FILTER": 1,
"NUMBER_ZERO_BITS_ICP_FILTER": 1,
}
)
bt = None # TODO: almog
def node_height_at_least(node, h):
if (max([h.height for h in node.blockchain.get_current_tips()])) >= h:
@ -42,12 +45,8 @@ class TestSimulation:
async def has_compact(node1, node2, max_height):
for h in range(1, max_height):
blocks_1: List[FullBlock] = await node1.block_store.get_blocks_at(
[uint32(h)]
)
blocks_2: List[FullBlock] = await node2.block_store.get_blocks_at(
[uint32(h)]
)
blocks_1: List[FullBlock] = await node1.block_store.get_blocks_at([uint32(h)])
blocks_2: List[FullBlock] = await node2.block_store.get_blocks_at([uint32(h)])
has_compact_1 = False
has_compact_2 = False
for block in blocks_1:
@ -64,6 +63,4 @@ class TestSimulation:
return True
return True
await time_out_assert_custom_interval(
120, 2, has_compact, True, node1, node2, max_height
)
await time_out_assert_custom_interval(120, 2, has_compact, True, node1, node2, max_height)

View File

@ -1,19 +1,23 @@
from secrets import token_bytes
from src.types.proof_of_space import ProofOfSpace # pylint: disable=E0401
from src.consensus.constants import constants
from src.types.classgroup import ClassgroupElement
class TestProofOfSpace:
def test_can_create_proof(self):
"""
Tests that the change of getting a correct proof is exactly 1/256.
Tests that the change of getting a correct proof is exactly 1/target_filter.
"""
num_trials = 40000
success_count = 0
target_filter = (2 ** constants.NUMBER_ZERO_BITS_PLOT_FILTER) * (2 ** constants.NUMBER_ZERO_BITS_ICP_FILTER)
for _ in range(num_trials):
challenge_hash = token_bytes(32)
plot_id = token_bytes(32)
if ProofOfSpace.can_create_proof(plot_id, challenge_hash, 8):
icp_output = ClassgroupElement.get_default_element()
if ProofOfSpace.can_create_proof(constants, plot_id, challenge_hash, icp_output):
success_count += 1
assert abs((success_count * 256 / num_trials) - 1) < 0.3
assert abs((success_count * target_filter / num_trials) - 1) < 0.3