Merge branch 'master' of github.com:Chia-Network/chia-blockchain into network

commit f6e1506e44
@@ -24,6 +24,7 @@ def approximate_parameters(T):
     # 1/w is the approximate proportion of time spent on the proof
     w = math.floor(T / (T/k + L * pow(2, k+1))) - 2
+    w = max(w, 0)
     return (L, k, w)
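The new clamp guards against w going negative when T is small relative to L * 2^(k+1). A minimal sketch of the effect, with L and k passed in as assumed fixed inputs (the real function derives them from T as well):

    import math

    def approximate_parameters(T, L=1, k=10):
        # 1/w is the approximate proportion of time spent on the proof.
        w = math.floor(T / (T / k + L * pow(2, k + 1))) - 2
        # For small T the floor term is 0 or 1, so w would be negative without
        # the clamp; e.g. T=1000 gives floor(1000 / 2148) - 2 == -2.
        w = max(w, 0)
        return (L, k, w)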
File diff suppressed because one or more lines are too long
@@ -1,9 +1,11 @@
-DIFFICULTY_STARTING = 1 << 32
+DIFFICULTY_STARTING = 50  # These are in units of 2^32
-DIFFICULTY_EPOCH = 2016  # The number of blocks per epoch
+DIFFICULTY_EPOCH = 10  # The number of blocks per epoch
-DIFFICULTY_TARGET = 200  # The target number of seconds per block
+DIFFICULTY_TARGET = 10  # The target number of seconds per block
 DIFFICULTY_FACTOR = 4  # The next difficulty is truncated to range [prev / FACTOR, prev * FACTOR]
 DIFFICULTY_WARP_FACTOR = 4  # DELAY divides EPOCH in order to warp efficiently.
-DIFFICULTY_DELAY = DIFFICULTY_EPOCH // DIFFICULTY_WARP_FACTOR  # The delay in blocks before the difficulty reset applies.
+DIFFICULTY_DELAY = DIFFICULTY_EPOCH // DIFFICULTY_WARP_FACTOR  # The delay in blocks before the difficulty reset applies
 DISCRIMINANT_SIZE_BITS = 1024

+# The percentage of the difficulty target that the VDF must be run for, at a minimum
 MIN_BLOCK_TIME_PERCENT = 20
-MIN_VDF_ITERATIONS = 1 << 10
+MIN_VDF_ITERATIONS = 1  # These are in units of 2^32
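Difficulty is now stored in units of 2^32, which keeps the constant human-readable; the consumer shifts it back up when it needs the raw scale (see the calculate_iterations_quality hunk below). A worked sketch of the scaling:

    DIFFICULTY_STARTING = 50                   # stored in units of 2^32
    raw_difficulty = DIFFICULTY_STARTING << 32
    print(raw_difficulty)                      # 214748364800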
@@ -43,7 +43,7 @@ def calculate_iterations_quality(quality: bytes32, size: uint8, difficulty: uint
     between 0 and 1, then divided by expected plot size, and finally multiplied by the
     difficulty.
     """
-    dec_iters = (Decimal(int(difficulty)) *
+    dec_iters = (Decimal(int(difficulty) << 32) *
                  (_quality_to_decimal(quality) / _expected_plot_size(size)))
    return uint64(max(MIN_VDF_ITERATIONS, int(dec_iters.to_integral_exact(rounding=ROUND_UP))))
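A worked example of the Decimal arithmetic, with the quality ratio and plot-size divisor stubbed out as assumed values (the real _quality_to_decimal and _expected_plot_size are not shown in this diff):

    from decimal import Decimal, ROUND_UP

    difficulty = 50                          # stored units of 2^32
    quality_ratio = Decimal("0.5")           # stand-in for _quality_to_decimal(quality)
    expected_plot_size = Decimal(1 << 35)    # stand-in for _expected_plot_size(size)

    dec_iters = Decimal(int(difficulty) << 32) * (quality_ratio / expected_plot_size)
    iters = max(1, int(dec_iters.to_integral_exact(rounding=ROUND_UP)))  # MIN_VDF_ITERATIONS floor
    print(iters)  # 4 -- ROUND_UP rounds the exact value 3.125 up to the next integer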
@@ -243,6 +243,7 @@ async def proof_of_space_arrived(proof_of_space_arrived: farmer_protocol.ProofOf
 @api_request
 async def deep_reorg_notification(deep_reorg_notification: farmer_protocol.DeepReorgNotification):
     # TODO: implement
+    # TODO: "forget everything and start over (reset db)"
     log.error(f"Deep reorg notification not implemented.")
@@ -4,7 +4,7 @@ from secrets import token_bytes
 from hashlib import sha256
 from chiapos import Verifier
 from blspy import Util, Signature, PrivateKey
-from asyncio import Lock
+from asyncio import Lock, sleep
 from typing import Dict, List, Tuple, Optional
 from src.util.api_decorators import api_request
 from src.protocols import farmer_protocol
@@ -18,6 +18,7 @@ from src.types.challenge import Challenge
 from src.types.block_header import BlockHeaderData, BlockHeader
 from src.types.proof_of_space import ProofOfSpace
 from src.consensus.pot_iterations import calculate_iterations
+from src.consensus.constants import DIFFICULTY_TARGET
 from src.types.full_block import FullBlock
 from src.types.fees_target import FeesTarget
 from src.blockchain import Blockchain
@@ -30,6 +31,7 @@ farmer_ip = "127.0.0.1"
 farmer_port = 8001
 timelord_ip = "127.0.0.1"
 timelord_port = 8003
+update_pot_estimate_interval: int = 30


 class Database:
@@ -44,7 +46,7 @@ class Database:
     # These are the blocks that we created, have PoS, but not PoT yet, keyed from the
     # block header hash
     unfinished_blocks: Dict[Tuple[bytes32, int], FullBlock] = {}
-    proof_of_time_estimate_ips: uint64 = uint64(3000)
+    proof_of_time_estimate_ips: uint64 = uint64(1500)


 log = logging.getLogger(__name__)
@@ -86,6 +88,16 @@ async def send_challenges_to_timelords():
         yield OutboundMessage("timelord", "challenge_start", request, False, True)


+async def proof_of_time_estimate_interval():
+    while True:
+        estimated_ips: Optional[uint64] = db.blockchain.get_vdf_rate_estimate()
+        async with db.lock:
+            if estimated_ips is not None:
+                db.proof_of_time_estimate_ips = estimated_ips
+                log.info(f"Updated proof of time estimate to {estimated_ips} iterations per second.")
+        await sleep(update_pot_estimate_interval)
+
+
 @api_request
 async def request_header_hash(request: farmer_protocol.RequestHeaderHash):
     """
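The new background loop reads the rate estimate outside the lock and only takes db.lock to publish it. A minimal self-contained sketch of the same pattern, with the blockchain estimate stubbed out as an assumed callable:

    import asyncio
    from typing import Optional

    class Database:
        lock = asyncio.Lock()
        proof_of_time_estimate_ips: int = 1500

    db = Database()

    def get_vdf_rate_estimate() -> Optional[int]:
        return 3000  # stand-in for db.blockchain.get_vdf_rate_estimate()

    async def proof_of_time_estimate_interval(interval: float = 0.1):
        while True:
            estimated_ips = get_vdf_rate_estimate()  # cheap read, done outside the lock
            async with db.lock:                      # lock held only for the write
                if estimated_ips is not None:
                    db.proof_of_time_estimate_ips = estimated_ips
            await asyncio.sleep(interval)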
@@ -167,15 +179,13 @@ async def header_signature(header_signature: farmer_protocol.HeaderSignature):
                                                  [pos.plot_pubkey])

     block_header: BlockHeader = BlockHeader(block_header_data, header_signature.header_signature)

-    assert db.blockchain.block_can_be_added(block_header, block_body)
-
     trunk: TrunkBlock = TrunkBlock(pos, None, None, block_header)
     unfinished_block_obj: FullBlock = FullBlock(trunk, block_body)

     # Propagate to ourselves (which validates and does further propagations)
     request = peer_protocol.UnfinishedBlock(unfinished_block_obj)
     async for m in unfinished_block(request):
+        # Yield all new messages (propagation to peers)
         yield m
@@ -188,6 +198,9 @@ async def proof_of_time_finished(request: timelord_protocol.ProofOfTimeFinished)
     """
     async with db.lock:
         dict_key = (request.proof.output.challenge_hash, request.proof.output.number_of_iterations)
+        if dict_key not in db.unfinished_blocks:
+            log.warn(f"Received a proof of time that we cannot use to complete a block {dict_key}")
+            return
         unfinished_block_obj: FullBlock = db.unfinished_blocks[dict_key]
         prev_block: TrunkBlock = db.blockchain.get_trunk_block(unfinished_block_obj.trunk_block.prev_header_hash)
         difficulty: uint64 = db.blockchain.get_next_difficulty(unfinished_block_obj.trunk_block.prev_header_hash)
@@ -195,7 +208,8 @@ async def proof_of_time_finished(request: timelord_protocol.ProofOfTimeFinished)
         challenge: Challenge = Challenge(unfinished_block_obj.trunk_block.proof_of_space.get_hash(),
                                          request.proof.output.get_hash(),
                                          prev_block.challenge.height + 1,
-                                         prev_block.challenge.total_weight + difficulty)
+                                         prev_block.challenge.total_weight + difficulty,
+                                         prev_block.challenge.total_iters + request.proof.output.number_of_iterations)

         new_trunk_block = TrunkBlock(unfinished_block_obj.trunk_block.proof_of_space,
                                      request.proof,
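Each new Challenge now carries a running total of VDF iterations alongside total weight, accumulated the same way: the parent's total plus this block's contribution. A sketch of the accumulation with hypothetical placeholder numbers:

    # Illustrative parent/child values for the two running totals.
    parent_total_weight = 400         # sum of difficulty over the chain so far
    parent_total_iters = 9_000_000    # sum of VDF iterations over the chain so far

    difficulty = 50
    number_of_iterations = 1_200_000

    child_total_weight = parent_total_weight + difficulty          # 450
    child_total_iters = parent_total_iters + number_of_iterations  # 10_200_000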
@@ -240,7 +254,10 @@ async def unfinished_block(unfinished_block: peer_protocol.UnfinishedBlock):
     timelords.
     """
     async with db.lock:
-        # TODO: verify block using blockchain class, including coinbase rewards
+        if not db.blockchain.is_child_of_head(unfinished_block.block):
+            return
+
+        # TODO(alex): verify block using blockchain class, including coinbase rewards
         prev_block: TrunkBlock = db.blockchain.get_trunk_block(
             unfinished_block.block.trunk_block.prev_header_hash)
@@ -255,8 +272,14 @@ async def unfinished_block(unfinished_block: peer_protocol.UnfinishedBlock):
             log.info(f"Have already seen unfinished block {(challenge_hash, iterations_needed)}")
             return

+        expected_time: float = iterations_needed / db.proof_of_time_estimate_ips
+
+        # TODO(alex): tweak this
+        log.info(f"Expected finish time: {expected_time}")
+        if expected_time > 10 * DIFFICULTY_TARGET:
+            return
+
         db.unfinished_blocks[(challenge_hash, iterations_needed)] = unfinished_block.block
-        # TODO: Only propagate if it's actually good

         timelord_request = timelord_protocol.ProofOfSpaceInfo(challenge_hash, iterations_needed)
         yield OutboundMessage("timelord", "proof_of_space_info", timelord_request, False, True)
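The new filter drops unfinished blocks whose proof of time would take far too long to materialize, judged against the node's current iterations-per-second estimate. Worked numbers under this commit's constants (DIFFICULTY_TARGET = 10, estimate 1500 ips):

    DIFFICULTY_TARGET = 10             # seconds per block (this commit's test value)
    proof_of_time_estimate_ips = 1500

    iterations_needed = 120_000
    expected_time = iterations_needed / proof_of_time_estimate_ips  # 80.0 seconds

    # 80.0 <= 10 * DIFFICULTY_TARGET (100), so this block is kept; at
    # 200_000 iterations the expected 133.3 s would exceed the cutoff.
    if expected_time > 10 * DIFFICULTY_TARGET:
        print("dropped")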
@@ -286,7 +309,6 @@ async def block(block: peer_protocol.Block):
         if header_hash in db.bodies and block.block.body in db.bodies[header_hash]:
             log.info(f"Already have block {header_hash}")
             return

         # TODO(alex): Check if we care about this block, we don't want to add random
         # disconnected blocks. For example if it's on one of the heads, or if it's an older
         # block that we need
@@ -9,7 +9,10 @@ from src.types.coinbase import CoinbaseInfo
 Protocol between farmer and full node.
 """

+"""
+Farmer <- Full node
+Update current height
+"""
 @cbor_message(tag=2000)
 class ProofOfSpaceFinalized:
     challenge_hash: bytes32
@@ -10,38 +10,74 @@ from src.types.peer_info import PeerInfo
 Protocol between full nodes.
 """

+"""
+If already seen, ignore
+Validate transaction
+If consistent with at least 1/3 heads, store in mempool
+Propagate transaction
+"""
 @cbor_message(tag=4000)
 class NewTransaction:
     transaction: Transaction


+"""
+TODO(alex): update this
+If already seen, ignore
+If prev block not a head, ignore
+Call self.ProofOfTimeFinished
+Propagate PoT (?)
+"""
 @cbor_message(tag=4001)
 class NewProofOfTime:
     proof: ProofOfTime


+"""
+TODO(alex): update this
+If not a child of a head, ignore
+If we have a PoT to complete this block, call self.Block
+Otherwise: validate, store, and propagate
+"""
 @cbor_message(tag=4002)
 class UnfinishedBlock:
     # Block that does not have ProofOfTime and Challenge
     block: FullBlock


+"""
+If have block, return block
+TODO: request blocks?
+"""
 @cbor_message(tag=4003)
 class RequestBlock:
     header_hash: bytes32


+"""
+TODO(alex): update this
+If already have, ignore
+If not child of a head, or ancestor of a head, ignore
+Add block to head
+  - Validate block
+If heads updated, propagate block to full nodes, farmers, timelords
+"""
 @cbor_message(tag=4004)
 class Block:
     block: FullBlock


+"""
+Return full list of peers
+"""
 @cbor_message(tag=4005)
 class RequestPeers:
     pass


+"""
+Update list of peers
+"""
 @cbor_message(tag=4006)
 class Peers:
     peer_list: List[PeerInfo]
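Each message here is a @cbor_message(tag=...)-decorated class whose fields define the wire payload. As a rough analogue only (not this repo's decorator, whose internals are not shown in the diff), the plain cbor2 library plus a dataclass can illustrate the tagged-encoding idea:

    from dataclasses import dataclass, asdict
    import cbor2

    @dataclass
    class RequestBlock:
        header_hash: bytes

    TAG = 4003  # same tag number the repo assigns to RequestBlock

    def encode(msg: RequestBlock) -> bytes:
        # Wrap the field dict in a semantic CBOR tag, mirroring @cbor_message(tag=4003)
        return cbor2.dumps(cbor2.CBORTag(TAG, asdict(msg)))

    def decode(data: bytes) -> RequestBlock:
        tagged = cbor2.loads(data)
        assert tagged.tag == TAG
        return RequestBlock(**tagged.value)

    msg = decode(encode(RequestBlock(header_hash=b"\x00" * 32)))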
@@ -7,7 +7,11 @@ from src.types.proof_of_time import ProofOfTime
 Protocol between timelord and full node.
 """

+"""
+If don't have the unfinished block, ignore
+Validate PoT
+Call self.Block
+"""
 @cbor_message(tag=3000)
 class ProofOfTimeFinished:
     proof: ProofOfTime
@@ -8,6 +8,19 @@ from src.server.server import start_chia_server, start_chia_client
 logging.basicConfig(format='FullNode %(name)-23s: %(levelname)-8s %(message)s', level=logging.INFO)
 global_connections = PeerConnections()

+"""
+Full node startup algorithm:
+ - Update peer list (?)
+ - Start server
+ - Sync:
+    - Check which are the heaviest blocks
+    - Request flyclient proofs for all heads
+    - Blacklist peers with invalid heads
+    - Sync blockchain up to heads (request blocks in batches, and add to queue)
+ - If connected to farmer, send challenges
+ - If connected to timelord, send challenges
+"""

 async def main():
     farmer_con_task, farmer_client = await start_chia_client(full_node.farmer_ip, full_node.farmer_port,
@@ -25,6 +38,9 @@ async def main():
     async for msg in full_node.send_challenges_to_timelords():
         timelord_client.push(msg)

+    # Periodically update our estimate of proof of time speeds
+    asyncio.create_task(full_node.proof_of_time_estimate_interval())
+
     await asyncio.gather(farmer_con_task, timelord_con_task, server)

 asyncio.run(main())
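Launching the estimator with asyncio.create_task keeps it running alongside the gathered connection tasks instead of blocking main(). A minimal self-contained sketch of the pattern (names here are illustrative):

    import asyncio

    async def periodic_estimate():
        while True:
            # ... refresh some cached estimate here ...
            await asyncio.sleep(30)

    async def main():
        # Fire-and-forget background task; it runs concurrently with the
        # awaited workload below and dies when the event loop shuts down.
        task = asyncio.create_task(periodic_estimate())
        await asyncio.sleep(0.1)  # stand-in for gathering the server/connection tasks
        task.cancel()

    asyncio.run(main())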
@@ -30,4 +30,4 @@ class BlockHeader:

     @property
     def header_hash(self):
-        return sha256(self.serialize()).digest()
+        return bytes32(sha256(self.serialize()).digest())
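Wrapping the raw digest in bytes32 gives the hash a typed identity rather than a plain bytes. A sketch of what such a wrapper might enforce (this repo's actual bytes32 implementation is not shown in the diff, so this is an assumption):

    from hashlib import sha256

    class bytes32(bytes):
        # Hypothetical minimal wrapper: a bytes subclass that refuses wrong lengths.
        def __new__(cls, data: bytes):
            if len(data) != 32:
                raise ValueError(f"bytes32 requires exactly 32 bytes, got {len(data)}")
            return super().__new__(cls, data)

    digest = bytes32(sha256(b"serialized header").digest())  # ok: sha256 digests are 32 bytes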
@@ -9,6 +9,7 @@ class Challenge:
     proof_of_time_output_hash: bytes32
     height: uint32
     total_weight: uint64
+    total_iters: uint64

     def is_valid(self) -> bool:
         # TODO
@@ -15,18 +15,14 @@ class TrunkBlock:

     def is_valid(self):
         if not self.proof_of_time or not self.challenge:
-            print("1 false")
             return False
         pos_quality = self.proof_of_space.verify_and_get_quality(self.proof_of_time.output.challenge_hash)
         # TODO: check iterations
         if not pos_quality:
-            print("2 false")
             return False
         if not self.proof_of_space.get_hash() == self.challenge.proof_of_space_hash:
-            print("3 false")
             return False
         if not self.proof_of_time.output.get_hash() == self.challenge.proof_of_time_output_hash:
-            print("4 false")
             return False
         return self.challenge.is_valid() and self.proof_of_time.is_valid() and self.header.is_valid()
@@ -20,6 +20,6 @@ def api_request(f):
         inter = dict(binding.arguments)
         print_args = {k: v for (k, v) in inter.items() if k != "source_connection"
                       and k != "all_connections"}
-        log.info(f"{f.__name__}({print_args})")
+        log.info(f"{f.__name__}({print_args})"[:200])
         return f(**inter)
     return f_substitute
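The [:200] slice applies to the already-formatted f-string, so oversized argument dumps are truncated before they reach the log. A quick illustration:

    name = "unfinished_block"
    print_args = {"block": "x" * 1000}    # a large payload that would flood the log

    line = f"{name}({print_args})"[:200]  # slice the rendered string, not the dict
    print(len(line))                      # 200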
File diff suppressed because one or more lines are too long