# This Tiltfile contains the deployment and build config for the Wormhole devnet.
#
# We use Buildkit cache mounts and careful layering to avoid unnecessary rebuilds - almost
# all source code changes result in small, incremental rebuilds. Dockerfiles are written such
# that, for example, changing the contract source code won't cause Solana itself to be rebuilt.
#
load("ext://namespace", "namespace_create", "namespace_inject")
load("ext://secret", "secret_yaml_generic")
# set the replica value of a StatefulSet
def set_replicas_in_statefulset(config_yaml, statefulset_name, num_replicas):
    for obj in config_yaml:
        if obj["kind"] == "StatefulSet" and obj["metadata"]["name"] == statefulset_name:
            obj["spec"]["replicas"] = num_replicas
    return config_yaml
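
# Illustrative call (values are hypothetical) - scale the "guardian" StatefulSet to 5 replicas:
#   set_replicas_in_statefulset(read_yaml_stream("devnet/node.yaml"), "guardian", 5)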
allow_k8s_contexts("ci")
# Disable telemetry by default
analytics_settings(False)
# Moar updates (default is 3)
update_settings(max_parallel_updates = 10)
# Runtime configuration
config.define_bool("ci", False, "We are running in CI")
config.define_bool("manual", False, "Set TRIGGER_MODE_MANUAL by default")
config.define_string("num", False, "Number of guardian nodes to run")
# You do not usually need to set this argument - it is for debugging only. If you do use a different
# namespace, note that the "wormhole" namespace is hardcoded in tests, and don't forget to specify the
# argument when running "tilt down".
#
config.define_string("namespace", False, "Kubernetes namespace to use")
# These arguments will enable writing Guardian events to a cloud BigTable instance.
# Writing to a cloud BigTable is optional. These arguments are not required to run the devnet.
config.define_string("gcpProject", False, "GCP project ID for BigTable persistence")
config.define_string("bigTableKeyPath", False, "Path to BigTable json key file")
# When running Tilt on a server, this can be used to set the public hostname Tilt runs on
# for service links in the UI to work.
config.define_string("webHost", False, "Public hostname for port forwards")
# Components
config.define_bool("near", False, "Enable Near component")
config.define_bool("sui", False, "Enable Sui component")
config.define_bool("btc", False, "Enable BTC component")
config.define_bool("aptos", False, "Enable Aptos component")
config.define_bool("algorand", False, "Enable Algorand component")
config.define_bool("evm2", False, "Enable second Eth component")
config.define_bool("solana", False, "Enable Solana component")
config.define_bool("pythnet", False, "Enable PythNet component")
config.define_bool("terra_classic", False, "Enable Terra Classic component")
config.define_bool("terra2", False, "Enable Terra 2 component")
config.define_bool("spy_relayer", False, "Enable spy relayer")
config.define_bool("ci_tests", False, "Enable tests runner component")
config.define_bool("guardiand_debug", False, "Enable dlv endpoint for guardiand")
config.define_bool("node_metrics", False, "Enable Prometheus & Grafana for Guardian metrics")
config.define_bool("guardiand_governor", False, "Enable chain governor in guardiand")
config.define_bool("wormchain", False, "Enable a wormchain node")
config.define_bool("ibc_relayer", False, "Enable IBC relayer between cosmos chains")
cfg = config.parse()
num_guardians = int(cfg.get("num", "1"))
namespace = cfg.get("namespace", "wormhole")
gcpProject = cfg.get("gcpProject", "")
bigTableKeyPath = cfg.get("bigTableKeyPath", "")
webHost = cfg.get("webHost", "localhost")
ci = cfg.get("ci", False)
algorand = cfg.get("algorand", ci)
near = cfg.get("near", ci)
aptos = cfg.get("aptos", ci)
sui = cfg.get("sui", False)
evm2 = cfg.get("evm2", ci)
solana = cfg.get("solana", ci)
pythnet = cfg.get("pythnet", False)
terra_classic = cfg.get("terra_classic", ci)
terra2 = cfg.get("terra2", ci)
wormchain = cfg.get("wormchain", ci)
spy_relayer = cfg.get("spy_relayer", ci)
ci_tests = cfg.get("ci_tests", ci)
guardiand_debug = cfg.get("guardiand_debug", False)
node_metrics = cfg.get("node_metrics", False)
guardiand_governor = cfg.get("guardiand_governor", False)
ibc_relayer = cfg.get("ibc_relayer", False)
btc = cfg.get("btc", False)
if cfg.get("manual", False):
trigger_mode = TRIGGER_MODE_MANUAL
else:
trigger_mode = TRIGGER_MODE_AUTO
# namespace
if not ci:
    namespace_create(namespace)

def k8s_yaml_with_ns(objects):
    return k8s_yaml(namespace_inject(objects, namespace))
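
# Every manifest below is applied through this helper, so all devnet resources land in the configured namespace.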
local_resource(
    name = "const-gen",
    deps = ["scripts", "clients", "ethereum/.env.test"],
    cmd = 'tilt docker build -- --target const-export -f Dockerfile.const -o type=local,dest=. --build-arg num_guardians=%s .' % (num_guardians),
    env = {"DOCKER_BUILDKIT": "1"},
    allow_parallel = True,
    trigger_mode = trigger_mode,
)
# node
if bigTableKeyPath != "":
    k8s_yaml_with_ns(
        secret_yaml_generic(
            "node-bigtable-key",
            from_file = "bigtable-key.json=" + bigTableKeyPath,
        ),
    )
docker_build(
    ref = "guardiand-image",
    context = ".",
    dockerfile = "node/Dockerfile",
    target = "build",
    ignore = ["./sdk/js"],
)
def command_with_dlv(argv):
    return [
        "/dlv",
        "--listen=0.0.0.0:2345",
        "--accept-multiclient",
        "--headless=true",
        "--api-version=2",
        "--continue=true",
        "exec",
        argv[0],
        "--",
    ] + argv[1:]
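
# Illustrative transformation (argv is hypothetical): command_with_dlv(["/guardiand", "node", "--env", "dev"])
# returns ["/dlv", "--listen=0.0.0.0:2345", ..., "exec", "/guardiand", "--", "node", "--env", "dev"],
# i.e. guardiand runs under the Delve debugger with its original arguments preserved.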
def build_node_yaml():
    node_yaml = read_yaml_stream("devnet/node.yaml")
    node_yaml_with_replicas = set_replicas_in_statefulset(node_yaml, "guardian", num_guardians)

    for obj in node_yaml_with_replicas:
        if obj["kind"] == "StatefulSet" and obj["metadata"]["name"] == "guardian":
            container = obj["spec"]["template"]["spec"]["containers"][0]
            if container["name"] != "guardiand":
                fail("container 0 is not guardiand")

            container["command"] += ["--devNumGuardians", str(num_guardians)]

            if guardiand_debug:
                container["command"] = command_with_dlv(container["command"])
                container["command"] += ["--logLevel=debug"]
                print(container["command"])
            elif ci:
                container["command"] += ["--logLevel=warn"]

            if gcpProject != "":
                container["command"] += [
                    "--bigTablePersistenceEnabled",
                    "--bigTableInstanceName",
                    "wormhole",
                    "--bigTableTableName",
                    "v2Events",
                    "--bigTableTopicName",
                    "new-vaa-devnet",
                    "--bigTableKeyPath",
                    "/tmp/mounted-keys/bigtable-key.json",
                    "--bigTableGCPProject",
                    gcpProject,
                ]

            if aptos:
                container["command"] += [
                    "--aptosRPC",
                    "http://aptos:8080",
                    "--aptosAccount",
                    "de0036a9600559e295d5f6802ef6f3f802f510366e0c23912b0655d972166017",
                    "--aptosHandle",
                    "0xde0036a9600559e295d5f6802ef6f3f802f510366e0c23912b0655d972166017::state::WormholeMessageHandle",
                ]

            if sui:
                container["command"] += [
                    "--suiRPC",
                    "http://sui:9002",
                    # In testnet and mainnet, you will need to also specify the suiPackage argument. In devnet, we
                    # subscribe to event traffic purely based on the account since that is the only thing that is
                    # deterministic.
                    # "--suiPackage",
                    # "0x.....",
                    "--suiAccount",
                    "0x2acab6bb0e4722e528291bc6ca4f097e18ce9331",
                    "--suiWS",
                    "sui:9001",
                ]

            if evm2:
                container["command"] += [
                    "--bscRPC",
                    "ws://eth-devnet2:8545",
                ]
            else:
                container["command"] += [
                    "--bscRPC",
                    "ws://eth-devnet:8545",
                ]

            if solana:
                container["command"] += [
                    "--solanaRPC",
                    "http://solana-devnet:8899",
                ]

            if pythnet:
                container["command"] += [
                    "--pythnetRPC",
                    # "http://solana-devnet:8899",
                    "http://pythnet.rpcpool.com",
                    "--pythnetWS",
                    # "ws://solana-devnet:8900",
                    "wss://pythnet.rpcpool.com",
                    "--pythnetContract",
                    "H3fxXJ86ADW2PNuDDmZJg6mzTtPxkYCpNuQUTgmJ7AjU",
                ]

            if terra_classic:
                container["command"] += [
                    "--terraWS",
                    "ws://terra-terrad:26657/websocket",
                    "--terraLCD",
                    "http://terra-terrad:1317",
                    "--terraContract",
                    "terra18vd8fpwxzck93qlwghaj6arh4p7c5n896xzem5",
                ]

            if terra2:
                container["command"] += [
                    "--terra2WS",
                    "ws://terra2-terrad:26657/websocket",
                    "--terra2LCD",
                    "http://terra2-terrad:1317",
                    "--terra2Contract",
                    "terra14hj2tavq8fpesdwxxcu44rty3hh90vhujrvcmstl4zr3txmfvw9ssrc8au",
                ]

            if algorand:
                container["command"] += [
                    "--algorandAppID",
                    "4",
                    "--algorandIndexerRPC",
                    "http://algorand:8980",
                    "--algorandIndexerToken",
                    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "--algorandAlgodRPC",
                    "http://algorand:4001",
                    "--algorandAlgodToken",
                    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                ]

            if guardiand_governor:
                container["command"] += [
                    "--chainGovernorEnabled",
                ]

            if near:
                container["command"] += [
                    "--nearRPC",
                    "http://near:3030",
                    "--nearContract",
                    "wormhole.test.near",
                ]

            if wormchain:
                container["command"] += [
                    "--wormchainWS",
                    "ws://wormchain:26657/websocket",
                    "--wormchainLCD",
                    "http://wormchain:1317",
                ]

    return encode_yaml_stream(node_yaml_with_replicas)
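
# Render the guardian StatefulSet with the per-chain flags selected above and apply it to the cluster.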
k8s_yaml_with_ns(build_node_yaml())
guardian_resource_deps = ["eth-devnet"]
if evm2:
    guardian_resource_deps = guardian_resource_deps + ["eth-devnet2"]
if solana or pythnet:
    guardian_resource_deps = guardian_resource_deps + ["solana-devnet"]
if near:
    guardian_resource_deps = guardian_resource_deps + ["near"]
if terra_classic:
    guardian_resource_deps = guardian_resource_deps + ["terra-terrad"]
if terra2:
    guardian_resource_deps = guardian_resource_deps + ["terra2-terrad"]
if algorand:
    guardian_resource_deps = guardian_resource_deps + ["algorand"]
if aptos:
    guardian_resource_deps = guardian_resource_deps + ["aptos"]
if wormchain:
    guardian_resource_deps = guardian_resource_deps + ["wormchain"]
if sui:
    guardian_resource_deps = guardian_resource_deps + ["sui"]
k8s_resource(
    "guardian",
    resource_deps = guardian_resource_deps,
    port_forwards = [
        port_forward(6060, name = "Debug/Status Server [:6060]", host = webHost),
        port_forward(7070, name = "Public gRPC [:7070]", host = webHost),
        port_forward(7071, name = "Public REST [:7071]", host = webHost),
        port_forward(2345, name = "Debugger [:2345]", host = webHost),
    ],
    labels = ["guardian"],
    trigger_mode = trigger_mode,
)
# guardian set update - triggered by "tilt args" changes
if num_guardians >= 2 and ci == False:
    local_resource(
        name = "guardian-set-update",
        resource_deps = guardian_resource_deps + ["guardian"],
        deps = ["scripts/send-vaa.sh", "clients/eth"],
        cmd = './scripts/update-guardian-set.sh %s %s %s' % (num_guardians, webHost, namespace),
        labels = ["guardian"],
        trigger_mode = trigger_mode,
    )
# grafana + prometheus for node metrics
if node_metrics:
    dashboard = read_json("dashboards/Wormhole.json")

    dashboard_yaml = {
        "apiVersion": "v1",
        "kind": "ConfigMap",
        "metadata": {
            "name": "grafana-dashboards-json",
        },
        "data": {
            "wormhole.json": encode_json(dashboard),
        },
    }

    k8s_yaml_with_ns(encode_yaml(dashboard_yaml))

    k8s_yaml_with_ns("devnet/node-metrics.yaml")

    k8s_resource(
        "prometheus-server",
        resource_deps = ["guardian"],
        port_forwards = [
            port_forward(9099, name = "Prometheus [:9099]", host = webHost),
        ],
        labels = ["guardian"],
        trigger_mode = trigger_mode,
    )

    k8s_resource(
        "grafana",
        resource_deps = ["prometheus-server"],
        port_forwards = [
            port_forward(3033, name = "Grafana UI [:3033]", host = webHost),
        ],
        labels = ["guardian"],
        trigger_mode = trigger_mode,
    )
# spy
k8s_yaml_with_ns("devnet/spy.yaml")
k8s_resource(
    "spy",
    resource_deps = ["guardian"],
    port_forwards = [
        port_forward(6061, container_port = 6060, name = "Debug/Status Server [:6061]", host = webHost),
        port_forward(7072, name = "Spy gRPC [:7072]", host = webHost),
    ],
    labels = ["guardian"],
    trigger_mode = trigger_mode,
)
if solana or pythnet:
    # solana client cli (used for devnet setup)
    docker_build(
        ref = "bridge-client",
        context = ".",
        only = ["./proto", "./solana", "./clients"],
        dockerfile = "solana/Dockerfile.client",
        # Ignore target folders from local (non-container) development.
        ignore = ["./solana/*/target"],
    )

    # solana smart contract
    docker_build(
        ref = "solana-contract",
        context = "solana",
        dockerfile = "solana/Dockerfile",
        target = "builder",
        build_args = {"BRIDGE_ADDRESS": "Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o"},
    )

    # solana local devnet
    k8s_yaml_with_ns("devnet/solana-devnet.yaml")

    k8s_resource(
        "solana-devnet",
        port_forwards = [
            port_forward(8899, name = "Solana RPC [:8899]", host = webHost),
            port_forward(8900, name = "Solana WS [:8900]", host = webHost),
            port_forward(9000, name = "Solana PubSub [:9000]", host = webHost),
        ],
        resource_deps = ["const-gen"],
        labels = ["solana"],
        trigger_mode = trigger_mode,
    )
# eth devnet
docker_build(
    ref = "eth-node",
    context = "./ethereum",
    dockerfile = "./ethereum/Dockerfile",

    # ignore local node_modules (in case they're present)
    ignore = ["./ethereum/node_modules"],

    # sync external scripts for incremental development
    # (everything else needs to be restarted from scratch for determinism)
    #
    # This relies on --update-mode=exec to work properly with a non-root user.
    # https://github.com/tilt-dev/tilt/issues/3708
    live_update = [
        sync("./ethereum/src", "/home/node/app/src"),
    ],
)
if spy_relayer:
    docker_build(
        ref = "redis",
        context = ".",
        only = ["./third_party"],
        dockerfile = "third_party/redis/Dockerfile",
    )

    k8s_yaml_with_ns("devnet/redis.yaml")

    k8s_resource(
        "redis",
        port_forwards = [
            port_forward(6379, name = "Redis Default [:6379]", host = webHost),
        ],
        labels = ["spy-relayer"],
        trigger_mode = trigger_mode,
    )

    docker_build(
        ref = "spy-relay-image",
        context = "relayer/spy_relayer",
        dockerfile = "relayer/spy_relayer/Dockerfile",
        live_update = [],
    )

    k8s_yaml_with_ns("devnet/spy-listener.yaml")

    k8s_resource(
        "spy-listener",
        resource_deps = ["guardian", "redis", "spy"],
        port_forwards = [
            port_forward(6062, container_port = 6060, name = "Debug/Status Server [:6062]", host = webHost),
            port_forward(4201, name = "REST [:4201]", host = webHost),
            port_forward(8082, name = "Prometheus [:8082]", host = webHost),
        ],
        labels = ["spy-relayer"],
        trigger_mode = trigger_mode,
    )

    k8s_yaml_with_ns("devnet/spy-relayer.yaml")

    k8s_resource(
        "spy-relayer",
        resource_deps = ["guardian", "redis"],
        port_forwards = [
            port_forward(8083, name = "Prometheus [:8083]", host = webHost),
        ],
        labels = ["spy-relayer"],
        trigger_mode = trigger_mode,
    )

    k8s_yaml_with_ns("devnet/spy-wallet-monitor.yaml")

    k8s_resource(
        "spy-wallet-monitor",
        resource_deps = ["guardian", "redis"],
        port_forwards = [
            port_forward(8084, name = "Prometheus [:8084]", host = webHost),
        ],
        labels = ["spy-relayer"],
        trigger_mode = trigger_mode,
    )
k8s_yaml_with_ns("devnet/eth-devnet.yaml")
k8s_resource(
    "eth-devnet",
    port_forwards = [
        port_forward(8545, name = "Ganache RPC [:8545]", host = webHost),
    ],
    resource_deps = ["const-gen"],
    labels = ["evm"],
    trigger_mode = trigger_mode,
)
if evm2:
    k8s_yaml_with_ns("devnet/eth-devnet2.yaml")

    k8s_resource(
        "eth-devnet2",
        port_forwards = [
            port_forward(8546, name = "Ganache RPC [:8546]", host = webHost),
        ],
        resource_deps = ["const-gen"],
        labels = ["evm"],
        trigger_mode = trigger_mode,
    )
if ci_tests:
    docker_build(
        ref = "sdk-test-image",
        context = ".",
        dockerfile = "testing/Dockerfile.sdk.test",
        only = [],
        live_update = [
            sync("./sdk/js/src", "/app/sdk/js/src"),
            sync("./testing", "/app/testing"),
        ],
    )
    docker_build(
        ref = "spydk-test-image",
        context = ".",
        dockerfile = "testing/Dockerfile.spydk.test",
        only = [],
        live_update = [
            sync("./spydk/js/src", "/app/spydk/js/src"),
            sync("./testing", "/app/testing"),
        ],
    )

    k8s_yaml_with_ns("devnet/tests.yaml")

    # separate resources to parallelize docker builds
    k8s_resource(
        "sdk-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [],  # testing/sdk.sh handles waiting for spy; having no deps starts the build earlier
    )
    k8s_resource(
        "spydk-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [],  # testing/spydk.sh handles waiting for spy; having no deps starts the build earlier
    )
if terra_classic:
    docker_build(
        ref = "terra-image",
        context = "./terra/devnet",
        dockerfile = "terra/devnet/Dockerfile",
    )

    docker_build(
        ref = "terra-contracts",
        context = "./terra",
        dockerfile = "./terra/Dockerfile",
    )

    k8s_yaml_with_ns("devnet/terra-devnet.yaml")

    k8s_resource(
        "terra-terrad",
        port_forwards = [
            port_forward(26657, name = "Terra RPC [:26657]", host = webHost),
            port_forward(1317, name = "Terra LCD [:1317]", host = webHost),
        ],
        resource_deps = ["const-gen"],
        labels = ["terra"],
        trigger_mode = trigger_mode,
    )

    k8s_resource(
        "terra-postgres",
        labels = ["terra"],
        trigger_mode = trigger_mode,
    )

    k8s_resource(
        "terra-fcd",
        resource_deps = ["terra-terrad", "terra-postgres"],
        port_forwards = [port_forward(3060, name = "Terra FCD [:3060]", host = webHost)],
        labels = ["terra"],
        trigger_mode = trigger_mode,
    )
if terra2 or wormchain:
    docker_build(
        ref = "cosmwasm_artifacts",
        context = ".",
        dockerfile = "./cosmwasm/Dockerfile",
        target = "artifacts",
    )
if terra2:
    docker_build(
        ref = "terra2-image",
        context = "./cosmwasm/deployment/terra2/devnet",
        dockerfile = "./cosmwasm/deployment/terra2/devnet/Dockerfile",
    )

    docker_build(
        ref = "terra2-deploy",
        context = "./cosmwasm/deployment/terra2",
        dockerfile = "./cosmwasm/Dockerfile.deploy",
    )

    k8s_yaml_with_ns("devnet/terra2-devnet.yaml")

    k8s_resource(
        "terra2-terrad",
        port_forwards = [
            port_forward(26658, container_port = 26657, name = "Terra 2 RPC [:26658]", host = webHost),
            port_forward(1318, container_port = 1317, name = "Terra 2 LCD [:1318]", host = webHost),
        ],
        resource_deps = ["const-gen"],
        labels = ["terra2"],
        trigger_mode = trigger_mode,
    )

    k8s_resource(
        "terra2-postgres",
        labels = ["terra2"],
        trigger_mode = trigger_mode,
    )

    k8s_resource(
        "terra2-fcd",
        resource_deps = ["terra2-terrad", "terra2-postgres"],
        port_forwards = [port_forward(3061, container_port = 3060, name = "Terra 2 FCD [:3061]", host = webHost)],
        labels = ["terra2"],
        trigger_mode = trigger_mode,
    )
if algorand:
    k8s_yaml_with_ns("devnet/algorand-devnet.yaml")

    docker_build(
        ref = "algorand-algod",
        context = "algorand/sandbox-algorand",
        dockerfile = "algorand/sandbox-algorand/images/algod/Dockerfile",
    )

    docker_build(
        ref = "algorand-indexer",
        context = "algorand/sandbox-algorand",
        dockerfile = "algorand/sandbox-algorand/images/indexer/Dockerfile",
    )

    docker_build(
        ref = "algorand-contracts",
        context = "algorand",
        dockerfile = "algorand/Dockerfile",
        ignore = ["algorand/test/*.*"],
    )

    k8s_resource(
        "algorand",
        port_forwards = [
            port_forward(4001, name = "Algod [:4001]", host = webHost),
            port_forward(4002, name = "KMD [:4002]", host = webHost),
            port_forward(8980, name = "Indexer [:8980]", host = webHost),
        ],
        resource_deps = ["const-gen"],
        labels = ["algorand"],
        trigger_mode = trigger_mode,
    )
if sui:
    k8s_yaml_with_ns("devnet/sui-devnet.yaml")

    docker_build(
        ref = "sui-node",
        context = "sui",
        dockerfile = "sui/Dockerfile",
        ignore = ["./sui/sui.log*", "sui/sui.log*", "sui.log.*"],
        only = ["Dockerfile", "scripts"],
    )

    k8s_resource(
        "sui",
        port_forwards = [
            port_forward(9001, name = "WS [:9001]", host = webHost),
            port_forward(9002, name = "RPC [:9002]", host = webHost),
            port_forward(5003, name = "Faucet [:5003]", host = webHost),
            port_forward(9184, name = "Prometheus [:9184]", host = webHost),
        ],
        # resource_deps = ["const-gen"],
        labels = ["sui"],
        trigger_mode = trigger_mode,
    )
if near:
    k8s_yaml_with_ns("devnet/near-devnet.yaml")

    docker_build(
        ref = "near-node",
        context = "near",
        dockerfile = "near/Dockerfile",
        only = ["Dockerfile", "node_builder.sh", "start_node.sh", "README.md", "cert.pem"],
    )

    docker_build(
        ref = "near-deploy",
        context = "near",
        dockerfile = "near/Dockerfile.deploy",
        ignore = ["./test"],
    )

    k8s_resource(
        "near",
        port_forwards = [
            port_forward(3030, name = "Node [:3030]", host = webHost),
            port_forward(3031, name = "webserver [:3031]", host = webHost),
        ],
        resource_deps = ["const-gen"],
        labels = ["near"],
        trigger_mode = trigger_mode,
    )
if wormchain:
    docker_build(
        ref = "wormchaind-image",
        context = ".",
        dockerfile = "./wormchain/Dockerfile",
        build_args = {"num_guardians": str(num_guardians)},
        only = [],
        ignore = ["./wormchain/testing", "./wormchain/ts-sdk", "./wormchain/design", "./wormchain/vue", "./wormchain/build/wormchaind"],
    )

    docker_build(
        ref = "wormchain-deploy",
        context = "./wormchain/contracts",
        dockerfile = "./cosmwasm/Dockerfile.deploy",
    )

    def build_wormchain_yaml(yaml_path, num_instances):
        wormchain_yaml = read_yaml_stream(yaml_path)

        # set the number of replicas in the StatefulSet to be num_guardians
        wormchain_set = set_replicas_in_statefulset(wormchain_yaml, "wormchain", num_instances)

        # add a Service for each wormchain instance
        services = []
        for obj in wormchain_set:
            if obj["kind"] == "Service" and obj["metadata"]["name"] == "wormchain-0":
                # make a Service for each replica so we can resolve it by name from other pods.
                # copy wormchain-0's Service, then set the name and selector for the instance.
                for instance_num in list(range(1, num_instances)):
                    instance_name = 'wormchain-%s' % (instance_num)

                    # Copy the Service's properties to a new dict, by value, three levels deep.
                    # tl;dr - if the value is a dict, use a comprehension to copy it immutably.
                    service = { k: ({ k2: ({ k3: v3
                        for (k3, v3) in v2.items()} if type(v2) == "dict" else v2)
                        for (k2, v2) in v.items()} if type(v) == "dict" else v)
                        for (k, v) in obj.items()}

                    # add the name we want to be able to resolve via k8s DNS
                    service["metadata"]["name"] = instance_name

                    # add the name of the pod the service should connect to
                    service["spec"]["selector"] = { "statefulset.kubernetes.io/pod-name": instance_name }

                    services.append(service)

        return encode_yaml_stream(wormchain_set + services)

    wormchain_path = "devnet/wormchain.yaml"
    if num_guardians >= 2:
        # update wormchain's k8s config to spin up multiple instances
        k8s_yaml_with_ns(build_wormchain_yaml(wormchain_path, num_guardians))
    else:
        k8s_yaml_with_ns(wormchain_path)

    k8s_resource(
        "wormchain",
        port_forwards = [
            port_forward(1319, container_port = 1317, name = "REST [:1319]", host = webHost),
            port_forward(9090, container_port = 9090, name = "GRPC", host = webHost),
            port_forward(26659, container_port = 26657, name = "TENDERMINT [:26659]", host = webHost),
        ],
        resource_deps = ["const-gen"],
        labels = ["wormchain"],
        trigger_mode = trigger_mode,
    )

    k8s_resource(
        "wormchain-deploy",
        resource_deps = ["wormchain"],
        labels = ["wormchain"],
        trigger_mode = trigger_mode,
    )
if ibc_relayer:
    docker_build(
        ref = "ibc-relayer-image",
        context = ".",
        dockerfile = "./wormchain/ibc-relayer/Dockerfile",
        only = [],
    )

    k8s_yaml_with_ns("devnet/ibc-relayer.yaml")

    k8s_resource(
        "ibc-relayer",
        port_forwards = [
            port_forward(7597, name = "HTTPDEBUG [:7597]", host = webHost),
        ],
        resource_deps = ["wormchain", "terra2-terrad"],
        labels = ["ibc-relayer"],
        trigger_mode = trigger_mode,
    )
if btc:
    k8s_yaml_with_ns("devnet/btc-localnet.yaml")

    docker_build(
        ref = "btc-node",
        context = "bitcoin",
        dockerfile = "bitcoin/Dockerfile",
        target = "bitcoin-build",
    )

    k8s_resource(
        "btc",
        port_forwards = [
            port_forward(18556, name = "RPC [:18556]", host = webHost),
        ],
        labels = ["btc"],
        trigger_mode = trigger_mode,
    )
if aptos:
    k8s_yaml_with_ns("devnet/aptos-localnet.yaml")

    docker_build(
        ref = "aptos-node",
        context = "aptos",
        dockerfile = "aptos/Dockerfile",
        target = "aptos",
    )

    k8s_resource(
        "aptos",
        port_forwards = [
            port_forward(8080, name = "RPC [:8080]", host = webHost),
            port_forward(6181, name = "FullNode [:6181]", host = webHost),
            port_forward(8081, name = "Faucet [:8081]", host = webHost),
        ],
        resource_deps = ["const-gen"],
        labels = ["aptos"],
        trigger_mode = trigger_mode,
    )