# wormhole/Tiltfile
# This Tiltfile contains the deployment and build config for the Wormhole devnet.
#
# We use Buildkit cache mounts and careful layering to avoid unnecessary rebuilds - almost
# all source code changes result in small, incremental rebuilds. Dockerfiles are written such
# that, for example, changing the contract source code won't cause Solana itself to be rebuilt.
#
load("ext://namespace", "namespace_create", "namespace_inject")
load("ext://secret", "secret_yaml_generic")
# set the replica value of a StatefulSet
def set_replicas_in_statefulset(config_yaml, statefulset_name, num_replicas):
    # Walk every parsed manifest and patch the replica count on the
    # StatefulSet whose metadata.name matches. The list is mutated in
    # place and returned so the call can be chained.
    for manifest in config_yaml:
        is_target = manifest["kind"] == "StatefulSet" and manifest["metadata"]["name"] == statefulset_name
        if is_target:
            manifest["spec"]["replicas"] = num_replicas
    return config_yaml
# set the env value of all containers in all jobs
def set_env_in_jobs(config_yaml, name, value):
    # Append {"name": name, "value": value} to the env list of every
    # container in every Job manifest, creating the list when absent.
    # Mutates config_yaml in place and returns it for chaining.
    for obj in config_yaml:
        if obj["kind"] == "Job":
            for container in obj["spec"]["template"]["spec"]["containers"]:
                # idiomatic membership test ("x not in y", not "not x in y")
                if "env" not in container:
                    container["env"] = []
                container["env"].append({"name": name, "value": value})
    return config_yaml
# --- Runtime configuration ----------------------------------------------
# Parse "tilt up -- --flag ..." arguments into feature toggles for the
# devnet. Scrape artifacts (interleaved commit dates) removed and the
# if/else bodies re-indented so the file parses again.

allow_k8s_contexts("ci")

# Disable telemetry by default
analytics_settings(False)

# Moar updates (default is 3)
update_settings(max_parallel_updates = 10)

# Runtime configuration
config.define_bool("ci", False, "We are running in CI")
config.define_bool("manual", False, "Set TRIGGER_MODE_MANUAL by default")

config.define_string("num", False, "Number of guardian nodes to run")

# You do not usually need to set this argument - this argument is for debugging only. If you do use a different
# namespace, note that the "wormhole" namespace is hardcoded in tests and don't forget specifying the argument
# when running "tilt down".
#
config.define_string("namespace", False, "Kubernetes namespace to use")

# When running Tilt on a server, this can be used to set the public hostname Tilt runs on
# for service links in the UI to work.
config.define_string("webHost", False, "Public hostname for port forwards")

# Log verbosity for the guardian node; defaults to "warn" in CI, "info" otherwise (see below).
config.define_string("guardiand_loglevel", False, "Log level for guardiand (debug, info, warn, error, dpanic, panic, fatal)")

# Components
config.define_bool("near", False, "Enable Near component")
config.define_bool("sui", False, "Enable Sui component")
config.define_bool("btc", False, "Enable BTC component")
config.define_bool("aptos", False, "Enable Aptos component")
config.define_bool("algorand", False, "Enable Algorand component")
config.define_bool("evm2", False, "Enable second Eth component")
config.define_bool("solana", False, "Enable Solana component")
config.define_bool("solana_watcher", False, "Enable Solana watcher on guardian")
config.define_bool("pythnet", False, "Enable PythNet component")
config.define_bool("terra_classic", False, "Enable Terra Classic component")
config.define_bool("terra2", False, "Enable Terra 2 component")
config.define_bool("ci_tests", False, "Enable tests runner component")
config.define_bool("guardiand_debug", False, "Enable dlv endpoint for guardiand")
config.define_bool("node_metrics", False, "Enable Prometheus & Grafana for Guardian metrics")
config.define_bool("guardiand_governor", False, "Enable chain governor in guardiand")
config.define_bool("wormchain", False, "Enable a wormchain node")
config.define_bool("ibc_relayer", False, "Enable IBC relayer between cosmos chains")
config.define_bool("redis", False, "Enable a redis instance")
config.define_bool("generic_relayer", False, "Enable the generic relayer off-chain component")
config.define_bool("query_server", False, "Enable cross-chain query server")

cfg = config.parse()
num_guardians = int(cfg.get("num", "1"))
namespace = cfg.get("namespace", "wormhole")
webHost = cfg.get("webHost", "localhost")
ci = cfg.get("ci", False)

# Most components default to enabled in CI and disabled otherwise.
algorand = cfg.get("algorand", ci)
near = cfg.get("near", ci)
aptos = cfg.get("aptos", ci)
sui = cfg.get("sui", ci)
evm2 = cfg.get("evm2", ci)
solana = cfg.get("solana", ci)
pythnet = cfg.get("pythnet", False)
# The Solana watcher is needed whenever either Solana-family chain runs.
solana_watcher = cfg.get("solana_watcher", solana or pythnet)
terra_classic = cfg.get("terra_classic", ci)
terra2 = cfg.get("terra2", ci)
wormchain = cfg.get("wormchain", ci)
ci_tests = cfg.get("ci_tests", ci)
guardiand_debug = cfg.get("guardiand_debug", False)
node_metrics = cfg.get("node_metrics", False)
guardiand_governor = cfg.get("guardiand_governor", False)
ibc_relayer = cfg.get("ibc_relayer", ci)
btc = cfg.get("btc", False)
redis = cfg.get("redis", ci)
generic_relayer = cfg.get("generic_relayer", ci)
query_server = cfg.get("query_server", ci)

# Quieter guardian logs in CI to keep test output readable.
if ci:
    guardiand_loglevel = cfg.get("guardiand_loglevel", "warn")
else:
    guardiand_loglevel = cfg.get("guardiand_loglevel", "info")

if cfg.get("manual", False):
    trigger_mode = TRIGGER_MODE_MANUAL
else:
    trigger_mode = TRIGGER_MODE_AUTO

# namespace
if not ci:
    namespace_create(namespace)
# Register Kubernetes objects after injecting the configured namespace.
def k8s_yaml_with_ns(objects):
    namespaced_objects = namespace_inject(objects, namespace)
    return k8s_yaml(namespaced_objects)
# Base images: CLI generator, devnet-constants generator, and the guardian
# node itself. (Interleaved scrape-date line removed; arguments re-indented.)
docker_build(
    ref = "cli-gen",
    context = ".",
    dockerfile = "Dockerfile.cli",
)

docker_build(
    ref = "const-gen",
    context = ".",
    dockerfile = "Dockerfile.const",
    # the constants depend on how many guardians the devnet runs
    build_args = {"num_guardians": "%s" % (num_guardians)},
)

# node
docker_build(
    ref = "guardiand-image",
    context = ".",
    dockerfile = "node/Dockerfile",
    target = "build",
    ignore = ["./sdk/js", "./relayer"],
)
# Wrap a container command so it runs under the Delve debugger, listening
# on :2345 and continuing execution immediately. argv[0] is the binary to
# exec; the remaining arguments are passed through after "--".
def command_with_dlv(argv):
    dlv_prefix = [
        "/dlv",
        "--listen=0.0.0.0:2345",
        "--accept-multiclient",
        "--headless=true",
        "--api-version=2",
        "--continue=true",
        "exec",
    ]
    return dlv_prefix + [argv[0], "--"] + argv[1:]
# Build the guardian StatefulSet YAML: set the replica count and append
# per-chain watcher flags to the guardiand command line based on which
# components are enabled. (Scrape-date lines removed; body re-indented.)
def build_node_yaml():
    node_yaml = read_yaml_stream("devnet/node.yaml")
    node_yaml_with_replicas = set_replicas_in_statefulset(node_yaml, "guardian", num_guardians)
    for obj in node_yaml_with_replicas:
        if obj["kind"] == "StatefulSet" and obj["metadata"]["name"] == "guardian":
            container = obj["spec"]["template"]["spec"]["containers"][0]
            if container["name"] != "guardiand":
                fail("container 0 is not guardiand")

            container["command"] += ["--logLevel="+guardiand_loglevel]

            if guardiand_debug:
                container["command"] = command_with_dlv(container["command"])
                print(container["command"])

            if aptos:
                container["command"] += [
                    "--aptosRPC",
                    "http://aptos:8080",
                    "--aptosAccount",
                    "de0036a9600559e295d5f6802ef6f3f802f510366e0c23912b0655d972166017",
                    "--aptosHandle",
                    "0xde0036a9600559e295d5f6802ef6f3f802f510366e0c23912b0655d972166017::state::WormholeMessageHandle",
                ]

            if sui:
                container["command"] += [
                    "--suiRPC",
                    "http://sui:9000",
                    "--suiMoveEventType",
                    "0x320a40bff834b5ffa12d7f5cc2220dd733dd9e8e91c425800203d06fb2b1fee8::publish_message::WormholeMessage",
                    "--suiWS",
                    "ws://sui:9000",
                ]

            # the "bsc" watcher is pointed at the second eth devnet when it
            # exists, otherwise at the first one
            if evm2:
                container["command"] += [
                    "--bscRPC",
                    "ws://eth-devnet2:8545",
                ]
            else:
                container["command"] += [
                    "--bscRPC",
                    "ws://eth-devnet:8545",
                ]

            if solana_watcher:
                container["command"] += [
                    "--solanaRPC",
                    "http://solana-devnet:8899",
                ]

            # NOTE: pythnet currently points at the public endpoints, not the
            # local devnet (the devnet values are kept in the comments).
            if pythnet:
                container["command"] += [
                    "--pythnetRPC",
                    # "http://solana-devnet:8899",
                    "http://pythnet.rpcpool.com",
                    "--pythnetWS",
                    # "ws://solana-devnet:8900",
                    "wss://pythnet.rpcpool.com",
                    "--pythnetContract",
                    "H3fxXJ86ADW2PNuDDmZJg6mzTtPxkYCpNuQUTgmJ7AjU",
                ]

            if terra_classic:
                container["command"] += [
                    "--terraWS",
                    "ws://terra-terrad:26657/websocket",
                    "--terraLCD",
                    "http://terra-terrad:1317",
                    "--terraContract",
                    "terra14hj2tavq8fpesdwxxcu44rty3hh90vhujrvcmstl4zr3txmfvw9ssrc8au",
                ]

            if terra2:
                container["command"] += [
                    "--terra2WS",
                    "ws://terra2-terrad:26657/websocket",
                    "--terra2LCD",
                    "http://terra2-terrad:1317",
                    "--terra2Contract",
                    "terra14hj2tavq8fpesdwxxcu44rty3hh90vhujrvcmstl4zr3txmfvw9ssrc8au",
                ]

            if algorand:
                container["command"] += [
                    "--algorandAppID",
                    "1004",
                    "--algorandIndexerRPC",
                    "http://algorand:8980",
                    "--algorandIndexerToken",
                    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                    "--algorandAlgodRPC",
                    "http://algorand:4001",
                    "--algorandAlgodToken",
                    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                ]

            if guardiand_governor:
                container["command"] += [
                    "--chainGovernorEnabled"
                ]

            if near:
                container["command"] += [
                    "--nearRPC",
                    "http://near:3030",
                    "--nearContract",
                    "wormhole.test.near"
                ]

            if wormchain:
                container["command"] += [
                    "--wormchainURL",
                    "wormchain:9090",
                    "--accountantWS",
                    "http://wormchain:26657",
                    "--accountantContract",
                    "wormhole14hj2tavq8fpesdwxxcu44rty3hh90vhujrvcmstl4zr3txmfvw9srrg465",
                    "--accountantKeyPath",
                    "/tmp/mounted-keys/wormchain/accountantKey",
                    "--accountantKeyPassPhrase",
                    "test0000",
                    "--accountantCheckEnabled",
                    "true",
                    "--accountantNttContract",
                    "wormhole17p9rzwnnfxcjp32un9ug7yhhzgtkhvl9jfksztgw5uh69wac2pgshdnj3k",
                    "--accountantNttKeyPath",
                    "/tmp/mounted-keys/wormchain/accountantNttKey",
                    "--accountantNttKeyPassPhrase",
                    "test0000",
                    "--ibcContract",
                    "wormhole1nc5tatafv6eyq7llkr2gv50ff9e22mnf70qgjlv737ktmt4eswrq0kdhcj",
                    "--ibcWS",
                    "ws://wormchain:26657/websocket",
                    "--ibcLCD",
                    "http://wormchain:1317",
                    "--gatewayRelayerContract",
                    "wormhole1wn625s4jcmvk0szpl85rj5azkfc6suyvf75q6vrddscjdphtve8sca0pvl",
                    "--gatewayRelayerKeyPath",
                    "/tmp/mounted-keys/wormchain/gwrelayerKey",
                    "--gatewayRelayerKeyPassPhrase",
                    "test0000",
                    "--gatewayContract",
                    "wormhole1ghd753shjuwexxywmgs4xz7x2q732vcnkm6h2pyv9s6ah3hylvrqtm7t3h",
                    "--gatewayWS",
                    "ws://wormchain:26657/websocket",
                    "--gatewayLCD",
                    "http://wormchain:1317"
                ]

    return encode_yaml_stream(node_yaml_with_replicas)
# Guardian deployment plus its per-component dependency list, the optional
# guardian-set-update job, node metrics, and the spy.
# (Scrape-date lines removed; argument lists re-indented.)
k8s_yaml_with_ns(build_node_yaml())

guardian_resource_deps = ["eth-devnet"]
if evm2:
    guardian_resource_deps = guardian_resource_deps + ["eth-devnet2"]
if solana_watcher:
    guardian_resource_deps = guardian_resource_deps + ["solana-devnet"]
if near:
    guardian_resource_deps = guardian_resource_deps + ["near"]
if terra_classic:
    guardian_resource_deps = guardian_resource_deps + ["terra-terrad"]
if terra2:
    guardian_resource_deps = guardian_resource_deps + ["terra2-terrad"]
if algorand:
    guardian_resource_deps = guardian_resource_deps + ["algorand"]
if aptos:
    guardian_resource_deps = guardian_resource_deps + ["aptos"]
if wormchain:
    guardian_resource_deps = guardian_resource_deps + ["wormchain", "wormchain-deploy"]
if sui:
    guardian_resource_deps = guardian_resource_deps + ["sui"]

k8s_resource(
    "guardian",
    resource_deps = guardian_resource_deps,
    port_forwards = [
        port_forward(6060, name = "Debug/Status Server [:6060]", host = webHost),
        port_forward(7070, name = "Public gRPC [:7070]", host = webHost),
        port_forward(7071, name = "Public REST [:7071]", host = webHost),
        port_forward(2345, name = "Debugger [:2345]", host = webHost),
    ],
    labels = ["guardian"],
    trigger_mode = trigger_mode,
)

# guardian set update - triggered by "tilt args" changes
if num_guardians >= 2 and ci == False:
    local_resource(
        name = "guardian-set-update",
        resource_deps = guardian_resource_deps + ["guardian"],
        deps = ["scripts/send-vaa.sh", "clients/eth"],
        cmd = './scripts/update-guardian-set.sh %s %s %s' % (num_guardians, webHost, namespace),
        labels = ["guardian"],
        trigger_mode = trigger_mode,
    )

# grafana + prometheus for node metrics
if node_metrics:
    dashboard = read_json("dashboards/Wormhole.json")

    dashboard_yaml = {
        "apiVersion": "v1",
        "kind": "ConfigMap",
        "metadata": {
            "name": "grafana-dashboards-json"
        },
        "data": {
            "wormhole.json": encode_json(dashboard)
        }
    }
    k8s_yaml_with_ns(encode_yaml(dashboard_yaml))
    k8s_yaml_with_ns("devnet/node-metrics.yaml")

    k8s_resource(
        "prometheus-server",
        resource_deps = ["guardian"],
        port_forwards = [
            port_forward(9099, name = "Prometheus [:9099]", host = webHost),
        ],
        labels = ["guardian"],
        trigger_mode = trigger_mode,
    )

    k8s_resource(
        "grafana",
        resource_deps = ["prometheus-server"],
        port_forwards = [
            port_forward(3033, name = "Grafana UI [:3033]", host = webHost),
        ],
        labels = ["guardian"],
        trigger_mode = trigger_mode,
    )

# spy
k8s_yaml_with_ns("devnet/spy.yaml")
k8s_resource(
    "spy",
    resource_deps = ["guardian"],
    port_forwards = [
        port_forward(6061, container_port = 6060, name = "Debug/Status Server [:6061]", host = webHost),
        port_forward(7072, name = "Spy gRPC [:7072]", host = webHost),
    ],
    labels = ["guardian"],
    trigger_mode = trigger_mode,
)
# Solana devnet (when solana or pythnet is enabled) and the Ethereum devnet
# node image. (Scrape-date lines and an interleaved commit-message blob
# removed; bodies re-indented.)
if solana or pythnet:
    # solana client cli (used for devnet setup)
    docker_build(
        ref = "bridge-client",
        context = ".",
        only = ["./proto", "./solana", "./clients"],
        dockerfile = "solana/Dockerfile.client",
        # Ignore target folders from local (non-container) development.
        ignore = ["./solana/*/target"],
    )

    # solana smart contract
    docker_build(
        ref = "solana-contract",
        context = "solana",
        dockerfile = "solana/Dockerfile",
        target = "builder",
        build_args = {"BRIDGE_ADDRESS": "Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o"}
    )

    # solana local devnet
    k8s_yaml_with_ns("devnet/solana-devnet.yaml")

    k8s_resource(
        "solana-devnet",
        port_forwards = [
            port_forward(8899, name = "Solana RPC [:8899]", host = webHost),
            port_forward(8900, name = "Solana WS [:8900]", host = webHost),
        ],
        labels = ["solana"],
        trigger_mode = trigger_mode,
    )

# eth devnet
docker_build(
    ref = "eth-node",
    context = ".",
    only = ["./ethereum", "./relayer/ethereum"],
    dockerfile = "./ethereum/Dockerfile",

    # ignore local node_modules (in case they're present)
    ignore = ["./ethereum/node_modules", "./relayer/ethereum/node_modules"],

    build_args = {"num_guardians": str(num_guardians), "dev": str(not ci)},

    # sync external scripts for incremental development
    # (everything else needs to be restarted from scratch for determinism)
    #
    # This relies on --update-mode=exec to work properly with a non-root user.
    # https://github.com/tilt-dev/tilt/issues/3708
    live_update = [
        sync("./ethereum/src", "/home/node/app/src"),
    ],
)
# Redis instances and the generic relayer engine. (Large interleaved
# commit-message blobs removed; bodies re-indented.)
if redis or generic_relayer:
    docker_build(
        ref = "redis",
        context = ".",
        only = ["./third_party"],
        dockerfile = "third_party/redis/Dockerfile",
    )

if redis:
    k8s_resource(
        "redis",
        port_forwards = [
            port_forward(6379, name = "Redis Default [:6379]", host = webHost),
        ],
        labels = ["redis"],
        trigger_mode = trigger_mode,
    )
    k8s_yaml_with_ns("devnet/redis.yaml")

if generic_relayer:
    # the relayer gets its own redis instance so queues don't collide with
    # the general-purpose one
    k8s_resource(
        "redis-relayer",
        port_forwards = [
            port_forward(6378, name = "Generic Relayer Redis [:6378]", host = webHost),
        ],
        labels = ["redis-relayer"],
        trigger_mode = trigger_mode,
    )
    k8s_yaml_with_ns("devnet/redis-relayer.yaml")

if generic_relayer:
    k8s_resource(
        "relayer-engine",
        resource_deps = ["guardian", "redis-relayer", "spy"],
        port_forwards = [
            port_forward(3003, container_port = 3000, name = "Bullmq UI [:3003]", host = webHost),
        ],
        labels = ["relayer-engine"],
        trigger_mode = trigger_mode,
    )
    docker_build(
        ref = "relayer-engine",
        context = ".",
        only = ["./relayer/generic_relayer", "./relayer/ethereum/ts-scripts/relayer/config"],
        dockerfile = "relayer/generic_relayer/relayer-engine-v2/Dockerfile",
        build_args = {"dev": str(not ci)}
    )
    k8s_yaml_with_ns("devnet/relayer-engine.yaml")
# Ethereum devnet resources; a second Anvil chain is added when evm2 is on.
# (Scrape-date lines removed; bodies re-indented.)
k8s_yaml_with_ns("devnet/eth-devnet.yaml")

k8s_resource(
    "eth-devnet",
    port_forwards = [
        port_forward(8545, name = "Anvil RPC [:8545]", host = webHost),
    ],
    labels = ["evm"],
    trigger_mode = trigger_mode,
)

if evm2:
    k8s_yaml_with_ns("devnet/eth-devnet2.yaml")

    k8s_resource(
        "eth-devnet2",
        port_forwards = [
            # host port 8546 maps onto the container's default Anvil port
            port_forward(8546, 8545, name = "Anvil RPC [:8546]", host = webHost),
        ],
        labels = ["evm"],
        trigger_mode = trigger_mode,
    )
# CI test runner images and jobs. (Scrape-date lines removed; bodies
# re-indented.)
if ci_tests:
    docker_build(
        ref = "sdk-test-image",
        context = ".",
        dockerfile = "testing/Dockerfile.sdk.test",
        only = [],
        live_update = [
            sync("./sdk/js/src", "/app/sdk/js/src"),
            sync("./testing", "/app/testing"),
        ],
    )
    docker_build(
        ref = "spydk-test-image",
        context = ".",
        dockerfile = "testing/Dockerfile.spydk.test",
        only = [],
        live_update = [
            sync("./spydk/js/src", "/app/spydk/js/src"),
            sync("./testing", "/app/testing"),
        ],
    )
    docker_build(
        ref = "query-sdk-test-image",
        context = ".",
        dockerfile = "testing/Dockerfile.querysdk.test",
        only = [],
        live_update = [
            sync("./sdk/js/src", "/app/sdk/js-query/src"),
            sync("./testing", "/app/testing"),
        ],
    )

    # the test jobs need to know how many guardians the devnet runs
    k8s_yaml_with_ns(encode_yaml_stream(set_env_in_jobs(read_yaml_stream("devnet/tests.yaml"), "NUM_GUARDIANS", str(num_guardians))))

    # separate resources to parallelize docker builds
    k8s_resource(
        "sdk-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [], # testing/sdk.sh handles waiting for spy, not having deps gets the build earlier
    )
    k8s_resource(
        "spydk-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [], # testing/spydk.sh handles waiting for spy, not having deps gets the build earlier
    )
    k8s_resource(
        "accountant-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [], # uses devnet-consts.json, but wormchain/contracts/tools/test_accountant.sh handles waiting for guardian, not having deps gets the build earlier
    )
    k8s_resource(
        "ntt-accountant-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [], # uses devnet-consts.json, but wormchain/contracts/tools/test_ntt_accountant.sh handles waiting for guardian, not having deps gets the build earlier
    )
    k8s_resource(
        "query-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [], # node/hack/query/test/test_query.sh handles waiting for guardian, not having deps gets the build earlier
    )
    k8s_resource(
        "query-sdk-ci-tests",
        labels = ["ci"],
        trigger_mode = trigger_mode,
        resource_deps = [], # testing/querysdk.sh handles waiting for query-server, not having deps gets the build earlier
    )
if terra_classic:
    # Terra Classic devnet node image.
    docker_build(
        ref = "terra-image",
        context = "./terra/devnet",
        dockerfile = "terra/devnet/Dockerfile",
    )

    # Contract build/deploy image for Terra Classic.
    docker_build(
        ref = "terra-contracts",
        context = "./terra",
        dockerfile = "./terra/Dockerfile",
    )

    k8s_yaml_with_ns("devnet/terra-devnet.yaml")

    k8s_resource(
        "terra-terrad",
        port_forwards = [
            port_forward(26657, name = "Terra RPC [:26657]", host = webHost),
            port_forward(1317, name = "Terra LCD [:1317]", host = webHost),
        ],
        labels = ["terra"],
        trigger_mode = trigger_mode,
    )
# The cosmwasm contract artifacts are shared between terra2 and wormchain.
if terra2 or wormchain:
    docker_build(
        ref = "cosmwasm_artifacts",
        context = ".",
        dockerfile = "./cosmwasm/Dockerfile",
        target = "artifacts",
    )

if terra2:
    # Terra 2 devnet node image.
    docker_build(
        ref = "terra2-image",
        context = "./cosmwasm/deployment/terra2/devnet",
        dockerfile = "./cosmwasm/deployment/terra2/devnet/Dockerfile",
    )

    # Contract deployment image for Terra 2.
    docker_build(
        ref = "terra2-deploy",
        context = "./cosmwasm/deployment/terra2",
        dockerfile = "./cosmwasm/Dockerfile.deploy",
    )

    k8s_yaml_with_ns("devnet/terra2-devnet.yaml")

    k8s_resource(
        "terra2-terrad",
        port_forwards = [
            # Offset host ports by one so they don't collide with Terra Classic's 26657/1317.
            port_forward(26658, container_port = 26657, name = "Terra 2 RPC [:26658]", host = webHost),
            port_forward(1318, container_port = 1317, name = "Terra 2 LCD [:1318]", host = webHost),
        ],
        labels = ["terra2"],
        trigger_mode = trigger_mode,
    )
if algorand:
    k8s_yaml_with_ns("devnet/algorand-devnet.yaml")

    # Sandbox node daemon image.
    docker_build(
        ref = "algorand-algod",
        context = "algorand/sandbox-algorand",
        dockerfile = "algorand/sandbox-algorand/images/algod/Dockerfile"
    )
    # Sandbox indexer image.
    docker_build(
        ref = "algorand-indexer",
        context = "algorand/sandbox-algorand",
        dockerfile = "algorand/sandbox-algorand/images/indexer/Dockerfile"
    )
    # Contract deployment image; test sources don't affect it, so skip them.
    docker_build(
        ref = "algorand-contracts",
        context = "algorand",
        dockerfile = "algorand/Dockerfile",
        ignore = ["algorand/test/*.*"]
    )
    k8s_resource(
        "algorand",
        port_forwards = [
            port_forward(4001, name = "Algod [:4001]", host = webHost),
            port_forward(4002, name = "KMD [:4002]", host = webHost),
            port_forward(8980, name = "Indexer [:8980]", host = webHost),
        ],
        labels = ["algorand"],
        trigger_mode = trigger_mode,
    )
if sui:
    # Sui localnet node.
    k8s_yaml_with_ns("devnet/sui-devnet.yaml")
    docker_build(
        ref = "sui-node",
        context = ".",
        dockerfile = "sui/Dockerfile",
        target = "sui",
        only = ["./sui"],
        # Keep node log files out of the build context so they don't trigger rebuilds.
        ignore = ["./sui/sui.log*", "sui/sui.log*", "sui.log.*"],
    )
    k8s_resource(
        "sui",
        port_forwards = [
            port_forward(9000, container_port = 9000, name = "RPC [:9000]", host = webHost),
            port_forward(9184, name = "Prometheus [:9184]", host = webHost),
        ],
        labels = ["sui"],
        trigger_mode = trigger_mode,
    )
if near:
    k8s_yaml_with_ns("devnet/near-devnet.yaml")
    # NEAR node image; only the scripts it actually uses are in the context.
    docker_build(
        ref = "near-node",
        context = "near",
        dockerfile = "near/Dockerfile",
        only = ["Dockerfile", "node_builder.sh", "start_node.sh", "README.md"],
    )
    # Contract deployment image; tests are irrelevant to the build.
    docker_build(
        ref = "near-deploy",
        context = "near",
        dockerfile = "near/Dockerfile.deploy",
        ignore = ["./test"]
    )
    k8s_resource(
        "near",
        port_forwards = [
            port_forward(3030, name = "Node [:3030]", host = webHost),
            port_forward(3031, name = "webserver [:3031]", host = webHost),
        ],
        labels = ["near"],
        trigger_mode = trigger_mode,
    )
if wormchain:
    # Wormchain node image; num_guardians is baked in at build time so the chain
    # is initialized with the right validator set.
    docker_build(
        ref = "wormchaind-image",
        context = ".",
        dockerfile = "./wormchain/Dockerfile",
        build_args = {"num_guardians": str(num_guardians)},
        only = [],
        ignore = ["./wormchain/testing", "./wormchain/ts-sdk", "./wormchain/design", "./wormchain/vue", "./wormchain/build/wormchaind"],
    )

    # Generated protobuf/Vue artifacts.
    docker_build(
        ref = "vue-export",
        context = ".",
        dockerfile = "./wormchain/Dockerfile.proto",
        target = "vue-export",
    )

    # Contract deployment image.
    docker_build(
        ref = "wormchain-deploy",
        context = "./wormchain",
        dockerfile = "./wormchain/Dockerfile.deploy",
    )
    def build_wormchain_yaml(yaml_path, num_instances):
        """Render the wormchain k8s config scaled to num_instances replicas.

        Reads the yaml at yaml_path, sets the "wormchain" StatefulSet replica
        count to num_instances, and clones the "wormchain-0" Service once per
        extra replica so each pod is resolvable by name (wormchain-1, ...).
        Returns the combined config as an encoded yaml stream.
        """
        wormchain_yaml = read_yaml_stream(yaml_path)
        # set the number of replicas in the StatefulSet to be num_guardians
        wormchain_set = set_replicas_in_statefulset(wormchain_yaml, "wormchain", num_instances)
        # add a Service for each wormchain instance
        services = []
        for obj in wormchain_set:
            if obj["kind"] == "Service" and obj["metadata"]["name"] == "wormchain-0":
                # make a Service for each replica so we can resolve it by name from other pods.
                # copy wormchain-0's Service then set the name and selector for the instance.
                for instance_num in list(range(1, num_instances)):
                    instance_name = 'wormchain-%s' % (instance_num)
                    # Copy the Service's properties to a new dict, by value, three levels deep.
                    # tl;dr - if the value is a dict, use a comprehension to copy it immutably.
                    # NOTE: Starlark's type() returns a string (e.g. "dict"), hence the
                    # string comparison; this is intentional and not a Python type() check.
                    service = { k: ({ k2: ({ k3:v3
                        for (k3,v3) in v2.items()} if type(v2) == "dict" else v2)
                        for (k2,v2) in v.items()} if type(v) == "dict" else v)
                        for (k,v) in obj.items()}
                    # add the name we want to be able to resolve via k8s DNS
                    service["metadata"]["name"] = instance_name
                    # add the name of the pod the service should connect to
                    service["spec"]["selector"] = { "statefulset.kubernetes.io/pod-name": instance_name }
                    services.append(service)
        return encode_yaml_stream(wormchain_set + services)
wormchain_path = "devnet/wormchain.yaml"
if num_guardians >= 2:
# update wormchain's k8s config to spin up multiple instances
k8s_yaml_with_ns(build_wormchain_yaml(wormchain_path, num_guardians))
else:
k8s_yaml_with_ns(wormchain_path)
k8s_resource(
"wormchain",
port_forwards = [
port_forward(1319, container_port = 1317, name = "REST [:1319]", host = webHost),
Node: Initial guardiand changes for accounting (#2181) * node: guardiand support for accounting Change-Id: I97fe1f6d6d71a5803881ff4c793e3c30f22b14d8 * Node: Tie accounting into the guardian Change-Id: I31600d18176f516b75b3eb046fd7ac6e54e1b133 * Node: accounting tests and metrics Change-Id: Ieb139772edf464ed1ab202861babeaf0f857ad6b * Node: minor tweak to accounting metrics Change-Id: Iad2b7e34870734f0c5e5d538c0ac86269a9a4728 * Node: load accounting key Change-Id: I228ce23e63b556d751000b97097202eda48650aa * More work in progress Change-Id: I85088d26c05cf02d26043cf6ee8c67efd13f2ea4 * Node: send observations to accounting contract Change-Id: Ib90909c2ee705d5e2a7e6cf3a6ec4ba7519e2eb1 * Node: Fix lint error in accounting tests Change-Id: Id73397cf45107243a9f68ba82bed3ccf2b0299b5 * Node: Need to copy libwasmvm.so Change-Id: I2856c8964ca082f1f4014d6db9fb1b2dc4e64409 * Node: Rename wormchain to wormconn Change-Id: I6782be733ebdd92b908228d3984a906aa4c795f7 * Node: moving accounting check after governor Change-Id: I064c77d30514715c6f8b6b5da50806a5e1adf657 * Node: Add accounting status to heartbeat Change-Id: I0ae3e476386cfaccc5c877ee1351dbe41c0358c7 * Node: start of accounting integration work Change-Id: I8ad206eb7fc07aa9e1a2ebc321f2c490ec36b51e * Node: More broadcast tx stuff Change-Id: Id2cc83df859310c013665eaa9c6ce3033bb1d9c5 * Node: Can actually send a request to accounting Change-Id: I6af5d59c53939f58b2f13ae501914bef260592f2 * Node: More accounting tx broadcast stuff Change-Id: If758e49f8928807e87053320e9330c7208aad490 * Node: config changes for accounting Change-Id: I2803cceb188d04c557a52aa9aa8ba7296da8879f * Node: More accounting changes Change-Id: Id979af0ec6ab8484bc094072f3febf39355351ca * Node/Acct: Use new observation request format * Node/acct: use new contract interface * Node/acct: fix minor copy/paste error * Node: Clean up comments and lint errors * Node: disable accounting in dev by default * Node: Fix test failure * Remove test code * Switch messages to 
debug, rename Run() * check for "out of gas" * Use worker routine to submit observations * Rename mutex to reflect what it protects * Create handleEvents func * Remove FinalizeObservation * Node/Acct: Trying to use tm library for watcher * Node/acct: switch watcher to use tm library * Node/Acct: Need separate WS parm for accounting * Node/Acct: Fix compile error in tests * Node/Acct: Minor rework * Node: add wormchain as a dep to remove stale code * Node/Acct: GS index is not correct in requests * Node/Acct: Peg connection error metric * Node/Acct: Add wormchain to node docker file * Node/Acct: Fix for double base64 decode * Node/Acct: Change public key to sender address * Node/Acct: Fix lint error * Node/Acct: key pass phrase change * Node/Acct: Pass guardian index in obs req * Node/Acct: No go on submit observation * Node/Acct: Don't double encode tx_hash * Node/Acct: Remove unneeded base64 encoding * Node/Acct: handle submit channel overflow * Node/Acct: Added a TODO to document a review issue * Node/Acct: Fix for checking if channel is full Co-authored-by: Conor Patrick <conorpp94@gmail.com>
2023-01-16 04:33:01 -08:00
port_forward(9090, container_port = 9090, name = "GRPC", host = webHost),
port_forward(26659, container_port = 26657, name = "TENDERMINT [:26659]", host = webHost)
],
labels = ["wormchain"],
trigger_mode = trigger_mode,
)
2022-10-13 18:23:23 -07:00
k8s_resource(
"wormchain-deploy",
resource_deps = ["wormchain"],
labels = ["wormchain"],
trigger_mode = trigger_mode,
)
if ibc_relayer:
    # Relays IBC packets between wormchain and the terra2 devnet.
    k8s_yaml_with_ns("devnet/ibc-relayer.yaml")
    docker_build(
        ref = "ibc-relayer-image",
        context = ".",
        dockerfile = "./wormchain/ibc-relayer/Dockerfile",
        only = [],
    )
    k8s_resource(
        "ibc-relayer",
        resource_deps = ["wormchain-deploy", "terra2-terrad"],
        port_forwards = [
            port_forward(7597, name = "HTTPDEBUG [:7597]", host = webHost),
        ],
        labels = ["ibc-relayer"],
        trigger_mode = trigger_mode,
    )
if btc:
    # Bitcoin localnet node.
    k8s_yaml_with_ns("devnet/btc-localnet.yaml")
    docker_build(
        ref = "btc-node",
        context = "bitcoin",
        target = "bitcoin-build",
        dockerfile = "bitcoin/Dockerfile",
    )
    k8s_resource(
        "btc",
        port_forwards = [port_forward(18556, name = "RPC [:18556]", host = webHost)],
        labels = ["btc"],
        trigger_mode = trigger_mode,
    )
if aptos:
    # Aptos localnet node.
    k8s_yaml_with_ns("devnet/aptos-localnet.yaml")
    docker_build(
        ref = "aptos-node",
        context = "aptos",
        dockerfile = "aptos/Dockerfile",
        target = "aptos",
    )
    k8s_resource(
        "aptos",
        port_forwards = [
            port_forward(8080, name = "RPC [:8080]", host = webHost),
            port_forward(6181, name = "FullNode [:6181]", host = webHost),
            port_forward(8081, name = "Faucet [:8081]", host = webHost),
        ],
        labels = ["aptos"],
        trigger_mode = trigger_mode,
    )
if query_server:
    # Cross-chain query server; depends on the guardian being up.
    k8s_yaml_with_ns("devnet/query-server.yaml")
    k8s_resource(
        "query-server",
        port_forwards = [
            port_forward(6069, name = "REST [:6069]", host = webHost),
            port_forward(6068, name = "Status [:6068]", host = webHost),
        ],
        resource_deps = ["guardian"],
        labels = ["query-server"],
        trigger_mode = trigger_mode,
    )