# This Tiltfile contains the deployment and build config for the Wormhole devnet.
#
# We use Buildkit cache mounts and careful layering to avoid unnecessary rebuilds - almost
# all source code changes result in small, incremental rebuilds. Dockerfiles are written such
# that, for example, changing the contract source code won't cause Solana itself to be rebuilt.
#
load("ext://namespace", "namespace_create", "namespace_inject")
load("ext://secret", "secret_yaml_generic")
allow_k8s_contexts("ci")
# Disable telemetry by default
analytics_settings(False)
# Allow more parallel updates (Tilt's default is 3)
update_settings(max_parallel_updates = 10)
# Runtime configuration
config.define_bool("ci", False, "We are running in CI")
config.define_bool("manual", False, "Set TRIGGER_MODE_MANUAL by default")
config.define_string("num", False, "Number of guardian nodes to run")
# You do not usually need to set this argument - it is for debugging only. If you do use a different
# namespace, note that the "wormhole" namespace is hardcoded in tests, and don't forget to pass the
# same argument when running "tilt down".
#
config.define_string("namespace", False, "Kubernetes namespace to use")
# These arguments enable writing Guardian events to a cloud BigTable instance.
# Writing to cloud BigTable is optional and not required to run the devnet.
config.define_string("gcpProject", False, "GCP project ID for BigTable persistence")
config.define_string("bigTableKeyPath", False, "Path to BigTable json key file")
# When running Tilt on a server, this can be used to set the public hostname Tilt runs on,
# so that service links in the UI work.
config.define_string("webHost", False, "Public hostname for port forwards")
# Components
config.define_bool("algorand", False, "Enable Algorand component")
config.define_bool("evm2", False, "Enable second Eth component")
config.define_bool("solana", False, "Enable Solana component")
config.define_bool("terra_classic", False, "Enable Terra Classic component")
config.define_bool("terra2", False, "Enable Terra 2 component")
config.define_bool("explorer", False, "Enable explorer component")
config.define_bool("bridge_ui", False, "Enable bridge UI component")
config.define_bool("spy_relayer", False, "Enable spy relayer")
config.define_bool("e2e", False, "Enable E2E testing stack")
config.define_bool("ci_tests", False, "Enable tests runner component")
config.define_bool("bridge_ui_hot", False, "Enable hot loading bridge_ui")
config.define_bool("guardiand_debug", False, "Enable dlv endpoint for guardiand")
cfg = config.parse()
num_guardians = int(cfg.get("num", "1"))
namespace = cfg.get("namespace", "wormhole")
gcpProject = cfg.get("gcpProject", "local-dev")
bigTableKeyPath = cfg.get("bigTableKeyPath", "./event_database/devnet_key.json")
webHost = cfg.get("webHost", "localhost")
algorand = cfg.get("algorand", True)
evm2 = cfg.get("evm2", True)
solana = cfg.get("solana", True)
terra_classic = cfg.get("terra_classic", True)
terra2 = cfg.get("terra2", True)
ci = cfg.get("ci", False)
explorer = cfg.get("explorer", ci)
bridge_ui = cfg.get("bridge_ui", ci)
spy_relayer = cfg.get("spy_relayer", ci)
e2e = cfg.get("e2e", ci)
ci_tests = cfg.get("ci_tests", ci)
guardiand_debug = cfg.get("guardiand_debug", False)
bridge_ui_hot = not ci
if cfg.get("manual", False):
trigger_mode = TRIGGER_MODE_MANUAL
else:
trigger_mode = TRIGGER_MODE_AUTO
# namespace
if not ci:
namespace_create(namespace)
def k8s_yaml_with_ns(objects):
return k8s_yaml(namespace_inject(objects, namespace))
# protos
proto_deps = ["./proto", "buf.yaml", "buf.gen.yaml"]
local_resource(
name = "proto-gen",
deps = proto_deps,
cmd = "tilt docker build -- --target go-export -f Dockerfile.proto -o type=local,dest=node .",
env = {"DOCKER_BUILDKIT": "1"},
labels = ["protobuf"],
allow_parallel = True,
trigger_mode = trigger_mode,
)
local_resource(
name = "proto-gen-web",
deps = proto_deps + ["buf.gen.web.yaml"],
resource_deps = ["proto-gen"],
cmd = "tilt docker build -- --target node-export -f Dockerfile.proto -o type=local,dest=. .",
env = {"DOCKER_BUILDKIT": "1"},
labels = ["protobuf"],
allow_parallel = True,
trigger_mode = trigger_mode,
)
local_resource(
name = "const-gen",
deps = ["scripts", "clients", "ethereum/.env.test"],
cmd = 'tilt docker build -- --target const-export -f Dockerfile.const -o type=local,dest=. --build-arg num_guardians=%s .' % (num_guardians),
env = {"DOCKER_BUILDKIT": "1"},
allow_parallel = True,
trigger_mode = trigger_mode,
)
# wasm
if solana:
local_resource(
name = "wasm-gen",
deps = ["solana"],
dir = "solana",
cmd = "tilt docker build -- -f Dockerfile.wasm -o type=local,dest=.. .",
env = {"DOCKER_BUILDKIT": "1"},
labels = ["solana"],
allow_parallel = True,
trigger_mode = trigger_mode,
)
# node
if explorer:
k8s_yaml_with_ns(
secret_yaml_generic(
"node-bigtable-key",
from_file = "bigtable-key.json=" + bigTableKeyPath,
),
)
docker_build(
ref = "guardiand-image",
context = "node",
dockerfile = "node/Dockerfile",
target = "build",
)
def command_with_dlv(argv):
return [
"/dlv",
"--listen=0.0.0.0:2345",
"--accept-multiclient",
"--headless=true",
"--api-version=2",
"--continue=true",
"exec",
argv[0],
"--",
] + argv[1:]
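# Illustration of what the wrapper produces (the input command here is just an assumed example;
# the real one comes from devnet/node.yaml): given
#
#   ["/guardiand", "node", "--ethRPC", "ws://eth-devnet:8545"]
#
# command_with_dlv() returns
#
#   ["/dlv", "--listen=0.0.0.0:2345", "--accept-multiclient", "--headless=true",
#    "--api-version=2", "--continue=true", "exec", "/guardiand", "--",
#    "node", "--ethRPC", "ws://eth-devnet:8545"]
#
# i.e. Delve launches the original binary and forwards its original arguments after "--".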
def build_node_yaml():
node_yaml = read_yaml_stream("devnet/node.yaml")
for obj in node_yaml:
if obj["kind"] == "StatefulSet" and obj["metadata"]["name"] == "guardian":
obj["spec"]["replicas"] = num_guardians
container = obj["spec"]["template"]["spec"]["containers"][0]
if container["name"] != "guardiand":
fail("container 0 is not guardiand")
container["command"] += ["--devNumGuardians", str(num_guardians)]
if guardiand_debug:
container["command"] = command_with_dlv(container["command"])
container["command"] += ["--logLevel=debug"]
print(container["command"])
if explorer:
container["command"] += [
"--bigTablePersistenceEnabled",
"--bigTableInstanceName",
"wormhole",
"--bigTableTableName",
"v2Events",
"--bigTableTopicName",
"new-vaa-devnet",
"--bigTableKeyPath",
"/tmp/mounted-keys/bigtable-key.json",
"--bigTableGCPProject",
gcpProject,
]
if evm2:
container["command"] += [
"--bscRPC",
"ws://eth-devnet2:8545",
]
else:
container["command"] += [
"--bscRPC",
"ws://eth-devnet:8545",
]
if solana:
container["command"] += [
"--solanaWS",
"ws://solana-devnet:8900",
"--solanaRPC",
"http://solana-devnet:8899",
]
if terra_classic:
container["command"] += [
"--terraWS",
"ws://terra-terrad:26657/websocket",
"--terraLCD",
"http://terra-terrad:1317",
"--terraContract",
"terra18vd8fpwxzck93qlwghaj6arh4p7c5n896xzem5",
]
if terra2:
container["command"] += [
"--terra2WS",
"ws://terra2-terrad:26657/websocket",
"--terra2LCD",
"http://terra2-terrad:1317",
"--terra2Contract",
"terra14hj2tavq8fpesdwxxcu44rty3hh90vhujrvcmstl4zr3txmfvw9ssrc8au",
]
if algorand:
container["command"] += [
"--algorandAppID",
"4",
"--algorandIndexerRPC",
"http://algorand:8980",
"--algorandIndexerToken",
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"--algorandAlgodRPC",
"http://algorand:4001",
"--algorandAlgodToken",
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
]
return encode_yaml_stream(node_yaml)
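# Rough illustration of the effect (with the defaults above: one guardian, all chains enabled,
# explorer disabled): the guardian StatefulSet gets replicas = 1 and its guardiand command is
# extended with flags along the lines of
#
#   --devNumGuardians 1 --bscRPC ws://eth-devnet2:8545
#   --solanaWS ws://solana-devnet:8900 --solanaRPC http://solana-devnet:8899
#   --terraWS ws://terra-terrad:26657/websocket ... --algorandAppID 4 ...
#
# before the patched YAML is handed to k8s_yaml_with_ns() below.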
k8s_yaml_with_ns(build_node_yaml())
guardian_resource_deps = ["proto-gen", "eth-devnet"]
if evm2:
guardian_resource_deps = guardian_resource_deps + ["eth-devnet2"]
if solana:
guardian_resource_deps = guardian_resource_deps + ["solana-devnet"]
if terra_classic:
guardian_resource_deps = guardian_resource_deps + ["terra-terrad"]
if terra2:
guardian_resource_deps = guardian_resource_deps + ["terra2-terrad"]
k8s_resource(
"guardian",
resource_deps = guardian_resource_deps,
port_forwards = [
port_forward(6060, name = "Debug/Status Server [:6060]", host = webHost),
port_forward(7070, name = "Public gRPC [:7070]", host = webHost),
port_forward(7071, name = "Public REST [:7071]", host = webHost),
port_forward(2345, name = "Debugger [:2345]", host = webHost),
],
labels = ["guardian"],
trigger_mode = trigger_mode,
)
# guardian set update - triggered by "tilt args" changes
if num_guardians >= 2 and ci == False:
local_resource(
name = "guardian-set-update",
resource_deps = guardian_resource_deps + ["guardian"],
deps = ["scripts/send-vaa.sh", "clients/eth"],
cmd = './scripts/update-guardian-set.sh %s %s %s' % (num_guardians, webHost, namespace),
labels = ["guardian"],
trigger_mode = trigger_mode,
)
# spy
k8s_yaml_with_ns("devnet/spy.yaml")
k8s_resource(
"spy",
resource_deps = ["proto-gen", "guardian"],
port_forwards = [
port_forward(6061, container_port = 6060, name = "Debug/Status Server [:6061]", host = webHost),
port_forward(7072, name = "Spy gRPC [:7072]", host = webHost),
],
labels = ["guardian"],
trigger_mode = trigger_mode,
)
if solana:
# solana client cli (used for devnet setup)
docker_build(
ref = "bridge-client",
context = ".",
only = ["./proto", "./solana", "./clients"],
dockerfile = "Dockerfile.client",
# Ignore target folders from local (non-container) development.
ignore = ["./solana/*/target"],
)
# solana smart contract
docker_build(
ref = "solana-contract",
context = "solana",
dockerfile = "solana/Dockerfile",
target = "builder",
build_args = {"BRIDGE_ADDRESS": "Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o"}
)
# solana local devnet
k8s_yaml_with_ns("devnet/solana-devnet.yaml")
k8s_resource(
"solana-devnet",
port_forwards = [
port_forward(8899, name = "Solana RPC [:8899]", host = webHost),
port_forward(8900, name = "Solana WS [:8900]", host = webHost),
port_forward(9000, name = "Solana PubSub [:9000]", host = webHost),
],
resource_deps = ["const-gen"],
labels = ["solana"],
trigger_mode = trigger_mode,
)
# eth devnet
docker_build(
ref = "eth-node",
context = "./ethereum",
dockerfile = "./ethereum/Dockerfile",
# ignore local node_modules (in case they're present)
ignore = ["./ethereum/node_modules"],
# sync external scripts for incremental development
# (everything else needs to be restarted from scratch for determinism)
#
# This relies on --update-mode=exec to work properly with a non-root user.
# https://github.com/tilt-dev/tilt/issues/3708
live_update = [
sync("./ethereum/src", "/home/node/app/src"),
],
)
if spy_relayer:
docker_build(
ref = "redis",
context = ".",
only = ["./third_party"],
dockerfile = "third_party/redis/Dockerfile",
)
k8s_yaml_with_ns("devnet/redis.yaml")
k8s_resource(
"redis",
port_forwards = [
port_forward(6379, name = "Redis Default [:6379]", host = webHost),
],
labels = ["spy-relayer"],
trigger_mode = trigger_mode,
)
docker_build(
ref = "spy-relay-image",
context = ".",
only = ["./relayer/spy_relayer"],
dockerfile = "relayer/spy_relayer/Dockerfile",
live_update = []
)
k8s_yaml_with_ns("devnet/spy-listener.yaml")
k8s_resource(
"spy-listener",
resource_deps = ["proto-gen", "guardian", "redis"],
port_forwards = [
port_forward(6062, container_port = 6060, name = "Debug/Status Server [:6062]", host = webHost),
port_forward(4201, name = "REST [:4201]", host = webHost),
port_forward(8082, name = "Prometheus [:8082]", host = webHost),
],
labels = ["spy-relayer"],
trigger_mode = trigger_mode,
)
k8s_yaml_with_ns("devnet/spy-relayer.yaml")
k8s_resource(
"spy-relayer",
resource_deps = ["proto-gen", "guardian", "redis"],
port_forwards = [
port_forward(8083, name = "Prometheus [:8083]", host = webHost),
],
labels = ["spy-relayer"],
trigger_mode = trigger_mode,
)
k8s_yaml_with_ns("devnet/spy-wallet-monitor.yaml")
k8s_resource(
"spy-wallet-monitor",
resource_deps = ["proto-gen", "guardian", "redis"],
port_forwards = [
port_forward(8084, name = "Prometheus [:8084]", host = webHost),
],
labels = ["spy-relayer"],
trigger_mode = trigger_mode,
)
k8s_yaml_with_ns("devnet/eth-devnet.yaml")
k8s_resource(
"eth-devnet",
port_forwards = [
port_forward(8545, name = "Ganache RPC [:8545]", host = webHost),
],
resource_deps = ["const-gen"],
labels = ["evm"],
trigger_mode = trigger_mode,
)
if evm2:
k8s_yaml_with_ns("devnet/eth-devnet2.yaml")
k8s_resource(
"eth-devnet2",
port_forwards = [
port_forward(8546, name = "Ganache RPC [:8546]", host = webHost),
],
resource_deps = ["const-gen"],
labels = ["evm"],
trigger_mode = trigger_mode,
)
if bridge_ui:
entrypoint = "npm run build && /app/node_modules/.bin/serve -s build -n"
live_update = []
if bridge_ui_hot:
entrypoint = "npm start"
live_update = [
sync("./bridge_ui/public", "/app/public"),
sync("./bridge_ui/src", "/app/src"),
]
docker_build(
ref = "bridge-ui",
context = ".",
only = ["./bridge_ui"],
dockerfile = "bridge_ui/Dockerfile",
entrypoint = entrypoint,
live_update = live_update,
)
k8s_yaml_with_ns("devnet/bridge-ui.yaml")
k8s_resource(
"bridge-ui",
resource_deps = [],
port_forwards = [
port_forward(3000, name = "Bridge UI [:3000]", host = webHost),
],
labels = ["portal"],
trigger_mode = trigger_mode,
)
if ci_tests:
local_resource(
name = "solana-tests",
deps = ["solana"],
dir = "solana",
cmd = "tilt docker build -- -f Dockerfile --target ci_tests --build-arg BRIDGE_ADDRESS=Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o .",
env = {"DOCKER_BUILDKIT": "1"},
labels = ["ci"],
allow_parallel = True,
trigger_mode = trigger_mode,
)
docker_build(
ref = "tests-image",
context = ".",
dockerfile = "testing/Dockerfile.tests",
only = [],
live_update = [
sync("./spydk/js/src", "/app/spydk/js/src"),
sync("./sdk/js/src", "/app/sdk/js/src"),
sync("./testing", "/app/testing"),
sync("./bridge_ui/src", "/app/bridge_ui/src"),
],
)
k8s_yaml_with_ns("devnet/tests.yaml")
k8s_resource(
"ci-tests",
resource_deps = ["proto-gen-web", "wasm-gen", "eth-devnet", "eth-devnet2", "terra-terrad", "terra-fcd", "terra2-terrad", "terra2-fcd", "solana-devnet", "spy", "guardian"],
labels = ["ci"],
trigger_mode = trigger_mode,
)
# e2e
if e2e:
k8s_yaml_with_ns("devnet/e2e.yaml")
docker_build(
ref = "e2e",
context = "e2e",
dockerfile = "e2e/Dockerfile",
network = "host",
)
k8s_resource(
"e2e",
port_forwards = [
port_forward(6080, name = "VNC [:6080]", host = webHost, link_path = "/vnc_auto.html"),
],
labels = ["ci"],
trigger_mode = trigger_mode,
)
# bigtable
if explorer:
k8s_yaml_with_ns("devnet/bigtable.yaml")
k8s_resource(
"bigtable-emulator",
port_forwards = [port_forward(8086, name = "BigTable clients [:8086]")],
labels = ["explorer"],
trigger_mode = trigger_mode,
)
k8s_resource(
"pubsub-emulator",
port_forwards = [port_forward(8085, name = "PubSub listeners [:8085]")],
labels = ["explorer"],
)
docker_build(
ref = "cloud-functions",
context = "./event_database",
dockerfile = "./event_database/functions_server/Dockerfile",
live_update = [
sync("./event_database/cloud_functions", "/app/cloud_functions"),
],
)
k8s_resource(
"cloud-functions",
resource_deps = ["proto-gen", "bigtable-emulator", "pubsub-emulator"],
port_forwards = [port_forward(8090, name = "Cloud Functions [:8090]", host = webHost)],
labels = ["explorer"],
trigger_mode = trigger_mode,
)
if terra_classic:
docker_build(
ref = "terra-image",
context = "./terra/devnet",
dockerfile = "terra/devnet/Dockerfile",
)
docker_build(
ref = "terra-contracts",
context = "./terra",
dockerfile = "./terra/Dockerfile",
)
k8s_yaml_with_ns("devnet/terra-devnet.yaml")
k8s_resource(
"terra-terrad",
port_forwards = [
port_forward(26657, name = "Terra RPC [:26657]", host = webHost),
port_forward(1317, name = "Terra LCD [:1317]", host = webHost),
],
resource_deps = ["const-gen"],
labels = ["terra"],
trigger_mode = trigger_mode,
)
k8s_resource(
"terra-postgres",
labels = ["terra"],
trigger_mode = trigger_mode,
)
k8s_resource(
"terra-fcd",
resource_deps = ["terra-terrad", "terra-postgres"],
port_forwards = [port_forward(3060, name = "Terra FCD [:3060]", host = webHost)],
labels = ["terra"],
trigger_mode = trigger_mode,
)
if terra2:
docker_build(
ref = "terra2-image",
context = "./cosmwasm/devnet",
dockerfile = "cosmwasm/devnet/Dockerfile",
)
docker_build(
ref = "terra2-contracts",
context = "./cosmwasm",
dockerfile = "./cosmwasm/Dockerfile",
)
k8s_yaml_with_ns("devnet/terra2-devnet.yaml")
k8s_resource(
"terra2-terrad",
port_forwards = [
port_forward(26658, container_port = 26657, name = "Terra 2 RPC [:26658]", host = webHost),
port_forward(1318, container_port = 1317, name = "Terra 2 LCD [:1318]", host = webHost),
],
resource_deps = ["const-gen"],
labels = ["terra2"],
trigger_mode = trigger_mode,
)
k8s_resource(
"terra2-postgres",
labels = ["terra2"],
trigger_mode = trigger_mode,
)
k8s_resource(
"terra2-fcd",
resource_deps = ["terra2-terrad", "terra2-postgres"],
port_forwards = [port_forward(3061, container_port = 3060, name = "Terra 2 FCD [:3061]", host = webHost)],
labels = ["terra2"],
trigger_mode = trigger_mode,
)
if algorand:
k8s_yaml_with_ns("devnet/algorand-devnet.yaml")
docker_build(
ref = "algorand-algod",
context = "algorand/sandbox-algorand",
dockerfile = "algorand/sandbox-algorand/images/algod/Dockerfile"
)
docker_build(
ref = "algorand-indexer",
context = "algorand/sandbox-algorand",
dockerfile = "algorand/sandbox-algorand/images/indexer/Dockerfile"
)
docker_build(
ref = "algorand-contracts",
context = "algorand",
dockerfile = "algorand/Dockerfile",
ignore = ["algorand/test/*.*"]
)
k8s_resource(
"algorand",
port_forwards = [
port_forward(4001, name = "Algod [:4001]", host = webHost),
port_forward(4002, name = "KMD [:4002]", host = webHost),
port_forward(8980, name = "Indexer [:8980]", host = webHost),
],
resource_deps = ["const-gen"],
labels = ["algorand"],
trigger_mode = trigger_mode,
)