Relayer/merge into wormhole relayer engine (#3042)

Note: The time limit for CI needs to be increased because now the relayer engine needs to build the SDK from source (the ethereum folder) in its docker image.

Once the SDK is published, I can have the relayer engine docker image simply import the SDK, which should save a lot of time and allow us to reduce the CI time limit again.
This commit is contained in:
derpy-duck 2023-06-14 15:08:28 -04:00 committed by GitHub
parent 7f6213019a
commit 10cc45aba2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 8661 additions and 8 deletions

View File

@ -35,7 +35,7 @@ jobs:
kubectl config use-context ci
- run: tilt ci -- --ci --namespace=$DEPLOY_NS --num=2
timeout-minutes: 20
timeout-minutes: 40
# Clean up k8s resources
- run: kubectl delete --namespace=$DEPLOY_NS service,statefulset,configmap,pod,job --all

View File

@ -0,0 +1,44 @@
# Builds and publishes the generic relayer container image to GHCR on
# pushes to main, published releases, or manual dispatch.
name: Publish generic relayer container image
on:
  workflow_dispatch:
  push:
    branches: ["main"]
  release:
    types: [published]
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: wormhole-foundation/generic-relayer
jobs:
  build-and-push-relayer-image:
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3
      # Third-party docker actions are pinned to commit SHAs for
      # supply-chain safety.
      - name: Log in to the Container registry
        uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
        uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
      - name: Build and push Docker image
        uses: docker/build-push-action@ad44023a93711e3deb337508980b4b5e9bcdc5dc
        with:
          context: .
          file: ./relayer/generic_relayer/relayer-engine-v2/Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          # Fix: the metadata step extracts OCI labels, but they were
          # previously never attached to the built image.
          labels: ${{ steps.meta.outputs.labels }}

View File

@ -66,6 +66,9 @@ config.define_bool("node_metrics", False, "Enable Prometheus & Grafana for Guard
config.define_bool("guardiand_governor", False, "Enable chain governor in guardiand")
config.define_bool("wormchain", False, "Enable a wormchain node")
config.define_bool("ibc_relayer", False, "Enable IBC relayer between cosmos chains")
config.define_bool("redis", False, "Enable a redis instance")
config.define_bool("generic_relayer", False, "Enable the generic relayer off-chain component")
cfg = config.parse()
num_guardians = int(cfg.get("num", "1"))
@ -91,6 +94,8 @@ node_metrics = cfg.get("node_metrics", False)
guardiand_governor = cfg.get("guardiand_governor", False)
ibc_relayer = cfg.get("ibc_relayer", ci)
btc = cfg.get("btc", False)
redis = cfg.get('redis', ci)
generic_relayer = cfg.get("generic_relayer", ci)
if ci:
guardiand_loglevel = cfg.get("guardiand_loglevel", "warn")
@ -139,7 +144,7 @@ docker_build(
context = ".",
dockerfile = "node/Dockerfile",
target = "build",
ignore=["./sdk/js"]
ignore=["./sdk/js", "./relayer"]
)
def command_with_dlv(argv):
@ -452,8 +457,9 @@ docker_build(
dockerfile = "./ethereum/Dockerfile",
# ignore local node_modules (in case they're present)
ignore = ["./ethereum/node_modules"],
ignore = ["./node_modules"],
build_args = {"num_guardians": str(num_guardians), "dev": str(not ci)},
# sync external scripts for incremental development
# (everything else needs to be restarted from scratch for determinism)
#
@ -464,7 +470,7 @@ docker_build(
],
)
if spy_relayer:
if spy_relayer or redis or generic_relayer:
docker_build(
ref = "redis",
context = ".",
@ -472,17 +478,53 @@ if spy_relayer:
dockerfile = "third_party/redis/Dockerfile",
)
k8s_yaml_with_ns("devnet/redis.yaml")
if spy_relayer or redis:
k8s_resource(
"redis",
port_forwards = [
port_forward(6379, name = "Redis Default [:6379]", host = webHost),
],
labels = ["spy-relayer"],
labels = ["redis"],
trigger_mode = trigger_mode,
)
k8s_yaml_with_ns("devnet/redis.yaml")
if generic_relayer:
k8s_resource(
"redis-relayer",
port_forwards = [
port_forward(6378, name = "Generic Relayer Redis [:6378]", host = webHost),
],
labels = ["redis-relayer"],
trigger_mode = trigger_mode,
)
k8s_yaml_with_ns("devnet/redis-relayer.yaml")
if generic_relayer:
k8s_resource(
"relayer-engine",
resource_deps = ["guardian", "redis-relayer", "spy"],
port_forwards = [
port_forward(3003, container_port=3000, name = "Bullmq UI [:3003]", host = webHost),
],
labels = ["relayer-engine"],
trigger_mode = trigger_mode,
)
docker_build(
ref = "relayer-engine",
context = ".",
only = ["./ethereum", "./relayer/generic_relayer", "./sdk", "./solana"],
dockerfile = "relayer/generic_relayer/relayer-engine-v2/Dockerfile",
ignore = ["./ethereum/node_modules", "./sdk/js/src/relayer/__tests__"]
)
k8s_yaml_with_ns("devnet/relayer-engine.yaml")
if spy_relayer:
docker_build(
ref = "spy-relay-image",
context = "relayer/spy_relayer",

45
devnet/redis-relayer.yaml Normal file
View File

@ -0,0 +1,45 @@
---
# Service fronting the generic-relayer Redis instance inside the devnet.
# Note the port remap: clients connect on 6378, Redis listens on 6379.
apiVersion: v1
kind: Service
metadata:
  name: redis-relayer
  labels:
    app: redis-relayer
spec:
  selector:
    app: redis-relayer
  ports:
    - port: 6378
      targetPort: 6379
      name: redis-relayer
      protocol: TCP
  type: LoadBalancer
---
# Single-replica Redis used by the relayer-engine as its job/VAA store.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-relayer
spec:
  selector:
    matchLabels:
      app: redis-relayer
  serviceName: redis-relayer
  template:
    metadata:
      labels:
        app: redis-relayer
    spec:
      restartPolicy: Always
      # Devnet only: kill pods immediately instead of draining gracefully.
      terminationGracePeriodSeconds: 0
      containers:
        - name: redis-relayer
          image: redis
          readinessProbe:
            tcpSocket:
              port: 6379
            # Probe every second for up to 5 minutes before giving up.
            periodSeconds: 1
            failureThreshold: 300
          ports:
            - containerPort: 6379
              name: redis-relayer
              protocol: TCP

View File

@ -0,0 +1,53 @@
---
# Headless service for the relayer-engine StatefulSet; exposes the
# Bullmq UI / metrics HTTP server on port 3000.
apiVersion: v1
kind: Service
metadata:
  name: relayer-engine
  labels:
    # Fix: this previously read "app: redis" — a copy-paste from the redis
    # manifest that mislabeled the service (the selector was already correct).
    app: relayer-engine
spec:
  clusterIP: None
  selector:
    app: relayer-engine
  ports:
    - port: 3000
      name: bullmq
      protocol: TCP
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: relayer-engine
spec:
  selector:
    matchLabels:
      app: relayer-engine
  serviceName: relayer-engine
  template:
    metadata:
      labels:
        app: relayer-engine
    spec:
      restartPolicy: Always
      # Devnet only: kill pods immediately instead of draining gracefully.
      terminationGracePeriodSeconds: 0
      containers:
        - name: relayer-engine
          image: relayer-engine
          command:
            - /bin/sh
            - -c
            - "npm run tiltkub"
          env:
            # Point the engine at the dedicated relayer Redis service,
            # which remaps 6378 -> 6379 (see devnet/redis-relayer.yaml).
            - name: REDIS_PORT
              value: "6378"
            - name: REDIS_HOST
              value: "redis-relayer"
          readinessProbe:
            tcpSocket:
              port: 3000
            periodSeconds: 1
            failureThreshold: 300
          ports:
            - containerPort: 3000
              name: relayer-engine
              protocol: TCP

View File

@ -0,0 +1,2 @@
lib
node_modules

View File

@ -0,0 +1,13 @@
{
"printWidth": 80,
"tabWidth": 2,
"useTabs": false,
"semi": true,
"singleQuote": false,
"quoteProps": "as-needed",
"jsxSingleQuote": false,
"trailingComma": "es5",
"bracketSpacing": true,
"bracketSameLine": false,
"arrowParens": "always"
}

View File

@ -0,0 +1,110 @@
# syntax=docker.io/docker/dockerfile:1.3@sha256:42399d4635eddd7a9b8a24be879d2f9a930d0ed040a61324cfdf59ef1357b3b2
# Builds the generic relayer (relayer-engine-v2) image. It first compiles the
# ethereum contracts + typechain bindings and the JS SDK from source, then
# installs the relayer app itself. Once the SDK is published to npm, the
# ethereum/SDK stages below can be dropped.
FROM node:19.6.1-slim@sha256:a1ba21bf0c92931d02a8416f0a54daad66cb36a85d2b73af9d73b044f5f57cfc
# npm wants to clone random Git repositories - lovely.
# RUN apk add git python make build-base
# RUN apk update && apk add bash
RUN apt-get update && apt-get -y install \
  git python make curl netcat
RUN npm i typescript -g
# Install foundry (forge) for compiling the ethereum contracts.
RUN curl -L https://foundry.paradigm.xyz | bash
RUN $HOME/.foundry/bin/foundryup
RUN ls $HOME/.foundry/bin
# Run as user, otherwise, npx explodes.
# forge is moved to a system path so the unprivileged user below can run it.
RUN mv /root/.foundry/bin/forge /bin/forge
USER 1000
RUN mkdir -p /home/node/app
RUN mkdir -p /home/node/.npm
WORKDIR /home/node/app
# Fix git ssh error
RUN git config --global url."https://".insteadOf ssh://
# Support additional root CAs
# README.md is copied alongside so the COPY succeeds even when no cert.pem
# exists (COPY requires at least one matching source).
COPY ./ethereum/README.md cert.pem* /certs/
# Node
ENV NODE_EXTRA_CA_CERTS=/certs/cert.pem
ENV NODE_OPTIONS=--use-openssl-ca
# npm
RUN if [ -e /certs/cert.pem ]; then npm config set cafile /certs/cert.pem; fi
# git
RUN if [ -e /certs/cert.pem ]; then git config --global http.sslCAInfo /certs/cert.pem; fi
WORKDIR /home/node/app/ethereum
# Only invalidate the npm install step if package.json changed
COPY --chown=node:node ./ethereum/package.json .
COPY --chown=node:node ./ethereum/package-lock.json .
COPY --chown=node:node ./ethereum/.env.test .env
# We want to cache node_modules *and* incorporate it into the final image.
RUN --mount=type=cache,uid=1000,gid=1000,target=/home/node/.npm \
  --mount=type=cache,uid=1000,gid=1000,target=/ethereum/node_modules \
  npm ci && \
  cp -R node_modules node_modules_cache
# Amusingly, Debian's coreutils version has a bug where mv believes that
# the target is on a different fs and does a full recursive copy for what
# could be a renameat syscall. Alpine does not have this bug.
RUN rm -rf node_modules && mv node_modules_cache node_modules
COPY --chown=node:node ./ethereum .
#typechain needs to be on the path
USER root
RUN npm i --save-dev --global typechain
RUN forge install --no-git --no-commit
RUN apt-get install tree
RUN make dependencies
# Build contracts with forge, then generate ethers-v5 typechain bindings
# for everything except test contracts.
RUN tree -L 1; forge build -o build-forge
RUN typechain --target=ethers-v5 --out-dir=./ethers-contracts ./build-forge/!\(test\).sol/*.json
#Ethereum setup above this line should eventually be removed once the TS SDK is set up
# Add the SDK
WORKDIR /home/node/app/
COPY --chown=node:node ./sdk ./sdk
COPY --chown=node:node ./solana/idl ./solana/idl
WORKDIR /home/node/app/sdk/js/
RUN npm ci
RUN npm run build
#Path matters so as to not break imports
WORKDIR /home/node/app/relayer/generic_relayer/relayer-engine-v2/
# Only invalidate the npm install step if package.json changed
COPY --chown=node:node /relayer/generic_relayer/relayer-engine-v2/package.json .
COPY --chown=node:node /relayer/generic_relayer/relayer-engine-v2/package-lock.json .
# We want to cache node_modules *and* incorporate it into the final image.
RUN --mount=type=cache,uid=1000,gid=1000,target=/home/node/.npm \
  --mount=type=cache,uid=1000,gid=1000,target=/relayer/generic_relayer/relayer-engine-v2/node_modules \
  npm install && \
  cp -R node_modules node_modules_cache
## NOTE: The above should be an 'npm ci'; but it does not work for some reason
## It is my understanding that this dockerfile will be changed in the future anyways
## So will leave this in here for now
## DO NOT keep this in production
# Amusingly, Debian's coreutils version has a bug where mv believes that
# the target is on a different fs and does a full recursive copy for what
# could be a renameat syscall. Alpine does not have this bug.
RUN rm -rf node_modules && mv node_modules_cache node_modules
COPY --chown=node:node /relayer/generic_relayer/relayer-engine-v2/ .

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,43 @@
{
"name": "@wormhole-foundation/offchain-generic-relayer",
"version": "0.1.0",
"description": "",
"main": "index.js",
"scripts": {
"start": "tsx src/app.ts --flag=testnet",
"testnet": "tsx src/app.ts --flag=testnet",
"tilt": "tsx src/app.ts --flag=tilt",
"tiltkub": "tsx src/app.ts --flag=tiltkub",
"prettier": "prettier --write .",
"build": "tsc",
"typecheck": "tsc --noEmit --skipLibCheck",
"watch": "tsc --watch",
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "Joe Howarth",
"license": "ISC",
"dependencies": {
"@certusone/wormhole-sdk": "file:../../../sdk/js",
"@improbable-eng/grpc-web-node-http-transport": "^0.15.0",
"@types/clone": "^2.1.1",
"@types/koa": "^2.13.5",
"clone": "^2.1.2",
"koa": "^2.14.1",
"koa-router": "^12.0.0",
"relayer-engine": "github:wormhole-foundation/relayer-engine#7698ed6",
"yargs": "^17.7.1"
},
"peerDependencies": {
"ethers": "^5",
"winston": "3.8.2"
},
"devDependencies": {
"@types/koa-router": "^7.4.4",
"@types/yargs": "^17.0.23",
"nodemon": "^2.0.20",
"prettier": "^2.8.7",
"tsx": "^3.12.7",
"typescript": "^4.8.4",
"winston": "3.8.2"
}
}

View File

@ -0,0 +1,125 @@
import Koa from "koa";
import Router from "koa-router";
import {
Next,
RelayerApp,
StandardRelayerAppOpts,
StandardRelayerContext,
logging,
wallets,
missedVaas,
providers,
sourceTx,
} from "relayer-engine";
import { RedisStorage } from "relayer-engine/lib/storage/redis-storage";
import { EVMChainId } from "@certusone/wormhole-sdk";
import { processGenericRelayerVaa } from "./processor";
import { Logger } from "winston";
import deepCopy from "clone";
import { loadAppConfig } from "./env";
// Context handed to every middleware/handler: the standard relayer context
// plus the per-chain contract addresses and the resolved app options.
export type GRContext = StandardRelayerContext & {
  deliveryProviders: Record<EVMChainId, string>;
  wormholeRelayers: Record<EVMChainId, string>;
  opts: StandardRelayerAppOpts;
};
/**
 * Boots the generic relayer: loads configuration, wires up the
 * relayer-engine middleware pipeline (logging, missed-VAA recovery,
 * providers, wallets, source-tx lookup), registers the delivery processor
 * for every WormholeRelayer emitter, and starts the app plus the
 * metrics/UI HTTP API.
 */
async function main() {
  const { env, opts, deliveryProviders, wormholeRelayers } =
    await loadAppConfig();
  const logger = opts.logger!;
  logger.debug("Redis config: ", opts.redis);
  const app = new RelayerApp<GRContext>(env, opts);
  const {
    privateKeys,
    name,
    spyEndpoint,
    redis,
    redisCluster,
    redisClusterEndpoints,
    wormholeRpcs,
  } = opts;
  app.spy(spyEndpoint);
  const store = new RedisStorage({
    redis,
    redisClusterEndpoints,
    redisCluster,
    attempts: opts.workflows?.retries ?? 3,
    namespace: name,
    queueName: `${name}-relays`,
  });
  app.useStorage(store);
  app.logger(logger);
  app.use(logging(logger));
  app.use(
    missedVaas(app, {
      namespace: name,
      logger,
      redis,
      redisCluster,
      redisClusterEndpoints,
      wormholeRpcs,
    })
  );
  app.use(providers(opts.providers));
  // Only attach wallets when keys are configured; read-only mode otherwise.
  if (opts.privateKeys && Object.keys(opts.privateKeys).length) {
    app.use(
      wallets(env, {
        logger,
        namespace: name,
        privateKeys: privateKeys!,
        metrics: { registry: store.registry },
      })
    );
  }
  if (opts.fetchSourceTxhash) {
    app.use(sourceTx());
  }
  // Set up middleware: deep-copy the contract maps into the context so
  // downstream handlers cannot mutate the shared configuration.
  app.use(async (ctx: GRContext, next: Next) => {
    ctx.deliveryProviders = deepCopy(deliveryProviders);
    ctx.wormholeRelayers = deepCopy(wormholeRelayers);
    ctx.opts = { ...opts };
    // BUGFIX: `next()` was previously fired without `await`, so errors from
    // downstream middleware surfaced as unhandled rejections and the chain
    // could report completion before processing finished.
    await next();
  });
  // Set up routes
  app.multiple(deepCopy(wormholeRelayers), processGenericRelayerVaa);
  app.listen();
  runApi(store, opts, logger);
}
/**
 * Starts a small Koa HTTP server exposing Prometheus metrics at /metrics
 * and, when a non-cluster Redis host is configured, the Bullmq UI at /ui.
 */
function runApi(storage: RedisStorage, { port, redis }: any, logger: Logger) {
  const server = new Koa();
  const router = new Router();
  router.get("/metrics", async (ctx: Koa.Context) => {
    ctx.body = await storage.registry?.metrics();
  });
  server.use(router.routes());
  server.use(router.allowedMethods());
  if (redis?.host) {
    server.use(storage.storageKoaUI("/ui"));
  }
  // Default to 3000 when the configured port is missing or non-numeric.
  port = Number(port) || 3000;
  server.listen(port, () => {
    logger.info(`Running on ${port}...`);
    logger.info(`For the UI, open http://localhost:${port}/ui`);
    logger.info(
      `For prometheus metrics, open http://localhost:${port}/metrics`
    );
    logger.info("Make sure Redis is running on port 6379 by default");
  });
}
// Entry point: any error that escapes main() is fatal — log it and exit
// non-zero so the orchestrator (k8s) restarts the pod.
main().catch((e) => {
  console.error("Encountered unrecoverable error:");
  console.error(e);
  process.exit(1);
});

View File

@ -0,0 +1,270 @@
import * as fs from "fs/promises";
import yargs from "yargs";
import {
Environment,
ProvidersOpts,
RedisOptions,
StandardRelayerAppOpts,
} from "relayer-engine";
import {
CHAIN_ID_ETH,
CHAIN_ID_BSC,
EVMChainId,
} from "@certusone/wormhole-sdk";
import { rootLogger } from "./log";
import { ethers } from "ethers";
// Relative path (from the app's working directory) to the relayer
// deployment scripts, which hold the per-environment contracts.json files.
const SCRIPTS_DIR = "../../../ethereum/ts-scripts/relayer";
// Parsed CLI options.
type Opts = {
  flag: Flag;
};
// Deployment environment selected via --flag / GR_RE_FLAG.
enum Flag {
  TiltKub = "tiltkub",
  Tilt = "tilt",
  Testnet = "testnet",
  K8sTestnet = "k8s-testnet",
  Mainnet = "mainnet",
}
// BUGFIX: `address` was previously typed as the string literal type
// "string" (a typo), which only admitted that exact literal value; it is
// now the `string` type.
type ContractConfigEntry = { chainId: EVMChainId; address: string };
// Shape of a per-environment contracts.json file.
type ContractsJson = {
  deliveryProviders: ContractConfigEntry[];
  wormholeRelayers: ContractConfigEntry[];
  mockIntegrations: ContractConfigEntry[];
};
// App-level configuration; see `defaults` for per-environment values and
// loadAndMergeConfig() for the environment-variable overrides.
export interface GRRelayerAppConfig {
  contractsJsonPath: string;
  name: string;
  spyEndpoint: string;
  wormholeRpcs: [string];
  providers?: ProvidersOpts;
  fetchSourceTxhash: boolean;
  logLevel: string;
  logFormat: "json" | "text";
  redis?: RedisOptions;
  redisCluster?: StandardRelayerAppOpts["redisCluster"];
  redisClusterEndpoints?: StandardRelayerAppOpts["redisClusterEndpoints"];
}
// Per-flag default configuration. Every field here can be overridden by
// an environment variable in loadAndMergeConfig().
const defaults: { [key in Flag]: GRRelayerAppConfig } = {
  [Flag.TiltKub]: {
    name: "GenericRelayer",
    // tiltkub reuses the tilt contract addresses; only hostnames differ.
    contractsJsonPath: `${SCRIPTS_DIR}/config/${Flag.Tilt}/contracts.json`,
    spyEndpoint: "spy:7072",
    logLevel: "debug",
    logFormat: "text",
    wormholeRpcs: ["http://guardian:7071"],
    providers: {
      chains: {
        [CHAIN_ID_ETH]: {
          endpoints: ["http://eth-devnet:8545/"],
        },
        [CHAIN_ID_BSC]: {
          endpoints: ["http://eth-devnet2:8545/"],
        },
      },
    },
    fetchSourceTxhash: false,
    redis: { host: "redis", port: 6379 },
  },
  [Flag.Tilt]: {
    name: "GenericRelayer",
    contractsJsonPath: `${SCRIPTS_DIR}/config/${Flag.Tilt}/contracts.json`,
    logLevel: "debug",
    logFormat: "text",
    spyEndpoint: "localhost:7072",
    wormholeRpcs: ["http://localhost:7071"],
    providers: {
      chains: {
        [CHAIN_ID_ETH]: {
          endpoints: ["http://localhost:8545/"],
        },
        [CHAIN_ID_BSC]: {
          endpoints: ["http://localhost:8546/"],
        },
      },
    },
    fetchSourceTxhash: false,
    redis: { host: "localhost", port: 6379 },
  },
  // TODO
  // NOTE(review): the `redisCluster` shape below does not match the
  // GRRelayerAppConfig field (hence the `as any`) — confirm before use.
  [Flag.K8sTestnet]: {
    contractsJsonPath: `${SCRIPTS_DIR}/config/${Flag.Testnet}/contracts.json`,
    name: "GenericRelayer",
    logLevel: "debug",
    logFormat: "json",
    spyEndpoint: "spy:7073",
    wormholeRpcs: ["https://wormhole-v2-testnet-api.certus.one"],
    fetchSourceTxhash: true,
    redisCluster: {
      redis: { host: "redis", port: 6379 },
    },
  } as any,
  [Flag.Testnet]: {
    contractsJsonPath: `${SCRIPTS_DIR}/config/${Flag.Testnet}/contracts.json`,
    name: "GenericRelayer",
    logLevel: "debug",
    logFormat: "text",
    spyEndpoint: "localhost:7073",
    wormholeRpcs: ["https://wormhole-v2-testnet-api.certus.one"],
    fetchSourceTxhash: true,
    redis: { host: "localhost", port: 6379 },
  },
  // NOTE(review): mainnet config is not filled in; selecting Flag.Mainnet
  // will fail at runtime when its paths/endpoints are read.
  [Flag.Mainnet]: {} as any,
};
/**
 * Loads the CLI/env options, the per-flag config, and the contracts.json
 * file, then assembles the final relayer-engine options plus the
 * checksummed per-chain contract address maps.
 */
export async function loadAppConfig(): Promise<{
  env: Environment;
  opts: GRRelayerAppConfig & StandardRelayerAppOpts;
  deliveryProviders: Record<EVMChainId, string>;
  wormholeRelayers: Record<EVMChainId, string>;
}> {
  const { flag } = getEnvironmentOptions();
  const config = await loadAndMergeConfig(flag);
  const contracts = await loadJson<ContractsJson>(config.contractsJsonPath);
  // Normalize every address to its EIP-55 checksummed form so later
  // strict-equality comparisons against on-chain values are reliable.
  const deliveryProviders = {} as Record<EVMChainId, string>;
  for (const { chainId, address } of contracts.deliveryProviders) {
    deliveryProviders[chainId] = ethers.utils.getAddress(address);
  }
  const wormholeRelayers = {} as Record<EVMChainId, string>;
  for (const { chainId, address } of contracts.wormholeRelayers) {
    wormholeRelayers[chainId] = ethers.utils.getAddress(address);
  }
  return {
    deliveryProviders,
    wormholeRelayers,
    env: flagToEnvironment(flag),
    opts: {
      ...config,
      logger: rootLogger(config.logLevel, config.logFormat),
      privateKeys: privateKeys(contracts),
    },
  };
}
// Reads the environment flag from the CLI (--flag=...) with a fallback to
// the GR_RE_FLAG environment variable; throws on unrecognized values.
function getEnvironmentOptions(): Opts {
  const parsed = yargs(process.argv.slice(2)).argv as unknown as Opts;
  if (parsed.flag == undefined) {
    parsed.flag = process.env.GR_RE_FLAG as Flag;
  }
  if (!validateStringEnum(Flag, parsed.flag)) {
    throw new Error("Unrecognized flag variant: " + parsed.flag);
  }
  return parsed;
}
// Builds the final app config for `flag`: starts from the per-flag
// defaults and lets individual environment variables override each field.
function loadAndMergeConfig(flag: Flag): GRRelayerAppConfig {
  const base = defaults[flag];
  // Presence of REDIS_CLUSTER_ENDPOINTS switches the app into
  // redis-cluster mode.
  const isRedisCluster = !!process.env.REDIS_CLUSTER_ENDPOINTS;
  return {
    name: process.env.GENERIC_RELAYER_NAME || base.name,
    // env: process.env.NODE_ENV?.trim()?.toLowerCase() || "local",
    contractsJsonPath:
      process.env.CONTRACTS_JSON_PATH || base.contractsJsonPath,
    logFormat: (process.env.LOG_FORMAT as "text" | "json") || base.logFormat,
    logLevel: process.env.LOG_LEVEL || base.logLevel,
    spyEndpoint: process.env.SPY_URL || base.spyEndpoint,
    // WORMHOLE_RPCS / BLOCKCHAIN_PROVIDERS / FETCH_SOURCE_TX_HASH are
    // JSON-encoded; a malformed value will throw here at startup.
    wormholeRpcs: process.env.WORMHOLE_RPCS
      ? JSON.parse(process.env.WORMHOLE_RPCS)
      : base.wormholeRpcs,
    providers: process.env.BLOCKCHAIN_PROVIDERS
      ? JSON.parse(process.env.BLOCKCHAIN_PROVIDERS)
      : base.providers,
    fetchSourceTxhash: process.env.FETCH_SOURCE_TX_HASH
      ? JSON.parse(process.env.FETCH_SOURCE_TX_HASH)
      : base.fetchSourceTxhash,
    redisClusterEndpoints: process.env.REDIS_CLUSTER_ENDPOINTS?.split(","), // "url1:port,url2:port"
    redisCluster: isRedisCluster
      ? {
          dnsLookup: (address: any, callback: any) => callback(null, address),
          slotsRefreshTimeout: 1000,
          redisOptions: {
            // REDIS_TLS is a presence flag: any non-empty value enables TLS.
            tls: process.env.REDIS_TLS ? {} : base.redis?.tls,
            username: process.env.REDIS_USERNAME,
            password: process.env.REDIS_PASSWORD,
          },
        }
      : undefined,
    redis: <RedisOptions>{
      tls: process.env.REDIS_TLS ? {} : base.redis?.tls,
      host: process.env.REDIS_HOST ? process.env.REDIS_HOST : base.redis?.host,
      // In cluster mode the single-node port is intentionally left unset.
      port: process.env.REDIS_CLUSTER_ENDPOINTS
        ? undefined
        : Number(process.env.REDIS_PORT) || base.redis?.port,
      username: process.env.REDIS_USERNAME,
      password: process.env.REDIS_PASSWORD,
    },
  };
}
/**
 * Resolves the EVM signing keys for every chain that has a WormholeRelayer
 * deployment. Precedence: EVM_PRIVATE_KEYS (JSON array) > EVM_PRIVATE_KEY
 * > PRIVATE_KEY (tilt) > the well-known tilt devnet key.
 */
function privateKeys(contracts: ContractsJson): {
  [k in Partial<EVMChainId>]: string[];
} {
  let keys: string[];
  if (process.env.EVM_PRIVATE_KEYS) {
    keys = JSON.parse(process.env.EVM_PRIVATE_KEYS);
  } else if (process.env.EVM_PRIVATE_KEY) {
    keys = [process.env.EVM_PRIVATE_KEY];
  } else if (process.env.PRIVATE_KEY) {
    // tilt
    keys = [process.env.PRIVATE_KEY];
  } else {
    // Todo: remove this
    // tilt evm private key
    console.log(
      "Warning: using tilt private key because no others were specified"
    );
    keys = [
      "6cbed15c793ce57650b9877cf6fa156fbef513c4e6134f022a85b1ffdd59b2a1",
    ];
  }
  // Every relayer chain shares the same key list.
  const byChain = {} as Record<EVMChainId, string[]>;
  for (const { chainId } of contracts.wormholeRelayers) {
    byChain[chainId] = keys;
  }
  return byChain;
}
// Maps a CLI flag to the relayer-engine Environment it targets.
function flagToEnvironment(flag: Flag): Environment {
  switch (flag) {
    case Flag.K8sTestnet:
    case Flag.Testnet:
      return Environment.TESTNET;
    case Flag.Mainnet:
      return Environment.MAINNET;
    case Flag.Tilt:
    case Flag.TiltKub:
      return Environment.DEVNET;
  }
}
// Returns true iff `passed` is one of the enum's declared values.
function validateStringEnum<O extends Object>(
  enumObject: O,
  passed: string
): boolean {
  return Object.values(enumObject).some((value) => value === passed);
}
// Reads a UTF-8 JSON file and parses it as T (no schema validation).
async function loadJson<T>(path: string): Promise<T> {
  const raw = await fs.readFile(path, { encoding: "utf-8" });
  return JSON.parse(raw) as T;
}

View File

@ -0,0 +1,28 @@
import * as winston from "winston";
/**
 * Creates the application-wide winston logger.
 * @param level  minimum level to emit (default "debug")
 * @param format "text" for colorized human-readable logs, "json" for
 *               structured logs (default)
 */
export function rootLogger(level = "debug", format: "text" | "json" = "json") {
  const consoleTransport = new winston.transports.Console({ level });
  return winston.createLogger({
    transports: [consoleTransport],
    format: format === "text" ? textFormat : jsonFormat,
  });
}
// Winston applies combined formats left-to-right, and the finalizing
// formatter (simple()/json()) must come LAST. Previously timestamp() and
// errors() were listed after the finalizer, so timestamps and stack traces
// never made it into the rendered output.
const textFormat = winston.format.combine(
  winston.format.errors({ stack: true }),
  winston.format.timestamp({
    format: "YYYY-MM-DD HH:mm:ss.SSS",
  }),
  winston.format.colorize(),
  winston.format.splat(),
  winston.format.simple()
);
const jsonFormat = winston.format.combine(
  winston.format.errors({ stack: true }),
  winston.format.timestamp(),
  winston.format.json()
);

View File

@ -0,0 +1,275 @@
import * as wh from "@certusone/wormhole-sdk";
import { Next, ParsedVaaWithBytes, sleep } from "relayer-engine";
import {
VaaKeyType,
RelayerPayloadId,
parseWormholeRelayerPayloadType,
parseWormholeRelayerSend,
deliveryInstructionsPrintable,
vaaKeyPrintable,
parseWormholeRelayerResend,
RedeliveryInstruction,
DeliveryInstruction,
packOverrides,
DeliveryOverrideArgs,
parseEVMExecutionInfoV1
} from "@certusone/wormhole-sdk/lib/cjs/relayer";
import { EVMChainId } from "@certusone/wormhole-sdk";
import { GRContext } from "./app";
import { BigNumber, ethers } from "ethers";
import { IWormholeRelayerDelivery__factory } from "@certusone/wormhole-sdk/lib/cjs/ethers-contracts";
/**
 * Entry point for every VAA routed to the generic relayer: dispatches on
 * the relayer payload type to the delivery or redelivery handler, then
 * passes control to the next middleware. Throws on unknown payload types.
 */
export async function processGenericRelayerVaa(ctx: GRContext, next: Next) {
  ctx.logger.info(`Processing generic relayer vaa`);
  const payloadId = parseWormholeRelayerPayloadType(ctx.vaa!.payload);
  switch (payloadId) {
    case RelayerPayloadId.Delivery:
      ctx.logger.info(`Detected delivery VAA, processing delivery payload...`);
      await processDelivery(ctx);
      break;
    case RelayerPayloadId.Redelivery:
      ctx.logger.info(
        `Detected redelivery VAA, processing redelivery payload...`
      );
      await processRedelivery(ctx);
      break;
    default:
      ctx.logger.error(`Expected GR Delivery payload type, found ${payloadId}`);
      throw new Error("Expected GR Delivery payload type");
  }
  await next();
}
/**
 * Handles a Delivery VAA: verifies it is addressed to the delivery
 * provider we operate on the emitter chain, then executes the delivery on
 * the target chain.
 */
async function processDelivery(ctx: GRContext) {
  const deliveryVaa = parseWormholeRelayerSend(ctx.vaa!.payload);
  const sourceDeliveryProvider = ethers.utils.getAddress(
    wh.tryUint8ArrayToNative(deliveryVaa.sourceDeliveryProvider, "ethereum")
  );
  // Anything addressed to another provider belongs to a different relayer.
  if (
    sourceDeliveryProvider !==
    ctx.deliveryProviders[ctx.vaa!.emitterChain as EVMChainId]
  ) {
    ctx.logger.info("Delivery vaa specifies different relay provider", {
      sourceDeliveryProvider: deliveryVaa.sourceDeliveryProvider,
    });
    return;
  }
  // BUGFIX: this async call was previously not awaited, so on-chain
  // delivery failures escaped this handler as unhandled promise rejections
  // and the middleware chain reported success before delivery completed.
  await processDeliveryInstruction(ctx, deliveryVaa, ctx.vaaBytes!);
}
/**
 * Handles a Redelivery VAA: verifies the provider, fetches the original
 * delivery VAA it references, validates the redelivery does not lower the
 * original guarantees, then re-executes the delivery with overrides.
 */
async function processRedelivery(ctx: GRContext) {
  const redeliveryVaa = parseWormholeRelayerResend(ctx.vaa!.payload);
  const sourceDeliveryProvider = ethers.utils.getAddress(
    wh.tryUint8ArrayToNative(redeliveryVaa.newSourceDeliveryProvider, "ethereum")
  );
  if (
    sourceDeliveryProvider !==
    ctx.deliveryProviders[ctx.vaa!.emitterChain as EVMChainId]
  ) {
    ctx.logger.info("Delivery vaa specifies different relay provider", {
      sourceDeliveryProvider: redeliveryVaa.newSourceDeliveryProvider,
    });
    return;
  }
  ctx.logger.info(
    `Redelivery requested for the following VAA: `,
    vaaKeyPrintable(redeliveryVaa.deliveryVaaKey)
  );
  let originalVaa = await ctx.fetchVaa(
    redeliveryVaa.deliveryVaaKey.chainId as wh.ChainId,
    Buffer.from(redeliveryVaa.deliveryVaaKey.emitterAddress!),
    redeliveryVaa.deliveryVaaKey.sequence!.toBigInt()
  );
  ctx.logger.info("Retrieved original VAA!");
  const delivery = parseWormholeRelayerSend(originalVaa.payload);
  if (!isValidRedelivery(ctx, delivery, redeliveryVaa)) {
    ctx.logger.info("Exiting redelivery process");
    return;
  } else {
    ctx.logger.info("Redelivery is valid, proceeding with redelivery");
    // BUGFIX: previously not awaited — failures in the on-chain delivery
    // escaped this handler as unhandled promise rejections.
    await processDeliveryInstruction(ctx, delivery, originalVaa.bytes, {
      newReceiverValue: redeliveryVaa.newRequestedReceiverValue,
      newExecutionInfo: redeliveryVaa.newEncodedExecutionInfo,
      redeliveryHash: ctx.vaa!.hash,
    });
  }
}
/**
 * Checks that a redelivery does not lower any guarantee of the original
 * delivery: same target chain, and at least the original refund rate,
 * receiver value, and gas limit. Logs the reason and returns false when
 * the redelivery must be skipped.
 */
function isValidRedelivery(
  ctx: GRContext,
  delivery: DeliveryInstruction,
  redelivery: RedeliveryInstruction
): boolean {
  if (delivery.targetChainId != redelivery.targetChainId) {
    ctx.logger.info(
      "Redelivery targetChain does not match original delivery targetChain"
    );
    ctx.logger.info(
      "Original targetChain: " +
        delivery.targetChainId +
        " Redelivery targetChain: " +
        redelivery.targetChainId
    );
    return false;
  }
  // TODO check that the sourceRelayerAddress is one of this relayer's
  // addresses (an empty `if (!redelivery.newSourceDeliveryProvider) {}`
  // block previously sat here as dead code and was removed).
  const [deliveryExecutionInfo] = parseEVMExecutionInfoV1(
    delivery.encodedExecutionInfo,
    0
  );
  const [redeliveryExecutionInfo] = parseEVMExecutionInfoV1(
    redelivery.newEncodedExecutionInfo,
    0
  );
  if (
    deliveryExecutionInfo.targetChainRefundPerGasUnused.gt(
      redeliveryExecutionInfo.targetChainRefundPerGasUnused
    )
  ) {
    ctx.logger.info(
      "Redelivery target chain refund per gas unused is less than original delivery target chain refund per gas unused"
    );
    ctx.logger.info(
      "Original refund: " +
        deliveryExecutionInfo.targetChainRefundPerGasUnused
          .toBigInt()
          .toLocaleString() +
        " Redelivery: " +
        redeliveryExecutionInfo.targetChainRefundPerGasUnused
          .toBigInt()
          .toLocaleString()
    );
    return false;
  }
  if (delivery.requestedReceiverValue.gt(redelivery.newRequestedReceiverValue)) {
    ctx.logger.info(
      "Redelivery requested receiverValue is less than original delivery requested receiverValue"
    );
    // BUGFIX: a stray `,` plus unary `+` previously split this message into
    // two logger arguments and rendered "NaN" in place of the redelivery
    // value; the label also incorrectly said "refund".
    ctx.logger.info(
      "Original receiverValue: " +
        delivery.requestedReceiverValue.toBigInt().toLocaleString() +
        " Redelivery: " +
        redelivery.newRequestedReceiverValue.toBigInt().toLocaleString()
    );
    return false;
  }
  // BUGFIX: `>` on BigNumber operands compares their string forms
  // lexicographically (e.g. "9" > "10"); use .gt() for numeric comparison.
  if (deliveryExecutionInfo.gasLimit.gt(redeliveryExecutionInfo.gasLimit)) {
    ctx.logger.info(
      "Redelivery gasLimit is less than original delivery gasLimit"
    );
    // BUGFIX: the second half of this message was previously passed as a
    // separate (dropped) argument, and the label incorrectly said "refund".
    ctx.logger.info(
      "Original gasLimit: " +
        deliveryExecutionInfo.gasLimit +
        " Redelivery: " +
        redeliveryExecutionInfo.gasLimit
    );
    return false;
  }
  return true;
}
// Executes one delivery (or redelivery, via `overrides`) on the target
// chain: fetches the additional VAAs referenced by the delivery manifest,
// computes the msg.value budget (receiver value + maximum possible
// refund), then calls WormholeRelayer.deliver() with a fixed 3M gas limit.
async function processDeliveryInstruction(
  ctx: GRContext,
  delivery: DeliveryInstruction,
  deliveryVaa: Buffer | Uint8Array,
  overrides?: DeliveryOverrideArgs
) {
  ctx.logger.info(`Fetching vaas from parsed delivery vaa manifest...`, {
    vaaKeys: delivery.vaaKeys.map(vaaKeyPrintable),
  });
  const vaaIds = delivery.vaaKeys.map((m) => ({
    emitterAddress: m.emitterAddress!,
    emitterChain: m.chainId! as wh.ChainId,
    sequence: m.sequence!.toBigInt(),
  }));
  let results = await ctx.fetchVaas({
    ids: vaaIds,
    // txHash: ctx.sourceTxHash,
  });
  ctx.logger.debug(`Processing delivery`, {
    deliveryVaa: deliveryInstructionsPrintable(delivery),
  });
  // const chainId = assertEvmChainId(ix.targetChain)
  const chainId = delivery.targetChainId as EVMChainId;
  // Receiver value: the redelivery override wins; otherwise the originally
  // requested value plus any extra receiver value.
  const receiverValue = overrides?.newReceiverValue
    ? overrides.newReceiverValue
    : (delivery.requestedReceiverValue.add(delivery.extraReceiverValue));
  // Max refund = refund-per-gas-unused * gasLimit, i.e. the refund owed if
  // the target call consumed no gas at all.
  const getMaxRefund = (encodedDeliveryInfo: Buffer) => {
    const [deliveryInfo,] = parseEVMExecutionInfoV1(encodedDeliveryInfo, 0);
    return deliveryInfo.targetChainRefundPerGasUnused.mul(deliveryInfo.gasLimit);
  }
  const maxRefund = getMaxRefund(overrides?.newExecutionInfo
    ? overrides.newExecutionInfo
    : delivery.encodedExecutionInfo);
  // msg.value the relayer must attach to cover both obligations.
  const budget = receiverValue.add(maxRefund);
  await ctx.wallets.onEVM(chainId, async ({ wallet }) => {
    const wormholeRelayer = IWormholeRelayerDelivery__factory.connect(
      ctx.wormholeRelayers[chainId],
      wallet
    );
    const encodedVMs = results.map((v) => v.bytes);
    const packedOverrides = overrides ? packOverrides(overrides) : [];
    // Dry-run estimate used only for logging; the actual send below uses
    // the same fixed 3M gas limit.
    const gasUnitsEstimate = await wormholeRelayer.estimateGas.deliver(encodedVMs, deliveryVaa, wallet.address, packedOverrides, {
      value: budget,
      gasLimit: 3000000,
    });
    const gasPrice = await wormholeRelayer.provider.getGasPrice();
    const estimatedTransactionFee = gasPrice.mul(gasUnitsEstimate);
    const estimatedTransactionFeeEther = ethers.utils.formatEther(
      estimatedTransactionFee
    );
    ctx.logger.info(
      `Estimated transaction cost (ether): ${estimatedTransactionFeeEther}`,
      {
        gasUnitsEstimate: gasUnitsEstimate.toString(),
        gasPrice: gasPrice.toString(),
        estimatedTransactionFee: estimatedTransactionFee.toString(),
        estimatedTransactionFeeEther,
        valueEther: ethers.utils.formatEther(budget),
      }
    );
    process.stdout.write("");
    await sleep(200);
    ctx.logger.debug("Sending 'deliver' tx...");
    const receipt = await wormholeRelayer
      .deliver(encodedVMs, deliveryVaa, wallet.address, packedOverrides, { value: budget, gasLimit: 3000000 })
      .then((x: any) => x.wait());
    logResults(ctx, receipt, chainId);
  });
}
/**
 * Logs the outcome of a delivery transaction: parses the Delivery event
 * emitted by the WormholeRelayer contract (when present), reports the
 * delivery status, and always logs the transaction hash.
 */
function logResults(
  ctx: GRContext,
  receipt: ethers.ContractReceipt,
  chainId: EVMChainId
) {
  const relayerAddress = ctx.wormholeRelayers[chainId];
  const deliveryLog = receipt.logs?.find(
    (entry: any) => entry.address === relayerAddress
  );
  if (deliveryLog) {
    const iface = IWormholeRelayerDelivery__factory.createInterface();
    const parsedLog = iface.parseLog(deliveryLog!);
    const logArgs = {
      recipientAddress: parsedLog.args[0],
      sourceChain: parsedLog.args[1],
      sourceSequence: parsedLog.args[2],
      vaaHash: parsedLog.args[3],
      status: parsedLog.args[4],
    };
    ctx.logger.info("Parsed Delivery event", logArgs);
    switch (logArgs.status) {
      case 0:
        ctx.logger.info("Delivery Success");
        break;
      case 1:
        ctx.logger.info("Receiver Failure");
        break;
      case 2:
        ctx.logger.info("Forwarding Failure");
        break;
    }
  }
  ctx.logger.info(
    `Relayed instruction to chain ${chainId}, tx hash: ${receipt.transactionHash}`
  );
}

View File

@ -0,0 +1,22 @@
{
"compilerOptions": {
"outDir": "lib",
"target": "esnext",
"module": "CommonJS",
"moduleResolution": "node",
"lib": ["es2019"],
"declaration": true,
"skipLibCheck": true,
"allowJs": true,
"strict": true,
"forceConsistentCasingInFileNames": true,
"noFallthroughCasesInSwitch": true,
"isolatedModules": true,
"resolveJsonModule": true,
"downlevelIteration": true,
"sourceMap": true,
"esModuleInterop": true
},
"include": ["src"],
"exclude": ["node_modules", "**/__tests__/*"]
}

View File

@ -3,7 +3,7 @@
"^.+\\.(t|j)sx?$": "ts-jest"
},
"testRegex": "(/__tests__/.*|(\\.|/)(test|spec))\\.(jsx?|tsx?)$",
"testPathIgnorePatterns": ["__tests__/utils", "__tests__/wormhole_relayer.ts"],
"testPathIgnorePatterns": ["__tests__/utils"],
"moduleFileExtensions": ["ts", "tsx", "js", "jsx", "json", "node"],
"testTimeout": 60000
}

View File

@ -3,4 +3,5 @@ set -e
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' guardian:6060/readyz)" != "200" ]]; do sleep 5; done
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' spy:6060/metrics)" != "200" ]]; do sleep 5; done
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' ibc-relayer:7597/debug/pprof/)" != "200" ]]; do sleep 5; done
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' relayer-engine:3000/metrics)" != "200" ]]; do sleep 5; done
CI=true npm --prefix ../sdk/js run test-ci