chore: cleanup repo

This commit is contained in:
Ali Behjati 2023-10-18 18:06:34 +02:00
parent c0057809f5
commit 49d1a579c3
101 changed files with 18 additions and 8895 deletions

View File

@@ -37,7 +37,7 @@ jobs:
        env:
          AWS_REGION: us-east-1
      - run: |
-          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f third_party/pyth/Dockerfile.p2w-attest .
+          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f wormhole-attester/client/Dockerfile.p2w-attest .
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
        env:
          ECR_REGISTRY: public.ecr.aws

View File

@@ -39,7 +39,7 @@ jobs:
      - uses: aws-actions/amazon-ecr-login@v1
        id: ecr_login
      - run: |
-          DOCKER_BUILDKIT=1 docker build -t lerna -f tilt_devnet/docker_images/Dockerfile.lerna .
+          DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f price_pusher/Dockerfile .
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
        env:

View File

@@ -37,7 +37,7 @@ jobs:
        env:
          AWS_REGION: us-east-1
      - run: |
-          DOCKER_BUILDKIT=1 docker build -t lerna -f tilt_devnet/docker_images/Dockerfile.lerna .
+          DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f price_service/server/Dockerfile .
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
        env:

View File

@@ -24,7 +24,7 @@ jobs:
        id: ecr_login
      - name: Build docker image
        run: |
-          DOCKER_BUILDKIT=1 docker build -t lerna -f tilt_devnet/docker_images/Dockerfile.lerna .
+          DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f governance/xc_admin/packages/xc_admin_frontend/Dockerfile .
        env:
          ECR_REGISTRY: ${{ steps.ecr_login.outputs.registry }}

View File

@@ -24,7 +24,7 @@ jobs:
      - uses: aws-actions/amazon-ecr-login@v1
        id: ecr_login
      - run: |
-          DOCKER_BUILDKIT=1 docker build -t lerna -f tilt_devnet/docker_images/Dockerfile.lerna .
+          DOCKER_BUILDKIT=1 docker build -t lerna -f Dockerfile.lerna .
          DOCKER_BUILDKIT=1 docker build -t $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG -f governance/xc_admin/Dockerfile .
          docker push $ECR_REGISTRY/$ECR_REPOSITORY:$IMAGE_TAG
        env:

Tiltfile (354 lines deleted)
View File

@@ -1,354 +0,0 @@
# This Tiltfile contains the deployment and build config for the Pyth Crosschain development environment.
#
# We use Buildkit cache mounts and careful layering to avoid unnecessary rebuilds - almost
# all source code changes result in small, incremental rebuilds. Dockerfiles are written such
# that, for example, changing the contract source code won't cause Solana itself to be rebuilt.
#
image_registry = os.environ.get("TILT_IMAGE_REGISTRY")
namespace = os.environ.get("TILT_NAMESPACE", "development")
load("ext://namespace", "namespace_create", "namespace_inject")
load("ext://secret", "secret_yaml_generic")
namespace_create(namespace)
if image_registry:
    default_registry(image_registry, single_name="development")
allow_k8s_contexts(k8s_context())
# Disable telemetry by default
analytics_settings(False)
# Moar updates (default is 3)
update_settings(max_parallel_updates=10)
# Runtime configuration
config.define_bool("ci", False, "We are running in CI")
config.define_bool("manual", False, "Set TRIGGER_MODE_MANUAL by default")
config.define_string("num", False, "Number of guardian nodes to run")
# You do not usually need to set this argument - this argument is for debugging only. If you do use a different
# namespace, note that the "wormhole" namespace is hardcoded in tests, and don't forget to specify the argument
# when running "tilt down".
#
config.define_string("namespace", False, "Kubernetes namespace to use")
# These arguments will enable writing Guardian events to a cloud BigTable instance.
# Writing to a cloud BigTable is optional. These arguments are not required to run the devnet.
config.define_string("gcpProject", False, "GCP project ID for BigTable persistence")
config.define_string("bigTableKeyPath", False, "Path to BigTable json key file")
# When running Tilt on a server, this can be used to set the public hostname Tilt runs on
# for service links in the UI to work.
config.define_string("webHost", False, "Public hostname for port forwards")
# Components
config.define_bool("pyth", False, "Enable Pyth-to-Wormhole component")
cfg = config.parse()
num_guardians = int(cfg.get("num", "1"))
gcpProject = cfg.get("gcpProject", "local-dev")
bigTableKeyPath = cfg.get("bigTableKeyPath", "./event_database/devnet_key.json")
webHost = cfg.get("webHost", "localhost")
if cfg.get("manual", False):
    trigger_mode = TRIGGER_MODE_MANUAL
else:
    trigger_mode = TRIGGER_MODE_AUTO

def k8s_yaml_with_ns(objects):
    return k8s_yaml(namespace_inject(objects, namespace))
# Build lerna docker base for npm project
docker_build(
ref = "lerna",
context = ".",
dockerfile = "tilt_devnet/docker_images/Dockerfile.lerna",
)
def build_node_yaml():
    node_yaml = read_yaml_stream("tilt_devnet/k8s/node.yaml")
    for obj in node_yaml:
        if obj["kind"] == "StatefulSet" and obj["metadata"]["name"] == "guardian":
            obj["spec"]["replicas"] = num_guardians
            container = obj["spec"]["template"]["spec"]["containers"][0]
            if container["name"] != "guardiand":
                fail("container 0 is not guardiand")
            container["command"] += ["--devNumGuardians", str(num_guardians)]
    return encode_yaml_stream(node_yaml)
k8s_yaml_with_ns(build_node_yaml())
k8s_resource(
"guardian",
resource_deps = ["eth-devnet", "eth-devnet2", "terra-terrad", "solana-devnet"],
port_forwards = [
port_forward(6060, name = "Debug/Status Server [:6060]", host = webHost),
port_forward(7070, name = "Public gRPC [:7070]", host = webHost),
port_forward(7071, name = "Public REST [:7071]", host = webHost),
port_forward(2345, name = "Debugger [:2345]", host = webHost),
],
labels = ["guardian"],
trigger_mode = trigger_mode,
)
# spy
k8s_yaml_with_ns("tilt_devnet/k8s/spy.yaml")
k8s_resource(
"spy",
resource_deps = ["guardian"],
port_forwards = [
port_forward(6061, container_port = 6060, name = "Debug/Status Server [:6061]", host = webHost),
port_forward(7072, name = "Spy gRPC [:7072]", host = webHost),
],
labels = ["guardian"],
trigger_mode = trigger_mode,
)
# solana client cli (used for devnet setup)
docker_build(
ref = "bridge-client",
context = ".",
dockerfile = "tilt_devnet/docker_images/Dockerfile.client",
)
# solana smart contract
docker_build(
ref = "solana-contract",
context = ".",
dockerfile = "tilt_devnet/docker_images/Dockerfile.solana",
)
# solana local devnet
k8s_yaml_with_ns("tilt_devnet/k8s/solana-devnet.yaml")
k8s_resource(
"solana-devnet",
port_forwards = [
port_forward(8899, name = "Solana RPC [:8899]", host = webHost),
port_forward(8900, name = "Solana WS [:8900]", host = webHost),
port_forward(9000, name = "Solana PubSub [:9000]", host = webHost),
],
labels = ["solana"],
trigger_mode = trigger_mode,
)
# eth devnet
docker_build(
ref = "eth-node",
context = "./",
dockerfile = "tilt_devnet/docker_images/Dockerfile.ethereum",
# sync external scripts for incremental development
# (everything else needs to be restarted from scratch for determinism)
#
# This relies on --update-mode=exec to work properly with a non-root user.
# https://github.com/tilt-dev/tilt/issues/3708
live_update = [
sync("./ethereum/src", "/home/node/app/src"),
],
)
# pyth autopublisher
docker_build(
ref = "pyth",
context = ".",
dockerfile = "third_party/pyth/Dockerfile.pyth",
)
k8s_yaml_with_ns("./tilt_devnet/k8s/pyth.yaml")
k8s_resource(
"pyth",
resource_deps = ["solana-devnet"],
labels = ["pyth"],
trigger_mode = trigger_mode,
)
# pyth2wormhole client autoattester
docker_build(
ref = "p2w-attest",
context = ".",
dockerfile = "./third_party/pyth/Dockerfile.p2w-attest",
)
k8s_yaml_with_ns("tilt_devnet/k8s/p2w-attest.yaml")
k8s_resource(
"p2w-attest",
resource_deps = ["solana-devnet", "pyth", "guardian"],
port_forwards = [port_forward(3000, name = "metrics", host = webHost)],
labels = ["pyth"],
trigger_mode = trigger_mode,
)
# attestations checking script
docker_build(
ref = "check-attestations",
context = ".",
only = ["./third_party"],
dockerfile = "./third_party/pyth/Dockerfile.check-attestations",
)
k8s_yaml_with_ns("tilt_devnet/k8s/check-attestations.yaml")
k8s_resource(
"check-attestations",
resource_deps = ["pyth-price-server", "pyth", "p2w-attest"],
labels = ["pyth"],
trigger_mode = trigger_mode,
)
# Pyth2wormhole relay
docker_build(
ref = "p2w-relay",
context = ".",
dockerfile = "third_party/pyth/p2w-relay/Dockerfile.pyth_relay",
)
k8s_yaml_with_ns("tilt_devnet/k8s/p2w-terra-relay.yaml")
k8s_resource(
"p2w-terra-relay",
resource_deps = ["pyth", "p2w-attest", "spy", "terra-terrad"],
port_forwards = [
port_forward(4200, name = "Rest API (Status + Query) [:4200]", host = webHost),
port_forward(8081, name = "Prometheus [:8081]", host = webHost)],
labels = ["pyth"]
)
k8s_yaml_with_ns("tilt_devnet/k8s/p2w-evm-relay.yaml")
k8s_resource(
"p2w-evm-relay",
resource_deps = ["pyth", "p2w-attest", "spy", "eth-devnet"],
port_forwards = [
port_forward(4201, container_port = 4200, name = "Rest API (Status + Query) [:4201]", host = webHost),
port_forward(8082, container_port = 8081, name = "Prometheus [:8082]", host = webHost)],
labels = ["pyth"]
)
# Pyth Price server
docker_build(
ref = "pyth-price-server",
context = ".",
dockerfile = "price_service/server/Dockerfile",
)
k8s_yaml_with_ns("tilt_devnet/k8s/pyth-price-server.yaml")
k8s_resource(
"pyth-price-server",
resource_deps = ["pyth", "p2w-attest", "spy", "eth-devnet"],
port_forwards = [
port_forward(4202, container_port = 4200, name = "Rest API (Status + Query) [:4202]", host = webHost),
port_forward(8083, container_port = 8081, name = "Prometheus [:8083]", host = webHost)],
labels = ["pyth"]
)
k8s_yaml_with_ns("tilt_devnet/k8s/eth-devnet.yaml")
k8s_resource(
"eth-devnet",
port_forwards = [
port_forward(8545, name = "Ganache RPC [:8545]", host = webHost),
],
labels = ["evm"],
trigger_mode = trigger_mode,
)
k8s_resource(
"eth-devnet2",
port_forwards = [
port_forward(8546, name = "Ganache RPC [:8546]", host = webHost),
],
labels = ["evm"],
trigger_mode = trigger_mode,
)
# terra devnet
docker_build(
ref = "terra-image",
context = "./target_chains/cosmwasm/devnet",
dockerfile = "./target_chains/cosmwasm/devnet/Dockerfile",
)
docker_build(
ref = "cosmwasm-contracts",
context = ".",
dockerfile = "tilt_devnet/docker_images/Dockerfile.cosmwasm",
)
k8s_yaml_with_ns("tilt_devnet/k8s/terra-devnet.yaml")
k8s_resource(
"terra-terrad",
port_forwards = [
port_forward(26657, name = "Terra RPC [:26657]", host = webHost),
port_forward(1317, name = "Terra LCD [:1317]", host = webHost),
],
labels = ["terra"],
trigger_mode = trigger_mode,
)
k8s_resource(
"terra-postgres",
labels = ["terra"],
trigger_mode = trigger_mode,
)
k8s_resource(
"terra-fcd",
resource_deps = ["terra-terrad", "terra-postgres"],
port_forwards = [port_forward(3060, name = "Terra FCD [:3060]", host = webHost)],
labels = ["terra"],
trigger_mode = trigger_mode,
)
docker_build(
ref = "prometheus",
context = ".",
dockerfile = "tilt_devnet/docker_images/Dockerfile.prometheus",
)
k8s_yaml_with_ns("tilt_devnet/k8s/prometheus.yaml")
k8s_resource(
"prometheus",
port_forwards = [port_forward(9090, name = "Prometheus dashboard", host = webHost)],
labels = ["prometheus"],
trigger_mode = trigger_mode,
)
docker_build(
ref = "multisig",
context = ".",
dockerfile = "tilt_devnet/docker_images/Dockerfile.multisig",
)
k8s_yaml_with_ns("tilt_devnet/k8s/multisig.yaml")
k8s_resource(
"multisig",
resource_deps = ["solana-devnet"],
labels = ["solana"],
trigger_mode = trigger_mode,
)
# Pyth Price Client JS e2e test
docker_build(
ref = "pyth-price-client-js",
context = ".",
dockerfile = "price_service/client/js/Dockerfile",
)
k8s_yaml_with_ns("tilt_devnet/k8s/pyth-price-client-js.yaml")
k8s_resource(
"pyth-price-client-js",
resource_deps = ["pyth-price-server"],
labels = ["pyth"]
)

View File

@@ -1,4 +1,4 @@
-# Defined in tilt_devnet/docker_images/Dockerfile.lerna
+# Defined in Dockerfile.lerna
 FROM lerna
 USER root

View File

@@ -1,4 +1,4 @@
-# Defined in tilt_devnet/docker_images/Dockerfile.lerna
+# Defined in Dockerfile.lerna
 FROM lerna
 USER root

package-lock.json (generated, 3306 lines changed)

File diff suppressed because it is too large.

View File

@@ -9,7 +9,6 @@
    "price_service/client/js",
    "pythnet/message_buffer",
    "target_chains/aptos/sdk/js",
-   "target_chains/cosmwasm/sdk/js",
    "target_chains/cosmwasm/tools",
    "target_chains/cosmwasm/deploy-scripts",
    "target_chains/ethereum/contracts",
@@ -18,7 +17,6 @@
    "target_chains/ethereum/examples/oracle_swap/app",
    "target_chains/sui/sdk/js",
    "target_chains/sui/cli",
-   "third_party/pyth/p2w-relay",
    "wormhole_attester/sdk/js",
    "contract_manager"
  ],

View File

@@ -1,4 +1,4 @@
-# Defined in tilt_devnet/docker_images/Dockerfile.lerna
+# Defined in Dockerfile.lerna
 FROM lerna:latest
 USER root

View File

@@ -53,7 +53,7 @@ services:
  # Use this to build the price pusher from source. A dependency of the pusher docker
  # image is the pyth-crosschain monorepo lerna docker image. Build the lerna image
  # using the following command from the repo root:
- # `docker buildx build -f tilt_devnet/docker_images/Dockerfile.lerna -t lerna .`
+ # `docker buildx build -f Dockerfile.lerna -t lerna .`
  #
  # Please note that the docker build from source only works on x86_64 platforms
  # and doesn't work on arm platforms (like Apple M1/M2).

View File

@@ -53,7 +53,7 @@ services:
  # Use this to build the price pusher from source. A dependency of the pusher docker
  # image is the pyth-crosschain monorepo lerna docker image. Build the lerna image
  # using the following command from the repo root:
- # `docker buildx build -f tilt_devnet/docker_images/Dockerfile.lerna -t lerna .`
+ # `docker buildx build -f Dockerfile.lerna -t lerna .`
  #
  # Please note that the docker build from source only works on x86_64 platforms
  # and doesn't work on arm platforms (like Apple M1/M2).

View File

@@ -1,4 +1,4 @@
-# Defined in tilt_devnet/docker_images/Dockerfile.lerna
+# Defined in Dockerfile.lerna
 FROM lerna
 USER root

View File

@@ -1,4 +1,4 @@
-# Defined in tilt_devnet/docker_images/Dockerfile.lerna
+# Defined in Dockerfile.lerna
 FROM lerna
 USER root

View File

@@ -1,119 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
#
# This script provisions a working Wormhole dev environment on a blank Debian VM.
# It expects to run as a user without root permissions.
#
# Can safely run multiple times to update to the latest versions.
#
# Make sure this is Debian 10 or 11
if [ "$(lsb_release -rs)" != "10" ] && [ "$(lsb_release -rs)" != "11" ]; then
echo "This script is only for Debian 10 or 11"
exit 1
fi
# Refuse to run as root
if [[ $EUID -eq 0 ]]; then
echo "This script must not be run as root" 1>&2
exit 1
fi
# Check if we can use sudo to get root
if ! sudo -n true; then
echo "This script requires sudo to run."
exit 1
fi
# Make sure Docker Debian package isn't installed
if dpkg -s docker.io &>/dev/null; then
echo "Docker is already installed from Debian's repository. Please uninstall it first."
exit 1
fi
# Upgrade everything
# (this ensures that an existing Docker CE installation is up to date before continuing)
sudo apt-get update && sudo apt-get upgrade -y
# Install dependencies
sudo apt-get -y install bash-completion git git-review vim
# Install Go
ARCH=amd64
GO=1.17.5
(
if [[ -d /usr/local/go ]]; then
sudo rm -rf /usr/local/go
fi
TMP=$(mktemp -d)
(
cd "$TMP"
curl -OJ "https://dl.google.com/go/go${GO}.linux-${ARCH}.tar.gz"
sudo tar -C /usr/local -xzf "go${GO}.linux-${ARCH}.tar.gz"
echo 'PATH=/usr/local/go/bin:$PATH' | sudo tee /etc/profile.d/local_go.sh
)
rm -rf "$TMP"
)
. /etc/profile.d/local_go.sh
# Install Docker and add ourselves to Docker group
if [[ ! -f /usr/bin/docker ]]; then
TMP=$(mktemp -d)
(
cd "$TMP"
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh get-docker.sh
)
rm -rf "$TMP"
sudo gpasswd -a $USER docker
fi
sudo systemctl enable --now docker
# Install Minikube
TMP=$(mktemp -d)
(
cd "$TMP"
curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube_latest_amd64.deb
sudo dpkg -i minikube_latest_amd64.deb
)
rm -rf "$TMP"
# Install tilt
curl -fsSL https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.sh | sudo bash
# Shell aliases
cat <<'EOF' | sudo tee /etc/profile.d/wormhole_aliases.sh
alias kubectl="minikube kubectl --"
alias vi=vim
alias kc=kubectl
. <(kubectl completion bash)
. <(minikube completion bash)
complete -F __start_kubectl kc
function use-namespace {
kubectl config set-context --current --namespace=$1
}
export DOCKER_BUILDKIT=1
alias start-recommended-minikube="minikube start --driver=docker --kubernetes-version=v1.22.3 --cpus=$(nproc) --memory=16G --disk-size=120g --namespace=wormhole"
EOF
cat <<EOF
┍━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┑
│                                                                 │
│                             SUCCESS                             │
│                                                                 │
│           Re-log into your session to apply changes.            │
│                                                                 │
┕━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┙
EOF

View File

@@ -1,92 +0,0 @@
#!/bin/bash
# This script install foundry and the solidity compiler required to build the
# ethereum contracts. Foundry itself provides a mechanism to install solc, but
# it doesn't work with certain firewall configurations.
set -euo pipefail
# check if foundry.toml exists
if [ ! -f foundry.toml ]; then
echo "foundry.toml not found. Please call from the ethereum directory." >& 2
exit 1
fi
# Read compiler version from foundry.toml
SOLC_VERSION=$(grep solc_version foundry.toml | cut -d'=' -f2 | tr -d "' ") || true
if [ -z "$SOLC_VERSION" ]; then
echo "solc_version not found in foundry.toml." >& 2
exit 1
fi
main() {
OS=$(uname -s)
case "$OS" in
Darwin)
install_mac
;;
Linux)
install_linux
;;
*)
echo "Unsupported OS: $OS"
exit 1
;;
esac
}
function install_mac() {
if ! command -v brew > /dev/null; then
echo "brew is unavailable. Please install: https://brew.sh"
fi
if ! brew list libusb > /dev/null 2>&1; then
echo "Installing libusb"
brew install libusb
fi
if ! command -v foundryup > /dev/null; then
curl -L https://foundry.paradigm.xyz --silent | bash
"$HOME/.foundry/bin/foundryup"
fi
INSTALL_DIR="$HOME/.svm/$SOLC_VERSION"
mkdir -p "$INSTALL_DIR"
SOLC_PATH="$INSTALL_DIR/solc-$SOLC_VERSION"
if [ ! -f "$SOLC_PATH" ]; then
echo "Installing solc-$SOLC_VERSION"
curl -L --silent "https://github.com/ethereum/solidity/releases/download/v$SOLC_VERSION/solc-macos" > "$SOLC_PATH"
chmod +x "$SOLC_PATH"
echo "Installed $SOLC_PATH"
else
echo "Solidity compiler found: $SOLC_PATH"
fi
}
function install_linux() {
if ! command -v foundryup > /dev/null; then
curl -L https://foundry.paradigm.xyz --silent | bash
"$HOME/.foundry/bin/foundryup"
fi
INSTALL_DIR="$HOME/.svm/$SOLC_VERSION"
mkdir -p "$INSTALL_DIR"
SOLC_PATH="$INSTALL_DIR/solc-$SOLC_VERSION"
if [ ! -f "$SOLC_PATH" ]; then
echo "Installing solc-$SOLC_VERSION"
curl -L --silent "https://github.com/ethereum/solidity/releases/download/v$SOLC_VERSION/solc-static-linux" > "$SOLC_PATH"
chmod +x "$SOLC_PATH"
echo "Installed $SOLC_PATH"
else
echo "Solidity compiler found: $SOLC_PATH"
fi
}
main "$@"; exit

View File

@@ -1,6 +0,0 @@
#!/usr/bin/env bash
while : ; do
kubectl logs --tail=1000 --follow=true $1 guardiand
sleep 1
done

View File

@@ -1,9 +0,0 @@
module.exports = {
root: true,
parser: "@typescript-eslint/parser",
plugins: ["@typescript-eslint"],
extends: ["eslint:recommended", "plugin:@typescript-eslint/recommended"],
rules: {
"@typescript-eslint/no-explicit-any": "off",
},
};

View File

@@ -1 +0,0 @@
lib

View File

@@ -1,115 +0,0 @@
# Pyth Terra JS
[Pyth](https://pyth.network/) provides real-time pricing data in a variety of asset classes, including cryptocurrency, equities, FX and commodities. This library allows you to use these real-time prices in Terra DeFi protocols.
## Installation
### npm
```
$ npm install --save @pythnetwork/pyth-terra-js
```
### Yarn
```
$ yarn add @pythnetwork/pyth-terra-js
```
## Quickstart
Pyth stores prices off-chain to minimize gas fees, which allows us to offer a wider selection of products and faster update times.
See [How Pyth Works in Terra](#how-pyth-works-in-terra) for more details about this approach. In order to use Pyth prices on chain,
they must be fetched from an off-chain price service. The TerraPriceServiceConnection class can be used to interact with these services,
providing a way to fetch these prices directly in your code. The following example wraps an existing RPC provider and shows how to consume
Pyth prices before submitting them to Terra:
```typescript
const connection = new TerraPriceServiceConnection(
"https://xc-testnet.pyth.network"
); // See Price Service endpoints section below for other endpoints
const priceIds = [
// You can find the ids of prices at https://pyth.network/developers/price-feeds#terra-testnet
"f9c0172ba10dfa4d19088d94f5bf61d3b54d5bd7483a322a982e1373ee8ea31b", // BTC/USD price id in testnet
"6de025a4cf28124f8ea6cb8085f860096dbc36d9c40002e221fc449337e065b2", // LUNA/USD price id in testnet
];
// `getLatestPriceFeeds` returns a `PriceFeed` for each price id. It contains all information about a price and has
// utility functions to get the current and exponentially-weighted moving average price, and other functionality.
const priceFeeds = await connection.getLatestPriceFeeds(priceIds);
// Get the price if it is not older than 60 seconds from the current time.
console.log(priceFeeds[0].getPriceNoOlderThan(60)); // Price { conf: '1234', expo: -8, price: '12345678' }
// Get the exponentially-weighted moving average price if it is not older than 60 seconds from the current time.
console.log(priceFeeds[1].getEmaPriceNoOlderThan(60));
// By subscribing to the price feeds you can get their updates realtime.
connection.subscribePriceFeedUpdates(priceIds, (priceFeed) => {
console.log("Received a new price feed update!");
console.log(priceFeed.getPriceNoOlderThan(60));
});
// When using subscription, make sure to close the websocket upon termination to finish the process gracefully.
setTimeout(() => {
connection.closeWebSocket();
}, 60000);
// In order to use Pyth prices in your protocol you need to submit the latest price to the Terra network alongside your
// own transactions. `getPriceUpdateMessages` creates messages that can update the prices.
const pythContractAddr = CONTRACT_ADDR["testnet"];
const pythMsgs = await connection.getPriceUpdateMessages(
priceIds,
pythContractAddr,
wallet.key.accAddress
);
const tx = await wallet.createAndSignTx({
msgs: [...pythMsgs, otherMsg, anotherMsg],
});
```
We strongly recommend reading our guide which explains [how to work with Pyth price feeds](https://docs.pyth.network/documentation/pythnet-price-feeds/best-practices).
### Examples
There are two examples in [examples](./src/examples/).
#### TerraPriceServiceClient
[This example](./src/examples/TerraPriceServiceClient.ts) fetches `PriceFeed` updates using both an HTTP-request API and a streaming websocket API. You can run it with `npm run example-client`. A full command that prints BTC and LUNA price feeds, on the testnet network, looks like so:
```bash
npm run example-client -- --endpoint https://xc-testnet.pyth.network --price-ids f9c0172ba10dfa4d19088d94f5bf61d3b54d5bd7483a322a982e1373ee8ea31b 6de025a4cf28124f8ea6cb8085f860096dbc36d9c40002e221fc449337e065b2
```
#### TerraRelay
[This example](./src/examples/TerraRelay.ts) shows how to update prices on the Terra network. It does the following:
1. Creates an update message for each given price id.
2. Creates a transaction to update those prices.
3. Submits it to the network and will print the txhash if successful.
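A minimal sketch of these three steps, adapted from the example source (it assumes the `connection`, `wallet`, `lcd`, `priceIds`, and `pythContractAddr` objects are set up as in the Quickstart above, inside an async function):
```typescript
// 1. Create an update message for each given price id.
const msgs = await connection.getPriceUpdateMessages(
  priceIds,
  pythContractAddr,
  wallet.key.accAddress
);
// 2. Create (and sign) a transaction that updates those prices.
const tx = await wallet.createAndSignTx({ msgs });
// 3. Submit it to the network and print the txhash if successful.
const result = await lcd.tx.broadcastSync(tx);
console.log(result.txhash);
```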
You can run this example with `npm run example-relay`. A full command that updates BTC and LUNA prices on the testnet network looks like so:
```bash
npm run example-relay -- --network testnet --mnemonic "my good mnemonic" --endpoint https://xc-testnet.pyth.network --price-ids f9c0172ba10dfa4d19088d94f5bf61d3b54d5bd7483a322a982e1373ee8ea31b 6de025a4cf28124f8ea6cb8085f860096dbc36d9c40002e221fc449337e065b2
```
## How Pyth Works in Terra
Pyth prices are published on Pythnet, and relayed to Terra using the [Wormhole Network](https://wormholenetwork.com/) as a cross-chain message passing bridge. The Wormhole Network observes when Pyth prices on Pythnet have changed and publishes an off-chain signed message attesting to this fact. This is explained in more detail [here](https://docs.wormholenetwork.com/wormhole/).
This signed message can then be submitted to the Pyth contract on the Terra network, which will verify the Wormhole message and update the Pyth Terra contract with the new price.
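As a rough sketch, that submission boils down to a single CosmWasm execute message; this mirrors the `TerraPriceServiceConnection` source in this SDK (the arguments here are placeholders):
```typescript
import { MsgExecuteContract } from "@terra-money/terra.js";

// Build the execute message that submits a signed Wormhole VAA (base64) to the
// Pyth contract on Terra; the contract verifies the VAA and stores the new price.
function buildSubmitVaaMsg(
  senderAddr: string,
  pythContractAddr: string,
  vaa: string
): MsgExecuteContract {
  return new MsgExecuteContract(senderAddr, pythContractAddr, {
    submit_vaa: { data: vaa },
  });
}
```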
### On-demand price updates
Price updates are not submitted on the Terra network automatically: rather, when a consumer needs to use the value of a price they should first submit the latest Wormhole update for that price to Terra. This will make the most recent price update available on-chain for Terra contracts to use.
## Price Service endpoints
Public endpoints for the Price Service are provided for both mainnet and testnet. These can be used regardless of which network you deploy your own contracts to as long as it is a Pyth supported network. For example, you can use the testnet Price Service whether you are deploying your contract to the BNB or Polygon testnet.
| network | url |
| ------- | ------------------------------- |
| mainnet | https://xc-mainnet.pyth.network |
| testnet | https://xc-testnet.pyth.network |

View File

@@ -1,5 +0,0 @@
/** @type {import('ts-jest/dist/types').InitialOptionsTsJest} */
module.exports = {
preset: "ts-jest",
testEnvironment: "node",
};

View File

@@ -1,56 +0,0 @@
{
"name": "@pythnetwork/pyth-terra-js",
"version": "1.5.1",
"description": "Pyth Network Terra Utils in JS",
"homepage": "https://pyth.network",
"author": {
"name": "Pyth Data Association"
},
"main": "lib/index.js",
"types": "lib/index.d.ts",
"files": [
"lib/**/*"
],
"repository": {
"type": "git",
"url": "https://github.com/pyth-network/pyth-crosschain",
"directory": "target_chains/cosmwasm/sdk/js"
},
"publishConfig": {
"access": "public"
},
"scripts": {
"test": "jest --passWithNoTests",
"build": "tsc",
"example-client": "npm run build && node lib/examples/TerraPriceServiceClient.js",
"example-relay": "npm run build && node lib/examples/TerraRelay.js",
"format": "prettier --write \"src/**/*.ts\"",
"lint": "eslint src/",
"prepublishOnly": "npm run build && npm test && npm run lint",
"preversion": "npm run lint",
"version": "npm run format && git add -A src"
},
"keywords": [
"pyth",
"oracle"
],
"license": "Apache-2.0",
"devDependencies": {
"@types/jest": "^29.4.0",
"@types/node": "^18.11.18",
"@types/yargs": "^17.0.10",
"@typescript-eslint/eslint-plugin": "^5.21.0",
"@typescript-eslint/parser": "^5.21.0",
"eslint": "^8.14.0",
"jest": "^29.4.1",
"prettier": "^2.6.2",
"ts-jest": "^29.0.5",
"typescript": "^4.6.3",
"yargs": "^17.4.1"
},
"dependencies": {
"@pythnetwork/price-service-client": "*",
"@terra-money/terra.js": "^3.0.11",
"axios": "^0.26.1"
}
}

View File

@@ -1,43 +0,0 @@
import {
PriceServiceConnection,
HexString,
} from "@pythnetwork/price-service-client";
import { MsgExecuteContract } from "@terra-money/terra.js";
export class TerraPriceServiceConnection extends PriceServiceConnection {
/**
* Creates Terra messages for updating given price feeds.
* The messages can be included alongside other messages in a single transaction.
* They will succeed even if the prices are updated with newer messages.
*
* Example usage:
* ```typescript
* const pythContractAddr = CONTRACT_ADDR['testnet'];
* const pythMsgs = await connection.getPythPriceUpdateMessage(priceIds, pythContractAddr, wallet.key.accAddress);
* const tx = await wallet.createAndSignTx({ msgs: [...pythMsgs, otherMsg, anotherMsg] });
* ```
*
* This will throw an axios error if there is a network problem or the price service returns a non-OK response (e.g., invalid price ids).
*
* @param priceIds Array of hex-encoded price ids without leading 0x.
* @param pythContractAddr: Pyth contract address. You can use CONTRACT_ADDR, which contains Pyth contract addresses on
* the networks that Pyth is live on.
* @param senderAddr: Sender address of the created messages. Sender should sign and pay the transaction that contains them.
* @returns Array of Terra messages that can be included in a transaction to update the given prices.
*/
async getPriceUpdateMessages(
priceIds: HexString[],
pythContractAddr: string,
senderAddr: string
): Promise<MsgExecuteContract[]> {
const latestVaas = await this.getLatestVaas(priceIds);
return latestVaas.map(
(vaa) =>
new MsgExecuteContract(senderAddr, pythContractAddr, {
submit_vaa: {
data: vaa,
},
})
);
}
}

View File

@@ -1,68 +0,0 @@
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import { CONTRACT_ADDR, TerraPriceServiceConnection } from "../index";
function sleep(ms: number) {
return new Promise((resolve) => setTimeout(resolve, ms));
}
const argv = yargs(hideBin(process.argv))
.option("endpoint", {
description:
"Endpoint URL for the Price Service. e.g: https://endpoint/example",
type: "string",
required: true,
})
.option("price-ids", {
description:
"Space separated price feed ids (in hex) to fetch." +
" e.g: f9c0172ba10dfa4d19088d...",
type: "array",
required: true,
})
.help()
.alias("help", "h")
.parserConfiguration({
"parse-numbers": false,
})
.parseSync();
async function run() {
const connection = new TerraPriceServiceConnection(argv.endpoint, {
logger: console, // Providing logger will allow the connection to log its events.
});
const priceIds = argv.priceIds as string[];
console.log(priceIds);
const priceFeeds = await connection.getLatestPriceFeeds(priceIds);
console.log(priceFeeds);
console.log(priceFeeds?.at(0)?.getPriceNoOlderThan(60));
const msgs = await connection.getPriceUpdateMessages(
priceIds,
CONTRACT_ADDR["testnet"],
"terra123456789abcdefghijklmonpqrstuvwxyz1234"
);
console.log(msgs);
console.log("Subscribing to price feed updates.");
await connection.subscribePriceFeedUpdates(priceIds, (priceFeed) => {
console.log(
`Current price for ${priceFeed.id}: ${JSON.stringify(
priceFeed.getPriceNoOlderThan(60)
)}.`
);
});
await sleep(600000);
// To close the websocket you should either unsubscribe from all
// price feeds or call `connection.closeWebSocket()` directly.
console.log("Unsubscribing from price feed updates.");
await connection.unsubscribePriceFeedUpdates(priceIds);
}
run();

View File

@@ -1,128 +0,0 @@
import { LCDClient, MnemonicKey } from "@terra-money/terra.js";
import axios from "axios";
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import { TerraPriceServiceConnection, CONTRACT_ADDR } from "../index";
const argv = yargs(hideBin(process.argv))
.option("network", {
description:
"Network to relay on. Provide node url if you are using localterra",
required: true,
default: "testnet",
})
.option("endpoint", {
description:
"Endpoint URL for the price service. e.g: https://endpoint/example",
type: "string",
required: true,
})
.option("pyth-contract", {
description:
"Pyth contract address. You should provide this value if you are using localterra",
type: "string",
required: false,
})
.option("price-ids", {
description:
"Space separated price feed ids (in hex) to fetch." +
" e.g: f9c0172ba10dfa4d19088d...",
type: "array",
required: true,
})
.option("mnemonic", {
description: "Mnemonic (private key) for sender",
type: "string",
required: true,
})
.help()
.alias("help", "h")
.parserConfiguration({
"parse-numbers": false,
})
.parseSync();
const CONFIG: Record<string, any> = {
testnet: {
terraHost: {
URL: "https://bombay-lcd.terra.dev",
chainID: "bombay-12",
name: "testnet",
},
},
};
export const TERRA_GAS_PRICES_URL = "https://fcd.terra.dev/v1/txs/gas_prices";
let terraHost;
let pythContractAddr: string;
if (CONFIG[argv.network] !== undefined) {
terraHost = CONFIG[argv.network].terraHost;
pythContractAddr = CONTRACT_ADDR[argv.network];
} else {
terraHost = {
URL: argv.network,
chainID: "localterra",
name: "localterra",
};
if (argv.pythContract === undefined) {
throw new Error(
"You should provide pyth contract address when using localterra"
);
}
pythContractAddr = argv.pythContract;
}
const feeDenoms = ["uluna"];
const connection = new TerraPriceServiceConnection(argv.endpoint);
const lcd = new LCDClient(terraHost);
const wallet = lcd.wallet(
new MnemonicKey({
mnemonic: argv.mnemonic,
})
);
const priceIds = argv.priceIds as string[];
async function run() {
const priceFeeds = await connection.getLatestPriceFeeds(priceIds);
console.log(priceFeeds);
const gasPrices = await axios
.get(TERRA_GAS_PRICES_URL)
.then((result) => result.data);
const msgs = await connection.getPriceUpdateMessages(
priceIds,
pythContractAddr,
wallet.key.accAddress
);
console.log(msgs);
const feeEstimate = await lcd.tx.estimateFee(
[
{
sequenceNumber: await wallet.sequence(),
},
],
{
msgs: msgs,
feeDenoms,
gasPrices,
}
);
const tx = await wallet.createAndSignTx({
msgs: msgs,
feeDenoms,
gasPrices,
fee: feeEstimate,
});
const rs = await lcd.tx.broadcastSync(tx);
console.log("Relay successful.", rs.txhash);
}
run();

View File

@@ -1,14 +0,0 @@
export { TerraPriceServiceConnection } from "./TerraPriceServiceConnection";
export {
DurationInMs,
HexString,
Price,
PriceFeed,
PriceServiceConnectionConfig,
UnixTimestamp,
} from "@pythnetwork/price-service-client";
export const CONTRACT_ADDR: Record<string, string> = {
testnet: "terra1wzs3rgzgjdde3kg7k3aaz6qx7sc5dcwxqe9fuc",
};

View File

@@ -1,14 +0,0 @@
{
"extends": "../../../../tsconfig.base.json",
"compilerOptions": {
"target": "esnext",
"module": "commonjs",
"declaration": true,
"rootDir": "src/",
"outDir": "./lib",
"strict": true,
"esModuleInterop": true
},
"include": ["src"],
"exclude": ["node_modules", "**/__tests__/*"]
}

View File

@@ -41,7 +41,7 @@ Open `coverage/index.html` in your web browser to see the results.
### Governance tests
-There is a separate test suite executed by truffle for testing governance messages and contract upgrades. You can either use tilt to test automatically or run ganache-cli as a blockchain instance and test it manually. To do the latter, run the following commands in the `contracts` folder:
+There is a separate test suite executed by truffle for testing governance messages and contract upgrades. You can run ganache-cli as a blockchain instance and test it manually. To do this, run the following commands in the `contracts` folder:
1. Spawn a new network on a separate terminal (do not close it while running tests):

View File

@@ -1,11 +0,0 @@
#syntax=docker/dockerfile:1.2@sha256:e2a8561e419ab1ba6b2fe6cbdf49fd92b95912df1cf7d313c3e2230a333fdbcc
FROM python:3.9-alpine
RUN pip install base58
ADD third_party/pyth/pyth_utils.py /usr/src/pyth/pyth_utils.py
ADD third_party/pyth/check_attestations.py /usr/src/pyth/check_attestations.py
RUN chmod a+rx /usr/src/pyth/*.py
ENV READINESS_PORT=2000

View File

@@ -1,38 +0,0 @@
# syntax=docker/dockerfile:1.2
# Wormhole-specific setup for pyth
FROM pythfoundation/pyth-client:devnet-v2.20.0
USER root
# At the time of this writing, debian is fussy about performing an
# apt-get update. Please add one if repos go stale
RUN apt-get install -y netcat-openbsd python3 && \
rm -rf /var/lib/apt/lists/*
ADD tilt_devnet/secrets/solana/ /solana-secrets
ENV PYTH_KEY_STORE=/home/pyth/.pythd
# Prepare keys
WORKDIR $PYTH_KEY_STORE
RUN cp /solana-secrets/pyth_publisher.json publish_key_pair.json && \
cp /solana-secrets/pyth_program.json program_key_pair.json && \
chown pyth:pyth -R . && \
chmod go-rwx -R .
ENV PYTH_SRC_ROOT=/home/pyth/pyth-client
WORKDIR $PYTH_SRC_ROOT/build
# Prepare setup script
ADD third_party/pyth/pyth_utils.py /opt/pyth/pyth_utils.py
ADD third_party/pyth/pyth_publisher.py /opt/pyth/pyth_publisher.py
RUN chmod a+rx /opt/pyth/*.py
USER pyth
ENV PYTH=$PYTH_SRC_ROOT/build/pyth
ENV PYTH_ADMIN=$PYTH_SRC_ROOT/build/pyth_admin
ENV READINESS_PORT=2000
ENV SOL_AIRDROP_AMT=100

View File

@@ -1,45 +0,0 @@
#!/usr/bin/env python3
# This script is a CI test in tilt that verifies that prices are flowing through the entire system properly.
# It checks that all prices being published by the pyth publisher are showing up at the price service.
import base58
import logging
import time
from pyth_utils import *
logging.basicConfig(
level=logging.DEBUG, format="%(asctime)s | %(module)s | %(levelname)s | %(message)s"
)
# Where to read the set of accounts from
PYTH_TEST_ACCOUNTS_HOST = "pyth"
PYTH_TEST_ACCOUNTS_PORT = 4242
PRICE_SERVICE_HOST = "pyth-price-server"
PRICE_SERVICE_PORT = 4200
def base58_to_hex(base58_string):
    asc_string = base58.b58decode(base58_string)
    return asc_string.hex()

all_prices_attested = False
while not all_prices_attested:
    publisher_state_map = get_pyth_accounts(PYTH_TEST_ACCOUNTS_HOST, PYTH_TEST_ACCOUNTS_PORT)
    pyth_price_account_ids = sorted([base58_to_hex(x["price"]) for x in publisher_state_map["symbols"]])
    price_ids = sorted(get_json(PRICE_SERVICE_HOST, PRICE_SERVICE_PORT, "/api/price_feed_ids"))
    if price_ids == pyth_price_account_ids:
        if publisher_state_map["all_symbols_added"]:
            logging.info("Price ids match and all symbols added. Enabling readiness probe")
            all_prices_attested = True
        else:
            logging.info("Price ids match but still waiting for more symbols to come online.")
    else:
        logging.info("Price ids do not match")
        logging.info(f"published ids: {pyth_price_account_ids}")
        logging.info(f"attested ids: {price_ids}")
    time.sleep(10)

# Let k8s know the service is up
readiness()

View File

@@ -1,3 +0,0 @@
/lib
/node_modules
/src/evm

View File

@@ -1,42 +0,0 @@
# DevNet:
SPY_SERVICE_HOST=0.0.0.0:7072
SPY_SERVICE_FILTERS=[{"chain_id":1,"emitter_address":"71f8dcb863d176e2c420ad6610cf687359612b6fb392e0642b0ca6b1f186aa3b"}]
TERRA_NODE_URL=http://localhost:1317
TERRA_PRIVATE_KEY=notice oak worry limit wrap speak medal online prefer cluster roof addict wrist behave treat actual wasp year salad speed social layer crew genius
TERRA_PYTH_CONTRACT_ADDRESS=terra1wgh6adn8geywx0v78zs9azrqtqdegufuegnwep
TERRA_CHAIN_ID=columbus-5
TERRA_NAME=localterra
TERRA_COIN=uluna
# TestNet:
#SPY_SERVICE_HOST=0.0.0.0:7073
#SPY_SERVICE_FILTERS=[{"chain_id":1,"emitter_address":"3afda841c1f43dd7d546c8a581ba1f92a139f4133f9f6ab095558f6a359df5d4"}]
#TERRA_NODE_URL=https://bombay-lcd.terra.dev
#TERRA_PRIVATE_KEY=your key here
#TERRA_PYTH_CONTRACT_ADDRESS=terra1wjkzgcrg3a2jh2cyc5lekvtjydf600splmvdk4
#TERRA_CHAIN_ID=bombay-12
#TERRA_NAME=testnet
#TERRA_COIN=uluna
# MainNet:
#SPY_SERVICE_HOST=0.0.0.0:7074
#SPY_SERVICE_FILTERS=[{"chain_id":1,"emitter_address":"b2dd468c9b8c80b3dd9211e9e3fd6ee4d652eb5997b7c9020feae971c278ab07"}]
#TERRA_NODE_URL=https://lcd.terra.dev
#TERRA_PRIVATE_KEY=your key here
#TERRA_PYTH_CONTRACT_ADDRESS=fill_this_in
#TERRA_CHAIN_ID=columbus-5
#TERRA_NAME=mainnet
#TERRA_COIN=uluna
REST_PORT=4200
PROM_PORT=8081
BAL_QUERY_INTERVAL=60000
RETRY_MAX_ATTEMPTS=4
RETRY_DELAY_IN_MS=250
MAX_MSGS_PER_BATCH=1
MAX_HEALTHY_NO_RELAY_DURATION_IN_SECONDS=120
# The default is to log to the console at level info.
LOG_DIR=/var/pyth_relay/logs
#LOG_LEVEL=debug

View File

@@ -1,4 +0,0 @@
/lib
# EVM artifacts
/src/evm

View File

@@ -1,95 +0,0 @@
# Overview
The pyth_relay program is designed to listen to Pyth messages published on Solana and relay them to other chains.
Although the only supported destination chain in its initial release is Terra, the design supports publishing to multiple chains.
<p>
The relayer listens to the spy_guardian for signed VAA messages. It can be configured to only request specific emitters, so that only Pyth messages get forwarded.
<p>
When the relayer receives messages from the spy, it drops redundant messages based on sequence numbers, verifies that each is a Pyth message, and relays the Pyth
messages to Terra.
# Operational Details
The relayer can be run as a docker image. Additionally, you need to have an instance of the spy guardian running, which can be started using a docker image.
<p>
The relayer is configured using an env file, as specified by the PYTH_RELAY_CONFIG environment variable. Please see the env.samples file in the source directory for
valid variables.
<p>
The relayer can be configured to log to a file in the directory specified by the LOG_DIR environment variable. If the variable is not specified, it logs to the console.
<p>
The log level can be controlled by the LOG_LEVEL environment variable, where info is the default. The valid values are debug, info, warn, and error.
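<p>
For illustration, the configuration handling described above amounts to roughly the following (a sketch based on index.ts and helpers.ts in this package, not the verbatim source):

```typescript
// Sketch: resolve the env file, then read the logging settings from it.
const configFile = process.env.PYTH_RELAY_CONFIG ?? ".env";
require("dotenv").config({ path: configFile });

const logDir = process.env.LOG_DIR; // if set, log to a file in this directory
const logLevel = process.env.LOG_LEVEL ?? "info"; // debug | info | warn | error
```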
# External Dependencies
The relayer connects to Terra, so it therefore has the following dependencies
1. A Pyth to Wormhole publisher
2. A highly reliable connection to a local Terra node via Wormhole
3. A unique Terra Wallet per instance of pyth_relayer
4. A Wormhole spy guardian process running that the pyth_relayer can subscribe to for Pyth messages
Note that for performance reasons, pyth_relayer manages the Terra wallet sequence number locally. If it does not do so, it will get wallet sequence number errors if it publishes faster than the Terra node can handle it. For this to work, the relayer should be connected to a local Terra node, to minimize the possible paths the published message could take, and maintain sequence number ordering.
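<p>
A minimal sketch of that local sequence management (the names here are illustrative, not the actual module's API):

```typescript
// Seed the sequence number from the node once, then increment locally so that
// back-to-back transactions don't race the node's view of the account.
let nextSeq: number | undefined;

async function nextSequence(wallet: { sequence(): Promise<number> }): Promise<number> {
  if (nextSeq === undefined) {
    nextSeq = await wallet.sequence();
  }
  return nextSeq++;
}
```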
# High Availability
If high availability is a goal, then two completely separate instances of pyth_relay should be run. They should run on completely separate hardware, using separate Terra connections and wallets. Additionally, they should connect to separate instances of the spy_guardian. They will both be publishing messages to the Pyth Terra contract, which will simply drop the duplicates.
# Design Details
The relayer code is divided into separate source files, based on functionality. The main entry point is index.ts. It invokes code in the other files as follows.
## listener.ts
The listener code parses the emitter filter parameter, which may consist of none, one or more chain / emitter pairs. If any filters are specified, then only VAAs from those emitters will be processed. The listener then registers those emitters with the spy guardian via RPC callback.
<p>
When the listener receives a VAA from the spy, it verifies that it has not already been seen, based on the sequence number. This is necessary since there are multiple guardians signing and publishing the same VAAs. It then validates that it is a Pyth message. All Pyth payloads start with P2WH. If so, it invokes the postEvent method on the worker to forward the VAA for publishing.
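<p>
A rough sketch of those checks (illustrative names; the actual implementation is in listen.ts, shown later in this diff):

```typescript
// Drop VAAs already seen (multiple guardians publish the same VAA) and forward
// only Pyth payloads, which all start with the "P2WH" magic bytes.
const lastSeq = new Map<string, bigint>();

function shouldForward(emitter: string, sequence: bigint, payload: Buffer): boolean {
  const prev = lastSeq.get(emitter);
  if (prev !== undefined && sequence <= prev) return false; // redundant VAA
  lastSeq.set(emitter, sequence);
  return payload.subarray(0, 4).toString("ascii") === "P2WH"; // Pyth messages only
}
```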
## worker.ts
The worker code is responsible for taking VAAs to be published from the listener and passing them to the relay code for relaying to Terra.
<p>
The worker uses a map of pending events, a condition variable to signal that events are waiting to be published, and a map of the latest state of each Pyth price.
The worker protects all of these objects with a mutex.
<p>
The worker maintains performance metrics to be published by the Prometheus interface.
<p>
The worker also provides methods to query the status of the wallet being used for relaying and the current status of all maintained prices, and it can query Terra for the current
data for a given price. These are used by the REST interface, if it is enabled in the config.
<p>
In most cases, if a Terra transaction fails, the worker will retry up to a configurable number of times, with a configurable delay between attempts. For each successive retry of a given message, the delay is increased by the retry attempt number (delay * attempt).
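<p>
A sketch of that retry schedule (illustrative; the configurable knobs correspond to RETRY_MAX_ATTEMPTS and RETRY_DELAY_IN_MS in the env sample):

```typescript
// Retry a relay with a linearly growing delay: attempt n waits delayMs * n.
async function relayWithRetry(
  relayOnce: () => Promise<void>,
  maxAttempts: number,
  delayMs: number
): Promise<void> {
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      return await relayOnce();
    } catch (err) {
      if (attempt === maxAttempts) throw err; // retry limit exceeded
      await new Promise((resolve) => setTimeout(resolve, delayMs * attempt));
    }
  }
}
```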
## main.ts and terra.ts
This is the code that actually communicates with the Terra blockchain. It takes configuration data from the env file, and provides methods to relay a Pyth message, query the wallet balance, and query the current data for a given price.
## promHelper.ts
Prometheus is being used as a framework for storing metrics. Currently, the following metrics are being collected:
- The last sequence number sent
- The total number of successful relays
- The total number of failed relays
- A histogram of transfer times
- The current wallet balance
- The total number of VAAs received by the listener
- The total number of VAAs already executed on Terra
- The total number of Terra transaction timeouts
- The total number of Terra sequence number errors
- The total number of Terra retry attempts
- The total number of retry limit exceeded errors
- The total number of transactions failed due to insufficient funds
All the above metrics can be viewed at http://localhost:8081/metrics
<p>
The port 8081 is the default. The port can be specified by the `PROM_PORT` tunable in the env file.
<p>
This file contains a class named `PromHelper`. It is an encapsulation of the Prometheus API.
## helpers.ts
This contains an assortment of helper functions and objects used by the other code, including logger initialization and parsing of Pyth messages.

View File

@@ -1,16 +0,0 @@
# Defined in tilt_devnet/docker_images/Dockerfile.lerna
FROM lerna
WORKDIR /home/node/
USER 1000
COPY --chown=1000:1000 target_chains/ethereum/sdk/solidity target_chains/ethereum/sdk/solidity
COPY --chown=1000:1000 price_service/sdk/js price_service/sdk/js
COPY --chown=1000:1000 third_party/pyth/p2w-relay third_party/pyth/p2w-relay
COPY --chown=1000:1000 wormhole_attester/sdk/js wormhole_attester/sdk/js
RUN npx lerna run build --scope="pyth_relay" --include-dependencies
WORKDIR /home/node/third_party/pyth/p2w-relay
CMD [ "npm", "run", "start" ]

View File

@@ -1,46 +0,0 @@
# Setup Spy Guardian and Pyth Relay
To build the spy_guardian docker container:
```
$ docker build -f Dockerfile.spy_guardian -t spy_guardian .
```
To build the pyth_relay docker container:
```
$ docker build -f Dockerfile.pyth_relay -t pyth_relay .
```
Run the spy_guardian docker container in TestNet:
```
$ docker run --platform linux/amd64 -d --network=host spy_guardian \
--bootstrap /dns4/wormhole-testnet-v2-bootstrap.certus.one/udp/8999/quic/p2p/12D3KooWBY9ty9CXLBXGQzMuqkziLntsVcyz4pk1zWaJRvJn6Mmt \
--network /wormhole/testnet/2/1 \
--spyRPC "[::]:7073"
```
Or run the spy_guardian docker container in MainNet:
For the MainNet gossip network parameters, see https://github.com/wormhole-foundation/wormhole-networks/blob/master/mainnetv2/info.md
```
$ docker run --platform linux/amd64 -d --network=host spy_guardian \
--bootstrap <guardianNetworkBootstrapParameterForMainNet> \
--network <guardianNetworkPathForMainNet> \
--spyRPC "[::]:7073"
```
Then to run the pyth_relay docker container using a config file called
${HOME}/pyth_relay/env and logging to directory ${HOME}/pyth_relay/logs, do the
following:
```
$ docker run \
--volume=${HOME}/pyth_relay:/var/pyth_relay \
-e PYTH_RELAY_CONFIG=/var/pyth_relay/env \
--network=host \
-d \
pyth_relay
```

View File

@@ -1,57 +0,0 @@
{
"name": "pyth_relay",
"version": "1.0.0",
"description": "Pyth relayer",
"private": "true",
"main": "index.js",
"scripts": {
"build": "npm run build-evm && npm run build-lib",
"build-evm": "npm run copy-evm-abis && npm run build-evm-bindings",
"build-lib": "tsc",
"copy-evm-abis": "mkdir -p ./src/evm/abis && cp -r ../../../node_modules/@pythnetwork/pyth-sdk-solidity/abis ./src/evm/",
"build-evm-bindings": "mkdir -p ./src/evm/bindings/ && typechain --target=ethers-v5 --out-dir=src/evm/bindings/ src/evm/abis/*Pyth*.json",
"start": "node lib/index.js",
"listen_only": "node lib/index.js --listen_only"
},
"author": "",
"license": "Apache-2.0",
"devDependencies": {
"@improbable-eng/grpc-web-node-http-transport": "^0.15.0",
"@types/jest": "^27.0.2",
"@types/long": "^4.0.1",
"@types/node": "^16.6.1",
"esm": "^3.2.25",
"ethers": "^5.4.4",
"jest": "^27.3.1",
"prettier": "^2.3.2",
"ts-jest": "^27.0.7",
"tslint": "^6.1.3",
"tslint-config-prettier": "^1.18.0",
"typescript": "^4.3.5"
},
"dependencies": {
"@certusone/wormhole-sdk": "^0.1.4",
"@certusone/wormhole-spydk": "^0.0.1",
"@pythnetwork/pyth-sdk-solidity": "^2.2.0",
"@pythnetwork/wormhole-attester-sdk": "*",
"@solana/spl-token": "^0.1.8",
"@solana/web3.js": "^1.24.0",
"@terra-money/terra.js": "^3.1.3",
"@typechain/ethers-v5": "^7.0.1",
"@types/express": "^4.17.13",
"async-mutex": "^0.3.2",
"axios": "^0.24.0",
"body-parser": "^1.19.0",
"condition-variable": "^1.0.0",
"cors": "^2.8.5",
"dotenv": "^10.0.0",
"express": "^4.17.2",
"prom-client": "^14.0.1",
"redis": "^4.0.0",
"winston": "^3.3.3"
},
"directories": {
"lib": "lib"
},
"keywords": []
}

View File

@@ -1,75 +0,0 @@
////////////////////////////////// Start of Logger Stuff //////////////////////////////////////
export let logger: any;
export function initLogger() {
const winston = require("winston");
let useConsole: boolean = true;
let logFileName: string = "";
if (process.env.LOG_DIR) {
useConsole = false;
logFileName =
process.env.LOG_DIR + "/pyth_relay." + new Date().toISOString() + ".log";
}
let logLevel = "info";
if (process.env.LOG_LEVEL) {
logLevel = process.env.LOG_LEVEL;
}
let transport: any;
if (useConsole) {
console.log("pyth_relay is logging to the console at level [%s]", logLevel);
transport = new winston.transports.Console({
level: logLevel,
});
} else {
console.log(
"pyth_relay is logging to [%s] at level [%s]",
logFileName,
logLevel
);
transport = new winston.transports.File({
filename: logFileName,
level: logLevel,
});
}
const logConfiguration = {
transports: [transport],
format: winston.format.combine(
winston.format.splat(),
winston.format.simple(),
winston.format.timestamp({
format: "YYYY-MM-DD HH:mm:ss.SSS",
}),
winston.format.printf(
(info: any) => `${[info.timestamp]}|${info.level}|${info.message}`
)
),
};
logger = winston.createLogger(logConfiguration);
}
////////////////////////////////// Start of Other Helpful Stuff //////////////////////////////////////
export function sleep(ms: number) {
return new Promise((resolve) => setTimeout(resolve, ms));
}
// Shorthand for optional/mandatory envs
export function envOrErr(env: string, defaultValue?: string): string {
let val = process.env[env];
if (!val) {
if (!defaultValue) {
throw `environment variable "${env}" must be set`;
} else {
return defaultValue;
}
}
return String(process.env[env]);
}

View File

@@ -1,84 +0,0 @@
import { setDefaultWasm } from "@certusone/wormhole-sdk/lib/cjs/solana/wasm";
import * as fs from "fs";
import * as listen from "./listen";
import * as worker from "./worker";
import * as rest from "./rest";
import * as helpers from "./helpers";
import { logger } from "./helpers";
import { PromHelper } from "./promHelpers";
import { Relay } from "./relay/iface";
import { TerraRelay } from "./relay/terra";
import { EvmRelay } from "./relay/evm";
let configFile: string = ".env";
if (process.env.PYTH_RELAY_CONFIG) {
configFile = process.env.PYTH_RELAY_CONFIG;
}
console.log("Loading config file [%s]", configFile);
require("dotenv").config({ path: configFile });
setDefaultWasm("node");
// Set up the logger.
helpers.initLogger();
let error: boolean = false;
let listenOnly: boolean = false;
let relayImpl: Relay | null = null;
for (let idx = 0; idx < process.argv.length; ++idx) {
if (process.argv[idx] === "--listen_only") {
logger.info("running in listen only mode, will not relay anything!");
listenOnly = true;
} else if (process.argv[idx] === "--terra" && !relayImpl) {
relayImpl = new TerraRelay({
nodeUrl: helpers.envOrErr("TERRA_NODE_URL"),
terraChainId: helpers.envOrErr("TERRA_CHAIN_ID"),
walletPrivateKey: helpers.envOrErr("TERRA_PRIVATE_KEY"),
coinDenom: helpers.envOrErr("TERRA_COIN"),
contractAddress: helpers.envOrErr("TERRA_PYTH_CONTRACT_ADDRESS"),
});
logger.info("Relaying to Terra");
} else if (process.argv[idx] === "--evm" && !relayImpl) {
relayImpl = new EvmRelay({
jsonRpcUrl: helpers.envOrErr("EVM_NODE_JSON_RPC_URL"),
payerWalletMnemonic: helpers.envOrErr("EVM_WALLET_MNEMONIC"),
payerHDWalletPath: helpers.envOrErr(
"EVM_HDWALLET_PATH",
"m/44'/60'/0'/0"
), // ETH mainnet default
p2wContractAddress: helpers.envOrErr("EVM_PYTH_CONTRACT_ADDRESS"),
verifyPriceFeeds:
helpers.envOrErr("EVM_VERIFY_PRICE_FEEDS", "no") == "yes",
});
logger.info("Relaying to EVM.");
}
}
if (!relayImpl) {
logger.error("No relay implementation specified");
error = true;
}
if (
!error &&
listen.init(listenOnly) &&
worker.init(!listenOnly, relayImpl as any) &&
rest.init(!listenOnly)
) {
// Start the Prometheus client with the app name and http port
let promPort = 8081;
if (process.env.PROM_PORT) {
promPort = parseInt(process.env.PROM_PORT);
}
logger.info("prometheus client listening on port " + promPort);
const promClient = new PromHelper("pyth_relay", promPort);
listen.run(promClient);
if (!listenOnly) {
worker.run(promClient);
rest.run();
}
}

View File

@@ -1,214 +0,0 @@
import {
ChainId,
CHAIN_ID_SOLANA,
CHAIN_ID_TERRA,
hexToUint8Array,
uint8ArrayToHex,
getEmitterAddressEth,
getEmitterAddressSolana,
getEmitterAddressTerra,
} from "@certusone/wormhole-sdk";
import {
createSpyRPCServiceClient,
subscribeSignedVAA,
} from "@certusone/wormhole-spydk";
import {
parseBatchPriceAttestation,
getBatchSummary,
} from "@pythnetwork/wormhole-attester-sdk";
import { importCoreWasm } from "@certusone/wormhole-sdk/lib/cjs/solana/wasm";
import * as helpers from "./helpers";
import { logger } from "./helpers";
import { postEvent } from "./worker";
import { PromHelper } from "./promHelpers";
let seqMap = new Map<string, number>();
let listenOnly: boolean = false;
let metrics: PromHelper;
export function init(lo: boolean): boolean {
listenOnly = lo;
if (!process.env.SPY_SERVICE_HOST) {
logger.error("Missing environment variable SPY_SERVICE_HOST");
return false;
}
return true;
}
export async function run(pm: PromHelper) {
metrics = pm;
logger.info(
"pyth_relay starting up, will listen for signed VAAs from [" +
process.env.SPY_SERVICE_HOST +
"]"
);
(async () => {
let filter = {};
if (process.env.SPY_SERVICE_FILTERS) {
const parsedJsonFilters = eval(process.env.SPY_SERVICE_FILTERS);
let myFilters = [];
for (let i = 0; i < parsedJsonFilters.length; i++) {
let myChainId = parseInt(parsedJsonFilters[i].chain_id) as ChainId;
let myEmitterAddress = parsedJsonFilters[i].emitter_address;
// let myEmitterAddress = await encodeEmitterAddress(
// myChainId,
// parsedJsonFilters[i].emitter_address
// );
let myEmitterFilter = {
emitterFilter: {
chainId: myChainId,
emitterAddress: myEmitterAddress,
},
};
logger.info(
"adding filter: chainId: [" +
myEmitterFilter.emitterFilter.chainId +
"], emitterAddress: [" +
myEmitterFilter.emitterFilter.emitterAddress +
"]"
);
myFilters.push(myEmitterFilter);
}
logger.info("setting " + myFilters.length + " filters");
filter = {
filters: myFilters,
};
} else {
logger.info("processing all signed VAAs");
}
while (true) {
let stream: any;
try {
const client = createSpyRPCServiceClient(
process.env.SPY_SERVICE_HOST || ""
);
stream = await subscribeSignedVAA(client, filter);
stream.on("data", ({ vaaBytes }: { vaaBytes: string }) => {
processVaa(vaaBytes);
});
let connected = true;
stream.on("error", (err: any) => {
logger.error("spy service returned an error: %o", err);
connected = false;
});
stream.on("close", () => {
logger.error("spy service closed the connection!");
connected = false;
});
logger.info("connected to spy service, listening for messages");
while (connected) {
await helpers.sleep(1000);
}
} catch (e) {
logger.error("spy service threw an exception: %o", e);
}
stream.end;
await helpers.sleep(5 * 1000);
logger.info("attempting to reconnect to the spy service");
}
})();
}
async function encodeEmitterAddress(
myChainId: ChainId,
emitterAddressStr: string
): Promise<string> {
if (myChainId === CHAIN_ID_SOLANA) {
return await getEmitterAddressSolana(emitterAddressStr);
}
if (myChainId === CHAIN_ID_TERRA) {
return await getEmitterAddressTerra(emitterAddressStr);
}
return getEmitterAddressEth(emitterAddressStr);
}
async function processVaa(vaaBytes: string) {
let receiveTime = new Date();
const { parse_vaa } = await importCoreWasm();
const parsedVAA = parse_vaa(hexToUint8Array(vaaBytes));
// logger.debug(
// "processVaa, vaa len: " +
// vaaBytes.length +
// ", payload len: " +
// parsedVAA.payload.length
// );
// logger.debug("listen:processVaa: parsedVAA: %o", parsedVAA);
let batchAttestation;
try {
batchAttestation = await parseBatchPriceAttestation(
Buffer.from(parsedVAA.payload)
);
} catch (e: any) {
logger.error(e, e.stack);
logger.error("Parsing failed. Dropping vaa: %o", parsedVAA);
return;
}
let isAnyPriceNew = batchAttestation.priceAttestations.some(
(priceAttestation) => {
const key = priceAttestation.priceId;
let lastSeqNum = seqMap.get(key);
return lastSeqNum === undefined || lastSeqNum < parsedVAA.sequence;
}
);
if (!isAnyPriceNew) {
logger.debug(
"For all prices there exists an update with newer sequence number. batch price attestation: %o",
batchAttestation
);
return;
}
for (let priceAttestation of batchAttestation.priceAttestations) {
const key = priceAttestation.priceId;
let lastSeqNum = seqMap.get(key);
if (lastSeqNum === undefined || lastSeqNum < parsedVAA.sequence) {
seqMap.set(key, parsedVAA.sequence);
}
}
logger.info(
"received: emitter: [" +
parsedVAA.emitter_chain +
":" +
uint8ArrayToHex(parsedVAA.emitter_address) +
"], seqNum: " +
parsedVAA.sequence +
", Batch Summary: " +
getBatchSummary(batchAttestation)
);
metrics.incIncoming();
if (!listenOnly) {
logger.debug("posting to worker");
await postEvent(
vaaBytes,
batchAttestation,
parsedVAA.sequence,
receiveTime
);
}
}

View File

@@ -1,160 +0,0 @@
import http = require("http");
import client = require("prom-client");
import helpers = require("./helpers");
// NOTE: To create a new metric:
// 1) Create a private counter/gauge with appropriate name and help
// 2) Create a method to set the metric to a value
// 3) Register the metric
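// For example, a hypothetical queue-depth gauge (illustrative only, not part of this module):
//
//   private queueDepthGauge = new client.Gauge({
//     name: "queue_depth",
//     help: "Number of VAAs waiting to be relayed",
//   });
//   setQueueDepth(depth: number) {
//     this.queueDepthGauge.set(depth);
//   }
//   // ...and in the constructor: this.register.registerMetric(this.queueDepthGauge);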
export class PromHelper {
private register = new client.Registry();
private walletReg = new client.Registry();
private collectDefaultMetrics = client.collectDefaultMetrics;
// Actual metrics
private seqNumGauge = new client.Gauge({
name: "seqNum",
help: "Last sent sequence number",
});
private successCounter = new client.Counter({
name: "successes",
help: "number of successful relays",
});
private failureCounter = new client.Counter({
name: "failures",
help: "number of failed relays",
});
private completeTime = new client.Histogram({
name: "complete_time",
help: "Time is took to complete transfer",
buckets: [400, 800, 1600, 3200, 6400, 12800],
});
private walletBalance = new client.Gauge({
name: "wallet_balance",
help: "The wallet balance",
labelNames: ["timestamp"],
registers: [this.walletReg],
});
private listenCounter = new client.Counter({
name: "VAAs_received",
help: "number of Pyth VAAs received",
});
private alreadyExecutedCounter = new client.Counter({
name: "already_executed",
help: "number of transfers rejected due to already having been executed",
});
private transferTimeoutCounter = new client.Counter({
name: "transfer_timeout",
help: "number of transfers that timed out",
});
private seqNumMismatchCounter = new client.Counter({
name: "seq_num_mismatch",
help: "number of transfers that failed due to sequence number mismatch",
});
private retryCounter = new client.Counter({
name: "retries",
help: "number of retry attempts",
});
private retriesExceededCounter = new client.Counter({
name: "retries_exceeded",
help: "number of transfers that failed due to exceeding the retry count",
});
private insufficentFundsCounter = new client.Counter({
name: "insufficient_funds",
help: "number of transfers that failed due to insufficient funds count",
});
// End metrics
private server = http.createServer(async (req, res) => {
if (req.url === "/metrics") {
// Return all metrics in the Prometheus exposition format
res.setHeader("Content-Type", this.register.contentType);
res.write(await this.register.metrics());
res.end(await this.walletReg.metrics());
}
});
constructor(name: string, port: number) {
this.register.setDefaultLabels({
app: name,
});
this.collectDefaultMetrics({ register: this.register });
// Register each metric
this.register.registerMetric(this.seqNumGauge);
this.register.registerMetric(this.successCounter);
this.register.registerMetric(this.failureCounter);
this.register.registerMetric(this.completeTime);
this.register.registerMetric(this.listenCounter);
this.register.registerMetric(this.alreadyExecutedCounter);
this.register.registerMetric(this.transferTimeoutCounter);
this.register.registerMetric(this.seqNumMismatchCounter);
this.register.registerMetric(this.retryCounter);
this.register.registerMetric(this.retriesExceededCounter);
this.register.registerMetric(this.insufficentFundsCounter);
// End registering metric
this.server.listen(port);
}
// These are the accessor methods for the metrics
setSeqNum(sn: number) {
this.seqNumGauge.set(sn);
}
incSuccesses() {
this.successCounter.inc();
}
incFailures() {
this.failureCounter.inc();
}
addCompleteTime(val: number) {
this.completeTime.observe(val);
}
setWalletBalance(bal: bigint) {
this.walletReg.clear();
// this.walletReg = new client.Registry();
this.walletBalance = new client.Gauge({
name: "wallet_balance",
help: "The wallet balance",
labelNames: ["timestamp"],
registers: [this.walletReg],
});
this.walletReg.registerMetric(this.walletBalance);
let now = new Date();
let balance_converted: number | null = null;
// CAUTION(2022-03-22): Conversion to Number may overflow;
// at this time TS build fails without the conversion.
try {
balance_converted = Number(bal);
} catch (e) {
helpers.logger.error(
"setWalletBalance(): BigInt -> Number conversion failed"
);
}
// Do not crash if there's a problem with the balance value
if (balance_converted !== null) {
this.walletBalance.set({ timestamp: now.toString() }, balance_converted);
}
}
incIncoming() {
this.listenCounter.inc();
}
incAlreadyExec() {
this.alreadyExecutedCounter.inc();
}
incTransferTimeout() {
this.transferTimeoutCounter.inc();
}
incSeqNumMismatch() {
this.seqNumMismatchCounter.inc();
}
incRetries() {
this.retryCounter.inc();
}
incRetriesExceeded() {
this.retriesExceededCounter.inc();
}
incInsufficentFunds() {
this.insufficentFundsCounter.inc();
}
}

View File

@@ -1,191 +0,0 @@
import { Relay, RelayResult, RelayRetcode, PriceId } from "./iface";
import { ethers } from "ethers";
import { logger } from "../helpers";
import { hexToUint8Array } from "@certusone/wormhole-sdk";
import { importCoreWasm } from "@certusone/wormhole-sdk/lib/cjs/solana/wasm";
import { AbstractPyth__factory, AbstractPyth } from "../evm/bindings/";
import { parseBatchPriceAttestation } from "@pythnetwork/wormhole-attester-sdk";
let WH_WASM: any = null;
// Neat trick to import wormhole wasm cheaply
async function whWasm(): Promise<any> {
if (!WH_WASM) {
WH_WASM = await importCoreWasm();
}
return WH_WASM;
}
export class EvmRelay implements Relay {
payerWallet: ethers.Wallet;
p2wContract: AbstractPyth;
// p2w contract sanity check; if set to true, we log query() results
// on all prices in a batch before and after relaying.
verifyPriceFeeds: boolean;
async relay(signedVAAs: Array<string>): Promise<RelayResult> {
let batchCount = signedVAAs.length;
const { parse_vaa } = await whWasm();
// Schedule all received batches in parallel
let txs = [];
for (let i = 0; i < signedVAAs.length; ++i) {
let batchNo = i + 1;
let parsedVAA = parse_vaa(hexToUint8Array(signedVAAs[i]));
let parsedBatch = await parseBatchPriceAttestation(
Buffer.from(parsedVAA.payload)
);
let priceIds: PriceId[] = [];
for (let j = 0; j < parsedBatch.priceAttestations.length; ++j) {
priceIds.push(parsedBatch.priceAttestations[j].priceId);
}
let batchFeedsBefore = this.verifyPriceFeeds
? await this.queryMany(priceIds)
: null;
const updateData = ["0x" + signedVAAs[i]];
const updateFee = await this.p2wContract.getUpdateFee(updateData);
let tx = this.p2wContract
.updatePriceFeeds(updateData, { gasLimit: 2000000, value: updateFee })
.then(async (pending) => {
let receipt = await pending.wait();
logger.info(
`Batch ${batchNo}/${batchCount} tx OK, status ${receipt.status} tx hash ${receipt.transactionHash}`
);
logger.debug(
`Batch ${batchNo}/${batchCount} Full details ${JSON.stringify(
receipt
)}`
);
let batchFeedsAfter = this.verifyPriceFeeds
? await this.queryMany(priceIds)
: null;
if (batchFeedsBefore && batchFeedsAfter) {
this.logFeedCmp(batchFeedsBefore, batchFeedsAfter);
}
return new RelayResult(RelayRetcode.Success, [
receipt.transactionHash,
]);
})
.catch((e) => {
logger.error(
`Batch ${batchNo}/${batchCount} tx failed: ${e.code}, failed tx hash ${e.transactionHash}`
);
let detailed_msg = `Batch ${batchNo}/${batchCount} failure details: ${JSON.stringify(
e
)}`;
logger.debug(detailed_msg);
return new RelayResult(RelayRetcode.Fail, []);
});
txs.push(tx);
}
logger.info(`scheduled ${txs.length} EVM transaction(s)`);
let results = await Promise.all(txs);
let ok = true;
let txHashes: Array<string> = [];
for (let res of results) {
if (res.is_ok()) {
txHashes.push(...res.txHashes);
} else {
ok = false;
}
}
// TODO(2021-03-23): Make error reporting for caller more granular (Array<RelayResult>, retries etc.)
if (ok) {
return new RelayResult(RelayRetcode.Success, txHashes);
} else {
return new RelayResult(RelayRetcode.Fail, []);
}
}
async query(priceId: PriceId): Promise<any> {
return await this.p2wContract.queryPriceFeed(priceId);
}
/// Query many `priceIds` in parallel, used internally by `relay()`
/// for implementing `this.verifyPriceFeeds`.
async queryMany(priceIds: Array<PriceId>): Promise<any[]> {
let batchFeedLookups = [];
for (let i = 0; i < priceIds.length; ++i) {
let lookup = this.query("0x" + priceIds[i]).catch((e) => {
logger.warn(`Could not look up price ${priceIds[i]}`);
return `<failed query() for ${priceIds[i]}>`;
});
batchFeedLookups.push(lookup);
}
return Promise.all(batchFeedLookups);
}
/// Helper method for relay(); compares two arrays of batch records with relevant log messages.
/// A comparison before and after a relay() call is a useful sanity check.
logFeedCmp(before: Array<any>, after: Array<any>) {
if (before.length != after.length) {
logger.error("INTERNAL: logFeedCmp() before/after length mismatch");
return;
}
let changedCount = 0;
for (let j = 0; j < before.length; ++j) {
if (before[j] != after[j]) {
changedCount++;
logger.debug(
`price ${j + 1}/${before.length} changed:\n==== OLD ====\n${
before[j]
}\n==== NEW ====\n${after[j]}`
);
} else {
logger.debug(`price ${j + 1}/${before.length} unchanged`);
}
}
if (changedCount > 0) {
logger.info(`${changedCount} price feeds changed in relay() run`);
} else {
// Yell louder if no feeds changed
logger.warn(`All ${before.length} price feeds unchanged in relay() run`);
}
}
async getPayerInfo(): Promise<{ address: string; balance: bigint }> {
return {
address: this.payerWallet.address,
balance: BigInt(`${await this.payerWallet.getBalance()}`),
};
}
constructor(cfg: {
jsonRpcUrl: string;
payerWalletMnemonic: string;
payerHDWalletPath: string;
p2wContractAddress: string;
verifyPriceFeeds: boolean;
}) {
let provider = new ethers.providers.JsonRpcProvider(cfg.jsonRpcUrl);
let wallet = ethers.Wallet.fromMnemonic(
cfg.payerWalletMnemonic,
cfg.payerHDWalletPath
);
this.payerWallet = new ethers.Wallet(wallet.privateKey, provider);
this.p2wContract = AbstractPyth__factory.connect(
cfg.p2wContractAddress,
this.payerWallet
);
this.verifyPriceFeeds = cfg.verifyPriceFeeds;
// This promise and throw exist because of constructor() limitations.
provider.getCode(cfg.p2wContractAddress).then((code) => {
if (code == "0x") {
let msg = `Address ${cfg.p2wContractAddress} does not appear to be a contract (getCode() yields 0x)`;
logger.error(msg);
throw new Error(msg);
}
});
}
}
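// A minimal usage sketch; the RPC URL, mnemonic, HD path and contract address
// below are placeholders, not real deployment values:
//
//   const relay = new EvmRelay({
//     jsonRpcUrl: "http://localhost:8545",
//     payerWalletMnemonic: "<12-word mnemonic>",
//     payerHDWalletPath: "m/44'/60'/0'/0/0",
//     p2wContractAddress: "0x<pyth contract>",
//     verifyPriceFeeds: true,
//   });
//   const result = await relay.relay([signedVaaHex]);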

View File

@@ -1,44 +0,0 @@
// This module describes a common interface in front of chain-specific
// relay logic.
export type PriceId = string;
/// Describes the possible outcomes of a relay() call on target chain
/// NOTE(2022-03-21): order reflects the historically used constants
export enum RelayRetcode {
Success = 0,
Fail, // Generic failure
AlreadyExecuted, // TODO(2022-03-18): Terra-specific leak, remove ASAP
Timeout, // Our desired timeout expired
SeqNumMismatch, // TODO(2022-03-18): Terra-specific leak, remove ASAP
InsufficientFunds, // Payer's too poor
}
/// relay() return type
export class RelayResult {
code: RelayRetcode;
txHashes: Array<string>; /// One or more tx hashes produced by a successful relay() call
constructor(code: RelayRetcode, hashes: Array<string>) {
this.code = code;
this.txHashes = hashes;
}
is_ok(): boolean {
return this.code == RelayRetcode.Success;
}
}
/// Represents a target chain relay client generically.
export interface Relay {
/// Relay a signed Wormhole payload to this chain
relay(signedVAAs: Array<string>): Promise<RelayResult>;
/// Query price data on this chain
query(priceId: PriceId): Promise<any>;
/// Monitor the payer account balance. Balance is a bigint (as in the
/// original JS implementation) to accommodate differences between
/// big-number implementations.
getPayerInfo(): Promise<{ address: string; balance: bigint }>;
}
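// A chain-agnostic consumer only needs this interface; a hypothetical sketch:
//
//   async function relayAndLog(relay: Relay, vaas: Array<string>): Promise<Array<string>> {
//     const res = await relay.relay(vaas);
//     if (!res.is_ok()) {
//       console.error(`relay failed with retcode ${res.code}`);
//     }
//     return res.txHashes;
//   }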

View File

@@ -1,250 +0,0 @@
import {
Coin,
LCDClient,
LCDClientConfig,
MnemonicKey,
Msg,
MsgExecuteContract,
} from "@terra-money/terra.js";
import axios from "axios";
import { logger } from "../helpers";
import { Relay, RelayResult, RelayRetcode, PriceId } from "./iface";
export const TERRA_GAS_PRICES_URL = "https://fcd.terra.dev/v1/txs/gas_prices";
export class TerraRelay implements Relay {
readonly nodeUrl: string;
readonly terraChainId: string;
readonly walletPrivateKey: string;
readonly coinDenom: string;
readonly contractAddress: string;
readonly lcdConfig: LCDClientConfig;
constructor(cfg: {
nodeUrl: string;
terraChainId: string;
walletPrivateKey: string;
coinDenom: string;
contractAddress: string;
}) {
this.nodeUrl = cfg.nodeUrl;
this.terraChainId = cfg.terraChainId;
this.walletPrivateKey = cfg.walletPrivateKey;
this.coinDenom = cfg.coinDenom;
this.contractAddress = cfg.contractAddress;
this.lcdConfig = {
URL: this.nodeUrl,
chainID: this.terraChainId,
};
logger.info(
"Terra connection parameters: url: [" +
this.nodeUrl +
"], terraChainId: [" +
this.terraChainId +
"], coin: [" +
this.coinDenom +
"], contractAddress: [" +
this.contractAddress +
"]"
);
}
async relay(signedVAAs: Array<string>) {
let terraRes;
try {
logger.debug("relaying " + signedVAAs.length + " messages to terra");
logger.debug("TIME: connecting to terra");
const lcdClient = new LCDClient(this.lcdConfig);
const mk = new MnemonicKey({
mnemonic: this.walletPrivateKey,
});
const wallet = lcdClient.wallet(mk);
logger.debug("TIME: Querying fee");
let fee: Coin = await this.getUpdateFee(signedVAAs);
logger.debug("TIME: creating messages");
let base64VAAs = [];
for (let idx = 0; idx < signedVAAs.length; ++idx) {
base64VAAs.push(Buffer.from(signedVAAs[idx], "hex").toString("base64"));
}
let msg = new MsgExecuteContract(
wallet.key.accAddress,
this.contractAddress,
{
update_price_feeds: {
data: base64VAAs,
},
},
[fee]
);
let gasPrices;
try {
gasPrices = await axios
.get(TERRA_GAS_PRICES_URL)
.then((result) => result.data);
} catch (e: any) {
logger.warn(e);
logger.warn(e.stack);
logger.warn(
"Couldn't fetch gas price and fee estimate. Using default values"
);
}
const tx = await wallet.createAndSignTx({
msgs: [msg],
memo: "P2T",
feeDenoms: [this.coinDenom],
gasPrices,
});
logger.debug("TIME: sending msg");
terraRes = await lcdClient.tx.broadcastSync(tx);
logger.debug(
`TIME: submitted to terra: terraRes: ${JSON.stringify(terraRes)}`
);
// Act on known Terra errors
if (terraRes.raw_log) {
if (terraRes.raw_log.search("VaaAlreadyExecuted") >= 0) {
logger.error(
"Already Executed:",
terraRes.txhash
? terraRes.txhash
: "<INTERNAL: no txhash for AlreadyExecuted>"
);
return new RelayResult(RelayRetcode.AlreadyExecuted, []);
} else if (terraRes.raw_log.search("insufficient funds") >= 0) {
logger.error(
"relay failed due to insufficient funds: ",
JSON.stringify(terraRes)
);
return new RelayResult(RelayRetcode.InsufficientFunds, []);
} else if (terraRes.raw_log.search("failed") >= 0) {
logger.error(
"relay seems to have failed: ",
JSON.stringify(terraRes)
);
return new RelayResult(RelayRetcode.Fail, []);
}
} else {
logger.warn("No logs were found, result: ", JSON.stringify(terraRes));
}
// Base case, no errors were detected and no exceptions were thrown
if (terraRes.txhash) {
return new RelayResult(RelayRetcode.Success, [terraRes.txhash]);
}
} catch (e: any) {
// Act on known Terra exceptions
logger.error(e);
logger.error(e.stack);
if (
e.message &&
e.message.search("timeout") >= 0 &&
e.message.search("exceeded") >= 0
) {
logger.error("relay timed out: %o", e);
return new RelayResult(RelayRetcode.Timeout, []);
} else if (
e.response?.data?.error &&
e.response.data.error.search("VaaAlreadyExecuted") >= 0
) {
logger.error("VAA Already Executed");
logger.error(e.response.data.error);
return new RelayResult(RelayRetcode.AlreadyExecuted, []);
} else if (
e.response?.data?.message &&
e.response.data.message.search("account sequence mismatch") >= 0
) {
logger.error("Account sequence mismatch");
logger.error(e.response.data.message);
return new RelayResult(RelayRetcode.SeqNumMismatch, []);
} else {
logger.error("Unknown error:");
logger.error(e.toString());
return new RelayResult(RelayRetcode.Fail, []);
}
}
logger.error("INTERNAL: Terra relay() logic failed to produce a result");
return new RelayResult(RelayRetcode.Fail, []);
}
async query(priceId: PriceId) {
logger.info("Querying terra for price info for priceId [" + priceId + "]");
const lcdClient = new LCDClient(this.lcdConfig);
return await lcdClient.wasm.contractQuery(this.contractAddress, {
price_feed: {
id: priceId,
},
});
}
async getUpdateFee(hexVAAs: Array<string>): Promise<Coin> {
const lcdClient = new LCDClient(this.lcdConfig);
let base64VAAs = [];
for (let idx = 0; idx < hexVAAs.length; ++idx) {
base64VAAs.push(Buffer.from(hexVAAs[idx], "hex").toString("base64"));
}
let result = await lcdClient.wasm.contractQuery<Coin.Data>(
this.contractAddress,
{
get_update_fee: {
vaas: base64VAAs,
},
}
);
return Coin.fromData(result);
}
async getPayerInfo(): Promise<{ address: string; balance: bigint }> {
const lcdClient = new LCDClient(this.lcdConfig);
const mk = new MnemonicKey({
mnemonic: this.walletPrivateKey,
});
const wallet = lcdClient.wallet(mk);
let balance: number = NaN;
try {
logger.debug("querying wallet balance");
let coins: any;
let pagination: any;
[coins, pagination] = await lcdClient.bank.balance(wallet.key.accAddress);
logger.debug("wallet query returned: %o", coins);
if (coins) {
let coin = coins.get(this.coinDenom);
if (coin) {
balance = parseInt(coin.toData().amount);
} else {
logger.error(
"failed to query coin balance, coin [" +
this.coinDenom +
"] is not in the wallet, coins: %o",
coins
);
}
} else {
logger.error("failed to query coin balance!");
}
} catch (e) {
logger.error("failed to query coin balance: %o", e);
}
return { address: wallet.key.accAddress, balance: BigInt(balance) };
}
}

View File

@@ -1,50 +0,0 @@
import { Request, Response } from "express";
import { logger } from "./helpers";
import { getStatus, getPriceData, isHealthy } from "./worker";
let restPort: number = 0;
export function init(runRest: boolean): boolean {
if (!runRest) return true;
if (!process.env.REST_PORT) return true;
restPort = parseInt(process.env.REST_PORT);
return true;
}
export async function run() {
if (restPort == 0) return;
const express = require("express");
const cors = require("cors");
const app = express();
app.use(cors());
app.listen(restPort, () =>
logger.debug("listening on REST port " + restPort)
);
(async () => {
app.get("/status", async (req: Request, res: Response) => {
let result = await getStatus();
res.json(result);
});
app.get("/queryterra/:price_id", async (req: Request, res: Response) => {
let result = await getPriceData(req.params.price_id);
res.json(result);
});
app.get("/health", async (req: Request, res: Response) => {
if (isHealthy()) {
res.sendStatus(200);
} else {
res.sendStatus(503);
}
});
app.get("/", (req: Request, res: Response) =>
res.json(["/status", "/queryterra/<price_id>", "/health"])
);
})();
}
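// Example queries against this service, assuming REST_PORT=4200:
//
//   curl localhost:4200/status                  # JSON array of per-batch relay stats
//   curl localhost:4200/queryterra/<price_id>   # on-chain price feed lookup
//   curl localhost:4200/health                  # 200 while relays are recent, 503 otherwise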

View File

@@ -1,523 +0,0 @@
import { Mutex } from "async-mutex";
let CondVar = require("condition-variable");
import { setDefaultWasm } from "@certusone/wormhole-sdk/lib/cjs/solana/wasm";
import { uint8ArrayToHex } from "@certusone/wormhole-sdk";
import { Relay, RelayResult, RelayRetcode } from "./relay/iface";
import * as helpers from "./helpers";
import { logger } from "./helpers";
import { PromHelper } from "./promHelpers";
import {
BatchPriceAttestation,
getBatchAttestationHashKey,
getBatchSummary,
} from "@pythnetwork/wormhole-attester-sdk";
const mutex = new Mutex();
let condition = new CondVar();
let conditionTimeout = 20000;
type PendingPayload = {
vaa_bytes: string;
batchAttestation: BatchPriceAttestation;
receiveTime: Date;
seqNum: number;
};
let pendingMap = new Map<string, PendingPayload>(); // Keyed by the hash of the price_ids in the batch attestation. Note that Map maintains insertion order, not key order.
type ProductData = {
key: string;
lastTimePublished: Date;
numTimesPublished: number;
lastBatchAttestation: BatchPriceAttestation;
lastResult: any;
};
type CurrentEntry = {
pendingEntry: PendingPayload;
currObj: ProductData;
};
let productMap = new Map<string, ProductData>(); // Keyed by the hash of the price_ids in the batch attestation.
let relayImpl: Relay;
let metrics: PromHelper;
let nextBalanceQueryTimeAsMs: number = 0;
let balanceQueryInterval = 0;
let walletTimeStamp: Date;
let maxPerBatch: number = 1;
let maxAttempts: number = 2;
let retryDelayInMs: number = 0;
let maxHealthyNoRelayDurationInSeconds: number = 120;
let lastSuccessfulRelayTime: Date;
export function init(runWorker: boolean, relay: Relay): boolean {
if (!runWorker) return true;
relayImpl = relay;
if (process.env.MAX_MSGS_PER_BATCH) {
maxPerBatch = parseInt(process.env.MAX_MSGS_PER_BATCH);
}
if (maxPerBatch <= 0) {
logger.error(
"Environment variable MAX_MSGS_PER_BATCH has an invalid value of " +
maxPerBatch +
", must be greater than zero."
);
return false;
}
if (process.env.RETRY_MAX_ATTEMPTS) {
maxAttempts = parseInt(process.env.RETRY_MAX_ATTEMPTS);
}
if (maxAttempts <= 0) {
logger.error(
"Environment variable RETRY_MAX_ATTEMPTS has an invalid value of " +
maxAttempts +
", must be greater than zero."
);
return false;
}
if (process.env.RETRY_DELAY_IN_MS) {
retryDelayInMs = parseInt(process.env.RETRY_DELAY_IN_MS);
}
if (retryDelayInMs < 0) {
logger.error(
"Environment variable RETRY_DELAY_IN_MS has an invalid value of " +
retryDelayInMs +
", must be positive or zero."
);
return false;
}
if (process.env.MAX_HEALTHY_NO_RELAY_DURATION_IN_SECONDS) {
maxHealthyNoRelayDurationInSeconds = parseInt(
process.env.MAX_HEALTHY_NO_RELAY_DURATION_IN_SECONDS
);
}
if (maxHealthyNoRelayDurationInSeconds <= 0) {
logger.error(
"Environment variable MAX_HEALTHY_NO_RELAY_DURATION_IN_SECONDS has an invalid value of " +
maxHealthyNoRelayDurationInSeconds +
", must be positive."
);
return false;
}
return true;
}
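// Example environment for this worker (illustrative values; BAL_QUERY_INTERVAL
// and RETRY_DELAY_IN_MS are in milliseconds):
//   MAX_MSGS_PER_BATCH=1 RETRY_MAX_ATTEMPTS=2 RETRY_DELAY_IN_MS=250 \
//   MAX_HEALTHY_NO_RELAY_DURATION_IN_SECONDS=120 BAL_QUERY_INTERVAL=60000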
export async function run(met: PromHelper) {
setDefaultWasm("node");
metrics = met;
await mutex.runExclusive(async () => {
logger.info(
"will attempt to relay each pyth message at most " +
maxAttempts +
" times, with a delay of " +
retryDelayInMs +
" milliseconds between attempts, will batch up to " +
maxPerBatch +
" pyth messages in a batch"
);
if (process.env.BAL_QUERY_INTERVAL) {
balanceQueryInterval = parseInt(process.env.BAL_QUERY_INTERVAL);
}
try {
let { address: payerAddress, balance: payerBalance } =
await relayImpl.getPayerInfo();
if (balanceQueryInterval !== 0) {
logger.info(
"initial wallet balance is " +
payerBalance +
", will query every " +
balanceQueryInterval +
" milliseconds."
);
metrics.setWalletBalance(payerBalance);
nextBalanceQueryTimeAsMs = new Date().getTime() + balanceQueryInterval;
} else {
logger.info("initial wallet balance is " + payerBalance);
metrics.setWalletBalance(payerBalance);
}
} catch (e) {
walletTimeStamp = new Date();
}
await condition.wait(computeTimeout(), callBack);
});
}
async function callBack(err: any, result: any) {
logger.debug(
"entering callback, pendingEvents: " +
pendingMap.size +
", err: %o, result: %o",
err,
result
);
await updateBalance();
// condition = null;
// await helpers.sleep(10000);
// logger.debug("done with long sleep");
let done = false;
do {
let currObjs = new Array<CurrentEntry>();
let messages = new Array<string>();
await mutex.runExclusive(async () => {
condition = null;
logger.debug("in callback, getting pending events.");
await getPendingEventsAlreadyLocked(currObjs, messages);
if (currObjs.length === 0) {
done = true;
condition = new CondVar();
await condition.wait(computeTimeout(), callBack);
}
});
if (currObjs.length !== 0) {
logger.debug("in callback, relaying " + currObjs.length + " events.");
let sendTime = new Date();
let relayResult = await relayEventsNotLocked(messages);
await mutex.runExclusive(async () => {
logger.debug("in callback, finalizing " + currObjs.length + " events.");
await finalizeEventsAlreadyLocked(currObjs, relayResult, sendTime);
await updateBalance();
if (pendingMap.size === 0) {
logger.debug("in callback, rearming the condition.");
done = true;
condition = new CondVar();
await condition.wait(computeTimeout(), callBack);
}
});
}
} while (!done);
logger.debug("leaving callback.");
}
function computeTimeout(): number {
if (balanceQueryInterval !== 0) {
let now = new Date().getTime();
if (now < nextBalanceQueryTimeAsMs) {
return nextBalanceQueryTimeAsMs - now;
}
// Since a lot of time has passed, timeout in 1ms (0 means no-timeout)
// In most cases this line should not be reached.
return 1;
}
return conditionTimeout;
}
async function getPendingEventsAlreadyLocked(
currObjs: Array<CurrentEntry>,
messages: Array<string>
) {
while (pendingMap.size !== 0 && currObjs.length < maxPerBatch) {
const first = pendingMap.entries().next();
logger.debug("processing event with key [" + first.value[0] + "]");
const pendingValue: PendingPayload = first.value[1];
let pendingKey = getBatchAttestationHashKey(pendingValue.batchAttestation);
let currObj = productMap.get(pendingKey);
if (currObj) {
currObj.lastBatchAttestation = pendingValue.batchAttestation;
currObj.lastTimePublished = new Date();
productMap.set(pendingKey, currObj);
logger.debug(
"processing update " +
currObj.numTimesPublished +
" for [" +
pendingKey +
"], seq num " +
pendingValue.seqNum
);
} else {
logger.debug(
"processing first update for [" +
pendingKey +
"], seq num " +
pendingValue.seqNum
);
currObj = {
key: pendingKey,
lastBatchAttestation: pendingValue.batchAttestation,
lastTimePublished: new Date(),
numTimesPublished: 0,
lastResult: "",
};
productMap.set(pendingKey, currObj);
}
currObjs.push({ pendingEntry: pendingValue, currObj: currObj });
messages.push(pendingValue.vaa_bytes);
pendingMap.delete(first.value[0]);
}
if (currObjs.length !== 0) {
for (let idx = 0; idx < currObjs.length; ++idx) {
pendingMap.delete(currObjs[idx].currObj.key);
}
}
}
const RELAY_SUCCESS: number = 0;
const RELAY_FAIL: number = 1;
const RELAY_ALREADY_EXECUTED: number = 2;
const RELAY_TIMEOUT: number = 3;
const RELAY_SEQ_NUM_MISMATCH: number = 4;
const RELAY_INSUFFICIENT_FUNDS: number = 5;
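// These legacy numeric constants mirror the RelayRetcode enum order in
// relay/iface.ts, e.g. RELAY_SUCCESS === RelayRetcode.Success and
// RELAY_INSUFFICIENT_FUNDS === RelayRetcode.InsufficientFunds.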
async function relayEventsNotLocked(
messages: Array<string>
): Promise<RelayResult> {
let relayResult: RelayResult | null = null;
let retry: boolean = false;
// CAUTION(2022-03-21): The retry logic is not very efficient at
// handling more than one message. It may attempt redundant
// transactions during retries for messages that were successful on a
// previous attempt.
for (let attempt = 0; attempt < maxAttempts; ++attempt) {
retry = false;
relayResult = await relayImpl.relay(messages).catch((e) => {
logger.error(
`INTERNAL: Uncaught relayImpl.relay() exception, details:\n${JSON.stringify(
e
)}`
);
return new RelayResult(RelayRetcode.Fail, []);
});
switch (relayResult.code) {
case RelayRetcode.Success:
case RelayRetcode.AlreadyExecuted:
case RelayRetcode.InsufficientFunds:
logger.info(`Not retrying for relay retcode ${relayResult.code}`);
break;
case RelayRetcode.Fail:
case RelayRetcode.SeqNumMismatch:
case RelayRetcode.Timeout:
retry = true;
break;
default:
logger.warn(`Retrying for unknown relay retcode ${relayResult.code}`);
retry = true;
break;
}
logger.debug(
"relay attempt complete: " +
JSON.stringify(relayResult) +
", retry: " +
retry +
", attempt " +
attempt +
" of " +
maxAttempts
);
if (!retry) {
break;
} else {
metrics.incRetries();
if (retryDelayInMs != 0) {
logger.debug(
"delaying for " + retryDelayInMs + " milliseconds before retrying"
);
await helpers.sleep(retryDelayInMs * (attempt + 1));
}
}
}
if (retry) {
logger.error("failed to relay batch, retry count exceeded!");
metrics.incRetriesExceeded();
}
if (!relayResult) {
logger.error("INTERNAL: worker failed to produce a relay result.");
relayResult = new RelayResult(RelayRetcode.Fail, []);
}
return relayResult;
}
async function finalizeEventsAlreadyLocked(
currObjs: Array<CurrentEntry>,
relayResult: RelayResult,
sendTime: Date
) {
for (let idx = 0; idx < currObjs.length; ++idx) {
let currObj = currObjs[idx].currObj;
let currEntry = currObjs[idx].pendingEntry;
currObj.lastResult = relayResult;
currObj.numTimesPublished = currObj.numTimesPublished + 1;
if (relayResult.code == RelayRetcode.Success) {
metrics.incSuccesses();
} else if (relayResult.code == RelayRetcode.AlreadyExecuted) {
metrics.incAlreadyExec();
} else if (relayResult.code == RelayRetcode.Timeout) {
metrics.incTransferTimeout();
metrics.incFailures();
} else if (relayResult.code == RelayRetcode.SeqNumMismatch) {
metrics.incSeqNumMismatch();
metrics.incFailures();
} else if (relayResult.code == RelayRetcode.InsufficientFunds) {
metrics.incInsufficentFunds();
metrics.incFailures();
} else {
metrics.incFailures();
}
productMap.set(currObj.key, currObj);
let completeTime = new Date();
metrics.setSeqNum(currEntry.seqNum);
metrics.addCompleteTime(
completeTime.getTime() - currEntry.receiveTime.getTime()
);
logger.info(
"complete:" +
"seqNum: " +
currEntry.seqNum +
", price_ids: " +
getBatchSummary(currEntry.batchAttestation) +
", rcv2SendBegin: " +
(sendTime.getTime() - currEntry.receiveTime.getTime()) +
", rcv2SendComplete: " +
(completeTime.getTime() - currEntry.receiveTime.getTime()) +
", totalSends: " +
currObj.numTimesPublished +
", result: " +
JSON.stringify(relayResult)
);
}
if (relayResult.is_ok()) {
lastSuccessfulRelayTime = new Date();
}
}
async function updateBalance() {
let now = new Date();
if (balanceQueryInterval > 0 && now.getTime() >= nextBalanceQueryTimeAsMs) {
try {
let { address, balance } = await relayImpl.getPayerInfo();
walletTimeStamp = new Date();
logger.info(
"wallet " +
address +
" balance: " +
balance +
", update time: " +
walletTimeStamp.toISOString()
);
metrics.setWalletBalance(balance);
} catch (e) {
logger.error("failed to query wallet balance:" + e);
}
nextBalanceQueryTimeAsMs = now.getTime() + balanceQueryInterval;
}
}
export async function postEvent(
vaaBytes: any,
batchAttestation: BatchPriceAttestation,
sequence: number,
receiveTime: Date
) {
let event: PendingPayload = {
vaa_bytes: uint8ArrayToHex(vaaBytes),
batchAttestation: batchAttestation,
receiveTime: receiveTime,
seqNum: sequence,
};
let pendingKey = getBatchAttestationHashKey(batchAttestation);
await mutex.runExclusive(() => {
logger.debug("posting event with key [" + pendingKey + "]");
pendingMap.set(pendingKey, event);
if (condition) {
logger.debug("hitting condition variable.");
condition.complete(true);
}
});
}
export async function getStatus() {
let result = "[";
await mutex.runExclusive(() => {
let first: boolean = true;
for (let [key, value] of productMap) {
if (first) {
first = false;
} else {
result = result + ", ";
}
let item: object = {
summary: getBatchSummary(value.lastBatchAttestation),
num_times_published: value.numTimesPublished,
last_time_published: value.lastTimePublished.toISOString(),
result: value.lastResult,
};
result = result + JSON.stringify(item);
}
});
result = result + "]";
return result;
}
// Note that querying the contract does not update the sequence number, so we don't need to be locked.
export async function getPriceData(priceId: string): Promise<any> {
let result: any;
// await mutex.runExclusive(async () => {
result = await relayImpl.query(priceId);
// });
return result;
}
export function isHealthy(): boolean {
if (lastSuccessfulRelayTime === undefined) {
return false;
}
const currentDate = new Date();
const timeDiffMs = currentDate.getTime() - lastSuccessfulRelayTime.getTime();
if (timeDiffMs > maxHealthyNoRelayDurationInSeconds * 1000) {
return false;
}
return true;
}

View File

@@ -1,9 +0,0 @@
{
"extends": "../../../tsconfig.base.json",
"include": ["src"],
"exclude": ["node_modules", "**/__tests__/*"],
"compilerOptions": {
"rootDir": "src/",
"outDir": "./lib"
}
}

View File

@@ -1,199 +0,0 @@
#!/usr/bin/env python3
# This script sets up a simple loop for periodic attestation of Pyth data
import json
import logging
import os
import re
import sys
import threading
from http.client import HTTPConnection
from subprocess import PIPE, STDOUT, Popen
from pyth_utils import *
logging.basicConfig(
level=logging.DEBUG, format="%(asctime)s | %(module)s | %(levelname)s | %(message)s"
)
P2W_SOL_ADDRESS = os.environ.get(
"P2W_SOL_ADDRESS", "P2WH424242424242424242424242424242424242424"
)
P2W_OWNER_KEYPAIR = os.environ.get(
"P2W_OWNER_KEYPAIR", "/solana-secrets/p2w_owner.json"
)
P2W_ATTESTATIONS_PORT = int(os.environ.get("P2W_ATTESTATIONS_PORT", 4343))
P2W_INITIALIZE_SOL_CONTRACT = os.environ.get("P2W_INITIALIZE_SOL_CONTRACT", None)
PYTH_TEST_ACCOUNTS_HOST = "pyth"
PYTH_TEST_ACCOUNTS_PORT = 4242
P2W_ATTESTATION_CFG = os.environ.get("P2W_ATTESTATION_CFG", None)
WORMHOLE_ADDRESS = os.environ.get(
"WORMHOLE_ADDRESS", "Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o"
)
# The attester needs a string, but we validate it as an int first
P2W_RPC_TIMEOUT_SECS = str(int(os.environ.get("P2W_RPC_TIMEOUT_SECS", "20")))
if P2W_INITIALIZE_SOL_CONTRACT is not None:
# Get actor pubkeys
P2W_OWNER_ADDRESS = sol_run_or_die(
"address", ["--keypair", P2W_OWNER_KEYPAIR], capture_output=True
).stdout.strip()
PYTH_OWNER_ADDRESS = sol_run_or_die(
"address", ["--keypair", PYTH_PROGRAM_KEYPAIR], capture_output=True,
).stdout.strip()
init_result = run_or_die(
[
"pwhac",
"--p2w-addr",
P2W_SOL_ADDRESS,
"--rpc-url",
SOL_RPC_URL,
"--payer",
SOL_PAYER_KEYPAIR,
"init",
"--wh-prog",
WORMHOLE_ADDRESS,
"--owner",
P2W_OWNER_ADDRESS,
"--pyth-owner",
PYTH_OWNER_ADDRESS,
],
capture_output=True,
debug=True,
die=False,
)
if init_result.returncode != 0:
logging.error(
"NOTE: pwhac init failed, retrying with set_config"
)
run_or_die(
[
"pwhac",
"--p2w-addr",
P2W_SOL_ADDRESS,
"--rpc-url",
SOL_RPC_URL,
"--payer",
SOL_PAYER_KEYPAIR,
"set-config",
"--owner",
P2W_OWNER_KEYPAIR,
"--new-owner",
P2W_OWNER_ADDRESS,
"--new-wh-prog",
WORMHOLE_ADDRESS,
"--new-pyth-owner",
PYTH_OWNER_ADDRESS,
],
capture_output=True,
)
# Retrieve available symbols from the test pyth publisher if not provided in envs
if P2W_ATTESTATION_CFG is None:
P2W_ATTESTATION_CFG = "./attestation_cfg_test.yaml"
publisher_state_map = get_pyth_accounts(PYTH_TEST_ACCOUNTS_HOST, PYTH_TEST_ACCOUNTS_PORT)
pyth_accounts = publisher_state_map["symbols"]
logging.info(
f"Retrieved {len(pyth_accounts)} Pyth accounts from endpoint: {pyth_accounts}"
)
mapping_addr = publisher_state_map["mapping_addr"]
cfg_yaml = f"""
---
mapping_addr: {mapping_addr}
mapping_reload_interval_mins: 1 # Very fast for testing purposes
min_rpc_interval_ms: 0 # RIP RPC
max_batch_jobs: 1000 # Where we're going there's no oomkiller
default_attestation_conditions:
min_interval_ms: 10000
symbol_groups:
- group_name: fast_interval_rate_limited
conditions:
min_interval_ms: 1000
rate_limit_interval_secs: 2
symbols:
"""
# Integer-divide the symbols into thirds: the first third goes into the
# fast_interval_rate_limited group above, the middle third into the
# longer_interval_sensitive_changes group below (Python slices exclude
# the end index).
third_len = len(pyth_accounts) // 3
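# e.g. with the default 11 test symbols, third_len == 3: pyth_accounts[:3]
# (3 symbols) go to the first group, pyth_accounts[3:-3] (5 symbols) to the
# second, and the final 3 are left to default_attestation_conditions above.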
for thing in pyth_accounts[:third_len]:
name = thing["name"]
price = thing["price"]
product = thing["product"]
cfg_yaml += f"""
- type: key
name: {name}
price: {price}
product: {product}"""
# End of fast_interval_only
cfg_yaml += f"""
- group_name: longer_interval_sensitive_changes
conditions:
min_interval_ms: 3000
price_changed_bps: 300
symbols:
"""
for stuff in pyth_accounts[third_len:-third_len]:
name = stuff["name"]
price = stuff["price"]
product = stuff["product"]
cfg_yaml += f"""
- type: key
name: {name}
price: {price}
product: {product}"""
with open(P2W_ATTESTATION_CFG, "w") as f:
f.write(cfg_yaml)
f.flush()
# Set helpfully chatty logging default, filtering especially annoying
# modules like async HTTP requests and tokio runtime logs
os.environ["RUST_LOG"] = os.environ.get("RUST_LOG", "info")
# Do not exit this script if a continuous attestation stops for
# whatever reason (this avoids k8s restart penalty)
while True:
# Start the child process in daemon mode
pwhac_process = Popen(
[
"pwhac",
"--commitment",
"confirmed",
"--p2w-addr",
P2W_SOL_ADDRESS,
"--rpc-url",
SOL_RPC_URL,
"--payer",
SOL_PAYER_KEYPAIR,
"attest",
"-f",
P2W_ATTESTATION_CFG,
"--timeout",
P2W_RPC_TIMEOUT_SECS,
]
)
# Wait for an unexpected process exit
retcode = pwhac_process.wait()
# Yell if the supposedly non-stop attestation process exits
logging.warning(f"pwhac stopped unexpectedly with code {retcode}")

View File

@@ -1,57 +0,0 @@
# This script prepares a local Squads multisig deployment for use with
# the multisig_wh_message_builder
import errno
import os
import sys
from pyth_utils import *
MULTISIG_SCRIPT_CMD_PREFIX = "npm run start --".split(" ")
MULTISIG_SCRIPT_DIR = os.environ.get("MULTISIG_SCRIPT_DIR", "/home/node/governance/multisig_wh_message_builder")
MESH_KEY_DIR = "/home/node/tilt_devnet/secrets/solana/squads/"
MESH_PROGRAM_ADDR = "SMPLVC8MxZ5Bf5EfF7PaMiTCxoBAcmkbM2vkrvMK8ho"
MESH_VAULT_EXT_AUTHORITY_KEY_PATH = MESH_KEY_DIR + "external_authority.json"
ALICE_KEY_PATH = MESH_KEY_DIR + "member_alice.json"
BOB_KEY_PATH = MESH_KEY_DIR + "member_bob.json"
create_key_addr = "73UuSY2yXat7h7T49MMGg8TiHPqJJKKVc33DmC4b41Hf" # The person that instantiated the multisig on mainnet used this create key, it never needs to sign but we're using it to match mainnet
ext_authority_addr = sol_run_or_die("address", ["--keypair", MESH_VAULT_EXT_AUTHORITY_KEY_PATH], capture_output=True).stdout.strip()
alice_addr = sol_run_or_die("address", ["--keypair", ALICE_KEY_PATH], capture_output=True).stdout.strip()
bob_addr = sol_run_or_die("address", ["--keypair", BOB_KEY_PATH], capture_output=True).stdout.strip()
# wrap run_or_die in msg builder common cli args
def msg_builder_run_or_die(args = [], debug=False, **kwargs):
"""
Message builder boilerplate in front of run_or_die()
"""
return run_or_die(
MULTISIG_SCRIPT_CMD_PREFIX + args, cwd=MULTISIG_SCRIPT_DIR, debug=debug, **kwargs)
# create a Multisig Vault
res = msg_builder_run_or_die([
"init-vault",
"-k", create_key_addr,
"-x", ext_authority_addr,
"-p", SOL_PAYER_KEYPAIR,
"-c", "localdevnet",
"-r", SOL_RPC_URL,
"-i", f"{alice_addr},{bob_addr}",
"-t", "1", # 1/3 threshold
],
capture_output=True, debug=True, die=False)
if res.returncode == errno.EEXIST:
print("WARNING: Skipping vault creation and testing, received EEXIST from script", file=sys.stderr)
elif res.returncode != 0:
print(f"ERROR: unexpected failure with code {res.returncode}", file=sys.stderr)
sys.exit(res.returncode)
else:
print("Vault created, starting test routine", file=sys.stderr)
# TODO(2022-12-08): Add test scenarios
sys.stderr.flush()
readiness()

View File

@@ -1,182 +0,0 @@
#!/usr/bin/env python3
from pyth_utils import *
from http.server import HTTPServer, BaseHTTPRequestHandler
from concurrent.futures import ThreadPoolExecutor, as_completed
import json
import os
import random
import sys
import threading
import time
# The mock publisher needs to fund the publisher identity account,
# so it cannot use a separate payer
SOL_AIRDROP_AMT = int(os.environ.get("SOL_AIRDROP_AMT", 0))
class PythAccEndpoint(BaseHTTPRequestHandler):
"""
A dumb endpoint to respond with a JSON containing Pyth symbol and mapping addresses
"""
def do_GET(self):
print(f"Got path {self.path}")
sys.stdout.flush()
data = json.dumps(HTTP_ENDPOINT_DATA).encode("utf-8")
print(f"Sending:\n{data}")
self.send_response(200)
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", str(len(data)))
self.end_headers()
self.wfile.write(data)
self.wfile.flush()
# Test publisher state that gets served via the HTTP endpoint. Note: the schema of this dict is extended in a few places below.
# all_symbols_added is set to True once all dynamically-created symbols are added to the on-chain program. This
# flag allows the integration test in check_attestations.py to determine that every on-chain symbol is being attested.
HTTP_ENDPOINT_DATA = {"symbols": [], "mapping_addr": None, "all_symbols_added": False}
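# Once populated, the JSON served by do_GET looks roughly like this (pubkeys are illustrative):
# {"symbols": [{"name": "Test symbol 0", "product": "<pubkey>", "price": "<pubkey>"}],
#  "mapping_addr": "<pubkey>", "all_symbols_added": false}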
def publisher_random_update(price_pubkey):
"""
Update the specified price with random values
"""
value = random.randrange(1000, 2000)
confidence = random.randrange(1, 10)
pyth_run_or_die("upd_price_val", args=[
price_pubkey, str(value), str(confidence), "trading"
])
print(f"Price {price_pubkey} value updated to {str(value)}!")
def accounts_endpoint():
"""
Run a barebones HTTP server to share the dynamic Pyth
mapping/product/price account addresses
"""
server_address = ('', 4242)
httpd = HTTPServer(server_address, PythAccEndpoint)
httpd.serve_forever()
def add_symbol(num: int):
"""
NOTE: Updates HTTP_ENDPOINT_DATA
"""
symbol_name = f"Test symbol {num}"
# Add a product
prod_pubkey = pyth_admin_run_or_die(
"add_product", capture_output=True).stdout.strip()
print(f"{symbol_name}: Added product {prod_pubkey}")
# Add a price
price_pubkey = pyth_admin_run_or_die(
"add_price",
args=[prod_pubkey, "price"],
capture_output=True
).stdout.strip()
print(f"{symbol_name}: Added price {price_pubkey}")
# Become a publisher for the new price
pyth_admin_run_or_die(
"add_publisher", args=[publisher_pubkey, price_pubkey],
debug=True,
capture_output=True)
print(f"{symbol_name}: Added publisher {publisher_pubkey}")
# Update the prices as the newly added publisher
publisher_random_update(price_pubkey)
sym = {
"name": symbol_name,
"product": prod_pubkey,
"price": price_pubkey
}
HTTP_ENDPOINT_DATA["symbols"].append(sym)
sys.stdout.flush()
print(f"New symbol: {num}")
return num
# Fund the publisher
sol_run_or_die("airdrop", [
str(SOL_AIRDROP_AMT),
"--keypair", PYTH_PUBLISHER_KEYPAIR,
"--commitment", "finalized",
])
# Create a mapping
pyth_admin_run_or_die("init_mapping", capture_output=True)
mapping_addr = sol_run_or_die("address", args=[
"--keypair", PYTH_MAPPING_KEYPAIR
], capture_output=True).stdout.strip()
HTTP_ENDPOINT_DATA["mapping_addr"] = mapping_addr
print(f"New mapping at {mapping_addr}")
print(f"Creating {PYTH_TEST_SYMBOL_COUNT} test Pyth symbols")
publisher_pubkey = sol_run_or_die("address", args=[
"--keypair", PYTH_PUBLISHER_KEYPAIR
], capture_output=True).stdout.strip()
with ThreadPoolExecutor(max_workers=PYTH_TEST_SYMBOL_COUNT) as executor:
add_symbol_futures = {executor.submit(add_symbol, sym_id) for sym_id in range(PYTH_TEST_SYMBOL_COUNT)}
for future in as_completed(add_symbol_futures):
print(f"Completed {future.result()}")
print(
f"Mock updates ready to roll. Updating every {str(PYTH_PUBLISHER_INTERVAL_SECS)} seconds")
# Spin off the readiness probe endpoint into a separate thread
readiness_thread = threading.Thread(target=readiness, daemon=True)
# Start an HTTP endpoint for looking up test product/price addresses
http_service = threading.Thread(target=accounts_endpoint, daemon=True)
readiness_thread.start()
http_service.start()
next_new_symbol_id = PYTH_TEST_SYMBOL_COUNT
last_new_sym_added_at = time.monotonic()
with ThreadPoolExecutor() as executor: # Used for async adding of products and prices
dynamically_added_symbols = 0
while True:
for sym in HTTP_ENDPOINT_DATA["symbols"]:
publisher_random_update(sym["price"])
# Add a symbol if a new-symbol interval is configured. A new symbol is added once PYTH_NEW_SYMBOL_INTERVAL_SECS
# have passed since the previous symbol was added. The second constraint ensures that
# at most PYTH_DYNAMIC_SYMBOL_COUNT new price symbols are created.
if PYTH_NEW_SYMBOL_INTERVAL_SECS > 0 and dynamically_added_symbols < PYTH_DYNAMIC_SYMBOL_COUNT:
# Do it if enough time passed
now = time.monotonic()
if (now - last_new_sym_added_at) >= PYTH_NEW_SYMBOL_INTERVAL_SECS:
executor.submit(add_symbol, next_new_symbol_id) # Returns immediately, runs in background
last_new_sym_added_at = now
next_new_symbol_id += 1
dynamically_added_symbols += 1
if dynamically_added_symbols >= PYTH_DYNAMIC_SYMBOL_COUNT:
HTTP_ENDPOINT_DATA["all_symbols_added"] = True
time.sleep(PYTH_PUBLISHER_INTERVAL_SECS)
sys.stdout.flush()
readiness_thread.join()
http_service.join()

View File

@@ -1,156 +0,0 @@
import logging
import os
import json
import socketserver
import subprocess
import sys
from http.client import HTTPConnection
# A generic unprivileged payer account with funds
SOL_PAYER_KEYPAIR = os.environ.get(
"SOL_PAYER_KEYPAIR", "/solana-secrets/solana-devnet.json"
)
# Settings specific to local devnet Pyth instance
PYTH = os.environ.get("PYTH", "./pyth")
PYTH_ADMIN = os.environ.get("PYTH_ADMIN", "./pyth_admin")
PYTH_KEY_STORE = os.environ.get("PYTH_KEY_STORE", "/home/pyth/.pythd")
PYTH_PROGRAM_KEYPAIR = os.environ.get(
"PYTH_PROGRAM_KEYPAIR", f"{PYTH_KEY_STORE}/publish_key_pair.json"
)
PYTH_PUBLISHER_KEYPAIR = os.environ.get(
"PYTH_PUBLISHER_KEYPAIR", f"{PYTH_KEY_STORE}/publish_key_pair.json"
)
# How long to sleep between mock Pyth price updates
PYTH_PUBLISHER_INTERVAL_SECS = float(os.environ.get("PYTH_PUBLISHER_INTERVAL_SECS", "5"))
PYTH_TEST_SYMBOL_COUNT = int(os.environ.get("PYTH_TEST_SYMBOL_COUNT", "11"))
PYTH_DYNAMIC_SYMBOL_COUNT = int(os.environ.get("PYTH_DYNAMIC_SYMBOL_COUNT", "3"))
# If above 0, adds a new test symbol periodically, waiting at least
# the given number of seconds in between
#
# NOTE: the new symbols are added in the HTTP endpoint used by the
# p2w-attest service in Tilt. You may need to wait to see p2w-attest
# pick up brand new symbols
PYTH_NEW_SYMBOL_INTERVAL_SECS = int(os.environ.get("PYTH_NEW_SYMBOL_INTERVAL_SECS", "30"))
PYTH_MAPPING_KEYPAIR = os.environ.get(
"PYTH_MAPPING_KEYPAIR", f"{PYTH_KEY_STORE}/mapping_key_pair.json"
)
# SOL RPC settings
SOL_RPC_HOST = os.environ.get("SOL_RPC_HOST", "solana-devnet")
SOL_RPC_PORT = int(os.environ.get("SOL_RPC_PORT", 8899))
SOL_RPC_URL = os.environ.get(
"SOL_RPC_URL", "http://{0}:{1}".format(SOL_RPC_HOST, SOL_RPC_PORT)
)
# A TCP port we open when a service is ready
READINESS_PORT = int(os.environ.get("READINESS_PORT", "2000"))
def run_or_die(args, die=True, debug=False, **kwargs):
"""
Opinionated subprocess.run() call with fancy logging
"""
args_readable = " ".join(args)
print(f"CMD RUN\t{args_readable}", file=sys.stderr)
sys.stderr.flush()
ret = subprocess.run(args, text=True, **kwargs)
if ret.returncode == 0:
print(f"CMD OK\t{args_readable}", file=sys.stderr)
else:
print(f"CMD FAIL {ret.returncode}\t{args_readable}", file=sys.stderr)
if debug:
out = ret.stdout if ret.stdout is not None else "<not captured>"
err = ret.stderr if ret.stderr is not None else "<not captured>"
print(f"CMD STDOUT\n{out}", file=sys.stderr)
print(f"CMD STDERR\n{err}", file=sys.stderr)
sys.stderr.flush()
if ret.returncode != 0:
if die:
sys.exit(ret.returncode)
else:
print("CMD DIE FALSE", file=sys.stderr)
sys.stderr.flush()
return ret
def pyth_run_or_die(subcommand, args=[], debug=False, **kwargs):
"""
Pyth boilerplate in front of run_or_die.
"""
return run_or_die(
[PYTH, subcommand] + args + (["-d"] if debug else [])
+ ["-k", PYTH_KEY_STORE]
+ ["-r", SOL_RPC_HOST]
+ ["-c", "finalized"]
+ ["-x"], # These means to bypass transaction proxy server. In this setup it's not running and it's required to bypass
**kwargs,
)
def pyth_admin_run_or_die(subcommand, args=[], debug=False, **kwargs):
"""
Pyth_admin boilerplate in front of run_or_die.
"""
return run_or_die(
[PYTH_ADMIN, subcommand] + args + (["-d"] if debug else [])
+ ["-n"] # These commands require y/n confirmation. This bypasses that
+ ["-k", PYTH_KEY_STORE]
+ ["-r", SOL_RPC_HOST]
+ ["-c", "finalized"],
**kwargs,
)
def sol_run_or_die(subcommand, args=[], **kwargs):
"""
Solana boilerplate in front of run_or_die
"""
return run_or_die(["solana", subcommand] + args + ["--url", SOL_RPC_URL], **kwargs)
def get_json(host, port, path):
conn = HTTPConnection(host, port)
conn.request("GET", path)
res = conn.getresponse()
# startswith() because the header value may include optional fields afterwards (like charset)
if res.getheader("Content-Type").startswith("application/json"):
return json.load(res)
else:
logging.error(f"Error getting {host}:{port}{path} : Content-Type was not application/json")
logging.error(f"HTTP response code: {res.getcode()}")
logging.error(f"HTTP headers: {res.getheaders()}")
logging.error(f"Message: {res.msg}")
sys.exit(1)
def get_pyth_accounts(host, port):
return get_json(host, port, "/")
class ReadinessTCPHandler(socketserver.StreamRequestHandler):
def handle(self):
"""TCP black hole"""
self.rfile.read(64)
def readiness():
"""
Accept connections from readiness probe
"""
with socketserver.TCPServer(
("0.0.0.0", READINESS_PORT), ReadinessTCPHandler
) as srv:
print(f"Opening port {READINESS_PORT} for readiness TCP probe")
srv.serve_forever()

View File

@@ -1,118 +0,0 @@
# Tilt Devnet
We use Tilt to run integration tests. These tests instantiate docker containers with all of the
various blockchains and services in order to verify that they interoperate correctly.
## Installation
The following dependencies are required for local development:
- [Go](https://golang.org/dl/) >= 1.17.5
- [Tilt](http://tilt.dev/) >= 0.20.8
- Any of the local Kubernetes clusters supported by Tilt.
We strongly recommend [minikube](https://kubernetes.io/docs/setup/learning-environment/minikube/) >= v1.21.0.
- Tilt will use Minikube's embedded Docker server. If Minikube is not used, a local instance of
[Docker](https://docs.docker.com/engine/install/) / moby-engine >= 19.03 is required.
See the [Tilt docs](https://docs.tilt.dev/install.html) on how to set up your local cluster -
it won't take more than a few minutes to set up! Example minikube invocation; adjust limits as needed:
minikube start --cpus=8 --memory=8G --disk-size=50G --driver=docker
npm wants to set up an insane number of inotify watches in the web container which may exceed kernel limits.
The minikube default is too low; adjust it like this:
minikube ssh 'echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p'
This should work on Linux, MacOS and Windows.
By default, the devnet is deployed to the `wormhole` namespace rather than `default`. This makes it easy to clean up the
entire deployment by simply removing the namespace, which isn't possible with `default`. Change your default namespace
to avoid having to specify `-n wormhole` for all commands:
kubectl config set-context --current --namespace=wormhole
After installing all dependencies, just run `tilt up`.
Whenever you modify a file, the devnet is automatically rebuilt and a rolling update is done.
Launch the devnet while specifying the number of guardian nodes to run (default is five):
tilt up -- --num=1
If you want to work on non-consensus parts of the code, running with a single guardian is easiest since
you won't have to wait for k8s to restart all pods.
## Usage
Watch pod status in your cluster:
kubectl get pod -A -w
Get logs for single guardian node:
kubectl logs guardian-0
Restart a specific pod:
kubectl delete pod guardian-0
Adjust number of nodes in running cluster: (this is only useful if you want to test scenarios where the number
of nodes diverges from the guardian set - otherwise, `tilt down --delete-namespaces` and restart the cluster)
tilt args -- --num=2
Tear down cluster:
tilt down --delete-namespaces
Once you're done, press Ctrl-C. Run `tilt down` to tear down the devnet.
## Getting started on a development VM
This tutorial assumes a clean Debian >=10 VM. We recommend at least **16 vCPU, 64G of RAM and 500G of disk**.
Rust eats CPU for breakfast, so the more CPUs, the nicer your Solana compilation experience will be.
Install Git first:
sudo apt-get install -y git
First, create an SSH key on the VM:
ssh-keygen -t ed25519
cat .ssh/id_ed25519.pub
You can then [add your public key on GitHub](https://github.com/settings/keys) and clone the repository:
git clone git@github.com:certusone/wormhole.git
Configure your Git identity:
git config --global user.name "Your Name"
git config --global user.email "yourname@company.com"
Your email address should be linked to your personal or company GitHub account.
### Set up devnet on the VM
After cloning the repo, run the setup script. It expects to run as a regular user account with sudo permissions.
It installs Go, Minikube, Tilt and any other dependencies required for Wormhole development:
cd wormhole
scripts/dev-setup.sh
You then need to close and re-open your session to apply the new environment.
If you use ControlMaster SSH sessions, make sure to kill the session before reconnecting (`ssh -O exit hostname`).
Start a minikube session with recommended parameters:
start-recommended-minikube
You can then run tilt normally (see above).
The easiest way to get access to the Tilt UI is to simply run Tilt on a public port, and use a firewall
of your choice to control access. For GCP, we ship a script that automatically runs `tilt up` on the right IP:
scripts/tilt-gcp-up.sh
If something breaks, just run `minikube delete` and start from scratch by running `start-recommended-minikube`.

View File

@@ -1,9 +0,0 @@
scrape_configs:
- job_name: p2w_attest
scrape_interval: 5s
static_configs:
- targets: ["p2w-attest:3000"]
- job_name: price_service
scrape_interval: 5s
static_configs:
- targets: ["pyth-price-service:8081"]

View File

@@ -1,34 +0,0 @@
#syntax=docker/dockerfile:1.2@sha256:e2a8561e419ab1ba6b2fe6cbdf49fd92b95912df1cf7d313c3e2230a333fdbcc
FROM ghcr.io/certusone/solana:1.10.31@sha256:d31e8db926a1d3fbaa9d9211d9979023692614b7b64912651aba0383e8c01bad AS solana
ARG WORMHOLE_TAG=v2.8.9
# libudev is needed by spl-token-cli, and ncat is needed by the devnet setup
# script to signal a health status to Tilt
RUN apt-get update && apt-get install -yq libudev-dev ncat
RUN curl -fsSL https://deb.nodesource.com/setup_16.x | bash - && apt-get install -y nodejs
ADD wormhole_attester/rust-toolchain /rust-toolchain
WORKDIR /usr/src/bridge-client
RUN --mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=target \
cargo install --version =2.0.12 --locked spl-token-cli --target-dir target
RUN solana config set --keypair "/solana-secrets/solana-devnet.json"
RUN solana config set --url "http://solana-devnet:8899"
ENV EMITTER_ADDRESS="11111111111111111111111111111115"
ENV BRIDGE_ADDRESS="Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o"
RUN --mount=type=cache,target=/root/.cache \
--mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=target \
set -xe && \
cargo install bridge_client --git https://github.com/wormhole-foundation/wormhole --tag $WORMHOLE_TAG --locked --root /usr/local --target-dir target && \
cargo install token_bridge_client --git https://github.com/wormhole-foundation/wormhole --tag $WORMHOLE_TAG --locked --root /usr/local --target-dir target
ADD tilt_devnet/scripts/solana-devnet-setup.sh /usr/src/solana-devnet-setup.sh
ADD tilt_devnet/secrets/solana/ /solana-secrets

View File

@@ -1,45 +0,0 @@
# This is a multi-stage Dockerfile: the first and second stages build the contracts,
# and the third creates a Node.js environment to deploy them
FROM cosmwasm/workspace-optimizer:0.12.11@sha256:b6b0b1d5ed76dafd0ec0d3812a1e7a275316cf862fd6d0a918f14d71c4cb187f AS wormhole_builder
WORKDIR /tmp/wormhole-repo
ARG WORMHOLE_REV=2.8.9
ADD https://github.com/certusone/wormhole/archive/refs/tags/v${WORMHOLE_REV}.tar.gz .
RUN tar -xvf v${WORMHOLE_REV}.tar.gz
RUN mv wormhole-${WORMHOLE_REV}/cosmwasm/Cargo.lock /code/
RUN mv wormhole-${WORMHOLE_REV}/cosmwasm/Cargo.toml /code/
RUN mv wormhole-${WORMHOLE_REV}/cosmwasm/contracts /code/contracts
WORKDIR /code
RUN --mount=type=cache,target=/code/target,id=cosmwasm_wormhole_target --mount=type=cache,target=/usr/local/cargo/registry optimize_workspace.sh
FROM cosmwasm/workspace-optimizer:0.12.11@sha256:b6b0b1d5ed76dafd0ec0d3812a1e7a275316cf862fd6d0a918f14d71c4cb187f AS pyth_builder
COPY target_chains/cosmwasm/Cargo.lock /code/
COPY target_chains/cosmwasm/Cargo.toml /code/
COPY target_chains/cosmwasm/contracts /code/contracts
COPY target_chains/cosmwasm/sdk/rust /code/sdk/rust
COPY wormhole_attester/sdk/rust /wormhole_attester/sdk/rust
RUN --mount=type=cache,target=/code/target,id=cosmwasm_pyth_target --mount=type=cache,target=/usr/local/cargo/registry optimize_workspace.sh
# Contract deployment stage
FROM lerna
USER root
RUN apt update && apt install netcat curl jq -y
USER 1000
COPY --from=wormhole_builder /code/artifacts/wormhole.wasm /home/node/target_chains/cosmwasm/artifacts/wormhole.wasm
COPY --from=pyth_builder /code/artifacts/pyth_cosmwasm.wasm /home/node/target_chains/cosmwasm/artifacts/pyth_cosmwasm.wasm
WORKDIR /home/node/
COPY --chown=1000:1000 governance/xc_admin/packages/xc_admin_common/ governance/xc_admin/packages/xc_admin_common/
COPY --chown=1000:1000 target_chains/cosmwasm/tools target_chains/cosmwasm/tools
RUN npx lerna run build --scope="@pythnetwork/cosmwasm-deploy-tools" --include-dependencies
WORKDIR /home/node/target_chains/cosmwasm/tools

View File

@@ -1,18 +0,0 @@
# Defined in tilt_devnet/docker_images/Dockerfile.lerna
FROM lerna
USER root
RUN apt-get update && apt-get install -y ncat
# Run as node; otherwise npx explodes.
USER 1000
WORKDIR /home/node
COPY --chown=1000:1000 governance/multisig_wh_message_builder governance/multisig_wh_message_builder
COPY --chown=1000:1000 governance/xc_admin/packages/xc_admin_common/ governance/xc_admin/packages/xc_admin_common/
COPY --chown=1000:1000 target_chains/ethereum/sdk/solidity target_chains/ethereum/sdk/solidity
COPY --chown=1000:1000 target_chains/ethereum/contracts target_chains/ethereum/contracts
RUN npx lerna run build --scope="@pythnetwork/pyth-evm-contract" --include-dependencies
WORKDIR /home/node/target_chains/ethereum/contracts
COPY --chown=1000:1000 target_chains/ethereum/contracts/.env.test .env

View File

@@ -1,15 +0,0 @@
# Defined in tilt_devnet/docker_images/Dockerfile.lerna
FROM lerna

WORKDIR /home/node/
USER 1000

RUN sh -c "$(curl -sSfL https://release.solana.com/v1.10.31/install)"
ENV PATH="/home/node/.local/share/solana/install/active_release/bin:$PATH"

COPY --chown=1000:1000 governance/multisig_wh_message_builder governance/multisig_wh_message_builder
COPY --chown=1000:1000 third_party/pyth/prepare_multisig.py third_party/pyth/pyth_utils.py third_party/pyth/
COPY --chown=1000:1000 tilt_devnet/secrets/solana tilt_devnet/secrets/solana

ENV SOL_PAYER_KEYPAIR /home/node/tilt_devnet/secrets/solana/solana-devnet.json

RUN npx lerna run build --scope="@pythnetwork/pyth-multisig-wh-message-builder" --include-dependencies

View File

@@ -1,3 +0,0 @@
FROM prom/prometheus
ADD --chown=nobody:nobody tilt_devnet/configs/prometheus_config.yaml .

View File

@@ -1,76 +0,0 @@
#syntax=docker/dockerfile:1.2@sha256:e2a8561e419ab1ba6b2fe6cbdf49fd92b95912df1cf7d313c3e2230a333fdbcc
FROM pythfoundation/pyth-client:devnet-v2.20.0 as pyth-oracle-copy

FROM docker.io/library/rust:1.49@sha256:a50165ea96983c21832578afb1c8c028674c965bc1ed43b607871b1f362e06a5 as build

RUN apt-get update \
    && apt-get install -y \
    clang \
    libudev-dev \
    libssl-dev \
    llvm \
    pkg-config \
    zlib1g-dev \
    && rm -rf /var/lib/apt/lists/* \
    && rustup component add rustfmt

RUN sh -c "$(curl -sSfL https://release.solana.com/v1.10.31/install)"
ENV PATH="/root/.local/share/solana/install/active_release/bin:$PATH"

ADD wormhole_attester/rust-toolchain /rust-toolchain

USER root

# Solana does a questionable download at the beginning of a *first* build-bpf
# call. Trigger and layer-cache it explicitly. The decoy crate gets a real
# cdylib target to make it more plausible. (The comment must not sit at the
# end of a continued line, or the shell treats the rest of the command as
# part of the comment.)
RUN cargo init --lib /tmp/decoy-crate && \
    echo '[lib]\nname=decoy_crate\ncrate-type=["cdylib"]' >> /tmp/decoy-crate/Cargo.toml && \
    cd /tmp/decoy-crate && \
    cargo build-bpf && \
    rm -rf /tmp/decoy-crate

WORKDIR /usr/src/bridge

ARG WORMHOLE_REV=2.14.8
ADD https://github.com/wormhole-foundation/wormhole/archive/refs/tags/v${WORMHOLE_REV}.tar.gz .
RUN tar -xvf v${WORMHOLE_REV}.tar.gz
RUN mv wormhole-${WORMHOLE_REV} wormhole

WORKDIR /usr/src/squads

ARG SQUADS_REV=1.2.0
ADD https://github.com/Squads-Protocol/squads-mpl/archive/refs/tags/v${SQUADS_REV}.tar.gz .
RUN tar -xvf v${SQUADS_REV}.tar.gz
RUN mv squads-mpl-${SQUADS_REV} squads-mpl

WORKDIR /usr/src/

ADD wormhole_attester wormhole_attester
ADD governance/remote_executor governance/remote_executor

RUN mkdir -p /opt/solana/deps

ENV EMITTER_ADDRESS="11111111111111111111111111111115"
ENV BRIDGE_ADDRESS="Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o"

WORKDIR /usr/src

# Build Wormhole Solana programs
RUN --mount=type=cache,target=/usr/src/bridge/wormhole/solana/target \
    --mount=type=cache,target=/usr/src/wormhole_attester/target \
    --mount=type=cache,target=/usr/src/squads/squads-mpl/target \
    --mount=type=cache,target=/usr/local/cargo/registry \
    --mount=type=cache,target=/root/.cache \
    cargo build-bpf --manifest-path "bridge/wormhole/solana/bridge/program/Cargo.toml" -- --locked && \
    cargo build-bpf --manifest-path "wormhole_attester/program/Cargo.toml" -- --locked && \
    cargo build-bpf --manifest-path "squads/squads-mpl/programs/mesh/Cargo.toml" -- --locked && \
    cp bridge/wormhole/solana/target/deploy/bridge.so /opt/solana/deps/bridge.so && \
    cp wormhole_attester/target/deploy/pyth_wormhole_attester.so /opt/solana/deps/pyth_wormhole_attester.so && \
    cp squads/squads-mpl/target/deploy/mesh.so /opt/solana/deps/mesh.so

COPY --from=pyth-oracle-copy /home/pyth/pyth-client/target/deploy/pyth_oracle.so /opt/solana/deps/pyth_oracle.so

ENV RUST_LOG="solana_runtime::system_instruction_processor=trace,solana_runtime::message_processor=trace,solana_bpf_loader=debug,solana_rbpf=debug"
ENV RUST_BACKTRACE=1

ADD tilt_devnet/secrets/solana /solana-secrets

View File

@@ -1,5 +0,0 @@
# Miscellaneous Tilt Docker images

This directory holds Docker images for services that don't have meaningful
source code in this monorepo. Notably, images that prepare Tilt devnet
environments live here.

View File

@@ -1,41 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: check-attestations
  labels:
    app: check-attestations
spec:
  clusterIP: None
  selector:
    app: check-attestations
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: check-attestations
spec:
  selector:
    matchLabels:
      app: check-attestations
  serviceName: check-attestations
  replicas: 1
  template:
    metadata:
      labels:
        app: check-attestations
    spec:
      restartPolicy: Always
      terminationGracePeriodSeconds: 0
      containers:
        - name: check-attestations
          image: check-attestations
          command:
            - python3
            - /usr/src/pyth/check_attestations.py
          tty: true
          readinessProbe:
            tcpSocket:
              port: 2000
            periodSeconds: 1
            failureThreshold: 300

View File

@@ -1,143 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: eth-devnet
  labels:
    app: eth-devnet
spec:
  ports:
    - port: 8545
      name: rpc
      protocol: TCP
  clusterIP: None
  selector:
    app: eth-devnet
---
apiVersion: v1
kind: Service
metadata:
  name: eth-devnet2
  labels:
    app: eth-devnet2
spec:
  ports:
    - port: 8546
      targetPort: 8545
      name: rpc
      protocol: TCP
  clusterIP: None
  selector:
    app: eth-devnet2
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: eth-devnet
spec:
  selector:
    matchLabels:
      app: eth-devnet
  serviceName: eth-devnet
  replicas: 1
  template:
    metadata:
      labels:
        app: eth-devnet
    spec:
      terminationGracePeriodSeconds: 1
      containers:
        - name: ganache
          image: eth-node
          command:
            - npx
            - ganache-cli
            - -e 10000
            - --deterministic
            # NOTE(2022-04-13): Some unit tests need block timestamp
            # to be sufficiently far above UNIX epoch (Pyth EVM tests
            # check feed staleness logic against 0 as lowest possible
            # timestamp)
            - --time="1970-01-02T00:00:00+00:00"
            - --host=0.0.0.0
          ports:
            - containerPort: 8545
              name: rpc
              protocol: TCP
          readinessProbe:
            tcpSocket:
              port: rpc
        - name: tests
          image: eth-node
          stdin: true
          command:
            - /bin/sh
            - -c
            - "npm run migrate -- --network development &&
              npx truffle test test/pyth.js 2>&1 &&
              nc -lk 0.0.0.0 2000"
          readinessProbe:
            periodSeconds: 1
            failureThreshold: 300
            tcpSocket:
              port: 2000
        - name: mine
          image: eth-node
          command:
            - /bin/sh
            - -c
            - "npx truffle exec mine.js"
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: eth-devnet2
spec:
  selector:
    matchLabels:
      app: eth-devnet2
  serviceName: eth-devnet2
  replicas: 1
  template:
    metadata:
      labels:
        app: eth-devnet2
    spec:
      terminationGracePeriodSeconds: 1
      containers:
        - name: ganache
          image: eth-node
          command:
            - npx
            - ganache-cli
            - -e 10000
            - --deterministic
            - --time="1970-01-01T00:00:00+00:00"
            - --host=0.0.0.0
            - --chain.chainId=1397
          ports:
            - containerPort: 8545
              name: rpc
              protocol: TCP
          readinessProbe:
            tcpSocket:
              port: rpc
        - name: tests
          image: eth-node
          stdin: true
          command:
            - /bin/sh
            - -c
            - "npm run migrate -- --network development &&
              nc -lk 0.0.0.0 2000"
          readinessProbe:
            periodSeconds: 1
            failureThreshold: 300
            tcpSocket:
              port: 2000
        - name: mine
          image: eth-node
          command:
            - /bin/sh
            - -c
            - "npx truffle exec mine.js"

View File

@@ -1,39 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: multisig
  labels:
    app: multisig
spec:
  clusterIP: None
  selector:
    app: multisig
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: multisig
spec:
  selector:
    matchLabels:
      app: multisig
  serviceName: multisig
  template:
    metadata:
      labels:
        app: multisig
    spec:
      restartPolicy: Always
      terminationGracePeriodSeconds: 0
      containers:
        - name: multisig
          image: multisig
          readinessProbe:
            tcpSocket:
              port: 2000
            periodSeconds: 1
            failureThreshold: 300
          command:
            - python3
            - /home/node/third_party/pyth/prepare_multisig.py

View File

@@ -1,161 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: guardian
  labels:
    app: guardian
spec:
  ports:
    - port: 8999
      name: p2p
      protocol: UDP
    - port: 7070
      name: public-grpc
      protocol: TCP
    - port: 7071
      name: public-rest
      protocol: TCP
  clusterIP: None
  selector:
    app: guardian
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: guardian
spec:
  selector:
    matchLabels:
      app: guardian
  serviceName: guardian
  replicas: 5
  updateStrategy:
    # The StatefulSet rolling update strategy is rather dumb, and updates one pod after another.
    # If we want blue-green deployments, we should use a Deployment instead.
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: guardian
    spec:
      terminationGracePeriodSeconds: 0
      volumes:
        # mount shared between containers for runtime state
        - name: node-rundir
          emptyDir: {}
        - name: node-keysdir
          secret:
            secretName: node-bigtable-key
            optional: true
            items:
              - key: bigtable-key.json
                path: bigtable-key.json
      containers:
        - name: guardiand
          image: ghcr.io/wormhole-foundation/guardiand:v2.17.0
          volumeMounts:
            - mountPath: /run/node
              name: node-rundir
            - mountPath: /tmp/mounted-keys
              name: node-keysdir
          env:
            - name: BIGTABLE_EMULATOR_HOST
              value: bigtable-emulator:8086
            - name: PUBSUB_EMULATOR_HOST
              value: pubsub-emulator:8085
          command:
            - /guardiand
            - node
            - --ethRPC
            - ws://eth-devnet:8545
            - --bscRPC
            - ws://eth-devnet2:8545
            - --polygonRPC
            - ws://eth-devnet:8545
            - --avalancheRPC
            - ws://eth-devnet:8545
            - --auroraRPC
            - ws://eth-devnet:8545
            - --fantomRPC
            - ws://eth-devnet:8545
            - --oasisRPC
            - ws://eth-devnet:8545
            - --karuraRPC
            - ws://eth-devnet:8545
            - --acalaRPC
            - ws://eth-devnet:8545
            - --klaytnRPC
            - ws://eth-devnet:8545
            - --celoRPC
            - ws://eth-devnet:8545
            - --moonbeamRPC
            - ws://eth-devnet:8545
            - --neonRPC
            - ws://eth-devnet:8545
            - --terraWS
            - ws://terra-terrad:26657/websocket
            - --terraLCD
            - http://terra-terrad:1317
            - --terraContract
            - terra14hj2tavq8fpesdwxxcu44rty3hh90vhujrvcmstl4zr3txmfvw9ssrc8au
            # - --terra2WS
            # - ws://terra2-terrad:26657/websocket
            # - --terra2LCD
            # - http://terra2-terrad:1317
            # - --terra2Contract
            # - terra14hj2tavq8fpesdwxxcu44rty3hh90vhujrvcmstl4zr3txmfvw9ssrc8au
            # - --algorandAppID
            # - "4"
            # - --algorandIndexerRPC
            # - http://algorand:8980
            # - --algorandIndexerToken
            # - "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            # - --algorandAlgodRPC
            # - http://algorand:4001
            # - --algorandAlgodToken
            # - "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
            - --solanaContract
            - Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o
            - --solanaWS
            - ws://solana-devnet:8900
            - --solanaRPC
            - http://solana-devnet:8899
            - --unsafeDevMode
            - --guardianKey
            - /tmp/bridge.key
            - --publicRPC
            - "[::]:7070"
            - --publicWeb
            - "[::]:7071"
            - --adminSocket
            - /tmp/admin.sock
            - --dataDir
            - /tmp/data
            # - --chainGovernorEnabled=true
            # - --logLevel=debug
          securityContext:
            capabilities:
              add:
                # required for syscall.Mlockall
                - IPC_LOCK
          readinessProbe:
            httpGet:
              port: 6060
              path: /readyz
          ports:
            - containerPort: 8999
              name: p2p
              protocol: UDP
            - containerPort: 6060
              name: pprof
              protocol: TCP
            - containerPort: 7070
              name: public-grpc
              protocol: TCP
            - containerPort: 7071
              name: public-grpcweb
              protocol: TCP
            - containerPort: 2345
              name: debugger
              protocol: TCP

View File

@@ -1,83 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: p2w-attest
  labels:
    app: p2w-attest
spec:
  ports:
    - port: 4343
      name: p2w-attest
      protocol: TCP
    - port: 3000
      name: metrics
  clusterIP: None
  selector:
    app: p2w-attest
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: p2w-attest
spec:
  selector:
    matchLabels:
      app: p2w-attest
  serviceName: p2w-attest
  replicas: 1
  template:
    metadata:
      labels:
        app: p2w-attest
    spec:
      restartPolicy: Always
      terminationGracePeriodSeconds: 0
      containers:
        - name: p2w-attest
          image: p2w-attest
          command:
            - python3
            - /usr/src/pyth/p2w_autoattest.py
          env:
            - name: P2W_INITIALIZE_SOL_CONTRACT
              value: "1"
            - name: P2W_EXIT_ON_ERROR
              value: "true"
          tty: true
          # Probes, in order of appearance https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
          #
          # Startup probe - delays other probes until it gets its first success
          startupProbe:
            httpGet:
              path: /healthcheck
              port: 3000
            failureThreshold: 100 # up to 100 * 10 seconds to report initial healthy status
            periodSeconds: 10
          # Readiness probe - used to tell load balancers to
          # start/stop sending traffic to the container, *without*
          # restarting it. The attester does not accept any traffic as
          # part of its workflow, which means this isn't very useful.
          # readinessProbe:
          #   httpGet:
          #     path: /healthcheck
          #     port: 3000
          #   failureThreshold: 1
          #   periodSeconds: 10
          #
          # Liveness probe - decides restarts for misbehaving
          # containers
          livenessProbe:
            httpGet:
              path: /healthcheck
              port: 3000
            failureThreshold: 1 # if the attester healthcheck fails once, restart the container
            periodSeconds: 10
          ports:
            - containerPort: 4343
              name: p2w-attest
              protocol: TCP
            - containerPort: 3000
              name: metrics
              protocol: TCP

View File

@@ -1,83 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: p2w-evm-relay
  labels:
    app: p2w-evm-relay
spec:
  ports:
    - port: 8081
      name: prometheus
      protocol: TCP
    - port: 4200
      name: rest-api
      protocol: TCP
  clusterIP: None
  selector:
    app: p2w-evm-relay
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: p2w-evm-relay
spec:
  selector:
    matchLabels:
      app: p2w-evm-relay
  serviceName: p2w-evm-relay
  replicas: 1
  template:
    metadata:
      labels:
        app: p2w-evm-relay
    spec:
      terminationGracePeriodSeconds: 0
      containers:
        - name: p2w-evm-relay
          image: p2w-relay
          ports:
            - containerPort: 8081
              name: prometheus
              protocol: TCP
            - containerPort: 4200
              name: rest-api
              protocol: TCP
          readinessProbe:
            httpGet:
              path: "/health"
              port: 4200
          command:
            - node
            - lib/index.js
            - "--"
            - "--evm"
          env:
            - name: SPY_SERVICE_HOST
              value: spy:7072
            - name: SPY_SERVICE_FILTERS
              value: '[{"chain_id":1,"emitter_address":"71f8dcb863d176e2c420ad6610cf687359612b6fb392e0642b0ca6b1f186aa3b"}]'
            - name: EVM_NODE_JSON_RPC_URL
              value: "http://eth-devnet:8545"
            - name: EVM_WALLET_MNEMONIC
              value: "myth like bonus scare over problem client lizard pioneer submit female collect"
            - name: EVM_HDWALLET_PATH
              value: "m/44'/60'/0'/0/1" # Use account with idx 1
            - name: EVM_PYTH_CONTRACT_ADDRESS
              value: "0xe982E462b094850F12AF94d21D470e21bE9D0E9C"
            - name: EVM_VERIFY_PRICE_FEEDS
              value: "yes"
            - name: REST_PORT
              value: "4200"
            - name: PROM_PORT
              value: "8081"
            - name: BAL_QUERY_INTERVAL
              value: "60000"
            - name: RETRY_MAX_ATTEMPTS
              value: "4"
            - name: RETRY_DELAY_IN_MS
              value: "250"
            - name: MAX_MSGS_PER_BATCH
              value: "1"
            - name: LOG_LEVEL
              value: debug

View File

@@ -1,88 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: p2w-terra-relay
  labels:
    app: p2w-terra-relay
spec:
  ports:
    - port: 8081
      name: prometheus
      protocol: TCP
    - port: 4200
      name: rest-api
      protocol: TCP
  clusterIP: None
  selector:
    app: p2w-terra-relay
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: p2w-terra-relay
spec:
  selector:
    matchLabels:
      app: p2w-terra-relay
  serviceName: p2w-terra-relay
  replicas: 1
  template:
    metadata:
      labels:
        app: p2w-terra-relay
    spec:
      terminationGracePeriodSeconds: 0
      containers:
        - name: p2w-terra-relay
          image: p2w-relay
          ports:
            - containerPort: 8081
              name: prometheus
              protocol: TCP
            - containerPort: 4200
              name: rest-api
              protocol: TCP
          readinessProbe:
            httpGet:
              path: "/health"
              port: 4200
          command:
            - node
            - lib/index.js
            - "--"
            - "--terra"
          env:
            - name: SPY_SERVICE_HOST
              value: spy:7072
            - name: SPY_SERVICE_FILTERS
              value: '[{"chain_id":1,"emitter_address":"71f8dcb863d176e2c420ad6610cf687359612b6fb392e0642b0ca6b1f186aa3b"}]'
            - name: TERRA_NODE_URL
              value: http://terra-terrad:1317
            - name: TERRA_PRIVATE_KEY
              value: notice oak worry limit wrap speak medal online prefer cluster roof addict wrist behave treat actual wasp year salad speed social layer crew genius
            - name: TERRA_PYTH_CONTRACT_ADDRESS
              value: terra1nc5tatafv6eyq7llkr2gv50ff9e22mnf70qgjlv737ktmt4eswrquka9l6
              # ^^ It can change if order of terra contract creation changes or anything is added/removed in terra/tools/deploy.ts
            - name: TERRA_CHAIN_ID
              value: localterra
            - name: TERRA_NAME
              value: localterra
            - name: TERRA_COIN
              value: uluna
            - name: REST_PORT
              value: "4200"
            - name: PROM_PORT
              value: "8081"
            - name: BAL_QUERY_INTERVAL
              value: "60000"
            - name: RETRY_MAX_ATTEMPTS
              value: "6"
            - name: RETRY_DELAY_IN_MS
              value: "1000"
            - name: MAX_MSGS_PER_BATCH
              value: "1"
            - name: MAX_HEALTHY_NO_RELAY_DURATION_IN_SECONDS
              value: "120"
            - name: LOG_LEVEL
              value: debug

View File

@@ -1,48 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus
  labels:
    app: prometheus
spec:
  clusterIP: None
  selector:
    app: prometheus
  ports:
    - port: 9090
      name: dashboard
      protocol: TCP
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: prometheus
spec:
  selector:
    matchLabels:
      app: prometheus
  serviceName: prometheus
  template:
    metadata:
      labels:
        app: prometheus
    spec:
      restartPolicy: Always
      terminationGracePeriodSeconds: 0
      containers:
        - name: prometheus
          image: prometheus
          readinessProbe:
            tcpSocket:
              port: 9090
            periodSeconds: 1
            failureThreshold: 300
          ports:
            - containerPort: 9090
              name: dashboard
              protocol: TCP
          command:
            - "prometheus"
            - "--config.file=prometheus_config.yaml"
            - "--web.external-url=http://[::]:9090"

View File

@@ -1,32 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pyth-price-client-js
spec:
  selector:
    matchLabels:
      app: pyth-price-client-js
  replicas: 1
  template:
    metadata:
      labels:
        app: pyth-price-client-js
    spec:
      terminationGracePeriodSeconds: 0
      containers:
        - name: tests
          image: pyth-price-client-js
          command:
            - /bin/sh
            - -c
            - "npm run test:e2e && nc -lk 0.0.0.0 2000"
          readinessProbe:
            periodSeconds: 5
            failureThreshold: 300
            tcpSocket:
              port: 2000
          resources:
            limits:
              cpu: "2"
              memory: 1Gi

View File

@@ -1,91 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: pyth-price-server
  labels:
    app: pyth-price-server
spec:
  ports:
    - port: 8081
      name: prometheus
      protocol: TCP
    - port: 4200
      name: rest-api
      protocol: TCP
  clusterIP: None
  selector:
    app: pyth-price-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pyth-price-server
spec:
  selector:
    matchLabels:
      app: pyth-price-server
  replicas: 1
  template:
    metadata:
      labels:
        app: pyth-price-server
    spec:
      terminationGracePeriodSeconds: 0
      containers:
        - name: pyth-price-server
          image: pyth-price-server
          ports:
            - containerPort: 8081
              name: prometheus
              protocol: TCP
            - containerPort: 4200
              name: rest-api
              protocol: TCP
          readinessProbe:
            httpGet:
              path: "/ready"
              port: 4200
            initialDelaySeconds: 10
            periodSeconds: 1
            failureThreshold: 1
          livenessProbe:
            httpGet:
              path: "/live"
              port: 4200
            initialDelaySeconds: 20
            periodSeconds: 30
            timeoutSeconds: 30
          env:
            - name: SPY_SERVICE_HOST
              value: spy:7072
            - name: SPY_SERVICE_FILTERS
              value: '[{"chain_id":1,"emitter_address":"71f8dcb863d176e2c420ad6610cf687359612b6fb392e0642b0ca6b1f186aa3b"}]'
            - name: WORMHOLE_CLUSTER
              value: localnet
            - name: REST_PORT
              value: "4200"
            - name: PROM_PORT
              value: "8081"
            - name: READINESS_SPY_SYNC_TIME_SECONDS
              value: "5"
            - name: READINESS_NUM_LOADED_SYMBOLS
              value: "6"
            - name: LOG_LEVEL
              value: debug
            - name: REMOVE_EXPIRED_VALUES_INTERVAL_SECONDS
              value: "60"
            - name: CACHE_TTL_SECONDS
              value: "300"
        - name: tests
          image: pyth-price-server
          command:
            - /bin/sh
            - -c
            - "npm run test && nc -lk 0.0.0.0 2358"
          readinessProbe:
            periodSeconds: 5
            failureThreshold: 300
            tcpSocket:
              port: 2358

View File

@@ -1,47 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: pyth
  labels:
    app: pyth
spec:
  clusterIP: None
  selector:
    app: pyth
  ports:
    - port: 4242
      name: pyth-accounts
      protocol: TCP
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: pyth
spec:
  selector:
    matchLabels:
      app: pyth
  serviceName: pyth
  template:
    metadata:
      labels:
        app: pyth
    spec:
      restartPolicy: Always
      terminationGracePeriodSeconds: 0
      containers:
        - name: pyth-publisher
          image: pyth
          command:
            - python3
            - /opt/pyth/pyth_publisher.py
          readinessProbe:
            tcpSocket:
              port: 2000
            periodSeconds: 1
            failureThreshold: 300
          ports:
            - containerPort: 4242
              name: pyth-accounts
              protocol: TCP

View File

@@ -1,98 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: solana-devnet
  labels:
    app: solana-devnet
spec:
  ports:
    - port: 8899
      name: rpc
      protocol: TCP
    - port: 9900
      name: faucet
      protocol: TCP
  clusterIP: None
  selector:
    app: solana-devnet
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: solana-devnet
spec:
  selector:
    matchLabels:
      app: solana-devnet
  serviceName: solana-devnet
  replicas: 1
  template:
    metadata:
      labels:
        app: solana-devnet
    spec:
      terminationGracePeriodSeconds: 1
      containers:
        - name: devnet
          image: solana-contract
          command:
            - /root/.local/share/solana/install/active_release/bin/solana-test-validator
            - --bpf-program
            - Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o
            - /opt/solana/deps/bridge.so
            - --bpf-program
            - gMYYig2utAxVoXnM9UhtTWrt8e7x2SVBZqsWZJeT5Gw # Derived from pyth_program.json
            - /opt/solana/deps/pyth_oracle.so
            - --bpf-program
            - P2WH424242424242424242424242424242424242424
            - /opt/solana/deps/pyth_wormhole_attester.so
            - --bpf-program
            - SMPLVC8MxZ5Bf5EfF7PaMiTCxoBAcmkbM2vkrvMK8ho # copied from squads-mpl/programs/mesh/src/lib.rs
            - /opt/solana/deps/mesh.so
            - --log
          ports:
            - containerPort: 8001
              name: gossip
              protocol: UDP
            - containerPort: 8003
              name: tpu
              protocol: UDP
            - containerPort: 8004
              name: tpufwd
              protocol: UDP
            - containerPort: 8000
              name: tvu
              protocol: UDP
            - containerPort: 8002
              name: tvufwd
              protocol: UDP
            - containerPort: 8006
              name: repair
              protocol: UDP
            - containerPort: 8007
              name: serverepair
              protocol: UDP
            - containerPort: 8899
              name: rpc
              protocol: TCP
            - containerPort: 8900
              name: pubsub
              protocol: TCP
            - containerPort: 9900
              name: faucet
              protocol: TCP
          readinessProbe:
            httpGet:
              port: rpc
              path: /health
            periodSeconds: 1
        - name: setup
          image: bridge-client
          command:
            - /usr/src/solana-devnet-setup.sh
          readinessProbe:
            tcpSocket:
              port: 2000
            periodSeconds: 1
            failureThreshold: 300

View File

@@ -1,60 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
  name: spy
  labels:
    app: spy
spec:
  ports:
    - port: 7072
      name: spyrpc
      protocol: TCP
    - port: 6060
      name: status
      protocol: TCP
  clusterIP: None
  selector:
    app: spy
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: spy
spec:
  selector:
    matchLabels:
      app: spy
  serviceName: spy
  replicas: 1
  template:
    metadata:
      labels:
        app: spy
    spec:
      terminationGracePeriodSeconds: 0
      containers:
        - name: spy
          image: ghcr.io/wormhole-foundation/guardiand:v2.17.0
          command:
            - /guardiand
            - spy
            - --nodeKey
            - /node.key
            - --spyRPC
            - "[::]:7072"
            # Hardcoded devnet bootstrap (generated from deterministic key in guardiand)
            - --bootstrap
            - /dns4/guardian-0.guardian/udp/8999/quic/p2p/12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw
            # - --logLevel=debug
          ports:
            - containerPort: 7072
              name: spyrpc
              protocol: TCP
            - containerPort: 6060
              name: status
              protocol: TCP
          readinessProbe:
            httpGet:
              port: 6060
              path: /metrics

View File

@@ -1,205 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app: terra-terrad
  name: terra-terrad
spec:
  ports:
    - name: rpc
      port: 26657
      protocol: TCP
    - name: rest
      port: 1317
      protocol: TCP
  selector:
    app: terra-terrad
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: terra-postgres
  name: terra-postgres
spec:
  ports:
    - name: postgres
      port: 5432
      protocol: TCP
  selector:
    app: terra-postgres
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: terra-fcd
  name: terra-fcd
spec:
  ports:
    - name: fcd
      port: 3060
      protocol: TCP
  selector:
    app: terra-fcd
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app: terra-terrad
  name: terra-terrad
spec:
  replicas: 1
  selector:
    matchLabels:
      app: terra-terrad
  template:
    metadata:
      labels:
        app: terra-terrad
    spec:
      containers:
        - args:
            - terrad
            - start
          image: terra-image
          name: terra-terrad
          ports:
            - containerPort: 26657
            - containerPort: 1317
          readinessProbe:
            httpGet:
              port: 26657
          resources: {}
        - name: cosmwasm-contracts
          image: cosmwasm-contracts
          command:
            - /bin/sh
            - -c
            - "sh /home/node/target_chains/cosmwasm/tools/deploy.sh &&
              touch /home/node/success && sleep infinity"
          readinessProbe:
            exec:
              command:
                - test
                - -e
                - "/home/node/success"
            initialDelaySeconds: 5
            periodSeconds: 5
      restartPolicy: Always
  serviceName: terra-terrad
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app: terra-postgres
  name: terra-postgres
spec:
  replicas: 1
  selector:
    matchLabels:
      app: terra-postgres
  template:
    metadata:
      labels:
        app: terra-postgres
    spec:
      containers:
        - image: postgres:12
          name: fcd-postgres
          ports:
            - containerPort: 5432
          resources: {}
          env:
            - name: POSTGRES_USER
              value: dev
            - name: POSTGRES_PASSWORD
              value: dev
            - name: POSTGRES_DB
              value: fcd
      restartPolicy: Always
  serviceName: terra-fcd
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    app: terra-fcd
  name: terra-fcd
spec:
  replicas: 1
  selector:
    matchLabels:
      app: terra-fcd
  template:
    metadata:
      labels:
        app: terra-fcd
    spec:
      containers:
        - image: terramoney/fcd:2.0.5
          name: fcd-collector
          args:
            - collector
          resources: {}
          env:
            - name: CHAIN_ID
              value: localterra
            - name: LCD_URI
              value: http://terra-terrad:1317
            - name: BYPASS_URI
              value: http://terra-terrad:1317
            - name: RPC_URI
              value: http://terra-terrad:26657
            - name: TYPEORM_CONNECTION
              value: postgres
            - name: TYPEORM_HOST
              value: terra-postgres
            - name: TYPEORM_USERNAME
              value: dev
            - name: TYPEORM_PASSWORD
              value: dev
            - name: TYPEORM_DATABASE
              value: fcd
            - name: TYPEORM_SYNCHRONIZE
              value: "true"
            - name: TYPEORM_LOGGING
              value: "false"
            - name: TYPEORM_ENTITIES
              value: "src/orm/*Entity.ts"
        - image: terramoney/fcd:2.0.5
          name: fcd-api
          args:
            - start
          resources: {}
          ports:
            - containerPort: 3060
          env:
            - name: CHAIN_ID
              value: localterra
            - name: LCD_URI
              value: http://terra-terrad:1317
            - name: BYPASS_URI
              value: http://terra-terrad:1317
            - name: RPC_URI
              value: http://terra-terrad:26657
            - name: TYPEORM_CONNECTION
              value: postgres
            - name: TYPEORM_HOST
              value: terra-postgres
            - name: TYPEORM_USERNAME
              value: dev
            - name: TYPEORM_PASSWORD
              value: dev
            - name: TYPEORM_DATABASE
              value: fcd
            - name: TYPEORM_SYNCHRONIZE
              value: "true"
            - name: TYPEORM_LOGGING
              value: "false"
            - name: TYPEORM_ENTITIES
              value: "src/orm/*Entity.ts"
      restartPolicy: Always
  serviceName: terra-fcd

View File

@@ -1,31 +0,0 @@
#!/usr/bin/env bash
# This script configures the devnet for test transfers with hardcoded addresses.
set -x

# Configure the CLI (works the same as the upstream Solana CLI)
mkdir -p ~/.config/solana/cli
cat <<EOF > ~/.config/solana/cli/config.yml
json_rpc_url: "http://127.0.0.1:8899"
websocket_url: ""
keypair_path: /solana-secrets/solana-devnet.json
EOF

# Constants
bridge_address=Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o
initial_guardian=befa429d57cd18b7f8a4d91a2da9ab4af05d0fbe

# Re-run a command until it succeeds.
retry () {
  while ! "$@"; do
    sleep 1
  done
}

# Fund our account (as defined in /solana-secrets/solana-devnet.json).
retry solana airdrop 1000

# Create the bridge contract at a known address.
# OK to fail on subsequent attempts (already created).
retry bridge_client create-bridge "$bridge_address" "$initial_guardian" 86400 100

# Let the k8s startup probe succeed
nc -k -l -p 2000

View File

@@ -1,4 +0,0 @@
# IMPORTANT: Never, ever use any of these private keys in a public chain environment

The secrets gathered here may only be used to perform automated
testing scenarios in Tilt. They are plainly visible in the code repository **on purpose**.
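
Each keypair file below is a standard Solana keypair: a JSON array of 64 bytes, the 32-byte ed25519 secret key followed by the 32-byte public key. A minimal Python sketch for inspecting one (the local file path is illustrative, not part of this directory's tooling):

```
# Sketch: inspect a Solana devnet keypair file (path is illustrative).
# The file is a JSON array of 64 integers: secret key (32 bytes)
# followed by public key (32 bytes).
import json

with open("solana-devnet.json") as f:
    raw = bytes(json.load(f))

assert len(raw) == 64
secret_key, public_key = raw[:32], raw[32:]
print("public key (hex):", public_key.hex())
```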

View File

@@ -1,6 +0,0 @@
[
39, 20, 181, 104, 82, 27, 70, 145, 227, 136, 168, 14, 170, 24, 33, 88, 145,
152, 180, 229, 219, 142, 247, 114, 237, 79, 52, 97, 84, 65, 213, 172, 49, 165,
99, 116, 254, 135, 110, 132, 214, 114, 59, 200, 109, 253, 45, 43, 74, 172,
107, 84, 162, 223, 23, 15, 78, 167, 240, 137, 234, 123, 4, 231
]

View File

@@ -1,6 +0,0 @@
[
151, 156, 152, 229, 131, 186, 5, 254, 107, 42, 234, 87, 191, 209, 182, 237,
170, 57, 174, 150, 37, 14, 5, 58, 100, 237, 114, 141, 46, 22, 155, 104, 10,
20, 225, 112, 227, 95, 250, 0, 102, 170, 119, 34, 187, 74, 144, 163, 181, 123,
233, 253, 191, 6, 2, 70, 127, 227, 138, 51, 98, 209, 205, 172
]

View File

@@ -1,6 +0,0 @@
[
62, 189, 176, 181, 215, 49, 125, 17, 130, 43, 109, 83, 115, 112, 151, 110,
117, 239, 235, 54, 205, 209, 6, 255, 76, 27, 210, 115, 206, 166, 217, 165,
250, 48, 211, 191, 77, 246, 195, 18, 170, 246, 162, 103, 141, 129, 14, 143,
127, 4, 243, 114, 79, 112, 11, 46, 90, 174, 215, 2, 63, 42, 134, 56
]

View File

@@ -1,6 +0,0 @@
[
14, 173, 153, 4, 176, 224, 201, 111, 32, 237, 183, 185, 159, 247, 22, 161, 89,
84, 215, 209, 212, 137, 10, 92, 157, 49, 29, 192, 101, 164, 152, 70, 87, 65,
8, 174, 214, 157, 175, 126, 98, 90, 54, 24, 100, 177, 247, 77, 19, 112, 47,
44, 165, 109, 233, 102, 14, 86, 109, 29, 134, 145, 132, 141
]

View File

@@ -1,6 +0,0 @@
[
172, 234, 168, 90, 159, 133, 183, 38, 206, 220, 115, 240, 201, 186, 191, 12,
38, 133, 233, 164, 62, 92, 164, 155, 149, 133, 68, 83, 168, 233, 67, 12, 1,
134, 165, 231, 211, 192, 216, 167, 186, 77, 109, 120, 172, 131, 36, 27, 95,
207, 60, 228, 128, 201, 74, 109, 132, 176, 165, 156, 62, 146, 247, 75
]

View File

@@ -1,6 +0,0 @@
[
196, 148, 217, 170, 205, 37, 40, 95, 214, 198, 118, 8, 52, 12, 250, 196, 95,
138, 15, 163, 55, 212, 93, 215, 72, 15, 11, 125, 221, 67, 196, 176, 219, 38,
22, 196, 10, 226, 177, 210, 88, 255, 245, 194, 140, 68, 61, 222, 16, 199, 151,
74, 161, 165, 178, 130, 124, 60, 99, 168, 130, 199, 251, 149
]

View File

@@ -1,6 +0,0 @@
[
238, 68, 150, 179, 87, 216, 135, 224, 44, 190, 97, 182, 75, 109, 167, 101,
146, 236, 95, 142, 190, 237, 251, 179, 186, 54, 100, 145, 166, 113, 222, 85,
0, 42, 46, 190, 161, 239, 138, 33, 240, 218, 84, 112, 63, 54, 170, 185, 140,
21, 211, 216, 57, 146, 161, 87, 170, 18, 29, 186, 231, 15, 241, 91
]

View File

@@ -1,14 +0,0 @@
{
  "Extensions": [
    {
      "Name": "namespace",
      "ExtensionRegistry": "https://github.com/tilt-dev/tilt-extensions",
      "TimeFetched": "2020-12-05T16:06:07.229737938+01:00"
    },
    {
      "Name": "secret",
      "ExtensionRegistry": "https://github.com/tilt-dev/tilt-extensions",
      "TimeFetched": "2021-07-01T15:08:09.818136358-05:00"
    }
  ]
}

View File

@@ -1,52 +0,0 @@
# Namespace
Author: [Nick Santos](https://github.com/nicks)
Helper functions for creating Kubernetes namespaces and manipulating
namespaces on Kubernetes objects.
## Functions
### `namespace_yaml(name: str): Blob`
Returns YAML for a Kubernetes namespace.
### `namespace_create(name: str)`
Deploys a namespace to the cluster. Equivalent to
```
load('ext://namespace', 'namespace_yaml')
k8s_yaml(namespace_yaml('name'))
```
### `namespace_inject(objects: Union[str, Blob], namespace: str): Blob`
Given YAML for Kubernetes objects, return new YAML with a different namespace.
## Example Usage
### For a fixed namespace:
```
load('ext://namespace', 'namespace_create', 'namespace_inject')
namespace_create('my-namespace')
k8s_yaml(namespace_inject(read_file('deployment.yaml'), 'my-namespace'))
```
### For a user-specific namespace:
```
load('ext://namespace', 'namespace_create', 'namespace_inject')
ns = 'user-%s' % os.environ.get('USER', 'anonymous')
namespace_create(ns)
k8s_yaml(namespace_inject(read_file('deployment.yaml'), ns))
```
## Caveats
- `namespace_inject` assumes all resources are namespace-scoped.
  The behavior is undefined for cluster-scoped resources.
- This extension doesn't do any validation to confirm that namespace names are valid.
The behavior is undefined on invalid namespaces.
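
For illustration only, here is a minimal plain-Python sketch of the two mutations `namespace_inject` performs. It uses PyYAML rather than Tilt's builtins, and `deployment.yaml` is just an example input; the real implementation lives in this extension's Tiltfile:

```
# Sketch of namespace_inject's behavior, using PyYAML instead of Tilt builtins.
import yaml

def set_namespace(obj, ns):
    # 1. Set .metadata.namespace
    obj.setdefault("metadata", {})["namespace"] = ns

    # 2. Set ..template.metadata.namespace (covers e.g. pod template specs)
    def walk(node):
        if isinstance(node, dict):
            for key, value in node.items():
                if key == "template" and isinstance(value, dict) and isinstance(value.get("metadata"), dict):
                    value["metadata"]["namespace"] = ns
                walk(value)
        elif isinstance(node, list):
            for value in node:
                walk(value)

    walk(obj)
    return obj

with open("deployment.yaml") as f:
    docs = list(yaml.safe_load_all(f))
print(yaml.safe_dump_all([set_namespace(d, "my-namespace") for d in docs]))
```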

View File

@@ -1,71 +0,0 @@
# -*- mode: Python -*-

def namespace_yaml(name):
    """Returns YAML for a namespace

    Args:
      name: The namespace name. Currently not validated.

    Returns:
      The namespace YAML as a blob
    """
    return blob("""apiVersion: v1
kind: Namespace
metadata:
  name: %s
""" % name)

def namespace_create(name):
    """Creates a namespace in the current Kubernetes cluster.

    Args:
      name: The namespace name. Currently not validated.
    """
    k8s_yaml(namespace_yaml(name))

def namespace_inject(x, ns):
    """Takes K8s yaml, sets its namespace to `ns`, and returns it as a blob.

    This modifies the yaml in two ways:
    1. Sets .metadata.namespace to `ns`
    2. Sets ..template.metadata.namespace to `ns`

    This ensures the namespace in, e.g., Deployment Pod Template Specs is
    set, but might have false positives if you have a CRD with some other
    element named 'template'.

    Args:
      x: K8s yaml. Either a filename (string) or the yaml itself (Blob)
      ns: The namespace to set the K8s objects to.

    Returns:
      Blob containing the K8s objects as yaml, with namespaces set to `ns`.
    """
    return _mutate_yaml(x, lambda o: _set_k8s_yaml_namespace(o, ns))

def _mutate_yaml(x, f):
    if type(x) == 'string':
        objects = read_yaml_stream(x)
    elif type(x) == 'blob':
        objects = decode_yaml_stream(x)
    else:
        fail('only takes string or blob, got: %s' % type(x))

    return encode_yaml_stream([f(o) for o in objects])

def _set_k8s_yaml_namespace(o, ns):
    o['metadata']['namespace'] = ns
    _set_template_namespace(o, ns)
    return o

def _set_template_namespace(o, ns):
    if type(o) == 'dict':
        for k, v in o.items():
            if k == 'template' and type(v) == 'dict' and type(v.get('metadata', None)) == 'dict':
                v['metadata']['namespace'] = ns
            if type(v) == 'dict' or type(v) == 'list':
                _set_template_namespace(v, ns)
    elif type(o) == 'list':
        for v in o:
            _set_template_namespace(v, ns)

View File

@@ -1,12 +0,0 @@
load('../Tiltfile', 'namespace_create', 'namespace_inject')
# Disable parallelism until this issue is fixed:
# https://github.com/tilt-dev/tilt/issues/3421
update_settings(max_parallel_updates=1)
namespace_create('namespace-test')
k8s_yaml(namespace_inject('deployment.yaml', 'namespace-test'))
k8s_yaml('job.yaml')
k8s_yaml(namespace_inject('job-default-namespace.yaml', 'namespace-test'))
k8s_resource('namespace-test-verify', resource_deps=['namespace-test-busybox'])
k8s_resource('namespace-test-verify2', resource_deps=['namespace-test-busybox'])

View File

@@ -1,40 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: namespace-test-busybox
spec:
  selector:
    matchLabels:
      app: namespace-test-busybox
  template:
    metadata:
      labels:
        app: namespace-test-busybox
    spec:
      containers:
        - name: busybox
          image: busybox
          ports:
            - containerPort: 8000
          command:
            [
              "sh",
              "-c",
              "echo 'hello world' > index.html; busybox httpd -f -p 8000",
            ]
          readinessProbe:
            tcpSocket:
              port: 8000
            periodSeconds: 1
---
apiVersion: v1
kind: Service
metadata:
  name: namespace-test-busybox
spec:
  selector:
    app: namespace-test-busybox
  ports:
    - protocol: TCP
      port: 8000
      targetPort: 8000

View File

@@ -1,13 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: namespace-test-verify2
spec:
  backoffLimit: 1
  template:
    spec:
      containers:
        - name: namespace-test-verify
          image: curlimages/curl
          command: ["curl", "-fsSL", "http://namespace-test-busybox:8000/"]
      restartPolicy: Never

View File

@@ -1,14 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: namespace-test-verify
  namespace: namespace-test
spec:
  backoffLimit: 1
  template:
    spec:
      containers:
        - name: namespace-test-verify
          image: curlimages/curl
          command: ["curl", "-fsSL", "http://namespace-test-busybox:8000/"]
      restartPolicy: Never

View File

@@ -1,7 +0,0 @@
#!/usr/bin/env bash
cd "$(dirname "$0")"
set -ex

tilt ci
tilt down --delete-namespaces

View File

@@ -1,51 +0,0 @@
# Secret

Author: [Nick Santos](https://github.com/nicks)

Helper functions for creating Kubernetes secrets.

## Functions

### secret_yaml_generic

```
secret_yaml_generic(name: str, namespace: str = "", from_file: Union[str, List] = None, secret_type: str = None): Blob
```

Returns YAML for a generic secret.

- `from_file` (str): equivalent to `kubectl create secret --from-file`
- `secret_type` (str): equivalent to `kubectl create secret --type`

### secret_create_generic

```
secret_create_generic(name: str, namespace: str = "", from_file: Union[str, List] = None, secret_type: str = None)
```

Deploys a secret to the cluster. Equivalent to

```
load('ext://secret', 'secret_yaml_generic')
k8s_yaml(secret_yaml_generic('name', from_file=[...]))
```

## Example Usage

### For a Postgres password:

```
load('ext://secret', 'secret_create_generic')
secret_create_generic('pgpass', from_file='.pgpass=./.pgpass')
```

### For a Google Cloud Platform key:

```
load('ext://secret', 'secret_create_generic')
secret_create_generic('gcp-key', from_file='key.json=./gcp-creds.json')
```

## Caveats

- This extension doesn't do any validation to confirm that names or namespaces are valid.
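
If you do need validation, most Kubernetes object names must be valid DNS-1123 labels. A rough pre-check, sketched in plain Python (an illustration, not part of this extension):

```
# Rough sketch of the DNS-1123 label rule Kubernetes applies to most names:
# lowercase alphanumerics and '-', starting and ending alphanumeric, <= 63 chars.
import re

DNS1123_LABEL = re.compile(r"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$")

def is_valid_k8s_name(name):
    return len(name) <= 63 and DNS1123_LABEL.match(name) is not None

assert is_valid_k8s_name("pgpass")
assert not is_valid_k8s_name("Invalid_Name")
```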

View File

@@ -1,75 +0,0 @@
# -*- mode: Python -*-

def secret_yaml_generic(name, namespace="", from_file=None, secret_type=None, from_env_file=None):
    """Returns YAML for a generic secret

    Args:
      name: The secret name.
      namespace: The namespace.
      from_file: Use the from-file secret generator. May be a string or a list of strings.
        Example: ["ssh--privatekey=path/to/id_rsa", "ssh-publickey=path/to/id_rsa.pub"]
      from_env_file: Specify the path to a file to read lines of key=val pairs to create a secret
        (i.e. a Docker .env file)
      secret_type (optional): Specify the type of the secret
        Example: 'kubernetes.io/dockerconfigjson'

    Returns:
      The secret YAML as a blob
    """
    args = [
        "kubectl",
        "create",
        "secret",
        "generic",
        name,
    ]

    if namespace:
        args.extend(["-n", namespace])

    generator = False
    if from_file:
        if type(from_file) == "string":
            args.extend(["--from-file", from_file])
            generator = True
        elif type(from_file) == "list":
            for f in from_file:
                args.extend(["--from-file", f])
            generator = True
        else:
            fail("Bad from_file argument: %s" % from_file)

    if from_env_file:
        if type(from_env_file) != "string":
            fail("from_env_file only accepts strings")
        args.extend(["--from-env-file", from_env_file])
        generator = True

    if not generator:
        fail("No secret generator specified")

    if secret_type:
        if type(secret_type) == "string":
            args.extend(["--type", secret_type])
        else:
            fail("Bad secret_type argument: %s" % secret_type)

    args.extend(["-o=yaml", "--dry-run=client"])
    return local(args)

def secret_create_generic(name, namespace="", from_file=None, secret_type=None, from_env_file=None):
    """Creates a secret in the current Kubernetes cluster.

    Args:
      name: The secret name.
      namespace: The namespace.
      from_file: Use the from-file secret generator. May be a string or a list of strings.
        Example: ["ssh--privatekey=path/to/id_rsa", "ssh-publickey=path/to/id_rsa.pub"]
      from_env_file: Specify the path to a file to read lines of key=val pairs to create a secret
        (i.e. a Docker .env file)
      secret_type (optional): Specify the type of the secret
        Example: 'kubernetes.io/dockerconfigjson'
    """
    k8s_yaml(secret_yaml_generic(name, namespace, from_file, secret_type, from_env_file))

View File

@@ -1 +0,0 @@
hostname:5432:database:username:password

View File

@@ -1,4 +0,0 @@
load('../Tiltfile', 'secret_create_generic')
secret_create_generic('pgpass', namespace='default', from_file='.pgpass=./.pgpass')
k8s_yaml('job.yaml')

View File

@@ -1,24 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: secret-verify
spec:
  backoffLimit: 1
  template:
    spec:
      containers:
        - name: secret-verify
          image: alpine
          command: ["grep", "password", "/var/secrets/pgpass/.pgpass"]
          volumeMounts:
            - name: pgpass
              mountPath: /var/secrets/pgpass
          env:
            - name: PGPASSFILE
              value: /var/secrets/pgpass/.pgpass
      restartPolicy: Never
      volumes:
        - name: pgpass
          secret:
            secretName: pgpass
            defaultMode: 0600

View File

@@ -1,7 +0,0 @@
#!/bin/bash
cd "$(dirname "$0")"
set -ex

tilt ci
tilt down --delete-namespaces

Some files were not shown because too many files have changed in this diff.