remove explorer-related code

Evan Gray 2022-10-28 03:19:17 +00:00 committed by Evan Gray
parent 24d38183a8
commit 83e0f053f8
47 changed files with 6 additions and 12282 deletions

View File

@ -45,7 +45,6 @@ config.define_bool("evm2", False, "Enable second Eth component")
config.define_bool("solana", False, "Enable Solana component")
config.define_bool("terra_classic", False, "Enable Terra Classic component")
config.define_bool("terra2", False, "Enable Terra 2 component")
config.define_bool("explorer", False, "Enable explorer component")
config.define_bool("spy_relayer", False, "Enable spy relayer")
config.define_bool("ci_tests", False, "Enable tests runner component")
config.define_bool("guardiand_debug", False, "Enable dlv endpoint for guardiand")
@ -57,8 +56,8 @@ config.define_bool("secondWormchain", False, "Enable a second wormchain node wit
cfg = config.parse()
num_guardians = int(cfg.get("num", "1"))
namespace = cfg.get("namespace", "wormhole")
gcpProject = cfg.get("gcpProject", "local-dev")
bigTableKeyPath = cfg.get("bigTableKeyPath", "./event_database/devnet_key.json")
gcpProject = cfg.get("gcpProject", "")
bigTableKeyPath = cfg.get("bigTableKeyPath", "")
webHost = cfg.get("webHost", "localhost")
ci = cfg.get("ci", False)
algorand = cfg.get("algorand", ci)
@ -69,7 +68,6 @@ solana = cfg.get("solana", ci)
terra_classic = cfg.get("terra_classic", ci)
terra2 = cfg.get("terra2", ci)
wormchain = cfg.get("wormchain", ci)
explorer = cfg.get("explorer", ci)
spy_relayer = cfg.get("spy_relayer", ci)
ci_tests = cfg.get("ci_tests", ci)
guardiand_debug = cfg.get("guardiand_debug", False)
@ -101,7 +99,7 @@ local_resource(
# node
if explorer:
if bigTableKeyPath != "":
k8s_yaml_with_ns(
secret_yaml_generic(
"node-bigtable-key",
@ -148,7 +146,7 @@ def build_node_yaml():
elif ci:
container["command"] += ["--logLevel=warn"]
if explorer:
if gcpProject != "":
container["command"] += [
"--bigTablePersistenceEnabled",
"--bigTableInstanceName",
@ -532,39 +530,6 @@ if ci_tests:
resource_deps = [], # testing/spydk.sh handles waiting for spy; omitting deps lets the build start earlier
)
# bigtable
if explorer:
k8s_yaml_with_ns("devnet/bigtable.yaml")
k8s_resource(
"bigtable-emulator",
port_forwards = [port_forward(8086, name = "BigTable clients [:8086]")],
labels = ["explorer"],
trigger_mode = trigger_mode,
)
k8s_resource(
"pubsub-emulator",
port_forwards = [port_forward(8085, name = "PubSub listeners [:8085]")],
labels = ["explorer"],
)
docker_build(
ref = "cloud-functions",
context = "./event_database",
dockerfile = "./event_database/functions_server/Dockerfile",
live_update = [
sync("./event_database/cloud_functions", "/app/cloud_functions"),
],
)
k8s_resource(
"cloud-functions",
resource_deps = ["bigtable-emulator", "pubsub-emulator"],
port_forwards = [port_forward(8090, name = "Cloud Functions [:8090]", host = webHost)],
labels = ["explorer"],
trigger_mode = trigger_mode,
)
if terra_classic:
docker_build(
ref = "terra-image",

View File

@ -1,176 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
app: bigtable-emulator
name: bigtable-emulator
spec:
ports:
- name: clients
port: 8086
targetPort: clients
protocol: TCP
selector:
app: bigtable-emulator
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app: bigtable-emulator
name: bigtable-emulator
spec:
serviceName: bigtable-emulator
replicas: 1
selector:
matchLabels:
app: bigtable-emulator
template:
metadata:
labels:
app: bigtable-emulator
spec:
containers:
- name: bigtable-emulator
image: google/cloud-sdk:353.0.0-alpine@sha256:594278737ce111f7922afa8722b4b526c19938e74b9ab931d23c4d2755802d3a
env:
- name: BIGTABLE_EMULATOR_HOST
value: localhost:8086
command:
- /bin/sh
- -c
- |
echo project = local-dev > ~/.cbtrc
echo instance = wormhole >> ~/.cbtrc
gcloud --quiet components install beta cbt bigtable
gcloud --quiet beta emulators bigtable start --host-port=0.0.0.0:8086 &
sleep 3
cbt createtable v2Events "families=MessagePublication,QuorumState,TokenTransferPayload,AssetMetaPayload,NFTTransferPayload,TokenTransferDetails,ChainDetails"
nc -lkp 2000 0.0.0.0
readinessProbe:
periodSeconds: 2
failureThreshold: 300
tcpSocket:
port: 2000
ports:
- containerPort: 8086
name: clients
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
labels:
app: pubsub-emulator
name: pubsub-emulator
spec:
ports:
- name: listeners
port: 8085
targetPort: listeners
protocol: TCP
selector:
app: pubsub-emulator
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app: pubsub-emulator
name: pubsub-emulator
spec:
serviceName: pubsub-emulator
replicas: 1
selector:
matchLabels:
app: pubsub-emulator
template:
metadata:
labels:
app: pubsub-emulator
spec:
containers:
- name: pubsub-emulator
image: gcr.io/google.com/cloudsdktool/cloud-sdk@sha256:d7748afac2e3dc3768bfd0db16d26a7c538821146e433874acff0eb1cfd853ba
env:
- name: PUBSUB_EMULATOR_HOST
value: localhost:8085
- name: PUBSUB_PROJECT_ID
value: local-dev
command:
- /bin/sh
- -c
- |
gcloud --quiet components install beta pubsub-emulator
gcloud --quiet beta emulators pubsub start --host-port=0.0.0.0:8085
readinessProbe:
periodSeconds: 2
failureThreshold: 300
tcpSocket:
port: 8085
ports:
- containerPort: 8085
name: listeners
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
labels:
app: bigtable-functions
name: bigtable-functions
spec:
ports:
- name: functions
port: 8090
targetPort: functions
protocol: TCP
selector:
app: bigtable-functions
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
labels:
app: cloud-functions
name: cloud-functions
spec:
serviceName: cloud-functions
replicas: 1
selector:
matchLabels:
app: cloud-functions
template:
metadata:
labels:
app: cloud-functions
spec:
containers:
- name: cloud-functions
image: cloud-functions
env:
- name: BIGTABLE_EMULATOR_HOST
value: bigtable-emulator:8086
- name: PUBSUB_EMULATOR_HOST
value: pubsub-emulator:8085
- name: GCP_PROJECT
value: local-dev
- name: BIGTABLE_INSTANCE
value: wormhole
- name: PUBSUB_NEW_VAA_TOPIC
value: new-vaa-devnet
- name: PUBSUB_NEW_VAA_SUBSCRIPTION
value: extract-payload-devnet
- name: PUBSUB_TOKEN_TRANSFER_DETAILS_TOPIC
value: create-token-transfer-details-devnet
- name: PUBSUB_TOKEN_TRANSFER_DETAILS_SUBSCRIPTION
value: calculate-transfer-data-devnet
ports:
- containerPort: 8080
name: functions
protocol: TCP
readinessProbe:
httpGet:
port: 8080
path: /readyz
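For reference, the table the init script above creates can be checked from Go. A minimal sketch, assuming `BIGTABLE_EMULATOR_HOST` points at the emulator (the Go BigTable client honors that variable automatically) and using the same `local-dev`/`wormhole` identifiers written to `~/.cbtrc`:
```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/bigtable"
)

func main() {
	ctx := context.Background()
	// project/instance match the cbt setup in the init script above
	admin, err := bigtable.NewAdminClient(ctx, "local-dev", "wormhole")
	if err != nil {
		log.Fatalf("admin client: %v", err)
	}
	defer admin.Close()

	// list the column families of the v2Events table the emulator created
	info, err := admin.TableInfo(ctx, "v2Events")
	if err != nil {
		log.Fatalf("table info: %v", err)
	}
	fmt.Println("column families:", info.Families)
}
```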

View File

@ -59,11 +59,6 @@ spec:
name: node-rundir
- mountPath: /tmp/mounted-keys
name: node-keysdir
env:
- name: BIGTABLE_EMULATOR_HOST
value: bigtable-emulator:8086
- name: PUBSUB_EMULATOR_HOST
value: pubsub-emulator:8085
command:
- /guardiand
- node
@ -92,7 +87,7 @@ spec:
- --moonbeamRPC
- ws://eth-devnet:8545
- --arbitrumRPC
- ws://eth-devnet:8545
- ws://eth-devnet:8545
- --neonRPC
- ws://eth-devnet:8545
# - --wormchainWS

View File

@ -1,8 +0,0 @@
# Event Database
Contents
- `cloud_functions`: functions that interact with BigTable.
- `functions_server`: an HTTP server for hosting cloud_functions locally. Used for development and for running the functions in the tilt devnet.
- `initialize_db`: script for initializing a table and column families.
- `schema-docs`: design documents.

View File

@ -1,23 +0,0 @@
.git
.gitignore
.vscode
.env
cmd
*.md
*.txt
*.json
Dockerfile
# vendor bundle is supplied rather than fetching deps.
# google cloud does not respect the "replace" in go.mod:
# replace github.com/gogo/protobuf => github.com/regen-network/protobuf
# so, vendor bundle is supplied instead.
go.mod

View File

@ -1,6 +0,0 @@
vendor
*.json
!token-allowlist-*.json
*.log
*.txt

View File

@ -1,62 +0,0 @@
## Google Cloud functions for BigTable
This is a reference implementation for getting data out of BigTable.
## Contents
This directory holds GCP Cloud Functions, one per file, along with shared utilities in `shared.go`. The file names correspond to the hosted endpoints, i.e. the endpoint `.../*-notionaltvl` is implemented in the file `notional-tvl.go`.
## Debugging with VSCode
### prereqs
- Golang >= 1.16 installed and available on your path.
- The Go VSCode extension and gopls installed.
### IDE setup
- open a new VSCode window
- File menu --> "Open Workspace from File..."
- Select `event_database/cloud_functions/workspace.code-workspace`
Opening the workspace file as described above will open both `cloud_functions` and `functions_server`, so you get all the VSCode goodness: IntelliSense, the ability to run the code under the Go debugger, set breakpoints, etc.
Add your environment variables to `functions_server/.vscode/launch.json`
Start the debug server by pressing `F5`. You can check your server is up by requesting http://localhost:8080/readyz.
### deploying
First deploy (creation) must include all the flags to configure the environment:
gcloud functions --project your-project deploy testnet --region europe-west3 --entry-point Entry --runtime go116 --trigger-http --allow-unauthenticated --service-account=your-readonly@your-project.iam.gserviceaccount.com --update-env-vars GCP_PROJECT=your-project,BIGTABLE_INSTANCE=wormhole-testnet
gcloud functions --project your-project deploy processvaa-testnet --region europe-west3 --entry-point ProcessVAA --runtime go116 --trigger-topic new-vaa-testnet --service-account=your-readonly@your-project.iam.gserviceaccount.com --update-env-vars GCP_PROJECT=your-project,BIGTABLE_INSTANCE=wormhole-testnet
Subsequent deploys (updates) only need the flags that identify the resource being updated: project, region, and name.
gcloud functions --project your-project deploy testnet --region europe-west3 --entry-point Entry
gcloud functions --project your-project deploy processvaa-testnet --region europe-west3 --entry-point ProcessVAA
### invocation
All routes accept their inputs either as query parameters or as a JSON request body; these are just two ways of issuing the same query:
GET
```bash
curl "https://region-project-id.cloudfunctions.net/testnet/readrow?emitterChain=2&emitterAddress=000000000000000000000000e982e462b094850f12af94d21d470e21be9d0e9c&sequence=0000000000000006"
```
POST
```bash
curl -X POST https://region-project-id.cloudfunctions.net/testnet/readrow \
-H "Content-Type:application/json" \
-d \
'{"emitterChain":"2", "emitterAddress":"000000000000000000000000e982e462b094850f12af94d21d470e21be9d0e9c", "sequence":"0000000000000006"}'
```
See [./bigtable-endpoints.md](./bigtable-endpoints.md) for API patterns
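The POST invocation can equally be issued from Go. A minimal sketch mirroring the curl example above (same placeholder host and parameters):
```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	body := []byte(`{"emitterChain":"2","emitterAddress":"000000000000000000000000e982e462b094850f12af94d21d470e21be9d0e9c","sequence":"0000000000000006"}`)
	resp, err := http.Post(
		"https://region-project-id.cloudfunctions.net/testnet/readrow",
		"application/json",
		bytes.NewReader(body),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```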

View File

@ -1,447 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"context"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"sort"
"strconv"
"sync"
"time"
"cloud.google.com/go/bigtable"
)
type cumulativeAddressesResult struct {
AllTimeAmounts map[string]map[string]float64
AllTimeCounts map[string]int
AllTimeDurationDays int
DailyAmounts map[string]map[string]map[string]float64
DailyCounts map[string]map[string]int
}
// an in-memory cache of previously calculated results
var warmCumulativeAddressesCache = map[string]map[string]map[string]map[string]float64{}
var muWarmCumulativeAddressesCache sync.RWMutex
var warmCumulativeAddressesCacheFilePath = "addresses-transferred-to-cumulative-cache.json"
var addressesToUpToYesterday = map[string]map[string]map[string]map[string]float64{}
var muAddressesToUpToYesterday sync.RWMutex
var addressesToUpToYesterdayFilePath = "addresses-transferred-to-up-to-yesterday-cache.json"
// finds all the unique addresses that have received tokens since a particular moment.
func addressesTransferredToSince(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]float64 {
if _, ok := addressesToUpToYesterday["*"]; !ok && loadCache {
loadJsonToInterface(ctx, addressesToUpToYesterdayFilePath, &muAddressesToUpToYesterday, &addressesToUpToYesterday)
}
now := time.Now().UTC()
today := now.Format("2006-01-02")
oneDayAgo := -time.Duration(24) * time.Hour
yesterday := now.Add(oneDayAgo).Format("2006-01-02")
result := map[string]map[string]float64{}
// create the unique identifier for this query, for cache
cachePrefix := createCachePrefix(prefix)
muAddressesToUpToYesterday.Lock()
if _, ok := addressesToUpToYesterday[cachePrefix]; !ok {
addressesToUpToYesterday[cachePrefix] = map[string]map[string]map[string]float64{}
}
if cacheData, ok := addressesToUpToYesterday[cachePrefix][yesterday]; ok {
// cache has data through midnight yesterday
for chain, addresses := range cacheData {
result[chain] = map[string]float64{}
for address, amount := range addresses {
result[chain][address] = amount
}
}
// set the start to be the start of today
start = time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
}
muAddressesToUpToYesterday.Unlock()
// fetch data for days not in the cache
dailyAddresses := createAddressesOfInterval(tbl, ctx, prefix, start)
// loop through the query results to combine cache + fresh data
for _, chains := range dailyAddresses {
for chain, addresses := range chains {
// ensure the chain exists in the result map
if _, ok := result[chain]; !ok {
result[chain] = map[string]float64{}
}
for address, amount := range addresses {
if _, ok := result[chain][address]; !ok {
result[chain][address] = 0
}
// add the amount the address received this day to the
// amount already in the result (amount the address has received so far)
result[chain][address] = result[chain][address] + amount
}
}
}
muAddressesToUpToYesterday.Lock()
if _, ok := addressesToUpToYesterday[cachePrefix][yesterday]; !ok {
addressesToUpToYesterday[cachePrefix][yesterday] = map[string]map[string]float64{}
// no cache, populate it
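// copy the combined totals, then subtract today's partial-day amounts,
// so the cached snapshot covers only complete days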
upToYesterday := map[string]map[string]float64{}
for chain, addresses := range result {
upToYesterday[chain] = map[string]float64{}
for address, amount := range addresses {
upToYesterday[chain][address] = amount
}
}
for chain, addresses := range dailyAddresses[today] {
for address, amount := range addresses {
// subtract the amounts from today, in order to create an "upToYesterday" amount
upToYesterday[chain][address] = result[chain][address] - amount
}
}
// loop again to assign values to the cache
for chain, addresses := range upToYesterday {
if _, ok := addressesToUpToYesterday[cachePrefix][yesterday][chain]; !ok {
addressesToUpToYesterday[cachePrefix][yesterday][chain] = map[string]float64{}
}
for address, amount := range addresses {
addressesToUpToYesterday[cachePrefix][yesterday][chain][address] = amount
}
}
muAddressesToUpToYesterday.Unlock()
// write cache to disc
persistInterfaceToJson(ctx, addressesToUpToYesterdayFilePath, &muAddressesToUpToYesterday, addressesToUpToYesterday)
} else {
muAddressesToUpToYesterday.Unlock()
}
return result
}
// calculates a map of recipient address to notional value received, by chain, since the specified start time.
func createCumulativeAddressesOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]map[string]float64 {
if _, ok := warmCumulativeAddressesCache["*"]; !ok && loadCache {
loadJsonToInterface(ctx, warmCumulativeAddressesCacheFilePath, &muWarmCumulativeAddressesCache, &warmCumulativeAddressesCache)
}
now := time.Now().UTC()
today := now.Format("2006-01-02")
cachePrefix := createCachePrefix(prefix)
cacheNeedsUpdate := false
muWarmCumulativeAddressesCache.Lock()
if _, ok := warmCumulativeAddressesCache[cachePrefix]; !ok {
warmCumulativeAddressesCache[cachePrefix] = map[string]map[string]map[string]float64{}
}
muWarmCumulativeAddressesCache.Unlock()
results := map[string]map[string]map[string]float64{}
dailyAddresses := createAddressesOfInterval(tbl, ctx, prefix, releaseDay)
dateKeys := make([]string, 0, len(dailyAddresses))
for k := range dailyAddresses {
dateKeys = append(dateKeys, k)
}
sort.Strings(dateKeys)
// iterate through the dates in the result set, and accumulate the amounts
// for each address, based on the destination of the transfer.
for i, date := range dateKeys {
results[date] = map[string]map[string]float64{"*": {"*": 0}}
muWarmCumulativeAddressesCache.RLock()
if dateCache, ok := warmCumulativeAddressesCache[cachePrefix][date]; ok && dateCache != nil && useCache(date) {
// have a cached value for this day, use it.
// iterate through cache and copy values to the result
for chain, addresses := range dateCache {
results[date][chain] = map[string]float64{}
for address, amount := range addresses {
results[date][chain][address] = amount
}
}
muWarmCumulativeAddressesCache.RUnlock()
} else {
// no cached value for this day, must calculate it
muWarmCumulativeAddressesCache.RUnlock()
if i == 0 {
// special case for first day, no need to sum.
for chain, addresses := range dailyAddresses[date] {
results[date][chain] = map[string]float64{}
for address, amount := range addresses {
results[date][chain][address] = amount
}
}
} else {
// find the string of the previous day
prevDate := dateKeys[i-1]
prevDayChains := results[prevDate]
thisDayChains := dailyAddresses[date]
for chain, thisDayAddresses := range thisDayChains {
// create a union of the addresses from this day, and previous days
addressUnion := map[string]string{}
for address := range prevDayChains[chain] {
addressUnion[address] = address
}
for address := range thisDayAddresses {
addressUnion[address] = address
}
// initialize the chain/address map for this date
if _, ok := results[date][chain]; !ok {
results[date][chain] = map[string]float64{}
}
// iterate through the union of addresses, creating an amount for each one,
// and adding it to the results.
for address := range addressUnion {
thisDayAmount := float64(0)
if amt, ok := thisDayAddresses[address]; ok {
thisDayAmount = amt
}
prevDayAmount := float64(0)
if prevAmount, ok := results[prevDate][chain][address]; ok && prevAmount != 0 {
prevDayAmount = prevAmount
}
cumulativeAmount := prevDayAmount + thisDayAmount
results[date][chain][address] = cumulativeAmount
}
}
}
// don't cache today
if date != today {
// set the result in the cache
muWarmCumulativeAddressesCache.Lock()
if _, ok := warmCumulativeAddressesCache[cachePrefix][date]; !ok || !useCache(date) {
// cache does not have this date, persist it for other instances.
warmCumulativeAddressesCache[cachePrefix][date] = map[string]map[string]float64{}
for chain, addresses := range results[date] {
warmCumulativeAddressesCache[cachePrefix][date][chain] = map[string]float64{}
for address, amount := range addresses {
warmCumulativeAddressesCache[cachePrefix][date][chain][address] = amount
}
}
cacheNeedsUpdate = true
}
muWarmCumulativeAddressesCache.Unlock()
}
}
}
if cacheNeedsUpdate {
persistInterfaceToJson(ctx, warmCumulativeAddressesCacheFilePath, &muWarmCumulativeAddressesCache, warmCumulativeAddressesCache)
}
// take the most recent n days, rather than returning all days since launch
selectDays := map[string]map[string]map[string]float64{}
days := getDaysInRange(start, now)
for _, day := range days {
selectDays[day] = map[string]map[string]float64{}
for chain, addresses := range results[day] {
selectDays[day][chain] = map[string]float64{}
for address, amount := range addresses {
selectDays[day][chain][address] = amount
}
}
}
return selectDays
}
// finds unique addresses that tokens have been transferred to.
func AddressesTransferredToCumulative(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
var numDays, forChain, forAddress, daily, allTime, counts, amounts string
// allow GET requests with querystring params, or POST requests with json body.
switch r.Method {
case http.MethodGet:
queryParams := r.URL.Query()
numDays = queryParams.Get("numDays")
forChain = queryParams.Get("forChain")
forAddress = queryParams.Get("forAddress")
daily = queryParams.Get("daily")
allTime = queryParams.Get("allTime")
counts = queryParams.Get("counts")
amounts = queryParams.Get("amounts")
case http.MethodPost:
// declare request body properties
var d struct {
NumDays string `json:"numDays"`
ForChain string `json:"forChain"`
ForAddress string `json:"forAddress"`
Daily string `json:"daily"`
AllTime string `json:"allTime"`
Counts string `json:"counts"`
Amounts string `json:"amounts"`
}
// deserialize request body
if err := json.NewDecoder(r.Body).Decode(&d); err != nil {
switch err {
case io.EOF:
// do nothing, empty body is ok
default:
log.Printf("json.NewDecoder: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
numDays = d.NumDays
forChain = d.ForChain
forAddress = d.ForAddress
daily = d.Daily
allTime = d.AllTime
counts = d.Counts
amounts = d.Amounts
default:
http.Error(w, "405 - Method Not Allowed", http.StatusMethodNotAllowed)
log.Println("Method Not Allowed")
return
}
if daily == "" && allTime == "" {
// none of the options were set, so set one
allTime = "true"
}
if counts == "" && amounts == "" {
// neither of the options were set, so set one
counts = "true"
}
var queryDays int
if numDays == "" {
queryDays = 30
} else {
var convErr error
queryDays, convErr = strconv.Atoi(numDays)
if convErr != nil {
fmt.Fprint(w, "numDays must be an integer")
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
// create the rowkey prefix for querying
prefix := ""
if forChain != "" {
prefix = forChain
// if the request is forChain, always groupBy chain
if forAddress != "" {
// if the request is forAddress, always groupBy address
prefix = forChain + ":" + forAddress
}
}
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
var wg sync.WaitGroup
// total of the last numDays
addressesDailyAmounts := map[string]map[string]float64{}
addressesDailyCounts := map[string]int{}
allTimeDays := int(time.Now().UTC().Sub(releaseDay).Hours() / 24)
if allTime != "" {
wg.Add(1)
go func(prefix string) {
defer wg.Done()
periodAmounts := addressesTransferredToSince(tbl, ctx, prefix, releaseDay)
if amounts != "" {
for chain, addresses := range periodAmounts {
addressesDailyAmounts[chain] = map[string]float64{}
for address, amount := range addresses {
addressesDailyAmounts[chain][address] = roundToTwoDecimalPlaces(amount)
}
}
}
if counts != "" {
for chain, addresses := range periodAmounts {
// need to sum all the chains to get the total count of addresses,
// since addresses are not unique across chains.
numAddresses := len(addresses)
addressesDailyCounts[chain] = len(addresses)
addressesDailyCounts["*"] = addressesDailyCounts["*"] + numAddresses
}
}
}(prefix)
}
// daily totals
dailyAmounts := map[string]map[string]map[string]float64{}
dailyCounts := map[string]map[string]int{}
if daily != "" {
wg.Add(1)
go func(prefix string, queryDays int) {
hours := (24 * queryDays)
periodInterval := -time.Duration(hours) * time.Hour
now := time.Now().UTC()
prev := now.Add(periodInterval)
start := time.Date(prev.Year(), prev.Month(), prev.Day(), 0, 0, 0, 0, prev.Location())
defer wg.Done()
dailyTotals := createCumulativeAddressesOfInterval(tbl, ctx, prefix, start)
if amounts != "" {
for date, chains := range dailyTotals {
dailyAmounts[date] = map[string]map[string]float64{}
for chain, addresses := range chains {
dailyAmounts[date][chain] = map[string]float64{}
for address, amount := range addresses {
dailyAmounts[date][chain][address] = roundToTwoDecimalPlaces(amount)
}
}
}
}
if counts != "" {
for date, chains := range dailyTotals {
dailyCounts[date] = map[string]int{}
for chain, addresses := range chains {
// need to sum all the chains to get the total count of addresses,
// since addresses are not unique across chains.
numAddresses := len(addresses)
dailyCounts[date][chain] = numAddresses
dailyCounts[date]["*"] = dailyCounts[date]["*"] + numAddresses
}
}
}
}(prefix, queryDays)
}
wg.Wait()
result := &cumulativeAddressesResult{
AllTimeAmounts: addressesDailyAmounts,
AllTimeCounts: addressesDailyCounts,
AllTimeDurationDays: allTimeDays,
DailyAmounts: dailyAmounts,
DailyCounts: dailyCounts,
}
jsonBytes, err := json.Marshal(result)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}
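The accumulation in createCumulativeAddressesOfInterval reduces to a running sum over date-sorted keys, carried forward across the union of addresses seen so far. A stripped-down sketch of the pattern for a single chain (the `cumulative` helper is illustrative, not part of this file):
```go
package main

import (
	"fmt"
	"sort"
)

// cumulative turns per-day amounts keyed by address into running totals,
// carrying each address's previous total forward day by day.
func cumulative(daily map[string]map[string]float64) map[string]map[string]float64 {
	dates := make([]string, 0, len(daily))
	for d := range daily {
		dates = append(dates, d)
	}
	sort.Strings(dates)

	out := map[string]map[string]float64{}
	for i, date := range dates {
		out[date] = map[string]float64{}
		// start from today's amounts
		for addr, amt := range daily[date] {
			out[date][addr] = amt
		}
		if i == 0 {
			continue // first day: nothing to carry forward
		}
		// add the previous day's cumulative value for every address seen so far
		for addr, amt := range out[dates[i-1]] {
			out[date][addr] += amt
		}
	}
	return out
}

func main() {
	fmt.Println(cumulative(map[string]map[string]float64{
		"2021-09-21": {"addr1": 10},
		"2021-09-22": {"addr1": 5, "addr2": 7},
	}))
}
```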

View File

@ -1,509 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"bytes"
"context"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"strconv"
"strings"
"sync"
"time"
"cloud.google.com/go/bigtable"
)
type addressesResult struct {
Last24HoursAmounts map[string]map[string]float64
Last24HoursCounts map[string]int
WithinPeriodAmounts map[string]map[string]float64
WithinPeriodCounts map[string]int
PeriodDurationDays int
DailyAmounts map[string]map[string]map[string]float64
DailyCounts map[string]map[string]int
}
// an in-memory cache of previously calculated results
var warmAddressesCache = map[string]map[string]map[string]map[string]float64{}
var muWarmAddressesCache sync.RWMutex
var warmAddressesCacheFilePath = "addresses-transferred-to-cache.json"
type AddressData struct {
TokenSymbol string
TokenAmount float64
OriginChain string
LeavingChain string
DestinationChain string
DestinationAddress string
Notional float64
}
func fetchAddressRowsInInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start, end time.Time) []AddressData {
rows := []AddressData{}
err := tbl.ReadRows(ctx, bigtable.PrefixRange(prefix), func(row bigtable.Row) bool {
t := &AddressData{}
if _, ok := row[transferDetailsFam]; ok {
for _, item := range row[transferDetailsFam] {
switch item.Column {
case "TokenTransferDetails:Amount":
amount, _ := strconv.ParseFloat(string(item.Value), 64)
t.TokenAmount = amount
case "TokenTransferDetails:NotionalUSD":
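// NotionalUSD is stored as an 8-byte big-endian IEEE 754 float64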
reader := bytes.NewReader(item.Value)
var notionalFloat float64
if err := binary.Read(reader, binary.BigEndian, &notionalFloat); err != nil {
log.Fatalf("failed to read NotionalUSD of row: %v. err %v ", row.Key(), err)
}
t.Notional = notionalFloat
case "TokenTransferDetails:OriginSymbol":
t.TokenSymbol = string(item.Value)
}
}
if _, ok := row[transferPayloadFam]; ok {
for _, item := range row[transferPayloadFam] {
switch item.Column {
case "TokenTransferPayload:OriginChain":
t.OriginChain = string(item.Value)
case "TokenTransferPayload:TargetChain":
t.DestinationChain = string(item.Value)
case "TokenTransferPayload:TargetAddress":
t.DestinationAddress = string(item.Value)
}
}
t.DestinationAddress = transformHexAddressToNative(chainIdStringToType(t.DestinationChain), t.DestinationAddress)
}
keyParts := strings.Split(row.Key(), ":")
t.LeavingChain = keyParts[0]
rows = append(rows, *t)
}
return true
}, bigtable.RowFilter(
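// ConditionFilter(predicate, trueFilter, falseFilter): rows with at least one cell
// matching the time-range predicate below get their transfer columns returned;
// all other rows are blocked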
bigtable.ConditionFilter(
bigtable.ChainFilters(
bigtable.FamilyFilter(columnFamilies[1]),
bigtable.CellsPerRowLimitFilter(1), // only the first cell in column
bigtable.TimestampRangeFilter(start, end), // within time range
bigtable.StripValueFilter(), // no columns/values, just the row.Key()
),
bigtable.ChainFilters(
bigtable.FamilyFilter(fmt.Sprintf("%v|%v", columnFamilies[2], columnFamilies[5])),
bigtable.ColumnFilter("Amount|NotionalUSD|OriginSymbol|OriginChain|TargetChain|TargetAddress"),
bigtable.LatestNFilter(1),
),
bigtable.BlockAllFilter(),
),
))
if err != nil {
log.Fatalln("failed reading rows to create RowList.", err)
}
return rows
}
// finds unique addresses tokens have been sent to, for each day since the start time passed in.
func createAddressesOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]map[string]float64 {
if _, ok := warmAddressesCache["*"]; !ok && loadCache {
loadJsonToInterface(ctx, warmAddressesCacheFilePath, &muWarmAddressesCache, &warmAddressesCache)
}
results := map[string]map[string]map[string]float64{}
now := time.Now().UTC()
numPrevDays := int(now.Sub(start).Hours() / 24)
var intervalsWG sync.WaitGroup
// there will be a query for each previous day, plus today
intervalsWG.Add(numPrevDays + 1)
// create the unique identifier for this query, for cache
cachePrefix := createCachePrefix(prefix)
cacheNeedsUpdate := false
for daysAgo := 0; daysAgo <= numPrevDays; daysAgo++ {
go func(tbl *bigtable.Table, ctx context.Context, prefix string, daysAgo int) {
// start is the start of day (SOD), end is end of day (EOD)
// "0 daysAgo start" is 00:00:00 of the current day
// "0 daysAgo end" is 23:59:59 of the current day (in the future)
// calculate the start and end times for the query
hoursAgo := (24 * daysAgo)
daysAgoDuration := -time.Duration(hoursAgo) * time.Hour
n := now.Add(daysAgoDuration)
year := n.Year()
month := n.Month()
day := n.Day()
loc := n.Location()
start := time.Date(year, month, day, 0, 0, 0, 0, loc)
end := time.Date(year, month, day, 23, 59, 59, maxNano, loc)
dateStr := start.Format("2006-01-02")
muWarmAddressesCache.Lock()
// initialize the map for this date in the result set
results[dateStr] = map[string]map[string]float64{}
// check to see if there is cache data for this date/query
if dates, ok := warmAddressesCache[cachePrefix]; ok {
// have a cache for this query
if dateCache, ok := dates[dateStr]; ok && useCache(dateStr) {
// have a cache for this date
if daysAgo >= 1 {
// only use the cache for yesterday and older
results[dateStr] = dateCache
muWarmAddressesCache.Unlock()
intervalsWG.Done()
return
}
}
} else {
// no cache for this query, initialize the map
warmAddressesCache[cachePrefix] = map[string]map[string]map[string]float64{}
}
muWarmAddressesCache.Unlock()
defer intervalsWG.Done()
queryResult := fetchAddressRowsInInterval(tbl, ctx, prefix, start, end)
// iterate through the rows and accumulate the notional amount per destination address
for _, row := range queryResult {
if _, ok := results[dateStr][row.DestinationChain]; !ok {
results[dateStr][row.DestinationChain] = map[string]float64{}
}
results[dateStr][row.DestinationChain][row.DestinationAddress] = results[dateStr][row.DestinationChain][row.DestinationAddress] + row.Notional
}
if daysAgo >= 1 {
// set the result in the cache
muWarmAddressesCache.Lock()
if _, ok := warmAddressesCache[cachePrefix][dateStr]; !ok || !useCache(dateStr) {
// cache does not have this date, persist it for other instances.
warmAddressesCache[cachePrefix][dateStr] = results[dateStr]
cacheNeedsUpdate = true
}
muWarmAddressesCache.Unlock()
}
}(tbl, ctx, prefix, daysAgo)
}
intervalsWG.Wait()
if cacheNeedsUpdate {
persistInterfaceToJson(ctx, warmAddressesCacheFilePath, &muWarmAddressesCache, warmAddressesCache)
}
// create a set of all the keys from all dates/chains, to ensure the result objects all have the same keys
seenChainSet := map[string]bool{}
for _, chains := range results {
for leaving := range chains {
seenChainSet[leaving] = true
}
}
// ensure each chain object has all the same symbol keys:
for date := range results {
for chain := range seenChainSet {
// check that date has all the chains
if _, ok := results[date][chain]; !ok {
results[date][chain] = map[string]float64{}
}
}
}
return results
}
// finds all the unique addresses that have received tokens since a particular moment.
func addressesTransferredToSinceDate(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]float64 {
result := map[string]map[string]float64{}
// fetch data for days not in the cache
dailyAddresses := createAddressesOfInterval(tbl, ctx, prefix, start)
// loop through the query results to combine cache + fresh data
for _, chains := range dailyAddresses {
for chain, addresses := range chains {
// ensure the chain exists in the result map
if _, ok := result[chain]; !ok {
result[chain] = map[string]float64{}
}
for address, amount := range addresses {
if _, ok := result[chain][address]; !ok {
result[chain][address] = 0
}
// add the amount the address received this day to the
// amount already in the result (amount the address has received so far)
result[chain][address] = result[chain][address] + amount
}
}
}
return result
}
// returns addresses that received tokens within the specified time range
func addressesForInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start, end time.Time) map[string]map[string]float64 {
// query for all rows in time range, return result count
queryResult := fetchAddressRowsInInterval(tbl, ctx, prefix, start, end)
result := map[string]map[string]float64{}
// iterate through the rows and accumulate the notional amount for each address
for _, row := range queryResult {
if _, ok := result[row.DestinationChain]; !ok {
result[row.DestinationChain] = map[string]float64{}
}
result[row.DestinationChain][row.DestinationAddress] = result[row.DestinationChain][row.DestinationAddress] + row.Notional
}
return result
}
// find the addresses tokens have been transferred to
func AddressesTransferredTo(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
var numDays, forChain, forAddress, daily, last24Hours, forPeriod, counts, amounts string
// allow GET requests with querystring params, or POST requests with json body.
switch r.Method {
case http.MethodGet:
queryParams := r.URL.Query()
numDays = queryParams.Get("numDays")
forChain = queryParams.Get("forChain")
forAddress = queryParams.Get("forAddress")
daily = queryParams.Get("daily")
last24Hours = queryParams.Get("last24Hours")
forPeriod = queryParams.Get("forPeriod")
counts = queryParams.Get("counts")
amounts = queryParams.Get("amounts")
case http.MethodPost:
// declare request body properties
var d struct {
NumDays string `json:"numDays"`
ForChain string `json:"forChain"`
ForAddress string `json:"forAddress"`
Daily string `json:"daily"`
Last24Hours string `json:"last24Hours"`
ForPeriod string `json:"forPeriod"`
Counts string `json:"counts"`
Amounts string `json:"amounts"`
}
// deserialize request body
if err := json.NewDecoder(r.Body).Decode(&d); err != nil {
switch err {
case io.EOF:
// do nothing, empty body is ok
default:
log.Printf("json.NewDecoder: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
numDays = d.NumDays
forChain = d.ForChain
forAddress = d.ForAddress
daily = d.Daily
last24Hours = d.Last24Hours
forPeriod = d.ForPeriod
counts = d.Counts
amounts = d.Amounts
default:
http.Error(w, "405 - Method Not Allowed", http.StatusMethodNotAllowed)
log.Println("Method Not Allowed")
return
}
if daily == "" && last24Hours == "" && forPeriod == "" {
// none of the options were set, so set one
last24Hours = "true"
}
if counts == "" && amounts == "" {
// neither of the options were set, so set one
counts = "true"
}
var queryDays int
if numDays == "" {
queryDays = 30
} else {
var convErr error
queryDays, convErr = strconv.Atoi(numDays)
if convErr != nil {
fmt.Fprint(w, "numDays must be an integer")
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
// create the rowkey prefix for querying
prefix := ""
if forChain != "" {
prefix = forChain
// if the request is forChain, always groupBy chain
if forAddress != "" {
// if the request is forAddress, always groupBy address
prefix = forChain + ":" + forAddress
}
}
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
var wg sync.WaitGroup
// total of last 24 hours
last24HourAmounts := map[string]map[string]float64{}
last24HourCounts := map[string]int{}
if last24Hours != "" {
wg.Add(1)
go func(prefix string) {
last24HourInterval := -time.Duration(24) * time.Hour
now := time.Now().UTC()
start := now.Add(last24HourInterval)
defer wg.Done()
last24HourAddresses := addressesForInterval(tbl, ctx, prefix, start, now)
if amounts != "" {
for chain, addresses := range last24HourAddresses {
last24HourAmounts[chain] = map[string]float64{}
for address, amount := range addresses {
last24HourAmounts[chain][address] = roundToTwoDecimalPlaces(amount)
}
}
}
if counts != "" {
for chain, addresses := range last24HourAddresses {
// need to sum all the chains to get the total count of addresses,
// since addresses are not unique across chains.
numAddresses := len(addresses)
last24HourCounts[chain] = numAddresses
last24HourCounts["*"] = last24HourCounts["*"] + numAddresses
}
}
}(prefix)
}
// total of the last numDays
addressesDailyAmounts := map[string]map[string]float64{}
addressesDailyCounts := map[string]int{}
if forPeriod != "" {
wg.Add(1)
go func(prefix string) {
hours := (24 * queryDays)
periodInterval := -time.Duration(hours) * time.Hour
now := time.Now().UTC()
prev := now.Add(periodInterval)
start := time.Date(prev.Year(), prev.Month(), prev.Day(), 0, 0, 0, 0, prev.Location())
defer wg.Done()
// periodAmounts, err := addressesTransferredToSince(tbl, ctx, prefix, start)
periodAmounts := addressesTransferredToSinceDate(tbl, ctx, prefix, start)
if amounts != "" {
for chain, addresses := range periodAmounts {
addressesDailyAmounts[chain] = map[string]float64{}
for address, amount := range addresses {
addressesDailyAmounts[chain][address] = roundToTwoDecimalPlaces(amount)
}
}
}
if counts != "" {
for chain, addresses := range periodAmounts {
// need to sum all the chains to get the total count of addresses,
// since addresses are not unique across chains.
numAddresses := len(addresses)
addressesDailyCounts[chain] = numAddresses
addressesDailyCounts["*"] = addressesDailyCounts["*"] + numAddresses
}
}
}(prefix)
}
// daily totals
dailyAmounts := map[string]map[string]map[string]float64{}
dailyCounts := map[string]map[string]int{}
if daily != "" {
wg.Add(1)
go func(prefix string, queryDays int) {
hours := (24 * queryDays)
periodInterval := -time.Duration(hours) * time.Hour
now := time.Now().UTC()
prev := now.Add(periodInterval)
start := time.Date(prev.Year(), prev.Month(), prev.Day(), 0, 0, 0, 0, prev.Location())
defer wg.Done()
dailyTotals := createAddressesOfInterval(tbl, ctx, prefix, start)
if amounts != "" {
for date, chains := range dailyTotals {
dailyAmounts[date] = map[string]map[string]float64{}
for chain, addresses := range chains {
dailyAmounts[date][chain] = map[string]float64{}
for address, amount := range addresses {
dailyAmounts[date][chain][address] = roundToTwoDecimalPlaces(amount)
}
}
}
}
if counts != "" {
for date, chains := range dailyTotals {
dailyCounts[date] = map[string]int{}
for chain, addresses := range chains {
// need to sum all the chains to get the total count of addresses,
// since addresses are not unique across chains.
numAddresses := len(addresses)
dailyCounts[date][chain] = numAddresses
dailyCounts[date]["*"] = dailyCounts[date]["*"] + numAddresses
}
}
}
}(prefix, queryDays)
}
wg.Wait()
result := &addressesResult{
Last24HoursAmounts: last24HourAmounts,
Last24HoursCounts: last24HourCounts,
WithinPeriodAmounts: addressesDailyAmounts,
WithinPeriodCounts: addressesDailyCounts,
PeriodDurationDays: queryDays,
DailyAmounts: dailyAmounts,
DailyCounts: dailyCounts,
}
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(result)
}

View File

@ -1,375 +0,0 @@
# API design
There are two endpoints designed to be flexible enough to answer most questions: "recent" and "totals".
- "recent" returns rows; it is a gap-list query.
- "totals" returns counts of how many rows were found in the period.
---
## QueryParams
These endpoints can be used to query across all chains and addresses, and you can also drill-down into a chain or address.
### groupBy
- `groupBy=chain` results will be grouped by (keyed by) `emitterChain`.
- `groupBy=address` results will be grouped by (keyed by) `emitterChain:emitterAddress`.
### filter
- `forChain=2` only returns results for the specified chain.
- `forChain=2&forAddress=c69a...cb4f` only returns results for the specified chain + address.
### endpoint specific
- `/totals?numDays=6` specify the query interval.
- `/recent?numRows=6` specify the number of results.
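A minimal sketch of issuing one of these queries from Go, using the same devnet host as the examples below (any HTTP client works; the endpoints are plain GET/POST):
```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
)

func main() {
	// message counts grouped by chain for the last 7 days
	q := url.Values{}
	q.Set("groupBy", "chain")
	q.Set("numDays", "7")
	u := "https://us-east4-wormhole-315720.cloudfunctions.net/devnet/totals?" + q.Encode()

	resp, err := http.Get(u)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```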
---
## `Totals` function
Get the number of messages in the last 7 days. The `*` key designates all results.
https://us-east4-wormhole-315720.cloudfunctions.net/devnet/totals?numDays=7
```json
{
"LastDayCount": { "*": 14},
"PeriodCount": { "*": 69},
"DailyTotals": {
"2021-09-21": {"*": 55},
"2021-09-22": {"*": 0},
"2021-09-23": {"*": 0},
"2021-09-24": {"*": 0},
"2021-09-25": {"*": 0},
"2021-09-26": {"*": 0},
"2021-09-27": {"*": 14},
"2021-09-28": {"*": 0}
}
}
```
Get message counts grouped by chain, for the last 7 days:
https://us-east4-wormhole-315720.cloudfunctions.net/devnet/totals?groupBy=chain&numDays=7
```json
{
"LastDayCount": {
"1": 8,
"2": 3,
"4": 3,
"*": 14
},
"LastMonthCount": {
"1": 21,
"2": 24,
"4": 24,
"*": 69
},
"DailyTotals": {
"2021-09-21": {
"1": 13,
"2": 21,
"4": 21,
"*": 55
},
"2021-09-22": {
"1": 0,
"2": 0,
"4": 0,
"*": 0
},
"2021-09-23": {
"1": 0,
"2": 0,
"4": 0,
"*": 0
},
"2021-09-24": {
"1": 0,
"2": 0,
"4": 0,
"*": 0
},
"2021-09-25": {
"1": 0,
"2": 0,
"4": 0,
"*": 0
},
"2021-09-26": {
"1": 0,
"2": 0,
"4": 0,
"*": 0
},
"2021-09-27": {
"1": 8,
"2": 3,
"4": 3,
"*": 14
},
"2021-09-28": {
"1": 0,
"2": 0,
"4": 0,
"*": 0
}
}
}
```
Get message counts grouped by EmitterAddress, for the previous 3 days (includes the current day):
https://us-east4-wormhole-315720.cloudfunctions.net/devnet/totals?groupBy=address&numDays=3
```json
{
"LastDayCount": {
"*": 14,
"1:96ee982293251b48729804c8e8b24b553eb6b887867024948d2236fd37a577ab": 1,
"1:c69a1b1a65dd336bf1df6a77afb501fc25db7fc0938cb08595a9ef473265cb4f": 7,
"2:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": 3,
"4:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": 3
},
"TotalCount": {
"*": 14,
"1:96ee982293251b48729804c8e8b24b553eb6b887867024948d2236fd37a577ab": 1,
"1:c69a1b1a65dd336bf1df6a77afb501fc25db7fc0938cb08595a9ef473265cb4f": 7,
"2:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": 3,
"4:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": 3
},
"DailyTotals": {
"2021-09-25": {
"*": 0,
"1:96ee982293251b48729804c8e8b24b553eb6b887867024948d2236fd37a577ab": 0,
"1:c69a1b1a65dd336bf1df6a77afb501fc25db7fc0938cb08595a9ef473265cb4f": 0,
"2:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": 0,
"4:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": 0
},
"2021-09-26": {
"*": 0,
"1:96ee982293251b48729804c8e8b24b553eb6b887867024948d2236fd37a577ab": 0,
"1:c69a1b1a65dd336bf1df6a77afb501fc25db7fc0938cb08595a9ef473265cb4f": 0,
"2:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": 0,
"4:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": 0
},
"2021-09-27": {
"*": 14,
"1:96ee982293251b48729804c8e8b24b553eb6b887867024948d2236fd37a577ab": 1,
"1:c69a1b1a65dd336bf1df6a77afb501fc25db7fc0938cb08595a9ef473265cb4f": 7,
"2:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": 3,
"4:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": 3
},
"2021-09-28": {
"*": 0,
"1:96ee982293251b48729804c8e8b24b553eb6b887867024948d2236fd37a577ab": 0,
"1:c69a1b1a65dd336bf1df6a77afb501fc25db7fc0938cb08595a9ef473265cb4f": 0,
"2:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": 0,
"4:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": 0
}
}
}
```
---
## `Recent` function
Get the 2 most recent messages:
https://us-east4-wormhole-315720.cloudfunctions.net/devnet/recent?numRows=2
```json
{
"*": [
{
"EmitterChain": "solana",
"EmitterAddress": "c69a1b1a65dd336bf1df6a77afb501fc25db7fc0938cb08595a9ef473265cb4f",
"Sequence": "17",
"InitiatingTxID": "0xd418d81b7b2f298a37b28b97e240237b6210f00b702d2101d5e423ab5fa6366b",
"Payload": "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAgAAAAAAAAAAAAAAAJD4v2pHnzIOrQdEEaSw55ROqMnBAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"GuardiansThatSigned": [
"0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"
],
"SignedVAABytes": "AQAAAAABADjricLUCKqwbuHYEgG8dMetrH5acGibV/l4z6mNzYmyXlE0sPK4lVngQ5c+vwWU0XYVlrh1KoCsEhZF132ouo8BYUk6ywAA1PUAAcaaGxpl3TNr8d9qd6+1Afwl23/Ak4ywhZWp70cyZctPAAAAAAAAABEgAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAgAAAAAAAAAAAAAAAJD4v2pHnzIOrQdEEaSw55ROqMnBAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"QuorumTime": "2021-09-21 01:52:26.038 +0000 UTC"
},
{
"EmitterChain": "solana",
"EmitterAddress": "c69a1b1a65dd336bf1df6a77afb501fc25db7fc0938cb08595a9ef473265cb4f",
"Sequence": "16",
"InitiatingTxID": "0xd2bcadceb8c1beb7cd531e2c621733b96df96a397ea88abb948cc28c1546e139",
"Payload": "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAgAAAAAAAAAAAAAAAJD4v2pHnzIOrQdEEaSw55ROqMnBAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"GuardiansThatSigned": [
"0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"
],
"SignedVAABytes": "AQAAAAABACISbeEGlIf5z32yTEQDw2zNgS4GUj36YSTlSCqTj4lgaH663yeir/4Gi9iM6OWWc4Vct2UiE5jfv4PW8MTrdr0BYUk6sAAABBMAAcaaGxpl3TNr8d9qd6+1Afwl23/Ak4ywhZWp70cyZctPAAAAAAAAABAgAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAgAAAAAAAAAAAAAAAJD4v2pHnzIOrQdEEaSw55ROqMnBAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"QuorumTime": "2021-09-21 01:51:59.138 +0000 UTC"
}
]
}
```
Get the 2 most recent messages for each chain:
https://us-east4-wormhole-315720.cloudfunctions.net/devnet/recent?numRows=2&groupBy=chain
```json
{
"1": [
{
"EmitterChain": "solana",
"EmitterAddress": "c69a1b1a65dd336bf1df6a77afb501fc25db7fc0938cb08595a9ef473265cb4f",
"Sequence": "19",
"InitiatingTxID": "0xd7a34663ce6ee1d1c42f24513f6f37221e81e16a5153d542d2c951af1401e49d",
"Payload": "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAgAAAAAAAAAAAAAAAJD4v2pHnzIOrQdEEaSw55ROqMnBAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"GuardiansThatSigned": [
"0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"
],
"SignedVAABytes": "AQAAAAABAOcc6ah0v1QFBl8SOkzKzAme6I2Us/kGwM1QCumJNqOnGmsH82w0k+1kgxu6yHA1XKRNUbJFgz/RfHrgfXUXKeEBYUk7PwAAph4AAcaaGxpl3TNr8d9qd6+1Afwl23/Ak4ywhZWp70cyZctPAAAAAAAAABMgAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAgAAAAAAAAAAAAAAAJD4v2pHnzIOrQdEEaSw55ROqMnBAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"QuorumTime": "2021-09-21 01:54:22.107 +0000 UTC"
},
{
"EmitterChain": "solana",
"EmitterAddress": "c69a1b1a65dd336bf1df6a77afb501fc25db7fc0938cb08595a9ef473265cb4f",
"Sequence": "18",
"InitiatingTxID": "0x32e8a87d4cd8a717e4d785bb317398c4cc8e36fbe45c53b75e4e85dc1181c92b",
"Payload": "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAgAAAAAAAAAAAAAAAJD4v2pHnzIOrQdEEaSw55ROqMnBAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"GuardiansThatSigned": [
"0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"
],
"SignedVAABytes": "AQAAAAABAMCe6wEJplDwtyr7ELM15nrSSMSr6xYcuDC3qA0Mx1WKdy7WRXE13tP9SyMJ/sYESqpJtgvYnNEB3wnUeEbW2scAYUk6+AAAGp4AAcaaGxpl3TNr8d9qd6+1Afwl23/Ak4ywhZWp70cyZctPAAAAAAAAABIgAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAgAAAAAAAAAAAAAAAJD4v2pHnzIOrQdEEaSw55ROqMnBAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"QuorumTime": "2021-09-21 01:53:11.139 +0000 UTC"
}
],
"2": [
{
"EmitterChain": "ethereum",
"EmitterAddress": "0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16",
"Sequence": "23",
"InitiatingTxID": "0x0515a7375f101e79a1d5e0f5159cce98fe8fe861bd2ab548e22f43375b04defb",
"Payload": "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"GuardiansThatSigned": [
"0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"
],
"SignedVAABytes": "AQAAAAABAGclDJrZDoZ2BxHBCxpPHZFwRhwesOgV9gkcGCeqBQaTZj/PjYM/25a5owDllBvS2pAg0nkRWYJskJf+Z3vIqLcAAAAW9pRWAAAAAgAAAAAAAAAAAAAAAAKQ+xZyCK9FW7E3eAFjt7epoQwWAAAAAAAAABcPAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"QuorumTime": "2021-09-21 01:48:27.025 +0000 UTC"
},
{
"EmitterChain": "ethereum",
"EmitterAddress": "0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16",
"Sequence": "22",
"InitiatingTxID": "0x9f2dbf04c8088009b8c0ae1313baee546ac604ad5f608dcf5291bee4aa19b57b",
"Payload": "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"GuardiansThatSigned": [
"0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"
],
"SignedVAABytes": "AQAAAAABAAPsvYSDgik3jFPBiH97URck6lQxeXKixD/U3YplSwx4EZPeVWLzqgzjCb5nhBhAafYY5MmVSf8YF1cnPW4qXO0BAAAW0sNgAQAAAgAAAAAAAAAAAAAAAAKQ+xZyCK9FW7E3eAFjt7epoQwWAAAAAAAAABYPAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"QuorumTime": "2021-09-21 01:47:51.506 +0000 UTC"
}
],
"4": [
{
"EmitterChain": "bsc",
"EmitterAddress": "0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16",
"Sequence": "23",
"InitiatingTxID": "0x0515a7375f101e79a1d5e0f5159cce98fe8fe861bd2ab548e22f43375b04defb",
"Payload": "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"GuardiansThatSigned": [
"0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"
],
"SignedVAABytes": "AQAAAAABAEc9grHDBKGhicCbWPFFuEKxfEuWc+PS0C3smLeIrBkVCdm9Tg8q76MK47OeuTF+ieTAxG+d/z2B9OeMWd87oMsAAAAW9pRWAAAABAAAAAAAAAAAAAAAAAKQ+xZyCK9FW7E3eAFjt7epoQwWAAAAAAAAABcPAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"QuorumTime": "2021-09-21 01:48:26.983 +0000 UTC"
},
{
"EmitterChain": "bsc",
"EmitterAddress": "0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16",
"Sequence": "22",
"InitiatingTxID": "0x9f2dbf04c8088009b8c0ae1313baee546ac604ad5f608dcf5291bee4aa19b57b",
"Payload": "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"GuardiansThatSigned": [
"0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"
],
"SignedVAABytes": "AQAAAAABABSFvsV41QWUwqKJC+Q62PtxHWmludvu4AKQDxorezX4BzYhX0rkj9BDxPtEc+utn6Y5q/ryft+PdWX8WIDhxSMAAAAW0sNgAQAABAAAAAAAAAAAAAAAAAKQ+xZyCK9FW7E3eAFjt7epoQwWAAAAAAAAABYPAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"QuorumTime": "2021-09-21 01:47:51.419 +0000 UTC"
}
]
}
```
Get the 2 most recent messages for a specific address:
https://us-east4-wormhole-315720.cloudfunctions.net/devnet/recent?numRows=2&forChain=2&forAddress=0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16
```json
{
"2:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": [
{
"EmitterChain": "ethereum",
"EmitterAddress": "0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16",
"Sequence": "23",
"InitiatingTxID": "0x0515a7375f101e79a1d5e0f5159cce98fe8fe861bd2ab548e22f43375b04defb",
"Payload": "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"GuardiansThatSigned": [
"0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"
],
"SignedVAABytes": "AQAAAAABAGclDJrZDoZ2BxHBCxpPHZFwRhwesOgV9gkcGCeqBQaTZj/PjYM/25a5owDllBvS2pAg0nkRWYJskJf+Z3vIqLcAAAAW9pRWAAAAAgAAAAAAAAAAAAAAAAKQ+xZyCK9FW7E3eAFjt7epoQwWAAAAAAAAABcPAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"QuorumTime": "2021-09-21 01:48:27.025 +0000 UTC"
},
{
"EmitterChain": "ethereum",
"EmitterAddress": "0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16",
"Sequence": "22",
"InitiatingTxID": "0x9f2dbf04c8088009b8c0ae1313baee546ac604ad5f608dcf5291bee4aa19b57b",
"Payload": "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"GuardiansThatSigned": [
"0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"
],
"SignedVAABytes": "AQAAAAABAAPsvYSDgik3jFPBiH97URck6lQxeXKixD/U3YplSwx4EZPeVWLzqgzjCb5nhBhAafYY5MmVSf8YF1cnPW4qXO0BAAAW0sNgAQAAAgAAAAAAAAAAAAAAAAKQ+xZyCK9FW7E3eAFjt7epoQwWAAAAAAAAABYPAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"QuorumTime": "2021-09-21 01:47:51.506 +0000 UTC"
}
]
}
```
---
## `Transaction` function
Look up a message by the native transaction identifier from the user's interaction:
https://us-east4-wormhole-315720.cloudfunctions.net/devnet/transaction?id=0x0515a7375f101e79a1d5e0f5159cce98fe8fe861bd2ab548e22f43375b04defb
```json
{
"EmitterChain": "bsc",
"EmitterAddress": "0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16",
"Sequence": "23",
"InitiatingTxID": "0x0515a7375f101e79a1d5e0f5159cce98fe8fe861bd2ab548e22f43375b04defb",
"Payload": "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"GuardiansThatSigned": [
"0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"
],
"SignedVAABytes": "AQAAAAABAEc9grHDBKGhicCbWPFFuEKxfEuWc+PS0C3smLeIrBkVCdm9Tg8q76MK47OeuTF+ieTAxG+d/z2B9OeMWd87oMsAAAAW9pRWAAAABAAAAAAAAAAAAAAAAAKQ+xZyCK9FW7E3eAFjt7epoQwWAAAAAAAAABcPAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAAAAAAA3bZP5GqR1G7ilCBTn8Jf0Hxf6j4AAlraZ6SC3I261q1BLAdbD9zRURvzAgIW7YAEZEXawNBFAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"QuorumTime": "2021-09-21 01:48:26.983 +0000 UTC"
}
```
---
## `ReadRow` function
Look up a message by the MessageID values:
https://us-east4-wormhole-315720.cloudfunctions.net/devnet/readrow?emitterChain=1&emitterAddress=96ee982293251b48729804c8e8b24b553eb6b887867024948d2236fd37a577ab&sequence=0
```json
{
"EmitterChain": "solana",
"EmitterAddress": "96ee982293251b48729804c8e8b24b553eb6b887867024948d2236fd37a577ab",
"Sequence": "0",
"InitiatingTxID": "0xcc3aedef591ff7725b9a1873a006b1431a6cc6e3ae69f03f7692a6053de06b3e",
"Payload": "AQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAFQVU5L8J+OuAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAE5vdCBhIFBVTkvwn464AAAAAAAAAAAAAAAAAAAAAAAAnABsSMjL8zhJywej+TYVnMUj+VkcsZmavUWJDsX+6bczaHR0cHM6Ly93cmFwcGVkcHVua3MuY29tOjMwMDAvYXBpL3B1bmtzL21ldGFkYXRhLzM5AAAAAAAAAAAAAAAAkPi/akefMg6tB0QRpLDnlE6oycEAAg==",
"GuardiansThatSigned": [
"0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"
],
"SignedVAABytes": "AQAAAAABAP9HdhYz1TU+XRH7fVlYU9FJH8WVxknCJwDoPHvCM/2FMkRS8vuEIo/yvoW8TLkNJq7ydXhhZNzc/elwsBEEqZkBYVJaqAABTIMAAZbumCKTJRtIcpgEyOiyS1U+triHhnAklI0iNv03pXerAAAAAAAAAAABAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAAFQVU5L8J+OuAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAE5vdCBhIFBVTkvwn464AAAAAAAAAAAAAAAAAAAAAAAAnABsSMjL8zhJywej+TYVnMUj+VkcsZmavUWJDsX+6bczaHR0cHM6Ly93cmFwcGVkcHVua3MuY29tOjMwMDAvYXBpL3B1bmtzL21ldGFkYXRhLzM5AAAAAAAAAAAAAAAAkPi/akefMg6tB0QRpLDnlE6oycEAAg==",
"QuorumTime": "2021-09-27 23:58:33.874 +0000 UTC"
}
```

View File

@ -1,543 +0,0 @@
package p
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"time"
"net/http"
"github.com/wormhole-foundation/wormhole/sdk/vaa"
)
const cgBaseUrl = "https://api.coingecko.com/api/v3/"
const cgProBaseUrl = "https://pro-api.coingecko.com/api/v3/"
type CoinGeckoCoin struct {
Id string `json:"id"`
Symbol string `json:"symbol"`
Name string `json:"name"`
}
type CoinGeckoCoins []CoinGeckoCoin
type CoinGeckoMarket [2]float64
type CoinGeckoMarketRes struct {
Prices []CoinGeckoMarket `json:"prices"`
}
type CoinGeckoErrorRes struct {
Error string `json:"error"`
}
func fetchCoinGeckoCoins() map[string][]CoinGeckoCoin {
defer timeTrack(time.Now(), "fetchCoinGeckoCoins")
baseUrl := cgBaseUrl
cgApiKey := os.Getenv("COINGECKO_API_KEY")
if cgApiKey != "" {
baseUrl = cgProBaseUrl
}
url := fmt.Sprintf("%vcoins/list", baseUrl)
req, reqErr := http.NewRequest("GET", url, nil)
if reqErr != nil {
log.Fatalf("failed coins request, err: %v", reqErr)
}
if cgApiKey != "" {
req.Header.Set("X-Cg-Pro-Api-Key", cgApiKey)
}
res, resErr := http.DefaultClient.Do(req)
if resErr != nil {
log.Fatalf("failed get coins response, err: %v", resErr)
}
defer res.Body.Close()
body, bodyErr := ioutil.ReadAll(res.Body)
if bodyErr != nil {
log.Fatalf("failed decoding coins body, err: %v", bodyErr)
}
var parsed []CoinGeckoCoin
parseErr := json.Unmarshal(body, &parsed)
if parseErr != nil {
log.Printf("fetchCoinGeckoCoins failed parsing body. err %v\n", parseErr)
}
var geckoCoins = map[string][]CoinGeckoCoin{}
for _, coin := range parsed {
symbol := strings.ToLower(coin.Symbol)
geckoCoins[symbol] = append(geckoCoins[symbol], coin)
}
return geckoCoins
}
func chainIdToCoinGeckoPlatform(chain vaa.ChainID) string {
// used when symbol not found in cg's coins/list
switch chain {
case vaa.ChainIDSolana:
return "solana"
case vaa.ChainIDEthereum:
return "ethereum"
case vaa.ChainIDTerra:
return "terra"
case vaa.ChainIDBSC:
return "binance-smart-chain"
case vaa.ChainIDPolygon:
return "polygon-pos"
case vaa.ChainIDAvalanche:
return "avalanche"
case vaa.ChainIDOasis:
return "oasis"
case vaa.ChainIDAlgorand:
return "algorand"
case vaa.ChainIDAurora:
return "aurora"
case vaa.ChainIDFantom:
return "fantom"
case vaa.ChainIDKarura:
return "karura"
case vaa.ChainIDAcala:
return "acala"
case vaa.ChainIDKlaytn:
return "klay-token"
case vaa.ChainIDCelo:
return "celo"
case vaa.ChainIDNear:
return "near-protocol"
case vaa.ChainIDMoonbeam:
return "moonbeam"
case vaa.ChainIDTerra2:
return "" // TODO
case vaa.ChainIDAptos:
return "" // TODO: not currently supported
case vaa.ChainIDXpla:
return "" // TODO: not currently supported
}
return ""
}
func fetchCoinGeckoCoinFromContract(chainId vaa.ChainID, address string) CoinGeckoCoin {
baseUrl := cgBaseUrl
cgApiKey := os.Getenv("COINGECKO_API_KEY")
if cgApiKey != "" {
baseUrl = cgProBaseUrl
}
platform := chainIdToCoinGeckoPlatform(chainId)
url := fmt.Sprintf("%vcoins/%v/contract/%v", baseUrl, platform, address)
req, reqErr := http.NewRequest("GET", url, nil)
if reqErr != nil {
log.Fatalf("failed contract request, err: %v\n", reqErr)
}
if cgApiKey != "" {
req.Header.Set("X-Cg-Pro-Api-Key", cgApiKey)
}
res, resErr := http.DefaultClient.Do(req)
if resErr != nil {
log.Fatalf("failed get contract response, err: %v\n", resErr)
}
defer res.Body.Close()
body, bodyErr := ioutil.ReadAll(res.Body)
if bodyErr != nil {
log.Fatalf("failed decoding contract body, err: %v\n", bodyErr)
}
var parsed CoinGeckoCoin
parseErr := json.Unmarshal(body, &parsed)
if parseErr != nil {
log.Printf("fetchCoinGeckoCoinFromContract failed parsing body. err %v\n", parseErr)
var errRes CoinGeckoErrorRes
if err := json.Unmarshal(body, &errRes); err == nil {
if errRes.Error == "Could not find coin with the given id" {
log.Printf("Could not find CoinGecko coin by contract address, for chain %v, address, %v\n", chainId, address)
} else {
log.Println("Failed calling CoinGecko, got err", errRes.Error)
}
}
}
return parsed
}
func fetchCoinGeckoCoinId(chainId vaa.ChainID, address, symbol, name string) (coinId, foundSymbol, foundName string) {
// try coingecko, return if good
// if coingecko does not work, try chain-specific options
// initialize strings that will be returned if we find a symbol/name
// when looking up this token by contract address
newSymbol := ""
newName := ""
if symbol == "" && chainId == vaa.ChainIDSolana {
// try to lookup the symbol in solana token list, from the address
if token, ok := solanaTokens[address]; ok {
symbol = token.Symbol
name = token.Name
newSymbol = token.Symbol
newName = token.Name
}
}
if _, ok := coinGeckoCoins[strings.ToLower(symbol)]; ok {
tokens := coinGeckoCoins[strings.ToLower(symbol)]
if len(tokens) == 1 {
// only one match found for this symbol
return tokens[0].Id, newSymbol, newName
}
for _, token := range tokens {
if token.Name == name {
// found token by name match
return token.Id, newSymbol, newName
}
if strings.Contains(strings.ToLower(strings.ReplaceAll(name, " ", "")), strings.ReplaceAll(token.Id, "-", "")) {
// found token by its CoinGecko id appearing within the name
log.Println("found token by symbol and name match", name)
return token.Id, newSymbol, newName
}
}
// more than one token with this symbol and no name match; fall through to contract lookup
}
coin := fetchCoinGeckoCoinFromContract(chainId, address)
if coin.Id != "" {
return coin.Id, newSymbol, newName
}
// could not find a CoinGecko coin
return "", newSymbol, newName
}
func fetchCoinGeckoPrice(coinId string, timestamp time.Time) (float64, error) {
if coinId == "" {
return 0, errors.New("coinId is empty")
}
hourAgo := time.Now().Add(-time.Duration(1) * time.Hour)
withinLastHour := timestamp.After(hourAgo)
start, end := rangeFromTime(timestamp, 12)
baseUrl := cgBaseUrl
cgApiKey := os.Getenv("COINGECKO_API_KEY")
if cgApiKey != "" {
baseUrl = cgProBaseUrl
}
url := fmt.Sprintf("%vcoins/%v/market_chart/range?vs_currency=usd&from=%v&to=%v", baseUrl, coinId, start.Unix(), end.Unix())
req, reqErr := http.NewRequest("GET", url, nil)
if reqErr != nil {
log.Fatalf("failed coins request, err: %v\n", reqErr)
}
if cgApiKey != "" {
req.Header.Set("X-Cg-Pro-Api-Key", cgApiKey)
}
res, resErr := http.DefaultClient.Do(req)
if resErr != nil {
log.Fatalf("failed get coins response, err: %v\n", resErr)
}
if res.StatusCode >= 400 {
log.Fatal("failed to get CoinGecko prices. Status", res.Status)
}
defer res.Body.Close()
body, bodyErr := ioutil.ReadAll(res.Body)
if bodyErr != nil {
log.Fatalf("failed decoding coins body, err: %v\n", bodyErr)
}
var parsed CoinGeckoMarketRes
parseErr := json.Unmarshal(body, &parsed)
if parseErr != nil {
log.Printf("fetchCoinGeckoPrice failed parsing body. err %v\n", parseErr)
var errRes CoinGeckoErrorRes
if err := json.Unmarshal(body, &errRes); err == nil {
log.Println("Failed calling CoinGecko, got err", errRes.Error)
}
}
if len(parsed.Prices) >= 1 {
var priceIndex int
if withinLastHour {
// use the last price in the list, latest price
priceIndex = len(parsed.Prices) - 1
} else {
// use a price from the middle of the list, as that should be
// closest to the timestamp.
numPrices := len(parsed.Prices)
priceIndex = numPrices / 2
}
price := parsed.Prices[priceIndex][1]
log.Printf("found a price of $%f for %v!\n", price, coinId)
return price, nil
}
log.Println("no price found in coinGecko for", coinId)
return 0, fmt.Errorf("no price found for %v", coinId)
}
type Price struct {
USD float64 `json:"usd"`
}
type CoinGeckoCoinPrices map[string]Price
// takes a list of CoinGeckoCoinIds, returns a map of { coinId: price }.
func fetchCoinGeckoPrices(coinIds []string) (map[string]float64, error) {
baseUrl := cgBaseUrl
cgApiKey := os.Getenv("COINGECKO_API_KEY")
if cgApiKey != "" {
baseUrl = cgProBaseUrl
}
url := fmt.Sprintf("%vsimple/price?ids=%v&vs_currencies=usd", baseUrl, strings.Join(coinIds, ","))
req, reqErr := http.NewRequest("GET", url, nil)
if reqErr != nil {
log.Fatalf("failed coins request, err: %v\n", reqErr)
}
if cgApiKey != "" {
req.Header.Set("X-Cg-Pro-Api-Key", cgApiKey)
}
res, resErr := http.DefaultClient.Do(req)
if resErr != nil {
log.Fatalf("failed get coins response, err: %v\n", resErr)
}
if res.StatusCode >= 400 {
log.Fatal("failed to get CoinGecko prices. Status", res.Status)
}
defer res.Body.Close()
body, bodyErr := ioutil.ReadAll(res.Body)
if bodyErr != nil {
log.Fatalf("failed decoding coins body, err: %v\n", bodyErr)
}
var parsed CoinGeckoCoinPrices
parseErr := json.Unmarshal(body, &parsed)
if parseErr != nil {
log.Printf("fetchCoinGeckoPrice failed parsing body. err %v\n", parseErr)
var errRes CoinGeckoErrorRes
if err := json.Unmarshal(body, &errRes); err == nil {
log.Println("Failed calling CoinGecko, got err", errRes.Error)
}
}
priceMap := map[string]float64{}
for coinId, price := range parsed {
price := price.USD
priceMap[coinId] = price
}
return priceMap, nil
}
// takes a list of CoinGeckoCoinIds, returns a map of { coinId: price }.
// makes batches of requests to CoinGecko.
func fetchTokenPrices(ctx context.Context, coinIds []string) map[string]float64 {
allPrices := map[string]float64{}
// Split the list into batches, otherwise the request could be too large
batch := 100
for i := 0; i < len(coinIds); i += batch {
j := i + batch
if j > len(coinIds) {
j = len(coinIds)
}
prices, err := fetchCoinGeckoPrices(coinIds[i:j])
if err != nil {
log.Fatalf("failed to get price for coinIds. err %v", err)
}
for coinId, price := range prices {
allPrices[coinId] = price
}
coinGeckoRateLimitSleep()
}
return allPrices
}
func coinGeckoRateLimitSleep() {
// CoinGecko rate limit is low (5/second), be very cautious about bursty requests
time.Sleep(time.Millisecond * 200)
}
// fetchTokenPriceHistories returns the daily prices for coinIds from start to end
func fetchTokenPriceHistories(ctx context.Context, coinIds []string, start time.Time, end time.Time) map[string]map[string]float64 {
log.Printf("Fetching price history for %d tokens\n", len(coinIds))
priceHistories := map[string]map[string]float64{}
baseUrl := cgBaseUrl
cgApiKey := os.Getenv("COINGECKO_API_KEY")
if cgApiKey != "" {
baseUrl = cgProBaseUrl
}
startTimestamp := start.Unix()
endTimestamp := end.Unix()
for _, coinId := range coinIds {
// sleep before each request to respect the rate limit; a defer here
// would postpone every sleep until the function returns
coinGeckoRateLimitSleep()
url := fmt.Sprintf("%vcoins/%v/market_chart/range?vs_currency=usd&from=%v&to=%v", baseUrl, coinId, startTimestamp, endTimestamp)
req, reqErr := http.NewRequest("GET", url, nil)
if reqErr != nil {
log.Fatalf("failed coins request, err: %v\n", reqErr)
}
if cgApiKey != "" {
req.Header.Set("X-Cg-Pro-Api-Key", cgApiKey)
}
res, resErr := http.DefaultClient.Do(req)
if resErr != nil {
log.Fatalf("failed get coins response, err: %v\n", resErr)
}
if res.StatusCode >= 400 {
res.Body.Close()
errorMsg := fmt.Sprintf("failed to get CoinGecko price history for %s, Status: %s", coinId, res.Status)
if res.StatusCode == 404 {
log.Println(errorMsg)
continue
} else {
log.Fatalln(errorMsg)
}
}
body, bodyErr := ioutil.ReadAll(res.Body)
// close explicitly inside the loop; a defer would keep every response
// body open until the function returns
res.Body.Close()
if bodyErr != nil {
log.Fatalf("failed decoding coins body, err: %v\n", bodyErr)
}
var parsed CoinGeckoMarketRes
parseErr := json.Unmarshal(body, &parsed)
if parseErr != nil {
log.Printf("fetchTokenPriceHistories failed parsing body. err %v\n", parseErr)
var errRes CoinGeckoErrorRes
if err := json.Unmarshal(body, &errRes); err == nil {
log.Println("Failed calling CoinGecko, got err", errRes.Error)
}
} else {
for _, market := range parsed.Prices {
seconds := int64(market[0]) / 1e3
date := time.Unix(seconds, 0).Format("2006-01-02")
price := market[1]
if _, ok := priceHistories[date]; !ok {
priceHistories[date] = map[string]float64{}
}
priceHistories[date][coinId] = price
}
}
}
return priceHistories
}
const solanaTokenListURL = "https://raw.githubusercontent.com/solana-labs/token-list/main/src/tokens/solana.tokenlist.json"
type SolanaToken struct {
Address string `json:"address"`
Symbol string `json:"symbol"`
Name string `json:"name"`
Decimals int `json:"decimals"`
}
type SolanaTokenListRes struct {
Tokens []SolanaToken `json:"tokens"`
}
func fetchSolanaTokenList() map[string]SolanaToken {
defer timeTrack(time.Now(), "fetchSolanaTokenList")
req, reqErr := http.NewRequest("GET", solanaTokenListURL, nil)
if reqErr != nil {
log.Fatalf("failed solana token list request, err: %v", reqErr)
}
res, resErr := http.DefaultClient.Do(req)
if resErr != nil {
log.Fatalf("failed get solana token list response, err: %v", resErr)
}
defer res.Body.Close()
body, bodyErr := ioutil.ReadAll(res.Body)
if bodyErr != nil {
log.Fatalf("failed decoding solana token list body, err: %v", bodyErr)
}
var parsed SolanaTokenListRes
parseErr := json.Unmarshal(body, &parsed)
if parseErr != nil {
log.Printf("fetchSolanaTokenList failed parsing body. err %v\n", parseErr)
}
var solTokens = map[string]SolanaToken{}
for _, token := range parsed.Tokens {
if _, ok := solTokens[token.Address]; !ok {
solTokens[token.Address] = token
}
}
return solTokens
}
const solanaBeachPublicBaseURL = "https://prod-api.solana.surf/v1/"
const solanaBeachPrivateBaseURL = "https://api.solanabeach.io/v1/"
type SolanaBeachAccountOwner struct {
Owner SolanaBeachAccountOwnerAddress `json:"owner"`
}
type SolanaBeachAccountOwnerAddress struct {
Address string `json:"address"`
}
type SolanaBeachAccountResponse struct {
Value struct {
Extended struct {
SolanaBeachAccountOwner
} `json:"extended"`
} `json:"value"`
}
func fetchSolanaAccountOwner(account string) string {
ctx, cancel := context.WithTimeout(context.Background(), time.Second*15)
defer cancel()
baseUrl := solanaBeachPublicBaseURL
sbApiKey := os.Getenv("SOLANABEACH_API_KEY")
if sbApiKey != "" {
baseUrl = solanaBeachPrivateBaseURL
}
url := fmt.Sprintf("%vaccount/%v", baseUrl, account)
req, reqErr := http.NewRequestWithContext(ctx, "GET", url, nil)
if reqErr != nil {
log.Printf("failed solanabeach request, err: %v", reqErr)
return ""
}
if sbApiKey != "" {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %v", sbApiKey))
}
res, resErr := http.DefaultClient.Do(req)
if resErr != nil {
log.Printf("failed get solana beach account response, err: %v", resErr)
return ""
}
defer res.Body.Close()
body, bodyErr := ioutil.ReadAll(res.Body)
if bodyErr != nil {
log.Printf("failed decoding solana beach account body, err: %v", bodyErr)
return ""
}
var parsed SolanaBeachAccountResponse
parseErr := json.Unmarshal(body, &parsed)
if parseErr != nil {
log.Printf("fetchSolanaAccountOwner failed parsing body. err %v\n", parseErr)
return ""
}
address := parsed.Value.Extended.Owner.Address
if address == "" {
log.Println("failed to find owner address for Solana account", account)
}
return address
}

View File

@ -1,221 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"context"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"sort"
"strconv"
"time"
"cloud.google.com/go/bigtable"
)
// fetch rows by matching payload value
func FindValues(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
var columnFamily, columnName, value, emitterChain, emitterAddress, vaaBytes, numRows string
// allow GET requests with querystring params, or POST requests with json body.
switch r.Method {
case http.MethodGet:
queryParams := r.URL.Query()
columnFamily = queryParams.Get("columnFamily")
columnName = queryParams.Get("columnName")
value = queryParams.Get("value")
emitterChain = queryParams.Get("emitterChain")
emitterAddress = queryParams.Get("emitterAddress")
vaaBytes = queryParams.Get("vaaBytes")
numRows = queryParams.Get("numRows")
// check for empty values
if columnFamily == "" || columnName == "" || value == "" {
fmt.Fprint(w, "query params ['columnFamily', 'columnName', 'value'] cannot be empty")
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
case http.MethodPost:
// declare request body properties
var d struct {
ColumnFamily string `json:"columnFamily"`
ColumnName string `json:"columnName"`
Value string `json:"value"`
EmitterChain string `json:"emitterChain"`
EmitterAddress string `json:"emitterAddress"`
VAABytes string `json:"vaaBytes"`
NumRows string `json:"numRows"`
}
// deserialize request body
if err := json.NewDecoder(r.Body).Decode(&d); err != nil {
switch err {
case io.EOF:
fmt.Fprint(w, "request body required")
return
default:
log.Printf("json.NewDecoder: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
// check for empty values
if d.ColumnFamily == "" || d.ColumnName == "" || d.Value == "" {
fmt.Fprint(w, "body values ['columnFamily', 'columnName', 'value'] cannot be empty")
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
columnFamily = d.ColumnFamily
columnName = d.ColumnName
value = d.Value
emitterChain = d.EmitterChain
emitterAddress = d.EmitterAddress
vaaBytes = d.VAABytes
numRows = d.NumRows
default:
http.Error(w, "405 - Method Not Allowed", http.StatusMethodNotAllowed)
log.Println("Method Not Allowed")
return
}
var resultCount uint64
if numRows == "" {
resultCount = 0
} else {
var convErr error
resultCount, convErr = strconv.ParseUint(numRows, 10, 64)
if convErr != nil {
fmt.Fprint(w, "numRows must be an integer")
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
if columnFamily != "TokenTransferPayload" &&
columnFamily != "AssetMetaPayload" &&
columnFamily != "NFTTransferPayload" &&
columnFamily != "TokenTransferDetails" &&
columnFamily != "ChainDetails" {
fmt.Fprint(w, "columnFamily must be one of: ['TokenTransferPayload', 'AssetMetaPayload', 'NFTTransferPayload', 'TokenTransferDetails', 'ChainDetails']")
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
prefix := ""
if emitterChain != "" {
prefix = emitterChain
if emitterAddress != "" {
prefix = emitterChain + ":" + emitterAddress // row keys are colon-delimited
}
}
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
results := []bigtable.Row{}
err := tbl.ReadRows(ctx, bigtable.PrefixRange(prefix), func(row bigtable.Row) bool {
results = append(results, row)
return true
}, bigtable.RowFilter(
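// ConditionFilter: rows with a cell matching the family/column/value predicate
// pass through with only the latest version of each cell; all other rows are blocked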
bigtable.ConditionFilter(
bigtable.ChainFilters(
bigtable.FamilyFilter(columnFamily),
bigtable.ColumnFilter(columnName),
bigtable.ValueFilter(value),
),
bigtable.ChainFilters(
bigtable.PassAllFilter(),
bigtable.LatestNFilter(1),
),
bigtable.BlockAllFilter(),
)))
if err != nil {
http.Error(w, "Error reading rows", http.StatusInternalServerError)
log.Printf("tbl.ReadRows(): %v", err)
return
}
if resultCount > 0 {
// resultCount == 0 means no limit; when a limit was requested,
// sort the results and return the n latest.
// sort the results to be newest first
sort.Slice(results, func(i, j int) bool {
// bigtable rows don't have timestamps; use a cell timestamp all rows will have.
var iTimestamp bigtable.Timestamp
var jTimestamp bigtable.Timestamp
// rows may have: only MessagePublication, only QuorumState, or both.
// find a timestamp for each row, try to use MessagePublication, if it exists:
if len(results[i]["MessagePublication"]) >= 1 {
iTimestamp = results[i]["MessagePublication"][0].Timestamp
} else if len(results[i]["QuorumState"]) >= 1 {
iTimestamp = results[i]["QuorumState"][0].Timestamp
}
if len(results[j]["MessagePublication"]) >= 1 {
jTimestamp = results[j]["MessagePublication"][0].Timestamp
} else if len(results[j]["QuorumState"]) >= 1 {
jTimestamp = results[j]["QuorumState"][0].Timestamp
}
return iTimestamp > jTimestamp
})
// trim the result down to the requested amount
num := uint64(len(results))
if num > resultCount {
results = results[:resultCount]
}
}
details := []Details{}
for _, result := range results {
detail := makeDetails(result)
// create a slimmer version of the details struct
slimDetails := Details{
Summary: Summary{
EmitterChain: detail.EmitterChain,
EmitterAddress: detail.EmitterAddress,
Sequence: detail.Sequence,
InitiatingTxID: detail.InitiatingTxID,
Payload: detail.Payload,
QuorumTime: detail.QuorumTime,
TransferDetails: detail.TransferDetails,
},
TokenTransferPayload: detail.TokenTransferPayload,
AssetMetaPayload: detail.AssetMetaPayload,
NFTTransferPayload: detail.NFTTransferPayload,
ChainDetails: detail.ChainDetails,
}
if vaaBytes != "" {
slimDetails.SignedVAABytes = detail.SignedVAABytes
}
details = append(details, slimDetails)
}
jsonBytes, err := json.Marshal(details)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}

View File

@ -1,20 +0,0 @@
module github.com/certusone/wormhole/event_database/cloud_functions
go 1.16
// cloud runtime is go 1.16. just for reference.
require (
cloud.google.com/go/bigtable v1.12.0
cloud.google.com/go/pubsub v1.17.1
cloud.google.com/go/storage v1.18.2
github.com/cosmos/cosmos-sdk v0.44.5
github.com/gagliardetto/solana-go v1.0.2
github.com/holiman/uint256 v1.2.0
github.com/wormhole-foundation/wormhole/sdk v0.0.0-20221018051913-c289bd9f57e0
)
replace (
github.com/btcsuite/btcd => github.com/btcsuite/btcd v0.23.0
github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1
)

File diff suppressed because it is too large

View File

@ -1,364 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"context"
"encoding/json"
"fmt"
"html"
"io"
"log"
"net/http"
"strconv"
"sync"
"time"
"cloud.google.com/go/bigtable"
)
// warmNFTCache keeps some data around between invocations, so that we don't have
// to do a full table scan with each request.
// https://cloud.google.com/functions/docs/bestpractices/tips#use_global_variables_to_reuse_objects_in_future_invocations
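// shape: date -> query cache key (prefix + key segment count) -> group key -> count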
var warmNFTCache = map[string]map[string]map[string]int{}
var muWarmNFTCache sync.RWMutex
var warmNFTCacheFilePath = "nft-cache.json"
func fetchNFTRowsInInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start, end time.Time) ([]bigtable.Row, error) {
rows := []bigtable.Row{}
err := tbl.ReadRows(ctx, bigtable.PrefixRange(prefix), func(row bigtable.Row) bool {
rows = append(rows, row)
return true
}, bigtable.RowFilter(
bigtable.ConditionFilter(
bigtable.ChainFilters(
bigtable.FamilyFilter(columnFamilies[1]),
bigtable.CellsPerRowLimitFilter(1), // only the first cell in column
bigtable.TimestampRangeFilter(start, end), // within time range
bigtable.StripValueFilter(), // no columns/values, just the row.Key()
),
bigtable.ChainFilters(
bigtable.FamilyFilter(columnFamilies[4]),
bigtable.ColumnFilter("PayloadId"),
bigtable.ValueFilter("1"),
),
bigtable.BlockAllFilter(),
)))
return rows, err
}
func createNFTCountsOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, numPrevDays int, keySegments int) (map[string]map[string]int, error) {
if _, ok := warmNFTCache["2021-09-13"]; !ok && loadCache {
loadJsonToInterface(ctx, warmNFTCacheFilePath, &muWarmNFTCache, &warmNFTCache)
}
results := map[string]map[string]int{}
now := time.Now().UTC()
var intervalsWG sync.WaitGroup
// there will be a query for each previous day, plus today
intervalsWG.Add(numPrevDays + 1)
// create the unique identifier for this query, for cache
cachePrefix := prefix
if prefix == "" {
cachePrefix = "*"
}
cachePrefix = fmt.Sprintf("%v-%v", cachePrefix, keySegments)
cacheNeedsUpdate := false
for daysAgo := 0; daysAgo <= numPrevDays; daysAgo++ {
go func(tbl *bigtable.Table, ctx context.Context, prefix string, daysAgo int) {
// start is the start of day (SOD), end is the end of day (EOD)
// "0 daysAgo start" is 00:00:00 of the current day
// "0 daysAgo end" is 23:59:59 of the current day (the future)
// calculate the start and end times for the query
hoursAgo := (24 * daysAgo)
daysAgoDuration := -time.Duration(hoursAgo) * time.Hour
n := now.Add(daysAgoDuration)
year := n.Year()
month := n.Month()
day := n.Day()
loc := n.Location()
start := time.Date(year, month, day, 0, 0, 0, 0, loc)
end := time.Date(year, month, day, 23, 59, 59, maxNano, loc)
dateStr := start.Format("2006-01-02")
muWarmNFTCache.Lock()
// initialize the map for this date in the result set
results[dateStr] = map[string]int{"*": 0}
// check to see if there is cache data for this date/query
if dateCache, ok := warmNFTCache[dateStr]; ok && useCache(dateStr) {
// have a cache for this date
if val, ok := dateCache[cachePrefix]; ok {
// have a cache for this query
if daysAgo >= 1 {
// only use the cache for yesterday and older
results[dateStr] = val
muWarmNFTCache.Unlock()
intervalsWG.Done()
return
}
} else {
// no cache for this query
warmNFTCache[dateStr][cachePrefix] = map[string]int{}
}
} else {
// no cache for this date, initialize the map
warmNFTCache[dateStr] = map[string]map[string]int{}
warmNFTCache[dateStr][cachePrefix] = map[string]int{}
}
muWarmNFTCache.Unlock()
var result []bigtable.Row
var fetchErr error
defer intervalsWG.Done()
result, fetchErr = fetchNFTRowsInInterval(tbl, ctx, prefix, start, end)
if fetchErr != nil {
log.Fatalf("fetchNFTRowsInInterval returned an error: %v", fetchErr)
}
// iterate through the rows and increment the count
for _, row := range result {
countBy := makeGroupKey(keySegments, row.Key())
if keySegments != 0 {
// increment the total count
results[dateStr]["*"] = results[dateStr]["*"] + 1
}
results[dateStr][countBy] = results[dateStr][countBy] + 1
}
if cacheData, ok := warmNFTCache[dateStr][cachePrefix]; !ok || len(cacheData) <= 1 {
// set the result in the cache
muWarmNFTCache.Lock()
warmNFTCache[dateStr][cachePrefix] = results[dateStr]
muWarmNFTCache.Unlock()
cacheNeedsUpdate = true
}
}(tbl, ctx, prefix, daysAgo)
}
intervalsWG.Wait()
if cacheNeedsUpdate {
persistInterfaceToJson(ctx, warmNFTCacheFilePath, &muWarmNFTCache, warmNFTCache)
}
// create a set of all the keys from all dates, to ensure the result objects all have the same keys
seenKeySet := map[string]bool{}
for _, v := range results {
for key := range v {
seenKeySet[key] = true
}
}
// ensure each date object has the same keys:
for date := range results {
for key := range seenKeySet {
if _, ok := results[date][key]; !ok {
// add the missing key to the map
results[date][key] = 0
}
}
}
return results, nil
}
// returns the count of the rows in the query response
func nftMessageCountForInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start, end time.Time, keySegments int) (map[string]int, error) {
// query for all rows in time range, return result count
results, fetchErr := fetchNFTRowsInInterval(tbl, ctx, prefix, start, end)
if fetchErr != nil {
log.Printf("fetchRowsInInterval returned an error: %v", fetchErr)
return nil, fetchErr
}
result := map[string]int{"*": len(results)}
// iterate through the rows and increment the count for each index
if keySegments != 0 {
for _, row := range results {
countBy := makeGroupKey(keySegments, row.Key())
result[countBy] = result[countBy] + 1
}
}
return result, nil
}
// get number of recent transactions in the last 24 hours, and daily for a period
// optionally group by an EmitterChain or EmitterAddress
// optionally query for recent rows of a given EmitterChain or EmitterAddress
func NFTs(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
var last24Hours, numDays, groupBy, forChain, forAddress string
// allow GET requests with querystring params, or POST requests with json body.
switch r.Method {
case http.MethodGet:
queryParams := r.URL.Query()
last24Hours = queryParams.Get("last24Hours")
numDays = queryParams.Get("numDays")
groupBy = queryParams.Get("groupBy")
forChain = queryParams.Get("forChain")
forAddress = queryParams.Get("forAddress")
readyCheck := queryParams.Get("readyCheck")
if readyCheck != "" {
// for running in devnet
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, html.EscapeString("ready"))
return
}
case http.MethodPost:
// declare request body properties
var d struct {
Last24Hours string `json:"last24Hours"`
NumDays string `json:"numDays"`
GroupBy string `json:"groupBy"`
ForChain string `json:"forChain"`
ForAddress string `json:"forAddress"`
}
// deserialize request body
if err := json.NewDecoder(r.Body).Decode(&d); err != nil {
switch err {
case io.EOF:
// do nothing, empty body is ok
default:
log.Printf("json.NewDecoder: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
last24Hours = d.Last24Hours
numDays = d.NumDays
groupBy = d.GroupBy
forChain = d.ForChain
forAddress = d.ForAddress
default:
http.Error(w, "405 - Method Not Allowed", http.StatusMethodNotAllowed)
log.Println("Method Not Allowed")
return
}
// default query period is all time
queryDays := int(time.Now().UTC().Sub(releaseDay).Hours() / 24)
// if the request included numDays, set the query period to that
if numDays != "" {
var convErr error
queryDays, convErr = strconv.Atoi(numDays)
if convErr != nil {
fmt.Fprint(w, "numDays must be an integer")
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
// create the rowkey prefix for querying
prefix := ""
if forChain != "" {
prefix = forChain
if groupBy == "" {
// if the request is forChain, and groupBy is empty, set it to groupBy chain
groupBy = "chain"
}
if forAddress != "" {
// if the request is forAddress, always groupBy address
groupBy = "address"
prefix = forChain + ":" + forAddress
}
}
// use the groupBy value to determine how many segments of the rowkey should be used.
keySegments := 0
if groupBy == "chain" {
keySegments = 1
}
if groupBy == "address" {
keySegments = 2
}
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
var wg sync.WaitGroup
// total of last 24 hours
var last24HourCount map[string]int
if last24Hours != "" {
wg.Add(1)
go func(prefix string, keySegments int) {
var err error
last24HourInterval := -time.Duration(24) * time.Hour
now := time.Now().UTC()
start := now.Add(last24HourInterval)
defer wg.Done()
last24HourCount, err = nftMessageCountForInterval(tbl, ctx, prefix, start, now, keySegments)
if err != nil {
log.Printf("failed getting count for interval, err: %v", err)
}
}(prefix, keySegments)
}
periodTotals := map[string]int{}
var dailyTotals map[string]map[string]int
wg.Add(1)
go func(prefix string, keySegments int, queryDays int) {
var err error
defer wg.Done()
dailyTotals, err = createNFTCountsOfInterval(tbl, ctx, prefix, queryDays, keySegments)
if err != nil {
log.Fatalf("failed getting createNFTCountsOfInterval err %v", err)
}
// sum all the days to create a map with totals for the query period
for _, vals := range dailyTotals {
for chain, amount := range vals {
periodTotals[chain] += amount
}
}
}(prefix, keySegments, queryDays)
wg.Wait()
result := &totalsResult{
LastDayCount: last24HourCount,
TotalCount: periodTotals,
TotalCountDurationDays: queryDays,
DailyTotals: dailyTotals,
}
jsonBytes, err := json.Marshal(result)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}

View File

@ -1,160 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"context"
"encoding/json"
"log"
"net/http"
"sync"
"time"
"cloud.google.com/go/bigtable"
)
type transfersFromResult struct {
Daily map[string]map[string]float64
Total float64
}
// an in-memory cache of previously calculated results
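// Daily is keyed date -> leaving chain -> notional USD; "*" holds the all-chains total for the day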
var transfersFromCache transfersFromResult
var muTransfersFromCache sync.RWMutex
var transfersFromFilePath = "notional-transferred-from.json"
// finds the daily amount transferred from each chain from the specified start to the present.
func createTransfersFromOfInterval(tbl *bigtable.Table, ctx context.Context, start time.Time) {
if len(transfersFromCache.Daily) == 0 && loadCache {
loadJsonToInterface(ctx, transfersFromFilePath, &muTransfersFromCache, &transfersFromCache)
}
now := time.Now().UTC()
numPrevDays := int(now.Sub(start).Hours() / 24)
var intervalsWG sync.WaitGroup
// there will be a query for each previous day, plus today
intervalsWG.Add(numPrevDays + 1)
for daysAgo := 0; daysAgo <= numPrevDays; daysAgo++ {
go func(tbl *bigtable.Table, ctx context.Context, daysAgo int) {
defer intervalsWG.Done()
// start is the start of day (SOD), end is the end of day (EOD)
// "0 daysAgo start" is 00:00:00 of the current day
// "0 daysAgo end" is 23:59:59 of the current day (the future)
// calculate the start and end times for the query
hoursAgo := (24 * daysAgo)
daysAgoDuration := -time.Duration(hoursAgo) * time.Hour
n := now.Add(daysAgoDuration)
year := n.Year()
month := n.Month()
day := n.Day()
loc := n.Location()
start := time.Date(year, month, day, 0, 0, 0, 0, loc)
end := time.Date(year, month, day, 23, 59, 59, maxNano, loc)
dateStr := start.Format("2006-01-02")
muTransfersFromCache.Lock()
// check to see if there is cache data for this date/query
if _, ok := transfersFromCache.Daily[dateStr]; ok && useCache(dateStr) {
// have a cache for this date
if daysAgo >= 1 {
// only use the cache for yesterday and older
muTransfersFromCache.Unlock()
return
}
}
// no cache for this query, initialize the map
if transfersFromCache.Daily == nil {
transfersFromCache.Daily = map[string]map[string]float64{}
}
transfersFromCache.Daily[dateStr] = map[string]float64{"*": 0}
muTransfersFromCache.Unlock()
for _, chainId := range tvlChainIDs {
queryResult := fetchTransferRowsInInterval(tbl, ctx, chainIDRowPrefix(chainId), start, end)
// iterate through the rows and increment the amounts
for _, row := range queryResult {
if _, ok := transfersFromCache.Daily[dateStr][row.LeavingChain]; !ok {
transfersFromCache.Daily[dateStr][row.LeavingChain] = 0
}
transfersFromCache.Daily[dateStr]["*"] = transfersFromCache.Daily[dateStr]["*"] + row.Notional
transfersFromCache.Daily[dateStr][row.LeavingChain] = transfersFromCache.Daily[dateStr][row.LeavingChain] + row.Notional
}
}
}(tbl, ctx, daysAgo)
}
intervalsWG.Wait()
// having consistent keys in each object is helpful for clients such as the explorer GUI
transfersFromCache.Total = 0
seenChainSet := map[string]bool{}
for _, chains := range transfersFromCache.Daily {
for chain, amount := range chains {
seenChainSet[chain] = true
if chain == "*" {
transfersFromCache.Total += amount
}
}
}
for date, chains := range transfersFromCache.Daily {
for chain := range seenChainSet {
if _, ok := chains[chain]; !ok {
transfersFromCache.Daily[date][chain] = 0
}
}
}
persistInterfaceToJson(ctx, transfersFromFilePath, &muTransfersFromCache, transfersFromCache)
}
// finds the value that has been transferred from each chain
func ComputeNotionalTransferredFrom(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
ctx := context.Background()
createTransfersFromOfInterval(tbl, ctx, releaseDay)
w.WriteHeader(http.StatusOK)
}
func NotionalTransferredFrom(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
var result transfersFromResult
loadJsonToInterface(ctx, transfersFromFilePath, &muTransfersFromCache, &result)
jsonBytes, err := json.Marshal(result)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}

View File

@ -1,432 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"context"
"encoding/json"
"io"
"log"
"net/http"
"sort"
"strconv"
"sync"
"time"
"cloud.google.com/go/bigtable"
)
type cumulativeResult struct {
AllTime map[string]map[string]float64
AllTimeDurationDays int
Daily map[string]map[string]map[string]float64
}
// an in-memory cache of previously calculated results
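// shape: cache prefix -> date -> destination chain -> token symbol -> cumulative notional USD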
var warmCumulativeCache = map[string]map[string]map[string]map[string]float64{}
var muWarmCumulativeCache sync.RWMutex
var warmCumulativeCacheFilePath = "notional-transferred-to-cumulative-cache.json"
var transferredToUpToYesterday = map[string]map[string]map[string]map[string]float64{}
var muTransferredToUpToYesterday sync.RWMutex
var transferredToUpToYesterdayFilePath = "notional-transferred-to-up-to-yesterday-cache.json"
// calculates the amount of each symbol transferred to each chain.
func transferredToSince(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]float64 {
if _, ok := transferredToUpToYesterday["*"]; !ok && loadCache {
loadJsonToInterface(ctx, transferredToUpToYesterdayFilePath, &muTransferredToUpToYesterday, &transferredToUpToYesterday)
}
now := time.Now().UTC()
today := now.Format("2006-01-02")
oneDayAgo := -time.Duration(24) * time.Hour
yesterday := now.Add(oneDayAgo).Format("2006-01-02")
result := map[string]map[string]float64{"*": {"*": 0}}
// create the unique identifier for this query, for cache
cachePrefix := createCachePrefix(prefix)
muTransferredToUpToYesterday.Lock()
if _, ok := transferredToUpToYesterday[cachePrefix]; !ok {
transferredToUpToYesterday[cachePrefix] = map[string]map[string]map[string]float64{}
}
if cacheData, ok := transferredToUpToYesterday[cachePrefix][yesterday]; ok {
// cache has data through midnight yesterday
for chain, symbols := range cacheData {
result[chain] = map[string]float64{}
for symbol, amount := range symbols {
result[chain][symbol] = amount
}
}
// set the start to be the start of today
start = time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
}
muTransferredToUpToYesterday.Unlock()
dailyTotals := amountsTransferredToInInterval(tbl, ctx, prefix, start)
// loop through the query results to combine cache + fresh data
for _, chains := range dailyTotals {
for chain, tokens := range chains {
// ensure the chain exists in the result map
if _, ok := result[chain]; !ok {
result[chain] = map[string]float64{"*": 0}
}
for symbol, amount := range tokens {
if _, ok := result[chain][symbol]; !ok {
result[chain][symbol] = 0
}
// add the amount of this symbol transferred this day to the
// amount already in the result (amount of this symbol previously transferred)
result[chain][symbol] = result[chain][symbol] + amount
}
}
}
muTransferredToUpToYesterday.Lock()
if _, ok := transferredToUpToYesterday[cachePrefix][yesterday]; !ok {
transferredToUpToYesterday[cachePrefix][yesterday] = map[string]map[string]float64{}
// no cache, populate it
upToYesterday := map[string]map[string]float64{}
for chain, tokens := range result {
upToYesterday[chain] = map[string]float64{}
for symbol, amount := range tokens {
upToYesterday[chain][symbol] = amount
}
}
for chain, tokens := range dailyTotals[today] {
for symbol, amount := range tokens {
// subtract the amounts from today, in order to create an "upToYesterday" amount
upToYesterday[chain][symbol] = result[chain][symbol] - amount
}
}
// loop again to assign values to the cache
for chain, tokens := range upToYesterday {
if _, ok := transferredToUpToYesterday[cachePrefix][yesterday][chain]; !ok {
transferredToUpToYesterday[cachePrefix][yesterday][chain] = map[string]float64{}
}
for symbol, amount := range tokens {
transferredToUpToYesterday[cachePrefix][yesterday][chain][symbol] = amount
}
}
muTransferredToUpToYesterday.Unlock()
// write the updated cache to disc
persistInterfaceToJson(ctx, transferredToUpToYesterdayFilePath, &muTransferredToUpToYesterday, transferredToUpToYesterday)
} else {
muTransferredToUpToYesterday.Unlock()
}
return result
}
// returns a slice of dates (strings) for each day in the period. Dates formatted: "2021-12-30".
func getDaysInRange(start, end time.Time) []string {
now := time.Now().UTC()
numDays := int(end.Sub(start).Hours() / 24)
days := []string{}
for daysAgo := 0; daysAgo <= numDays; daysAgo++ {
hoursAgo := (24 * daysAgo)
daysAgoDuration := -time.Duration(hoursAgo) * time.Hour
n := now.Add(daysAgoDuration)
year := n.Year()
month := n.Month()
day := n.Day()
loc := n.Location()
start := time.Date(year, month, day, 0, 0, 0, 0, loc)
dateStr := start.Format("2006-01-02")
days = append(days, dateStr)
}
return days
}
// calculates a running total of notional value transferred, by symbol, since the start time specified.
func createCumulativeAmountsOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]map[string]float64 {
if _, ok := warmCumulativeCache["*"]; !ok && loadCache {
loadJsonToInterface(ctx, warmCumulativeCacheFilePath, &muWarmCumulativeCache, &warmCumulativeCache)
}
now := time.Now().UTC()
today := now.Format("2006-01-02")
cachePrefix := createCachePrefix(prefix)
cacheNeedsUpdate := false
muWarmCumulativeCache.Lock()
if _, ok := warmCumulativeCache[cachePrefix]; !ok {
warmCumulativeCache[cachePrefix] = map[string]map[string]map[string]float64{}
}
muWarmCumulativeCache.Unlock()
results := map[string]map[string]map[string]float64{}
// fetch the amounts of transfers by symbol, for each day since launch (releaseDay)
dailyAmounts := amountsTransferredToInInterval(tbl, ctx, prefix, releaseDay)
// create a slice of dates, order oldest first
dateKeys := make([]string, 0, len(dailyAmounts))
for k := range dailyAmounts {
dateKeys = append(dateKeys, k)
}
sort.Strings(dateKeys)
// iterate through the dates in the result set, and accumulate the amounts
// of each token transfer by symbol, based on the destination of the transfer.
for i, date := range dateKeys {
results[date] = map[string]map[string]float64{"*": {"*": 0}}
muWarmCumulativeCache.RLock()
if dateCache, ok := warmCumulativeCache[cachePrefix][date]; ok && dateCache != nil && useCache(date) {
// have a cached value for this day, use it.
// iterate through cache and copy values to the result
for chain, tokens := range dateCache {
results[date][chain] = map[string]float64{}
for token, amount := range tokens {
results[date][chain][token] = amount
}
}
muWarmCumulativeCache.RUnlock()
} else {
// no cached value for this day, must calculate it
muWarmCumulativeCache.RUnlock()
if i == 0 {
// special case for first day, no need to sum.
for chain, tokens := range dailyAmounts[date] {
results[date][chain] = map[string]float64{}
for token, amount := range tokens {
results[date][chain][token] = amount
}
}
} else {
// find the string of the previous day
prevDate := dateKeys[i-1]
prevDayAmounts := results[prevDate]
thisDayAmounts := dailyAmounts[date]
// iterate through all the transfers and add the previous day's amount, if it exists
for chain, thisDaySymbols := range thisDayAmounts {
// create a union of the symbols from this day, and previous days
symbolsUnion := map[string]string{}
for symbol := range prevDayAmounts[chain] {
symbolsUnion[symbol] = symbol
}
for symbol := range thisDaySymbols {
symbolsUnion[symbol] = symbol
}
// initialize the chain/symbol map for this date
if _, ok := results[date][chain]; !ok {
results[date][chain] = map[string]float64{"*": 0}
}
// iterate through the union of symbols, creating an amount for each one,
// and adding it to the results.
for symbol := range symbolsUnion {
thisDayAmount := float64(0)
if amt, ok := thisDaySymbols[symbol]; ok {
thisDayAmount = amt
}
prevDayAmount := float64(0)
if amt, ok := results[prevDate][chain][symbol]; ok {
prevDayAmount = amt
}
cumulativeAmount := prevDayAmount + thisDayAmount
results[date][chain][symbol] = cumulativeAmount
}
}
}
// don't cache today
if date != today {
// set the result in the cache
muWarmCumulativeCache.Lock()
if _, ok := warmCumulativeCache[cachePrefix][date]; !ok || !useCache(date) {
// cache does not have this date, persist it for other instances.
warmCumulativeCache[cachePrefix][date] = map[string]map[string]float64{}
for chain, tokens := range results[date] {
warmCumulativeCache[cachePrefix][date][chain] = map[string]float64{}
for token, amount := range tokens {
warmCumulativeCache[cachePrefix][date][chain][token] = amount
}
}
cacheNeedsUpdate = true
}
muWarmCumulativeCache.Unlock()
}
}
}
if cacheNeedsUpdate {
persistInterfaceToJson(ctx, warmCumulativeCacheFilePath, &muWarmCumulativeCache, warmCumulativeCache)
}
// take the most recent n days, rather than returning all days since launch
selectDays := map[string]map[string]map[string]float64{}
days := getDaysInRange(start, now)
for _, day := range days {
selectDays[day] = map[string]map[string]float64{}
for chain, tokens := range results[day] {
selectDays[day][chain] = map[string]float64{}
for symbol, amount := range tokens {
selectDays[day][chain][symbol] = amount
}
}
}
return selectDays
}
// calculates the cumulative value transferred each day since launch.
func NotionalTransferredToCumulative(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
var numDays, forChain, forAddress, daily, allTime string
// allow GET requests with querystring params, or POST requests with json body.
switch r.Method {
case http.MethodGet:
queryParams := r.URL.Query()
numDays = queryParams.Get("numDays")
forChain = queryParams.Get("forChain")
forAddress = queryParams.Get("forAddress")
daily = queryParams.Get("daily")
allTime = queryParams.Get("allTime")
case http.MethodPost:
// declare request body properties
var d struct {
NumDays string `json:"numDays"`
ForChain string `json:"forChain"`
ForAddress string `json:"forAddress"`
Daily string `json:"daily"`
AllTime string `json:"allTime"`
}
// deserialize request body
if err := json.NewDecoder(r.Body).Decode(&d); err != nil {
switch err {
case io.EOF:
// do nothing, empty body is ok
default:
log.Printf("json.NewDecoder: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
numDays = d.NumDays
forChain = d.ForChain
forAddress = d.ForAddress
daily = d.Daily
allTime = d.AllTime
default:
http.Error(w, "405 - Method Not Allowed", http.StatusMethodNotAllowed)
log.Println("Method Not Allowed")
return
}
if daily == "" && allTime == "" {
// none of the options were set, so set one
allTime = "true"
}
var queryDays int
if numDays == "" {
queryDays = 30
} else {
var convErr error
queryDays, convErr = strconv.Atoi(numDays)
if convErr != nil {
fmt.Fprint(w, "numDays must be an integer")
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
// create the rowkey prefix for querying
prefix := ""
if forChain != "" {
prefix = forChain
// if the request is forChain, always groupBy chain
if forAddress != "" {
// if the request is forAddress, always groupBy address
prefix = forChain + ":" + forAddress
}
}
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
var wg sync.WaitGroup
// total since launch
periodTransfers := map[string]map[string]float64{}
allTimeDays := int(time.Now().UTC().Sub(releaseDay).Hours() / 24)
if allTime != "" {
wg.Add(1)
go func(prefix string) {
defer wg.Done()
transfers := transferredToSince(tbl, context.Background(), prefix, releaseDay)
for chain, tokens := range transfers {
periodTransfers[chain] = map[string]float64{}
for symbol, amount := range tokens {
periodTransfers[chain][symbol] = roundToTwoDecimalPlaces(amount)
}
}
}(prefix)
}
// daily transfers by chain
dailyTransfers := map[string]map[string]map[string]float64{}
if daily != "" {
wg.Add(1)
go func(prefix string, queryDays int) {
hours := (24 * queryDays)
periodInterval := -time.Duration(hours) * time.Hour
now := time.Now().UTC()
prev := now.Add(periodInterval)
start := time.Date(prev.Year(), prev.Month(), prev.Day(), 0, 0, 0, 0, prev.Location())
defer wg.Done()
transfers := createCumulativeAmountsOfInterval(tbl, ctx, prefix, start)
for date, chains := range transfers {
dailyTransfers[date] = map[string]map[string]float64{}
for chain, tokens := range chains {
dailyTransfers[date][chain] = map[string]float64{}
for symbol, amount := range tokens {
dailyTransfers[date][chain][symbol] = roundToTwoDecimalPlaces(amount)
}
}
}
}(prefix, queryDays)
}
wg.Wait()
result := &cumulativeResult{
AllTime: periodTransfers,
AllTimeDurationDays: allTimeDays,
Daily: dailyTransfers,
}
jsonBytes, err := json.Marshal(result)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}

View File

@ -1,501 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"bytes"
"context"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"strconv"
"strings"
"sync"
"time"
"cloud.google.com/go/bigtable"
)
type amountsResult struct {
Last24Hours map[string]map[string]float64
WithinPeriod map[string]map[string]float64
PeriodDurationDays int
Daily map[string]map[string]map[string]float64
}
// an in-memory cache of previously calculated results
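// shape: cache prefix -> date -> destination chain -> token symbol -> notional USD transferred that day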
var warmTransfersToCache = map[string]map[string]map[string]map[string]float64{}
var muWarmTransfersToCache sync.RWMutex
var warmTransfersToCacheFilePath = "notional-transferred-to-cache.json"
type TransferData struct {
TokenSymbol string
TokenName string
TokenAddress string
TokenAmount float64
CoinGeckoCoinId string
OriginChain string
LeavingChain string
DestinationChain string
Notional float64
TokenPrice float64
TokenDecimals int
TransferTimestamp string
}
// finds all the TokenTransfer rows within the specified period
func fetchTransferRowsInInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start, end time.Time) []TransferData {
if len(tokenAllowlist) == 0 {
log.Fatal("tokenAllowlist is empty")
}
rows := []TransferData{}
err := tbl.ReadRows(ctx, bigtable.PrefixRange(prefix), func(row bigtable.Row) bool {
t := &TransferData{}
if _, ok := row[transferDetailsFam]; ok {
for _, item := range row[transferDetailsFam] {
switch item.Column {
case "TokenTransferDetails:Amount":
amount, _ := strconv.ParseFloat(string(item.Value), 64)
t.TokenAmount = amount
case "TokenTransferDetails:NotionalUSD":
reader := bytes.NewReader(item.Value)
var notionalFloat float64
if err := binary.Read(reader, binary.BigEndian, &notionalFloat); err != nil {
log.Fatalf("failed to read NotionalUSD of row: %v. err %v ", row.Key(), err)
}
t.Notional = notionalFloat
case "TokenTransferDetails:TokenPriceUSD":
reader := bytes.NewReader(item.Value)
var tokenPriceFloat float64
if err := binary.Read(reader, binary.BigEndian, &tokenPriceFloat); err != nil {
log.Fatalf("failed to read TokenPriceUSD of row: %v. err %v ", row.Key(), err)
}
t.TokenPrice = tokenPriceFloat
case "TokenTransferDetails:OriginSymbol":
t.TokenSymbol = string(item.Value)
case "TokenTransferDetails:OriginName":
t.TokenName = string(item.Value)
case "TokenTransferDetails:OriginTokenAddress":
t.TokenAddress = string(item.Value)
case "TokenTransferDetails:CoinGeckoCoinId":
t.CoinGeckoCoinId = string(item.Value)
case "TokenTransferDetails:Decimals":
t.TokenDecimals, _ = strconv.Atoi(string(item.Value))
case "TokenTransferDetails:TransferTimestamp":
t.TransferTimestamp = string(item.Value)
}
}
if _, ok := row[transferPayloadFam]; ok {
for _, item := range row[transferPayloadFam] {
switch item.Column {
case "TokenTransferPayload:OriginChain":
t.OriginChain = string(item.Value)
case "TokenTransferPayload:TargetChain":
t.DestinationChain = string(item.Value)
}
}
}
keyParts := strings.Split(row.Key(), ":")
t.LeavingChain = keyParts[0]
isAllowed, coinGeckoCoinId := isTokenAllowed(t.OriginChain, t.TokenAddress)
if isAllowed && coinGeckoCoinId != "" {
transferDateStr := t.TransferTimestamp[0:10]
if isTokenActive(t.OriginChain, t.TokenAddress, transferDateStr) {
// use the CoinGeckoCoinId specified in the allowlist
t.CoinGeckoCoinId = coinGeckoCoinId
rows = append(rows, *t)
}
}
}
return true
}, bigtable.RowFilter(
bigtable.ConditionFilter(
bigtable.ChainFilters(
bigtable.FamilyFilter(columnFamilies[1]),
bigtable.CellsPerRowLimitFilter(1), // only the first cell in column
bigtable.TimestampRangeFilter(start, end), // within time range
bigtable.StripValueFilter(), // no columns/values, just the row.Key()
),
bigtable.ChainFilters(
bigtable.FamilyFilter(fmt.Sprintf("%v|%v", transferPayloadFam, transferDetailsFam)),
bigtable.ColumnFilter("Amount|NotionalUSD|OriginSymbol|OriginName|OriginChain|TargetChain|CoinGeckoCoinId|OriginTokenAddress|TokenPriceUSD|Decimals|TransferTimestamp"),
bigtable.LatestNFilter(1),
),
bigtable.BlockAllFilter(),
),
))
if err != nil {
log.Fatalln("failed reading rows to create RowList.", err)
}
return rows
}
// finds the daily amount of each symbol transferred to each chain, from the specified start to the present.
func amountsTransferredToInInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]map[string]float64 {
if _, ok := warmTransfersToCache["*"]; !ok && loadCache {
loadJsonToInterface(ctx, warmTransfersToCacheFilePath, &muWarmTransfersToCache, &warmTransfersToCache)
}
results := map[string]map[string]map[string]float64{}
now := time.Now().UTC()
numPrevDays := int(now.Sub(start).Hours() / 24)
var intervalsWG sync.WaitGroup
// there will be a query for each previous day, plus today
intervalsWG.Add(numPrevDays + 1)
// create the unique identifier for this query, for cache
cachePrefix := createCachePrefix(prefix)
cacheNeedsUpdate := false
for daysAgo := 0; daysAgo <= numPrevDays; daysAgo++ {
go func(tbl *bigtable.Table, ctx context.Context, prefix string, daysAgo int) {
// start is the start of day (SOD), end is the end of day (EOD)
// "0 daysAgo start" is 00:00:00 of the current day
// "0 daysAgo end" is 23:59:59 of the current day (the future)
// calculate the start and end times for the query
hoursAgo := (24 * daysAgo)
daysAgoDuration := -time.Duration(hoursAgo) * time.Hour
n := now.Add(daysAgoDuration)
year := n.Year()
month := n.Month()
day := n.Day()
loc := n.Location()
start := time.Date(year, month, day, 0, 0, 0, 0, loc)
end := time.Date(year, month, day, 23, 59, 59, maxNano, loc)
dateStr := start.Format("2006-01-02")
muWarmTransfersToCache.Lock()
// initialize the map for this date in the result set
results[dateStr] = map[string]map[string]float64{"*": {"*": 0}}
// check to see if there is cache data for this date/query
if dates, ok := warmTransfersToCache[cachePrefix]; ok {
// have a cache for this query
if dateCache, ok := dates[dateStr]; ok && len(dateCache) > 1 && useCache(dateStr) {
// have a cache for this date
if daysAgo >= 1 {
// only use the cache for yesterday and older
results[dateStr] = dateCache
muWarmTransfersToCache.Unlock()
intervalsWG.Done()
return
}
}
} else {
// no cache for this query, initialize the map
warmTransfersToCache[cachePrefix] = map[string]map[string]map[string]float64{}
}
muWarmTransfersToCache.Unlock()
defer intervalsWG.Done()
queryResult := fetchTransferRowsInInterval(tbl, ctx, prefix, start, end)
// iterate through the rows and increment the count
for _, row := range queryResult {
if _, ok := results[dateStr][row.DestinationChain]; !ok {
results[dateStr][row.DestinationChain] = map[string]float64{"*": 0}
}
// add to the total count for the dest chain
results[dateStr][row.DestinationChain]["*"] = results[dateStr][row.DestinationChain]["*"] + row.Notional
// add to total for the day
results[dateStr]["*"]["*"] = results[dateStr]["*"]["*"] + row.Notional
// add to the symbol's daily total
results[dateStr]["*"][row.TokenSymbol] = results[dateStr]["*"][row.TokenSymbol] + row.Notional
// add to the count for chain/symbol
results[dateStr][row.DestinationChain][row.TokenSymbol] = results[dateStr][row.DestinationChain][row.TokenSymbol] + row.Notional
}
if daysAgo >= 1 {
// set the result in the cache
muWarmTransfersToCache.Lock()
if cacheData, ok := warmTransfersToCache[cachePrefix][dateStr]; !ok || len(cacheData) <= 1 || !useCache(dateStr) {
// cache does not have this date, persist it for other instances.
warmTransfersToCache[cachePrefix][dateStr] = results[dateStr]
cacheNeedsUpdate = true
}
muWarmTransfersToCache.Unlock()
}
}(tbl, ctx, prefix, daysAgo)
}
intervalsWG.Wait()
if cacheNeedsUpdate {
persistInterfaceToJson(ctx, warmTransfersToCacheFilePath, &muWarmTransfersToCache, warmTransfersToCache)
}
// create a set of all the keys from all dates/chains, to ensure the result objects all have the same chain keys
seenChainSet := map[string]bool{}
for _, chains := range results {
for leaving := range chains {
if _, ok := seenChainSet[leaving]; !ok {
seenChainSet[leaving] = true
}
}
}
var muResult sync.RWMutex
// ensure each chain object has all the same symbol keys:
for date, chains := range results {
// loop through seen chains
for chain := range seenChainSet {
// check that date has all the chains
if _, ok := chains[chain]; !ok {
muResult.Lock()
results[date][chain] = map[string]float64{"*": 0}
muResult.Unlock()
}
}
}
return results
}
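// Shape sketch (illustrative, not from the original file): the map returned
// above is keyed date -> destination chain -> token symbol, with "*" as the
// aggregate key at every level. Dates and chain IDs below are hypothetical.
//
//   totals := amountsTransferredToInInterval(tbl, ctx, "", start)
//   toChainOne := totals["2022-10-01"]["1"]["*"] // all symbols arriving at chain 1
//   dayTotal := totals["2022-10-01"]["*"]["*"]   // all chains, all symbols, that day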
func transferredToSinceDate(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]float64 {
result := map[string]map[string]float64{"*": {"*": 0}}
dailyTotals := amountsTransferredToInInterval(tbl, ctx, prefix, start)
// loop through the query results to combine cache + fresh data
for _, chains := range dailyTotals {
for chain, tokens := range chains {
// ensure the chain exists in the result map
if _, ok := result[chain]; !ok {
result[chain] = map[string]float64{"*": 0}
}
for symbol, amount := range tokens {
if _, ok := result[chain][symbol]; !ok {
result[chain][symbol] = 0
}
// add the amount of this symbol transferred this day to the
// amount already in the result (amount of this symbol previously transferred)
result[chain][symbol] = result[chain][symbol] + amount
}
}
}
return result
}
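// Worked example (hypothetical values): if day 1 yields {"1": {"USDC": 10}}
// and day 2 yields {"1": {"USDC": 5}}, the combined result is {"1": {"USDC": 15}},
// i.e. per-chain/per-symbol totals accumulated across every day since start.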
// returns the notional value transferred to each chain, by symbol, within the given interval
func transfersToForInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start, end time.Time) map[string]map[string]float64 {
// query for all rows in the time range and sum the notional values
queryResults := fetchTransferRowsInInterval(tbl, ctx, prefix, start, end)
result := map[string]map[string]float64{"*": {"*": 0}}
// iterate through the rows and accumulate the amounts for each index
for _, row := range queryResults {
if _, ok := result[row.DestinationChain]; !ok {
result[row.DestinationChain] = map[string]float64{"*": 0}
}
// add to total amount
result[row.DestinationChain]["*"] = result[row.DestinationChain]["*"] + row.Notional
// add to total per symbol
result["*"][row.TokenSymbol] = result["*"][row.TokenSymbol] + row.Notional
// add to symbol amount
result[row.DestinationChain][row.TokenSymbol] = result[row.DestinationChain][row.TokenSymbol] + row.Notional
// add to all chains/all symbols total
result["*"]["*"] = result["*"]["*"] + row.Notional
}
return result
}
// finds the value that has been transferred to each chain, by symbol.
func NotionalTransferredTo(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
var numDays, forChain, forAddress, daily, last24Hours, forPeriod string
// allow GET requests with querystring params, or POST requests with json body.
switch r.Method {
case http.MethodGet:
queryParams := r.URL.Query()
numDays = queryParams.Get("numDays")
forChain = queryParams.Get("forChain")
forAddress = queryParams.Get("forAddress")
daily = queryParams.Get("daily")
last24Hours = queryParams.Get("last24Hours")
forPeriod = queryParams.Get("forPeriod")
case http.MethodPost:
// declare request body properties
var d struct {
NumDays string `json:"numDays"`
ForChain string `json:"forChain"`
ForAddress string `json:"forAddress"`
Daily string `json:"daily"`
Last24Hours string `json:"last24Hours"`
ForPeriod string `json:"forPeriod"`
}
// deserialize request body
if err := json.NewDecoder(r.Body).Decode(&d); err != nil {
switch err {
case io.EOF:
// do nothing, empty body is ok
default:
log.Printf("json.NewDecoder: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
numDays = d.NumDays
forChain = d.ForChain
forAddress = d.ForAddress
daily = d.Daily
last24Hours = d.Last24Hours
forPeriod = d.ForPeriod
default:
http.Error(w, "405 - Method Not Allowed", http.StatusMethodNotAllowed)
log.Println("Method Not Allowed")
return
}
if daily == "" && last24Hours == "" && forPeriod == "" {
// none of the options were set, so set one
last24Hours = "true"
}
var queryDays int
if numDays == "" {
queryDays = 30
} else {
var convErr error
queryDays, convErr = strconv.Atoi(numDays)
if convErr != nil {
http.Error(w, "numDays must be an integer", http.StatusBadRequest)
return
}
}
// create the rowkey prefix for querying
prefix := ""
if forChain != "" {
prefix = forChain
// if the request specifies forChain, query by the chain prefix
if forAddress != "" {
// if forAddress is also specified, narrow the prefix to chain:address
prefix = forChain + ":" + forAddress
}
}
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
var wg sync.WaitGroup
// total of last 24 hours
last24HourCount := map[string]map[string]float64{}
if last24Hours != "" {
wg.Add(1)
go func(prefix string) {
last24HourInterval := -time.Duration(24) * time.Hour
now := time.Now().UTC()
start := now.Add(last24HourInterval)
defer wg.Done()
transfers := transfersToForInterval(tbl, ctx, prefix, start, now)
for chain, tokens := range transfers {
last24HourCount[chain] = map[string]float64{}
for symbol, amount := range tokens {
last24HourCount[chain][symbol] = roundToTwoDecimalPlaces(amount)
}
}
}(prefix)
}
// total of the last numDays
periodTransfers := map[string]map[string]float64{}
if forPeriod != "" {
wg.Add(1)
go func(prefix string) {
hours := (24 * queryDays)
periodInterval := -time.Duration(hours) * time.Hour
now := time.Now().UTC()
prev := now.Add(periodInterval)
start := time.Date(prev.Year(), prev.Month(), prev.Day(), 0, 0, 0, 0, prev.Location())
defer wg.Done()
// periodCount, err = transferredToSince(tbl, ctx, prefix, start)
// periodCount, err = transfersToForInterval(tbl, ctx, prefix, start, now)
transfers := transferredToSinceDate(tbl, ctx, prefix, start)
for chain, tokens := range transfers {
periodTransfers[chain] = map[string]float64{}
for symbol, amount := range tokens {
periodTransfers[chain][symbol] = roundToTwoDecimalPlaces(amount)
}
}
}(prefix)
}
// daily totals
dailyTransfers := map[string]map[string]map[string]float64{}
if daily != "" {
wg.Add(1)
go func(prefix string, queryDays int) {
hours := (24 * queryDays)
periodInterval := -time.Duration(hours) * time.Hour
now := time.Now().UTC()
prev := now.Add(periodInterval)
start := time.Date(prev.Year(), prev.Month(), prev.Day(), 0, 0, 0, 0, prev.Location())
defer wg.Done()
transfers := amountsTransferredToInInterval(tbl, ctx, prefix, start)
for date, chains := range transfers {
dailyTransfers[date] = map[string]map[string]float64{}
for chain, tokens := range chains {
dailyTransfers[date][chain] = map[string]float64{}
for symbol, amount := range tokens {
dailyTransfers[date][chain][symbol] = roundToTwoDecimalPlaces(amount)
}
}
}
}(prefix, queryDays)
}
wg.Wait()
result := &amountsResult{
Last24Hours: last24HourCount,
WithinPeriod: periodTransfers,
PeriodDurationDays: queryDays,
Daily: dailyTransfers,
}
jsonBytes, err := json.Marshal(result)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}
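// Request sketch (illustrative; the mount path is an assumption, not taken from
// this file): fetch daily totals for the last 7 days of transfers into chain 1,
// via querystring params or an equivalent JSON POST body.
//
//   GET /notionaltransferredto?numDays=7&daily=true&forChain=1
//   POST /notionaltransferredto {"numDays": "7", "daily": "true", "forChain": "1"}
//
// The response mirrors amountsResult: Last24Hours, WithinPeriod,
// PeriodDurationDays, and Daily keyed date -> chain -> symbol.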


@@ -1,503 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"context"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"strconv"
"sync"
"time"
"cloud.google.com/go/bigtable"
)
type transfersResult struct {
Last24Hours map[string]map[string]map[string]float64
WithinPeriod map[string]map[string]map[string]float64
PeriodDurationDays int
Daily map[string]map[string]map[string]map[string]float64
}
// an in-memory cache of previously calculated results
var warmTransfersCache = map[string]map[string]map[string]map[string]map[string]float64{}
var muWarmTransfersCache sync.RWMutex
var warmTransfersCacheFilePath = "notional-transferred-cache.json"
// finds the daily amount of each symbol transferred from each chain, to each chain,
// from the specified start to the present.
func createTransfersOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]map[string]map[string]float64 {
if _, ok := warmTransfersCache["*"]; !ok && loadCache {
loadJsonToInterface(ctx, warmTransfersCacheFilePath, &muWarmTransfersCache, &warmTransfersCache)
}
results := map[string]map[string]map[string]map[string]float64{}
now := time.Now().UTC()
numPrevDays := int(now.Sub(start).Hours() / 24)
var intervalsWG sync.WaitGroup
// there will be a query for each previous day, plus today
intervalsWG.Add(numPrevDays + 1)
// create the unique identifier for this query, for cache
cachePrefix := createCachePrefix(prefix)
cacheNeedsUpdate := false
for daysAgo := 0; daysAgo <= numPrevDays; daysAgo++ {
go func(tbl *bigtable.Table, ctx context.Context, prefix string, daysAgo int) {
// start is the start of the day (SOD), end is the end of the day (EOD)
// "0 daysAgo start" is 00:00:00 of the current day
// "0 daysAgo end" is 23:59:59 of the current day (later today)
// calculate the start and end times for the query
hoursAgo := (24 * daysAgo)
daysAgoDuration := -time.Duration(hoursAgo) * time.Hour
n := now.Add(daysAgoDuration)
year := n.Year()
month := n.Month()
day := n.Day()
loc := n.Location()
start := time.Date(year, month, day, 0, 0, 0, 0, loc)
end := time.Date(year, month, day, 23, 59, 59, maxNano, loc)
dateStr := start.Format("2006-01-02")
muWarmTransfersCache.Lock()
// initialize the map for this date in the result set
results[dateStr] = map[string]map[string]map[string]float64{"*": {"*": {"*": 0}}}
// check to see if there is cache data for this date/query
if dates, ok := warmTransfersCache[cachePrefix]; ok {
// have a cache for this query
if dateCache, ok := dates[dateStr]; ok && len(dateCache) > 1 && useCache(dateStr) {
// have a cache for this date
if daysAgo >= 1 {
// only use the cache for yesterday and older
results[dateStr] = dateCache
muWarmTransfersCache.Unlock()
intervalsWG.Done()
return
}
}
} else {
// no cache for this query, initialize the map
warmTransfersCache[cachePrefix] = map[string]map[string]map[string]map[string]float64{}
}
muWarmTransfersCache.Unlock()
defer intervalsWG.Done()
queryResult := fetchTransferRowsInInterval(tbl, ctx, prefix, start, end)
// iterate through the rows and accumulate the amounts
for _, row := range queryResult {
if _, ok := results[dateStr][row.LeavingChain]; !ok {
results[dateStr][row.LeavingChain] = map[string]map[string]float64{"*": {"*": 0}}
}
if _, ok := results[dateStr][row.LeavingChain][row.DestinationChain]; !ok {
results[dateStr][row.LeavingChain][row.DestinationChain] = map[string]float64{"*": 0}
}
if _, ok := results[dateStr]["*"][row.DestinationChain]; !ok {
results[dateStr]["*"][row.DestinationChain] = map[string]float64{"*": 0}
}
// add the transfer data to the result set every possible way:
// by symbol, aggregated by: "leaving chain", "arriving at chain", "from any chain", "to any chain".
// add to the total amount leaving this chain, going to any chain, for all symbols
results[dateStr][row.LeavingChain]["*"]["*"] = results[dateStr][row.LeavingChain]["*"]["*"] + row.Notional
// add to the total amount leaving this chain, going to the destination chain, for all symbols
results[dateStr][row.LeavingChain][row.DestinationChain]["*"] = results[dateStr][row.LeavingChain][row.DestinationChain]["*"] + row.Notional
// add to the total amount of this symbol leaving this chain, going to any chain
results[dateStr][row.LeavingChain]["*"][row.TokenSymbol] = results[dateStr][row.LeavingChain]["*"][row.TokenSymbol] + row.Notional
// add to the total amount of this symbol leaving this chain, going to the destination chain
results[dateStr][row.LeavingChain][row.DestinationChain][row.TokenSymbol] = results[dateStr][row.LeavingChain][row.DestinationChain][row.TokenSymbol] + row.Notional
// add to the total amount arriving at the destination chain, coming from anywhere, including all symbols
results[dateStr]["*"][row.DestinationChain]["*"] = results[dateStr]["*"][row.DestinationChain]["*"] + row.Notional
// add to the total amount of this symbol arriving at the destination chain
results[dateStr]["*"][row.DestinationChain][row.TokenSymbol] = results[dateStr]["*"][row.DestinationChain][row.TokenSymbol] + row.Notional
// add to the total amount of this symbol transferred, from any chain, to any chain
results[dateStr]["*"]["*"][row.TokenSymbol] = results[dateStr]["*"]["*"][row.TokenSymbol] + row.Notional
// and finally, total/total/total: amount of all symbols transferred from any chain to any other chain
results[dateStr]["*"]["*"]["*"] = results[dateStr]["*"]["*"]["*"] + row.Notional
}
if daysAgo >= 1 {
// set the result in the cache
muWarmTransfersCache.Lock()
if cacheData, ok := warmTransfersCache[cachePrefix][dateStr]; !ok || len(cacheData) <= 1 || !useCache(dateStr) {
// cache does not have this date, add the data, and mark the cache stale
warmTransfersCache[cachePrefix][dateStr] = results[dateStr]
cacheNeedsUpdate = true
}
muWarmTransfersCache.Unlock()
}
}(tbl, ctx, prefix, daysAgo)
}
intervalsWG.Wait()
if cacheNeedsUpdate {
persistInterfaceToJson(ctx, warmTransfersCacheFilePath, &muWarmTransfersCache, warmTransfersCache)
}
// having consistent keys in each object is helpful for clients, e.g. the explorer GUI
// create a set of all the keys from all dates/chains, to ensure the result objects all have the same chain keys
seenChainSet := map[string]bool{}
for _, chains := range results {
for leaving, dests := range chains {
seenChainSet[leaving] = true
for dest := range dests {
seenChainSet[dest] = true
}
}
}
var muResult sync.RWMutex
// ensure each chain object has all the same symbol keys:
for date, chains := range results {
for chain := range seenChainSet {
if _, ok := chains[chain]; !ok {
muResult.Lock()
results[date][chain] = map[string]map[string]float64{"*": {"*": 0}}
muResult.Unlock()
}
}
for leaving := range chains {
for chain := range seenChainSet {
// check that date has all the chains
if _, ok := chains[chain]; !ok {
muResult.Lock()
results[date][leaving][chain] = map[string]float64{"*": 0}
muResult.Unlock()
}
}
}
}
return results
}
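// Shape sketch (illustrative): this result has one more level than the
// "transferred to" variant, keyed date -> leaving chain -> destination chain
// -> symbol, again with "*" aggregates at every level. Hypothetical keys:
//
//   out := createTransfersOfInterval(tbl, ctx, "", start)
//   twoToOne := out["2022-10-01"]["2"]["1"]["*"] // chain 2 -> chain 1, all symbols
//   grand := out["2022-10-01"]["*"]["*"]["*"]    // everything that day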
// calculates the amount of each symbol that has gone from each chain, to each other chain, since the specified day.
func transferredSinceDate(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]map[string]float64 {
result := map[string]map[string]map[string]float64{"*": {"*": {"*": 0}}}
dailyTotals := createTransfersOfInterval(tbl, ctx, prefix, start)
for _, leaving := range dailyTotals {
for chain, dests := range leaving {
// ensure the chain exists in the result map
if _, ok := result[chain]; !ok {
result[chain] = map[string]map[string]float64{"*": {"*": 0}}
}
for dest, tokens := range dests {
if _, ok := result[chain][dest]; !ok {
result[chain][dest] = map[string]float64{"*": 0}
}
for symbol, amount := range tokens {
if _, ok := result[chain][dest][symbol]; !ok {
result[chain][dest][symbol] = 0
}
// add the amount of this symbol transferred this day to the
// amount already in the result (amount of this symbol previously transferred)
result[chain][dest][symbol] = result[chain][dest][symbol] + amount
}
}
}
}
// create a set of chainIDs, the union of source and destination chains,
// to ensure the result objects all have the same keys.
seenChainSet := map[string]bool{}
for leaving, dests := range result {
seenChainSet[leaving] = true
for dest := range dests {
seenChainSet[dest] = true
}
}
// make sure the root of the map has all the chainIDs
for chain := range seenChainSet {
if _, ok := result[chain]; !ok {
result[chain] = map[string]map[string]float64{"*": {"*": 0}}
}
}
// make sure each chain at the root (leaving) has a key (destination) for each chain
for leaving, dests := range result {
for chain := range seenChainSet {
// check that each leaving chain has all destination chains
if _, ok := dests[chain]; !ok {
result[leaving][chain] = map[string]float64{"*": 0}
}
}
}
return result
}
// returns the notional value transferred from each chain to each other chain, by symbol, within the given interval
func transfersForInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start, end time.Time) map[string]map[string]map[string]float64 {
// query for all rows in the time range and sum the notional values
queryResults := fetchTransferRowsInInterval(tbl, ctx, prefix, start, end)
result := map[string]map[string]map[string]float64{"*": {"*": {"*": 0}}}
// iterate through the rows and accumulate the amounts for each index
for _, row := range queryResults {
if _, ok := result[row.LeavingChain]; !ok {
result[row.LeavingChain] = map[string]map[string]float64{"*": {"*": 0}}
}
if _, ok := result[row.LeavingChain][row.DestinationChain]; !ok {
result[row.LeavingChain][row.DestinationChain] = map[string]float64{"*": 0}
}
if _, ok := result["*"][row.DestinationChain]; !ok {
result["*"][row.DestinationChain] = map[string]float64{"*": 0}
}
// add the transfer data to the result set every possible way:
// by symbol, aggregated by: "leaving chain", "arriving at chain", "from any chain", "to any chain".
// add to the total amount leaving this chain, going to any chain, for all symbols
result[row.LeavingChain]["*"]["*"] = result[row.LeavingChain]["*"]["*"] + row.Notional
// add to the total amount leaving this chain, going to the destination chain, for all symbols
result[row.LeavingChain][row.DestinationChain]["*"] = result[row.LeavingChain][row.DestinationChain]["*"] + row.Notional
// add to the total amount of this symbol leaving this chain, going to any chain
result[row.LeavingChain]["*"][row.TokenSymbol] = result[row.LeavingChain]["*"][row.TokenSymbol] + row.Notional
// add to the total amount of this symbol leaving this chain, going to the destination chain
result[row.LeavingChain][row.DestinationChain][row.TokenSymbol] = result[row.LeavingChain][row.DestinationChain][row.TokenSymbol] + row.Notional
// add to the total amount arriving at the destination chain, coming from anywhere, including all symbols
result["*"][row.DestinationChain]["*"] = result["*"][row.DestinationChain]["*"] + row.Notional
// add to the total amount of this symbol arriving at the destination chain
result["*"][row.DestinationChain][row.TokenSymbol] = result["*"][row.DestinationChain][row.TokenSymbol] + row.Notional
// add to the total amount of this symbol transferred, from any chain, to any chain
result["*"]["*"][row.TokenSymbol] = result["*"]["*"][row.TokenSymbol] + row.Notional
// and finally, total/total/total: amount of all symbols transferred from any chain to any other chain
result["*"]["*"]["*"] = result["*"]["*"]["*"] + row.Notional
}
// create a set of chainIDs, the union of source and destination chains,
// to ensure the result objects all have the same keys.
seenChainSet := map[string]bool{}
for leaving, dests := range result {
seenChainSet[leaving] = true
for dest := range dests {
seenChainSet[dest] = true
}
}
// make sure the root of the map has all the chainIDs
for chain := range seenChainSet {
if _, ok := result[chain]; !ok {
result[chain] = map[string]map[string]float64{"*": {"*": 0}}
}
}
// make sure each chain at the root (leaving) has a key (destination) for each chain
for leaving, dests := range result {
for chain := range seenChainSet {
// check that each leaving chain has all destination chains
if _, ok := dests[chain]; !ok {
result[leaving][chain] = map[string]float64{"*": 0}
}
}
}
return result
}
// finds the value that has been transferred from each chain to each other, by symbol.
func NotionalTransferred(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
var numDays, forChain, forAddress, daily, last24Hours, forPeriod string
// allow GET requests with querystring params, or POST requests with json body.
switch r.Method {
case http.MethodGet:
queryParams := r.URL.Query()
numDays = queryParams.Get("numDays")
forChain = queryParams.Get("forChain")
forAddress = queryParams.Get("forAddress")
daily = queryParams.Get("daily")
last24Hours = queryParams.Get("last24Hours")
forPeriod = queryParams.Get("forPeriod")
case http.MethodPost:
// declare request body properties
var d struct {
NumDays string `json:"numDays"`
ForChain string `json:"forChain"`
ForAddress string `json:"forAddress"`
Daily string `json:"daily"`
Last24Hours string `json:"last24Hours"`
ForPeriod string `json:"forPeriod"`
}
// deserialize request body
if err := json.NewDecoder(r.Body).Decode(&d); err != nil {
switch err {
case io.EOF:
// do nothing, empty body is ok
default:
log.Printf("json.NewDecoder: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
numDays = d.NumDays
forChain = d.ForChain
forAddress = d.ForAddress
daily = d.Daily
last24Hours = d.Last24Hours
forPeriod = d.ForPeriod
default:
http.Error(w, "405 - Method Not Allowed", http.StatusMethodNotAllowed)
log.Println("Method Not Allowed")
return
}
if daily == "" && last24Hours == "" && forPeriod == "" {
// none of the options were set, so set one
last24Hours = "true"
}
var queryDays int
if numDays == "" {
queryDays = 30
} else {
var convErr error
queryDays, convErr = strconv.Atoi(numDays)
if convErr != nil {
http.Error(w, "numDays must be an integer", http.StatusBadRequest)
return
}
}
// create the rowkey prefix for querying
prefix := ""
if forChain != "" {
prefix = forChain
// if the request specifies forChain, query by the chain prefix
if forAddress != "" {
// if forAddress is also specified, narrow the prefix to chain:address
prefix = forChain + ":" + forAddress
}
}
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
var wg sync.WaitGroup
// total of last 24 hours
last24HourCount := map[string]map[string]map[string]float64{}
if last24Hours != "" {
wg.Add(1)
go func(prefix string) {
last24HourInterval := -time.Duration(24) * time.Hour
now := time.Now().UTC()
start := now.Add(last24HourInterval)
defer wg.Done()
transfers := transfersForInterval(tbl, ctx, prefix, start, now)
for chain, dests := range transfers {
last24HourCount[chain] = map[string]map[string]float64{}
for dest, tokens := range dests {
last24HourCount[chain][dest] = map[string]float64{}
for symbol, amount := range tokens {
last24HourCount[chain][dest][symbol] = roundToTwoDecimalPlaces(amount)
}
}
}
}(prefix)
}
// transfers of the last numDays
periodTransfers := map[string]map[string]map[string]float64{}
if forPeriod != "" {
wg.Add(1)
go func(prefix string) {
hours := (24 * queryDays)
periodInterval := -time.Duration(hours) * time.Hour
now := time.Now().UTC()
prev := now.Add(periodInterval)
start := time.Date(prev.Year(), prev.Month(), prev.Day(), 0, 0, 0, 0, prev.Location())
defer wg.Done()
transfers := transferredSinceDate(tbl, ctx, prefix, start)
for chain, dests := range transfers {
periodTransfers[chain] = map[string]map[string]float64{}
for dest, tokens := range dests {
periodTransfers[chain][dest] = map[string]float64{}
for symbol, amount := range tokens {
periodTransfers[chain][dest][symbol] = roundToTwoDecimalPlaces(amount)
}
}
}
}(prefix)
}
// daily totals
dailyTransfers := map[string]map[string]map[string]map[string]float64{}
if daily != "" {
wg.Add(1)
go func(prefix string, queryDays int) {
hours := (24 * queryDays)
periodInterval := -time.Duration(hours) * time.Hour
now := time.Now().UTC()
prev := now.Add(periodInterval)
start := time.Date(prev.Year(), prev.Month(), prev.Day(), 0, 0, 0, 0, prev.Location())
defer wg.Done()
transfers := createTransfersOfInterval(tbl, ctx, prefix, start)
for date, chains := range transfers {
dailyTransfers[date] = map[string]map[string]map[string]float64{}
for chain, dests := range chains {
dailyTransfers[date][chain] = map[string]map[string]float64{}
for destChain, tokens := range dests {
dailyTransfers[date][chain][destChain] = map[string]float64{}
for symbol, amount := range tokens {
dailyTransfers[date][chain][destChain][symbol] = roundToTwoDecimalPlaces(amount)
}
}
}
}
}(prefix, queryDays)
}
wg.Wait()
result := &transfersResult{
Last24Hours: last24HourCount,
WithinPeriod: periodTransfers,
PeriodDurationDays: queryDays,
Daily: dailyTransfers,
}
jsonBytes, err := json.Marshal(result)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}
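// Request sketch (illustrative; the mount path is an assumption): totals for
// the trailing 30-day period, from any chain to any chain:
//
//   POST /notionaltransferred {"forPeriod": "true", "numDays": "30"}
//
// The WithinPeriod field of the response is keyed
// leaving chain -> destination chain -> symbol.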


@@ -1,479 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"sort"
"strconv"
"sync"
"time"
"cloud.google.com/go/bigtable"
)
type tvlCumulativeResult struct {
DailyLocked map[string]map[string]map[string]LockedAsset
}
// an in-memory cache of previously calculated results
var warmTvlCumulativeCache = map[string]map[string]map[string]LockedAsset{}
var muWarmTvlCumulativeCache sync.RWMutex
var warmTvlCumulativeCacheFilePath = "tvl-cumulative-cache.json"
var notionalTvlCumulativeResultPath = "notional-tvl-cumulative.json"
var coinGeckoPriceCacheFilePath = "coingecko-price-cache.json"
var coinGeckoPriceCache = map[string]map[string]float64{}
var loadedCoinGeckoPriceCache bool
// days to be excluded from the TVL result
var skipDays = map[string]bool{
// for example:
// "2022-02-19": true,
}
func loadAndUpdateCoinGeckoPriceCache(ctx context.Context, coinIds []string, now time.Time) {
// on cold start, load the price cache into memory, then fetch any missing token price histories and add them to the cache
if !loadedCoinGeckoPriceCache {
// load the price cache
if loadCache {
loadJsonToInterface(ctx, coinGeckoPriceCacheFilePath, &muWarmTvlCumulativeCache, &coinGeckoPriceCache)
loadedCoinGeckoPriceCache = true
}
// find tokens missing price history
missing := []string{}
for _, coinId := range coinIds {
found := false
for _, prices := range coinGeckoPriceCache {
if _, ok := prices[coinId]; ok {
found = true
break
}
}
if !found {
missing = append(missing, coinId)
}
}
// fetch missing price histories and add them to the cache
priceHistories := fetchTokenPriceHistories(ctx, missing, releaseDay, now)
for date, prices := range priceHistories {
for coinId, price := range prices {
if _, ok := coinGeckoPriceCache[date]; !ok {
coinGeckoPriceCache[date] = map[string]float64{}
}
coinGeckoPriceCache[date][coinId] = price
}
}
}
// fetch today's latest prices
today := now.Format("2006-01-02")
coinGeckoPriceCache[today] = fetchTokenPrices(ctx, coinIds)
// write to the cache file
persistInterfaceToJson(ctx, coinGeckoPriceCacheFilePath, &muWarmTvlCumulativeCache, coinGeckoPriceCache)
}
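// Cache shape sketch (hypothetical values): coinGeckoPriceCache is keyed
// date -> CoinGecko coin id -> USD price, e.g.
//
//   coinGeckoPriceCache["2022-10-01"]["ethereum"] == 1330.42
//
// so daily notional values can be recomputed without re-querying the price API.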
// calculates a running total of notional value transferred, by symbol, since the start time specified.
func createTvlCumulativeOfInterval(tbl *bigtable.Table, ctx context.Context, start time.Time) map[string]map[string]map[string]LockedAsset {
if len(warmTvlCumulativeCache) == 0 && loadCache {
loadJsonToInterface(ctx, warmTvlCumulativeCacheFilePath, &muWarmTvlCumulativeCache, &warmTvlCumulativeCache)
}
now := time.Now().UTC()
today := now.Format("2006-01-02")
cacheNeedsUpdate := false
muWarmTvlCumulativeCache.Lock()
if len(warmTvlCumulativeCache) == 0 {
warmTvlCumulativeCache = map[string]map[string]map[string]LockedAsset{}
}
muWarmTvlCumulativeCache.Unlock()
results := map[string]map[string]map[string]LockedAsset{}
// fetch the amounts of transfers by symbol, for each day since launch (releaseDay)
dailyAmounts := tvlInInterval(tbl, ctx, releaseDay)
// create a slice of dates, ordered oldest first
dateKeys := make([]string, 0, len(dailyAmounts))
for k := range dailyAmounts {
dateKeys = append(dateKeys, k)
}
sort.Strings(dateKeys)
// iterate through the dates in the result set, and accumulate the amounts
// of each token transfer by symbol, based on the destination of the transfer.
for i, date := range dateKeys {
results[date] = map[string]map[string]LockedAsset{"*": {"*": LockedAsset{}}}
muWarmTvlCumulativeCache.RLock()
if dateCache, ok := warmTvlCumulativeCache[date]; ok && useCache(date) && dateCache != nil {
// have a cached value for this day, use it.
// iterate through cache and copy values to the result
for chain, tokens := range dateCache {
results[date][chain] = map[string]LockedAsset{}
for token, lockedAsset := range tokens {
results[date][chain][token] = LockedAsset{
Symbol: lockedAsset.Symbol,
Name: lockedAsset.Name,
Address: lockedAsset.Address,
CoinGeckoId: lockedAsset.CoinGeckoId,
TokenPrice: lockedAsset.TokenPrice,
Amount: lockedAsset.Amount,
Notional: lockedAsset.Notional,
}
}
}
muWarmTvlCumulativeCache.RUnlock()
} else {
// no cached value for this day, must calculate it
muWarmTvlCumulativeCache.RUnlock()
if i == 0 {
// special case for first day, no need to sum.
for chain, tokens := range dailyAmounts[date] {
results[date][chain] = map[string]LockedAsset{}
for token, lockedAsset := range tokens {
results[date][chain][token] = LockedAsset{
Symbol: lockedAsset.Symbol,
Name: lockedAsset.Name,
Address: lockedAsset.Address,
CoinGeckoId: lockedAsset.CoinGeckoId,
TokenPrice: lockedAsset.TokenPrice,
Amount: lockedAsset.Amount,
Notional: lockedAsset.Notional,
}
}
}
} else {
// find the date string of the previous day
prevDate := dateKeys[i-1]
prevDayAmounts := results[prevDate]
thisDayAmounts := dailyAmounts[date]
// iterate through all the transfers and add the previous day's amount, if it exists
for chain, thisDaySymbols := range thisDayAmounts {
// create a union of the symbols from this day, and previous days
symbolsUnion := map[string]string{}
for symbol := range prevDayAmounts[chain] {
symbolsUnion[symbol] = symbol
}
for symbol := range thisDaySymbols {
symbolsUnion[symbol] = symbol
}
// initialize the chain/symbol map for this date
if _, ok := results[date][chain]; !ok {
results[date][chain] = map[string]LockedAsset{}
}
// iterate through the union of symbols, creating an amount for each one,
// and adding it to the results.
for symbol := range symbolsUnion {
asset := LockedAsset{}
prevDayAmount := float64(0)
if lockedAsset, ok := results[prevDate][chain][symbol]; ok {
prevDayAmount = lockedAsset.Amount
asset = lockedAsset
}
thisDayAmount := float64(0)
if lockedAsset, ok := thisDaySymbols[symbol]; ok {
thisDayAmount = lockedAsset.Amount
// use today's locked asset, rather than prevDay's, for freshest price.
asset = lockedAsset
}
cumulativeAmount := prevDayAmount + thisDayAmount
results[date][chain][symbol] = LockedAsset{
Symbol: asset.Symbol,
Name: asset.Name,
Address: asset.Address,
CoinGeckoId: asset.CoinGeckoId,
TokenPrice: asset.TokenPrice,
Amount: cumulativeAmount,
}
}
}
}
// don't cache today
if date != today {
// set the result in the cache
muWarmTvlCumulativeCache.Lock()
if _, ok := warmTvlCumulativeCache[date]; !ok || !useCache(date) {
// cache does not have this date, persist it for other instances.
warmTvlCumulativeCache[date] = map[string]map[string]LockedAsset{}
for chain, tokens := range results[date] {
warmTvlCumulativeCache[date][chain] = map[string]LockedAsset{}
for token, asset := range tokens {
warmTvlCumulativeCache[date][chain][token] = LockedAsset{
Symbol: asset.Symbol,
Name: asset.Name,
Address: asset.Address,
CoinGeckoId: asset.CoinGeckoId,
TokenPrice: asset.TokenPrice,
Amount: asset.Amount,
}
}
}
cacheNeedsUpdate = true
}
muWarmTvlCumulativeCache.Unlock()
}
}
}
if cacheNeedsUpdate {
persistInterfaceToJson(ctx, warmTvlCumulativeCacheFilePath, &muWarmTvlCumulativeCache, warmTvlCumulativeCache)
}
// take the most recent n days, rather than returning all days since launch
selectDays := map[string]map[string]map[string]LockedAsset{}
days := getDaysInRange(start, now)
for _, day := range days {
selectDays[day] = map[string]map[string]LockedAsset{}
for chain, tokens := range results[day] {
selectDays[day][chain] = map[string]LockedAsset{}
for symbol, asset := range tokens {
selectDays[day][chain][symbol] = LockedAsset{
Symbol: asset.Symbol,
Name: asset.Name,
Address: asset.Address,
CoinGeckoId: asset.CoinGeckoId,
TokenPrice: asset.TokenPrice,
Amount: asset.Amount,
}
}
}
}
return selectDays
}
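// Worked example (hypothetical amounts): for one chain/symbol with daily net
// transfers of +10, -2, and +5, the cumulative series is 10, 8, 13; each day
// is the previous day's running total plus that day's net amount.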
// calculates the cumulative value transferred each day since launch.
func ComputeTvlCumulative(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
// days since launch day
queryDays := int(time.Now().UTC().Sub(releaseDay).Hours() / 24)
ctx := context.Background()
dailyTvl := map[string]map[string]map[string]LockedAsset{}
hours := (24 * queryDays)
periodInterval := -time.Duration(hours) * time.Hour
now := time.Now().UTC()
prev := now.Add(periodInterval)
start := time.Date(prev.Year(), prev.Month(), prev.Day(), 0, 0, 0, 0, prev.Location())
transfers := createTvlCumulativeOfInterval(tbl, ctx, start)
coinIdSet := map[string]bool{}
for _, chains := range transfers {
for _, assets := range chains {
for _, asset := range assets {
if asset.CoinGeckoId != "*" {
coinIdSet[asset.CoinGeckoId] = true
}
}
}
}
coinIds := []string{}
for coinId := range coinIdSet {
coinIds = append(coinIds, coinId)
}
loadAndUpdateCoinGeckoPriceCache(ctx, coinIds, now)
// calculate the notional tvl based on the price of the tokens each day
for date, chains := range transfers {
if _, ok := skipDays[date]; ok {
log.Println("skipping ", date)
continue
}
dailyTvl[date] = map[string]map[string]LockedAsset{}
dailyTvl[date]["*"] = map[string]LockedAsset{}
dailyTvl[date]["*"]["*"] = LockedAsset{
Symbol: "*",
Notional: 0,
}
for chain, tokens := range chains {
if chain == "*" {
continue
}
dailyTvl[date][chain] = map[string]LockedAsset{}
dailyTvl[date][chain]["*"] = LockedAsset{
Symbol: "*",
Notional: 0,
}
for symbol, asset := range tokens {
if symbol == "*" {
continue
}
// asset.TokenPrice is the price that was fetched when this token was last transferred, possibly before this date
// prefer to use the cached price for this date if it's available, because it might be newer
tokenPrice := asset.TokenPrice
if prices, ok := coinGeckoPriceCache[date]; ok {
if price, ok := prices[asset.CoinGeckoId]; ok {
// use the cached price
tokenPrice = price
}
}
notional := asset.Amount * tokenPrice
if notional <= 0 {
continue
}
asset.Notional = roundToTwoDecimalPlaces(notional)
// Note: disable individual symbols to reduce response size for now
//// create a new LockAsset in order to exclude TokenPrice and Amount
//dailyTvl[date][chain][symbol] = LockedAsset{
// Symbol: asset.Symbol,
// Address: asset.Address,
// CoinGeckoId: asset.CoinGeckoId,
// Notional: asset.Notional,
//}
// add this asset's notional to the date/chain/*
if allAssets, ok := dailyTvl[date][chain]["*"]; ok {
allAssets.Notional += notional
dailyTvl[date][chain]["*"] = allAssets
}
} // end symbols iteration
// add chain total to the daily total
if allAssets, ok := dailyTvl[date]["*"]["*"]; ok {
allAssets.Notional += dailyTvl[date][chain]["*"].Notional
dailyTvl[date]["*"]["*"] = allAssets
}
// round the day's chain total
if allAssets, ok := dailyTvl[date][chain]["*"]; ok {
allAssets.Notional = roundToTwoDecimalPlaces(allAssets.Notional)
dailyTvl[date][chain]["*"] = allAssets
}
} // end chains iteration
// round the daily total
if allAssets, ok := dailyTvl[date]["*"]["*"]; ok {
allAssets.Notional = roundToTwoDecimalPlaces(allAssets.Notional)
dailyTvl[date]["*"]["*"] = allAssets
}
}
result := &tvlCumulativeResult{
DailyLocked: dailyTvl,
}
persistInterfaceToJson(ctx, notionalTvlCumulativeResultPath, &muWarmTvlCumulativeCache, result)
jsonBytes, err := json.Marshal(result)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}
func TvlCumulative(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
var numDays string
var totalsOnly string
switch r.Method {
case http.MethodGet:
queryParams := r.URL.Query()
numDays = queryParams.Get("numDays")
totalsOnly = queryParams.Get("totalsOnly")
}
var queryDays int
if numDays == "" {
// days since launch day
queryDays = int(time.Now().UTC().Sub(releaseDay).Hours() / 24)
} else {
var convErr error
queryDays, convErr = strconv.Atoi(numDays)
if convErr != nil {
http.Error(w, "numDays must be an integer", http.StatusBadRequest)
return
}
}
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
hours := (24 * queryDays)
periodInterval := -time.Duration(hours) * time.Hour
now := time.Now().UTC()
prev := now.Add(periodInterval)
start := time.Date(prev.Year(), prev.Month(), prev.Day(), 0, 0, 0, 0, prev.Location())
startStr := start.Format("2006-01-02")
var cachedResult tvlCumulativeResult
loadJsonToInterface(ctx, notionalTvlCumulativeResultPath, &muWarmTvlCumulativeCache, &cachedResult)
dailyLocked := map[string]map[string]map[string]LockedAsset{}
for date, chains := range cachedResult.DailyLocked {
if date >= startStr {
if totalsOnly == "" {
dailyLocked[date] = chains
} else {
dailyLocked[date] = map[string]map[string]LockedAsset{}
for chain, addresses := range chains {
dailyLocked[date][chain] = map[string]LockedAsset{}
dailyLocked[date][chain]["*"] = addresses["*"]
}
}
}
}
result := &tvlCumulativeResult{
DailyLocked: dailyLocked,
}
jsonBytes, err := json.Marshal(result)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}
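// Request sketch (illustrative; the mount path is an assumption): a compact
// 30-day series of per-chain totals, dropping individual token entries:
//
//   GET /tvlcumulative?numDays=30&totalsOnly=true
//
// returns DailyLocked keyed date -> chain, with only the "*" entry per chain.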


@@ -1,509 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"context"
"encoding/json"
"io"
"log"
"net/http"
"sync"
"time"
"cloud.google.com/go/bigtable"
)
type tvlResult struct {
Last24HoursChange map[string]map[string]LockedAsset
AllTime map[string]map[string]LockedAsset
}
// an in-memory cache of previously calculated results
var warmTvlCache = map[string]map[string]map[string]LockedAsset{}
var muWarmTvlCache sync.RWMutex
var warmTvlFilePath = "tvl-cache.json"
var notionalTvlResultPath = "notional-tvl.json"
type LockedAsset struct {
Symbol string
Name string
Address string
CoinGeckoId string
Amount float64
Notional float64
TokenPrice float64
TokenDecimals int
}
// finds the daily net amount of each asset locked on its origin chain, from the specified start to the present.
func tvlInInterval(tbl *bigtable.Table, ctx context.Context, start time.Time) map[string]map[string]map[string]LockedAsset {
if len(warmTvlCache) == 0 && loadCache {
loadJsonToInterface(ctx, warmTvlFilePath, &muWarmTvlCache, &warmTvlCache)
}
results := map[string]map[string]map[string]LockedAsset{}
now := time.Now().UTC()
numPrevDays := int(now.Sub(start).Hours() / 24)
var intervalsWG sync.WaitGroup
// there will be a query for each previous day, plus today
intervalsWG.Add(numPrevDays + 1)
cacheNeedsUpdate := false
for daysAgo := 0; daysAgo <= numPrevDays; daysAgo++ {
go func(tbl *bigtable.Table, ctx context.Context, daysAgo int) {
// start is the start of the day (SOD), end is the end of the day (EOD)
// "0 daysAgo start" is 00:00:00 of the current day
// "0 daysAgo end" is 23:59:59 of the current day (later today)
// calculate the start and end times for the query
hoursAgo := (24 * daysAgo)
daysAgoDuration := -time.Duration(hoursAgo) * time.Hour
n := now.Add(daysAgoDuration)
year := n.Year()
month := n.Month()
day := n.Day()
loc := n.Location()
start := time.Date(year, month, day, 0, 0, 0, 0, loc)
end := time.Date(year, month, day, 23, 59, 59, maxNano, loc)
dateStr := start.Format("2006-01-02")
muWarmTvlCache.Lock()
// initialize the map for this date in the result set
results[dateStr] = map[string]map[string]LockedAsset{}
// check to see if there is cache data for this date/query
if len(warmTvlCache) >= 1 {
// have a cache, check if it has the date
if dateCache, ok := warmTvlCache[dateStr]; ok && len(dateCache) > 1 && useCache(dateStr) {
// have a cache for this date
if daysAgo >= 1 {
// only use the cache for yesterday and older
results[dateStr] = dateCache
muWarmTvlCache.Unlock()
intervalsWG.Done()
return
}
}
} else {
// no cache for this query, initialize the map
warmTvlCache = map[string]map[string]map[string]LockedAsset{}
}
muWarmTvlCache.Unlock()
defer intervalsWG.Done()
for _, chainId := range tvlChainIDs {
queryResult := fetchTransferRowsInInterval(tbl, ctx, chainIDRowPrefix(chainId), start, end)
// iterate through the rows and accumulate the amount changes
for _, row := range queryResult {
if row.TokenAddress == "" {
// if the token address is missing, skip
continue
}
if _, ok := results[dateStr][row.OriginChain]; !ok {
results[dateStr][row.OriginChain] = map[string]LockedAsset{}
}
if _, ok := results[dateStr][row.OriginChain][row.TokenAddress]; !ok {
results[dateStr][row.OriginChain][row.TokenAddress] = LockedAsset{
Symbol: row.TokenSymbol,
Name: row.TokenName,
Address: row.TokenAddress,
CoinGeckoId: row.CoinGeckoCoinId,
TokenPrice: row.TokenPrice,
TokenDecimals: row.TokenDecimals,
Amount: 0,
Notional: 0,
}
}
var amountChange float64
if row.OriginChain == row.LeavingChain {
// this is a native asset leaving its chain:
// add this to tokens of originChain
amountChange = row.TokenAmount
}
if row.OriginChain == row.DestinationChain {
// this is a native asset going back to its chain:
// subtract this from tokens of originChain
amountChange = row.TokenAmount * -1
}
if prevForChain, ok := results[dateStr][row.OriginChain][row.TokenAddress]; ok {
prevForChain.Amount = prevForChain.Amount + amountChange
results[dateStr][row.OriginChain][row.TokenAddress] = prevForChain
}
}
if daysAgo >= 1 {
// set the result in the cache
muWarmTvlCache.Lock()
if cacheData, ok := warmTvlCache[dateStr]; !ok || len(cacheData) <= 1 || !useCache(dateStr) {
// cache does not have this date, persist it for other instances.
warmTvlCache[dateStr] = results[dateStr]
cacheNeedsUpdate = true
}
muWarmTvlCache.Unlock()
}
}
}(tbl, ctx, daysAgo)
}
intervalsWG.Wait()
if cacheNeedsUpdate {
persistInterfaceToJson(ctx, warmTvlFilePath, &muWarmTvlCache, warmTvlCache)
}
// create a set of all the keys from all dates/chains, to ensure the result objects all have the same chain keys
seenChainSet := map[string]bool{}
for _, chains := range results {
for leaving := range chains {
if _, ok := seenChainSet[leaving]; !ok {
seenChainSet[leaving] = true
}
}
}
var muResult sync.RWMutex
// ensure each chain object has all the same symbol keys:
for date, chains := range results {
// loop through seen chains
for chain := range seenChainSet {
// check that date has all the chains
if _, ok := chains[chain]; !ok {
muResult.Lock()
results[date][chain] = map[string]LockedAsset{}
muResult.Unlock()
}
}
}
return results
}
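// Accounting sketch: value is "locked" when a native asset leaves its origin
// chain and "unlocked" when it returns, so rows with
// OriginChain == LeavingChain add TokenAmount to the origin chain's daily
// total, and rows with OriginChain == DestinationChain subtract it. E.g.
// (hypothetical) 100 WETH bridged out of Ethereum followed by 40 WETH bridged
// back nets to +60 WETH locked for that day.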
// aggregates dailyTotals into a map keyed by chainId; each value maps token address to the amount locked.
func tvlSinceDate(tbl *bigtable.Table, ctx context.Context, dailyTotals map[string]map[string]map[string]LockedAsset) map[string]map[string]LockedAsset {
result := map[string]map[string]LockedAsset{}
// loop through the query results to combine cache + fresh data
for _, chains := range dailyTotals {
for chain, tokens := range chains {
// ensure the chain exists in the result map
if _, ok := result[chain]; !ok {
result[chain] = map[string]LockedAsset{}
}
for address, lockedAsset := range tokens {
amount := lockedAsset.Amount
if asset, ok := result[chain][address]; ok {
// add the amount of this symbol transferred this day to the
// amount already in the result (amount of this symbol previously transferred)
asset.Amount = asset.Amount + amount
result[chain][address] = asset
} else {
// have not seen this asset in previous days
result[chain][address] = LockedAsset{
Symbol: lockedAsset.Symbol,
Name: lockedAsset.Name,
Address: lockedAsset.Address,
CoinGeckoId: lockedAsset.CoinGeckoId,
Amount: lockedAsset.Amount,
TokenPrice: lockedAsset.TokenPrice,
TokenDecimals: lockedAsset.TokenDecimals,
}
}
}
}
}
return result
}
// returns the amount and notional value locked per origin chain and token address within the given interval
func tvlForInterval(tbl *bigtable.Table, ctx context.Context, start, end time.Time) map[string]map[string]LockedAsset {
result := map[string]map[string]LockedAsset{}
for _, chainId := range tvlChainIDs {
// query for all rows in the time range
queryResults := fetchTransferRowsInInterval(tbl, ctx, chainIDRowPrefix(chainId), start, end)
// iterate through the rows and accumulate the amount and notional changes
for _, row := range queryResults {
if _, ok := result[row.OriginChain]; !ok {
result[row.OriginChain] = map[string]LockedAsset{}
}
if row.TokenAddress == "" {
// if the token address is missing, skip
continue
}
if _, ok := result[row.OriginChain][row.TokenAddress]; !ok {
result[row.OriginChain][row.TokenAddress] = LockedAsset{
Symbol: row.TokenSymbol,
Name: row.TokenName,
Address: row.TokenAddress,
CoinGeckoId: row.CoinGeckoCoinId,
Amount: 0,
Notional: 0,
TokenDecimals: row.TokenDecimals,
}
}
var amountChange float64
// track notional changes for the previous 24 hour delta
var notionalChange float64
if row.OriginChain == row.LeavingChain {
// this is a native asset leaving its chain:
// add this to tvl of originChain
amountChange = row.TokenAmount
notionalChange = row.Notional
}
if row.OriginChain == row.DestinationChain {
// this is a native asset going back to its chain:
// subtract this from tvl of originChain
amountChange = row.TokenAmount * -1
notionalChange = row.Notional * -1
}
if prevForChain, ok := result[row.OriginChain][row.TokenAddress]; ok {
prevForChain.Amount = prevForChain.Amount + amountChange
prevForChain.Notional = prevForChain.Notional + notionalChange
result[row.OriginChain][row.TokenAddress] = prevForChain
}
if prevAllChains, ok := result["*"][row.TokenAddress]; ok {
prevAllChains.Amount = prevAllChains.Amount + amountChange
prevAllChains.Notional = prevAllChains.Notional + notionalChange
result["*"][row.TokenAddress] = prevAllChains
}
}
}
return result
}
// calculates the value locked
func ComputeTVL(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
ctx := context.Background()
now := time.Now().UTC()
todaysDateStr := now.Format("2006-01-02")
getNotionalAmounts := func(ctx context.Context, tokensLocked map[string]map[string]LockedAsset) map[string]map[string]LockedAsset {
// create a map of all the coinIds
seenCoinIds := map[string]bool{}
for _, tokens := range tokensLocked {
for _, lockedAsset := range tokens {
coinId := lockedAsset.CoinGeckoId
if coinId != "*" {
seenCoinIds[coinId] = true
}
}
}
coinIdSet := []string{}
for coinId := range seenCoinIds {
coinIdSet = append(coinIdSet, coinId)
}
tokenPrices := fetchTokenPrices(ctx, coinIdSet)
notionalLocked := map[string]map[string]LockedAsset{}
// initialize the struct that will hold the total for all chains, all assets
notionalLocked["*"] = map[string]LockedAsset{}
notionalLocked["*"]["*"] = LockedAsset{
Symbol: "*",
Name: "all",
Notional: 0,
}
for chain, tokens := range tokensLocked {
notionalLocked[chain] = map[string]LockedAsset{}
notionalLocked[chain]["*"] = LockedAsset{
Symbol: "all",
Address: "*",
}
for address, lockedAsset := range tokens {
if !isTokenActive(chain, address, todaysDateStr) {
continue
}
coinId := lockedAsset.CoinGeckoId
amount := lockedAsset.Amount
if address != "*" {
currentPrice := tokenPrices[coinId]
notionalVal := amount * currentPrice
if notionalVal <= 0 {
continue
}
notionalLocked[chain][address] = LockedAsset{
Symbol: lockedAsset.Symbol,
Name: lockedAsset.Name,
Address: lockedAsset.Address,
CoinGeckoId: lockedAsset.CoinGeckoId,
Amount: lockedAsset.Amount,
Notional: roundToTwoDecimalPlaces(notionalVal),
TokenPrice: currentPrice,
TokenDecimals: lockedAsset.TokenDecimals,
}
if asset, ok := notionalLocked[chain]["*"]; ok {
asset.Notional = asset.Notional + notionalVal
notionalLocked[chain]["*"] = asset
}
}
}
// add the chain total to the overall total
if all, ok := notionalLocked["*"]["*"]; ok {
all.Notional += notionalLocked[chain]["*"].Notional
notionalLocked["*"]["*"] = all
}
// round the amount for chain/*
if asset, ok := notionalLocked[chain]["*"]; ok {
asset.Notional = roundToTwoDecimalPlaces(asset.Notional)
notionalLocked[chain]["*"] = asset
}
}
return notionalLocked
}
var wg sync.WaitGroup
// delta of last 24 hours
last24HourDelta := map[string]map[string]LockedAsset{}
wg.Add(1)
go func() {
last24HourInterval := -time.Duration(24) * time.Hour
start := now.Add(last24HourInterval)
defer wg.Done()
transfers := tvlForInterval(tbl, ctx, start, now)
last24HourDelta = getNotionalAmounts(ctx, transfers)
}()
// total since release
allTimeLocked := map[string]map[string]LockedAsset{}
wg.Add(1)
go func() {
defer wg.Done()
dailyTotalsAllTime := tvlInInterval(tbl, ctx, releaseDay)
transfers := tvlSinceDate(tbl, ctx, dailyTotalsAllTime)
allTimeLocked = getNotionalAmounts(ctx, transfers)
}()
wg.Wait()
result := &tvlResult{
Last24HoursChange: last24HourDelta,
AllTime: allTimeLocked,
}
persistInterfaceToJson(ctx, notionalTvlResultPath, &muWarmTvlCache, result)
jsonBytes, err := json.Marshal(result)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}
func TVL(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
var last24Hours string
// allow GET requests with querystring params, or POST requests with json body.
switch r.Method {
case http.MethodGet:
queryParams := r.URL.Query()
last24Hours = queryParams.Get("last24Hours")
case http.MethodPost:
// declare request body properties
var d struct {
Last24Hours string `json:"last24Hours"`
}
// deserialize request body
if err := json.NewDecoder(r.Body).Decode(&d); err != nil {
switch err {
case io.EOF:
// do nothing, empty body is ok
default:
log.Printf("json.NewDecoder: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
last24Hours = d.Last24Hours
default:
http.Error(w, "405 - Method Not Allowed", http.StatusMethodNotAllowed)
log.Println("Method Not Allowed")
return
}
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
var cachedResult tvlResult
loadJsonToInterface(ctx, notionalTvlResultPath, &muWarmTvlCache, &cachedResult)
// delta of last 24 hours
var last24HourDelta = map[string]map[string]LockedAsset{}
if last24Hours != "" {
last24HourDelta = cachedResult.Last24HoursChange
}
result := &tvlResult{
Last24HoursChange: last24HourDelta,
AllTime: cachedResult.AllTime,
}
jsonBytes, err := json.Marshal(result)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}
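// Request sketch (illustrative; the mount path is an assumption): read the
// cached all-time TVL, optionally including the 24-hour delta:
//
//   GET /tvl?last24Hours=true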


@@ -1,295 +0,0 @@
package p
import (
"context"
"encoding/binary"
"encoding/hex"
"fmt"
"log"
"math"
"math/big"
"strconv"
"strings"
"time"
"github.com/cosmos/cosmos-sdk/types/bech32"
"github.com/wormhole-foundation/wormhole/sdk/vaa"
"cloud.google.com/go/bigtable"
"github.com/gagliardetto/solana-go"
)
var tokenAddressExceptions = map[string]string{
// terra native tokens do not have a bech32 address like cw20s do, handle them manually.
// terra (classic)
"0100000000000000000000000000000000000000000000000000000075757364": "uusd",
"010000000000000000000000000000000000000000000000000000756c756e61": "uluna",
// near
"0000000000000000000000000000000000000000000000000000000000000000": "near",
"67499b7b8f58eaeb3cd81aea1d1ce9f7f722fd7750ceb2bed13e255073c25e2a": "token.sweat",
// terra2
"01fa6c6fbc36d8c245b0a852a43eb5d644e8b4c477b27bfab9537c10945939da": "uluna",
// xpla
"017ce8aec5af3bb3ac0158d49771d4c8feba2e54a614fa2a1c0c95e9c4c37185": "axpla",
}
// returns a pair of times before and after the input time.
// useful for creating a time range for querying historical price APIs.
func rangeFromTime(t time.Time, hours int) (start time.Time, end time.Time) {
duration := time.Duration(hours) * time.Hour
return t.Add(-duration), t.Add(duration)
}
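// Example (illustrative, not part of the original file): a 12-hour pad on each
// side yields a 24-hour window centered on a transfer timestamp, which suits
// daily-granularity historical price lookups.
func examplePriceWindow(t time.Time) (start time.Time, end time.Time) {
return rangeFromTime(t, 12)
}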
func transformHexAddressToNative(chain vaa.ChainID, address string) string {
switch chain {
case vaa.ChainIDSolana:
addr, err := hex.DecodeString(address)
if err != nil {
log.Fatalf("failed to decode solana string: %v", err)
}
if len(addr) != 32 {
log.Fatalf("address must be 32 bytes. address: %v", address)
}
solPk := solana.PublicKeyFromBytes(addr[:])
return solPk.String()
case vaa.ChainIDEthereum,
vaa.ChainIDBSC,
vaa.ChainIDPolygon,
vaa.ChainIDAvalanche,
vaa.ChainIDOasis,
vaa.ChainIDAurora,
vaa.ChainIDFantom,
vaa.ChainIDKarura,
vaa.ChainIDAcala,
vaa.ChainIDKlaytn,
vaa.ChainIDCelo,
vaa.ChainIDMoonbeam:
addr := fmt.Sprintf("0x%v", address[(len(address)-40):])
return addr
case vaa.ChainIDTerra:
// handle terra native assets manually
if val, ok := tokenAddressExceptions[address]; ok {
return val
}
return humanAddressTerra(address)
case vaa.ChainIDAlgorand:
assetId := big.Int{}
_, ok := assetId.SetString(address, 16)
if ok {
return assetId.String()
}
return address
case vaa.ChainIDNear:
if val, ok := tokenAddressExceptions[address]; ok {
return val
}
// TODO for now use hex/wormhole address string, we'll need to do a contract query to get the native address
return address
case vaa.ChainIDTerra2:
// handle terra2 native assets manually
if val, ok := tokenAddressExceptions[address]; ok {
return val
}
// terra2 has 32 byte addresses for contracts and 20 for wallets
if isLikely20ByteTerra(address) {
return humanAddressTerra(address)
}
// TODO for now use hex/wormhole address string, we'll need to do a contract query to get the native address
return address
case vaa.ChainIDAptos:
if val, ok := tokenAddressExceptions[address]; ok {
return val
}
// TODO for now use hex/wormhole address string, we'll need to do a contract query to get the native address
return address
case vaa.ChainIDXpla:
if val, ok := tokenAddressExceptions[address]; ok {
return val
}
// TODO for now use hex/wormhole address string, we'll need to do a contract query to get the native address
return address
default:
log.Println("cannot process address for unknown chain: ", chain)
return ""
}
}
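// Example (illustrative): for EVM chains the 32-byte Wormhole address is
// narrowed to its final 20 bytes and prefixed with "0x". The input below is
// WETH left-padded to 32 bytes (a hypothetical call, not from this file):
//
//   transformHexAddressToNative(vaa.ChainIDEthereum,
//       "000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc2")
//   // -> "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"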
func isLikely20ByteTerra(address string) bool {
return strings.HasPrefix(address, "00000000000000000000")
}
func humanAddressTerra(address string) string {
trimmed := address[(len(address) - 40):]
data, decodeErr := hex.DecodeString(trimmed)
if decodeErr != nil {
fmt.Printf("failed to decode unpadded string: %v\n", decodeErr)
}
encodedAddr, convertErr := bech32.ConvertAndEncode("terra", data)
if convertErr != nil {
fmt.Println("convert error from cosmos bech32. err", convertErr)
}
return encodedAddr
}
// ProcessTransfer is triggered by a PubSub message, once a TokenTransferPayload is written to a row.
func ProcessTransfer(ctx context.Context, m PubSubMessage) error {
data := string(m.Data)
if data == "" {
return fmt.Errorf("no data to process in message")
}
signedVaa, err := vaa.Unmarshal(m.Data)
if err != nil {
log.Println("failed Unmarshaling VAA")
return err
}
// create the bigtable identifier from the VAA data
rowKey := makeRowKey(signedVaa.EmitterChain, signedVaa.EmitterAddress, signedVaa.Sequence)
row, err := tbl.ReadRow(ctx, rowKey)
if err != nil {
log.Fatalf("Could not read row with key %s: %v", rowKey, err)
}
// get the payload data for this transfer
var tokenAddress string
var tokenChain vaa.ChainID
var amount string
for _, item := range row[columnFamilies[2]] {
switch item.Column {
case "TokenTransferPayload:OriginAddress":
tokenAddress = string(item.Value)
case "TokenTransferPayload:OriginChain":
chainInt, _ := strconv.ParseUint(string(item.Value), 10, 32)
chainID := vaa.ChainID(chainInt)
tokenChain = chainID
case "TokenTransferPayload:Amount":
amount = string(item.Value)
}
}
// lookup the asset meta for this transfer.
// find an AssetMeta message that matches the OriginChain & TokenAddress of the transfer
var result bigtable.Row
chainIDPrefix := fmt.Sprintf("%d:", tokenChain) // rowkey prefix for the token's chain, i.e. "2:", so "2" does not also match chains 20-29
queryErr := tbl.ReadRows(ctx, bigtable.PrefixRange(chainIDPrefix), func(row bigtable.Row) bool {
result = row
return true
}, bigtable.RowFilter(
bigtable.ChainFilters(
bigtable.FamilyFilter(columnFamilies[3]),
bigtable.ColumnFilter("TokenAddress"),
bigtable.ValueFilter(tokenAddress),
),
))
if queryErr != nil {
log.Fatalf("failed to read rows: %v", queryErr)
}
if result == nil {
log.Printf("did not find AssetMeta row for tokenAddress: %v. Transfer rowKey: %v\n", tokenAddress, rowKey)
return fmt.Errorf("did not find AssetMeta row for tokenAddress %v", tokenAddress)
}
// now get the entire row
assetMetaRow, assetMetaErr := tbl.ReadRow(ctx, result.Key(), bigtable.RowFilter(bigtable.LatestNFilter(1)))
if assetMetaErr != nil {
log.Fatalf("Could not read row with key %s: %v", rowKey, assetMetaErr)
}
if _, ok := assetMetaRow[columnFamilies[3]]; !ok {
log.Println("did not find AssetMeta matching TokenAddress", tokenAddress)
return fmt.Errorf("did not find AssetMeta matching TokenAddress %v", tokenAddress)
}
// get AssetMeta values
var decimals int
var symbol string
var name string
var coinId string
var nativeTokenAddress string
for _, item := range assetMetaRow[columnFamilies[3]] {
switch item.Column {
case "AssetMetaPayload:Decimals":
decimalStr := string(item.Value)
dec, err := strconv.Atoi(decimalStr)
if err != nil {
log.Fatalf("failed parsing decimals of row %v", assetMetaRow.Key())
}
decimals = dec
case "AssetMetaPayload:Symbol":
symbol = string(item.Value)
case "AssetMetaPayload:Name":
name = string(item.Value)
case "AssetMetaPayload:CoinGeckoCoinId":
coinId = string(item.Value)
case "AssetMetaPayload:NativeAddress":
nativeTokenAddress = string(item.Value)
}
}
// transfers created by the bridge UI will have at most 8 decimals.
if decimals > 8 {
decimals = 8
}
// left-pad the amount so it is at least one digit longer than decimals,
// so the slicing below always leaves a non-empty integer part (possibly "0")
if len(amount) <= decimals {
amount = fmt.Sprintf("%0*v", decimals+1, amount)
}
intAmount := amount[:len(amount)-decimals]
decAmount := amount[len(amount)-decimals:]
calculatedAmount := intAmount + "." + decAmount
timestamp := signedVaa.Timestamp.UTC()
price, _ := fetchCoinGeckoPrice(coinId, timestamp)
// convert the amount string so it can be used for math
amountFloat, convErr := strconv.ParseFloat(calculatedAmount, 64)
if convErr != nil {
log.Fatalf("failed parsing calculatedAmount '%v' to float64. err %v", calculatedAmount, convErr)
}
notional := amountFloat * price
notionalStr := fmt.Sprintf("%f", notional)
log.Printf("processed transfer of $%0.2f = %v %v * $%0.2f\n", notional, calculatedAmount, symbol, price)
// write to BigTable
colFam := columnFamilies[5]
mutation := bigtable.NewMutation()
ts := bigtable.Now()
mutation.Set(colFam, "Amount", ts, []byte(calculatedAmount))
mutation.Set(colFam, "Decimals", ts, []byte(fmt.Sprint(decimals)))
var notionalbuf [8]byte
binary.BigEndian.PutUint64(notionalbuf[:], math.Float64bits(notional))
mutation.Set(colFam, "NotionalUSD", ts, notionalbuf[:])
mutation.Set(colFam, "NotionalUSDStr", ts, []byte(notionalStr))
var priceBuf [8]byte
binary.BigEndian.PutUint64(priceBuf[:], math.Float64bits(price))
mutation.Set(colFam, "TokenPriceUSD", ts, priceBuf[:])
mutation.Set(colFam, "TokenPriceUSDStr", ts, []byte(fmt.Sprintf("%f", price)))
mutation.Set(colFam, "TransferTimestamp", ts, []byte(timestamp.String()))
mutation.Set(colFam, "OriginSymbol", ts, []byte(symbol))
mutation.Set(colFam, "OriginName", ts, []byte(name))
mutation.Set(colFam, "OriginTokenAddress", ts, []byte(nativeTokenAddress))
mutation.Set(colFam, "CoinGeckoCoinId", ts, []byte(coinId))
// TODO - find the symbol & name of the asset on the target chain?
// mutation.Set(colFam, "TargetSymbol", ts, []byte())
// mutation.Set(colFam, "TargetName", ts, []byte())
// conditional mutation - don't write if row already has an Amount value.
filter := bigtable.ChainFilters(
bigtable.FamilyFilter(colFam),
bigtable.ColumnFilter("Amount"))
conditionalMutation := bigtable.NewCondMutation(filter, nil, mutation)
writeErr := tbl.Apply(ctx, rowKey, conditionalMutation)
if writeErr != nil {
log.Printf("Failed to write TokenTransferDetails for %v to BigTable. err: %v\n", rowKey, writeErr)
return writeErr
}
// success
return nil
}
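// A minimal sketch of the amount scaling above, assuming a transfer amount of
// "1234567" with 6 decimals: intAmount = "1", decAmount = "234567", so the
// stored Amount becomes "1.234567" and NotionalUSD = 1.234567 * price.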

View File

@ -1,446 +0,0 @@
package p
import (
"bytes"
"context"
"encoding/binary"
"encoding/hex"
"fmt"
"log"
"strings"
"sync"
"cloud.google.com/go/bigtable"
"cloud.google.com/go/pubsub"
"github.com/holiman/uint256"
"github.com/wormhole-foundation/wormhole/sdk/vaa"
)
type PubSubMessage struct {
Data []byte `json:"data"`
}
// The keys are emitterAddress hex values, so that we can quickly check a message against the index to see if it
// meets the criteria for saving payload info: if it is a token transfer, or an NFT transfer.
var nftEmitters = map[string]string{
// mainnet
"0def15a24423e1edd1a5ab16f557b9060303ddbab8c803d2ee48f4b78a1cfd6b": "WnFt12ZrnzZrFZkt2xsNsaNWoQribnuQ5B5FrDbwDhD", // solana
"0000000000000000000000006ffd7ede62328b3af38fcd61461bbfc52f5651fe": "0x6FFd7EdE62328b3Af38FCD61461Bbfc52F5651fE", // ethereum
"0000000000000000000000005a58505a96d1dbf8df91cb21b54419fc36e93fde": "0x5a58505a96D1dbf8dF91cB21B54419FC36e93fdE", // bsc
"00000000000000000000000090bbd86a6fe93d3bc3ed6335935447e75fab7fcf": "0x90bbd86a6fe93d3bc3ed6335935447e75fab7fcf", // polygon
"000000000000000000000000f7B6737Ca9c4e08aE573F75A97B73D7a813f5De5": "0xf7B6737Ca9c4e08aE573F75A97B73D7a813f5De5", // avalanche
"00000000000000000000000004952d522ff217f40b5ef3cbf659eca7b952a6c1": "0x04952d522ff217f40b5ef3cbf659eca7b952a6c1", // oasis
"000000000000000000000000A9c7119aBDa80d4a4E0C06C8F4d8cF5893234535": "0xA9c7119aBDa80d4a4E0C06C8F4d8cF5893234535", // fantom
"0000000000000000000000006dcC0484472523ed9Cdc017F711Bcbf909789284": "0x6dcC0484472523ed9Cdc017F711Bcbf909789284", // aurora
"000000000000000000000000b91e3638F82A1fACb28690b37e3aAE45d2c33808": "0xb91e3638F82A1fACb28690b37e3aAE45d2c33808", // acala & karura
"000000000000000000000000A6A377d75ca5c9052c9a77ED1e865Cc25Bd97bf3": "0xA6A377d75ca5c9052c9a77ED1e865Cc25Bd97bf3", // celo
"0000000000000000000000003c3c561757BAa0b78c5C025CdEAa4ee24C1dFfEf": "0x3c3c561757BAa0b78c5C025CdEAa4ee24C1dFfEf", // klaytn
"000000000000000000000000453cfBe096C0f8D763E8C5F24B441097d577bdE2": "0x453cfBe096C0f8D763E8C5F24B441097d577bdE2", // moonbeam
// devnet
"96ee982293251b48729804c8e8b24b553eb6b887867024948d2236fd37a577ab": "NFTWqJR8YnRVqPDvTJrYuLrQDitTG5AScqbeghi4zSA", // solana
"00000000000000000000000026b4afb60d6c903165150c6f0aa14f8016be4aec": "0x26b4afb60d6c903165150c6f0aa14f8016be4aec", // ethereum goerli
"0000000000000000000000000fe5c51f539a651152ae461086d733777a54a134": "terra1plju286nnfj3z54wgcggd4enwaa9fgf5kgrgzl", // terra
"000000000000000000000000cD16E5613EF35599dc82B24Cb45B5A93D779f1EE": "0xcD16E5613EF35599dc82B24Cb45B5A93D779f1EE", // bsc
"00000000000000000000000051a02d0dcb5e52F5b92bdAA38FA013C91c7309A9": "0x51a02d0dcb5e52F5b92bdAA38FA013C91c7309A9", // polygon
"000000000000000000000000D601BAf2EEE3C028344471684F6b27E789D9075D": "0xD601BAf2EEE3C028344471684F6b27E789D9075D", // avalanche
"000000000000000000000000C5c25B41AB0b797571620F5204Afa116A44c0ebA": "0xC5c25B41AB0b797571620F5204Afa116A44c0ebA", // oasis
}
var muNFTEmitters sync.RWMutex
// NFTEmitters will be populated with lowercase addresses
var NFTEmitters = map[string]string{}
var tokenTransferEmitters = map[string]string{
// mainnet
"ec7372995d5cc8732397fb0ad35c0121e0eaa90d26f828a534cab54391b3a4f5": "wormDTUJ6AWPNvk59vGQbDvGJmqbDTdgWgAqcLBCgUb", // solana
"0000000000000000000000003ee18b2214aff97000d974cf647e7c347e8fa585": "0x3ee18B2214AFF97000D974cf647E7C347E8fa585", // ethereum
"0000000000000000000000007cf7b764e38a0a5e967972c1df77d432510564e2": "terra10nmmwe8r3g99a9newtqa7a75xfgs2e8z87r2sf", // terra
"000000000000000000000000b6f6d86a8f9879a9c87f643768d9efc38c1da6e7": "0xB6F6D86a8f9879A9c87f643768d9efc38c1Da6E7", // bsc
"0000000000000000000000005a58505a96d1dbf8df91cb21b54419fc36e93fde": "0x5a58505a96d1dbf8df91cb21b54419fc36e93fde", // polygon
"0000000000000000000000000e082f06ff657d94310cb8ce8b0d9a04541d8052": "0x0e082F06FF657D94310cB8cE8B0D9a04541d8052", // avalanche
"0000000000000000000000005848c791e09901b40a9ef749f2a6735b418d7564": "0x5848c791e09901b40a9ef749f2a6735b418d7564", // oasis
"0000000000000000000000007c9fc5741288cdfdd83ceb07f3ea7e22618d79d2": "0x7c9fc5741288cdfdd83ceb07f3ea7e22618d79d2", // fantom
"00000000000000000000000051b5123a7b0F9b2bA265f9c4C8de7D78D52f510F": "0x51b5123a7b0F9b2bA265f9c4C8de7D78D52f510F", // aurora
"000000000000000000000000ae9d7fe007b3327AA64A32824Aaac52C42a6E624": "0xae9d7fe007b3327AA64A32824Aaac52C42a6E624", // acala & karura
"0000000000000000000000005b08ac39EAED75c0439FC750d9FE7E1F9dD0193F": "0x5b08ac39EAED75c0439FC750d9FE7E1F9dD0193F", // klaytn
"000000000000000000000000796Dff6D74F3E27060B71255Fe517BFb23C93eed": "0x796Dff6D74F3E27060B71255Fe517BFb23C93eed", // celo
"148410499d3fcda4dcfd68a1ebfcdddda16ab28326448d4aae4d2f0465cdfcb7": "contract.portalbridge.near", // near
"000000000000000000000000B1731c586ca89a23809861c6103F0b96B3F57D92": "0xB1731c586ca89a23809861c6103F0b96B3F57D92", // moonbeam
"a463ad028fb79679cfc8ce1efba35ac0e77b35080a1abe9bebe83461f176b0a3": "terra153366q50k7t8nn7gec00hg66crnhkdggpgdtaxltaq6xrutkkz3s992fw9", // terra2
"67e93fa6c8ac5c819990aa7340c0c16b508abb1178be9b30d024b8ac25193d45": "842126029", // algorand
"0000000000000000000000000000000000000000000000000000000000000001": "0x576410486a2da45eee6c949c995670112ddf2fbeedab20350d506328eefc9d4f", // aptos
"8f9cf727175353b17a5f574270e370776123d90fd74956ae4277962b4fdee24c": "xpla137w0wfch2dfmz7jl2ap8pcmswasj8kg06ay4dtjzw7tzkn77ufxqfw7acv", //xpla
// devnet
"c69a1b1a65dd336bf1df6a77afb501fc25db7fc0938cb08595a9ef473265cb4f": "B6RHG3mfcckmrYN1UhmJzyS1XX3fZKbkeUcpJe9Sy3FE", // solana
"0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16": "0x0290fb167208af455bb137780163b7b7a9a10c16", // ethereum goerli
"000000000000000000000000784999135aaa8a3ca5914468852fdddbddd8789d": "terra10pyejy66429refv3g35g2t7am0was7ya7kz2a4", // terra
"0000000000000000000000009dcF9D205C9De35334D646BeE44b2D2859712A09": "0x9dcF9D205C9De35334D646BeE44b2D2859712A09", // bsc
"000000000000000000000000377D55a7928c046E18eEbb61977e714d2a76472a": "0x377D55a7928c046E18eEbb61977e714d2a76472a", // polygon
"00000000000000000000000061E44E506Ca5659E6c0bba9b678586fA2d729756": "0x61E44E506Ca5659E6c0bba9b678586fA2d729756", // avalanche
"00000000000000000000000088d8004A9BdbfD9D28090A02010C19897a29605c": "0x88d8004A9BdbfD9D28090A02010C19897a29605c", // oasis
"9e28beafa966b2407bffb0d48651e94972a56e69f3c0897d9e8facbdaeb98386": "terra1nc5tatafv6eyq7llkr2gv50ff9e22mnf70qgjlv737ktmt4eswrquka9l6", // terra2
"8edf5b0e108c3a1a0a4b704cc89591f2ad8d50df24e991567e640ed720a94be2": "6", // algorand
}
var muTokenTransferEmitters sync.RWMutex
// TokenTransferEmitters will be populated with lowercase addresses
var TokenTransferEmitters = map[string]string{}
// this address is an emitter for BSC and Polygon.
var sharedEmitterAddress = "0000000000000000000000005a58505a96d1dbf8df91cb21b54419fc36e93fde"
type (
TokenTransfer struct {
PayloadId uint8
Amount uint256.Int
OriginAddress [32]byte
OriginChain uint16
TargetAddress [32]byte
TargetChain uint16
}
NFTTransfer struct {
PayloadId uint8
OriginAddress [32]byte
OriginChain uint16
Symbol [32]byte
Name [32]byte
TokenId uint256.Int
URI []byte
TargetAddress [32]byte
TargetChain uint16
}
AssetMeta struct {
PayloadId uint8
TokenAddress [32]byte
TokenChain uint16
Decimals uint8
Symbol [32]byte
Name [32]byte
}
)
func DecodeTokenTransfer(data []byte) (*TokenTransfer, error) {
tt := &TokenTransfer{}
tt.PayloadId = data[0]
reader := bytes.NewReader(data[1:])
if err := binary.Read(reader, binary.BigEndian, &tt.Amount); err != nil {
return nil, fmt.Errorf("failed to read Amount: %w", err)
}
if err := binary.Read(reader, binary.BigEndian, &tt.OriginAddress); err != nil {
return nil, fmt.Errorf("failed to read OriginAddress: %w", err)
}
if err := binary.Read(reader, binary.BigEndian, &tt.OriginChain); err != nil {
return nil, fmt.Errorf("failed to read OriginChain: %w", err)
}
if err := binary.Read(reader, binary.BigEndian, &tt.TargetAddress); err != nil {
return nil, fmt.Errorf("failed to read TargetAddress: %w", err)
}
if err := binary.Read(reader, binary.BigEndian, &tt.TargetChain); err != nil {
return nil, fmt.Errorf("failed to read TargetChain: %w", err)
}
return tt, nil
}
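// Byte layout consumed by DecodeTokenTransfer (offsets relative to the payload):
//	0        PayloadId (uint8)
//	1..32    Amount (uint256, big-endian)
//	33..64   OriginAddress ([32]byte)
//	65..66   OriginChain (uint16, big-endian)
//	67..98   TargetAddress ([32]byte)
//	99..100  TargetChain (uint16, big-endian)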
func DecodeNFTTransfer(data []byte) (*NFTTransfer, error) {
nt := &NFTTransfer{}
nt.PayloadId = data[0]
reader := bytes.NewReader(data[1:])
if err := binary.Read(reader, binary.BigEndian, &nt.OriginAddress); err != nil {
return nil, fmt.Errorf("failed to read OriginAddress: %w", err)
}
if err := binary.Read(reader, binary.BigEndian, &nt.OriginChain); err != nil {
return nil, fmt.Errorf("failed to read OriginChain: %w", err)
}
if err := binary.Read(reader, binary.BigEndian, &nt.Symbol); err != nil {
return nil, fmt.Errorf("failed to read Symbol: %w", err)
}
if err := binary.Read(reader, binary.BigEndian, &nt.Name); err != nil {
return nil, fmt.Errorf("failed to read Name: %w", err)
}
if err := binary.Read(reader, binary.BigEndian, &nt.TokenId); err != nil {
return nil, fmt.Errorf("failed to read TokenId: %w", err)
}
// uri len
uriLen, er := reader.ReadByte()
if er != nil {
return nil, fmt.Errorf("failed to read URI length")
}
// uri
uri := make([]byte, int(uriLen))
n, err := reader.Read(uri)
if err != nil || n == 0 {
return nil, fmt.Errorf("failed to read uri [%d]: %w", n, err)
}
nt.URI = uri[:n]
if err := binary.Read(reader, binary.BigEndian, &nt.TargetAddress); err != nil {
return nil, fmt.Errorf("failed to read : %w", err)
}
if err := binary.Read(reader, binary.BigEndian, &nt.TargetChain); err != nil {
return nil, fmt.Errorf("failed to read : %w", err)
}
return nt, nil
}
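// Byte layout consumed by DecodeNFTTransfer (offsets relative to the payload):
//	0        PayloadId (uint8)
//	1..32    OriginAddress ([32]byte)
//	33..34   OriginChain (uint16, big-endian)
//	35..66   Symbol ([32]byte)
//	67..98   Name ([32]byte)
//	99..130  TokenId (uint256, big-endian)
//	131      URI length (uint8)
//	132..    URI (variable), then TargetAddress ([32]byte) and TargetChain (uint16)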
func DecodeAssetMeta(data []byte) (*AssetMeta, error) {
am := &AssetMeta{}
am.PayloadId = data[0]
reader := bytes.NewReader(data[1:])
tokenAddress := [32]byte{}
if n, err := reader.Read(tokenAddress[:]); err != nil || n != 32 {
return nil, fmt.Errorf("failed to read TokenAddress [%d]: %w", n, err)
}
am.TokenAddress = tokenAddress
if err := binary.Read(reader, binary.BigEndian, &am.TokenChain); err != nil {
return nil, fmt.Errorf("failed to read TokenChain: %w", err)
}
if err := binary.Read(reader, binary.BigEndian, &am.Decimals); err != nil {
return nil, fmt.Errorf("failed to read Decimals: %w", err)
}
if err := binary.Read(reader, binary.BigEndian, &am.Symbol); err != nil {
return nil, fmt.Errorf("failed to read Symbol: %w", err)
}
if err := binary.Read(reader, binary.BigEndian, &am.Name); err != nil {
return nil, fmt.Errorf("failed to read Name: %w", err)
}
return am, nil
}
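// Byte layout consumed by DecodeAssetMeta (offsets relative to the payload):
//	0        PayloadId (uint8)
//	1..32    TokenAddress ([32]byte)
//	33..34   TokenChain (uint16, big-endian)
//	35       Decimals (uint8)
//	36..67   Symbol ([32]byte)
//	68..99   Name ([32]byte)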
// TEMP: until this https://forge.certus.one/c/wormhole/+/1850 lands
func makeRowKey(emitterChain vaa.ChainID, emitterAddress vaa.Address, sequence uint64) string {
// left-pad the sequence with zeros to 16 characters, because bigtable keys are stored lexicographically
return fmt.Sprintf("%d:%s:%016d", emitterChain, emitterAddress, sequence)
}
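// For example (illustrative): makeRowKey(vaa.ChainIDEthereum, addr, 42) yields
// "2:<64-hex-char-address>:0000000000000042", so rows for a given emitter sort
// by sequence number.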
func writePayloadToBigTable(ctx context.Context, rowKey string, colFam string, mutation *bigtable.Mutation, forceWrite bool) error {
mut := mutation
if !forceWrite {
filter := bigtable.ChainFilters(
bigtable.FamilyFilter(colFam),
bigtable.ColumnFilter("PayloadId"))
mut = bigtable.NewCondMutation(filter, nil, mutation)
}
err := tbl.Apply(ctx, rowKey, mut)
if err != nil {
log.Printf("Failed to write payload for %v to BigTable. err: %v", rowKey, err)
return err
}
return nil
}
func TrimUnicodeFromByteArray(b []byte) []byte {
// Escaped Unicode that has been observed in payload's token names and symbol:
null := "\u0000"
start := "\u0002"
ack := "\u0006"
tab := "\u0009"
control := "\u0012"
return bytes.Trim(b, null+start+ack+tab+control)
}
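// e.g. a fixed-width [32]byte Symbol of "WETH" padded with NUL bytes trims to "WETH".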
func addReceiverAddressToMutation(mut *bigtable.Mutation, ts bigtable.Timestamp, chainID uint16, hexAddress string) {
nativeAddress := transformHexAddressToNative(vaa.ChainID(chainID), hexAddress)
if vaa.ChainID(chainID) == vaa.ChainIDSolana {
nativeAddress = fetchSolanaAccountOwner(nativeAddress)
}
if nativeAddress != "" {
mut.Set(columnFamilies[6], "ReceiverAddress", ts, []byte(nativeAddress))
}
}
// ProcessVAA is triggered by a PubSub message, emitted after a row is saved to BigTable by guardiand
func ProcessVAA(ctx context.Context, m PubSubMessage) error {
muNFTEmitters.Lock()
if len(NFTEmitters) == 0 {
for k, v := range nftEmitters {
NFTEmitters[strings.ToLower(k)] = strings.ToLower(v)
}
}
muNFTEmitters.Unlock()
muTokenTransferEmitters.Lock()
if len(TokenTransferEmitters) == 0 {
for k, v := range tokenTransferEmitters {
TokenTransferEmitters[strings.ToLower(k)] = strings.ToLower(v)
}
}
muTokenTransferEmitters.Unlock()
data := string(m.Data)
if data == "" {
return fmt.Errorf("no data to process in message")
}
signedVaa, err := vaa.Unmarshal(m.Data)
if err != nil {
log.Println("failed Unmarshaling VAA")
return err
}
// create the bigtable identifier from the VAA data
rowKey := makeRowKey(signedVaa.EmitterChain, signedVaa.EmitterAddress, signedVaa.Sequence)
emitterHex := strings.ToLower(signedVaa.EmitterAddress.String())
payloadId := int(signedVaa.Payload[0])
// BSC and Polygon have the same contract address: "0x5a58505a96d1dbf8df91cb21b54419fc36e93fde".
// The BSC contract is the NFT emitter address.
// The Polygon contract is the token transfer emitter address.
// Due to that, the check below excludes messages where chain == 4 (BSC) uses the shared address, so the block only runs for genuine token transfers.
if _, ok := TokenTransferEmitters[emitterHex]; ok && !(signedVaa.EmitterChain == 4 && signedVaa.EmitterAddress.String() == sharedEmitterAddress) {
// figure out if it's a transfer or asset metadata
if payloadId == 1 {
// token transfer
payload, decodeErr := DecodeTokenTransfer(signedVaa.Payload)
if decodeErr != nil {
log.Println("failed decoding payload for row ", rowKey)
return decodeErr
}
log.Printf("Processing Transfer: Amount %v\n", fmt.Sprint(payload.Amount[3]))
// save payload to bigtable, then publish a new PubSub message for further processing
colFam := columnFamilies[2]
mutation := bigtable.NewMutation()
ts := bigtable.Now()
mutation.Set(colFam, "PayloadId", ts, []byte(fmt.Sprint(payload.PayloadId)))
// TODO: find a better way of representing amount as a string
amount := []byte(fmt.Sprint(payload.Amount[3]))
if payload.Amount[2] != 0 {
log.Printf("payload.Amount is larger than uint64 for row %v", rowKey)
amount = payload.Amount.Bytes()
}
targetAddressHex := hex.EncodeToString(payload.TargetAddress[:])
mutation.Set(colFam, "Amount", ts, amount)
mutation.Set(colFam, "OriginAddress", ts, []byte(hex.EncodeToString(payload.OriginAddress[:])))
mutation.Set(colFam, "OriginChain", ts, []byte(fmt.Sprint(payload.OriginChain)))
mutation.Set(colFam, "TargetAddress", ts, []byte(targetAddressHex))
mutation.Set(colFam, "TargetChain", ts, []byte(fmt.Sprint(payload.TargetChain)))
addReceiverAddressToMutation(mutation, ts, payload.TargetChain, targetAddressHex)
writeErr := writePayloadToBigTable(ctx, rowKey, colFam, mutation, false)
if writeErr != nil {
return writeErr
}
// now that the payload is saved to BigTable,
// pass along the message to the topic that will calculate TokenTransferDetails
pubSubTokenTransferDetailsTopic.Publish(ctx, &pubsub.Message{Data: m.Data})
} else if payloadId == 2 {
// asset meta
payload, decodeErr := DecodeAssetMeta(signedVaa.Payload)
if decodeErr != nil {
log.Println("failed decoding payload for row ", rowKey)
return decodeErr
}
addressHex := hex.EncodeToString(payload.TokenAddress[:])
chainID := vaa.ChainID(payload.TokenChain)
nativeAddress := transformHexAddressToNative(chainID, addressHex)
name := string(TrimUnicodeFromByteArray(payload.Name[:]))
symbol := string(TrimUnicodeFromByteArray(payload.Symbol[:]))
// find the CoinGecko id of this token
coinGeckoCoinId, foundSymbol, foundName := fetchCoinGeckoCoinId(chainID, nativeAddress, symbol, name)
// populate the symbol & name if they were blank, and we found values
if symbol == "" && foundSymbol != "" {
symbol = foundSymbol
}
if name == "" && foundName != "" {
name = foundName
}
// special case for terra-classic
if symbol == "LUNA" && chainID == vaa.ChainIDTerra {
coinGeckoCoinId = "terra-luna"
name = "LUNA"
}
log.Printf("Processing AssetMeta: Name %v, Symbol: %v, AddressHex: %v, NativeAddress: %v, CoingeckoID: %v, ChainID: %v\n", name, symbol, addressHex, nativeAddress, coinGeckoCoinId, chainID)
// save payload to bigtable
colFam := columnFamilies[3]
mutation := bigtable.NewMutation()
ts := bigtable.Now()
mutation.Set(colFam, "PayloadId", ts, []byte(fmt.Sprint(payload.PayloadId)))
mutation.Set(colFam, "TokenAddress", ts, []byte(addressHex))
mutation.Set(colFam, "TokenChain", ts, []byte(fmt.Sprint(payload.TokenChain)))
mutation.Set(colFam, "Decimals", ts, []byte(fmt.Sprint(payload.Decimals)))
mutation.Set(colFam, "Name", ts, []byte(name))
mutation.Set(colFam, "Symbol", ts, []byte(symbol))
mutation.Set(colFam, "CoinGeckoCoinId", ts, []byte(coinGeckoCoinId))
mutation.Set(colFam, "NativeAddress", ts, []byte(nativeAddress))
writeErr := writePayloadToBigTable(ctx, rowKey, colFam, mutation, false)
return writeErr
} else {
// unknown payload type
log.Println("encountered unknown payload type for row ", rowKey)
return nil
}
} else if _, ok := NFTEmitters[emitterHex]; ok {
if payloadId == 1 {
// NFT transfer
payload, decodeErr := DecodeNFTTransfer(signedVaa.Payload)
if decodeErr != nil {
log.Println("failed decoding payload for row ", rowKey)
return decodeErr
}
log.Printf("Processing NTF: Name %v, Symbol %v\n", string(TrimUnicodeFromByteArray(payload.Name[:])), string(TrimUnicodeFromByteArray(payload.Symbol[:])))
// save payload to bigtable
colFam := columnFamilies[4]
mutation := bigtable.NewMutation()
ts := bigtable.Now()
targetAddressHex := hex.EncodeToString(payload.TargetAddress[:])
mutation.Set(colFam, "PayloadId", ts, []byte(fmt.Sprint(payload.PayloadId)))
mutation.Set(colFam, "OriginAddress", ts, []byte(hex.EncodeToString(payload.OriginAddress[:])))
mutation.Set(colFam, "OriginChain", ts, []byte(fmt.Sprint(payload.OriginChain)))
mutation.Set(colFam, "Symbol", ts, TrimUnicodeFromByteArray(payload.Symbol[:]))
mutation.Set(colFam, "Name", ts, TrimUnicodeFromByteArray(payload.Name[:]))
mutation.Set(colFam, "TokenId", ts, payload.TokenId.Bytes())
mutation.Set(colFam, "URI", ts, TrimUnicodeFromByteArray(payload.URI))
mutation.Set(colFam, "TargetAddress", ts, []byte(targetAddressHex))
mutation.Set(colFam, "TargetChain", ts, []byte(fmt.Sprint(payload.TargetChain)))
addReceiverAddressToMutation(mutation, ts, payload.TargetChain, targetAddressHex)
writeErr := writePayloadToBigTable(ctx, rowKey, colFam, mutation, false)
return writeErr
} else {
// unknown payload type
log.Println("encountered unknown payload type for row ", rowKey)
return nil
}
}
// this is not a payload we are ready to decode & save. return success
return nil
}
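// In short, the dispatch above is (sketch):
//	token-bridge emitter + payloadId 1 -> TokenTransferPayload row + PubSub publish for transfer details
//	token-bridge emitter + payloadId 2 -> AssetMetaPayload row
//	NFT-bridge emitter   + payloadId 1 -> NFTTransferPayload row
// anything else is acknowledged without writing payload columns.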

View File

@ -1,132 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"encoding/json"
"fmt"
"html"
"io"
"log"
"net/http"
"strings"
"cloud.google.com/go/bigtable"
)
// fetch a single row by the row key
func ReadRow(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
var emitterChain, emitterAddress, sequence, rowKey string
// allow GET requests with querystring params, or POST requests with json body.
switch r.Method {
case http.MethodGet:
queryParams := r.URL.Query()
emitterChain = queryParams.Get("emitterChain")
emitterAddress = queryParams.Get("emitterAddress")
sequence = queryParams.Get("sequence")
readyCheck := queryParams.Get("readyCheck")
if readyCheck != "" {
// for running in devnet
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, html.EscapeString("ready"))
return
}
// check for empty values
if emitterChain == "" || emitterAddress == "" || sequence == "" {
fmt.Fprint(w, "query params ['emitterChain', 'emitterAddress', 'sequence'] cannot be empty")
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
case http.MethodPost:
// declare request body properties
var d struct {
EmitterChain string `json:"emitterChain"`
EmitterAddress string `json:"emitterAddress"`
Sequence string `json:"sequence"`
}
// deserialize request body
if err := json.NewDecoder(r.Body).Decode(&d); err != nil {
switch err {
case io.EOF:
fmt.Fprint(w, "request body required")
return
default:
log.Printf("json.NewDecoder: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
// check for empty values
if d.EmitterChain == "" || d.EmitterAddress == "" || d.Sequence == "" {
fmt.Fprint(w, "body values ['emitterChain', 'emitterAddress', 'sequence'] cannot be empty")
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
emitterChain = d.EmitterChain
emitterAddress = d.EmitterAddress
sequence = d.Sequence
default:
http.Error(w, "405 - Method Not Allowed", http.StatusMethodNotAllowed)
log.Println("Method Not Allowed")
return
}
// pad sequence to 16 characters
if len(sequence) <= 15 {
sequence = fmt.Sprintf("%016s", sequence)
}
// convert chain name to chainID
if len(emitterChain) > 1 {
chainNameMap := map[string]string{
"solana": "1",
"ethereum": "2",
"terra": "3",
"bsc": "4",
"polygon": "5",
}
lowercaseChain := strings.ToLower(emitterChain)
if _, ok := chainNameMap[lowercaseChain]; ok {
emitterChain = chainNameMap[lowercaseChain]
}
}
rowKey = emitterChain + ":" + emitterAddress + ":" + sequence
row, err := tbl.ReadRow(r.Context(), rowKey, bigtable.RowFilter(bigtable.LatestNFilter(1)))
if err != nil {
http.Error(w, "Error reading rows", http.StatusInternalServerError)
log.Printf("tbl.ReadRows(): %v", err)
return
}
if row == nil {
http.NotFound(w, r)
log.Printf("did not find row for key %v", rowKey)
return
}
details := makeDetails(row)
jsonBytes, err := json.Marshal(details)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}
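// A hypothetical request against this function (host and path are assumptions,
// not taken from the deployment config):
//	GET https://<cloud-functions-host>/readrow?emitterChain=2&emitterAddress=<64-hex-chars>&sequence=42
// resolves to row key "2:<64-hex-chars>:0000000000000042" and returns the row as JSON.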

View File

@ -1,340 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"context"
"encoding/json"
"fmt"
"html"
"io"
"log"
"net/http"
"sort"
"strconv"
"strings"
"sync"
"time"
"cloud.google.com/go/bigtable"
)
// warmCache keeps some data around between invocations, so that we don't have
// to do a full table scan with each request.
// https://cloud.google.com/functions/docs/bestpractices/tips#use_global_variables_to_reuse_objects_in_future_invocations
var warmCache = map[string]map[string]string{}
var lastCacheReset time.Time
var muWarmRecentCache sync.RWMutex
var warmRecentCacheFilePath = "recent-cache.json"
var timestampKey = "lastUpdate"
// query for last of each rowKey prefix
func getLatestOfEachEmitterAddress(tbl *bigtable.Table, ctx context.Context, prefix string, keySegments int) map[string]string {
// get cache data for query
cachePrefix := prefix
if prefix == "" {
cachePrefix = "*"
}
if _, ok := warmCache[cachePrefix]; !ok && loadCache {
loadJsonToInterface(ctx, warmRecentCacheFilePath, &muWarmRecentCache, &warmCache)
}
cacheNeedsUpdate := false
if cache, ok := warmCache[cachePrefix]; ok {
if lastUpdate, ok := cache[timestampKey]; ok {
parsed, err := time.Parse(time.RFC3339, lastUpdate)
if err == nil {
lastCacheReset = parsed
} else {
log.Printf("failed parsing lastUpdate timestamp from cache. lastUpdate %v, err: %v", lastUpdate, err)
}
}
}
var rowSet bigtable.RowSet
rowSet = bigtable.PrefixRange(prefix)
now := time.Now()
oneHourAgo := now.Add(-time.Duration(1) * time.Hour)
if oneHourAgo.Before(lastCacheReset) {
// cache is less than one hour old, use it
if cached, ok := warmCache[cachePrefix]; ok {
// use the highest possible sequence number as the range end.
maxSeq := "9999999999999999"
rowSets := bigtable.RowRangeList{}
for k, v := range cached {
if k != timestampKey {
start := fmt.Sprintf("%v:%v", k, v)
end := fmt.Sprintf("%v:%v", k, maxSeq)
rowSets = append(rowSets, bigtable.NewRange(start, end))
}
}
if len(rowSets) >= 1 {
rowSet = rowSets
}
}
} else {
// cache is more than an hour old, don't use it, reset it
warmCache = map[string]map[string]string{}
lastCacheReset = now
cacheNeedsUpdate = true
}
// create a time range for query: last seven days
sevenDays := -time.Duration(24*7) * time.Hour
prev := now.Add(sevenDays)
start := time.Date(prev.Year(), prev.Month(), prev.Day(), 0, 0, 0, 0, prev.Location())
end := time.Date(now.Year(), now.Month(), now.Day(), 23, 59, 59, maxNano, now.Location())
mostRecentByKeySegment := map[string]string{}
err := tbl.ReadRows(ctx, rowSet, func(row bigtable.Row) bool {
keyParts := strings.Split(row.Key(), ":")
groupByKey := strings.Join(keyParts[:2], ":")
mostRecentByKeySegment[groupByKey] = keyParts[2]
return true
}, bigtable.RowFilter(
bigtable.ChainFilters(
bigtable.CellsPerRowLimitFilter(1),
bigtable.TimestampRangeFilter(start, end),
bigtable.StripValueFilter(),
)))
if err != nil {
log.Fatalf("failed to read recent rows: %v", err)
}
// update the cache with the latest rows
warmCache[cachePrefix] = mostRecentByKeySegment
warmCache[cachePrefix][timestampKey] = time.Now().Format(time.RFC3339)
if cacheNeedsUpdate {
persistInterfaceToJson(ctx, warmRecentCacheFilePath, &muWarmRecentCache, warmCache)
}
return mostRecentByKeySegment
}
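// Shape of the cache written above, keyed by prefix (values are illustrative):
//	warmCache["*"] = map[string]string{
//		"2:<emitter-hex>": "0000000000012345", // highest sequence seen per emitter
//		"lastUpdate":      "2022-10-28T00:00:00Z",
//	}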
const MAX_INT64 = 9223372036854775807
func fetchMostRecentRows(tbl *bigtable.Table, ctx context.Context, prefix string, keySegments int, numRowsToFetch uint64) (map[string][]bigtable.Row, error) {
// returns { key: []bigtable.Row }, key either being "*", "chainID", "chainID:address"
latest := getLatestOfEachEmitterAddress(tbl, ctx, prefix, keySegments)
// key/value pairs are the start/stop rowKeys for range queries
rangePairs := map[string]string{}
for prefixGroup, highestSequence := range latest {
numRows := numRowsToFetch
if prefixGroup == timestampKey {
continue
}
rowKeyParts := strings.Split(prefixGroup, ":")
// convert the sequence part of the rowkey from a string to an int, so it can be used for math
highSequence, err := strconv.ParseUint(highestSequence, 10, 64)
if err != nil {
log.Println("error parsing sequence string", highSequence)
}
if highSequence < numRows {
numRows = highSequence
}
lowSequence := highSequence - numRows
// create a rowKey to use as the start of the range query
rangeQueryStart := fmt.Sprintf("%v:%v:%016d", rowKeyParts[0], rowKeyParts[1], lowSequence)
// create a rowKey with the highest seen sequence + 1, because range end is exclusive
rangeQueryEnd := fmt.Sprintf("%v:%v:%016d", rowKeyParts[0], rowKeyParts[1], highSequence+1)
if highSequence >= lowSequence {
rangePairs[rangeQueryStart] = rangeQueryEnd
} else {
// governance messages have non-sequential sequence numbers.
log.Printf("skipping %v:%v because sequences are strange. high/low: %d/%d", rowKeyParts[0], rowKeyParts[1], highSequence, lowSequence)
}
}
rangeList := bigtable.RowRangeList{}
for k, v := range rangePairs {
rangeList = append(rangeList, bigtable.NewRange(k, v))
}
results := map[string][]bigtable.Row{}
err := tbl.ReadRows(ctx, rangeList, func(row bigtable.Row) bool {
var groupByKey string
if keySegments == 0 {
groupByKey = "*"
} else {
keyParts := strings.Split(row.Key(), ":")
groupByKey = strings.Join(keyParts[:keySegments], ":")
}
results[groupByKey] = append(results[groupByKey], row)
return true
})
if err != nil {
log.Printf("failed reading row ranges. err: %v", err)
return nil, err
}
return results, nil
}
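// Sketch: for an emitter "2:abc..." whose highest seen sequence is 105 and
// numRowsToFetch = 30, the range queried is
// ["2:abc...:0000000000000075", "2:abc...:0000000000000106") -- the end key is
// exclusive, hence highSequence+1.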
// fetch recent rows.
// optionally group by an EmitterChain or EmitterAddress
// optionally query for recent rows of a given EmitterChain or EmitterAddress
func Recent(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
var numRows, groupBy, forChain, forAddress string
// allow GET requests with querystring params, or POST requests with json body.
switch r.Method {
case http.MethodGet:
queryParams := r.URL.Query()
numRows = queryParams.Get("numRows")
groupBy = queryParams.Get("groupBy")
forChain = queryParams.Get("forChain")
forAddress = queryParams.Get("forAddress")
readyCheck := queryParams.Get("readyCheck")
if readyCheck != "" {
// for running in devnet
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, html.EscapeString("ready"))
return
}
case http.MethodPost:
// declare request body properties
var d struct {
NumRows string `json:"numRows"`
GroupBy string `json:"groupBy"`
ForChain string `json:"forChain"`
ForAddress string `json:"forAddress"`
}
// deserialize request body
if err := json.NewDecoder(r.Body).Decode(&d); err != nil {
switch err {
case io.EOF:
// do nothing, empty body is ok
default:
log.Printf("json.NewDecoder: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
numRows = d.NumRows
groupBy = d.GroupBy
forChain = d.ForChain
forAddress = d.ForAddress
default:
http.Error(w, "405 - Method Not Allowed", http.StatusMethodNotAllowed)
log.Println("Method Not Allowed")
return
}
var resultCount uint64
if numRows == "" {
resultCount = 30
} else {
var convErr error
resultCount, convErr = strconv.ParseUint(numRows, 10, 64)
if convErr != nil {
fmt.Fprint(w, "numRows must be an integer")
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
// use the groupBy value to determine how many segments of the rowkey should be used for indexing results.
keySegments := 0
if groupBy == "chain" {
keySegments = 1
}
if groupBy == "address" {
keySegments = 2
}
// create the rowkey prefix for querying, and the keySegments to use for indexing results.
prefix := ""
if forChain != "" {
prefix = forChain + ":"
if groupBy == "" {
// groupBy was not set, but forChain was, so set the keySegments to index by chain
keySegments = 1
}
if forAddress != "" {
prefix = forChain + ":" + forAddress
if groupBy == "" {
// groupBy was not set, but forAddress was, so set the keySegments to index by address
keySegments = 2
}
}
}
recent, err := fetchMostRecentRows(tbl, r.Context(), prefix, keySegments, resultCount)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
res := map[string][]*Summary{}
for k, v := range recent {
sort.Slice(v, func(i, j int) bool {
// bigtable rows don't have timestamps, so use a cell timestamp that all rows will have.
var iTimestamp bigtable.Timestamp
var jTimestamp bigtable.Timestamp
// rows may have: only MessagePublication, only QuorumState, or both.
// find a timestamp for each row, try to use MessagePublication, if it exists:
if len(v[i]["MessagePublication"]) >= 1 {
iTimestamp = v[i]["MessagePublication"][0].Timestamp
} else if len(v[i]["QuorumState"]) >= 1 {
iTimestamp = v[i]["QuorumState"][0].Timestamp
}
if len(v[j]["MessagePublication"]) >= 1 {
jTimestamp = v[j]["MessagePublication"][0].Timestamp
} else if len(v[j]["QuorumState"]) >= 1 {
jTimestamp = v[j]["QuorumState"][0].Timestamp
}
return iTimestamp > jTimestamp
})
// trim the result down to the requested amount now that sorting is complete
num := uint64(len(v))
var rows []bigtable.Row
if num > resultCount {
rows = v[:resultCount]
} else {
rows = v[:]
}
res[k] = make([]*Summary, len(rows))
for i, r := range rows {
res[k][i] = makeSummary(r)
}
}
jsonBytes, err := json.Marshal(res)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}
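// The response shape (sketch): a map of group key ("*", "<chain>", or
// "<chain>:<address>") to an array of Summary objects, newest first.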

View File

@ -1,508 +0,0 @@
package p
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"log"
"math"
"os"
"strconv"
"strings"
"sync"
"time"
"cloud.google.com/go/bigtable"
"cloud.google.com/go/pubsub"
"cloud.google.com/go/storage"
"github.com/wormhole-foundation/wormhole/sdk/vaa"
)
// shared code for the various functions, primarily response formatting.
// client is a global Bigtable client, to avoid initializing a new client for
// every request.
var client *bigtable.Client
var clientOnce sync.Once
var tbl *bigtable.Table
var storageClient *storage.Client
var cacheBucketName string
var cacheBucket *storage.BucketHandle
var pubsubClient *pubsub.Client
var pubSubTokenTransferDetailsTopic *pubsub.Topic
var coinGeckoCoins = map[string][]CoinGeckoCoin{}
var solanaTokens = map[string]SolanaToken{}
var releaseDay = time.Date(2021, 9, 13, 0, 0, 0, 0, time.UTC)
var loadCache = true
// init runs during cloud function initialization. So, this will only run during
// an instance's cold start.
// https://cloud.google.com/functions/docs/bestpractices/networking#accessing_google_apis
func init() {
defer timeTrack(time.Now(), "init")
clientOnce.Do(func() {
// Declare a separate err variable to avoid shadowing client.
var err error
project := os.Getenv("GCP_PROJECT")
instance := os.Getenv("BIGTABLE_INSTANCE")
client, err = bigtable.NewClient(context.Background(), project, instance)
if err != nil {
// http.Error(w, "Error initializing client", http.StatusInternalServerError)
log.Printf("bigtable.NewClient error: %v", err)
return
}
// create the topic that will be published to after decoding token transfer payloads
tokenTransferDetailsTopic := os.Getenv("PUBSUB_TOKEN_TRANSFER_DETAILS_TOPIC")
if tokenTransferDetailsTopic != "" {
var pubsubErr error
pubsubClient, pubsubErr = pubsub.NewClient(context.Background(), project)
if pubsubErr != nil {
log.Printf("pubsub.NewClient error: %v", pubsubErr)
return
}
pubSubTokenTransferDetailsTopic = pubsubClient.Topic(tokenTransferDetailsTopic)
// fetch the token lists once at start up
coinGeckoCoins = fetchCoinGeckoCoins()
solanaTokens = fetchSolanaTokenList()
}
})
tbl = client.Open("v2Events")
cacheBucketName = os.Getenv("CACHE_BUCKET")
if cacheBucketName != "" {
// Create storage client.
var err error
storageClient, err = storage.NewClient(context.Background())
if err != nil {
log.Fatalf("Failed to create storage client: %v", err)
}
cacheBucket = storageClient.Bucket(cacheBucketName)
}
tokenAllowlistFilePath := os.Getenv("TOKEN_ALLOWLIST")
if tokenAllowlistFilePath != "" {
loadJsonToInterface(context.Background(), tokenAllowlistFilePath, &sync.RWMutex{}, &tokenAllowlist)
}
loadCacheStr := os.Getenv("LOAD_CACHE")
if val, err := strconv.ParseBool(loadCacheStr); err == nil {
loadCache = val
log.Printf("loadCache set to %v\n", loadCache)
}
}
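// Environment variables read during init:
//	GCP_PROJECT, BIGTABLE_INSTANCE        - Bigtable connection
//	PUBSUB_TOKEN_TRANSFER_DETAILS_TOPIC   - enables the PubSub publish path
//	CACHE_BUCKET                          - enables the GCS-backed caches
//	TOKEN_ALLOWLIST                       - path of the TVL token allowlist
//	LOAD_CACHE                            - bool, toggles reading caches on cold start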
func timeTrack(start time.Time, name string) {
elapsed := time.Since(start)
log.Printf("%s took %s", name, elapsed)
}
// reads the specified file from the CACHE_BUCKET and unmarshals the json into the supplied interface.
func loadJsonToInterface(ctx context.Context, filePath string, mutex *sync.RWMutex, cacheMap interface{}) {
if cacheBucket == nil {
log.Println("no cacheBucket supplied, not going to read cache")
return
}
defer timeTrack(time.Now(), fmt.Sprintf("reading %v", filePath))
mutex.Lock()
defer mutex.Unlock()
reader, readErr := cacheBucket.Object(filePath).NewReader(ctx)
if readErr != nil {
log.Printf("Failed reading %v in GCS. err: %v", filePath, readErr)
return
}
defer reader.Close()
fileData, err := io.ReadAll(reader)
if err != nil {
log.Printf("loadJsonToInterface: unable to read data. file %q: %v", filePath, err)
return
}
unmarshalErr := json.Unmarshal(fileData, &cacheMap)
if unmarshalErr != nil {
log.Printf("failed unmarshaling %v, err: %v", filePath, unmarshalErr)
}
}
// writes the supplied interface to the CACHE_BUCKET/filePath.
func persistInterfaceToJson(ctx context.Context, filePath string, mutex *sync.RWMutex, cacheMap interface{}) {
if cacheBucket == nil {
log.Println("no cacheBucket supplied, not going to persist cache")
return
}
defer timeTrack(time.Now(), fmt.Sprintf("writing %v", filePath))
mutex.Lock()
cacheBytes, marshalErr := json.MarshalIndent(cacheMap, "", " ")
if marshalErr != nil {
log.Fatal("failed marshaling cacheMap.", marshalErr)
}
wc := cacheBucket.Object(filePath).NewWriter(ctx)
reader := bytes.NewReader(cacheBytes)
if _, writeErr := io.Copy(wc, reader); writeErr != nil {
log.Printf("failed writing to file %v, err: %v", filePath, writeErr)
}
mutex.Unlock()
if err := wc.Close(); err != nil {
log.Printf("Writer.Close with error: %v", err)
}
}
var columnFamilies = []string{
"MessagePublication",
"QuorumState",
"TokenTransferPayload",
"AssetMetaPayload",
"NFTTransferPayload",
"TokenTransferDetails",
"ChainDetails",
}
var messagePubFam = columnFamilies[0]
var quorumStateFam = columnFamilies[1]
var transferPayloadFam = columnFamilies[2]
var metaPayloadFam = columnFamilies[3]
var nftPayloadFam = columnFamilies[4]
var transferDetailsFam = columnFamilies[5]
var chainDetailsFam = columnFamilies[6]
type (
// Summary is MessagePublication data & QuorumState data
Summary struct {
EmitterChain string
EmitterAddress string
Sequence string
InitiatingTxID string
Payload []byte
SignedVAABytes []byte
QuorumTime string
TransferDetails *TransferDetails
}
// Details is a Summary extended with all the post-processing ColumnFamilies
Details struct {
Summary
SignedVAA *vaa.VAA
TokenTransferPayload *TokenTransferPayload
AssetMetaPayload *AssetMetaPayload
NFTTransferPayload *NFTTransferPayload
ChainDetails *ChainDetails
}
// The following structs match the ColumnFamilies they are named after
TokenTransferPayload struct {
Amount string
OriginAddress string
OriginChain string
TargetAddress string
TargetChain string
}
AssetMetaPayload struct {
TokenAddress string
TokenChain string
Decimals string
Symbol string
Name string
CoinGeckoCoinId string
NativeAddress string
}
NFTTransferPayload struct {
OriginAddress string
OriginChain string
Symbol string
Name string
TokenId string
URI string
TargetAddress string
TargetChain string
}
TransferDetails struct {
Amount string
Decimals string
NotionalUSDStr string
TokenPriceUSDStr string
TransferTimestamp string
OriginSymbol string
OriginName string
OriginTokenAddress string
}
ChainDetails struct {
SenderAddress string
ReceiverAddress string
}
)
// ChainIDs to compute TVL/stats for
// Useful to exclude chains we don't want to compute TVL for, which can improve performance
// (notably PythNet is excluded, ChainID 26)
var tvlChainIDs = []vaa.ChainID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 22, 28}
func chainIdStringToType(chainId string) vaa.ChainID {
switch chainId {
case "1":
return vaa.ChainIDSolana
case "2":
return vaa.ChainIDEthereum
case "3":
return vaa.ChainIDTerra
case "4":
return vaa.ChainIDBSC
case "5":
return vaa.ChainIDPolygon
case "6":
return vaa.ChainIDAvalanche
case "7":
return vaa.ChainIDOasis
case "8":
return vaa.ChainIDAlgorand
case "9":
return vaa.ChainIDAurora
case "10":
return vaa.ChainIDFantom
case "11":
return vaa.ChainIDKarura
case "12":
return vaa.ChainIDAcala
case "13":
return vaa.ChainIDKlaytn
case "14":
return vaa.ChainIDCelo
case "15":
return vaa.ChainIDNear
case "16":
return vaa.ChainIDMoonbeam
case "18":
return vaa.ChainIDTerra2
case "22":
return vaa.ChainIDAptos
case "28":
return vaa.ChainIDXpla
}
return vaa.ChainIDUnset
}
func chainIDToNumberString(c vaa.ChainID) string {
return strconv.FormatUint(uint64(c), 10)
}
func makeSummary(row bigtable.Row) *Summary {
summary := &Summary{}
if _, ok := row[messagePubFam]; ok {
for _, item := range row[messagePubFam] {
switch item.Column {
case "MessagePublication:InitiatingTxID":
summary.InitiatingTxID = string(item.Value)
case "MessagePublication:Payload":
summary.Payload = item.Value
case "MessagePublication:EmitterChain":
summary.EmitterChain = string(item.Value)
case "MessagePublication:EmitterAddress":
summary.EmitterAddress = string(item.Value)
case "MessagePublication:Sequence":
summary.Sequence = string(item.Value)
}
}
} else {
// Some rows have a QuorumState, but no MessagePublication,
// so populate Summary values from the rowKey.
keyParts := strings.Split(row.Key(), ":")
chainId := chainIdStringToType(keyParts[0])
summary.EmitterChain = chainId.String()
summary.EmitterAddress = keyParts[1]
seq := strings.TrimLeft(keyParts[2], "0")
if seq == "" {
seq = "0"
}
summary.Sequence = seq
}
if _, ok := row[quorumStateFam]; ok {
item := row[quorumStateFam][0]
summary.SignedVAABytes = item.Value
summary.QuorumTime = item.Timestamp.Time().String()
}
if _, ok := row[transferDetailsFam]; ok {
transferDetails := &TransferDetails{}
for _, item := range row[transferDetailsFam] {
switch item.Column {
case "TokenTransferDetails:Amount":
transferDetails.Amount = string(item.Value)
case "TokenTransferDetails:Decimals":
transferDetails.Decimals = string(item.Value)
case "TokenTransferDetails:NotionalUSDStr":
transferDetails.NotionalUSDStr = string(item.Value)
case "TokenTransferDetails:TokenPriceUSDStr":
transferDetails.TokenPriceUSDStr = string(item.Value)
case "TokenTransferDetails:TransferTimestamp":
transferDetails.TransferTimestamp = string(item.Value)
case "TokenTransferDetails:OriginSymbol":
transferDetails.OriginSymbol = string(item.Value)
case "TokenTransferDetails:OriginName":
transferDetails.OriginName = string(item.Value)
case "TokenTransferDetails:OriginTokenAddress":
transferDetails.OriginTokenAddress = string(item.Value)
}
}
summary.TransferDetails = transferDetails
}
return summary
}
func makeDetails(row bigtable.Row) *Details {
deets := &Details{}
sum := makeSummary(row)
deets.Summary = Summary{
EmitterChain: sum.EmitterChain,
EmitterAddress: sum.EmitterAddress,
Sequence: sum.Sequence,
InitiatingTxID: sum.InitiatingTxID,
Payload: sum.Payload,
SignedVAABytes: sum.SignedVAABytes,
QuorumTime: sum.QuorumTime,
TransferDetails: sum.TransferDetails,
}
if _, ok := row[quorumStateFam]; ok {
item := row[quorumStateFam][0]
deets.SignedVAA, _ = vaa.Unmarshal(item.Value)
}
if _, ok := row[transferPayloadFam]; ok {
tokenTransferPayload := &TokenTransferPayload{}
for _, item := range row[transferPayloadFam] {
switch item.Column {
case "TokenTransferPayload:Amount":
tokenTransferPayload.Amount = string(item.Value)
case "TokenTransferPayload:OriginAddress":
tokenTransferPayload.OriginAddress = string(item.Value)
case "TokenTransferPayload:OriginChain":
tokenTransferPayload.OriginChain = string(item.Value)
case "TokenTransferPayload:TargetAddress":
tokenTransferPayload.TargetAddress = string(item.Value)
case "TokenTransferPayload:TargetChain":
tokenTransferPayload.TargetChain = string(item.Value)
}
}
deets.TokenTransferPayload = tokenTransferPayload
}
if _, ok := row[metaPayloadFam]; ok {
assetMetaPayload := &AssetMetaPayload{}
for _, item := range row[metaPayloadFam] {
switch item.Column {
case "AssetMetaPayload:TokenAddress":
assetMetaPayload.TokenAddress = string(item.Value)
case "AssetMetaPayload:TokenChain":
assetMetaPayload.TokenChain = string(item.Value)
case "AssetMetaPayload:Decimals":
assetMetaPayload.Decimals = string(item.Value)
case "AssetMetaPayload:Symbol":
assetMetaPayload.Symbol = string(item.Value)
case "AssetMetaPayload:Name":
assetMetaPayload.Name = string(item.Value)
case "AssetMetaPayload:CoinGeckoCoinId":
assetMetaPayload.CoinGeckoCoinId = string(item.Value)
case "AssetMetaPayload:NativeAddress":
assetMetaPayload.NativeAddress = string(item.Value)
}
}
deets.AssetMetaPayload = assetMetaPayload
}
if _, ok := row[nftPayloadFam]; ok {
nftTransferPayload := &NFTTransferPayload{}
for _, item := range row[nftPayloadFam] {
switch item.Column {
case "NFTTransferPayload:OriginAddress":
nftTransferPayload.OriginAddress = string(item.Value)
case "NFTTransferPayload:OriginChain":
nftTransferPayload.OriginChain = string(item.Value)
case "NFTTransferPayload:Symbol":
nftTransferPayload.Symbol = string(item.Value)
case "NFTTransferPayload:Name":
nftTransferPayload.Name = string(item.Value)
case "NFTTransferPayload:TokenId":
nftTransferPayload.TokenId = string(item.Value)
case "NFTTransferPayload:URI":
nftTransferPayload.URI = string(TrimUnicodeFromByteArray(item.Value))
case "NFTTransferPayload:TargetAddress":
nftTransferPayload.TargetAddress = string(item.Value)
case "NFTTransferPayload:TargetChain":
nftTransferPayload.TargetChain = string(item.Value)
}
}
deets.NFTTransferPayload = nftTransferPayload
}
if _, ok := row[chainDetailsFam]; ok {
chainDetails := &ChainDetails{}
for _, item := range row[chainDetailsFam] {
switch item.Column {
// TEMP - until we have this backfilled/populating for new messages
// case "ChainDetails:SenderAddress":
// chainDetails.SenderAddress = string(item.Value)
case "ChainDetails:ReceiverAddress":
chainDetails.ReceiverAddress = string(item.Value)
}
}
deets.ChainDetails = chainDetails
}
return deets
}
func roundToTwoDecimalPlaces(num float64) float64 {
return math.Round(num*100) / 100
}
func createCachePrefix(prefix string) string {
cachePrefix := prefix
if prefix == "" {
cachePrefix = "*"
}
return cachePrefix
}
// useCache allows overriding the cache for a given day.
// This is useful for debugging, to generate fresh data
func useCache(date string) bool {
skipDates := map[string]bool{
// for example, add to skip:
// "2022-02-01": true,
}
if _, ok := skipDates[date]; ok {
return false
}
return true
}
// tokens allowed in TVL calculation
var tokenAllowlist = map[string]map[string]string{}
// isTokenAllowed returns whether or not the token is in the allowlist along with its CoinGecko ID
func isTokenAllowed(chainId string, tokenAddress string) (bool, string) {
if tokenAddresses, ok := tokenAllowlist[chainId]; ok {
if coinGeckoCoinId, ok := tokenAddresses[tokenAddress]; ok {
return true, coinGeckoCoinId
}
}
return false, ""
}
// tokens with no trading activity recorded by exchanges integrated on CoinGecko since the specified date
var inactiveTokens = map[string]map[string]string{
chainIDToNumberString(vaa.ChainIDEthereum): {
"0x707f9118e33a9b8998bea41dd0d46f38bb963fc8": "2022-06-15", // Anchor bETH token
},
}
func isTokenActive(chainId string, tokenAddress string, date string) bool {
if deactivatedDates, ok := inactiveTokens[chainId]; ok {
if deactivatedDate, ok := deactivatedDates[tokenAddress]; ok {
return date < deactivatedDate
}
}
return true
}
func chainIDRowPrefix(chainId vaa.ChainID) string {
return fmt.Sprintf("%d:", chainId)
}

View File

@ -1,12 +0,0 @@
{
"1": {
"So11111111111111111111111111111111111111112": "wrapped-solana"
},
"2": { "0xddb64fe46a91d46ee29420539fc25fd07c5fea3e": "weth" },
"3": {
"uluna": "terra-luna",
"uusd": "terrausd"
},
"4": { "0xddb64fe46a91d46ee29420539fc25fd07c5fea3e": "wbnb" },
"18": { "uluna": "terra-luna-2" }
}

View File

@ -1,180 +0,0 @@
{
"1": {
"ATLASXmbPQxBUYbxPsV97usA3fPQYEqzQBUHgiFCUsXx": "star-atlas",
"NFTUkR4u7wKxy9QLaX2TGvd9oZSWoMo4jqSJqdMb7Nk": "blockasset",
"AkhdZGVbJXPuQZ53u2LrimCjkRP6ZyxG1SoM85T98eE1": "starbots",
"4k3Dyjzvzp8eMZWUXbBCjEvwSkkk59S5iCNLY3QrkX6R": "raydium",
"4Te4KJgjtnZe4aE2zne8G4NPfrPjCwDmaiEx9rKnyDVZ": "solclout",
"So11111111111111111111111111111111111111112": "wrapped-solana",
"EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v": "usd-coin",
"Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB": "tether",
"7dHbWXmci3dT8UFYWYZweBLXgycu7Y3iL6trKn1Y7ARj": "lido-staked-sol"
},
"2": {
"0x111111111117dc0aa78b770fa6a738034120c302": "1inch",
"0x009178997aff09a67d4caccfeb897fb79d036214": "1sol",
"0x7fc66500c84a76ad7e9c93437bfc5ac33e2ddae9": "aave",
"0x27702a26126e0b3702af63ee09ac4d1a084ef628": "aleph",
"0xe0cca86b254005889ac3a81e737f56a14f4a38f5": "alta-finance",
"0x9b83f827928abdf18cf1f7e67053572b9bceff3a": "artem",
"0x18aaa7115705e8be94bffebde57af9bfc265b998": "audius",
"0xbb0e17ef65f82ab018d8edd776e8dd940327b28b": "axie-infinity",
"0x0d8775f648430679a709e98d2b0cb6250d2887ef": "basic-attention-token",
"0xf17e65822b568b3903685a7c9f496cf7656cc6c2": "biconomy",
"0xef19f4e48830093ce5bc8b3ff7f903a0ae3e9fa1": "botxcoin",
"0xbba39fd2935d5769116ce38d46a71bde9cf03099": "choise",
"0xd49efa7bc0d339d74f487959c573d518ba3f8437": "shield-finance",
"0xc00e94cb662c3520282e6f5717214004a7f26888": "compound-governance-token",
"0x2ba592f78db6436527729929aaf6c908497cb200": "cream-2",
"0x6b175474e89094c44da98b954eedeac495271d0f": "dai",
"0x92d6c1e31e14520e676a687f0a93788b716beff5": "dydx",
"0x4da34f8264cb33a5c9f17081b9ef5ff6091116f4": "elyfi",
"0xfd09911130e6930bf87f2b0554c44f400bd80d3e": "ethichub",
"0x853d955acef822db058eb8505911ed77f175b99e": "frax",
"0xf8c3527cc04340b208c854e985240c02f7b7793f": "frontier-token",
"0x50d1c9771902476076ecfc8b2a83ad6b9355a4c9": "ftx-token",
"0x3432b6a60d23ca0dfca7761b7ab56459d9c964d0": "frax-share",
"0xc944e90c64b2c07662a292be6244bdf05cda44a7": "the-graph",
"0x4674672bcddda2ea5300f5207e1158185c944bc0": "gem-exchange-and-trading",
"0x0316eb71485b0ab14103307bf65a021042c6d380": "huobi-btc",
"0x4bd70556ae3f8a6ec6c4080a0c327b24325438f3": "hxro",
"0xe28b3b32b6c345a34ff64674606124dd5aceca30": "injective-protocol",
"0x8a9c67fee641579deba04928c4bc45f66e26343a": "jarvis-reward-token",
"0x85eee30c52b0b379b046fb0f85f4f3dc3009afec": "keep-network",
"0x5a98fcbea516cf06857215779fd812ca3bef1b32": "lido-dao",
"0x514910771af9ca656af840dff83e8264ecf986ca": "chainlink",
"0x0f5d2fb29fb7d3cfee444a200298f468908cc942": "decentraland",
"0x08d967bb0134f2d07f7cfb6e246680c53927dd30": "math",
"0xe831f96a7a1dce1aa2eb760b1e296c6a74caa9d5": "nexum",
"0xdfdb7f72c1f195c5951a234e8db9806eb0635346": "feisty-doge-nft",
"0x727f064a78dc734d33eec18d5370aef32ffd46e4": "orion-money",
"0x45804880de22913dafe09f4980848ece6ecbaf78": "pax-gold",
"0x65e6b60ea01668634d68d0513fe814679f925bad": "pixelverse",
"0xf1f955016ecbcd7321c7266bccfb96c68ea5e49b": "rally-2",
"0x3845badade8e6dff049820680d1f14bd3903a5d0": "the-sandbox",
"0x30d20208d987713f46dfd34ef128bb16c404d10f": "stader",
"0x95ad61b0a150d79219dcf64e1e6cc01f0b64c4ce": "shiba-inu",
"0x5ab6a4f46ce182356b6fa2661ed8ebcafce995ad": "sportium",
"0x476c5e26a75bd202a9683ffd34359c0cc15be0ff": "serum",
"0x6b3595068778dd592e39a122f4f5a5cf09c90fe2": "sushi",
"0x8ce9137d39326ad0cd6491fb5cc0cba0e089b6a9": "swipe",
"0x2e95cea14dd384429eb3c4331b776c4cfbb6fcd9": "throne",
"0x05d3606d5c81eb9b7b18530995ec9b29da05faba": "tomoe",
"0x2c537e5624e4af88a7ae4060c022609376c8d0eb": "bilira",
"0x8564653879a18c560e7c0ea0e084c516c62f5653": "upbots",
"0x1f9840a85d5af5bf1d1762f925bdaddc4201f984": "uniswap",
"0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48": "usd-coin",
"0xdac17f958d2ee523a2206206994597c13d831ec7": "tether",
"0x0c572544a4ee47904d54aaa6a970af96b6f00e1b": "wasder",
"0x2260fac5e5542a773aa44fbcfedf7c193bc2c599": "wrapped-bitcoin",
"0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2": "ethereum",
"0x72b886d09c117654ab7da13a14d603001de0b777": "xdefi",
"0x0bc529c00c6401aef6d220be8c6ea1667f6ad93e": "yearn-finance",
"0x1a7e4e63778b4f12a199c062f3efdd288afcbce8": "ageur",
"0x707f9118e33a9b8998bea41dd0d46f38bb963fc8": "ethereum",
"0x7f39c581f595b53c5cb19bd0b3f8da6c935e2ca0": "wrapped-steth",
"0xa2cd3d43c775978a96bdbf12d733d5a1ed94fb18": "chain-2"
},
"3": {
"terra193c42lfwmlkasvcw22l9qqzc5q2dx208tkd7wl": "bitlocus",
"uluna": "terra-luna",
"terra13awdgcx40tz5uygkgm79dytez3x87rpg4uhnvu": "playnity",
"uusd": "terrausd",
"terra1hzh9vpxhsk8253se0vv5jj6etdvxu3nv8z07zu": "anchorust"
},
"4": {
"0x7e46d5eb5b7ca573b367275fee94af1945f5b636": "abitshadow-token",
"0xe9e7cea3dedca5984780bafc599bd69add087d56": "binance-usd",
"0x8ebc361536094fd5b4ffb8521e31900614c9f55d": "darcmatter-coin",
"0x2170ed0880ac9a755fd29b2688956bd959f933f8": "weth",
"0x3019bf2a2ef8040c242c9a4c5c4bd4c81678b2a1": "stepn",
"0x8ac76a51cc950d9822d68b83fe1ad97b32cd580d": "usd-coin",
"0x55d398326f99059ff775485246999027b3197955": "tether",
"0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c": "wbnb",
"0xfafd4cb703b25cb22f43d017e7e0d75febc26743": "weyu",
"0xfa40d8fc324bcdd6bbae0e086de886c571c225d4": "wizardia"
},
"5": {
"0x9c891326fd8b1a713974f73bb604677e1e63396d": "islamicoin",
"0x2791bca1f2de4661ed88a30c99a7a9449aa84174": "usd-coin",
"0xc2132d05d31c914a87c6611c10748aeb04b58e8f": "tether",
"0x7ceb23fd6bc0add59e62ac25578270cff1b9f619": "weth",
"0x0d500b1d8e8ef31e21c99d1db9a6444d3adf1270": "matic-network"
},
"6": {
"0xb97ef9ef8734c71904d8002f8b6bc66dd9c48a6e": "usd-coin",
"0xa7d7079b0fead91f3e65f86e8915cb59c1a4c664": "usd-coin",
"0x9702230a8ea53601f5cd2dc00fdbc13d4df4a8c7": "tether",
"0xb31f66aa3c1e785363f0875a1b74e27b85fd66c7": "avalanche-2",
"0x2b2c81e08f1af8835a78bb2a90ae924ace0ea4be": "benqi-liquid-staked-avax"
},
"7": {
"0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee2": "oasis-network",
"0x94fbffe5698db6f54d6ca524dbe673a7729014be": "usd-coin",
"0x366ef31c8dc715cbeff5fa54ad106dc9c25c6153": "tether-usd-wormhole-from-bsc",
"0x3223f17957ba502cbe71401d55a0db26e5f7c68f": "ethereum-wormhole",
"0x21c718c22d52d0f3a789b752d4c2fd5908a8a733": "oasis-network"
},
"8": {
"0": "algorand",
"31566704": "usd-coin",
"312769": "tether"
},
"9": {
"0x8BEc47865aDe3B172A928df8f990Bc7f2A3b9f79": "aurora",
"0xE4B9e004389d91e4134a28F19BD833cBA1d994B6": "frax",
"0xb12bfca5a55806aaf64e99521918a4bf0fc40802": "usd-coin",
"0x4988a896b1227218e4a686fde5eabdcabd91571f": "tether",
"0x5183e1b1091804bc2602586919e6880ac1cf2896": "usn",
"0xc9bdeed33cd01541e1eed10f90519d2c06fe3feb": "weth",
"0xC9BdeEd33CD01541e1eeD10f90519d2C06Fe3feB": "ethereum",
"0xc4bdd27c33ec7daa6fcfd8532ddb524bf4038096": "wrapped-terra"
},
"10": {
"0x321162cd933e2be498cd2267a90534a804051b11": "wrapped-bitcoin",
"0x74b23882a30290451a17c44f4f05243b6b58c76d": "weth",
"0x260b3e40c714ce8196465ec824cd8bb915081812": "iron-bsc",
"0x04068da6c83afcfa0e13ba15a6696662335d5b75": "usd-coin",
"0x21be370d5312f44cb42ce377bc9b8a0cef1a4c83": "wrapped-fantom"
},
"11": {
"0x0000000000000000000100000000000000000080": "karura",
"0x0000000000000000000100000000000000000082": "kusama",
"0x0000000000000000000500000000000000000007": "tether",
"0x0000000000000000000100000000000000000081": "acala-dollar"
},
"12": {
"0x0000000000000000000100000000000000000000": "acala",
"0x0000000000000000000100000000000000000002": "polkadot",
"0x0000000000000000000100000000000000000001": "acala-dollar"
},
"13": {
"0x5c74070fdea071359b86082bd9f9b3deaafbe32b": "dai",
"0x5fff3a6c16c2208103f318f4713d4d90601a7313": "kleva",
"0x5096db80b21ef45230c9e423c373f1fc9c0198dd": "wemix-token",
"0xe4f05a66ec68b54a58b17c22107b02e0232cc817": "klay-token",
"0xcee8faf64bb97a73bb51e115aa89c17ffa8dd167": "tether"
},
"14": {
"0x471ece3750da237f93b8e339c536989b8978a438": "celo",
"0x46c9757c5497c5b1f2eb73ae79b6b67d119b0b58": "impactmarket",
"0xd8763cba276a3738e6de85b4b3bf5fded6d6ca73": "celo-euro",
"0x765de816845861e75a25fca122bb6898b8b1282a": "celo-dollar"
},
"15": {
"near": "near",
"token.sweat": "sweatcoin"
},
"16": {
"0xacc15dc74880c9944775448304b263d191c6077f": "moonbeam"
},
"18": {
"uluna": "terra-luna-2"
},
"22": {
"a867703f5395cb2965feb7ebff5cdf39b771fc6156085da3ae4147a00be91b38": "aptos"
},
"28": {
"axpla": "xpla"
}
}

View File

@@ -1,381 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"context"
"encoding/json"
"fmt"
"html"
"io"
"log"
"net/http"
"strconv"
"strings"
"sync"
"time"
"cloud.google.com/go/bigtable"
)
const maxNano int = 999999999
type totalsResult struct {
LastDayCount map[string]int
TotalCount map[string]int
TotalCountDurationDays int
DailyTotals map[string]map[string]int
}
// warmCache keeps some data around between invocations, so that we don't have
// to do a full table scan with each request.
// https://cloud.google.com/functions/docs/bestpractices/tips#use_global_variables_to_reuse_objects_in_future_invocations
var warmTotalsCache = map[string]map[string]map[string]int{}
var muWarmTotalsCache sync.RWMutex
var warmTotalsCacheFilePath = "totals-cache.json"
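// Illustrative read-through sketch of how the warm cache is used below
// (queryAndAggregate is hypothetical; the real fetch is fetchRowsInInterval):
//
//	muWarmTotalsCache.RLock()
//	cached, ok := warmTotalsCache[dateStr]
//	muWarmTotalsCache.RUnlock()
//	if !ok {
//	    cached = queryAndAggregate(dateStr) // hypothetical BigTable scan + count
//	    muWarmTotalsCache.Lock()
//	    warmTotalsCache[dateStr] = cached
//	    muWarmTotalsCache.Unlock()
//	}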
// derive the result index relevant to a row.
func makeGroupKey(keySegments int, rowKey string) string {
var countBy string
if keySegments == 0 {
countBy = "*"
} else {
keyParts := strings.Split(rowKey, ":")
countBy = strings.Join(keyParts[:keySegments], ":")
}
return countBy
}
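// For example, with rowKey "2:000000000000abcd:5" (chain:emitterAddress:sequence,
// illustrative values): keySegments 0 yields "*", keySegments 1 yields "2"
// (group by chain), and keySegments 2 yields "2:000000000000abcd" (group by address).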
func fetchRowsInInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start, end time.Time) ([]bigtable.Row, error) {
rows := []bigtable.Row{}
err := tbl.ReadRows(ctx, bigtable.PrefixRange(prefix), func(row bigtable.Row) bool {
rows = append(rows, row)
return true
}, bigtable.RowFilter(
bigtable.ChainFilters(
// combine filters to get only what we need:
bigtable.FamilyFilter(columnFamilies[1]),
bigtable.CellsPerRowLimitFilter(1), // only the first cell in each column (helps for devnet where sequence resets)
bigtable.TimestampRangeFilter(start, end), // within time range
bigtable.StripValueFilter(), // no columns/values, just the row.Key()
)))
return rows, err
}
func createCountsOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, numPrevDays int, keySegments int) (map[string]map[string]int, error) {
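// "2021-09-13" acts as a sentinel: if that date key is absent, assume the warm
// cache has not yet been loaded from the persisted JSON (this presumes the
// persisted cache always contains that date).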
if _, ok := warmTotalsCache["2021-09-13"]; !ok && loadCache {
loadJsonToInterface(ctx, warmTotalsCacheFilePath, &muWarmTotalsCache, &warmTotalsCache)
}
results := map[string]map[string]int{}
now := time.Now().UTC()
var intervalsWG sync.WaitGroup
// there will be a query for each previous day, plus today
intervalsWG.Add(numPrevDays + 1)
// create the unique identifier for this query, for cache
cachePrefix := prefix
if prefix == "" {
cachePrefix = "*"
}
cachePrefix = fmt.Sprintf("%v-%v", cachePrefix, keySegments)
cacheNeedsUpdate := false
for daysAgo := 0; daysAgo <= numPrevDays; daysAgo++ {
go func(tbl *bigtable.Table, ctx context.Context, prefix string, daysAgo int) {
// start is the start of day (SOD), end is the end of day (EOD)
// "0 daysAgo start" is 00:00:00 of the current day
// "0 daysAgo end" is 23:59:59.999999999 of the current day (i.e. still in the future)
// calculate the start and end times for the query
hoursAgo := (24 * daysAgo)
daysAgoDuration := -time.Duration(hoursAgo) * time.Hour
n := now.Add(daysAgoDuration)
year := n.Year()
month := n.Month()
day := n.Day()
loc := n.Location()
start := time.Date(year, month, day, 0, 0, 0, 0, loc)
end := time.Date(year, month, day, 23, 59, 59, maxNano, loc)
dateStr := start.Format("2006-01-02")
muWarmTotalsCache.Lock()
// initialize the map for this date in the result set
results[dateStr] = map[string]int{"*": 0}
// check to see if there is cache data for this date/query
if dateCache, ok := warmTotalsCache[dateStr]; ok && useCache(dateStr) {
// have a cache for this date
if val, ok := dateCache[cachePrefix]; ok {
// have a cache for this query
if daysAgo >= 1 {
// only use the cache for yesterday and older
results[dateStr] = val
muWarmTotalsCache.Unlock()
intervalsWG.Done()
return
}
}
}
muWarmTotalsCache.Unlock()
var result []bigtable.Row
var fetchErr error
defer intervalsWG.Done()
result, fetchErr = fetchRowsInInterval(tbl, ctx, prefix, start, end)
if fetchErr != nil {
log.Fatalf("fetchRowsInInterval returned an error: %v", fetchErr)
}
// iterate through the rows and increment the count
for _, row := range result {
countBy := makeGroupKey(keySegments, row.Key())
if keySegments != 0 {
// increment the total count
results[dateStr]["*"] = results[dateStr]["*"] + 1
}
results[dateStr][countBy] = results[dateStr][countBy] + 1
}
if daysAgo >= 1 {
muWarmTotalsCache.Lock()
if _, ok := warmTotalsCache[dateStr]; !ok {
warmTotalsCache[dateStr] = map[string]map[string]int{}
}
if _, ok := warmTotalsCache[dateStr][cachePrefix]; !ok {
warmTotalsCache[dateStr][cachePrefix] = map[string]int{}
}
if len(warmTotalsCache[dateStr][cachePrefix]) <= 1 || !useCache(dateStr) {
// set the result in the cache
warmTotalsCache[dateStr][cachePrefix] = results[dateStr]
cacheNeedsUpdate = true
}
muWarmTotalsCache.Unlock()
}
}(tbl, ctx, prefix, daysAgo)
}
intervalsWG.Wait()
if cacheNeedsUpdate {
persistInterfaceToJson(ctx, warmTotalsCacheFilePath, &muWarmTotalsCache, warmTotalsCache)
}
// create a set of all the keys from all dates, to ensure the result objects all have the same keys
seenKeySet := map[string]bool{}
for _, v := range results {
for key := range v {
seenKeySet[key] = true
}
}
// ensure each date object has the same keys:
for date := range results {
for key := range seenKeySet {
if _, ok := results[date][key]; !ok {
// add the missing key to the map
results[date][key] = 0
}
}
}
return results, nil
}
// returns the count of the rows in the query response
func messageCountForInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start, end time.Time, keySegments int) (map[string]int, error) {
// query for all rows in time range, return result count
results, fetchErr := fetchRowsInInterval(tbl, ctx, prefix, start, end)
if fetchErr != nil {
log.Printf("fetchRowsInInterval returned an error: %v", fetchErr)
return nil, fetchErr
}
result := map[string]int{"*": len(results)}
// iterate through the rows and increment the count for each index
if keySegments != 0 {
for _, row := range results {
countBy := makeGroupKey(keySegments, row.Key())
result[countBy] = result[countBy] + 1
}
}
return result, nil
}
// get number of recent transactions in the last 24 hours, and daily for a period
// optionally group by a EmitterChain or EmitterAddress
// optionally query for recent rows of a given EmitterChain or EmitterAddress
func Totals(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
var last24Hours, numDays, groupBy, forChain, forAddress string
// allow GET requests with querystring params, or POST requests with json body.
switch r.Method {
case http.MethodGet:
queryParams := r.URL.Query()
last24Hours = queryParams.Get("last24Hours")
numDays = queryParams.Get("numDays")
groupBy = queryParams.Get("groupBy")
forChain = queryParams.Get("forChain")
forAddress = queryParams.Get("forAddress")
readyCheck := queryParams.Get("readyCheck")
if readyCheck != "" {
// for running in devnet
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, html.EscapeString("ready"))
return
}
case http.MethodPost:
// declare request body properties
var d struct {
Last24Hours string `json:"last24Hours"`
NumDays string `json:"numDays"`
GroupBy string `json:"groupBy"`
ForChain string `json:"forChain"`
ForAddress string `json:"forAddress"`
}
// deserialize request body
if err := json.NewDecoder(r.Body).Decode(&d); err != nil {
switch err {
case io.EOF:
// do nothing, empty body is ok
default:
log.Printf("json.NewDecoder: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
last24Hours = d.Last24Hours
numDays = d.NumDays
groupBy = d.GroupBy
forChain = d.ForChain
forAddress = d.ForAddress
default:
http.Error(w, "405 - Method Not Allowed", http.StatusMethodNotAllowed)
log.Println("Method Not Allowed")
return
}
// default query period is all time
queryDays := int(time.Now().UTC().Sub(releaseDay).Hours() / 24)
// if the request included numDays, set the query period to that
if numDays != "" {
var convErr error
queryDays, convErr = strconv.Atoi(numDays)
if convErr != nil {
http.Error(w, "numDays must be an integer", http.StatusBadRequest)
return
}
}
// create the rowkey prefix for querying
prefix := ""
if forChain != "" {
prefix = forChain
if groupBy == "" {
// if the request is forChain, and groupBy is empty, set it to groupBy chain
groupBy = "chain"
}
if forAddress != "" {
// if the request is forAddress, always groupBy address
groupBy = "address"
prefix = forChain + ":" + forAddress
}
}
// use the groupBy value to determine how many segments of the row key should be used.
keySegments := 0
if groupBy == "chain" {
keySegments = 1
}
if groupBy == "address" {
keySegments = 2
}
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
var wg sync.WaitGroup
// total of last 24 hours
var last24HourCount map[string]int
if last24Hours != "" {
wg.Add(1)
go func(prefix string, keySegments int) {
var err error
last24HourInterval := -time.Duration(24) * time.Hour
now := time.Now().UTC()
start := now.Add(last24HourInterval)
defer wg.Done()
last24HourCount, err = messageCountForInterval(tbl, ctx, prefix, start, now, keySegments)
if err != nil {
log.Printf("failed getting count for interval, err: %v", err)
}
}(prefix, keySegments)
}
// daily totals
periodTotals := map[string]int{}
var dailyTotals map[string]map[string]int
wg.Add(1)
go func(prefix string, keySegments int, queryDays int) {
var err error
defer wg.Done()
dailyTotals, err = createCountsOfInterval(tbl, ctx, prefix, queryDays, keySegments)
if err != nil {
log.Fatalf("failed getting createCountsOfInterval err %v", err)
}
// sum all the days to create a map with totals for the query period
for _, vals := range dailyTotals {
for chain, amount := range vals {
periodTotals[chain] += amount
}
}
}(prefix, keySegments, queryDays)
wg.Wait()
result := &totalsResult{
LastDayCount: last24HourCount,
TotalCount: periodTotals,
TotalCountDurationDays: queryDays,
DailyTotals: dailyTotals,
}
jsonBytes, err := json.Marshal(result)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}
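// Illustrative requests (paths assume the functions_server mux, which routes
// /totals to this handler):
//
//	GET  /totals?numDays=7&groupBy=chain&last24Hours=true
//	POST /totals with body {"numDays": "7", "forChain": "2"}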

View File

@@ -1,192 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"context"
"encoding/json"
"log"
"net/http"
"strings"
"sync"
"time"
"cloud.google.com/go/bigtable"
"github.com/wormhole-foundation/wormhole/sdk/vaa"
)
type txTotals struct {
DailyTotals map[string]map[string]int
}
var txTotalsResult txTotals
var txTotalsMutex sync.RWMutex
var txTotalsResultPath = "transaction-totals.json"
func fetchRowKeys(tbl *bigtable.Table, ctx context.Context, start, end time.Time) []string {
rowKeys := []string{}
chainIds := tvlChainIDs
chainIds = append(chainIds, vaa.ChainIDPythNet)
for _, chainId := range chainIds {
err := tbl.ReadRows(ctx, bigtable.PrefixRange(chainIDRowPrefix(chainId)), func(row bigtable.Row) bool {
rowKeys = append(rowKeys, row.Key())
return true
}, bigtable.RowFilter(
bigtable.ChainFilters(
bigtable.FamilyFilter(quorumStateFam), // VAAs that have reached quorum
bigtable.CellsPerRowLimitFilter(1), // only the first cell in each column
bigtable.TimestampRangeFilter(start, end), // within time range
bigtable.StripValueFilter(), // no columns/values, just the row.Key()
)))
if err != nil {
log.Fatalf("fetchRowsInInterval returned an error: %v", err)
}
}
return rowKeys
}
func updateTxTotalsResult(tbl *bigtable.Table, ctx context.Context, numPrevDays int) {
if txTotalsResult.DailyTotals == nil {
txTotalsResult.DailyTotals = map[string]map[string]int{}
if loadCache {
loadJsonToInterface(ctx, txTotalsResultPath, &txTotalsMutex, &txTotalsResult.DailyTotals)
}
}
now := time.Now().UTC()
var intervalsWG sync.WaitGroup
// there will be a query for each previous day, plus today
intervalsWG.Add(numPrevDays + 1)
for daysAgo := 0; daysAgo <= numPrevDays; daysAgo++ {
go func(tbl *bigtable.Table, ctx context.Context, daysAgo int) {
// start is the start of day (SOD), end is the end of day (EOD)
// "0 daysAgo start" is 00:00:00 of the current day
// "0 daysAgo end" is 23:59:59.999999999 of the current day (i.e. still in the future)
// calculate the start and end times for the query
hoursAgo := (24 * daysAgo)
daysAgoDuration := -time.Duration(hoursAgo) * time.Hour
n := now.Add(daysAgoDuration)
year := n.Year()
month := n.Month()
day := n.Day()
loc := n.Location()
start := time.Date(year, month, day, 0, 0, 0, 0, loc)
end := time.Date(year, month, day, 23, 59, 59, 999999999, loc)
dateStr := start.Format("2006-01-02")
txTotalsMutex.Lock()
if daysAgo >= 1 {
if _, ok := txTotalsResult.DailyTotals[dateStr]; ok && useCache(dateStr) {
txTotalsMutex.Unlock()
intervalsWG.Done()
return
}
}
txTotalsMutex.Unlock()
defer intervalsWG.Done()
result := fetchRowKeys(tbl, ctx, start, end)
// iterate through the rows and increment the counts
countsByDay := map[string]int{}
countsByDay["*"] = 0
for _, rowKey := range result {
chainId := strings.Split(rowKey, ":")[0]
if _, ok := countsByDay[chainId]; !ok {
countsByDay[chainId] = 1
} else {
countsByDay[chainId] = countsByDay[chainId] + 1
}
countsByDay["*"] = countsByDay["*"] + 1
}
txTotalsMutex.Lock()
txTotalsResult.DailyTotals[dateStr] = countsByDay
txTotalsMutex.Unlock()
}(tbl, ctx, daysAgo)
}
intervalsWG.Wait()
// create a set of all the keys from all dates, to ensure the result objects all have the same keys
seenKeySet := map[string]bool{}
for _, v := range txTotalsResult.DailyTotals {
for chainId := range v {
seenKeySet[chainId] = true
}
}
// ensure each date object has the same keys:
for date := range txTotalsResult.DailyTotals {
for chainId := range seenKeySet {
if _, ok := txTotalsResult.DailyTotals[date][chainId]; !ok {
// add the missing key to the map
txTotalsResult.DailyTotals[date][chainId] = 0
}
}
}
persistInterfaceToJson(ctx, txTotalsResultPath, &txTotalsMutex, txTotalsResult.DailyTotals)
}
func ComputeTransactionTotals(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
queryDays := int(time.Now().UTC().Sub(releaseDay).Hours() / 24)
ctx := context.Background()
// updateTxTotalsResult logs fatally on failure, so there is no error to check here
updateTxTotalsResult(tbl, ctx, queryDays)
jsonBytes, err := json.Marshal(txTotalsResult)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}
func TransactionTotals(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
ctx := context.Background()
var cachedResult txTotals
cachedResult.DailyTotals = map[string]map[string]int{}
loadJsonToInterface(ctx, txTotalsResultPath, &txTotalsMutex, &cachedResult.DailyTotals)
jsonBytes, err := json.Marshal(cachedResult)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}
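// Note the compute/serve split: ComputeTransactionTotals recomputes the daily
// counts and persists them to transaction-totals.json, while TransactionTotals
// only serves the persisted JSON. Illustrative requests (paths assume the
// functions_server mux):
//
//	GET /computetransactiontotals   // refresh the persisted totals
//	GET /transactiontotals          // read the persisted totals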

View File

@@ -1,112 +0,0 @@
// Package p contains an HTTP Cloud Function.
package p
import (
"encoding/json"
"fmt"
"html"
"io"
"log"
"net/http"
"cloud.google.com/go/bigtable"
)
// fetch a single row by transaction identifier
func Transaction(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for the preflight request
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
w.Header().Set("Access-Control-Max-Age", "3600")
w.WriteHeader(http.StatusNoContent)
return
}
// Set CORS headers for the main request.
w.Header().Set("Access-Control-Allow-Origin", "*")
var transactionID string
// allow GET requests with querystring params, or POST requests with json body.
switch r.Method {
case http.MethodGet:
queryParams := r.URL.Query()
transactionID = queryParams.Get("id")
readyCheck := queryParams.Get("readyCheck")
if readyCheck != "" {
// for running in devnet
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, html.EscapeString("ready"))
return
}
case http.MethodPost:
// declare request body properties
var d struct {
ID string `json:"id"`
}
// deserialize request body
if err := json.NewDecoder(r.Body).Decode(&d); err != nil {
switch err {
case io.EOF:
// do nothing, empty body is ok
default:
log.Printf("json.NewDecoder: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
}
transactionID = d.ID
default:
http.Error(w, "405 - Method Not Allowed", http.StatusMethodNotAllowed)
log.Println("Method Not Allowed")
return
}
if transactionID == "" {
http.Error(w, "id cannot be blank", http.StatusBadRequest)
return
}
var result bigtable.Row
readErr := tbl.ReadRows(r.Context(), bigtable.PrefixRange(""), func(row bigtable.Row) bool {
result = row
return true
}, bigtable.RowFilter(bigtable.ValueFilter(transactionID)))
if readErr != nil {
log.Fatalf("failed to read rows: %v", readErr)
}
if result == nil {
http.NotFound(w, r)
log.Printf("did not find row with transaction ID %v", transactionID)
return
}
key := result.Key()
row, err := tbl.ReadRow(r.Context(), key, bigtable.RowFilter(bigtable.LatestNFilter(1)))
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Fatalf("Could not read row with key %s: %v", key, err)
}
details := makeDetails(row)
jsonBytes, err := json.Marshal(details)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
log.Println(err.Error())
return
}
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}
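// Illustrative requests (path assumes the functions_server mux, which routes
// /transaction to this handler):
//
//	GET  /transaction?id=<transaction hash>
//	POST /transaction with body {"id": "<transaction hash>"}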

View File

@@ -1,11 +0,0 @@
{
"folders": [
{
"path": "../functions_server"
},
{
"path": "."
}
],
"settings": {}
}

View File

@@ -1,3 +0,0 @@
{
"note": "intentionally left bank - credentials not needed for devnet."
}

View File

@@ -1,61 +0,0 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "mainnet-data",
"type": "go",
"request": "launch",
"mode": "auto",
"program": "${workspaceFolder}/main.go",
"args": [
"-race" // tells Go to report race conditions
],
"env": {
"PORT": "8080",
"GCP_PROJECT": "wormhole-315720",
"BIGTABLE_INSTANCE": "wormhole-mainnet",
"CACHE_BUCKET": "cloud-function-cache-mainnet",
"TOKEN_ALLOWLIST": "token-allowlist-mainnet.json",
"GOOGLE_APPLICATION_CREDENTIALS": "/home/you/path/to/your/service-account.json",
// LOAD_CACHE sets whether or not previously computed results should be loaded and used in calculations
// can be set to false to effectively rebuild the cache
// "LOAD_CACHE": "true",
// CoinGecko API key, if you have one. Works without a key, but the rate limit is lower.
// "COINGECKO_API_KEY": "your-key-here",
// SolanaBeach API key, if you have one. Works without a key, but the rate limit is lower.
// "SOLANABEACH_API_KEY": "your-key-here",
// if you'd like to run local functions triggered by Pub/Sub messages, uncomment the following
// "PUBSUB_NEW_VAA_TOPIC": "new-vaa-devnet-${env:USERNAME}",
// "PUBSUB_NEW_VAA_SUBSCRIPTION": "extract-payload-devnet-${env:USERNAME}",
// "PUBSUB_TOKEN_TRANSFER_DETAILS_TOPIC": "create-token-transfer-details-${env:USERNAME}",
// "PUBSUB_TOKEN_TRANSFER_DETAILS_SUBSCRIPTION": "calculate-transfer-data-devnet-${env:USERNAME}",
},
},
{
"name": "devnet-emulators",
"type": "go",
"request": "launch",
"mode": "auto",
"program": "${workspaceFolder}/main.go",
"args": [
"-race"
],
"env": {
// Tilt devnet values, set in devnet/bigtable.yaml
"GCP_PROJECT": "local-dev",
"BIGTABLE_INSTANCE": "wormhole",
"BIGTABLE_EMULATOR_HOST": "localhost:8086",
"PUBSUB_EMULATOR_HOST": "localhost:8085",
"PUBSUB_NEW_VAA_TOPIC": "new-vaa-devnet",
"PUBSUB_NEW_VAA_SUBSCRIPTION": "extract-payload-devnet",
"PUBSUB_TOKEN_TRANSFER_DETAILS_TOPIC": "create-token-transfer-details-devnet",
"PUBSUB_TOKEN_TRANSFER_DETAILS_SUBSCRIPTION": "calculate-transfer-data-devnet",
"CACHE_BUCKET": "cloud-function-cache-devnet",
"TOKEN_ALLOWLIST": "token-allowlist-devnet.json",
},
},
]
}

View File

@@ -1,12 +0,0 @@
# syntax=docker.io/docker/dockerfile:experimental@sha256:de85b2f3a3e8a2f7fe48e8e84a65f6fdd5cd5183afa6412fff9caa6871649c44
FROM docker.io/golang:1.17.0@sha256:06e92e576fc7a7067a268d47727f3083c0a564331bfcbfdde633157fc91fb17d
WORKDIR /app
COPY . .
WORKDIR /app/functions_server
RUN --mount=type=cache,target=/root/.cache --mount=type=cache,target=/go \
go build -mod=readonly -o /functions main.go
CMD ["/functions"]

View File

@@ -1,17 +0,0 @@
module github.com/certusone/wormhole/event_database/functions_server
go 1.16
// cloud runtime is go 1.16. just for reference.
require (
cloud.google.com/go/pubsub v1.17.1
github.com/GoogleCloudPlatform/functions-framework-go v1.5.2
github.com/certusone/wormhole/event_database/cloud_functions v0.0.0-20220126152252-d4735fc7c1aa
)
replace (
github.com/btcsuite/btcd => github.com/btcsuite/btcd v0.23.0
github.com/certusone/wormhole/event_database/cloud_functions => ../cloud_functions
github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1
)

File diff suppressed because it is too large

View File

@@ -1,128 +0,0 @@
package main
import (
"context"
"fmt"
"log"
"net/http"
"os"
"sync"
"cloud.google.com/go/pubsub"
"github.com/GoogleCloudPlatform/functions-framework-go/funcframework"
p "github.com/certusone/wormhole/event_database/cloud_functions"
)
func createAndSubscribe(client *pubsub.Client, topicName, subscriptionName string, handler func(ctx context.Context, m p.PubSubMessage) error) {
var topic *pubsub.Topic
var topicErr error
ctx := context.Background()
topic, topicErr = client.CreateTopic(ctx, topicName)
if topicErr != nil {
log.Printf("pubsub.CreateTopic err: %v", topicErr)
// the topic most likely already exists; fall back to a handle on the existing one
topic = client.Topic(topicName)
} else {
log.Println("created topic:", topicName)
}
subConf := pubsub.SubscriptionConfig{Topic: topic}
_, subErr := client.CreateSubscription(ctx, subscriptionName, subConf)
if subErr != nil {
log.Printf("pubsub.CreateSubscription err: %v", subErr)
} else {
log.Println("created subscription:", subscriptionName)
}
sub := client.Subscription(subscriptionName)
err := sub.Receive(ctx, func(ctx context.Context, msg *pubsub.Message) {
msg.Ack()
handler(ctx, p.PubSubMessage{Data: msg.Data})
})
if err != nil {
fmt.Println(fmt.Errorf("receive err: %v", err))
}
}
var mux = newMux()
// Entry is the cloud function entry point
func Entry(w http.ResponseWriter, r *http.Request) {
mux.ServeHTTP(w, r)
}
func newMux() *http.ServeMux {
mux := http.NewServeMux()
mux.HandleFunc("/notionaltransferred", p.NotionalTransferred)
mux.HandleFunc("/notionaltransferredto", p.NotionalTransferredTo)
mux.HandleFunc("/notionaltransferredfrom", p.NotionalTransferredFrom)
mux.HandleFunc("/computenotionaltransferredfrom", p.ComputeNotionalTransferredFrom)
mux.HandleFunc("/notionaltransferredtocumulative", p.NotionalTransferredToCumulative)
mux.HandleFunc("/notionaltvl", p.TVL)
mux.HandleFunc("/computenotionaltvl", p.ComputeTVL)
mux.HandleFunc("/notionaltvlcumulative", p.TvlCumulative)
mux.HandleFunc("/computenotionaltvlcumulative", p.ComputeTvlCumulative)
mux.HandleFunc("/addressestransferredto", p.AddressesTransferredTo)
mux.HandleFunc("/addressestransferredtocumulative", p.AddressesTransferredToCumulative)
mux.HandleFunc("/totals", p.Totals)
mux.HandleFunc("/nfts", p.NFTs)
mux.HandleFunc("/recent", p.Recent)
mux.HandleFunc("/transaction", p.Transaction)
mux.HandleFunc("/readrow", p.ReadRow)
mux.HandleFunc("/findvalues", p.FindValues)
mux.HandleFunc("/computetransactiontotals", p.ComputeTransactionTotals)
mux.HandleFunc("/transactiontotals", p.TransactionTotals)
mux.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) })
return mux
}
func main() {
var wg sync.WaitGroup
// http functions
wg.Add(1)
go func() {
defer wg.Done()
ctx := context.Background()
if err := funcframework.RegisterHTTPFunctionContext(ctx, "/", Entry); err != nil {
log.Fatalf("funcframework.RegisterHTTPFunctionContext: %v\n", err)
}
// Use PORT environment variable, or default to 8080.
port := "8080"
if envPort := os.Getenv("PORT"); envPort != "" {
port = envPort
}
if err := funcframework.Start(port); err != nil {
log.Fatalf("funcframework.Start: %v\n", err)
}
}()
// pubsub functions
pubsubCtx := context.Background()
gcpProject := os.Getenv("GCP_PROJECT")
pubsubClient, err := pubsub.NewClient(pubsubCtx, gcpProject)
if err != nil {
fmt.Println(fmt.Errorf("pubsub.NewClient err: %v", err))
}
pubsubTopicVAA := os.Getenv("PUBSUB_NEW_VAA_TOPIC")
pubsubSubscriptionVAA := os.Getenv("PUBSUB_NEW_VAA_SUBSCRIPTION")
wg.Add(1)
go func() {
// Done must fire when the receive loop exits, not immediately after launching it
defer wg.Done()
createAndSubscribe(pubsubClient, pubsubTopicVAA, pubsubSubscriptionVAA, p.ProcessVAA)
}()
pubsubTopicTransfer := os.Getenv("PUBSUB_TOKEN_TRANSFER_DETAILS_TOPIC")
pubsubSubscriptionTransfer := os.Getenv("PUBSUB_TOKEN_TRANSFER_DETAILS_SUBSCRIPTION")
wg.Add(1)
go func() {
defer wg.Done()
createAndSubscribe(pubsubClient, pubsubTopicTransfer, pubsubSubscriptionTransfer, p.ProcessTransfer)
}()
wg.Wait()
pubsubClient.Close()
}
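// Local development sketch, assuming the devnet-emulators launch config shown
// earlier (emulator hosts and project name come from devnet/bigtable.yaml):
//
//	BIGTABLE_EMULATOR_HOST=localhost:8086 \
//	PUBSUB_EMULATOR_HOST=localhost:8085 \
//	GCP_PROJECT=local-dev go run .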

View File

@@ -1,19 +0,0 @@
## Initializing a cloud BigTable instance
Once you've created a BigTable instance and a Service Account key, these Go scripts can create the table and column families to save event data.
Pass your BigTable connection info via args:
- the Google Cloud projectID
- BigTable instance name
- the path to a GCP Service Account with appropriate permissions
Invoke the script with the DB config options and `-setupDB` to create the table and column families, if they do not already exist. If they already exist when the script runs, the script does nothing.
```bash
go run . \
-project your-GCP-projectID \
-instance your-BigTable-instance-name \
-keyFilePath ./service-account-key.json \
-setupDB
```

View File

@@ -1,42 +0,0 @@
package main // must be package main for the README's "go run ." invocation to build
import (
"flag"
"log"
)
// tableName is a const rather than an arg because using different BigTable instances
// will be more common than having multiple tables in a single instance.
// Table name is also passed to devnet guardians.
const tableName = "v2Events"
// These column family names match the guardian code that does the inserting.
var columnFamilies = []string{
"MessagePublication",
"QuorumState",
"TokenTransferPayload",
"AssetMetaPayload",
"NFTTransferPayload",
"TokenTransferDetails",
"ChainDetails",
}
func main() {
project := flag.String("project", "", "The Google Cloud Platform project ID. Required.")
instance := flag.String("instance", "", "The Google Cloud Bigtable instance ID. Required.")
keyFilePath := flag.String("keyFilePath", "", "The Google Cloud Service Account json key file path. Required.")
setupDB := flag.Bool("setupDB", false, "Run database setup - create table and column families.")
flag.Parse()
for _, f := range []string{"project", "instance", "keyFilePath"} {
if flag.Lookup(f).Value.String() == "" {
log.Fatalf("The %s flag is required.", f)
}
}
if *setupDB {
RunSetup(*project, *instance, *keyFilePath)
}
}

View File

@@ -1,64 +0,0 @@
package main
import (
"context"
"log"
"cloud.google.com/go/bigtable"
"google.golang.org/api/option"
)
// sliceContains reports whether the provided string is present in the given slice of strings.
func sliceContains(list []string, target string) bool {
for _, s := range list {
if s == target {
return true
}
}
return false
}
// RunSetup will create a table and column families, if they do not already exist.
func RunSetup(project string, instance string, keyFilePath string) {
ctx := context.Background()
// Set up admin client, tables, and column families.
adminClient, err := bigtable.NewAdminClient(ctx, project, instance, option.WithCredentialsFile(keyFilePath))
if err != nil {
log.Fatalf("Could not create admin client: %v", err)
}
tables, err := adminClient.Tables(ctx)
if err != nil {
log.Fatalf("Could not fetch table list: %v", err)
}
if !sliceContains(tables, tableName) {
log.Printf("Creating table %s", tableName)
if err := adminClient.CreateTable(ctx, tableName); err != nil {
log.Fatalf("Could not create table %s: %v", tableName, err)
}
log.Println("created table: ", tableName)
}
tblInfo, err := adminClient.TableInfo(ctx, tableName)
if err != nil {
log.Fatalf("Could not read info for table %s: %v", tableName, err)
}
for _, familyName := range columnFamilies {
if !sliceContains(tblInfo.Families, familyName) {
if err := adminClient.CreateColumnFamily(ctx, tableName, familyName); err != nil {
log.Fatalf("Could not create column family %s: %v", familyName, err)
}
log.Println("created column family: ", familyName)
} else {
log.Println("column family already exists in table: ", tableName)
}
}
if err = adminClient.Close(); err != nil {
log.Fatalf("Could not close admin client: %v", err)
}
}

View File

@@ -1,44 +0,0 @@
module github.com/certusone/wormhole/event_database/initialize_db
go 1.16
require (
cloud.google.com/go/bigtable v1.10.1
google.golang.org/api v0.58.0
)
require (
cloud.google.com/go v0.97.0 // indirect
cloud.google.com/go/functions v1.0.0 // indirect
cloud.google.com/go/pubsub v1.17.1 // indirect
github.com/GoogleCloudPlatform/functions-framework-go v1.5.2 // indirect
github.com/cloudevents/sdk-go/v2 v2.6.1 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
github.com/json-iterator/go v1.1.10 // indirect
github.com/jstemmer/go-junit-report v0.9.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.4.0 // indirect
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.10.0 // indirect
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 // indirect
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/tools v0.1.5 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20211019152133-63b7e35f4404 // indirect
google.golang.org/grpc v1.40.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
)
// replace github.com/certusone/wormhole/event_database/cloud_functions => ./cloud_functions

View File

@@ -1,617 +0,0 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go v0.82.0/go.mod h1:vlKccHJGuFBFufnAnuB08dfEH9Y3H7dzDzRECFdC2TA=
cloud.google.com/go v0.83.0 h1:bAMqZidYkmIsUqe6PtkEPT7Q+vfizScn+jfNA6jwK9c=
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/bigtable v1.10.1 h1:QKcRHeAsraxIlrdCZ3LLobXKBvITqcOEnSbHG2rzL9g=
cloud.google.com/go/bigtable v1.10.1/go.mod h1:cyHeKlx6dcZCO0oSQucYdauseD8kIENGuDOJPKMCVg8=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/functions v1.0.0 h1:cOFEDJ3sgAFRjRULSUJ0Q8cw9qFa5JdpXIBWoNX5uDw=
cloud.google.com/go/functions v1.0.0/go.mod h1:O9KS8UweFVo6GbbbCBKh5yEzbW08PVkg2spe3RfPMd4=
cloud.google.com/go/kms v1.0.0/go.mod h1:nhUehi+w7zht2XrUfvTRNpxrfayBHqP4lu2NSywui/0=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/pubsub v1.17.1 h1:s2UGTTphpnUQ0Wppkp2OprR4pS3nlBpPvyL2GV9cqdc=
cloud.google.com/go/pubsub v1.17.1/go.mod h1:4qDxMr1WsM9+aQAz36ltDwCIM+R0QdlseyFjBuNvnss=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/GoogleCloudPlatform/functions-framework-go v1.5.2 h1:fPYZMZ8BSK2jfZ28VG6vYxr/PTLbG+9USn8njzxfmWM=
github.com/GoogleCloudPlatform/functions-framework-go v1.5.2/go.mod h1:pq+lZy4vONJ5fjd3q/B6QzWhfHPAbuVweLpxZzMOb9Y=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudevents/sdk-go/v2 v2.6.1 h1:yHtzgmeBvc0TZx1nrnvYXov1CSvkQyvhEhNMs8Z5Mmk=
github.com/cloudevents/sdk-go/v2 v2.6.1/go.mod h1:nlXhgFkf0uTopxmRXalyMwS2LG70cRGPrxzmjJgSG0U=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210506205249-923b5ab0fc1a/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210601080250-7ecdf8ef093b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644 h1:CA1DEQ4NdKphKeL70tvsWNdT5oFh1lOjihRcEDROi0I=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I=
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
google.golang.org/api v0.48.0 h1:RDAPWfNFY06dffEXfn7hZF5Fr1ZbnChzfQZAPyBd1+I=
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/api v0.58.0 h1:MDkAbYIB1JpSgCTOCYYoIec/coMlKK4oVbpnBLLcyT0=
google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210601144548-a796c710e9b6/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 h1:pc16UedxnxXXtGxHCSUhafAoVHQZ0yXl8ZelMH4EETc=
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211019152133-63b7e35f4404 h1:ZB48alYoIN+Soc1OcXirVKYOhOOf6Pek+iN+L+pzQI4=
google.golang.org/genproto v0.0.0-20211019152133-63b7e35f4404/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

File diff suppressed because one or more lines are too long

(image diff: 21 KiB image removed)

View File

@ -1,97 +0,0 @@
## Wormhole event BigTable schema
### Row Keys
Row keys contain the MessageID, delimited by colons, like so: `EmitterChain:EmitterAddress:Sequence`.
- `EmitterAddress` left padded with `0`s to 32 bytes, then hex encoded.
- `Sequence` left padded with `0`s to 16 characters, so rows are ordered in the sequence they occurred (BigTable rows are sorted lexicographically by row key).
BigTable can only be queried by data in the row key; only row key data is indexed. You cannot query based on the value of a column, though you may filter results based on column values.
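For illustration, the padding rules above can be applied like this (a minimal Go sketch; `makeRowKey` and the example address are hypothetical, not an actual helper from this repo):

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// makeRowKey builds EmitterChain:EmitterAddress:Sequence per the rules above:
// the address is left padded with zero bytes to 32 bytes and hex encoded, and
// the sequence is left padded with zeros to 16 characters so that
// lexicographic row-key order matches sequence order.
func makeRowKey(emitterChain uint16, emitterAddress []byte, sequence uint64) string {
	padded := make([]byte, 32)
	copy(padded[32-len(emitterAddress):], emitterAddress)
	return fmt.Sprintf("%d:%s:%016d", emitterChain, hex.EncodeToString(padded), sequence)
}

func main() {
	addr, _ := hex.DecodeString("0290fb167208af455bb137780163b7b7a9a10c16")
	fmt.Println(makeRowKey(2, addr, 42))
	// Output: 2:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16:0000000000000042
}
```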
### Column Families
BigTable requires every column to belong to a "column family". Families group columns that store related data, which makes reads efficient: you may specify which families you want returned (see the read sketch after this list).
The column families listed below each represent data unique to a phase of the attestation lifecycle.
- `MessagePublication` holds data about a user's interaction with a Wormhole contract. Contains data from the Guardian's VAA struct.
- `QuorumState` stores the signed VAA once quorum is reached.
- `TokenTransferPayload` stores the decoded payload of transfer messages.
- `AssetMetaPayload` stores the decoded payload of asset metadata messages.
- `NFTTransferPayload` stores the decoded payload of NFT transfer messages.
- `TokenTransferDetails` stores information about the transfer.
- `ChainDetails` stores chain-native data supplemented from external source(s).
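Because families can be requested individually, a reader that only needs the signed VAA can ask for just the `QuorumState` family. Below is a minimal sketch using the `cloud.google.com/go/bigtable` client; the project, instance, and table names are illustrative assumptions, not this deployment's actual configuration:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/bigtable"
)

func main() {
	ctx := context.Background()
	// Placeholder project/instance/table names, for illustration only.
	client, err := bigtable.NewClient(ctx, "my-project", "wormhole-instance")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	tbl := client.Open("v2Events")
	rowKey := "2:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16:0000000000000042"

	// Ask BigTable to return only the QuorumState family; the row's other
	// families are never sent over the wire.
	row, err := tbl.ReadRow(ctx, rowKey,
		bigtable.RowFilter(bigtable.FamilyFilter("QuorumState")))
	if err != nil {
		log.Fatal(err)
	}
	for _, item := range row["QuorumState"] {
		fmt.Printf("%s: %d bytes\n", item.Column, len(item.Value))
	}
}
```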
### Column Qualifiers
Each column qualifier below is prefixed with its column family.
#### MessagePublication
- `MessagePublication:Version` Version of the VAA schema.
- `MessagePublication:GuardianSetIndex` The index of the active Guardian set.
- `MessagePublication:Timestamp` Timestamp when the VAA was created by the Guardian.
- `MessagePublication:Nonce` Nonce of the user's transaction.
- `MessagePublication:Sequence` Sequence from the interaction with the Wormhole contract.
- `MessagePublication:EmitterChain` The chain the message was emitted on.
- `MessagePublication:EmitterAddress` The address of the contract that emitted the message.
- `MessagePublication:InitiatingTxID` The transaction identifier of the user's interaction with the contract.
- `MessagePublication:Payload` The payload of the user's message.
#### QuorumState
- `QuorumState:SignedVAA` the VAA with the signatures that contributed to quorum.
#### TokenTransferPayload
- `TokenTransferPayload:PayloadId` the payload identifier of the payload.
- `TokenTransferPayload:Amount` the amount of the transfer.
- `TokenTransferPayload:OriginAddress` the address the transfer originates from.
- `TokenTransferPayload:OriginChain` the chain identifier of the chain the transfer originates from.
- `TokenTransferPayload:TargetAddress` the destination address of the transfer.
- `TokenTransferPayload:TargetChain` the destination chain identifier of the transfer.
#### AssetMetaPayload
- `AssetMetaPayload:PayloadId` the payload identifier of the payload.
- `AssetMetaPayload:TokenAddress` the address of the token, left padded with `0`s to 32 bytes.
- `AssetMetaPayload:TokenChain` the chain identifier of the token's native chain.
- `AssetMetaPayload:Decimals` the number of decimals of the token.
- `AssetMetaPayload:Symbol` the ticker symbol of the token.
- `AssetMetaPayload:Name` the name of the token.
#### NFTTransferPayload
- `NFTTransferPayload:PayloadId` the payload identifier of the payload.
- `NFTTransferPayload:OriginAddress` the address the transfer originates from.
- `NFTTransferPayload:OriginChain` the chain identifier of the chain the transfer originates from.
- `NFTTransferPayload:Symbol` the symbol of the NFT.
- `NFTTransferPayload:Name` the name of the NFT.
- `NFTTransferPayload:TokenId` the token identifier of the NFT.
- `NFTTransferPayload:URI` the URI of the NFT.
- `NFTTransferPayload:TargetAddress` the destination address of the transfer.
- `NFTTransferPayload:TargetChain` the destination chain identifier of the transfer.
#### TokenTransferDetails
- `TokenTransferDetails:Amount` the amount transferred.
- `TokenTransferDetails:NotionalUSD` the notional value of the transfer in USD.
- `TokenTransferDetails:OriginSymbol` the symbol of the token sent to Wormhole.
- `TokenTransferDetails:OriginName` the name of the token sent to Wormhole.
- `TokenTransferDetails:OriginTokenAddress` the address of the token sent to Wormhole.
#### ChainDetails
- `ChainDetails:SenderAddress` the native address that sent the message.
- `ChainDetails:ReceiverAddress` the native address that received the message.
- `ChainDetails:ResultingTokenAddress` the address of the token on the target chain.
- `ChainDetails:ResultingTokenSymbol` the symbol of the token that was the final result of the transfer.
- `ChainDetails:ResultingTokenName` the name of the token that was the final result of the transfer.

View File

@ -1,53 +0,0 @@
# Centralized datastore for Wormhole visualizations
## Objective
Persist transient Guardian events in a database along with on-chain data, for easier introspection via a block-explorer style GUI.
## Background
Events observed and broadcast between Guardians are transient. Before a message is fully attested by the Guardians, an end user has no way to determine where their event is within the attestation lifecycle. Saving the attestation state along with the message identifiers would allow the development of discovery interfaces.
Building a GUI that allows querying and viewing Wormhole data by a single on-chain identifier would make using Wormhole a friendlier experience. Such a GUI would be difficult to build without an off-chain datastore that captures the entire lifecycle of Wormhole events.
## Goals
- Persist user intent with the relevant metadata (sender address, transaction hash/signature).
- Expose the Guardian network's Verifiable Action Approval state: individual signatures and if/when quorum was reached.
- Record the transaction hash/signature of all transactions performed by Guardians relevant to the User's intent.
- Allow querying by a transaction identifier and retrieving associated data.
## Non-Goals
- Centrally persisted Wormhole data does not aim to be a source of truth.
- Centrally persisted Wormhole data will not be publicly available for programmatic consumption.
## Overview
A Guardian can be configured to publish Wormhole events to a database. This enables a discovery interface where users can query for Wormhole events, as well as message counts and statistics.
![Wormhole data flow](Wormhole-data-flow.svg)
## Detailed Design
A Google Cloud BigTable instance will be set up to store data about Wormhole events, with the schema described in the following section. BigTable is preferred because it does not require a global schema, and because row key sharding lets it efficiently handle large amounts of historic data.
A block-explorer style web app will use BigTable to retrieve VAA state to create a discovery interface for Wormhole. The explorer web app could allow users to query for Wormhole events by a single identifier, similar to other block explorers, where a user may enter an address or a transaction identifier and see the relevant data.
### API / database schema
BigTable schema: [Wormhole event schema](./bigtable_event_schema.md)
## Caveats
It is undetermined how costly it will be to query for multiple transactions (rows) when bridging tokens, for example retrieving the `assetMeta` transaction along with the `transfer` message transaction.
## Alternatives Considered
### Database schema
Saving each Protobuf SignedObservation as its own row was considered. However, building a picture of the state of the user's intent from SignedObservations alone is not ideal, as the logic to interpret the results would need to come from somewhere, and additional data would need to be sourced.
Using the VAA "digest" as the BigTable RowKey was considered. Using the VAA digest would make database writes easy within the existing codebase. However, indexing on the digest would heavily penalize reads: the digest is not known to the user, so a full table scan would be required for every user request.
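By contrast, the MessageID row key lets any user-known identifier become a key prefix, so reads are bounded range scans rather than full table scans. A hedged sketch of such a prefix scan, reusing the hypothetical `tbl` handle and row-key format from the schema sketches above:

```go
// Every message from a given emitter shares the key prefix
// "EmitterChain:EmitterAddress:", so only matching rows are scanned.
prefix := "2:0000000000000000000000000290fb167208af455bb137780163b7b7a9a10c16:"
if err := tbl.ReadRows(ctx, bigtable.PrefixRange(prefix),
	func(row bigtable.Row) bool {
		fmt.Println(row.Key())
		return true // keep iterating
	}); err != nil {
	log.Fatal(err)
}
```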

View File

@ -43,7 +43,7 @@ format(){
fi
# Use -exec because of pitfall #1 in http://mywiki.wooledge.org/BashPitfalls
GOFMT_OUTPUT="$(find "./sdk" "./node" "./event_database" "./wormchain" -type f -name '*.go' -not -path '*.pb.go' -print0 | xargs -r -0 goimports $GOIMPORTS_ARGS 2>&1)"
GOFMT_OUTPUT="$(find "./sdk" "./node" "./wormchain" -type f -name '*.go' -not -path '*.pb.go' -print0 | xargs -r -0 goimports $GOIMPORTS_ARGS 2>&1)"
if [ -n "$GOFMT_OUTPUT" ]; then
if [ "$GITHUB_ACTION" == "true" ]; then