cloud_functions: Performance enhancements to handle increased message volume (#1688)
TVL functions no longer scan the table for PythNet messages. Added a light-weight transaction-totals function.
This commit is contained in:
parent
09fefb30a9
commit
35b46ae205
|
@ -23,7 +23,7 @@ var muTransfersFromCache sync.RWMutex
|
|||
var transfersFromFilePath = "notional-transferred-from.json"
|
||||
|
||||
// finds the daily amount transferred from each chain from the specified start to the present.
|
||||
func createTransfersFromOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) {
|
||||
func createTransfersFromOfInterval(tbl *bigtable.Table, ctx context.Context, start time.Time) {
|
||||
if len(transfersFromCache.Daily) == 0 && loadCache {
|
||||
loadJsonToInterface(ctx, transfersFromFilePath, &muTransfersFromCache, &transfersFromCache)
|
||||
}
|
||||
|
@ -36,7 +36,7 @@ func createTransfersFromOfInterval(tbl *bigtable.Table, ctx context.Context, pre
|
|||
intervalsWG.Add(numPrevDays + 1)
|
||||
|
||||
for daysAgo := 0; daysAgo <= numPrevDays; daysAgo++ {
|
||||
go func(tbl *bigtable.Table, ctx context.Context, prefix string, daysAgo int) {
|
||||
go func(tbl *bigtable.Table, ctx context.Context, daysAgo int) {
|
||||
defer intervalsWG.Done()
|
||||
// start is the SOD, end is EOD
|
||||
// "0 daysAgo start" is 00:00:00 AM of the current day
|
||||
|
@ -73,7 +73,8 @@ func createTransfersFromOfInterval(tbl *bigtable.Table, ctx context.Context, pre
|
|||
transfersFromCache.Daily[dateStr] = map[string]float64{"*": 0}
|
||||
muTransfersFromCache.Unlock()
|
||||
|
||||
queryResult := fetchTransferRowsInInterval(tbl, ctx, prefix, start, end)
|
||||
for _, chainId := range tvlChainIDs {
|
||||
queryResult := fetchTransferRowsInInterval(tbl, ctx, chainIDRowPrefix(chainId), start, end)
|
||||
|
||||
// iterate through the rows and increment the amounts
|
||||
for _, row := range queryResult {
|
||||
|
@ -83,7 +84,8 @@ func createTransfersFromOfInterval(tbl *bigtable.Table, ctx context.Context, pre
|
|||
transfersFromCache.Daily[dateStr]["*"] = transfersFromCache.Daily[dateStr]["*"] + row.Notional
|
||||
transfersFromCache.Daily[dateStr][row.LeavingChain] = transfersFromCache.Daily[dateStr][row.LeavingChain] + row.Notional
|
||||
}
|
||||
}(tbl, ctx, prefix, daysAgo)
|
||||
}
|
||||
}(tbl, ctx, daysAgo)
|
||||
}
|
||||
intervalsWG.Wait()
|
||||
|
||||
|
@ -123,7 +125,7 @@ func ComputeNotionalTransferredFrom(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
ctx := context.Background()
|
||||
createTransfersFromOfInterval(tbl, ctx, "", releaseDay)
|
||||
createTransfersFromOfInterval(tbl, ctx, releaseDay)
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
|
|
@ -98,7 +98,8 @@ func tvlInInterval(tbl *bigtable.Table, ctx context.Context, start time.Time) ma
|
|||
|
||||
defer intervalsWG.Done()
|
||||
|
||||
queryResult := fetchTransferRowsInInterval(tbl, ctx, "", start, end)
|
||||
for _, chainId := range tvlChainIDs {
|
||||
queryResult := fetchTransferRowsInInterval(tbl, ctx, chainIDRowPrefix(chainId), start, end)
|
||||
|
||||
// iterate through the rows and increment the count
|
||||
for _, row := range queryResult {
|
||||
|
@ -152,6 +153,7 @@ func tvlInInterval(tbl *bigtable.Table, ctx context.Context, start time.Time) ma
|
|||
}
|
||||
muWarmTvlCache.Unlock()
|
||||
}
|
||||
}
|
||||
}(tbl, ctx, daysAgo)
|
||||
}
|
||||
|
||||
|
@ -227,11 +229,12 @@ func tvlSinceDate(tbl *bigtable.Table, ctx context.Context, dailyTotals map[stri
|
|||
|
||||
// returns the count of the rows in the query response
|
||||
func tvlForInterval(tbl *bigtable.Table, ctx context.Context, start, end time.Time) map[string]map[string]LockedAsset {
|
||||
// query for all rows in time range, return result count
|
||||
queryResults := fetchTransferRowsInInterval(tbl, ctx, "", start, end)
|
||||
|
||||
result := map[string]map[string]LockedAsset{}
|
||||
|
||||
for _, chainId := range tvlChainIDs {
|
||||
// query for all rows in time range, return result count
|
||||
queryResults := fetchTransferRowsInInterval(tbl, ctx, chainIDRowPrefix(chainId), start, end)
|
||||
|
||||
// iterate through the rows and increment the count for each index
|
||||
for _, row := range queryResults {
|
||||
if _, ok := result[row.OriginChain]; !ok {
|
||||
|
@ -283,6 +286,7 @@ func tvlForInterval(tbl *bigtable.Table, ctx context.Context, start, end time.Ti
|
|||
result["*"][row.TokenAddress] = prevAllChains
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
|
|
|
@ -113,10 +113,12 @@ func loadJsonToInterface(ctx context.Context, filePath string, mutex *sync.RWMut
|
|||
}
|
||||
defer timeTrack(time.Now(), fmt.Sprintf("reading %v", filePath))
|
||||
mutex.Lock()
|
||||
defer mutex.Unlock()
|
||||
|
||||
reader, readErr := cacheBucket.Object(filePath).NewReader(ctx)
|
||||
if readErr != nil {
|
||||
log.Printf("Failed reading %v in GCS. err: %v", filePath, readErr)
|
||||
return
|
||||
}
|
||||
defer reader.Close()
|
||||
fileData, err := io.ReadAll(reader)
|
||||
|
@ -124,7 +126,6 @@ func loadJsonToInterface(ctx context.Context, filePath string, mutex *sync.RWMut
|
|||
log.Printf("loadJsonToInterface: unable to read data. file %q: %v", filePath, err)
|
||||
}
|
||||
unmarshalErr := json.Unmarshal(fileData, &cacheMap)
|
||||
mutex.Unlock()
|
||||
if unmarshalErr != nil {
|
||||
log.Printf("failed unmarshaling %v, err: %v", filePath, unmarshalErr)
|
||||
}
|
||||
|
@ -235,6 +236,11 @@ type (
|
|||
}
|
||||
)
|
||||
|
||||
// ChainIDs to compute TVL/stats for
|
||||
// Useful to exclude chains we don't want to compute TVL for which can improve performance
|
||||
// (notably PythNet is excluded, ChainID 26)
|
||||
var tvlChainIDs = []vaa.ChainID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18}
|
||||
|
||||
func chainIdStringToType(chainId string) vaa.ChainID {
|
||||
switch chainId {
|
||||
case "1":
|
||||
|
@ -494,3 +500,7 @@ func isTokenActive(chainId string, tokenAddress string, date string) bool {
|
|||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func chainIDRowPrefix(chainId vaa.ChainID) string {
|
||||
return fmt.Sprintf("%d:", chainId)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,192 @@
|
|||
// Package p contains an HTTP Cloud Function.
|
||||
package p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/bigtable"
|
||||
"github.com/wormhole-foundation/wormhole/sdk/vaa"
|
||||
)
|
||||
|
||||
// txTotals is the JSON shape served by the transaction-totals functions:
// date string -> chain ID string (or "*" for all chains) -> message count.
type txTotals struct {
	DailyTotals map[string]map[string]int
}

var (
	// in-memory result, guarded by txTotalsMutex.
	txTotalsResult txTotals
	txTotalsMutex  sync.RWMutex
	// GCS object the totals are cached in.
	txTotalsResultPath = "transaction-totals.json"
)
|
||||
|
||||
func fetchRowKeys(tbl *bigtable.Table, ctx context.Context, start, end time.Time) []string {
|
||||
rowKeys := []string{}
|
||||
chainIds := tvlChainIDs
|
||||
chainIds = append(chainIds, vaa.ChainIDPythNet)
|
||||
for _, chainId := range chainIds {
|
||||
err := tbl.ReadRows(ctx, bigtable.PrefixRange(chainIDRowPrefix(chainId)), func(row bigtable.Row) bool {
|
||||
rowKeys = append(rowKeys, row.Key())
|
||||
return true
|
||||
}, bigtable.RowFilter(
|
||||
bigtable.ChainFilters(
|
||||
bigtable.FamilyFilter(quorumStateFam), // VAAs that have reached quorum
|
||||
bigtable.CellsPerRowLimitFilter(1), // only the first cell in each column
|
||||
bigtable.TimestampRangeFilter(start, end), // within time range
|
||||
bigtable.StripValueFilter(), // no columns/values, just the row.Key()
|
||||
)))
|
||||
if err != nil {
|
||||
log.Fatalf("fetchRowsInInterval returned an error: %v", err)
|
||||
}
|
||||
}
|
||||
return rowKeys
|
||||
}
|
||||
|
||||
func updateTxTotalsResult(tbl *bigtable.Table, ctx context.Context, numPrevDays int) {
|
||||
if txTotalsResult.DailyTotals == nil {
|
||||
txTotalsResult.DailyTotals = map[string]map[string]int{}
|
||||
if loadCache {
|
||||
loadJsonToInterface(ctx, txTotalsResultPath, &txTotalsMutex, &txTotalsResult.DailyTotals)
|
||||
}
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
var intervalsWG sync.WaitGroup
|
||||
// there will be a query for each previous day, plus today
|
||||
intervalsWG.Add(numPrevDays + 1)
|
||||
|
||||
for daysAgo := 0; daysAgo <= numPrevDays; daysAgo++ {
|
||||
go func(tbl *bigtable.Table, ctx context.Context, daysAgo int) {
|
||||
// start is the SOD, end is EOD
|
||||
// "0 daysAgo start" is 00:00:00 AM of the current day
|
||||
// "0 daysAgo end" is 23:59:59 of the current day (the future)
|
||||
|
||||
// calculate the start and end times for the query
|
||||
hoursAgo := (24 * daysAgo)
|
||||
daysAgoDuration := -time.Duration(hoursAgo) * time.Hour
|
||||
n := now.Add(daysAgoDuration)
|
||||
year := n.Year()
|
||||
month := n.Month()
|
||||
day := n.Day()
|
||||
loc := n.Location()
|
||||
|
||||
start := time.Date(year, month, day, 0, 0, 0, 0, loc)
|
||||
end := time.Date(year, month, day, 23, 59, 59, 999999999, loc)
|
||||
|
||||
dateStr := start.Format("2006-01-02")
|
||||
|
||||
txTotalsMutex.Lock()
|
||||
if daysAgo >= 1 {
|
||||
if _, ok := txTotalsResult.DailyTotals[dateStr]; ok && useCache(dateStr) {
|
||||
txTotalsMutex.Unlock()
|
||||
intervalsWG.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
txTotalsMutex.Unlock()
|
||||
|
||||
defer intervalsWG.Done()
|
||||
result := fetchRowKeys(tbl, ctx, start, end)
|
||||
|
||||
// iterate through the rows and increment the counts
|
||||
countsByDay := map[string]int{}
|
||||
countsByDay["*"] = 0
|
||||
for _, rowKey := range result {
|
||||
chainId := strings.Split(rowKey, ":")[0]
|
||||
if _, ok := countsByDay[chainId]; !ok {
|
||||
countsByDay[chainId] = 1
|
||||
} else {
|
||||
countsByDay[chainId] = countsByDay[chainId] + 1
|
||||
}
|
||||
countsByDay["*"] = countsByDay["*"] + 1
|
||||
}
|
||||
|
||||
txTotalsMutex.Lock()
|
||||
txTotalsResult.DailyTotals[dateStr] = countsByDay
|
||||
txTotalsMutex.Unlock()
|
||||
|
||||
}(tbl, ctx, daysAgo)
|
||||
}
|
||||
|
||||
intervalsWG.Wait()
|
||||
|
||||
// create a set of all the keys from all dates, to ensure the result objects all have the same keys
|
||||
seenKeySet := map[string]bool{}
|
||||
for _, v := range txTotalsResult.DailyTotals {
|
||||
for chainId := range v {
|
||||
seenKeySet[chainId] = true
|
||||
}
|
||||
}
|
||||
// ensure each date object has the same keys:
|
||||
for date := range txTotalsResult.DailyTotals {
|
||||
for chainId := range seenKeySet {
|
||||
if _, ok := txTotalsResult.DailyTotals[date][chainId]; !ok {
|
||||
// add the missing key to the map
|
||||
txTotalsResult.DailyTotals[date][chainId] = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//persistInterfaceToJson(ctx, txTotalsResultPath, &txTotalsMutex, txTotalsResult.DailyTotals)
|
||||
}
|
||||
|
||||
func ComputeTransactionTotals(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
if r.Method == http.MethodOptions {
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
|
||||
w.Header().Set("Access-Control-Max-Age", "3600")
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return
|
||||
}
|
||||
|
||||
queryDays := int(time.Now().UTC().Sub(releaseDay).Hours() / 24)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
var err error
|
||||
updateTxTotalsResult(tbl, ctx, queryDays)
|
||||
if err != nil {
|
||||
log.Fatalf("failed getting createCountsOfInterval err %v", err)
|
||||
}
|
||||
|
||||
jsonBytes, err := json.Marshal(txTotalsResult)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
w.Write([]byte(err.Error()))
|
||||
log.Println(err.Error())
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(jsonBytes)
|
||||
}
|
||||
|
||||
func TransactionTotals(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
if r.Method == http.MethodOptions {
|
||||
w.Header().Set("Access-Control-Allow-Methods", "POST")
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
|
||||
w.Header().Set("Access-Control-Max-Age", "3600")
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
var cachedResult txTotals
|
||||
cachedResult.DailyTotals = map[string]map[string]int{}
|
||||
loadJsonToInterface(ctx, txTotalsResultPath, &txTotalsMutex, &cachedResult.DailyTotals)
|
||||
|
||||
jsonBytes, err := json.Marshal(cachedResult)
|
||||
if err != nil {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
w.Write([]byte(err.Error()))
|
||||
log.Println(err.Error())
|
||||
return
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(jsonBytes)
|
||||
}
|
|
@ -73,6 +73,8 @@ func newMux() *http.ServeMux {
|
|||
mux.HandleFunc("/transaction", p.Transaction)
|
||||
mux.HandleFunc("/readrow", p.ReadRow)
|
||||
mux.HandleFunc("/findvalues", p.FindValues)
|
||||
mux.HandleFunc("/computetransactiontotals", p.ComputeTransactionTotals)
|
||||
mux.HandleFunc("/transactiontotals", p.TransactionTotals)
|
||||
|
||||
mux.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) })
|
||||
|
||||
|
|
Loading…
Reference in New Issue