cloud_functions: Added LOAD_CACHE env variable

Can be set to false to skip loading saved cache files, effectively forcing the caches to be rebuilt.
Authored by Kevin Peters on 2022-07-20 16:22:18 +00:00, committed by Evan Gray
parent 3ce18fd66d
commit eacb7c3f06
13 changed files with 34 additions and 22 deletions
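
The change is mechanical across the handler files: every cold-start cache read is now additionally gated on a package-level loadCache flag, which init() parses once from the LOAD_CACHE environment variable. A minimal, self-contained sketch of the pattern (warmCache, warmCacheFilePath, and the loader body below are illustrative stand-ins, not the repo's actual definitions):

    package main

    import (
        "context"
        "fmt"
        "sync"
    )

    var (
        loadCache         = true // default: reuse previously computed results
        warmCache         = map[string]float64{}
        muWarmCache       sync.RWMutex
        warmCacheFilePath = "warm-cache.json"
    )

    // stand-in for the repo's loadJsonToInterface, which reads a JSON cache
    // file from the cache bucket into the target map under the mutex
    func loadJsonToInterface(ctx context.Context, path string, mu *sync.RWMutex, target *map[string]float64) {
        mu.Lock()
        defer mu.Unlock()
        (*target)["*"] = 0 // pretend cached totals were read from path
        fmt.Println("loaded cache from", path)
    }

    func warmCacheIfNeeded(ctx context.Context) {
        // only read the cache file when the in-memory cache is empty AND
        // loading is enabled; with LOAD_CACHE=false the guard short-circuits
        // and every result is recomputed from scratch
        if _, ok := warmCache["*"]; !ok && loadCache {
            loadJsonToInterface(ctx, warmCacheFilePath, &muWarmCache, &warmCache)
        }
    }

    func main() {
        warmCacheIfNeeded(context.Background())
    }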

View File

@@ -35,7 +35,7 @@ var addressesToUpToYesterdayFilePath = "addresses-transferred-to-up-to-yesterday
 // finds all the unique addresses that have received tokens since a particular moment.
 func addressesTransferredToSince(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]float64 {
-    if _, ok := addressesToUpToYesterday["*"]; !ok {
+    if _, ok := addressesToUpToYesterday["*"]; !ok && loadCache {
         loadJsonToInterface(ctx, addressesToUpToYesterdayFilePath, &muAddressesToUpToYesterday, &addressesToUpToYesterday)
     }
@@ -125,7 +125,7 @@ func addressesTransferredToSince(tbl *bigtable.Table, ctx context.Context, prefi
 // calculates a map of recipient address to notional value received, by chain, since the start time specified.
 func createCumulativeAddressesOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]map[string]float64 {
-    if _, ok := warmCumulativeAddressesCache["*"]; !ok {
+    if _, ok := warmCumulativeAddressesCache["*"]; !ok && loadCache {
         loadJsonToInterface(ctx, warmCumulativeAddressesCacheFilePath, &muWarmCumulativeAddressesCache, &warmCumulativeAddressesCache)
     }

View File

@@ -112,7 +112,7 @@ func fetchAddressRowsInInterval(tbl *bigtable.Table, ctx context.Context, prefix
 // finds unique addresses tokens have been sent to, for each day since the start time passed in.
 func createAddressesOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]map[string]float64 {
-    if _, ok := warmAddressesCache["*"]; !ok {
+    if _, ok := warmAddressesCache["*"]; !ok && loadCache {
         loadJsonToInterface(ctx, warmAddressesCacheFilePath, &muWarmAddressesCache, &warmAddressesCache)
     }

View File

@@ -51,7 +51,7 @@ func fetchNFTRowsInInterval(tbl *bigtable.Table, ctx context.Context, prefix str
 }
 func createNFTCountsOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, numPrevDays int, keySegments int) (map[string]map[string]int, error) {
-    if _, ok := warmNFTCache["2021-09-13"]; !ok {
+    if _, ok := warmNFTCache["2021-09-13"]; !ok && loadCache {
         loadJsonToInterface(ctx, warmNFTCacheFilePath, &muWarmNFTCache, &warmNFTCache)
     }

View File

@@ -24,7 +24,7 @@ var transfersFromFilePath = "notional-transferred-from.json"
 // finds the daily amount transferred from each chain from the specified start to the present.
 func createTransfersFromOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) {
-    if len(transfersFromCache.Daily) == 0 {
+    if len(transfersFromCache.Daily) == 0 && loadCache {
         loadJsonToInterface(ctx, transfersFromFilePath, &muTransfersFromCache, &transfersFromCache)
     }
@@ -67,6 +67,9 @@ func createTransfersFromOfInterval(tbl *bigtable.Table, ctx context.Context, pre
             }
         }
         // no cache for this query, initialize the map
+        if transfersFromCache.Daily == nil {
+            transfersFromCache.Daily = map[string]map[string]float64{}
+        }
         transfersFromCache.Daily[dateStr] = map[string]float64{"*": 0}
         muTransfersFromCache.Unlock()
@@ -119,9 +122,7 @@ func ComputeNotionalTransferredFrom(w http.ResponseWriter, r *http.Request) {
         return
     }
-    ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
-    defer cancel()
+    ctx := context.Background()
     createTransfersFromOfInterval(tbl, ctx, "", releaseDay)
     w.WriteHeader(http.StatusOK)
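
This hunk, along with the matching ones in ComputeTvlCumulative and ComputeTVL below, also swaps the handler's 60-second timeout context for context.Background(), presumably so a from-scratch cache rebuild is not cancelled mid-query. A standalone illustration of the behavioral difference (not code from this repo):

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    func main() {
        // with a deadline, work using ctx is cancelled once the timer fires
        // (50ms here only so the demo finishes quickly)
        ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
        defer cancel()
        <-ctx.Done()
        fmt.Println("timeout ctx:", ctx.Err()) // context deadline exceeded

        // context.Background() has no deadline, so a long cache rebuild can
        // run to completion, at the cost of an unbounded request lifetime
        fmt.Println("background ctx:", context.Background().Err()) // <nil>
    }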

View File

@@ -35,7 +35,7 @@ var transferredToUpToYesterdayFilePath = "notional-transferred-to-up-to-yesterda
 // calculates the amount of each symbol transferred to each chain.
 func transferredToSince(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]float64 {
-    if _, ok := transferredToUpToYesterday["*"]; !ok {
+    if _, ok := transferredToUpToYesterday["*"]; !ok && loadCache {
         loadJsonToInterface(ctx, transferredToUpToYesterdayFilePath, &muTransferredToUpToYesterday, &transferredToUpToYesterday)
     }
@@ -145,7 +145,7 @@ func getDaysInRange(start, end time.Time) []string {
 // calculates a running total of notional value transferred, by symbol, since the start time specified.
 func createCumulativeAmountsOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]map[string]float64 {
-    if _, ok := warmCumulativeCache["*"]; !ok {
+    if _, ok := warmCumulativeCache["*"]; !ok && loadCache {
         loadJsonToInterface(ctx, warmCumulativeCacheFilePath, &muWarmCumulativeCache, &warmCumulativeCache)
     }

View File

@@ -135,7 +135,7 @@ func fetchTransferRowsInInterval(tbl *bigtable.Table, ctx context.Context, prefi
 // finds the daily amount of each symbol transferred to each chain, from the specified start to the present.
 func amountsTransferredToInInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]map[string]float64 {
-    if _, ok := warmTransfersToCache["*"]; !ok {
+    if _, ok := warmTransfersToCache["*"]; !ok && loadCache {
         loadJsonToInterface(ctx, warmTransfersToCacheFilePath, &muWarmTransfersToCache, &warmTransfersToCache)
     }

View File

@@ -30,7 +30,7 @@ var warmTransfersCacheFilePath = "notional-transferred-cache.json"
 // finds the daily amount of each symbol transferred from each chain, to each chain,
 // from the specified start to the present.
 func createTransfersOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, start time.Time) map[string]map[string]map[string]map[string]float64 {
-    if _, ok := warmTransfersCache["*"]; !ok {
+    if _, ok := warmTransfersCache["*"]; !ok && loadCache {
         loadJsonToInterface(ctx, warmTransfersCacheFilePath, &muWarmTransfersCache, &warmTransfersCache)
     }

View File

@@ -41,8 +41,10 @@ func loadAndUpdateCoinGeckoPriceCache(ctx context.Context, coinIds []string, now
     // at cold-start, load the price cache into memory, and fetch any missing token price histories and add them to the cache
     if !loadedCoinGeckoPriceCache {
         // load the price cache
-        loadJsonToInterface(ctx, coinGeckoPriceCacheFilePath, &muWarmTvlCumulativeCache, &coinGeckoPriceCache)
-        loadedCoinGeckoPriceCache = true
+        if loadCache {
+            loadJsonToInterface(ctx, coinGeckoPriceCacheFilePath, &muWarmTvlCumulativeCache, &coinGeckoPriceCache)
+            loadedCoinGeckoPriceCache = true
+        }
         // find tokens missing price history
         missing := []string{}
@@ -81,7 +83,7 @@ func loadAndUpdateCoinGeckoPriceCache(ctx context.Context, coinIds []string, now
 // calculates a running total of notional value transferred, by symbol, since the start time specified.
 func createTvlCumulativeOfInterval(tbl *bigtable.Table, ctx context.Context, start time.Time) map[string]map[string]map[string]LockedAsset {
-    if len(warmTvlCumulativeCache) == 0 {
+    if len(warmTvlCumulativeCache) == 0 && loadCache {
         loadJsonToInterface(ctx, warmTvlCumulativeCacheFilePath, &muWarmTvlCumulativeCache, &warmTvlCumulativeCache)
     }
@@ -271,8 +273,7 @@ func ComputeTvlCumulative(w http.ResponseWriter, r *http.Request) {
     // days since launch day
     queryDays := int(time.Now().UTC().Sub(releaseDay).Hours() / 24)
-    ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
-    defer cancel()
+    ctx := context.Background()
     dailyTvl := map[string]map[string]map[string]LockedAsset{}

View File

@@ -38,7 +38,7 @@ type LockedAsset struct {
 // finds the daily amount of each symbol transferred to each chain, from the specified start to the present.
 func tvlInInterval(tbl *bigtable.Table, ctx context.Context, start time.Time) map[string]map[string]map[string]LockedAsset {
-    if len(warmTvlCache) == 0 {
+    if len(warmTvlCache) == 0 && loadCache {
         loadJsonToInterface(ctx, warmTvlFilePath, &muWarmTvlCache, &warmTvlCache)
     }
@@ -303,8 +303,7 @@ func ComputeTVL(w http.ResponseWriter, r *http.Request) {
     // Set CORS headers for the main request.
     w.Header().Set("Access-Control-Allow-Origin", "*")
-    ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
-    defer cancel()
+    ctx := context.Background()
     now := time.Now().UTC()
     todaysDateStr := now.Format("2006-01-02")

View File

@@ -34,7 +34,7 @@ func getLatestOfEachEmitterAddress(tbl *bigtable.Table, ctx context.Context, pre
     if prefix == "" {
         cachePrefix = "*"
     }
-    if _, ok := warmCache[cachePrefix]; !ok {
+    if _, ok := warmCache[cachePrefix]; !ok && loadCache {
         loadJsonToInterface(ctx, warmRecentCacheFilePath, &muWarmRecentCache, &warmCache)
     }

View File

@@ -40,6 +40,8 @@ var solanaTokens = map[string]SolanaToken{}
 var releaseDay = time.Date(2021, 9, 13, 0, 0, 0, 0, time.UTC)
+
+var loadCache = true
 // init runs during cloud function initialization. So, this will only run during
 // an instance's cold start.
 // https://cloud.google.com/functions/docs/bestpractices/networking#accessing_google_apis
@@ -90,6 +92,12 @@ func init() {
     if tokenAllowlistFilePath != "" {
         loadJsonToInterface(context.Background(), tokenAllowlistFilePath, &sync.RWMutex{}, &tokenAllowlist)
     }
+
+    loadCacheStr := os.Getenv("LOAD_CACHE")
+    if val, err := strconv.ParseBool(loadCacheStr); err == nil {
+        loadCache = val
+        log.Printf("loadCache set to %v\n", loadCache)
+    }
 }
 func timeTrack(start time.Time, name string) {
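
strconv.ParseBool accepts only 1, t, T, TRUE, true, True, 0, f, F, FALSE, false, False; for anything else, including an unset LOAD_CACHE, it returns an error and the default loadCache = true survives. A quick standalone check of that fallback behavior:

    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    func main() {
        loadCache := true // mirrors the package-level default above
        // unset env var -> ParseBool("") errors, so the default is kept;
        // LOAD_CACHE=false flips the flag and forces a cache rebuild
        if val, err := strconv.ParseBool(os.Getenv("LOAD_CACHE")); err == nil {
            loadCache = val
        }
        fmt.Println("loadCache =", loadCache)
    }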

View File

@@ -66,7 +66,7 @@ func fetchRowsInInterval(tbl *bigtable.Table, ctx context.Context, prefix string
 }
 func createCountsOfInterval(tbl *bigtable.Table, ctx context.Context, prefix string, numPrevDays int, keySegments int) (map[string]map[string]int, error) {
-    if _, ok := warmTotalsCache["2021-09-13"]; !ok {
+    if _, ok := warmTotalsCache["2021-09-13"]; !ok && loadCache {
         loadJsonToInterface(ctx, warmTotalsCacheFilePath, &muWarmTotalsCache, &warmTotalsCache)
     }

View File

@@ -20,6 +20,9 @@
   "CACHE_BUCKET": "cloud-function-cache-mainnet",
   "TOKEN_ALLOWLIST": "token-allowlist-mainnet.json",
   "GOOGLE_APPLICATION_CREDENTIALS": "/home/you/path/to/your/service-account.json",
+  // LOAD_CACHE sets whether previously computed results should be loaded and used in calculations.
+  // can be set to false to effectively rebuild the cache.
+  // "LOAD_CACHE": "true",
   // CoinGecko API key if you have one. will work without - rate limit is lower.
   // "COINGECKO_API_KEY": "your-key-here",
   // SolanaBeach API key if you have one. will work without - rate limit is lower.
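
So, to force a rebuild on the next cold start, one would presumably uncomment the entry in this env file and flip it, matching the format of the other entries above:

    "LOAD_CACHE": "false",

Once the caches have been rebuilt, the entry can be set back to "true" (or removed, since true is the default) so later cold starts load the fresh cache files again.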