Compare commits

...

8 Commits

Author SHA1 Message Date
walker-16 823999cf2f
Merge ac862cde08 into ce1b7707fb 2024-04-26 20:44:47 +00:00
Fernando Torres ac862cde08
wip 2024-04-26 17:43:31 -03:00
Mariano ce1b7707fb
[WORMSCAN-API-1225] Add new endpoint for fetching x-chain-activity tops data (#1342)
* add 2 new tasks for collecting chain activity every day and hour

* making progress

* change query 2

* add query by month and year

* changes on task

* more changes

* change to 1d

* add 1d

* fix query

* adjust queryies

* change the way the query is executed

* changes on query

* making more progress

* fix per year query

* add a second group of tasks for downsampling

* add app_id

* update swagger docs

* optimize new tasks

* fix W

* fix W

* start using the new measurement

* change endpoint signature

* update endpoint name

* fix indents

* code review changes

* remove unnecessary break
2024-04-26 15:03:02 -03:00
Agustin Pazos d8dcd1bebe wip 2024-04-26 14:56:04 -03:00
Fernando Torres 9873e2aadf
wip 2024-04-24 16:51:20 -03:00
Agustin Pazos 01afaab67b wip 2024-04-24 12:56:01 -03:00
Fernando Torres b638aaafd7
wip 2024-04-24 11:00:30 -03:00
Agustin Pazos 793b37fc32 fly-event-processor initial commit 2024-04-24 10:56:22 -03:00
66 changed files with 3114 additions and 295 deletions

View File

@ -0,0 +1,40 @@
// Scheduled InfluxDB task: downsamples raw VAA volume points into the daily
// per-emitter-chain activity measurement "emitter_chain_activity_1d".
import "date"
// runTask aggregates, for the window [start, stop), the total transferred
// volume and the number of transfers per emitter chain, and writes both
// series ("volume" and "count" fields) to destMeasurement in destBucket.
runTask = (start,stop,srcBucket,destBucket,destMeasurement) => {
// Source rows: v2 volume points with a positive value. Columns that are not
// needed for the per-emitter rollup are dropped and the window start becomes
// the point timestamp.
data = from(bucket: srcBucket)
|> range(start: start,stop: stop)
|> filter(fn: (r) => r._measurement == "vaa_volume_v2" and r.version == "v2")
|> filter(fn: (r) => r._field == "volume" and r._value > 0)
|> drop(columns:["destination_chain","app_id","token_chain","token_address","version","_measurement","_time"])
|> rename(columns: {_start: "_time"})
|> group(columns: ["emitter_chain","_time"])
// Total volume per emitter chain; "to" records the window end as a string.
vols = data
|> sum(column: "_value")
|> set(key: "_field", value: "volume")
|> set(key: "to", value: string(v:stop))
|> set(key: "_measurement", value: destMeasurement)
|> to(bucket: destBucket)
// Number of transfers per emitter chain, written as the "count" field.
return data
|> count(column: "_value")
|> set(key: "_field", value: "count")
|> set(key: "to", value: string(v:stop))
|> set(key: "_measurement", value: destMeasurement)
|> to(bucket: destBucket)
}
bucketInfinite = "wormscan"
destMeasurement = "emitter_chain_activity_1d"
// Operate on the previous complete day: [yesterday 00:00, today 00:00) UTC.
stop = date.truncate(t: now(),unit: 1d)
start = date.sub(d: 1d, from: stop)
option task = {
name: "calculate chain activity per emitter every day",
every: 1d,
}
runTask(start:start, stop: stop, srcBucket: bucketInfinite, destBucket: bucketInfinite, destMeasurement: destMeasurement)

View File

@ -0,0 +1,40 @@
// Scheduled InfluxDB task: downsamples raw VAA volume points into the hourly
// per-emitter-chain activity measurement "emitter_chain_activity_1h".
import "date"
// runTask aggregates, for the window [start, stop), the total transferred
// volume and the number of transfers per emitter chain, and writes both
// series ("volume" and "count" fields) to destMeasurement in destBucket.
runTask = (start,stop,srcBucket,destBucket,destMeasurement) => {
// Source rows: v2 volume points with a positive value. Columns that are not
// needed for the per-emitter rollup are dropped and the window start becomes
// the point timestamp.
data = from(bucket: srcBucket)
|> range(start: start,stop: stop)
|> filter(fn: (r) => r._measurement == "vaa_volume_v2" and r.version == "v2")
|> filter(fn: (r) => r._field == "volume" and r._value > 0)
|> drop(columns:["destination_chain","app_id","token_chain","token_address","version","_measurement","_time"])
|> rename(columns: {_start: "_time"})
|> group(columns: ["emitter_chain","_time"])
// Total volume per emitter chain; "to" records the window end as a string.
vols = data
|> sum(column: "_value")
|> set(key: "_field", value: "volume")
|> set(key: "to", value: string(v:stop))
|> set(key: "_measurement", value: destMeasurement)
|> to(bucket: destBucket)
// Number of transfers per emitter chain, written as the "count" field.
return data
|> count(column: "_value")
|> set(key: "_field", value: "count")
|> set(key: "to", value: string(v:stop))
|> set(key: "_measurement", value: destMeasurement)
|> to(bucket: destBucket)
}
bucketInfinite = "wormscan"
destMeasurement = "emitter_chain_activity_1h"
// Operate on the previous complete hour.
stop = date.truncate(t: now(),unit: 1h)
start = date.sub(d: 1h, from: stop)
option task = {
name: "calculate chain activity per emitter every hour",
every: 1h,
}
runTask(start:start, stop: stop, srcBucket: bucketInfinite, destBucket: bucketInfinite, destMeasurement: destMeasurement)

View File

@ -0,0 +1,37 @@
// Scheduled InfluxDB task: downsamples raw VAA volume points into the daily
// "chain_activity_1d" measurement, keyed by emitter chain, destination chain
// and application id.
import "date"
// runTask writes, for the window [start, stop), the summed volume and the
// number of VAAs per (emitter_chain, destination_chain, app_id) group to
// destMeasurement in destBucket.
runTask = (start,stop,srcBucket,destBucket,destMeasurement) => {
// NOTE(review): unlike the per-emitter tasks, this keeps zero-volume rows
// and does not filter on version == "v2" — confirm whether intentional.
data = from(bucket: srcBucket)
|> range(start: start,stop: stop)
|> filter(fn: (r) => r._measurement == "vaa_volume_v2" and r._field == "volume")
|> group(columns: ["emitter_chain", "destination_chain", "app_id"])
// Summed volume; the point time is pinned to the window start and "to"
// holds the window end (start + 1d) as a string.
data
|> sum(column: "_value")
|> set(key: "_field", value: "volume")
|> map(fn: (r) => ({ r with _time: start }))
|> set(key: "to", value: string(v:date.add(d: 1d, to: start)))
|> set(key: "_measurement", value: destMeasurement)
|> to(bucket: destBucket)
// Number of VAAs in the same window, written as the "count" field.
return data
|> count(column: "_value")
|> set(key: "_field", value: "count")
|> map(fn: (r) => ({ r with _time: start }))
|> set(key: "to", value: string(v:date.add(d: 1d, to: start)))
|> set(key: "_measurement", value: destMeasurement)
|> to(bucket: destBucket)
}
bucketInfinite = "wormscan"
destMeasurement = "chain_activity_1d"
// Operate on the previous complete day.
stop = date.truncate(t: now(),unit: 24h)
start = date.sub(d: 1d, from: stop)
option task = {
name: "calculate chain activity every day",
every: 1d,
}
runTask(start:start, stop: stop, srcBucket: bucketInfinite, destBucket: bucketInfinite, destMeasurement: destMeasurement)

View File

@ -0,0 +1,40 @@
// Scheduled InfluxDB task: downsamples raw VAA volume points into the hourly
// "chain_activity_1h" measurement, keyed by emitter chain, destination chain
// and application id.
import "date"
// runTask writes, for the window [start, stop), the summed volume and the
// number of VAAs per (emitter_chain, destination_chain, app_id) group to
// destMeasurement in destBucket.
runTask = (start,stop,srcBucket,destBucket,destMeasurement) => {
// NOTE(review): unlike the per-emitter tasks, this keeps zero-volume rows
// and does not filter on version == "v2" — confirm whether intentional.
data = from(bucket: srcBucket)
|> range(start: start,stop: stop)
|> filter(fn: (r) => r._measurement == "vaa_volume_v2" and r._field == "volume")
|> group(columns: ["emitter_chain", "destination_chain", "app_id"])
// Summed volume; the point time is pinned to the window start and "to"
// holds the window end (start + 1h) as a string.
data
|> sum(column: "_value")
|> set(key: "_field", value: "volume")
|> map(fn: (r) => ({ r with _time: start }))
|> set(key: "to", value: string(v:date.add(d: 1h, to: start)))
|> set(key: "_measurement", value: destMeasurement)
|> to(bucket: destBucket)
// Number of VAAs in the same window, written as the "count" field.
return data
|> count(column: "_value")
|> set(key: "_field", value: "count")
|> map(fn: (r) => ({ r with _time: start }))
|> set(key: "to", value: string(v:date.add(d: 1h, to: start)))
|> set(key: "_measurement", value: destMeasurement)
|> to(bucket: destBucket)
}
bucketInfinite = "wormscan"
destMeasurement = "chain_activity_1h"
// Operate on the previous complete hour.
stop = date.truncate(t: now(),unit: 1h)
start = date.sub(d: 1h, from: stop)
option task = {
name: "calculate chain activity every hour",
every: 1h,
}
runTask(start:start, stop: stop, srcBucket: bucketInfinite, destBucket: bucketInfinite, destMeasurement: destMeasurement)

View File

@ -1760,6 +1760,73 @@ const docTemplate = `{
}
}
},
"/api/v1/x-chain-activity/tops": {
"get": {
"description": "Search, for a specific period of time, the number of transactions and the volume.",
"tags": [
"wormholescan"
],
"operationId": "x-chain-activity-tops",
"parameters": [
{
"type": "string",
"description": "Time span, supported values: 1d, 1mo and 1y",
"name": "timespan",
"in": "query",
"required": true
},
{
"type": "string",
"description": "From date, supported format 2006-01-02T15:04:05Z07:00",
"name": "from",
"in": "query",
"required": true
},
{
"type": "string",
"description": "To date, supported format 2006-01-02T15:04:05Z07:00",
"name": "to",
"in": "query",
"required": true
},
{
"type": "string",
"description": "Search by appId",
"name": "appId",
"in": "query"
},
{
"type": "string",
"description": "Search by sourceChain",
"name": "sourceChain",
"in": "query"
},
{
"type": "string",
"description": "Search by targetChain",
"name": "targetChain",
"in": "query"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/transactions.ChainActivityTopResult"
}
}
},
"400": {
"description": "Bad Request"
},
"500": {
"description": "Internal Server Error"
}
}
}
},
"/swagger.json": {
"get": {
"description": "Returns the swagger specification for this API.",
@ -3325,6 +3392,29 @@ const docTemplate = `{
}
}
},
"transactions.ChainActivityTopResult": {
"type": "object",
"properties": {
"count": {
"type": "integer"
},
"destination_chain": {
"type": "string"
},
"emitter_chain": {
"type": "string"
},
"from": {
"type": "string"
},
"to": {
"type": "string"
},
"volume": {
"type": "integer"
}
}
},
"transactions.ChainPair": {
"type": "object",
"properties": {
@ -3591,7 +3681,6 @@ const docTemplate = `{
14,
15,
16,
17,
18,
19,
20,
@ -3599,24 +3688,35 @@ const docTemplate = `{
22,
23,
24,
25,
26,
28,
29,
30,
32,
33,
34,
35,
36,
37,
38,
39,
3104,
4000,
4001,
4002,
4003,
4004,
4005,
4006,
4007,
4008,
10002,
10003,
10004,
10005,
10006
10006,
10007
],
"x-enum-varnames": [
"ChainIDUnset",
@ -3636,7 +3736,6 @@ const docTemplate = `{
"ChainIDCelo",
"ChainIDNear",
"ChainIDMoonbeam",
"ChainIDNeon",
"ChainIDTerra2",
"ChainIDInjective",
"ChainIDOsmosis",
@ -3644,24 +3743,35 @@ const docTemplate = `{
"ChainIDAptos",
"ChainIDArbitrum",
"ChainIDOptimism",
"ChainIDGnosis",
"ChainIDPythNet",
"ChainIDXpla",
"ChainIDBtc",
"ChainIDBase",
"ChainIDSei",
"ChainIDRootstock",
"ChainIDScroll",
"ChainIDMantle",
"ChainIDBlast",
"ChainIDXLayer",
"ChainIDLinea",
"ChainIDBerachain",
"ChainIDWormchain",
"ChainIDCosmoshub",
"ChainIDEvmos",
"ChainIDKujira",
"ChainIDNeutron",
"ChainIDCelestia",
"ChainIDStargaze",
"ChainIDSeda",
"ChainIDDymension",
"ChainIDProvenance",
"ChainIDSepolia",
"ChainIDArbitrumSepolia",
"ChainIDBaseSepolia",
"ChainIDOptimismSepolia",
"ChainIDHolesky"
"ChainIDHolesky",
"ChainIDPolygonSepolia"
]
},
"vaa.VaaDoc": {

View File

@ -1753,6 +1753,73 @@
}
}
},
"/api/v1/x-chain-activity/tops": {
"get": {
"description": "Search, for a specific period of time, the number of transactions and the volume.",
"tags": [
"wormholescan"
],
"operationId": "x-chain-activity-tops",
"parameters": [
{
"type": "string",
"description": "Time span, supported values: 1d, 1mo and 1y",
"name": "timespan",
"in": "query",
"required": true
},
{
"type": "string",
"description": "From date, supported format 2006-01-02T15:04:05Z07:00",
"name": "from",
"in": "query",
"required": true
},
{
"type": "string",
"description": "To date, supported format 2006-01-02T15:04:05Z07:00",
"name": "to",
"in": "query",
"required": true
},
{
"type": "string",
"description": "Search by appId",
"name": "appId",
"in": "query"
},
{
"type": "string",
"description": "Search by sourceChain",
"name": "sourceChain",
"in": "query"
},
{
"type": "string",
"description": "Search by targetChain",
"name": "targetChain",
"in": "query"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"type": "array",
"items": {
"$ref": "#/definitions/transactions.ChainActivityTopResult"
}
}
},
"400": {
"description": "Bad Request"
},
"500": {
"description": "Internal Server Error"
}
}
}
},
"/swagger.json": {
"get": {
"description": "Returns the swagger specification for this API.",
@ -3318,6 +3385,29 @@
}
}
},
"transactions.ChainActivityTopResult": {
"type": "object",
"properties": {
"count": {
"type": "integer"
},
"destination_chain": {
"type": "string"
},
"emitter_chain": {
"type": "string"
},
"from": {
"type": "string"
},
"to": {
"type": "string"
},
"volume": {
"type": "integer"
}
}
},
"transactions.ChainPair": {
"type": "object",
"properties": {
@ -3584,7 +3674,6 @@
14,
15,
16,
17,
18,
19,
20,
@ -3592,24 +3681,35 @@
22,
23,
24,
25,
26,
28,
29,
30,
32,
33,
34,
35,
36,
37,
38,
39,
3104,
4000,
4001,
4002,
4003,
4004,
4005,
4006,
4007,
4008,
10002,
10003,
10004,
10005,
10006
10006,
10007
],
"x-enum-varnames": [
"ChainIDUnset",
@ -3629,7 +3729,6 @@
"ChainIDCelo",
"ChainIDNear",
"ChainIDMoonbeam",
"ChainIDNeon",
"ChainIDTerra2",
"ChainIDInjective",
"ChainIDOsmosis",
@ -3637,24 +3736,35 @@
"ChainIDAptos",
"ChainIDArbitrum",
"ChainIDOptimism",
"ChainIDGnosis",
"ChainIDPythNet",
"ChainIDXpla",
"ChainIDBtc",
"ChainIDBase",
"ChainIDSei",
"ChainIDRootstock",
"ChainIDScroll",
"ChainIDMantle",
"ChainIDBlast",
"ChainIDXLayer",
"ChainIDLinea",
"ChainIDBerachain",
"ChainIDWormchain",
"ChainIDCosmoshub",
"ChainIDEvmos",
"ChainIDKujira",
"ChainIDNeutron",
"ChainIDCelestia",
"ChainIDStargaze",
"ChainIDSeda",
"ChainIDDymension",
"ChainIDProvenance",
"ChainIDSepolia",
"ChainIDArbitrumSepolia",
"ChainIDBaseSepolia",
"ChainIDOptimismSepolia",
"ChainIDHolesky"
"ChainIDHolesky",
"ChainIDPolygonSepolia"
]
},
"vaa.VaaDoc": {

View File

@ -824,6 +824,21 @@ definitions:
$ref: '#/definitions/transactions.Tx'
type: array
type: object
transactions.ChainActivityTopResult:
properties:
count:
type: integer
destination_chain:
type: string
emitter_chain:
type: string
from:
type: string
to:
type: string
volume:
type: integer
type: object
transactions.ChainPair:
properties:
destinationChain:
@ -1012,7 +1027,6 @@ definitions:
- 14
- 15
- 16
- 17
- 18
- 19
- 20
@ -1020,24 +1034,35 @@ definitions:
- 22
- 23
- 24
- 25
- 26
- 28
- 29
- 30
- 32
- 33
- 34
- 35
- 36
- 37
- 38
- 39
- 3104
- 4000
- 4001
- 4002
- 4003
- 4004
- 4005
- 4006
- 4007
- 4008
- 10002
- 10003
- 10004
- 10005
- 10006
- 10007
type: integer
x-enum-varnames:
- ChainIDUnset
@ -1057,7 +1082,6 @@ definitions:
- ChainIDCelo
- ChainIDNear
- ChainIDMoonbeam
- ChainIDNeon
- ChainIDTerra2
- ChainIDInjective
- ChainIDOsmosis
@ -1065,24 +1089,35 @@ definitions:
- ChainIDAptos
- ChainIDArbitrum
- ChainIDOptimism
- ChainIDGnosis
- ChainIDPythNet
- ChainIDXpla
- ChainIDBtc
- ChainIDBase
- ChainIDSei
- ChainIDRootstock
- ChainIDScroll
- ChainIDMantle
- ChainIDBlast
- ChainIDXLayer
- ChainIDLinea
- ChainIDBerachain
- ChainIDWormchain
- ChainIDCosmoshub
- ChainIDEvmos
- ChainIDKujira
- ChainIDNeutron
- ChainIDCelestia
- ChainIDStargaze
- ChainIDSeda
- ChainIDDymension
- ChainIDProvenance
- ChainIDSepolia
- ChainIDArbitrumSepolia
- ChainIDBaseSepolia
- ChainIDOptimismSepolia
- ChainIDHolesky
- ChainIDPolygonSepolia
vaa.VaaDoc:
properties:
appId:
@ -2326,6 +2361,52 @@ paths:
description: Internal Server Error
tags:
- wormholescan
/api/v1/x-chain-activity/tops:
get:
description: Search, for a specific period of time, the number of transactions
and the volume.
operationId: x-chain-activity-tops
parameters:
- description: 'Time span, supported values: 1d, 1mo and 1y'
in: query
name: timespan
required: true
type: string
- description: From date, supported format 2006-01-02T15:04:05Z07:00
in: query
name: from
required: true
type: string
- description: To date, supported format 2006-01-02T15:04:05Z07:00
in: query
name: to
required: true
type: string
- description: Search by appId
in: query
name: appId
type: string
- description: Search by sourceChain
in: query
name: sourceChain
type: string
- description: Search by targetChain
in: query
name: targetChain
type: string
responses:
"200":
description: OK
schema:
items:
$ref: '#/definitions/transactions.ChainActivityTopResult'
type: array
"400":
description: Bad Request
"500":
description: Internal Server Error
tags:
- wormholescan
/swagger.json:
get:
description: Returns the swagger specification for this API.

View File

@ -143,6 +143,17 @@ type ChainActivityResult struct {
Volume uint64 `mapstructure:"_value" json:"volume"`
}
// ChainActivityTopResult is a single aggregated row returned by the
// x-chain-activity/tops queries. The mapstructure tags map each field to a
// column produced by the Flux queries; the json tags shape the API response.
type ChainActivityTopResult struct {
Time time.Time `json:"from" mapstructure:"_time"` // start of the aggregation window
To string `json:"to" mapstructure:"to"` // end of the window, RFC3339 string
ChainSourceID string `mapstructure:"emitter_chain" json:"emitter_chain"`
ChainDestinationID string `mapstructure:"destination_chain" json:"destination_chain,omitempty"`
Volume uint64 `mapstructure:"volume" json:"volume"` // summed transfer volume in the window
Txs uint64 `mapstructure:"count" json:"count"` // number of transfers in the window
}
// ChainActivityTopResults is the row list returned to the API layer.
type ChainActivityTopResults []ChainActivityTopResult
type ChainActivityTimeSpan string
const (
@ -202,3 +213,25 @@ type TransactionDto struct {
Payload map[string]interface{} `bson:"payload"`
StandardizedProperties map[string]interface{} `bson:"standardizedProperties"`
}
// ChainActivityTopsQuery holds the validated query parameters for the
// x-chain-activity/tops endpoint.
type ChainActivityTopsQuery struct {
SourceChain *sdk.ChainID `json:"source_chain"` // optional emitter-chain filter (nil = all)
TargetChain *sdk.ChainID `json:"target_chain"` // optional destination-chain filter (nil = all)
AppId string `json:"app_id"` // optional application-id filter ("" = all)
From time.Time `json:"from"`
To time.Time `json:"to"`
Timespan Timespan `json:"timespan"` // aggregation resolution (1h, 1d, 1mo, 1y)
}
// Timespan is the aggregation resolution accepted by the chain-activity
// tops endpoint.
type Timespan string

// Supported timespan values.
const (
	Hour  Timespan = "1h"
	Day   Timespan = "1d"
	Month Timespan = "1mo"
	Year  Timespan = "1y"
)

// IsValid reports whether t is one of the supported timespan values.
func (t Timespan) IsValid() bool {
	switch t {
	case Hour, Day, Month, Year:
		return true
	default:
		return false
	}
}

View File

@ -3,6 +3,7 @@ package transactions
import (
"context"
"fmt"
"github.com/valyala/fasthttp"
"strconv"
"strings"
"sync"
@ -1048,3 +1049,338 @@ func (r *Repository) ListTransactionsByAddress(
return documents, nil
}
// FindChainActivityTops executes the chain-activity "tops" Flux query and
// decodes every result row into a ChainActivityTopResult.
//
// Bug fix: the stream error must be checked AFTER the Next() loop — the
// InfluxDB client surfaces mid-stream query failures through Err() while
// iterating, so checking it only before the loop silently returned a
// truncated result set on error.
func (r *Repository) FindChainActivityTops(ctx *fasthttp.RequestCtx, q ChainActivityTopsQuery) ([]ChainActivityTopResult, error) {
	query := r.buildChainActivityQueryTops(q)
	result, err := r.queryAPI.Query(ctx, query)
	if err != nil {
		return nil, err
	}
	var response []ChainActivityTopResult
	for result.Next() {
		var row ChainActivityTopResult
		if err = mapstructure.Decode(result.Record().Values(), &row); err != nil {
			return nil, err
		}
		// Normalize "to" to plain RFC3339 (drops sub-second digits). Rows
		// whose "to" does not parse are kept unchanged (best effort).
		if parsedTime, errTime := time.Parse(time.RFC3339Nano, row.To); errTime == nil {
			row.To = parsedTime.Format(time.RFC3339)
		}
		response = append(response, row)
	}
	if result.Err() != nil {
		return nil, result.Err()
	}
	return response, nil
}
// buildChainActivityQueryTops renders the Flux query for a chain-activity
// "tops" request. The [start, stop) range is aligned to the boundary of the
// requested timespan, and a specialized query is selected depending on which
// filters were supplied.
func (r *Repository) buildChainActivityQueryTops(q ChainActivityTopsQuery) string {
	rfc3339 := func(t time.Time) string { return t.UTC().Format(time.RFC3339) }

	// Align the requested range to the resolution boundary.
	var start, stop string
	switch q.Timespan {
	case Hour:
		start = rfc3339(q.From.Truncate(time.Hour))
		stop = rfc3339(q.To.Truncate(time.Hour))
	case Day:
		start = rfc3339(q.From.Truncate(24 * time.Hour))
		stop = rfc3339(q.To.Truncate(24 * time.Hour))
	case Month:
		start = rfc3339(time.Date(q.From.Year(), q.From.Month(), 1, 0, 0, 0, 0, q.From.Location()))
		stop = rfc3339(time.Date(q.To.Year(), q.To.Month(), 1, 0, 0, 0, 0, q.To.Location()))
	default: // Year
		start = rfc3339(time.Date(q.From.Year(), 1, 1, 0, 0, 0, 0, q.From.Location()))
		stop = rfc3339(time.Date(q.To.Year(), 1, 1, 0, 0, 0, 0, q.To.Location()))
	}

	// Optional Flux filter fragments; empty string when the filter is unset.
	var filterSourceChain, filterTargetChain, filterAppId string
	if q.SourceChain != nil {
		filterSourceChain = fmt.Sprintf(`|> filter(fn: (r) => r.emitter_chain == "%s")`, strconv.Itoa(int(*q.SourceChain)))
	}
	if q.TargetChain != nil {
		filterTargetChain = fmt.Sprintf(`|> filter(fn: (r) => r.destination_chain == "%s")`, strconv.Itoa(int(*q.TargetChain)))
	}
	if q.AppId != "" {
		filterAppId = fmt.Sprintf(`|> filter(fn: (r) => r.app_id == "%s")`, q.AppId)
	}

	// Without target-chain/app-id filters the pre-aggregated per-emitter
	// measurements can be queried directly.
	if q.TargetChain == nil && q.AppId == "" {
		return r.buildQueryChainActivityTopsByEmitter(q, start, stop, filterSourceChain)
	}

	switch q.Timespan {
	case Hour:
		return r.buildQueryChainActivityHourly(start, stop, filterSourceChain, filterTargetChain, filterAppId)
	case Day:
		return r.buildQueryChainActivityDaily(start, stop, filterSourceChain, filterTargetChain, filterAppId)
	case Month:
		return r.buildQueryChainActivityMonthly(start, stop, filterSourceChain, filterTargetChain, filterAppId)
	default:
		return r.buildQueryChainActivityYearly(start, stop, filterSourceChain, filterTargetChain, filterAppId)
	}
}
// buildQueryChainActivityTopsByEmitter renders the Flux query for "tops"
// requests with no target-chain and no app-id filter, which lets us read the
// pre-aggregated per-emitter measurements.
//
// Hour/Day requests read the matching rollup directly and pivot the
// "volume"/"count" fields into columns. Month/Year requests re-window the
// daily rollup into calendar months/years and re-aggregate.
//
// Bug fix: the yearly variant previously omitted the "_field == volume"
// filter on the vols branch, so it summed the "count" field into the
// reported volume. Month and Year now share one correctly-filtered template
// that differs only in the window size.
func (r *Repository) buildQueryChainActivityTopsByEmitter(q ChainActivityTopsQuery, start, stop, filterSourceChain string) string {
	// Only the Hour timespan has an hourly rollup; every other resolution is
	// served from the daily rollup.
	measurement := "emitter_chain_activity_1d"
	if q.Timespan == Hour {
		measurement = "emitter_chain_activity_1h"
	}

	if q.Timespan == Hour || q.Timespan == Day {
		query := `
import "date"
from(bucket: "%s")
|> range(start: %s,stop: %s)
|> filter(fn: (r) => r._measurement == "%s")
%s
|> pivot(rowKey:["_time","emitter_chain"], columnKey: ["_field"], valueColumn: "_value")
|> sort(columns:["emitter_chain","_time"],desc:false)
`
		return fmt.Sprintf(query, r.bucketInfiniteRetention, start, stop, measurement, filterSourceChain)
	}

	// Month and Year share the same shape; only the window size differs.
	window := "1mo"
	if q.Timespan == Year {
		window = "1y"
	}
	query := `
import "date"
import "join"
data = from(bucket: "%s")
|> range(start: %s,stop: %s)
|> filter(fn: (r) => r._measurement == "%s")
%s
|> drop(columns:["to"])
|> window(every: %s, period: %s)
|> drop(columns:["_time"])
|> rename(columns: {_start: "_time"})
|> map(fn: (r) => ({r with to: string(v: r._stop)}))
vols = data
|> filter(fn: (r) => (r._field == "volume" and r._value > 0))
|> group(columns:["_time","to","emitter_chain"])
|> sum()
|> rename(columns: {_value: "volume"})
counts = data
|> filter(fn: (r) => (r._field == "count"))
|> group(columns:["_time","to","emitter_chain"])
|> sum()
|> rename(columns: {_value: "count"})
join.inner(
left: vols,
right: counts,
on: (l, r) => l._time == r._time and l.emitter_chain == r.emitter_chain,
as: (l, r) => ({l with count: r.count}),
)
|> group()
|> sort(columns:["emitter_chain","_time"],desc:false)
`
	return fmt.Sprintf(query, r.bucketInfiniteRetention, start, stop, measurement, filterSourceChain, window, window)
}
// buildQueryChainActivityHourly renders the Flux query used when target-chain
// or app-id filters apply and the requested resolution is hourly. It reads
// the pre-downsampled "chain_activity_1h" measurement, applies the optional
// source/target/app filter fragments, and joins the per-window "volume" and
// "count" series into single rows keyed by (_time, to, emitter_chain).
//
// Placeholders, in order: bucket, range start, range stop, source-chain
// filter, target-chain filter, app-id filter (each filter may be empty).
func (r *Repository) buildQueryChainActivityHourly(start, stop, filterSourceChain, filterTargetChain, filterAppId string) string {
query := `
import "date"
import "join"
data = from(bucket: "%s")
|> range(start: %s,stop: %s)
|> filter(fn: (r) => r._measurement == "chain_activity_1h")
%s
%s
%s
|> drop(columns:["destination_chain"])
vols = data
|> filter(fn: (r) => (r._field == "volume" and r._value > 0))
|> group(columns:["_time","to","emitter_chain"])
|> sum()
|> rename(columns: {_value: "volume"})
counts = data
|> filter(fn: (r) => (r._field == "count"))
|> group(columns:["_time","to","emitter_chain"])
|> sum()
|> rename(columns: {_value: "count"})
join.inner(
left: vols,
right: counts,
on: (l, r) => l._time == r._time and l.to == r.to and l.emitter_chain == r.emitter_chain,
as: (l, r) => ({l with count: r.count}),
)
|> group()
|> sort(columns:["emitter_chain","_time"],desc:false)
`
return fmt.Sprintf(query, r.bucketInfiniteRetention, start, stop, filterSourceChain, filterTargetChain, filterAppId)
}
// buildQueryChainActivityDaily renders the Flux query used when target-chain
// or app-id filters apply and the requested resolution is daily. It reads
// the pre-downsampled "chain_activity_1d" measurement, applies the optional
// source/target/app filter fragments, and joins the per-window "volume" and
// "count" series into single rows keyed by (_time, to, emitter_chain).
//
// Placeholders, in order: bucket, range start, range stop, source-chain
// filter, target-chain filter, app-id filter (each filter may be empty).
func (r *Repository) buildQueryChainActivityDaily(start, stop, filterSourceChain, filterTargetChain, filterAppId string) string {
query := `
import "date"
import "join"
data = from(bucket: "%s")
|> range(start: %s,stop: %s)
|> filter(fn: (r) => r._measurement == "chain_activity_1d")
%s
%s
%s
|> drop(columns:["destination_chain"])
vols = data
|> filter(fn: (r) => (r._field == "volume" and r._value > 0))
|> group(columns:["_time","to","emitter_chain"])
|> sum()
|> rename(columns: {_value: "volume"})
counts = data
|> filter(fn: (r) => (r._field == "count"))
|> group(columns:["_time","to","emitter_chain"])
|> sum()
|> rename(columns: {_value: "count"})
join.inner(
left: vols,
right: counts,
on: (l, r) => l._time == r._time and l.to == r.to and l.emitter_chain == r.emitter_chain,
as: (l, r) => ({l with count: r.count}),
)
|> group()
|> sort(columns:["emitter_chain","_time"],desc:false)
`
return fmt.Sprintf(query, r.bucketInfiniteRetention, start, stop, filterSourceChain, filterTargetChain, filterAppId)
}
// buildQueryChainActivityMonthly renders the Flux query used when
// target-chain or app-id filters apply and the requested resolution is
// monthly. Since there is no monthly rollup, it re-windows the daily
// "chain_activity_1d" measurement into 1mo windows (window start becomes the
// row time, window stop becomes the "to" string) and re-aggregates "volume"
// and "count" per (_time, to, emitter_chain).
//
// Placeholders, in order: bucket, range start, range stop, source-chain
// filter, target-chain filter, app-id filter (each filter may be empty).
func (r *Repository) buildQueryChainActivityMonthly(start, stop, filterSourceChain, filterTargetChain, filterAppId string) string {
query := `
import "date"
import "join"
data = from(bucket: "%s")
|> range(start: %s,stop: %s)
|> filter(fn: (r) => r._measurement == "chain_activity_1d")
%s
%s
%s
|> drop(columns:["destination_chain","to","app_id"])
|> window(every: 1mo, period:1mo)
|> drop(columns:["_time"])
|> rename(columns: {_start: "_time"})
|> map(fn: (r) => ({r with to: string(v: r._stop)}))
vols = data
|> filter(fn: (r) => (r._field == "volume" and r._value > 0))
|> group(columns:["_time","to","emitter_chain"])
|> sum()
|> rename(columns: {_value: "volume"})
counts = data
|> filter(fn: (r) => (r._field == "count"))
|> group(columns:["_time","to","emitter_chain"])
|> sum()
|> rename(columns: {_value: "count"})
join.inner(
left: vols,
right: counts,
on: (l, r) => l._time == r._time and l.emitter_chain == r.emitter_chain,
as: (l, r) => ({l with count: r.count}),
)
|> group()
|> sort(columns:["emitter_chain","_time"],desc:false)
`
return fmt.Sprintf(query, r.bucketInfiniteRetention, start, stop, filterSourceChain, filterTargetChain, filterAppId)
}
// buildQueryChainActivityYearly renders the Flux query used when
// target-chain or app-id filters apply and the requested resolution is
// yearly. Since there is no yearly rollup, it re-windows the daily
// "chain_activity_1d" measurement into 1y windows (window start becomes the
// row time, window stop becomes the "to" string) and re-aggregates "volume"
// and "count" per (_time, to, emitter_chain).
//
// Placeholders, in order: bucket, range start, range stop, source-chain
// filter, target-chain filter, app-id filter (each filter may be empty).
func (r *Repository) buildQueryChainActivityYearly(start, stop, filterSourceChain, filterTargetChain, filterAppId string) string {
query := `
import "date"
import "join"
data = from(bucket: "%s")
|> range(start: %s,stop: %s)
|> filter(fn: (r) => r._measurement == "chain_activity_1d")
%s
%s
%s
|> drop(columns:["destination_chain","to","app_id"])
|> window(every: 1y, period:1y)
|> drop(columns:["_time"])
|> rename(columns: {_start: "_time"})
|> map(fn: (r) => ({r with to: string(v: r._stop)}))
vols = data
|> filter(fn: (r) => (r._field == "volume" and r._value > 0))
|> group(columns:["_time","to","emitter_chain"])
|> sum()
|> rename(columns: {_value: "volume"})
counts = data
|> filter(fn: (r) => (r._field == "count"))
|> group(columns:["_time","to","emitter_chain"])
|> sum()
|> rename(columns: {_value: "count"})
join.inner(
left: vols,
right: counts,
on: (l, r) => l._time == r._time and l.emitter_chain == r.emitter_chain,
as: (l, r) => ({l with count: r.count}),
)
|> group()
|> sort(columns:["emitter_chain","_time"],desc:false)
`
return fmt.Sprintf(query, r.bucketInfiniteRetention, start, stop, filterSourceChain, filterTargetChain, filterAppId)
}

View File

@ -2,12 +2,13 @@ package transactions
import (
"context"
errors "errors"
"fmt"
"github.com/valyala/fasthttp"
"strings"
"time"
"github.com/wormhole-foundation/wormhole-explorer/api/cacheable"
"github.com/wormhole-foundation/wormhole-explorer/api/internal/errors"
errs "github.com/wormhole-foundation/wormhole-explorer/api/internal/errors"
"github.com/wormhole-foundation/wormhole-explorer/api/internal/metrics"
"github.com/wormhole-foundation/wormhole-explorer/api/internal/pagination"
@ -34,6 +35,7 @@ const (
topAssetsByVolumeKey = "wormscan:top-assets-by-volume"
topChainPairsByNumTransfersKey = "wormscan:top-chain-pairs-by-num-transfers"
chainActivityKey = "wormscan:chain-activity"
chainActivityTopsKey = "wormscan:chain-activity-tops"
)
// NewService create a new Service.
@ -157,7 +159,7 @@ func (s *Service) GetTransactionByID(
return nil, err
}
if len(output) == 0 {
return nil, errors.ErrNotFound
return nil, errs.ErrNotFound
}
// Return matching document
@ -167,3 +169,44 @@ func (s *Service) GetTransactionByID(
func (s *Service) GetTokenProvider() *domain.TokenProvider {
return s.tokenProvider
}
// GetChainActivityTops validates that the requested time range is sensible
// for the requested timespan resolution and then fetches the aggregated
// chain-activity rows from the repository.
//
// Allowed ranges per timespan (thresholds unchanged from the original code;
// only the error messages were corrected to match them):
//   - Hour:  at most 15 days
//   - Day:   between 1 day and 1 year
//   - Month: between 30 days and 10 years
//   - Year:  between 1 year and 10 years
func (s *Service) GetChainActivityTops(ctx *fasthttp.RequestCtx, q ChainActivityTopsQuery) (ChainActivityTopResults, error) {
	const day = 24 * time.Hour
	timeDuration := q.To.Sub(q.From)
	switch q.Timespan {
	case Hour:
		if timeDuration > 15*day {
			return nil, errors.New("time range is too large for hourly data. Max time range allowed: 15 days")
		}
	case Day:
		if timeDuration < day {
			return nil, errors.New("time range is too small for daily data. Min time range allowed: 1 day")
		}
		if timeDuration > 365*day {
			return nil, errors.New("time range is too large for daily data. Max time range allowed: 1 year")
		}
	case Month:
		if timeDuration < 30*day {
			return nil, errors.New("time range is too small for monthly data. Min time range allowed: 30 days")
		}
		if timeDuration > 10*365*day {
			return nil, errors.New("time range is too large for monthly data. Max time range allowed: 10 years")
		}
	case Year:
		if timeDuration < 365*day {
			return nil, errors.New("time range is too small for yearly data. Min time range allowed: 1 year")
		}
		if timeDuration > 10*365*day {
			return nil, errors.New("time range is too large for yearly data. Max time range allowed: 10 years")
		}
	}
	return s.repo.FindChainActivityTops(ctx, q)
}

View File

@ -61,10 +61,6 @@ func ExtractToChain(c *fiber.Ctx, l *zap.Logger) (*sdk.ChainID, error) {
return &result, nil
}
// ExtractChain reads the optional "chain" query parameter as a chain id
// (nil when the parameter is absent).
func ExtractChain(c *fiber.Ctx, l *zap.Logger) (*sdk.ChainID, error) {
return extractChainQueryParam(c, l, "chain")
}
// ExtractSourceChain reads the optional "sourceChain" query parameter as a
// chain id (nil when the parameter is absent).
func ExtractSourceChain(c *fiber.Ctx, l *zap.Logger) (*sdk.ChainID, error) {
return extractChainQueryParam(c, l, "sourceChain")
}
@ -74,12 +70,10 @@ func ExtractTargetChain(c *fiber.Ctx, l *zap.Logger) (*sdk.ChainID, error) {
}
func extractChainQueryParam(c *fiber.Ctx, l *zap.Logger, queryParam string) (*sdk.ChainID, error) {
param := c.Query(queryParam)
if param == "" {
return nil, nil
}
chain, err := strconv.ParseInt(param, 10, 16)
if err != nil {
requestID := fmt.Sprintf("%v", c.Locals("requestid"))
@ -90,7 +84,6 @@ func extractChainQueryParam(c *fiber.Ctx, l *zap.Logger, queryParam string) (*sd
return nil, response.NewInvalidParamError(c, "INVALID CHAIN VALUE", errors.WithStack(err))
}
result := sdk.ChainID(chain)
return &result, nil
}
@ -358,14 +351,13 @@ func ExtractTimeSpanAndSampleRate(c *fiber.Ctx, l *zap.Logger) (string, string,
return timeSpan, sampleRate, nil
}
func ExtractTime(c *fiber.Ctx, queryParam string) (*time.Time, error) {
func ExtractTime(c *fiber.Ctx, timeLayout, queryParam string) (*time.Time, error) {
// get the start_time from query params
date := c.Query(queryParam, "")
if date == "" {
return nil, nil
}
t, err := time.Parse("20060102T150405Z", date)
t, err := time.Parse(timeLayout, date)
if err != nil {
return nil, response.NewInvalidQueryParamError(c, fmt.Sprintf("INVALID <%s> QUERY PARAMETER", queryParam), nil)
}

View File

@ -85,6 +85,7 @@ func RegisterRoutes(
api.Get("/last-txs", transactionCtrl.GetLastTransactions)
api.Get("/scorecards", transactionCtrl.GetScorecards)
api.Get("/x-chain-activity", transactionCtrl.GetChainActivity)
api.Get("/x-chain-activity/tops", transactionCtrl.GetChainActivityTops)
api.Get("/top-assets-by-volume", transactionCtrl.GetTopAssets)
api.Get("/top-chain-pairs-by-num-transfers", transactionCtrl.GetTopChainPairs)
api.Get("token/:chain/:token_address", transactionCtrl.GetTokenByChainAndAddress)

View File

@ -2,6 +2,7 @@ package transactions
import (
"strconv"
"time"
"github.com/gofiber/fiber/v2"
"github.com/shopspring/decimal"
@ -182,6 +183,75 @@ func (c *Controller) GetTopAssets(ctx *fiber.Ctx) error {
return ctx.JSON(response)
}
// GetChainActivityTops godoc
// @Description Search for a specific period of time the number of transactions and the volume.
// @Tags wormholescan
// @ID x-chain-activity-tops
// @Method Get
// @Param timespan query string true "Time span, supported values: 1d, 1mo and 1y"
// @Param from query string true "From date, supported format 2006-01-02T15:04:05Z07:00"
// @Param to query string true "To date, supported format 2006-01-02T15:04:05Z07:00"
// @Param appId query string false "Search by appId"
// @Param sourceChain query string false "Search by sourceChain"
// @Param targetChain query string false "Search by targetChain"
// @Success 200 {object} transactions.ChainActivityTopResults
// @Failure 400
// @Failure 500
// @Router /api/v1/x-chain-activity/tops [get]
func (c *Controller) GetChainActivityTops(ctx *fiber.Ctx) error {
// Optional chain filters; nil when the query parameter is absent.
sourceChain, err := middleware.ExtractSourceChain(ctx, c.logger)
if err != nil {
return err
}
targetChain, err := middleware.ExtractTargetChain(ctx, c.logger)
if err != nil {
return err
}
// Required time range, parsed as RFC3339.
from, err := middleware.ExtractTime(ctx, time.RFC3339, "from")
if err != nil {
return err
}
to, err := middleware.ExtractTime(ctx, time.RFC3339, "to")
if err != nil {
return err
}
if from == nil || to == nil {
return response.NewInvalidParamError(ctx, "missing from/to query params ", nil)
}
payload := transactions.ChainActivityTopsQuery{
SourceChain: sourceChain,
TargetChain: targetChain,
From: *from,
To: *to,
AppId: middleware.ExtractAppId(ctx, c.logger),
Timespan: transactions.Timespan(ctx.Query("timespan")),
}
if !payload.Timespan.IsValid() {
return response.NewInvalidParamError(ctx, "invalid timespan", nil)
}
// Clamp "to" to the current time so the query never ranges into the future.
nowUTC := time.Now().UTC()
if nowUTC.Before(payload.To.UTC()) {
payload.To = nowUTC
}
// "from" must be strictly before "to".
if payload.To.Sub(payload.From) <= 0 {
return response.NewInvalidParamError(ctx, "invalid time range", nil)
}
// Fetch the aggregated chain activity.
activity, err := c.srv.GetChainActivityTops(ctx.Context(), payload)
if err != nil {
c.logger.Error("Error getting chain activity", zap.Error(err))
return err
}
return ctx.JSON(activity)
}
// GetChainActivity godoc
// @Description Returns a list of chain pairs by origin chain and destination chain.
// @Description The list could be rendered by notional or transaction count.

View File

@ -0,0 +1,65 @@
package guardian
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"time"
"go.uber.org/zap"
)
const DefaultTimeout = 10
// GuardianAPIClient guardian api client.
//
// Construct it with NewGuardianAPIClient so the HTTP timeout and base URL
// are validated.
type GuardianAPIClient struct {
	Client  http.Client // HTTP client used for all requests (timeout set by NewGuardianAPIClient)
	BaseURL string      // base URL of the guardian API, without trailing slash
	Logger  *zap.Logger // structured logger for request/decoding errors
}
// NewGuardianAPIClient create new instances of GuardianAPIClient.
//
// timeout is expressed in seconds; non-positive values fall back to
// DefaultTimeout. baseURL must be non-empty, otherwise an error is returned.
func NewGuardianAPIClient(timeout int64, baseURL string, logger *zap.Logger) (GuardianAPIClient, error) {
	// Guard against non-positive timeouts: the original only handled 0, so a
	// negative value produced an http.Client with a negative Timeout.
	if timeout <= 0 {
		timeout = DefaultTimeout
	}
	if baseURL == "" {
		return GuardianAPIClient{}, errors.New("baseURL can not be empty")
	}
	return GuardianAPIClient{
		Client: http.Client{
			Timeout: time.Duration(timeout) * time.Second,
		},
		BaseURL: baseURL,
		Logger:  logger,
	}, nil
}
// SignedVaa represent a guardianAPI signed vaa response.
type SignedVaa struct {
	// VaaBytes is the raw signed VAA payload; JSON base64 decoding into []byte
	// is handled by encoding/json.
	VaaBytes []byte `json:"vaaBytes"`
}
// GetSignedVAA get signed vaa.
//
// It performs GET {BaseURL}/v1/signed_vaa/{vaaID} and decodes the JSON body.
// An error is returned when the request fails, the status code is not 200, or
// the response cannot be decoded.
func (c *GuardianAPIClient) GetSignedVAA(vaaID string) (*SignedVaa, error) {
	endpointUrl := fmt.Sprintf("%s/v1/signed_vaa/%s", c.BaseURL, vaaID)
	resp, err := c.Client.Get(endpointUrl)
	if err != nil {
		c.Logger.Error("failed to call endpoint", zap.String("endpoint", endpointUrl), zap.Error(err))
		return nil, err
	}
	// Always close the body so the underlying connection can be reused;
	// the original code leaked it on every call.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		c.Logger.Error("failed to call endpoint", zap.String("endpoint", endpointUrl), zap.Int("status_code", resp.StatusCode))
		return nil, errors.New("failed to call endpoint, status code is not 200")
	}
	var signedVaa SignedVaa
	if err := json.NewDecoder(resp.Body).Decode(&signedVaa); err != nil {
		c.Logger.Error("failed to decode response", zap.Error(err))
		return nil, err
	}
	return &signedVaa, nil
}

View File

@ -35,3 +35,5 @@ PROTOCOLS_STATS_VERSION=v1
PROTOCOLS_ACTIVITY_VERSION=v1
# rpc provider json
RPC_PROVIDER_JSON=
GUARDIAN_PROVIDER_JSON=

View File

@ -39,3 +39,4 @@ PROTOCOLS_STATS_VERSION=v1
PROTOCOLS_ACTIVITY_VERSION=v1
# rpc provider json
RPC_PROVIDER_JSON=
GUARDIAN_PROVIDER_JSON=

View File

@ -35,3 +35,4 @@ PROTOCOLS_STATS_VERSION=v1
PROTOCOLS_ACTIVITY_VERSION=v1
# rpc provider json
RPC_PROVIDER_JSON=
GUARDIAN_PROVIDER_JSON=

View File

@ -39,3 +39,4 @@ PROTOCOLS_STATS_VERSION=v1
PROTOCOLS_ACTIVITY_VERSION=v1
# rpc provider json
RPC_PROVIDER_JSON=
GUARDIAN_PROVIDER_JSON=

View File

@ -0,0 +1,9 @@
---
kind: Secret
apiVersion: v1
metadata:
name: guardian-provider
namespace: {{ .NAMESPACE }}
type: Opaque
data:
guardian-provider.json: {{ .GUARDIAN_PROVIDER_JSON | b64enc }}

View File

@ -0,0 +1,9 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: fly-event-processor
namespace: {{ .NAMESPACE }}
data:
aws-region: {{ .SQS_AWS_REGION }}
duplicate-vaa-sqs-url: {{ .DUPLICATE_VAA_SQS_URL }}

View File

@ -0,0 +1,17 @@
ENVIRONMENT=production-mainnet
NAMESPACE=wormscan
NAME=wormscan-fly-event-processor
REPLICAS=2
IMAGE_NAME=
RESOURCES_LIMITS_MEMORY=256Mi
RESOURCES_LIMITS_CPU=500m
RESOURCES_REQUESTS_MEMORY=128Mi
RESOURCES_REQUESTS_CPU=250m
DUPLICATE_VAA_SQS_URL=
SQS_AWS_REGION=
P2P_NETWORK=mainnet
PPROF_ENABLED=false
AWS_IAM_ROLE=
ALERT_ENABLED=false
METRICS_ENABLED=true
CONSUMER_WORKER_SIZE=1

View File

@ -0,0 +1,17 @@
ENVIRONMENT=production-testnet
NAMESPACE=wormscan-testnet
NAME=wormscan-fly-event-processor
REPLICAS=1
IMAGE_NAME=
RESOURCES_LIMITS_MEMORY=30Mi
RESOURCES_LIMITS_CPU=20m
RESOURCES_REQUESTS_MEMORY=15Mi
RESOURCES_REQUESTS_CPU=10m
DUPLICATE_VAA_SQS_URL=
SQS_AWS_REGION=
P2P_NETWORK=testnet
PPROF_ENABLED=false
AWS_IAM_ROLE=
ALERT_ENABLED=false
METRICS_ENABLED=true
CONSUMER_WORKER_SIZE=1

View File

@ -0,0 +1,17 @@
ENVIRONMENT=staging-mainnet
NAMESPACE=wormscan
NAME=wormscan-fly-event-processor
REPLICAS=2
IMAGE_NAME=
RESOURCES_LIMITS_MEMORY=256Mi
RESOURCES_LIMITS_CPU=500m
RESOURCES_REQUESTS_MEMORY=128Mi
RESOURCES_REQUESTS_CPU=250m
DUPLICATE_VAA_SQS_URL=
SQS_AWS_REGION=
P2P_NETWORK=mainnet
PPROF_ENABLED=true
AWS_IAM_ROLE=
ALERT_ENABLED=false
METRICS_ENABLED=true
CONSUMER_WORKER_SIZE=1

View File

@ -0,0 +1,17 @@
ENVIRONMENT=staging-testnet
NAMESPACE=wormscan-testnet
NAME=wormscan-fly-event-processor
REPLICAS=1
IMAGE_NAME=
RESOURCES_LIMITS_MEMORY=30Mi
RESOURCES_LIMITS_CPU=20m
RESOURCES_REQUESTS_MEMORY=15Mi
RESOURCES_REQUESTS_CPU=10m
DUPLICATE_VAA_SQS_URL=
SQS_AWS_REGION=
P2P_NETWORK=testnet
PPROF_ENABLED=false
AWS_IAM_ROLE=
ALERT_ENABLED=false
METRICS_ENABLED=true
CONSUMER_WORKER_SIZE=1

View File

@ -0,0 +1,27 @@
apiVersion: keda.sh/v1alpha1
kind: TriggerAuthentication
metadata:
name: keda-auth-aws-{{ .NAME }}
namespace: {{ .NAMESPACE }}
spec:
podIdentity:
provider: aws
---
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
name: {{ .NAME }}
namespace: {{ .NAMESPACE }}
spec:
scaleTargetRef:
name: {{ .NAME }}
minReplicaCount: 2
maxReplicaCount: 10
triggers:
- type: aws-sqs-queue
authenticationRef:
name: keda-auth-aws-{{ .NAME }}
metadata:
awsRegion: {{ .SQS_AWS_REGION }}
queueURL: {{ .DUPLICATE_VAA_SQS_URL }}
queueLength: "5"

View File

@ -0,0 +1,102 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .NAME }}
namespace: {{ .NAMESPACE }}
spec:
replicas: {{ .REPLICAS }}
selector:
matchLabels:
app: {{ .NAME }}
template:
metadata:
labels:
app: {{ .NAME }}
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8000"
spec:
restartPolicy: Always
terminationGracePeriodSeconds: 40
serviceAccountName: fly-events-processor
containers:
- name: {{ .NAME }}
image: {{ .IMAGE_NAME }}
imagePullPolicy: Always
volumeMounts:
- name: fly-event-processor-config
mountPath: /opt/fly-event-processor
readinessProbe:
initialDelaySeconds: 30
periodSeconds: 20
timeoutSeconds: 3
failureThreshold: 3
httpGet:
path: /api/ready
port: 8000
livenessProbe:
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 3
failureThreshold: 3
httpGet:
path: /api/health
port: 8000
env:
- name: ENVIRONMENT
value: {{ .ENVIRONMENT }}
- name: PORT
value: "8000"
- name: LOG_LEVEL
value: "INFO"
- name: MONGODB_URI
valueFrom:
secretKeyRef:
name: mongodb
key: mongo-uri
- name: MONGODB_DATABASE
valueFrom:
configMapKeyRef:
name: config
key: mongo-database
- name: DUPLICATE_VAA_SQS_URL
valueFrom:
configMapKeyRef:
name: fly-event-processor
key: duplicate-vaa-sqs-url
- name: AWS_REGION
valueFrom:
configMapKeyRef:
name: fly-event-processor
key: aws-region
- name: PPROF_ENABLED
value: "{{ .PPROF_ENABLED }}"
- name: P2P_NETWORK
value: {{ .P2P_NETWORK }}
- name: ALERT_ENABLED
value: "{{ .ALERT_ENABLED }}"
- name: ALERT_API_KEY
valueFrom:
secretKeyRef:
name: opsgenie
key: api-key
- name: METRICS_ENABLED
value: "{{ .METRICS_ENABLED }}"
- name: CONSUMER_WORKER_SIZE
value: "{{ .CONSUMER_WORKER_SIZE }}"
- name: GUARDIAN_API_PROVIDER_PATH
value: "/opt/fly-event-processor/guardian-provider.json"
resources:
limits:
memory: {{ .RESOURCES_LIMITS_MEMORY }}
cpu: {{ .RESOURCES_LIMITS_CPU }}
requests:
memory: {{ .RESOURCES_REQUESTS_MEMORY }}
cpu: {{ .RESOURCES_REQUESTS_CPU }}
volumes:
- name: fly-event-processor-config
secret:
secretName: guardian-provider
items:
- key: guardian-provider.json
path: guardian-provider.json

View File

@ -0,0 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: fly-events-processor
namespace: {{ .NAMESPACE }}
annotations:
eks.amazonaws.com/role-arn: {{ .AWS_IAM_ROLE }}

2
fly-event-processor/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
.idea/
.vscode/

View File

@ -0,0 +1,20 @@
# syntax=docker.io/docker/dockerfile:1.3@sha256:42399d4635eddd7a9b8a24be879d2f9a930d0ed040a61324cfdf59ef1357b3b2
FROM --platform=linux/amd64 docker.io/golang:1.20.7-bullseye@sha256:74b09b3b6fa5aa542df8ef974cb745eb477be72f6fcf821517fb410aff532b00 AS build
WORKDIR /app
COPY fly-event-processor fly-event-processor
COPY common common
# Build the Go app
RUN cd fly-event-processor && CGO_ENABLED=0 GOOS=linux go build -o "./fly-event-processor" cmd/main.go
############################
# STEP 2 build a small image
############################
FROM alpine
#Copy certificates
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
# Copy our static executable.
COPY --from=build "/app/fly-event-processor/fly-event-processor" "/fly-event-processor"
# Run the binary.
ENTRYPOINT ["/fly-event-processor"]

View File

@ -0,0 +1,11 @@
SHELL := /bin/bash
build:
go build -o bin/service cmd/main.go
test:
go test -v -cover ./...
.PHONY: build test

View File

@ -0,0 +1 @@
# FlyEventProcessor

View File

@ -0,0 +1,36 @@
package main
import (
	"os"

	"github.com/spf13/cobra"
	"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/cmd/service"
)
// main is the process entry point. The original discarded the error returned
// by execute, so a failing command still exited with status 0; cobra already
// prints the error itself, so here we only translate it into a non-zero exit.
func main() {
	if err := execute(); err != nil {
		os.Exit(1)
	}
}
// execute builds the root cobra command, registers subcommands and runs it.
// Invoking the binary with no arguments defaults to running the service.
func execute() error {
	rootCmd := cobra.Command{
		Use: "fly-event-processor",
		Run: func(_ *cobra.Command, args []string) {
			// No explicit subcommand: run the service directly.
			if len(args) == 0 {
				service.Run()
			}
		},
	}
	addServiceCommand(&rootCmd)
	return rootCmd.Execute()
}
// addServiceCommand registers the explicit "service" subcommand on root,
// which runs the fly-event-processor service loop.
func addServiceCommand(root *cobra.Command) {
	root.AddCommand(&cobra.Command{
		Use:   "service",
		Short: "Run fly-event-processor as service",
		Run: func(_ *cobra.Command, _ []string) {
			service.Run()
		},
	})
}

View File

@ -0,0 +1,215 @@
package service
import (
"context"
"errors"
"log"
"os"
"os/signal"
"syscall"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/wormhole-foundation/wormhole-explorer/common/client/sqs"
"github.com/wormhole-foundation/wormhole-explorer/common/dbutil"
"github.com/wormhole-foundation/wormhole-explorer/common/health"
"github.com/wormhole-foundation/wormhole-explorer/common/logger"
"github.com/wormhole-foundation/wormhole-explorer/common/pool"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/http/vaa"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/processor"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/queue"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/storage"
"go.mongodb.org/mongo-driver/mongo"
"go.uber.org/zap"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/config"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/consumer"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/http/infrastructure"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/internal/metrics"
)
// Run boots the fly-event-processor service: it loads configuration, wires
// metrics, logging, the guardian provider pool, MongoDB, the HTTP server and
// the duplicate-VAA SQS consumer, then blocks until the root context is
// cancelled or SIGINT/SIGTERM arrives, and finally shuts everything down in
// reverse order.
func Run() {
	rootCtx, rootCtxCancel := context.WithCancel(context.Background())
	// load config
	cfg, err := config.New(rootCtx)
	if err != nil {
		// NOTE(review): config/DB failures use the stdlib log.Fatal while later
		// failures use logger.Fatal — confirm the inconsistency is intentional
		// (the zap logger does not exist yet at this point).
		log.Fatal("Error loading config: ", err)
	}
	// initialize metrics
	metrics := newMetrics(cfg)
	// build logger
	logger := logger.New("wormholescan-fly-event-processor", logger.WithLevel(cfg.LogLevel))
	logger.Info("Starting wormholescan-fly-event-processor ...")
	// create guardian provider pool
	guardianApiProviderPool, err := newGuardianProviderPool(cfg)
	if err != nil {
		logger.Fatal("Error creating guardian provider pool: ", zap.Error(err))
	}
	// initialize the database client
	db, err := dbutil.Connect(rootCtx, logger, cfg.MongoURI, cfg.MongoDatabase, false)
	if err != nil {
		log.Fatal("Failed to initialize MongoDB client: ", err)
	}
	// create a new repository
	repository := storage.NewRepository(logger, db.Database)
	// create a new processor
	processor := processor.NewProcessor(guardianApiProviderPool, repository, logger, metrics)
	// start serving /health and /ready endpoints
	healthChecks, err := makeHealthChecks(rootCtx, cfg, db.Database)
	if err != nil {
		logger.Fatal("Failed to create health checks", zap.Error(err))
	}
	vaaCtrl := vaa.NewController(processor.Process, repository, logger)
	server := infrastructure.NewServer(logger, cfg.Port, vaaCtrl, cfg.PprofEnabled, healthChecks...)
	server.Start()
	// create and start a duplicate VAA consumer.
	duplicateVaaConsumeFunc := newDuplicateVaaConsumeFunc(rootCtx, cfg, metrics, logger)
	duplicateVaa := consumer.New(duplicateVaaConsumeFunc, processor.Process, logger, metrics, cfg.P2pNetwork, cfg.ConsumerWorkerSize)
	duplicateVaa.Start(rootCtx)
	logger.Info("Started wormholescan-fly-event-processor")
	// Waiting for signal: block here until shutdown is requested.
	sigterm := make(chan os.Signal, 1)
	signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM)
	select {
	case <-rootCtx.Done():
		logger.Warn("Terminating with root context cancelled.")
	case signal := <-sigterm:
		logger.Info("Terminating with signal.", zap.String("signal", signal.String()))
	}
	// graceful shutdown: cancel the context first so consumers stop pulling,
	// then stop the HTTP server, then close the DB connection.
	logger.Info("Cancelling root context...")
	rootCtxCancel()
	logger.Info("Closing Http server...")
	server.Stop()
	logger.Info("Closing MongoDB connection...")
	db.DisconnectWithTimeout(10 * time.Second)
	logger.Info("Terminated wormholescan-fly-event-processor")
}
// newAwsConfig builds the AWS SDK configuration. When static credentials are
// provided in cfg they are used together with an optional custom endpoint
// (useful for localstack); otherwise the SDK default provider chain applies.
func newAwsConfig(ctx context.Context, cfg *config.ServiceConfiguration) (aws.Config, error) {
	if cfg.AwsAccessKeyID == "" || cfg.AwsSecretAccessKey == "" {
		// No static credentials: defer to the default chain (env, IAM role, ...).
		return awsconfig.LoadDefaultConfig(ctx, awsconfig.WithRegion(cfg.AwsRegion))
	}
	staticCreds := credentials.NewStaticCredentialsProvider(cfg.AwsAccessKeyID, cfg.AwsSecretAccessKey, "")
	resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
		if cfg.AwsEndpoint == "" {
			// Fall through to the SDK's default endpoint resolution.
			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
		}
		return aws.Endpoint{
			PartitionID:   "aws",
			URL:           cfg.AwsEndpoint,
			SigningRegion: region,
		}, nil
	})
	return awsconfig.LoadDefaultConfig(
		ctx,
		awsconfig.WithRegion(cfg.AwsRegion),
		awsconfig.WithEndpointResolver(resolver),
		awsconfig.WithCredentialsProvider(staticCreds),
	)
}
// newSqsConsumer creates an SQS consumer for the given queue URL using the
// service AWS configuration.
func newSqsConsumer(ctx context.Context, cfg *config.ServiceConfiguration, sqsUrl string) (*sqs.Consumer, error) {
	awsCfg, err := newAwsConfig(ctx, cfg)
	if err != nil {
		return nil, err
	}
	return sqs.NewConsumer(
		awsCfg,
		sqsUrl,
		sqs.WithMaxMessages(10),
		sqs.WithVisibilityTimeout(60),
	)
}
// makeHealthChecks assembles the health checks exposed through the /health
// and /ready endpoints: SQS queue reachability and MongoDB connectivity.
func makeHealthChecks(
	ctx context.Context,
	cfg *config.ServiceConfiguration,
	db *mongo.Database,
) ([]health.Check, error) {
	awsCfg, err := newAwsConfig(ctx, cfg)
	if err != nil {
		return nil, err
	}
	checks := []health.Check{
		health.SQS(awsCfg, cfg.DuplicateVaaSQSUrl),
		health.Mongo(db),
	}
	return checks, nil
}
// newMetrics returns the Prometheus-backed metrics implementation when
// metrics are enabled in cfg, and a no-op implementation otherwise.
func newMetrics(cfg *config.ServiceConfiguration) metrics.Metrics {
	if cfg.MetricsEnabled {
		return metrics.NewPrometheusMetrics(cfg.Environment)
	}
	return metrics.NewDummyMetrics()
}
// newGuardianProviderPool builds the guardian API provider pool from the JSON
// configuration loaded into cfg. It errors when the configuration is missing
// or declares no providers.
func newGuardianProviderPool(cfg *config.ServiceConfiguration) (*pool.Pool, error) {
	if cfg.GuardianAPIConfigurationJson == nil {
		return nil, errors.New("guardian api provider configuration is missing")
	}
	providers := cfg.GuardianAPIConfigurationJson.GuardianProviders
	// Validate before building so we never hand an empty config set to the pool.
	if len(providers) == 0 {
		return nil, errors.New("guardian api provider configuration is empty")
	}
	// Pre-size the slice: the provider count is known up front.
	guardianCfgs := make([]pool.Config, 0, len(providers))
	for _, provider := range providers {
		guardianCfgs = append(guardianCfgs, pool.Config{
			Id:                provider.ProviderUrl,
			Description:       provider.ProviderName,
			RequestsPerMinute: provider.RequestsPerMinute,
			Priority:          provider.Priority,
		})
	}
	return pool.NewPool(guardianCfgs), nil
}
// newDuplicateVaaConsumeFunc builds the consume function backed by the
// duplicate-VAA SQS queue. It aborts the process if the SQS consumer cannot
// be created, since the service is useless without its event source.
func newDuplicateVaaConsumeFunc(
	ctx context.Context,
	cfg *config.ServiceConfiguration,
	metrics metrics.Metrics,
	logger *zap.Logger,
) queue.ConsumeFunc {
	consumer, err := newSqsConsumer(ctx, cfg, cfg.DuplicateVaaSQSUrl)
	if err != nil {
		logger.Fatal("failed to create sqs consumer", zap.Error(err))
	}
	return queue.NewEventSqs(consumer, metrics, logger).Consume
}

View File

@ -0,0 +1,83 @@
package config
import (
"context"
"encoding/json"
"fmt"
"os"
"github.com/joho/godotenv"
"github.com/sethvargo/go-envconfig"
)
// p2p network constants.
const (
P2pMainNet = "mainnet"
P2pTestNet = "testnet"
P2pDevNet = "devnet"
)
// ServiceConfiguration represents the application configuration when running as service with default values.
//
// Fields are populated from environment variables (see the `env` tags) by
// config.New; the embedded GuardianAPIConfigurationJson is instead loaded
// from the JSON file referenced by GUARDIAN_API_PROVIDER_PATH.
type ServiceConfiguration struct {
	// Global configuration
	Environment    string `env:"ENVIRONMENT,required"`
	LogLevel       string `env:"LOG_LEVEL,default=INFO"`
	Port           string `env:"PORT,default=8000"`
	PprofEnabled   bool   `env:"PPROF_ENABLED,default=false"`
	P2pNetwork     string `env:"P2P_NETWORK,required"`
	AlertEnabled   bool   `env:"ALERT_ENABLED,default=false"`
	AlertApiKey    string `env:"ALERT_API_KEY"`
	MetricsEnabled bool   `env:"METRICS_ENABLED,default=false"`
	// Fly event consumer configuration
	ConsumerWorkerSize int `env:"CONSUMER_WORKER_SIZE,default=1"`
	// Database configuration
	MongoURI      string `env:"MONGODB_URI,required"`
	MongoDatabase string `env:"MONGODB_DATABASE,required"`
	// AWS configuration
	AwsEndpoint        string `env:"AWS_ENDPOINT"`
	AwsAccessKeyID     string `env:"AWS_ACCESS_KEY_ID"`
	AwsSecretAccessKey string `env:"AWS_SECRET_ACCESS_KEY"`
	AwsRegion          string `env:"AWS_REGION"`
	DuplicateVaaSQSUrl string `env:"DUPLICATE_VAA_SQS_URL"`
	// Guardian api provider configuration
	GuardianAPIProviderPath string `env:"GUARDIAN_API_PROVIDER_PATH,required"`
	// NOTE(review): `required:"false"` is not a tag the envconfig library
	// documents for embedded pointers — confirm it is intentional.
	*GuardianAPIConfigurationJson `required:"false"`
}
// GuardianAPIConfigurationJson mirrors the JSON document referenced by
// GUARDIAN_API_PROVIDER_PATH; it is unmarshaled in config.New.
type GuardianAPIConfigurationJson struct {
	GuardianProviders []GuardianProvider `json:"guardian_providers"`
}

// GuardianProvider describes a single guardian API endpoint entry used to
// build the provider pool.
type GuardianProvider struct {
	ProviderName      string `json:"name"`                // human-readable provider name
	ProviderUrl       string `json:"url"`                 // base URL of the provider
	RequestsPerMinute uint16 `json:"requests_per_minute"` // rate limit applied by the pool
	Priority          uint8  `json:"priority"`            // selection priority within the pool
}
// New creates a configuration with the values from .env file and environment variables.
//
// It also reads and unmarshals the guardian API provider JSON file referenced
// by GUARDIAN_API_PROVIDER_PATH into the returned configuration.
func New(ctx context.Context) (*ServiceConfiguration, error) {
	// Best-effort: missing .env files are fine, real env vars still apply.
	_ = godotenv.Load(".env", "../.env")

	var configuration ServiceConfiguration
	if err := envconfig.Process(ctx, &configuration); err != nil {
		return nil, err
	}

	// envconfig already marks GUARDIAN_API_PROVIDER_PATH as required, but keep
	// an explicit guard so the error is clear if that tag ever changes.
	if configuration.GuardianAPIProviderPath == "" {
		return nil, fmt.Errorf("guardian API provider settings file is required")
	}

	// Load guardian api provider configuration.
	guardianAPIJsonFile, err := os.ReadFile(configuration.GuardianAPIProviderPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read guardian API provider settings from file: %w", err)
	}

	// Renamed from the original local `GuardianAPIConfigurationJson`, which
	// shadowed the type with an exported-looking identifier.
	var guardianAPICfg GuardianAPIConfigurationJson
	if err := json.Unmarshal(guardianAPIJsonFile, &guardianAPICfg); err != nil {
		return nil, fmt.Errorf("failed to unmarshal guardian API provider settings: %w", err)
	}
	configuration.GuardianAPIConfigurationJson = &guardianAPICfg

	return &configuration, nil
}

View File

@ -0,0 +1,99 @@
package consumer
import (
"context"
"github.com/wormhole-foundation/wormhole-explorer/common/pool"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/internal/metrics"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/processor"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/queue"
sdk "github.com/wormhole-foundation/wormhole/sdk/vaa"
"go.uber.org/zap"
)
// Consumer consumer struct definition.
//
// It pulls duplicated-VAA events from a queue and hands each one to the
// processor, fanning out across workersSize goroutines (see Start).
type Consumer struct {
	consumeFunc  queue.ConsumeFunc       // source channel factory for incoming events
	processor    processor.ProcessorFunc // business logic applied to each event
	guardianPool *pool.Pool              // NOTE(review): never assigned by New in this file — confirm it is still needed
	logger       *zap.Logger
	metrics      metrics.Metrics
	p2pNetwork   string // p2p network name (mainnet/testnet/devnet)
	workersSize  int    // number of concurrent worker goroutines started by Start
}
// New creates a new vaa consumer.
func New(
	consumeFunc queue.ConsumeFunc,
	processor processor.ProcessorFunc,
	logger *zap.Logger,
	metrics metrics.Metrics,
	p2pNetwork string,
	workersSize int,
) *Consumer {
	return &Consumer{
		consumeFunc: consumeFunc,
		processor:   processor,
		logger:      logger,
		metrics:     metrics,
		p2pNetwork:  p2pNetwork,
		workersSize: workersSize,
	}
}
// Start consumes messages from VAA queue, parse and store those messages in a repository.
// It launches workersSize goroutines that all read from the same channel;
// each goroutine exits when ctx is cancelled.
func (c *Consumer) Start(ctx context.Context) {
	messages := c.consumeFunc(ctx)
	for worker := 0; worker < c.workersSize; worker++ {
		go c.producerLoop(ctx, messages)
	}
}
// producerLoop processes messages from ch until ctx is cancelled.
func (c *Consumer) producerLoop(ctx context.Context, ch <-chan queue.ConsumerMessage) {
	for {
		select {
		case msg := <-ch:
			c.processEvent(ctx, msg)
		case <-ctx.Done():
			return
		}
	}
}
// processEvent handles a single duplicated-VAA event: it checks expiration,
// delegates to the processor, acks or nacks the message, and records the
// outcome in the metrics.
func (c *Consumer) processEvent(ctx context.Context, msg queue.ConsumerMessage) {
	event := msg.Data()
	chainID := sdk.ChainID(event.Data.ChainID)
	logger := c.logger.With(
		zap.String("trackId", event.TrackID),
		zap.String("vaaId", event.Data.VaaID))

	// Expired messages are rejected without processing.
	if msg.IsExpired() {
		msg.Failed()
		logger.Debug("event is expired")
		c.metrics.IncDuplicatedVaaExpired(chainID)
		return
	}

	err := c.processor(ctx, &processor.Params{
		TrackID: event.TrackID,
		VaaID:   event.Data.VaaID,
		ChainID: chainID,
	})
	if err != nil {
		msg.Failed()
		logger.Error("error processing event", zap.Error(err))
		c.metrics.IncDuplicatedVaaFailed(chainID)
		return
	}

	msg.Done()
	logger.Debug("event processed")
	c.metrics.IncDuplicatedVaaProcessed(chainID)
}

View File

@ -0,0 +1,85 @@
module github.com/wormhole-foundation/wormhole-explorer/fly-event-processor
go 1.20
require (
github.com/ansrivas/fiberprometheus/v2 v2.6.1
github.com/aws/aws-sdk-go-v2 v1.17.5
github.com/aws/aws-sdk-go-v2/config v1.18.15
github.com/aws/aws-sdk-go-v2/credentials v1.13.15
github.com/gofiber/fiber/v2 v2.52.4
github.com/joho/godotenv v1.5.1
github.com/prometheus/client_golang v1.16.0
github.com/sethvargo/go-envconfig v1.0.0
github.com/spf13/cobra v1.8.0
github.com/wormhole-foundation/wormhole-explorer/common v0.0.0-20240422172607-688a0d0f718e
github.com/wormhole-foundation/wormhole/sdk v0.0.0-20240416174455-25e60611a867
go.mongodb.org/mongo-driver v1.11.2
go.uber.org/zap v1.27.0
golang.org/x/net v0.21.0
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22
)
require (
github.com/algorand/go-algorand-sdk v1.23.0 // indirect
github.com/algorand/go-codec/codec v1.1.8 // indirect
github.com/andybalholm/brotli v1.0.5 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.23 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.29 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.23 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.30 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.23 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.20.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.12.4 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.18.5 // indirect
github.com/aws/smithy-go v1.13.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/certusone/wormhole/node v0.0.0-20240416174455-25e60611a867 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cosmos/btcutil v1.0.5 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/ethereum/go-ethereum v1.10.21 // indirect
github.com/go-redis/redis/v8 v8.11.5 // indirect
github.com/gofiber/adaptor/v2 v2.2.1 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/uuid v1.5.0 // indirect
github.com/holiman/uint256 v1.2.1 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/influxdata/influxdb-client-go/v2 v2.12.2 // indirect
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect
github.com/klauspost/compress v1.17.2 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasthttp v1.51.0 // indirect
github.com/valyala/tcplisten v1.0.0 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.19.0 // indirect
golang.org/x/sync v0.4.0 // indirect
golang.org/x/sys v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/protobuf v1.32.0 // indirect
)
replace github.com/wormhole-foundation/wormhole-explorer/common => ../common

290
fly-event-processor/go.sum Normal file
View File

@ -0,0 +1,290 @@
github.com/algorand/go-algorand-sdk v1.23.0 h1:wlEV6OgDVc/sLeF2y41bwNG/Lr8EoMnN87Ur8N2Gyyo=
github.com/algorand/go-algorand-sdk v1.23.0/go.mod h1:7i2peZBcE48kfoxNZnLA+mklKh812jBKvQ+t4bn0KBQ=
github.com/algorand/go-codec v1.1.8/go.mod h1:XhzVs6VVyWMLu6cApb9/192gBjGRVGm5cX5j203Heg4=
github.com/algorand/go-codec/codec v1.1.8 h1:lsFuhcOH2LiEhpBH3BVUUkdevVmwCRyvb7FCAAPeY6U=
github.com/algorand/go-codec/codec v1.1.8/go.mod h1:tQ3zAJ6ijTps6V+wp8KsGDnPC2uhHVC7ANyrtkIY0bA=
github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs=
github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/ansrivas/fiberprometheus/v2 v2.6.1 h1:wac3pXaE6BYYTF04AC6K0ktk6vCD+MnDOJZ3SK66kXM=
github.com/ansrivas/fiberprometheus/v2 v2.6.1/go.mod h1:MloIKvy4yN6hVqlRpJ/jDiR244YnWJaQC0FIqS8A+MY=
github.com/aws/aws-sdk-go-v2 v1.17.4/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2 v1.17.5 h1:TzCUW1Nq4H8Xscph5M/skINUitxM5UBAyvm2s7XBzL4=
github.com/aws/aws-sdk-go-v2 v1.17.5/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/config v1.18.15 h1:509yMO0pJUGUugBP2H9FOFyV+7Mz7sRR+snfDN5W4NY=
github.com/aws/aws-sdk-go-v2/config v1.18.15/go.mod h1:vS0tddZqpE8cD9CyW0/kITHF5Bq2QasW9Y1DFHD//O0=
github.com/aws/aws-sdk-go-v2/credentials v1.13.15 h1:0rZQIi6deJFjOEgHI9HI2eZcLPPEGQPictX66oRFLL8=
github.com/aws/aws-sdk-go-v2/credentials v1.13.15/go.mod h1:vRMLMD3/rXU+o6j2MW5YefrGMBmdTvkLLGqFwMLBHQc=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.23 h1:Kbiv9PGnQfG/imNI4L/heyUXvzKmcWSBeDvkrQz5pFc=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.23/go.mod h1:mOtmAg65GT1HIL/HT/PynwPbS+UG0BgCZ6vhkPqnxWo=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28/go.mod h1:3lwChorpIM/BhImY/hy+Z6jekmN92cXGPI1QJasVPYY=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.29 h1:9/aKwwus0TQxppPXFmf010DFrE+ssSbzroLVYINA+xE=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.29/go.mod h1:Dip3sIGv485+xerzVv24emnjX5Sg88utCL8fwGmCeWg=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.22/go.mod h1:EqK7gVrIGAHyZItrD1D8B0ilgwMD1GiWAmbU4u/JHNk=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.23 h1:b/Vn141DBuLVgXbhRWIrl9g+ww7G+ScV5SzniWR13jQ=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.23/go.mod h1:mr6c4cHC+S/MMkrjtSlG4QA36kOznDep+0fga5L/fGQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.30 h1:IVx9L7YFhpPq0tTnGo8u8TpluFu7nAn9X3sUDMb11c0=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.30/go.mod h1:vsbq62AOBwQ1LJ/GWKFxX8beUEYeRp/Agitrxee2/qM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.23 h1:QoOybhwRfciWUBbZ0gp9S7XaDnCuSTeK/fySB99V1ls=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.23/go.mod h1:9uPh+Hrz2Vn6oMnQYiUi/zbh3ovbnQk19YKINkQny44=
github.com/aws/aws-sdk-go-v2/service/sns v1.20.2 h1:MU/v2qtfGjKexJ09BMqE8pXo9xYMhT13FXjKgFc0cFw=
github.com/aws/aws-sdk-go-v2/service/sns v1.20.2/go.mod h1:VN2n9SOMS1lNbh5YD7o+ho0/rgfifSrK//YYNiVVF5E=
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.2 h1:CSNIo1jiw7KrkdgZjCOnotu6yuB3IybhKLuSQrTLNfo=
github.com/aws/aws-sdk-go-v2/service/sqs v1.20.2/go.mod h1:1ttxGjUHZliCQMpPss1sU5+Ph/5NvdMFRzr96bv8gm0=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.4 h1:qJdM48OOLl1FBSzI7ZrA1ZfLwOyCYqkXV5lko1hYDBw=
github.com/aws/aws-sdk-go-v2/service/sso v1.12.4/go.mod h1:jtLIhd+V+lft6ktxpItycqHqiVXrPIRjWIsFIlzMriw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.4 h1:YRkWXQveFb0tFC0TLktmmhGsOcCgLwvq88MC2al47AA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.4/go.mod h1:zVwRrfdSmbRZWkUkWjOItY7SOalnFnq/Yg2LVPqDjwc=
github.com/aws/aws-sdk-go-v2/service/sts v1.18.5 h1:L1600eLr0YvTT7gNh3Ni24yGI7NSHkq9Gp62vijPRCs=
github.com/aws/aws-sdk-go-v2/service/sts v1.18.5/go.mod h1:1mKZHLLpDMHTNSYPJ7qrcnCQdHCWsNQaT0xRvq2u80s=
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c=
github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/certusone/wormhole/node v0.0.0-20240416174455-25e60611a867 h1:Wdd/ZJuGD3logxkNuT3hA2aq0Uk5uDGMGhca+S1CDnM=
github.com/certusone/wormhole/node v0.0.0-20240416174455-25e60611a867/go.mod h1:vJHIhQ0MeHZfQ4OpGiUCm3LD3nrdfT1CEIh2JaPCCso=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk=
github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/ethereum/go-ethereum v1.10.21 h1:5lqsEx92ZaZzRyOqBEXux4/UR06m296RGzN3ol3teJY=
github.com/ethereum/go-ethereum v1.10.21/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/gofiber/adaptor/v2 v2.2.1 h1:givE7iViQWlsTR4Jh7tB4iXzrlKBgiraB/yTdHs9Lv4=
github.com/gofiber/adaptor/v2 v2.2.1/go.mod h1:AhR16dEqs25W2FY/l8gSj1b51Azg5dtPDmm+pruNOrc=
github.com/gofiber/fiber/v2 v2.52.4 h1:P+T+4iK7VaqUsq2PALYEfBBo6bJZ4q3FP8cZ84EggTM=
github.com/gofiber/fiber/v2 v2.52.4/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/holiman/uint256 v1.2.1 h1:XRtyuda/zw2l+Bq/38n5XUoEF72aSOu/77Thd9pPp2o=
github.com/holiman/uint256 v1.2.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/influxdata/influxdb-client-go/v2 v2.12.2 h1:uYABKdrEKlYm+++qfKdbgaHKBPmoWR5wpbmj6MBB/2g=
github.com/influxdata/influxdb-client-go/v2 v2.12.2/go.mod h1:YteV91FiQxRdccyJ2cHvj2f/5sq4y4Njqu1fQzsQCOU=
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsDSy7TDxedi9gyBkMvAirat/oRcL0lFdJBf6tdM=
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sethvargo/go-envconfig v1.0.0 h1:1C66wzy4QrROf5ew4KdVw942CQDa55qmlYmw9FZxZdU=
github.com/sethvargo/go-envconfig v1.0.0/go.mod h1:Lzc75ghUn5ucmcRGIdGQ33DKJrcjk4kihFYgSTBmjIc=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/test-go/testify v1.1.4 h1:Tf9lntrKUMHiXQ07qBScBTSA0dhYQlu83hswqelv1iE=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.51.0 h1:8b30A5JlZ6C7AS81RsWjYMQmrZG6feChmgAolCl1SqA=
github.com/valyala/fasthttp v1.51.0/go.mod h1:oI2XroL+lI7vdXyYoQk03bXBThfFl2cVdIA3Xl7cH8g=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
github.com/wormhole-foundation/wormhole/sdk v0.0.0-20240416174455-25e60611a867 h1:GXUBP09C/bnEukdU6H2AY81d0m8UWrWEejDp6CgiFQA=
github.com/wormhole-foundation/wormhole/sdk v0.0.0-20240416174455-25e60611a867/go.mod h1:pE/jYet19kY4P3V6mE2+01zvEfxdyBqv6L6HsnSa5uc=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.11.2 h1:+1v2rDQUWNcGW7/7E0Jvdz51V38XXxJfhzbV17aNHCw=
go.mongodb.org/mongo-driver v1.11.2/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@ -0,0 +1,20 @@
package guardian
import (
client "github.com/wormhole-foundation/wormhole-explorer/common/client/guardian"
"github.com/wormhole-foundation/wormhole-explorer/common/pool"
)
// GuardianAPIClient is a wrapper around the Guardian API client and the pool of providers.
type GuardianAPIClient struct {
	Client *client.GuardianAPIClient // underlying Guardian API client used to issue requests
	Pool   *pool.Pool                // pool of guardian providers to select endpoints from
}
// NewGuardianAPIClient bundles the underlying Guardian API client together
// with its provider pool and returns the wrapper.
func NewGuardianAPIClient(client *client.GuardianAPIClient, pool *pool.Pool) *GuardianAPIClient {
	wrapper := GuardianAPIClient{}
	wrapper.Client = client
	wrapper.Pool = pool
	return &wrapper
}

View File

@ -0,0 +1,53 @@
package infrastructure
import (
"github.com/ansrivas/fiberprometheus/v2"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/pprof"
health "github.com/wormhole-foundation/wormhole-explorer/common/health"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/http/vaa"
"go.uber.org/zap"
)
// Server wraps the fiber application that exposes this service's HTTP API
// (metrics, health checks and the duplicated-VAA endpoint).
type Server struct {
	app    *fiber.App  // underlying fiber application
	port   string      // TCP port to listen on (without the leading colon)
	logger *zap.Logger // structured logger for server lifecycle events
}
// NewServer builds the HTTP server: it wires up prometheus metrics, optional
// pprof profiling, and the health/ready/duplicated-VAA endpoints.
func NewServer(logger *zap.Logger, port string, vaaController *vaa.Controller, pprofEnabled bool, checks ...health.Check) *Server {
	fiberApp := fiber.New(fiber.Config{DisableStartupMessage: true})

	// Expose prometheus metrics for this service at /metrics.
	prom := fiberprometheus.New("wormscan-fly-event-processor")
	prom.RegisterAt(fiberApp, "/metrics")

	// Configure middleware.
	if pprofEnabled {
		fiberApp.Use(pprof.New())
	}
	fiberApp.Use(prom.Middleware)

	healthCtrl := health.NewController(checks, logger)

	api := fiberApp.Group("/api")
	api.Get("/health", healthCtrl.HealthCheck)
	api.Get("/ready", healthCtrl.ReadyCheck)
	api.Post("/vaa/duplicated", vaaController.Process)

	return &Server{
		app:    fiberApp,
		port:   port,
		logger: logger,
	}
}
// Start serves HTTP requests on ":" + port in a background goroutine.
// A listen failure is logged instead of being silently discarded.
func (s *Server) Start() {
	addr := ":" + s.port
	s.logger.Info("Listening on " + addr)
	go func() {
		// Listen blocks until the server stops; a non-nil error means the
		// server could not bind or terminated abnormally. The original code
		// dropped this error, hiding bind failures (e.g. port already in use).
		if err := s.app.Listen(addr); err != nil {
			s.logger.Error("HTTP server terminated", zap.Error(err))
		}
	}()
}
// Stop gracefully shuts down the server. A shutdown error is logged
// instead of being silently discarded.
func (s *Server) Stop() {
	if err := s.app.Shutdown(); err != nil {
		s.logger.Error("error shutting down HTTP server", zap.Error(err))
	}
}

View File

@ -0,0 +1,55 @@
package vaa
import (
"fmt"
"github.com/gofiber/fiber/v2"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/processor"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/storage"
"go.uber.org/zap"
)
// Controller handles HTTP requests for duplicated-VAA processing.
type Controller struct {
	logger     *zap.Logger
	repository *storage.Repository     // read access to stored VAAs
	processor  processor.ProcessorFunc // callback that performs the actual VAA processing
}
// NewController creates a Controller instance with the given processor
// callback, VAA repository and logger.
func NewController(processor processor.ProcessorFunc, repository *storage.Repository, logger *zap.Logger) *Controller {
	// The stale commented-out constructor signature (with a different
	// parameter order) was removed; it contradicted the real signature.
	return &Controller{processor: processor, repository: repository, logger: logger}
}
// Process handles an HTTP request to process a (possibly duplicated) VAA.
// Request body: {"vaaId": "<id>"}. It loads the VAA from storage, invokes the
// processor callback and returns {"message": "success"} on completion.
func (c *Controller) Process(ctx *fiber.Ctx) error {
	request := struct {
		VaaID string `json:"vaaId"`
	}{}

	// A malformed body is a client error: respond 400 instead of letting the
	// raw parse error surface as a 500.
	if err := ctx.BodyParser(&request); err != nil {
		c.logger.Error("error parsing request", zap.Error(err))
		return fiber.NewError(fiber.StatusBadRequest, "invalid request body")
	}

	// Renamed from `vaa` to avoid shadowing this package's own name.
	vaaDoc, err := c.repository.FindVAAById(ctx.Context(), request.VaaID)
	if err != nil {
		c.logger.Error("error getting vaa from collection", zap.Error(err))
		return err
	}

	params := processor.Params{
		TrackID: fmt.Sprintf("controller-%s", request.VaaID),
		VaaID:   request.VaaID,
		ChainID: vaaDoc.EmitterChain,
	}

	if err := c.processor(ctx.Context(), &params); err != nil {
		c.logger.Error("error processing vaa", zap.Error(err))
		return err
	}

	return ctx.JSON(fiber.Map{"message": "success"})
}

View File

@ -0,0 +1,26 @@
package metrics
import sdk "github.com/wormhole-foundation/wormhole/sdk/vaa"
// DummyMetrics is a no-op implementation of the Metrics interface, useful
// when metrics collection is disabled.
type DummyMetrics struct{}

// NewDummyMetrics returns a new no-op metrics implementation.
func NewDummyMetrics() *DummyMetrics {
	return new(DummyMetrics)
}

// IncDuplicatedVaaConsumedQueue is a no-op.
func (d *DummyMetrics) IncDuplicatedVaaConsumedQueue() {}

// IncDuplicatedVaaProcessed is a no-op.
func (d *DummyMetrics) IncDuplicatedVaaProcessed(chainID sdk.ChainID) {}

// IncDuplicatedVaaFailed is a no-op.
func (d *DummyMetrics) IncDuplicatedVaaFailed(chainID sdk.ChainID) {}

// IncDuplicatedVaaExpired is a no-op.
func (d *DummyMetrics) IncDuplicatedVaaExpired(chainID sdk.ChainID) {}

// IncDuplicatedVaaCanNotFixed is a no-op.
func (d *DummyMetrics) IncDuplicatedVaaCanNotFixed(chainID sdk.ChainID) {}

View File

@ -0,0 +1,13 @@
package metrics
import sdk "github.com/wormhole-foundation/wormhole/sdk/vaa"
// serviceName identifies this service in metric labels.
const serviceName = "wormscan-fly-event-processor"

// Metrics defines the counters tracked while handling duplicated VAAs.
// Chain-scoped methods take the emitter chain the VAA belongs to.
type Metrics interface {
	// IncDuplicatedVaaConsumedQueue counts duplicated-VAA messages consumed from the queue.
	IncDuplicatedVaaConsumedQueue()
	// IncDuplicatedVaaProcessed counts successfully processed duplicated VAAs, per chain.
	IncDuplicatedVaaProcessed(chainID sdk.ChainID)
	// IncDuplicatedVaaFailed counts duplicated VAAs whose processing failed, per chain.
	IncDuplicatedVaaFailed(chainID sdk.ChainID)
	// IncDuplicatedVaaExpired counts duplicated VAAs recorded as expired, per chain.
	IncDuplicatedVaaExpired(chainID sdk.ChainID)
	// IncDuplicatedVaaCanNotFixed counts duplicated VAAs recorded as unfixable, per chain.
	IncDuplicatedVaaCanNotFixed(chainID sdk.ChainID)
}

View File

@ -0,0 +1,51 @@
package metrics
import (
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
sdk "github.com/wormhole-foundation/wormhole/sdk/vaa"
)
// PrometheusMetrics implements the Metrics interface on top of a single
// Prometheus counter vector partitioned by chain and event type.
type PrometheusMetrics struct {
	duplicatedVaaCount *prometheus.CounterVec
}

// NewPrometheusMetrics returns a new instance of PrometheusMetrics.
func NewPrometheusMetrics(environment string) *PrometheusMetrics {
	opts := prometheus.CounterOpts{
		Name: "wormscan_fly_event_processor_duplicated_vaa_count",
		Help: "The total number of duplicated VAA processed",
		ConstLabels: map[string]string{
			"environment": environment,
			"service":     serviceName,
		},
	}
	counter := promauto.NewCounterVec(opts, []string{"chain", "type"})
	return &PrometheusMetrics{duplicatedVaaCount: counter}
}

// IncDuplicatedVaaConsumedQueue counts a message consumed from the queue.
// The chain is not known at consumption time, so the label is "all".
func (m *PrometheusMetrics) IncDuplicatedVaaConsumedQueue() {
	m.duplicatedVaaCount.WithLabelValues("all", "consumed_queue").Inc()
}

// IncDuplicatedVaaProcessed counts a successfully processed duplicated VAA.
func (m *PrometheusMetrics) IncDuplicatedVaaProcessed(chainID sdk.ChainID) {
	m.duplicatedVaaCount.WithLabelValues(chainID.String(), "processed").Inc()
}

// IncDuplicatedVaaFailed counts a duplicated VAA whose processing failed.
func (m *PrometheusMetrics) IncDuplicatedVaaFailed(chainID sdk.ChainID) {
	m.duplicatedVaaCount.WithLabelValues(chainID.String(), "failed").Inc()
}

// IncDuplicatedVaaExpired counts a duplicated VAA whose message expired.
func (m *PrometheusMetrics) IncDuplicatedVaaExpired(chainID sdk.ChainID) {
	m.duplicatedVaaCount.WithLabelValues(chainID.String(), "expired").Inc()
}

// IncDuplicatedVaaCanNotFixed counts a duplicated VAA that could not be fixed.
func (m *PrometheusMetrics) IncDuplicatedVaaCanNotFixed(chainID sdk.ChainID) {
	m.duplicatedVaaCount.WithLabelValues(chainID.String(), "can_not_fixed").Inc()
}

View File

@ -0,0 +1,220 @@
package processor
import (
"errors"
"time"
"github.com/wormhole-foundation/wormhole-explorer/common/client/guardian"
"github.com/wormhole-foundation/wormhole-explorer/common/pool"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/internal/metrics"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/storage"
sdk "github.com/wormhole-foundation/wormhole/sdk/vaa"
"go.uber.org/zap"
"golang.org/x/net/context"
)
// Processor validates duplicated VAAs against the guardian network and
// repairs the stored documents when one of the duplicates turns out to be
// the canonical VAA.
type Processor struct {
	guardianPool *pool.Pool          // pool of guardian API endpoints to query
	repository   *storage.Repository // vaas / duplicateVaas persistence
	logger       *zap.Logger
	metrics      metrics.Metrics
}

// NewProcessor creates a Processor with its guardian pool, repository,
// logger and metrics dependencies.
func NewProcessor(
	guardianPool *pool.Pool,
	repository *storage.Repository,
	logger *zap.Logger,
	metrics metrics.Metrics,
) *Processor {
	return &Processor{
		guardianPool: guardianPool,
		repository:   repository,
		logger:       logger,
		metrics:      metrics,
	}
}
// Process checks whether the VAA stored in the Vaas collection matches the
// signed VAA served by the guardian network; when it does not, it searches
// the stored duplicates for the canonical one and swaps it in via FixVAA.
//
// Returning an error signals the queue to retry the event later (e.g. when
// the chain's finality time has not yet elapsed).
func (p *Processor) Process(ctx context.Context, params *Params) error {
	logger := p.logger.With(
		zap.String("trackId", params.TrackID),
		zap.String("vaaId", params.VaaID))

	// 1. check if the vaa stored in the VAA collections is the correct one.

	// 1.1 get vaa from Vaas collection
	vaaDoc, err := p.repository.FindVAAById(ctx, params.VaaID)
	if err != nil {
		logger.Error("error getting vaa from collection", zap.Error(err))
		return err
	}

	// 1.2 if the event time has not reached the finality time, the event fails
	// and will be reprocessed on the next retry.
	finalityTime := getFinalityTimeByChainID(params.ChainID)
	if vaaDoc.Timestamp == nil {
		logger.Error("vaa timestamp is nil")
		return errors.New("vaa timestamp is nil")
	}
	vaaTimestamp := *vaaDoc.Timestamp
	reachedFinalityTime := time.Now().After(vaaTimestamp.Add(finalityTime))
	if !reachedFinalityTime {
		logger.Debug("event time has not reached the finality time",
			zap.Time("finalityTime", vaaTimestamp.Add(finalityTime)))
		// Returning an error forces the queue to redeliver the event later.
		return errors.New("event time has not reached the finality time")
	}

	// 1.3 call guardian api to get signed_vaa.
	// Try each guardian in the pool until one returns the signed VAA.
	guardians := p.guardianPool.GetItems()
	var signedVaa *guardian.SignedVaa
	for _, g := range guardians {
		// g.Wait blocks until this guardian item is available
		// (pacing semantics defined in common/pool).
		g.Wait(ctx)
		guardianAPIClient, err := guardian.NewGuardianAPIClient(
			guardian.DefaultTimeout,
			g.Id,
			logger)
		if err != nil {
			logger.Error("error creating guardian api client", zap.Error(err))
			continue
		}
		signedVaa, err = guardianAPIClient.GetSignedVAA(params.VaaID)
		if err != nil {
			logger.Error("error getting signed vaa from guardian api", zap.Error(err))
			continue
		}
		break
	}
	if signedVaa == nil {
		// Every guardian in the pool failed.
		logger.Error("error getting signed vaa from guardian api")
		return errors.New("error getting signed vaa from guardian api")
	}

	// 1.4 compare digest from vaa and signedVaa
	guardianVAA, err := sdk.Unmarshal(signedVaa.VaaBytes)
	if err != nil {
		logger.Error("error unmarshalling guardian signed vaa", zap.Error(err))
		return err
	}
	vaa, err := sdk.Unmarshal(vaaDoc.Vaa)
	if err != nil {
		logger.Error("error unmarshalling vaa", zap.Error(err))
		return err
	}

	// If the guardian digest is the same that the vaa digest,
	// the stored vaa in the vaas collection is the correct one.
	if guardianVAA.HexDigest() == vaa.HexDigest() {
		logger.Debug("vaa stored in Vaas collections is the correct")
		return nil
	}

	// 2. Check for each duplicate VAAs to detect which is the correct one.

	// 2.1 This check is necessary to avoid race conditions when the vaa is
	// processed — presumably while the txHash has not yet been attached to
	// the document; TODO confirm against the writer of this field.
	if vaaDoc.TxHash == "" {
		logger.Error("vaa txHash is empty")
		return errors.New("vaa txHash is empty")
	}

	// 2.2 Get all duplicate vaas by vaaId
	duplicateVaaDocs, err := p.repository.FindDuplicateVAAs(ctx, params.VaaID)
	if err != nil {
		logger.Error("error getting duplicate vaas from collection", zap.Error(err))
		return err
	}

	// 2.3 Check each duplicate VAA to detect which is the correct one.
	for _, duplicateVaaDoc := range duplicateVaaDocs {
		duplicateVaa, err := sdk.Unmarshal(duplicateVaaDoc.Vaa)
		if err != nil {
			logger.Error("error unmarshalling vaa", zap.Error(err))
			return err
		}
		if guardianVAA.HexDigest() == duplicateVaa.HexDigest() {
			// This duplicate matches the guardian-signed VAA: promote it.
			err := p.repository.FixVAA(ctx, params.VaaID, duplicateVaaDoc.ID)
			if err != nil {
				logger.Error("error fixing vaa", zap.Error(err))
				return err
			}
			logger.Debug("vaa fixed")
			return nil
		}
	}

	// None of the stored duplicates matched the guardian-signed VAA.
	logger.Debug("can't fix duplicate vaa")
	p.metrics.IncDuplicatedVaaCanNotFixed(params.ChainID)
	return errors.New("can't fix duplicate vaa")
}
// finalityTimeByChainID maps each chain to its time-to-finality.
// ref: https://docs.wormhole.com/wormhole/reference/constants
var finalityTimeByChainID = map[sdk.ChainID]time.Duration{
	sdk.ChainIDSolana:          14 * time.Second,
	sdk.ChainIDEthereum:        975 * time.Second,
	sdk.ChainIDTerra:           6 * time.Second,
	sdk.ChainIDBSC:             48 * time.Second,
	sdk.ChainIDPolygon:         66 * time.Second,
	sdk.ChainIDAvalanche:       2 * time.Second,
	sdk.ChainIDOasis:           12 * time.Second,
	sdk.ChainIDAlgorand:        4 * time.Second,
	sdk.ChainIDFantom:          5 * time.Second,
	sdk.ChainIDKarura:          24 * time.Second,
	sdk.ChainIDAcala:           24 * time.Second,
	sdk.ChainIDKlaytn:          1 * time.Second,
	sdk.ChainIDCelo:            10 * time.Second,
	sdk.ChainIDNear:            2 * time.Second,
	sdk.ChainIDMoonbeam:        24 * time.Second,
	sdk.ChainIDTerra2:          6 * time.Second,
	sdk.ChainIDInjective:       3 * time.Second,
	sdk.ChainIDSui:             3 * time.Second,
	sdk.ChainIDAptos:           4 * time.Second,
	sdk.ChainIDArbitrum:        1066 * time.Second,
	sdk.ChainIDOptimism:        1026 * time.Second,
	sdk.ChainIDXpla:            5 * time.Second,
	sdk.ChainIDBase:            1026 * time.Second,
	sdk.ChainIDSei:             1 * time.Second,
	sdk.ChainIDWormchain:       5 * time.Second,
	sdk.ChainIDSepolia:         975 * time.Second,
	sdk.ChainIDArbitrumSepolia: 1066 * time.Second,
	sdk.ChainIDBaseSepolia:     1026 * time.Second,
	sdk.ChainIDOptimismSepolia: 1026 * time.Second,
	sdk.ChainIDHolesky:         975 * time.Second,
}

// maxFinalityTime is the fallback for chains not present in the table;
// it equals the largest known finality time (Arbitrum family).
const maxFinalityTime = 1066 * time.Second

// getFinalityTimeByChainID returns the time to finality for chainID, or
// the maximum known finality time for unknown chains.
func getFinalityTimeByChainID(chainID sdk.ChainID) time.Duration {
	if t, ok := finalityTimeByChainID[chainID]; ok {
		return t
	}
	return maxFinalityTime
}

View File

@ -0,0 +1,15 @@
package processor
import (
sdk "github.com/wormhole-foundation/wormhole/sdk/vaa"
"golang.org/x/net/context"
)
// Params carries the information needed to process a single VAA event.
type Params struct {
	TrackID string      // correlation id used for tracing/logging
	VaaID   string      // id of the VAA document to check
	ChainID sdk.ChainID // emitter chain of the VAA
}

// ProcessorFunc is a function to process vaa message.
type ProcessorFunc func(context.Context, *Params) error

View File

@ -0,0 +1,157 @@
package queue
import (
"context"
"encoding/json"
"strconv"
"sync"
"time"
sqs_client "github.com/wormhole-foundation/wormhole-explorer/common/client/sqs"
"github.com/wormhole-foundation/wormhole-explorer/fly-event-processor/internal/metrics"
"go.uber.org/zap"
)
// SQSOption is a functional option configuring a VAA SQS queue.
type SQSOption func(*SQS)

// SQS represents a VAA queue backed by AWS SQS.
type SQS struct {
	consumer *sqs_client.Consumer
	ch       chan ConsumerMessage
	chSize   int
	wg       sync.WaitGroup
	metrics  metrics.Metrics
	logger   *zap.Logger
}

// NewEventSqs creates a VAA SQS queue with a default channel size of 10,
// which can be overridden through options.
func NewEventSqs(consumer *sqs_client.Consumer, metrics metrics.Metrics, logger *zap.Logger, opts ...SQSOption) *SQS {
	queue := &SQS{
		consumer: consumer,
		chSize:   10,
		metrics:  metrics,
		logger:   logger.With(zap.String("queueUrl", consumer.GetQueueUrl())),
	}
	for _, applyOption := range opts {
		applyOption(queue)
	}
	queue.ch = make(chan ConsumerMessage, queue.chSize)
	return queue
}

// WithChannelSize overrides the buffer size of the internal channel.
func WithChannelSize(size int) SQSOption {
	return func(q *SQS) {
		q.chSize = size
	}
}
// Consume returns the channel with the received messages from the SQS queue.
//
// It starts a goroutine that polls SQS in a loop: malformed messages are
// deleted and skipped, and valid events are pushed to the channel together
// with their expiration and retry metadata. The goroutine waits for the
// whole batch to be acked (Done/Failed) before fetching the next one.
func (q *SQS) Consume(ctx context.Context) <-chan ConsumerMessage {
	go func() {
		for {
			messages, err := q.consumer.GetMessages(ctx)
			if err != nil {
				q.logger.Error("Error getting messages from SQS", zap.Error(err))
				continue
			}
			q.logger.Debug("Received messages from SQS", zap.Int("count", len(messages)))
			// Messages must be handled before the visibility timeout elapses,
			// otherwise SQS redelivers them.
			expiredAt := time.Now().Add(q.consumer.GetVisibilityTimeout())
			for _, msg := range messages {
				q.metrics.IncDuplicatedVaaConsumedQueue()

				// unmarshal body to sqsEvent (the SNS envelope)
				var sqsEvent sqsEvent
				err := json.Unmarshal([]byte(*msg.Body), &sqsEvent)
				if err != nil {
					q.logger.Error("Error decoding message from SQS", zap.Error(err))
					if err = q.consumer.DeleteMessage(ctx, msg.ReceiptHandle); err != nil {
						q.logger.Error("Error deleting message from SQS", zap.Error(err))
					}
					continue
				}

				// Unmarshal the inner event.
				// BUG FIX: the original passed the nil *Event value directly to
				// json.Unmarshal, which always fails ("Unmarshal(nil *Event)"),
				// so every valid message was deleted and dropped. Passing the
				// address of the pointer populates it (and leaves it nil only
				// for a JSON "null", which the check below handles).
				var event *Event
				err = json.Unmarshal([]byte(sqsEvent.Message), &event)
				if err != nil {
					q.logger.Error("Error decoding message from SQS", zap.Error(err))
					if err = q.consumer.DeleteMessage(ctx, msg.ReceiptHandle); err != nil {
						q.logger.Error("Error deleting message from SQS", zap.Error(err))
					}
					continue
				}
				if event == nil {
					q.logger.Warn("Can not handle message", zap.String("body", *msg.Body))
					if err = q.consumer.DeleteMessage(ctx, msg.ReceiptHandle); err != nil {
						q.logger.Error("Error deleting message from SQS", zap.Error(err))
					}
					continue
				}

				// ApproximateReceiveCount tells how many times this message
				// has been delivered; a parse failure leaves retry at 0.
				retry, _ := strconv.Atoi(msg.Attributes["ApproximateReceiveCount"])
				q.wg.Add(1)
				q.ch <- &sqsConsumerMessage{
					id:        msg.ReceiptHandle,
					data:      event,
					wg:        &q.wg,
					logger:    q.logger,
					consumer:  q.consumer,
					expiredAt: expiredAt,
					retry:     uint8(retry),
					metrics:   q.metrics,
					ctx:       ctx,
				}
			}
			// Block until every message of this batch was acked before
			// polling SQS again.
			q.wg.Wait()
		}
	}()
	return q.ch
}
// Close closes all consumer resources.
func (q *SQS) Close() {
	close(q.ch)
}

// sqsConsumerMessage wraps a received SQS message so it satisfies the
// ConsumerMessage interface.
type sqsConsumerMessage struct {
	data      *Event
	consumer  *sqs_client.Consumer
	wg        *sync.WaitGroup
	id        *string // SQS receipt handle, required to delete the message
	logger    *zap.Logger
	expiredAt time.Time // instant at which the visibility timeout elapses
	retry     uint8     // ApproximateReceiveCount reported by SQS
	metrics   metrics.Metrics
	// NOTE(review): storing a context in a struct is unconventional; it is
	// only used by Done() to delete the message — consider passing it there.
	ctx context.Context
}

// Data returns the decoded event carried by this message.
func (m *sqsConsumerMessage) Data() *Event {
	return m.data
}

// Done acknowledges the message by deleting it from SQS and releases the
// wait-group slot taken when the message was dispatched.
func (m *sqsConsumerMessage) Done() {
	if err := m.consumer.DeleteMessage(m.ctx, m.id); err != nil {
		m.logger.Error("Error deleting message from SQS",
			zap.String("vaaId", m.data.Data.VaaID),
			zap.Bool("isExpired", m.IsExpired()),
			zap.Time("expiredAt", m.expiredAt),
			zap.Error(err),
		)
	}
	m.wg.Done()
}

// Failed releases the wait-group slot without deleting the message, so
// SQS will redeliver it once the visibility timeout elapses.
func (m *sqsConsumerMessage) Failed() {
	m.wg.Done()
}

// IsExpired reports whether the visibility timeout has already elapsed.
func (m *sqsConsumerMessage) IsExpired() bool {
	return m.expiredAt.Before(time.Now())
}

// Retry returns the delivery attempt count for this message.
func (m *sqsConsumerMessage) Retry() uint8 {
	return m.retry
}

View File

@ -0,0 +1,43 @@
package queue
import (
"context"
"time"
)
// sqsEvent represents the SNS envelope delivered through SQS.
type sqsEvent struct {
	MessageID string `json:"MessageId"`
	Message   string `json:"Message"` // JSON-encoded Event
}

// Event represents an event to be handled.
type Event struct {
	TrackID string       `json:"trackId"`
	Type    string       `json:"type"`
	Source  string       `json:"source"`
	Data    DuplicateVaa `json:"data"`
}

// DuplicateVaa is the payload of a duplicated-vaa event.
type DuplicateVaa struct {
	VaaID            string     `json:"vaaId"`
	ChainID          uint16     `json:"chainId"`
	Version          uint8      `json:"version"`
	GuardianSetIndex uint32     `json:"guardianSetIndex"`
	Vaa              []byte     `json:"vaas"` // raw serialized VAA bytes
	Digest           string     `json:"digest"`
	ConsistencyLevel uint8      `json:"consistencyLevel"`
	Timestamp        *time.Time `json:"timestamp"`
}

// ConsumerMessage definition.
type ConsumerMessage interface {
	Retry() uint8
	Data() *Event
	Done()
	Failed()
	IsExpired() bool
}

// ConsumeFunc is a function to consume Event.
type ConsumeFunc func(context.Context) <-chan ConsumerMessage

View File

@ -0,0 +1,127 @@
package storage
import (
"context"
commonRepo "github.com/wormhole-foundation/wormhole-explorer/common/repository"
"go.mongodb.org/mongo-driver/mongo"
"go.uber.org/zap"
"gopkg.in/mgo.v2/bson"
)
// Repository exposes operations over the `vaas` and `duplicateVaas`
// collections.
type Repository struct {
	logger        *zap.Logger
	vaas          *mongo.Collection
	duplicateVaas *mongo.Collection
}

// NewRepository creates a new repository backed by the given database.
func NewRepository(logger *zap.Logger, db *mongo.Database) *Repository {
	r := Repository{
		logger:        logger,
		vaas:          db.Collection(commonRepo.Vaas),
		duplicateVaas: db.Collection(commonRepo.DuplicateVaas),
	}
	return &r
}
// FindVAAById finds a vaa by id.
// On failure it returns a nil document so callers never see a
// half-populated value alongside a non-nil error.
func (r *Repository) FindVAAById(ctx context.Context, vaaID string) (*VaaDoc, error) {
	var vaaDoc VaaDoc
	if err := r.vaas.FindOne(ctx, bson.M{"_id": vaaID}).Decode(&vaaDoc); err != nil {
		return nil, err
	}
	return &vaaDoc, nil
}

// FindDuplicateVAAById finds a duplicate vaa by id.
// On failure it returns a nil document (see FindVAAById).
func (r *Repository) FindDuplicateVAAById(ctx context.Context, id string) (*DuplicateVaaDoc, error) {
	var duplicateVaaDoc DuplicateVaaDoc
	if err := r.duplicateVaas.FindOne(ctx, bson.M{"_id": id}).Decode(&duplicateVaaDoc); err != nil {
		return nil, err
	}
	return &duplicateVaaDoc, nil
}

// FindDuplicateVAAs finds all duplicate vaas recorded for the given vaa id.
func (r *Repository) FindDuplicateVAAs(ctx context.Context, vaaID string) ([]DuplicateVaaDoc, error) {
	var duplicateVaaDocs []DuplicateVaaDoc
	cursor, err := r.duplicateVaas.Find(ctx, bson.M{"vaaId": vaaID})
	if err != nil {
		return nil, err
	}
	if err = cursor.All(ctx, &duplicateVaaDocs); err != nil {
		return nil, err
	}
	return duplicateVaaDocs, nil
}
// FixVAA atomically replaces the stored VAA identified by vaaID with the
// duplicate identified by duplicateID: the old documents are removed and
// re-inserted swapped (the duplicate becomes the canonical VAA, the old
// VAA becomes a duplicate).
//
// BUG FIX: the original started a transaction but executed every read and
// write with the plain ctx, so the operations ran OUTSIDE the session and
// the "transaction" provided no atomicity; the session was also never
// ended. All operations now run through the session context supplied by
// session.WithTransaction, which commits on success and aborts on error.
func (r *Repository) FixVAA(ctx context.Context, vaaID, duplicateID string) error {
	// start mongo session/transaction
	session, err := r.vaas.Database().Client().StartSession()
	if err != nil {
		return err
	}
	defer session.EndSession(ctx)

	_, err = session.WithTransaction(ctx, func(sc mongo.SessionContext) (interface{}, error) {
		// get VAA by id
		vaaDoc, err := r.FindVAAById(sc, vaaID)
		if err != nil {
			return nil, err
		}

		// get duplicate vaa by id
		duplicateVaaDoc, err := r.FindDuplicateVAAById(sc, duplicateID)
		if err != nil {
			return nil, err
		}

		// create new vaa and new duplicate vaa
		newVaa := duplicateVaaDoc.ToVaaDoc(true)
		newDuplicateVaa, err := vaaDoc.ToDuplicateVaaDoc()
		if err != nil {
			return nil, err
		}

		// remove vaa
		if _, err := r.vaas.DeleteOne(sc, bson.M{"_id": vaaID}); err != nil {
			return nil, err
		}

		// remove duplicate vaa
		if _, err := r.duplicateVaas.DeleteOne(sc, bson.M{"_id": duplicateID}); err != nil {
			return nil, err
		}

		// insert new vaa
		if _, err := r.vaas.InsertOne(sc, newVaa); err != nil {
			return nil, err
		}

		// insert new duplicate vaa
		if _, err := r.duplicateVaas.InsertOne(sc, newDuplicateVaa); err != nil {
			return nil, err
		}
		return nil, nil
	})
	return err
}

View File

@ -0,0 +1,87 @@
package storage
import (
"time"
"github.com/wormhole-foundation/wormhole-explorer/common/domain"
"github.com/wormhole-foundation/wormhole/sdk/vaa"
sdk "github.com/wormhole-foundation/wormhole/sdk/vaa"
)
// VaaDoc represents a VAA document stored in the `vaas` collection.
type VaaDoc struct {
	ID               string      `bson:"_id"`
	Version          uint8       `bson:"version"`
	EmitterChain     sdk.ChainID `bson:"emitterChain"`
	EmitterAddr      string      `bson:"emitterAddr"`
	Sequence         string      `bson:"sequence"`
	GuardianSetIndex uint32      `bson:"guardianSetIndex"`
	Vaa              []byte      `bson:"vaas"` // raw serialized VAA bytes
	TxHash           string      `bson:"txHash,omitempty"`
	OriginTxHash     *string     `bson:"_originTxHash,omitempty"` // this is a temporary field for fixing txHash encoding
	Timestamp        *time.Time  `bson:"timestamp"`
	UpdatedAt        *time.Time  `bson:"updatedAt"`
	Digest           string      `bson:"digest"`
	IsDuplicated     bool        `bson:"isDuplicated"`    // true when duplicates exist for this VAA id
	DuplicatedFixed  bool        `bson:"duplicatedFixed"` // true once FixVAA promoted the canonical duplicate
}

// DuplicateVaaDoc represents a duplicate VAA document stored in the
// `duplicateVaas` collection.
type DuplicateVaaDoc struct {
	ID               string      `bson:"_id"`
	VaaID            string      `bson:"vaaId"` // id of the VaaDoc this duplicates
	Version          uint8       `bson:"version"`
	EmitterChain     sdk.ChainID `bson:"emitterChain"`
	EmitterAddr      string      `bson:"emitterAddr"`
	Sequence         string      `bson:"sequence"`
	GuardianSetIndex uint32      `bson:"guardianSetIndex"`
	Vaa              []byte      `bson:"vaas"` // raw serialized VAA bytes
	Digest           string      `bson:"digest"`
	ConsistencyLevel uint8       `bson:"consistencyLevel"`
	TxHash           string      `bson:"txHash,omitempty"`
	Timestamp        *time.Time  `bson:"timestamp"`
	UpdatedAt        *time.Time  `bson:"updatedAt"`
}
// ToVaaDoc converts this duplicate document into a VaaDoc, marking it as
// duplicated and recording whether the duplication has been fixed.
func (d *DuplicateVaaDoc) ToVaaDoc(duplicatedFixed bool) *VaaDoc {
	doc := VaaDoc{
		ID:               d.VaaID,
		Version:          d.Version,
		EmitterChain:     d.EmitterChain,
		EmitterAddr:      d.EmitterAddr,
		Sequence:         d.Sequence,
		GuardianSetIndex: d.GuardianSetIndex,
		Vaa:              d.Vaa,
		Digest:           d.Digest,
		TxHash:           d.TxHash,
		// OriginTxHash is intentionally cleared for the promoted document.
		OriginTxHash:    nil,
		Timestamp:       d.Timestamp,
		UpdatedAt:       d.UpdatedAt,
		IsDuplicated:    true,
		DuplicatedFixed: duplicatedFixed,
	}
	return &doc
}
// ToDuplicateVaaDoc converts this VAA document into a DuplicateVaaDoc.
// The raw VAA bytes are parsed to derive the unique id and the
// consistency level of the duplicate.
func (v *VaaDoc) ToDuplicateVaaDoc() (*DuplicateVaaDoc, error) {
	// Renamed from `vaa` to avoid shadowing the vaa package.
	parsed, err := vaa.Unmarshal(v.Vaa)
	if err != nil {
		return nil, err
	}
	doc := DuplicateVaaDoc{
		ID:               domain.CreateUniqueVaaID(parsed),
		VaaID:            v.ID,
		Version:          v.Version,
		EmitterChain:     v.EmitterChain,
		EmitterAddr:      v.EmitterAddr,
		Sequence:         v.Sequence,
		GuardianSetIndex: v.GuardianSetIndex,
		Vaa:              v.Vaa,
		Digest:           v.Digest,
		TxHash:           v.TxHash,
		ConsistencyLevel: parsed.ConsistencyLevel,
		Timestamp:        v.Timestamp,
		UpdatedAt:        v.UpdatedAt,
	}
	return &doc, nil
}

26
fly/builder/event.go Normal file
View File

@ -0,0 +1,26 @@
package builder
import (
"context"
"github.com/wormhole-foundation/wormhole-explorer/fly/config"
"github.com/wormhole-foundation/wormhole-explorer/fly/event"
"go.uber.org/zap"
)
// NewEventDispatcher returns the event dispatcher matching the current
// configuration: a no-op dispatcher for local runs, otherwise an SNS
// dispatcher built from the AWS configuration. Any construction failure
// is fatal.
func NewEventDispatcher(ctx context.Context, config *config.Configuration, logger *zap.Logger) event.EventDispatcher {
	if config.IsLocal {
		return event.NewNoopEventDispatcher()
	}
	awsConfig, err := NewAwsConfig(ctx, config)
	if err != nil {
		logger.Fatal("could not create aws config", zap.Error(err))
	}
	dispatcher, err := event.NewSnsEventDispatcher(awsConfig, config.Aws.EventsSnsUrl)
	if err != nil {
		logger.Fatal("could not create sns event dispatcher", zap.Error(err))
	}
	return dispatcher
}

View File

@ -9,6 +9,7 @@ import (
"github.com/wormhole-foundation/wormhole-explorer/common/dbutil"
"github.com/wormhole-foundation/wormhole-explorer/common/domain"
"github.com/wormhole-foundation/wormhole-explorer/common/logger"
"github.com/wormhole-foundation/wormhole-explorer/fly/event"
"github.com/wormhole-foundation/wormhole-explorer/fly/internal/metrics"
"github.com/wormhole-foundation/wormhole-explorer/fly/producer"
"github.com/wormhole-foundation/wormhole-explorer/fly/storage"
@ -41,6 +42,7 @@ func RunTxHashEncoding(cfg TxHashEncondingConfig) {
db.Database,
producer.NewVAAInMemory(logger).Push,
txhash.NewMongoTxHash(db.Database, logger),
event.NewNoopEventDispatcher(),
logger)
workerTxHashEncoding(ctx, logger, repository, vaa.ChainID(cfg.ChainID), cfg.PageSize)

View File

@ -77,6 +77,7 @@ type AwsConfiguration struct {
AwsEndpoint string `env:"AWS_ENDPOINT"`
SqsUrl string `env:"SQS_URL,required"`
ObservationsSqsUrl string `env:"OBSERVATIONS_SQS_URL,required"`
EventsSnsUrl string `env:"EVENTS_SNS_URL,required"`
}
type Cache struct {

13
fly/event/noop.go Normal file
View File

@ -0,0 +1,13 @@
package event
import "context"
// NoopEventDispatcher is an EventDispatcher that discards every event;
// it is used when no real (SNS) dispatcher is configured, e.g. local runs.
type NoopEventDispatcher struct{}

// NewNoopEventDispatcher returns a new no-op event dispatcher.
func NewNoopEventDispatcher() *NoopEventDispatcher {
	return &NoopEventDispatcher{}
}

// NewDuplicateVaa discards the event and always succeeds.
func (n *NoopEventDispatcher) NewDuplicateVaa(context.Context, DuplicateVaa) error {
	return nil
}

44
fly/event/sns.go Normal file
View File

@ -0,0 +1,44 @@
package event
import (
"context"
"encoding/json"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
aws_sns "github.com/aws/aws-sdk-go-v2/service/sns"
"github.com/wormhole-foundation/wormhole-explorer/fly/internal/track"
)
// SnsEventDispatcher publishes fly events to an AWS SNS topic.
type SnsEventDispatcher struct {
	api *aws_sns.Client
	url string // topic ARN to publish to
}

// NewSnsEventDispatcher creates a dispatcher publishing to the topic at url.
func NewSnsEventDispatcher(awsConfig aws.Config, url string) (*SnsEventDispatcher, error) {
	dispatcher := &SnsEventDispatcher{
		api: aws_sns.NewFromConfig(awsConfig),
		url: url,
	}
	return dispatcher, nil
}

// NewDuplicateVaa publishes a duplicated-vaa event for e. The vaa id plus
// digest serves both as the message group id and as the deduplication id.
func (s *SnsEventDispatcher) NewDuplicateVaa(ctx context.Context, e DuplicateVaa) error {
	payload, err := json.Marshal(event{
		TrackID: track.GetTrackIDForDuplicatedVAA(e.VaaID),
		Type:    "duplicated-vaa",
		Source:  "fly",
		Data:    e,
	})
	if err != nil {
		return err
	}

	groupID := fmt.Sprintf("%s-%s", e.VaaID, e.Digest)
	input := &aws_sns.PublishInput{
		MessageGroupId:         aws.String(groupID),
		MessageDeduplicationId: aws.String(groupID),
		Message:                aws.String(string(payload)),
		TopicArn:               aws.String(s.url),
	}
	_, err = s.api.Publish(ctx, input)
	return err
}

28
fly/event/types.go Normal file
View File

@ -0,0 +1,28 @@
package event
import (
"context"
"time"
)
// DuplicateVaa is the payload published when a duplicated VAA is detected.
type DuplicateVaa struct {
	VaaID            string     `json:"vaaId"`
	ChainID          uint16     `json:"chainId"`
	Version          uint8      `json:"version"`
	GuardianSetIndex uint32     `json:"guardianSetIndex"`
	Vaa              []byte     `json:"vaas"` // raw serialized VAA bytes
	Digest           string     `json:"digest"`
	ConsistencyLevel uint8      `json:"consistencyLevel"`
	Timestamp        *time.Time `json:"timestamp"`
}

// event is the generic envelope published to the events topic.
type event struct {
	TrackID string `json:"trackId"`
	Type    string `json:"type"`
	Source  string `json:"source"`
	Data    any    `json:"data"`
}

// EventDispatcher publishes fly events to downstream consumers.
type EventDispatcher interface {
	NewDuplicateVaa(ctx context.Context, e DuplicateVaa) error
}

View File

@ -11,3 +11,8 @@ func GetTrackID(vaaID string) string {
uuid := uuid.New()
return fmt.Sprintf("gossip-signed-vaa-%s-%s", vaaID, uuid.String())
}
// GetTrackIDForDuplicatedVAA builds a unique track id for a duplicated-vaa
// event by combining the vaa id with a fresh random UUID.
func GetTrackIDForDuplicatedVAA(vaaID string) string {
	return fmt.Sprintf("fly-duplicated-vaa-%s-%s", vaaID, uuid.NewString())
}

View File

@ -99,7 +99,9 @@ func main() {
if err != nil {
logger.Fatal("could not create tx hash store", zap.Error(err))
}
repository := storage.NewRepository(alertClient, metrics, db.Database, producerFunc, txHashStore, logger)
eventDispatcher := builder.NewEventDispatcher(rootCtx, cfg, logger)
repository := storage.NewRepository(alertClient, metrics, db.Database, producerFunc, txHashStore, eventDispatcher, logger)
vaaNonPythDedup, err := builder.NewDeduplicator("vaas-dedup", cfg.VaasDedup, logger)
if err != nil {

View File

@ -15,6 +15,7 @@ import (
"github.com/wormhole-foundation/wormhole-explorer/common/events"
"github.com/wormhole-foundation/wormhole-explorer/common/repository"
"github.com/wormhole-foundation/wormhole-explorer/common/utils"
"github.com/wormhole-foundation/wormhole-explorer/fly/event"
flyAlert "github.com/wormhole-foundation/wormhole-explorer/fly/internal/alert"
"github.com/wormhole-foundation/wormhole-explorer/fly/internal/metrics"
"github.com/wormhole-foundation/wormhole-explorer/fly/internal/track"
@ -30,13 +31,14 @@ import (
// TODO separate and maybe share between fly and web
type Repository struct {
alertClient alert.AlertClient
metrics metrics.Metrics
db *mongo.Database
afterUpdate producer.PushFunc
txHashStore txhash.TxHashStore
log *zap.Logger
collections struct {
alertClient alert.AlertClient
metrics metrics.Metrics
db *mongo.Database
afterUpdate producer.PushFunc
txHashStore txhash.TxHashStore
eventDispatcher event.EventDispatcher
log *zap.Logger
collections struct {
vaas *mongo.Collection
heartbeats *mongo.Collection
observations *mongo.Collection
@ -53,8 +55,9 @@ func NewRepository(alertService alert.AlertClient, metrics metrics.Metrics,
db *mongo.Database,
vaaTopicFunc producer.PushFunc,
txHashStore txhash.TxHashStore,
eventDispatcher event.EventDispatcher,
log *zap.Logger) *Repository {
return &Repository{alertService, metrics, db, vaaTopicFunc, txHashStore, log, struct {
return &Repository{alertService, metrics, db, vaaTopicFunc, txHashStore, eventDispatcher, log, struct {
vaas *mongo.Collection
heartbeats *mongo.Collection
observations *mongo.Collection
@ -514,7 +517,20 @@ func (s *Repository) UpsertDuplicateVaa(ctx context.Context, v *vaa.VAA, seriali
// send signedvaa event to topic.
if s.isNewRecord(result) {
return s.notifyNewVaa(ctx, v, serializedVaa, duplicateVaaDoc.TxHash)
err := s.notifyNewVaa(ctx, v, serializedVaa, duplicateVaaDoc.TxHash)
if err != nil {
return err
}
return s.eventDispatcher.NewDuplicateVaa(ctx, event.DuplicateVaa{
VaaID: v.MessageID(),
ChainID: uint16(v.EmitterChain),
Version: v.Version,
GuardianSetIndex: v.GuardianSetIndex,
Vaa: serializedVaa,
Digest: utils.NormalizeHex(v.HexDigest()),
ConsistencyLevel: v.ConsistencyLevel,
Timestamp: &v.Timestamp,
})
}
return nil

View File

@ -12,4 +12,5 @@ use (
./spy
./tx-tracker
./notional
./fly-event-processor
)

View File

@ -49,7 +49,8 @@ func (p *Publisher) Publish(ctx context.Context, e *watcher.Event) {
TxHash: e.TxHash,
Version: e.Version,
Revision: e.Revision,
Hash: e.Hash,
Digest: e.Digest,
Overwrite: e.DuplicatedFixed,
}
// In some scenarios the fly component that inserts the VAA documents does not have the txhash field available,

View File

@ -19,7 +19,8 @@ type Event struct {
TxHash string `json:"txHash"`
Version uint16 `json:"version"`
Revision uint16 `json:"revision"`
Hash []byte `json:"hash"`
Digest string `json:"digest"`
Overwrite bool `json:"overwrite"`
}
// PushFunc is a function to push VAAEvent.

View File

@ -50,8 +50,9 @@ type Event struct {
TxHash string `bson:"txHash"`
Version uint16 `bson:"version"`
Revision uint16 `bson:"revision"`
Hash []byte `bson:"hash"`
Digest string `bson:"digest"`
IsDuplicated bool `bson:"isDuplicated"`
DuplicatedFixed bool `bson:"duplicatedFixed"`
}
const queryTemplate = `

View File

@ -114,7 +114,7 @@ func (c *Consumer) processSourceTx(ctx context.Context, msg queue.ConsumerMessag
Vaa: event.Vaa,
IsVaaSigned: event.IsVaaSigned,
Metrics: c.metrics,
Overwrite: false, // avoid processing the same transaction twice
Overwrite: event.Overwrite, // avoid processing the same transaction twice
Source: event.Source,
}
_, err := ProcessSourceTx(ctx, c.logger, c.rpcpool, c.wormchainRpcPool, c.repository, &p, c.p2pNetwork)

View File

@ -159,270 +159,11 @@ func (r *Repository) AlreadyProcessed(ctx context.Context, vaaId string) (bool,
}
}
// CountDocumentsByTimeRange returns the number of documents that match the
// given time range (timestamp in [timeAfter, timeBefore], inclusive).
func (r *Repository) CountDocumentsByTimeRange(
	ctx context.Context,
	timeAfter time.Time,
	timeBefore time.Time,
) (uint64, error) {

	// Build the aggregation pipeline
	var pipeline mongo.Pipeline
	{
		// filter by time range
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{
				{"timestamp", bson.D{{"$gte", timeAfter}}},
			}},
		})
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{
				{"timestamp", bson.D{{"$lte", timeBefore}}},
			}},
		})

		// Count the number of results
		pipeline = append(pipeline, bson.D{
			{"$count", "numDocuments"},
		})
	}

	// Execute the aggregation pipeline
	cur, err := r.vaas.Aggregate(ctx, pipeline)
	if err != nil {
		r.logger.Error("failed execute aggregation pipeline", zap.Error(err))
		return 0, err
	}

	// Read results from cursor
	var results []struct {
		NumDocuments uint64 `bson:"numDocuments"`
	}
	err = cur.All(ctx, &results)
	if err != nil {
		r.logger.Error("failed to decode cursor", zap.Error(err))
		return 0, err
	}
	if len(results) == 0 {
		// $count emits no row when nothing matched the range.
		return 0, nil
	}
	if len(results) > 1 {
		// BUG FIX: the original returned `err` here, which is nil at this
		// point, so callers saw a zero count with no error. Return a real
		// error instead.
		r.logger.Error("too many results", zap.Int("numResults", len(results)))
		return 0, errors.New("too many results")
	}
	return results[0].NumDocuments, nil
}
// CountIncompleteDocuments returns the number of documents that have destTx
// data, but don't have sourceTx data.
func (r *Repository) CountIncompleteDocuments(ctx context.Context) (uint64, error) {

	// Build the aggregation pipeline
	var pipeline mongo.Pipeline
	{
		// Look up transactions that either:
		// 1. have not been processed
		// 2. have been processed, but encountered an internal error
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{
				{"$or", bson.A{
					bson.D{{"originTx", bson.D{{"$exists", false}}}},
					bson.D{{"originTx.status", bson.M{"$eq": domain.SourceTxStatusInternalError}}},
				}},
			}},
		})

		// Count the number of results
		pipeline = append(pipeline, bson.D{
			{"$count", "numDocuments"},
		})
	}

	// Execute the aggregation pipeline
	cur, err := r.globalTransactions.Aggregate(ctx, pipeline)
	if err != nil {
		r.logger.Error("failed execute aggregation pipeline", zap.Error(err))
		return 0, err
	}

	// Read results from cursor
	var results []struct {
		NumDocuments uint64 `bson:"numDocuments"`
	}
	err = cur.All(ctx, &results)
	if err != nil {
		r.logger.Error("failed to decode cursor", zap.Error(err))
		return 0, err
	}
	if len(results) == 0 {
		// $count emits no row when nothing matched.
		return 0, nil
	}
	if len(results) > 1 {
		// BUG FIX: the original returned `err`, which is nil at this point,
		// silently reporting a zero count. Return an explicit error.
		r.logger.Error("too many results", zap.Int("numResults", len(results)))
		return 0, errors.New("too many results")
	}
	return results[0].NumDocuments, nil
}
// GlobalTransaction groups a transaction id with its associated VAA
// documents.
type GlobalTransaction struct {
	Id   string       `bson:"_id"`
	Vaas []vaa.VaaDoc `bson:"vaas"`
}
// GetDocumentsByTimeRange iterates through documents within a specified time
// range, newest first, returning at most `limit` results per call.
//
// Pagination: (lastTimestamp, lastId) is the cursor from the previous page;
// pass a nil lastTimestamp for the first page.
func (r *Repository) GetDocumentsByTimeRange(
	ctx context.Context,
	lastId string,
	lastTimestamp *time.Time,
	limit uint,
	timeAfter time.Time,
	timeBefore time.Time,
) ([]GlobalTransaction, error) {

	// Build the aggregation pipeline
	var pipeline mongo.Pipeline
	{
		// Specify sorting criteria: newest first, _id ascending as a
		// deterministic tie-breaker for equal timestamps.
		pipeline = append(pipeline, bson.D{
			{"$sort", bson.D{
				bson.E{"timestamp", -1},
				bson.E{"_id", 1},
			}},
		})

		// filter out already processed documents
		//
		// We use the timestamp field as a pagination cursor
		if lastTimestamp != nil {
			pipeline = append(pipeline, bson.D{
				{"$match", bson.D{
					{"$or", bson.A{
						bson.D{{"timestamp", bson.M{"$lt": *lastTimestamp}}},
						bson.D{{"$and", bson.A{
							bson.D{{"timestamp", bson.M{"$eq": *lastTimestamp}}},
							bson.D{{"_id", bson.M{"$gt": lastId}}},
						}}},
					}},
				}},
			})
		}

		// filter by time range (inclusive on both ends)
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{
				{"timestamp", bson.D{{"$gte", timeAfter}}},
			}},
		})
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{
				{"timestamp", bson.D{{"$lte", timeBefore}}},
			}},
		})

		// Limit size of results
		pipeline = append(pipeline, bson.D{
			{"$limit", limit},
		})
	}

	// Execute the aggregation pipeline
	cur, err := r.vaas.Aggregate(ctx, pipeline)
	if err != nil {
		r.logger.Error("failed execute aggregation pipeline", zap.Error(err))
		return nil, errors.WithStack(err)
	}

	// Read results from cursor
	var documents []vaa.VaaDoc
	err = cur.All(ctx, &documents)
	if err != nil {
		r.logger.Error("failed to decode cursor", zap.Error(err))
		return nil, errors.WithStack(err)
	}

	// Build the result: wrap each VAA document in a GlobalTransaction.
	var globalTransactions []GlobalTransaction
	for i := range documents {
		globalTransaction := GlobalTransaction{
			Id:   documents[i].ID,
			Vaas: []vaa.VaaDoc{documents[i]},
		}
		globalTransactions = append(globalTransactions, globalTransaction)
	}

	return globalTransactions, nil
}
// GetIncompleteDocuments gets a batch of VAA IDs from the database:
// global transactions that have no originTx yet, or whose originTx ended
// in an internal error. Results are keyed/paged by _id.
func (r *Repository) GetIncompleteDocuments(
	ctx context.Context,
	lastId string,
	lastTimestamp *time.Time,
	limit uint,
) ([]GlobalTransaction, error) {

	// Build the aggregation pipeline
	var pipeline mongo.Pipeline
	{
		// Specify sorting criteria
		pipeline = append(pipeline, bson.D{
			{"$sort", bson.D{bson.E{"_id", 1}}},
		})

		// filter out already processed documents
		//
		// We use the _id field as a pagination cursor
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{{"_id", bson.M{"$gt": lastId}}}},
		})

		// Look up transactions that either:
		// 1. have not been processed
		// 2. have been processed, but encountered an internal error
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{
				{"$or", bson.A{
					bson.D{{"originTx", bson.D{{"$exists", false}}}},
					bson.D{{"originTx.status", bson.M{"$eq": domain.SourceTxStatusInternalError}}},
				}},
			}},
		})

		// Left join on the VAA collection, attaching the matching VAA
		// documents to each global transaction.
		pipeline = append(pipeline, bson.D{
			{"$lookup", bson.D{
				{"from", "vaas"},
				{"localField", "_id"},
				{"foreignField", "_id"},
				{"as", "vaas"},
			}},
		})

		// Limit size of results
		pipeline = append(pipeline, bson.D{
			{"$limit", limit},
		})
	}

	// Execute the aggregation pipeline
	cur, err := r.globalTransactions.Aggregate(ctx, pipeline)
	if err != nil {
		r.logger.Error("failed execute aggregation pipeline", zap.Error(err))
		return nil, errors.WithStack(err)
	}

	// Read results from cursor
	var documents []GlobalTransaction
	err = cur.All(ctx, &documents)
	if err != nil {
		r.logger.Error("failed to decode cursor", zap.Error(err))
		return nil, errors.WithStack(err)
	}

	return documents, nil
}
// VaaIdTxHash represents a vaaIdTxHash document.
type VaaIdTxHash struct {
TxHash string `bson:"txHash"`

View File

@ -25,6 +25,7 @@ type VaaEvent struct {
TxHash string `json:"txHash"`
Version uint16 `json:"version"`
Revision uint16 `json:"revision"`
Overwrite bool `json:"overwrite"`
}
// VaaConverter converts a message from a VAAEvent.
@ -49,6 +50,7 @@ func NewVaaConverter(log *zap.Logger) ConverterFunc {
Vaa: vaaEvent.Vaa,
IsVaaSigned: true,
TxHash: vaaEvent.TxHash,
Overwrite: vaaEvent.Overwrite,
}, nil
}
}

View File

@ -51,6 +51,7 @@ type Event struct {
Vaa []byte
IsVaaSigned bool
Attributes any
Overwrite bool
}
func GetAttributes[T EventAttributes](e *Event) (T, bool) {