2023-03-07 11:25:42 -08:00
|
|
|
package transactions
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
2023-05-10 13:39:18 -07:00
|
|
|
"strconv"
|
2023-03-07 11:25:42 -08:00
|
|
|
"strings"
|
2023-05-23 10:50:19 -07:00
|
|
|
"sync"
|
2023-03-07 11:25:42 -08:00
|
|
|
"time"
|
|
|
|
|
|
|
|
influxdb2 "github.com/influxdata/influxdb-client-go/v2"
|
|
|
|
"github.com/influxdata/influxdb-client-go/v2/api"
|
|
|
|
"github.com/mitchellh/mapstructure"
|
2023-03-15 12:52:50 -07:00
|
|
|
"github.com/pkg/errors"
|
|
|
|
errs "github.com/wormhole-foundation/wormhole-explorer/api/internal/errors"
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
"github.com/wormhole-foundation/wormhole-explorer/api/internal/pagination"
|
2023-05-17 11:04:17 -07:00
|
|
|
"github.com/wormhole-foundation/wormhole-explorer/api/internal/tvl"
|
2023-04-25 11:34:29 -07:00
|
|
|
"github.com/wormhole-foundation/wormhole-explorer/common/domain"
|
2023-07-28 08:27:48 -07:00
|
|
|
"github.com/wormhole-foundation/wormhole-explorer/common/utils"
|
2023-04-25 11:34:29 -07:00
|
|
|
sdk "github.com/wormhole-foundation/wormhole/sdk/vaa"
|
2023-03-15 12:52:50 -07:00
|
|
|
"go.mongodb.org/mongo-driver/bson"
|
|
|
|
"go.mongodb.org/mongo-driver/mongo"
|
2023-03-07 11:25:42 -08:00
|
|
|
"go.uber.org/zap"
|
|
|
|
)
|
|
|
|
|
2023-06-20 06:34:20 -07:00
|
|
|
// queryTemplateChainActivity is a Flux query that aggregates cross-chain
// activity grouped by (emitter_chain, destination_chain).
//
// Placeholders (in order): bucket name, range start (RFC3339), measurement
// name, field name ("notional" or "count"). Filled in by
// buildChainActivityQuery.
const queryTemplateChainActivity = `
from(bucket: "%s")
|> range(start: %s)
|> filter(fn: (r) => r._measurement == "%s" and r._field == "%s")
|> last()
|> group(columns: ["emitter_chain", "destination_chain"])
|> sum()
`
|
|
|
|
|
2023-06-20 06:34:20 -07:00
|
|
|
// queryTemplateChainActivityWithApps is the same aggregation as
// queryTemplateChainActivity, but restricted to a set of application IDs.
//
// Placeholders (in order): bucket name, range start (RFC3339), measurement
// name, field name, and a Flux string-array literal of app IDs (e.g.
// ["app1","app2"]). Filled in by buildChainActivityQuery.
const queryTemplateChainActivityWithApps = `
from(bucket: "%s")
|> range(start: %s)
|> filter(fn: (r) => r._measurement == "%s" and r._field == "%s")
|> filter(fn: (r) => contains(value: r.app_id, set: %s))
|> last()
|> group(columns: ["emitter_chain", "destination_chain"])
|> sum()
`
|
|
|
|
|
2023-04-20 12:01:10 -07:00
|
|
|
// queryTemplateTxCount24h is a Flux query that counts "vaa_count" points
// over the last 24 hours.
//
// Placeholder: bucket name. Used by getTxCount24h.
const queryTemplateTxCount24h = `
from(bucket: "%s")
|> range(start: -24h)
|> filter(fn: (r) => r._measurement == "vaa_count")
|> group(columns: ["_measurement"])
|> count()
`
|
|
|
|
|
2023-05-04 16:17:03 -07:00
|
|
|
// queryTemplateVolume24h is a Flux query that sums the "volume" field of the
// "vaa_volume_v2" measurement over the last 24 hours.
//
// Placeholder: bucket name. Used by getVolume24h.
const queryTemplateVolume24h = `
from(bucket: "%s")
|> range(start: -24h)
|> filter(fn: (r) => r._measurement == "vaa_volume_v2")
|> filter(fn:(r) => r._field == "volume")
|> group()
|> sum(column: "_value")
`
|
|
|
|
|
2023-05-18 07:14:36 -07:00
|
|
|
// queryTemplateMessages24h is a Flux query that computes the total message
// count for the last 24 hours by merging the pre-summarized 5-minute metric
// with the raw metric for the current (not-yet-summarized) interval.
//
// Placeholders (in order): bucket for the summarized metric, bucket for the
// raw metric. Used by getMessages24h.
//
// NOTE(review): the inline Flux comment "return the top 7 volumes" looks like
// a stale copy-paste from the top-assets query — this query returns a single
// count. Left untouched here because the string is sent verbatim to InfluxDB.
const queryTemplateMessages24h = `
import "date"

// Get historic count from the summarized metric.
summarized = from(bucket: "%s")
|> range(start: -24h)
|> filter(fn: (r) => r["_measurement"] == "vaa_count_all_messages_5m")
|> group()
|> sum()

// Get the current count from the unsummarized metric.
// This assumes that the summarization task runs exactly every 5 minutes
startOfInterval = date.truncate(t: now(), unit: 5m)
raw = from(bucket: "%s")
|> range(start: startOfInterval)
|> filter(fn: (r) => r["_measurement"] == "vaa_count_all_messages")
|> filter(fn: (r) => r["_field"] == "count")
|> group()
|> count()

// Merge all results, compute the sum, return the top 7 volumes.
union(tables: [summarized, raw])
|> group()
|> sum()
`
|
|
|
|
|
2023-05-12 09:05:18 -07:00
|
|
|
// queryTemplateTopAssets is a Flux query that returns the top 7 assets by
// volume, merging the summarized daily metric with the current day's raw
// metric, grouped by (emitter_chain, token_address, token_chain).
//
// Placeholders (in order): bucket for the summarized metric, time span for
// the range (e.g. "7d"), bucket for the raw metric. Used by GetTopAssets.
const queryTemplateTopAssets = `
import "date"

// Get historic volumes from the summarized metric.
summarized = from(bucket: "%s")
|> range(start: -%s)
|> filter(fn: (r) => r["_measurement"] == "asset_volumes_24h_v2")
|> group(columns: ["emitter_chain", "token_address", "token_chain"])

// Get the current day's volume from the unsummarized metric.
// This assumes that the summarization task runs exactly once per day at 00:00hs
startOfDay = date.truncate(t: now(), unit: 1d)
raw = from(bucket: "%s")
|> range(start: startOfDay)
|> filter(fn: (r) => r["_measurement"] == "vaa_volume_v2")
|> filter(fn: (r) => r["_field"] == "volume")
|> group(columns: ["emitter_chain", "token_address", "token_chain"])

// Merge all results, compute the sum, return the top 7 volumes.
union(tables: [summarized, raw])
|> group(columns: ["emitter_chain", "token_address", "token_chain"])
|> sum()
|> group()
|> top(columns: ["_value"], n: 7)
`
|
|
|
|
|
2023-05-12 09:05:18 -07:00
|
|
|
// queryTemplateTopChainPairs is a Flux query that returns up to 100
// (emitter_chain, destination_chain) pairs ranked by transfer count; the Go
// caller (GetTopChainPairs) further filters and truncates the result to 7.
//
// Placeholders (in order): bucket name, time span for the range, measurement
// name (one of the "chain_activity_*" summarized measurements).
//
// NOTE(review): the `import "date"` appears unused by this query — presumably
// left over from an earlier version; kept because the string is sent verbatim.
const queryTemplateTopChainPairs = `
import "date"

from(bucket: "%s")
|> range(start: -%s)
|> filter(fn: (r) => r._measurement == "%s" and r._field == "count")
|> last()
|> group(columns: ["emitter_chain", "destination_chain"])
|> sum()
|> group()
|> top(columns: ["_value"], n: 100)
`
|
|
|
|
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
// repositoryCollections groups the MongoDB collections used by Repository.
type repositoryCollections struct {
	vaas               *mongo.Collection // collection "vaas"
	parsedVaa          *mongo.Collection // collection "parsedVaa"
	globalTransactions *mongo.Collection // collection "globalTransactions"
}
|
|
|
|
|
2023-03-07 11:25:42 -08:00
|
|
|
// Repository provides read access to transaction metrics stored in InfluxDB
// and to transaction documents stored in MongoDB. Construct it with
// NewRepository.
type Repository struct {
	tvl       *tvl.Tvl         // external total-value-locked provider
	influxCli influxdb2.Client // InfluxDB client (kept for lifecycle management)
	queryAPI  api.QueryAPI     // query handle derived from influxCli

	// InfluxDB buckets, by retention policy.
	bucketInfiniteRetention string
	bucket30DaysRetention   string
	bucket24HoursRetention  string

	db          *mongo.Database
	collections repositoryCollections

	// supportedChainIDs is used to filter out results with chain IDs that the
	// explorer does not support (see FindChainActivity).
	supportedChainIDs map[sdk.ChainID]string

	logger *zap.Logger
}
|
|
|
|
|
2023-05-10 13:39:18 -07:00
|
|
|
func NewRepository(
|
2023-05-17 11:04:17 -07:00
|
|
|
tvl *tvl.Tvl,
|
2023-05-10 13:39:18 -07:00
|
|
|
client influxdb2.Client,
|
|
|
|
org string,
|
|
|
|
bucket24HoursRetention, bucket30DaysRetention, bucketInfiniteRetention string,
|
|
|
|
db *mongo.Database,
|
|
|
|
logger *zap.Logger,
|
|
|
|
) *Repository {
|
|
|
|
|
|
|
|
r := Repository{
|
2023-05-17 11:04:17 -07:00
|
|
|
tvl: tvl,
|
2023-05-10 13:39:18 -07:00
|
|
|
influxCli: client,
|
|
|
|
queryAPI: client.QueryAPI(org),
|
|
|
|
bucket24HoursRetention: bucket24HoursRetention,
|
|
|
|
bucket30DaysRetention: bucket30DaysRetention,
|
|
|
|
bucketInfiniteRetention: bucketInfiniteRetention,
|
|
|
|
db: db,
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
collections: repositoryCollections{
|
|
|
|
vaas: db.Collection("vaas"),
|
|
|
|
parsedVaa: db.Collection("parsedVaa"),
|
|
|
|
globalTransactions: db.Collection("globalTransactions"),
|
|
|
|
},
|
2023-07-13 08:59:37 -07:00
|
|
|
supportedChainIDs: domain.GetSupportedChainIDs(),
|
|
|
|
logger: logger,
|
2023-05-10 13:39:18 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
return &r
|
|
|
|
}
|
|
|
|
|
2023-05-12 09:05:18 -07:00
|
|
|
func (r *Repository) GetTopAssets(ctx context.Context, timeSpan *TopStatisticsTimeSpan) ([]AssetDTO, error) {
|
2023-05-10 13:39:18 -07:00
|
|
|
|
|
|
|
// Submit the query to InfluxDB
|
2023-05-12 09:05:18 -07:00
|
|
|
query := fmt.Sprintf(queryTemplateTopAssets, r.bucket30DaysRetention, *timeSpan, r.bucketInfiniteRetention)
|
2023-05-10 13:39:18 -07:00
|
|
|
result, err := r.queryAPI.Query(ctx, query)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if result.Err() != nil {
|
|
|
|
return nil, result.Err()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Scan query results
|
|
|
|
type Row struct {
|
|
|
|
EmitterChain string `mapstructure:"emitter_chain"`
|
|
|
|
TokenChain string `mapstructure:"token_chain"`
|
|
|
|
TokenAddress string `mapstructure:"token_address"`
|
2023-05-15 11:15:12 -07:00
|
|
|
Volume uint64 `mapstructure:"_value"`
|
2023-05-10 13:39:18 -07:00
|
|
|
}
|
|
|
|
var rows []Row
|
|
|
|
for result.Next() {
|
|
|
|
var row Row
|
|
|
|
if err := mapstructure.Decode(result.Record().Values(), &row); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
rows = append(rows, row)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Convert the rows into the response model
|
|
|
|
var assets []AssetDTO
|
|
|
|
for i := range rows {
|
|
|
|
|
|
|
|
// parse emitter chain
|
|
|
|
emitterChain, err := strconv.ParseUint(rows[i].EmitterChain, 10, 16)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("failed to convert emitter chain field to uint16")
|
|
|
|
}
|
|
|
|
|
|
|
|
// parse token chain
|
|
|
|
tokenChain, err := strconv.ParseUint(rows[i].TokenChain, 10, 16)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("failed to convert token chain field to uint16")
|
|
|
|
}
|
|
|
|
|
|
|
|
// append the new item to the response
|
|
|
|
asset := AssetDTO{
|
|
|
|
EmitterChain: sdk.ChainID(emitterChain),
|
|
|
|
TokenChain: sdk.ChainID(tokenChain),
|
|
|
|
TokenAddress: rows[i].TokenAddress,
|
|
|
|
Volume: convertToDecimal(rows[i].Volume),
|
|
|
|
}
|
|
|
|
assets = append(assets, asset)
|
|
|
|
}
|
|
|
|
|
|
|
|
return assets, nil
|
|
|
|
}
|
|
|
|
|
2023-05-12 09:05:18 -07:00
|
|
|
func (r *Repository) GetTopChainPairs(ctx context.Context, timeSpan *TopStatisticsTimeSpan) ([]ChainPairDTO, error) {
|
|
|
|
|
2023-07-19 11:03:43 -07:00
|
|
|
if timeSpan == nil {
|
|
|
|
return nil, fmt.Errorf("invalid nil timeSpan")
|
|
|
|
}
|
|
|
|
|
|
|
|
var measurement string
|
|
|
|
switch *timeSpan {
|
|
|
|
case TimeSpan7Days:
|
2023-10-02 07:21:13 -07:00
|
|
|
measurement = "chain_activity_7_days_3h_v2"
|
2023-07-19 11:03:43 -07:00
|
|
|
case TimeSpan15Days:
|
2023-10-02 07:21:13 -07:00
|
|
|
measurement = "chain_activity_15_days_3h_v2"
|
2023-07-19 11:03:43 -07:00
|
|
|
case TimeSpan30Days:
|
2023-10-02 07:21:13 -07:00
|
|
|
measurement = "chain_activity_30_days_3h_v2"
|
2023-07-19 11:03:43 -07:00
|
|
|
}
|
|
|
|
|
2023-05-12 09:05:18 -07:00
|
|
|
// Submit the query to InfluxDB
|
2023-07-19 11:03:43 -07:00
|
|
|
query := fmt.Sprintf(queryTemplateTopChainPairs, r.bucket24HoursRetention, *timeSpan, measurement)
|
2023-05-12 09:05:18 -07:00
|
|
|
result, err := r.queryAPI.Query(ctx, query)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if result.Err() != nil {
|
|
|
|
return nil, result.Err()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Scan query results
|
|
|
|
type Row struct {
|
|
|
|
EmitterChain string `mapstructure:"emitter_chain"`
|
|
|
|
DestinationChain string `mapstructure:"destination_chain"`
|
|
|
|
NumberOfTransfers int64 `mapstructure:"_value"`
|
|
|
|
}
|
|
|
|
var rows []Row
|
|
|
|
for result.Next() {
|
|
|
|
var row Row
|
|
|
|
if err := mapstructure.Decode(result.Record().Values(), &row); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
rows = append(rows, row)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Convert the rows into the response model
|
2023-07-24 05:20:01 -07:00
|
|
|
var pairs []ChainPairDTO
|
2023-05-12 09:05:18 -07:00
|
|
|
for i := range rows {
|
|
|
|
|
|
|
|
// parse emitter chain
|
|
|
|
emitterChain, err := strconv.ParseUint(rows[i].EmitterChain, 10, 16)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("failed to convert emitter chain field to uint16")
|
|
|
|
}
|
|
|
|
|
|
|
|
// parse destination chain
|
|
|
|
destinationChain, err := strconv.ParseUint(rows[i].DestinationChain, 10, 16)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("failed to convert destination chain field to uint16")
|
|
|
|
}
|
|
|
|
|
|
|
|
// append the new item to the response
|
2023-07-24 05:20:01 -07:00
|
|
|
pair := ChainPairDTO{
|
2023-05-12 09:05:18 -07:00
|
|
|
EmitterChain: sdk.ChainID(emitterChain),
|
|
|
|
DestinationChain: sdk.ChainID(destinationChain),
|
|
|
|
NumberOfTransfers: fmt.Sprintf("%d", rows[i].NumberOfTransfers),
|
|
|
|
}
|
2023-07-24 05:20:01 -07:00
|
|
|
|
|
|
|
// do not include invalid chain IDs in the response
|
|
|
|
if !domain.ChainIdIsValid(pair.EmitterChain) || !domain.ChainIdIsValid(pair.DestinationChain) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
pairs = append(pairs, pair)
|
|
|
|
|
|
|
|
// max number of elements
|
|
|
|
if len(pairs) == 7 {
|
|
|
|
break
|
|
|
|
}
|
2023-05-12 09:05:18 -07:00
|
|
|
}
|
|
|
|
|
2023-07-24 05:20:01 -07:00
|
|
|
return pairs, nil
|
2023-05-12 09:05:18 -07:00
|
|
|
}
|
|
|
|
|
2023-05-10 13:39:18 -07:00
|
|
|
// convertToDecimal renders an integer amount as a decimal string with exactly
// 8 fractional digits (i.e. the input is interpreted as a fixed-point value
// scaled by 10^8).
func convertToDecimal(amount uint64) string {

	// Amounts below 1.0 only need zero-padding after the decimal point.
	if amount < 1_0000_0000 {
		return fmt.Sprintf("0.%08d", amount)
	}

	// Otherwise, split the digit string 8 places from the right.
	digits := strconv.FormatUint(amount, 10)
	cut := len(digits) - 8
	return digits[:cut] + "." + digits[cut:]
}
|
|
|
|
|
|
|
|
func (r *Repository) FindChainActivity(ctx context.Context, q *ChainActivityQuery) ([]ChainActivityResult, error) {
|
2023-06-20 06:34:20 -07:00
|
|
|
query := r.buildChainActivityQuery(q)
|
2023-03-07 11:25:42 -08:00
|
|
|
result, err := r.queryAPI.Query(ctx, query)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if result.Err() != nil {
|
|
|
|
return nil, result.Err()
|
|
|
|
}
|
|
|
|
var response []ChainActivityResult
|
|
|
|
for result.Next() {
|
|
|
|
var row ChainActivityResult
|
|
|
|
if err := mapstructure.Decode(result.Record().Values(), &row); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
response = append(response, row)
|
|
|
|
}
|
2023-07-13 08:59:37 -07:00
|
|
|
|
|
|
|
// https://github.com/wormhole-foundation/wormhole-explorer/issues/433
|
|
|
|
// filter out results with wrong chain ids
|
|
|
|
// this should be fixed in the InfluxDB
|
|
|
|
var responseWithoutWrongChainId []ChainActivityResult
|
|
|
|
for _, res := range response {
|
|
|
|
chainSourceID, err := strconv.Atoi(res.ChainSourceID)
|
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if _, ok := r.supportedChainIDs[sdk.ChainID(chainSourceID)]; !ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
chainDestinationID, err := strconv.Atoi(res.ChainDestinationID)
|
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if _, ok := r.supportedChainIDs[sdk.ChainID(chainDestinationID)]; !ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
responseWithoutWrongChainId = append(responseWithoutWrongChainId, res)
|
|
|
|
}
|
|
|
|
return responseWithoutWrongChainId, nil
|
2023-03-07 11:25:42 -08:00
|
|
|
}
|
|
|
|
|
2023-06-20 06:34:20 -07:00
|
|
|
func (r *Repository) buildChainActivityQuery(q *ChainActivityQuery) string {
|
|
|
|
|
|
|
|
var field string
|
2023-03-07 11:25:42 -08:00
|
|
|
if q.IsNotional {
|
2023-06-20 06:34:20 -07:00
|
|
|
field = "notional"
|
2023-03-07 11:25:42 -08:00
|
|
|
} else {
|
2023-06-20 06:34:20 -07:00
|
|
|
field = "count"
|
|
|
|
}
|
|
|
|
var measurement string
|
|
|
|
switch q.TimeSpan {
|
|
|
|
case ChainActivityTs7Days:
|
2023-10-02 07:21:13 -07:00
|
|
|
measurement = "chain_activity_7_days_3h_v2"
|
2023-06-20 06:34:20 -07:00
|
|
|
case ChainActivityTs30Days:
|
2023-10-02 07:21:13 -07:00
|
|
|
measurement = "chain_activity_30_days_3h_v2"
|
2023-06-20 06:34:20 -07:00
|
|
|
case ChainActivityTs90Days:
|
2023-10-02 07:21:13 -07:00
|
|
|
measurement = "chain_activity_90_days_3h_v2"
|
2023-06-20 06:34:20 -07:00
|
|
|
case ChainActivityTs1Year:
|
2023-10-02 07:21:13 -07:00
|
|
|
measurement = "chain_activity_1_year_3h_v2"
|
2023-06-20 06:34:20 -07:00
|
|
|
case ChainActivityTsAllTime:
|
2023-10-02 07:21:13 -07:00
|
|
|
measurement = "chain_activity_all_time_3h_v2"
|
2023-06-20 06:34:20 -07:00
|
|
|
default:
|
2023-10-02 07:21:13 -07:00
|
|
|
measurement = "chain_activity_7_days_3h_v2"
|
2023-03-07 11:25:42 -08:00
|
|
|
}
|
2023-06-20 06:34:20 -07:00
|
|
|
//today without hours
|
|
|
|
start := time.Now().Truncate(24 * time.Hour).UTC().Format(time.RFC3339)
|
2023-03-07 11:25:42 -08:00
|
|
|
if q.HasAppIDS() {
|
|
|
|
apps := `["` + strings.Join(q.GetAppIDs(), `","`) + `"]`
|
2023-06-20 06:34:20 -07:00
|
|
|
return fmt.Sprintf(queryTemplateChainActivityWithApps, r.bucket24HoursRetention, start, measurement, field, apps)
|
|
|
|
} else {
|
|
|
|
return fmt.Sprintf(queryTemplateChainActivity, r.bucket24HoursRetention, start, measurement, field)
|
2023-03-07 11:25:42 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-04-20 12:01:10 -07:00
|
|
|
// GetScorecards computes all dashboard scorecards concurrently and returns
// them in a single struct.
//
// Each scorecard is fetched in its own goroutine; failures are logged and the
// corresponding field is left as the empty string, so the result may be
// partially populated. The function itself never returns a non-nil error.
func (r *Repository) GetScorecards(ctx context.Context) (*Scorecards, error) {

	// This function launches one goroutine for each scorecard.
	//
	// We use a `sync.WaitGroup` to block until all goroutines are done.
	var wg sync.WaitGroup

	// Result slots, one per scorecard. Each is written by exactly one
	// goroutine and only read after wg.Wait(), so no mutex is needed.
	var messages24h, tvl, totalTxCount, totalTxVolume, txCount24h, volume24h string

	// Total messages over the last 24 hours.
	wg.Add(1)
	go func() {
		defer wg.Done()
		var err error
		messages24h, err = r.getMessages24h(ctx)
		if err != nil {
			r.logger.Error("failed to query 24h messages", zap.Error(err))
		}
	}()

	// Total value locked, from the external TVL provider.
	wg.Add(1)
	go func() {
		defer wg.Done()
		var err error
		tvl, err = r.tvl.Get(ctx)
		if err != nil {
			r.logger.Error("failed to get tvl", zap.Error(err))
		}
	}()

	// All-time transaction count.
	wg.Add(1)
	go func() {
		defer wg.Done()
		var err error
		totalTxCount, err = r.getTotalTxCount(ctx)
		if err != nil {
			r.logger.Error("failed to tx count", zap.Error(err))
		}
	}()

	// All-time transaction volume.
	wg.Add(1)
	go func() {
		defer wg.Done()
		var err error
		totalTxVolume, err = r.getTotalTxVolume(ctx)
		if err != nil {
			r.logger.Error("failed to get total tx volume", zap.Error(err))
		}
	}()

	// Transaction count over the last 24 hours.
	wg.Add(1)
	go func() {
		defer wg.Done()
		var err error
		txCount24h, err = r.getTxCount24h(ctx)
		if err != nil {
			r.logger.Error("failed to get 24h transactions", zap.Error(err))
		}
	}()

	// Volume over the last 24 hours.
	wg.Add(1)
	go func() {
		defer wg.Done()
		var err error
		volume24h, err = r.getVolume24h(ctx)
		if err != nil {
			r.logger.Error("failed to get 24h volume", zap.Error(err))
		}
	}()

	// Each of the queries synchronized by this wait group has a context timeout.
	//
	// Hence, this call to `wg.Wait()` will not block indefinitely as long as the
	// context timeouts are properly handled in each goroutine.
	wg.Wait()

	// Build the result and return
	scorecards := Scorecards{
		Messages24h:   messages24h,
		TotalTxCount:  totalTxCount,
		TotalTxVolume: totalTxVolume,
		Tvl:           tvl,
		TxCount24h:    txCount24h,
		Volume24h:     volume24h,
	}
	return &scorecards, nil
}
|
|
|
|
|
|
|
|
func (r *Repository) getTotalTxCount(ctx context.Context) (string, error) {
|
|
|
|
|
2023-05-15 11:15:12 -07:00
|
|
|
query := buildTotalTrxCountQuery(r.bucketInfiniteRetention, r.bucket30DaysRetention, time.Now())
|
2023-04-20 12:01:10 -07:00
|
|
|
result, err := r.queryAPI.Query(ctx, query)
|
|
|
|
if err != nil {
|
2023-05-15 11:15:12 -07:00
|
|
|
r.logger.Error("failed to query total tx count by portal bridge", zap.Error(err))
|
2023-04-20 12:01:10 -07:00
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
if result.Err() != nil {
|
2023-05-15 11:15:12 -07:00
|
|
|
r.logger.Error("failed to query total tx count by portal bridge result has errors", zap.Error(err))
|
2023-04-20 12:01:10 -07:00
|
|
|
return "", result.Err()
|
|
|
|
}
|
|
|
|
if !result.Next() {
|
2023-05-15 11:15:12 -07:00
|
|
|
return "", errors.New("expected at least one record in query total tx count by portal bridge result")
|
2023-04-20 12:01:10 -07:00
|
|
|
}
|
|
|
|
row := struct {
|
|
|
|
Value uint64 `mapstructure:"_value"`
|
|
|
|
}{}
|
|
|
|
if err := mapstructure.Decode(result.Record().Values(), &row); err != nil {
|
2023-05-15 11:15:12 -07:00
|
|
|
return "", fmt.Errorf("failed to decode total tx count by portal bridge query response: %w", err)
|
2023-04-20 12:01:10 -07:00
|
|
|
}
|
2023-05-15 11:15:12 -07:00
|
|
|
return fmt.Sprintf("%d", row.Value), nil
|
|
|
|
}
|
2023-04-20 12:01:10 -07:00
|
|
|
|
2023-05-15 11:15:12 -07:00
|
|
|
func (r *Repository) getTotalTxVolume(ctx context.Context) (string, error) {
|
|
|
|
|
|
|
|
query := buildTotalTrxVolumeQuery(r.bucketInfiniteRetention, r.bucket30DaysRetention, time.Now())
|
|
|
|
result, err := r.queryAPI.Query(ctx, query)
|
|
|
|
if err != nil {
|
|
|
|
r.logger.Error("failed to query total tx volume by portal bridge", zap.Error(err))
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
if result.Err() != nil {
|
|
|
|
r.logger.Error("failed to query tx volume by portal bridge result has errors", zap.Error(err))
|
|
|
|
return "", result.Err()
|
|
|
|
}
|
|
|
|
if !result.Next() {
|
|
|
|
return "", errors.New("expected at least one record in query tx volume by portal bridge result")
|
|
|
|
}
|
|
|
|
row := struct {
|
|
|
|
Value uint64 `mapstructure:"_value"`
|
|
|
|
}{}
|
|
|
|
if err := mapstructure.Decode(result.Record().Values(), &row); err != nil {
|
|
|
|
return "", fmt.Errorf("failed to decode tx volume by portal bridge query response: %w", err)
|
|
|
|
}
|
|
|
|
return convertToDecimal(row.Value), nil
|
2023-04-20 12:01:10 -07:00
|
|
|
}
|
|
|
|
|
2023-05-18 07:14:36 -07:00
|
|
|
func (r *Repository) getMessages24h(ctx context.Context) (string, error) {
|
|
|
|
|
|
|
|
// query 24h transactions
|
|
|
|
query := fmt.Sprintf(queryTemplateMessages24h, r.bucket24HoursRetention, r.bucket24HoursRetention)
|
|
|
|
result, err := r.queryAPI.Query(ctx, query)
|
|
|
|
if err != nil {
|
|
|
|
r.logger.Error("failed to query 24h messages", zap.Error(err))
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
if result.Err() != nil {
|
|
|
|
r.logger.Error("24h messages query result has errors", zap.Error(err))
|
|
|
|
return "", result.Err()
|
|
|
|
}
|
|
|
|
if !result.Next() {
|
|
|
|
return "", errors.New("expected at least one record in 24h messages query result")
|
|
|
|
}
|
|
|
|
|
|
|
|
// deserialize the row returned
|
|
|
|
row := struct {
|
|
|
|
Value uint64 `mapstructure:"_value"`
|
|
|
|
}{}
|
|
|
|
if err := mapstructure.Decode(result.Record().Values(), &row); err != nil {
|
|
|
|
return "", fmt.Errorf("failed to decode 24h message count query response: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return fmt.Sprint(row.Value), nil
|
|
|
|
}
|
|
|
|
|
2023-04-20 12:01:10 -07:00
|
|
|
func (r *Repository) getTxCount24h(ctx context.Context) (string, error) {
|
|
|
|
|
|
|
|
// query 24h transactions
|
2023-05-10 14:18:32 -07:00
|
|
|
query := fmt.Sprintf(queryTemplateTxCount24h, r.bucket30DaysRetention)
|
2023-04-20 12:01:10 -07:00
|
|
|
result, err := r.queryAPI.Query(ctx, query)
|
|
|
|
if err != nil {
|
|
|
|
r.logger.Error("failed to query 24h transactions", zap.Error(err))
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
if result.Err() != nil {
|
|
|
|
r.logger.Error("24h transactions query result has errors", zap.Error(err))
|
|
|
|
return "", result.Err()
|
|
|
|
}
|
|
|
|
if !result.Next() {
|
|
|
|
return "", errors.New("expected at least one record in 24h transactions query result")
|
|
|
|
}
|
|
|
|
|
|
|
|
// deserialize the row returned
|
|
|
|
row := struct {
|
|
|
|
Value uint64 `mapstructure:"_value"`
|
|
|
|
}{}
|
|
|
|
if err := mapstructure.Decode(result.Record().Values(), &row); err != nil {
|
|
|
|
return "", fmt.Errorf("failed to decode 24h transaction count query response: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return fmt.Sprint(row.Value), nil
|
|
|
|
}
|
|
|
|
|
2023-05-04 16:17:03 -07:00
|
|
|
func (r *Repository) getVolume24h(ctx context.Context) (string, error) {
|
|
|
|
|
|
|
|
// query 24h volume
|
2023-05-10 13:39:18 -07:00
|
|
|
query := fmt.Sprintf(queryTemplateVolume24h, r.bucketInfiniteRetention)
|
2023-05-04 16:17:03 -07:00
|
|
|
result, err := r.queryAPI.Query(ctx, query)
|
|
|
|
if err != nil {
|
|
|
|
r.logger.Error("failed to query 24h volume", zap.Error(err))
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
if result.Err() != nil {
|
|
|
|
r.logger.Error("24h volume query result has errors", zap.Error(err))
|
|
|
|
return "", result.Err()
|
|
|
|
}
|
|
|
|
if !result.Next() {
|
|
|
|
return "", errors.New("expected at least one record in 24h volume query result")
|
|
|
|
}
|
|
|
|
|
|
|
|
// deserialize the row returned
|
|
|
|
row := struct {
|
2023-05-15 11:15:12 -07:00
|
|
|
Value uint64 `mapstructure:"_value"`
|
2023-05-04 16:17:03 -07:00
|
|
|
}{}
|
|
|
|
if err := mapstructure.Decode(result.Record().Values(), &row); err != nil {
|
|
|
|
return "", fmt.Errorf("failed to decode 24h volume count query response: %w", err)
|
|
|
|
}
|
|
|
|
|
2023-05-10 13:39:18 -07:00
|
|
|
// convert the volume to a string and return
|
|
|
|
volume := convertToDecimal(row.Value)
|
2023-05-04 16:17:03 -07:00
|
|
|
return volume, nil
|
|
|
|
}
|
|
|
|
|
2023-03-07 11:25:42 -08:00
|
|
|
// GetTransactionCount get the last transactions.
|
|
|
|
func (r *Repository) GetTransactionCount(ctx context.Context, q *TransactionCountQuery) ([]TransactionCountResult, error) {
|
2023-05-10 14:18:32 -07:00
|
|
|
query := buildLastTrxQuery(r.bucket30DaysRetention, time.Now(), q)
|
2023-03-07 11:25:42 -08:00
|
|
|
result, err := r.queryAPI.Query(ctx, query)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if result.Err() != nil {
|
|
|
|
return nil, result.Err()
|
|
|
|
}
|
|
|
|
response := []TransactionCountResult{}
|
|
|
|
for result.Next() {
|
|
|
|
var row TransactionCountResult
|
|
|
|
if err := mapstructure.Decode(result.Record().Values(), &row); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
response = append(response, row)
|
|
|
|
}
|
2023-06-15 09:01:57 -07:00
|
|
|
|
|
|
|
// [QA] The transaction history graph shows the current data twice when filtered by 1W
|
|
|
|
// https://github.com/wormhole-foundation/wormhole-explorer/issues/406
|
|
|
|
for i := range response {
|
|
|
|
if i > 0 {
|
|
|
|
if q.TimeSpan == "1w" || q.TimeSpan == "1mo" {
|
|
|
|
response[i].Time = response[i].Time.AddDate(0, 0, -1)
|
|
|
|
} else if q.TimeSpan == "1d" {
|
|
|
|
response[i].Time = response[i].Time.Add(-1 * time.Hour)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-03-07 11:25:42 -08:00
|
|
|
return response, nil
|
|
|
|
}
|
|
|
|
|
2023-04-25 11:34:29 -07:00
|
|
|
func (r *Repository) FindGlobalTransactionByID(ctx context.Context, q *GlobalTransactionQuery) (*GlobalTransactionDoc, error) {
|
|
|
|
|
|
|
|
// Look up the global transaction
|
|
|
|
globalTransaction, err := r.findGlobalTransactionByID(ctx, q)
|
|
|
|
if err != nil && err != errs.ErrNotFound {
|
|
|
|
return nil, fmt.Errorf("failed to find global transaction by id: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Look up the VAA
|
|
|
|
originTx, err := r.findOriginTxFromVaa(ctx, q)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("failed to find origin tx from the `vaas` collection: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we found data in the `globalTransactions` collections, use it.
|
|
|
|
// Otherwise, we can use data from the VAA collection to create an `OriginTx` object.
|
|
|
|
//
|
|
|
|
// Usually, `OriginTx`s will only exist in the `globalTransactions` collection for Solana,
|
|
|
|
// which is gathered by the `tx-tracker` service.
|
|
|
|
// For all the other chains, we'll end up using the data found in the `vaas` collection.
|
|
|
|
var result *GlobalTransactionDoc
|
|
|
|
switch {
|
|
|
|
case globalTransaction == nil:
|
|
|
|
result = &GlobalTransactionDoc{
|
2023-05-22 13:30:21 -07:00
|
|
|
ID: q.id,
|
2023-04-25 11:34:29 -07:00
|
|
|
OriginTx: originTx,
|
|
|
|
}
|
|
|
|
case globalTransaction != nil && globalTransaction.OriginTx == nil:
|
|
|
|
result = &GlobalTransactionDoc{
|
2023-05-22 13:30:21 -07:00
|
|
|
ID: q.id,
|
2023-04-25 11:34:29 -07:00
|
|
|
OriginTx: originTx,
|
|
|
|
DestinationTx: globalTransaction.DestinationTx,
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
result = globalTransaction
|
|
|
|
}
|
|
|
|
|
|
|
|
return result, nil
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// findOriginTxFromVaa uses data from the `vaas` collection to create an `OriginTx`.
|
|
|
|
func (r *Repository) findOriginTxFromVaa(ctx context.Context, q *GlobalTransactionQuery) (*OriginTx, error) {
|
|
|
|
|
|
|
|
// query the `vaas` collection
|
|
|
|
var record struct {
|
|
|
|
Timestamp time.Time `bson:"timestamp"`
|
|
|
|
TxHash string `bson:"txHash"`
|
|
|
|
EmitterChain sdk.ChainID `bson:"emitterChain"`
|
|
|
|
}
|
|
|
|
err := r.db.
|
|
|
|
Collection("vaas").
|
|
|
|
FindOne(ctx, bson.M{"_id": q.id}).
|
|
|
|
Decode(&record)
|
|
|
|
if err != nil {
|
|
|
|
if errors.Is(err, mongo.ErrNoDocuments) {
|
|
|
|
return nil, errs.ErrNotFound
|
|
|
|
}
|
|
|
|
requestID := fmt.Sprintf("%v", ctx.Value("requestid"))
|
|
|
|
r.logger.Error("failed execute FindOne command to get global transaction from `vaas` collection",
|
|
|
|
zap.Error(err),
|
|
|
|
zap.Any("q", q),
|
|
|
|
zap.String("requestID", requestID),
|
|
|
|
)
|
|
|
|
return nil, errors.WithStack(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// populate the result and return
|
|
|
|
originTx := OriginTx{
|
2023-07-12 12:47:33 -07:00
|
|
|
Status: string(domain.SourceTxStatusConfirmed),
|
2023-04-25 11:34:29 -07:00
|
|
|
}
|
2023-07-11 12:31:45 -07:00
|
|
|
if record.EmitterChain != sdk.ChainIDSolana && record.EmitterChain != sdk.ChainIDAptos {
|
|
|
|
originTx.TxHash = record.TxHash
|
|
|
|
}
|
2023-04-25 11:34:29 -07:00
|
|
|
return &originTx, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// findGlobalTransactionByID searches the `globalTransactions` collection by ID.
|
|
|
|
func (r *Repository) findGlobalTransactionByID(ctx context.Context, q *GlobalTransactionQuery) (*GlobalTransactionDoc, error) {
|
|
|
|
|
2023-03-15 12:52:50 -07:00
|
|
|
var globalTranstaction GlobalTransactionDoc
|
2023-04-25 11:34:29 -07:00
|
|
|
err := r.db.
|
|
|
|
Collection("globalTransactions").
|
|
|
|
FindOne(ctx, bson.M{"_id": q.id}).
|
|
|
|
Decode(&globalTranstaction)
|
2023-03-15 12:52:50 -07:00
|
|
|
if err != nil {
|
|
|
|
if errors.Is(err, mongo.ErrNoDocuments) {
|
|
|
|
return nil, errs.ErrNotFound
|
|
|
|
}
|
|
|
|
requestID := fmt.Sprintf("%v", ctx.Value("requestid"))
|
2023-04-25 11:34:29 -07:00
|
|
|
r.logger.Error("failed execute FindOne command to get global transaction from `globalTransactions` collection",
|
|
|
|
zap.Error(err),
|
|
|
|
zap.Any("q", q),
|
|
|
|
zap.String("requestID", requestID),
|
|
|
|
)
|
2023-03-15 12:52:50 -07:00
|
|
|
return nil, errors.WithStack(err)
|
|
|
|
}
|
2023-04-25 11:34:29 -07:00
|
|
|
|
2023-03-15 12:52:50 -07:00
|
|
|
return &globalTranstaction, nil
|
|
|
|
}
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
|
2023-07-12 08:51:52 -07:00
|
|
|
// FindTransactionsInput is used to pass parameters to the `FindTransactions` method.
type FindTransactionsInput struct {
	// id specifies the VAA ID of the transaction to be found.
	// When empty, no ID filter is applied.
	id string
	// sort specifies whether the results should be sorted
	//
	// If set to true, the results will be sorted by descending timestamp and ID.
	// If set to false, the results will not be sorted.
	//
	// NOTE(review): when sort is true, `pagination` is dereferenced to obtain the
	// sort direction, so it is assumed to be non-nil in that case — confirm with callers.
	sort bool
	// pagination holds the skip/limit settings applied to the query; may be nil,
	// in which case no $skip/$limit stages are added.
	pagination *pagination.Pagination
}
|
|
|
|
|
|
|
|
// FindTransactions returns transactions matching a specified search criteria.
|
|
|
|
func (r *Repository) FindTransactions(
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
ctx context.Context,
|
2023-07-12 08:51:52 -07:00
|
|
|
input *FindTransactionsInput,
|
|
|
|
) ([]TransactionDto, error) {
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
|
|
|
|
// Build the aggregation pipeline
|
|
|
|
var pipeline mongo.Pipeline
|
|
|
|
{
|
|
|
|
// Specify sorting criteria
|
2023-07-12 08:51:52 -07:00
|
|
|
if input.sort {
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$sort", bson.D{
|
2023-09-26 11:02:57 -07:00
|
|
|
bson.E{"timestamp", input.pagination.GetSortInt()},
|
2023-07-12 08:51:52 -07:00
|
|
|
bson.E{"_id", -1},
|
|
|
|
}},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Filter by ID
|
|
|
|
if input.id != "" {
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$match", bson.D{{"_id", input.id}}},
|
|
|
|
})
|
|
|
|
}
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
|
|
|
|
// left outer join on the `transferPrices` collection
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$lookup", bson.D{
|
|
|
|
{"from", "transferPrices"},
|
|
|
|
{"localField", "_id"},
|
|
|
|
{"foreignField", "_id"},
|
|
|
|
{"as", "transferPrices"},
|
|
|
|
}},
|
|
|
|
})
|
|
|
|
|
|
|
|
// left outer join on the `vaaIdTxHash` collection
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$lookup", bson.D{
|
|
|
|
{"from", "vaaIdTxHash"},
|
|
|
|
{"localField", "_id"},
|
|
|
|
{"foreignField", "_id"},
|
|
|
|
{"as", "vaaIdTxHash"},
|
|
|
|
}},
|
|
|
|
})
|
|
|
|
|
|
|
|
// left outer join on the `parsedVaa` collection
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$lookup", bson.D{
|
|
|
|
{"from", "parsedVaa"},
|
|
|
|
{"localField", "_id"},
|
|
|
|
{"foreignField", "_id"},
|
|
|
|
{"as", "parsedVaa"},
|
|
|
|
}},
|
|
|
|
})
|
|
|
|
|
|
|
|
// left outer join on the `globalTransactions` collection
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$lookup", bson.D{
|
|
|
|
{"from", "globalTransactions"},
|
|
|
|
{"localField", "_id"},
|
|
|
|
{"foreignField", "_id"},
|
|
|
|
{"as", "globalTransactions"},
|
|
|
|
}},
|
|
|
|
})
|
|
|
|
|
|
|
|
// add nested fields
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$addFields", bson.D{
|
|
|
|
{"txHash", bson.M{"$arrayElemAt": []interface{}{"$vaaIdTxHash.txHash", 0}}},
|
2023-07-12 08:51:52 -07:00
|
|
|
{"payload", bson.M{"$arrayElemAt": []interface{}{"$parsedVaa.parsedPayload", 0}}},
|
|
|
|
{"standardizedProperties", bson.M{"$arrayElemAt": []interface{}{"$parsedVaa.standardizedProperties", 0}}},
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
{"symbol", bson.M{"$arrayElemAt": []interface{}{"$transferPrices.symbol", 0}}},
|
|
|
|
{"usdAmount", bson.M{"$arrayElemAt": []interface{}{"$transferPrices.usdAmount", 0}}},
|
|
|
|
{"tokenAmount", bson.M{"$arrayElemAt": []interface{}{"$transferPrices.tokenAmount", 0}}},
|
|
|
|
}},
|
|
|
|
})
|
|
|
|
|
|
|
|
// Unset unused fields
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$unset", []interface{}{"transferPrices", "vaaTxIdHash", "parsedVaa"}},
|
|
|
|
})
|
|
|
|
|
|
|
|
// Skip initial results
|
2023-07-12 08:51:52 -07:00
|
|
|
if input.pagination != nil {
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$skip", input.pagination.Skip},
|
|
|
|
})
|
|
|
|
}
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
|
|
|
|
// Limit size of results
|
2023-07-12 08:51:52 -07:00
|
|
|
if input.pagination != nil {
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$limit", input.pagination.Limit},
|
|
|
|
})
|
|
|
|
}
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// Execute the aggregation pipeline
|
|
|
|
cur, err := r.collections.vaas.Aggregate(ctx, pipeline)
|
|
|
|
if err != nil {
|
|
|
|
r.logger.Error("failed execute aggregation pipeline", zap.Error(err))
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read results from cursor
|
2023-07-12 08:51:52 -07:00
|
|
|
var documents []TransactionDto
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
err = cur.All(ctx, &documents)
|
|
|
|
if err != nil {
|
|
|
|
r.logger.Error("failed to decode cursor", zap.Error(err))
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-07-12 08:51:52 -07:00
|
|
|
return documents, nil
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// ListTransactionsByAddress returns a sorted list of transactions for a given address.
|
|
|
|
//
|
|
|
|
// Pagination is implemented using a keyset cursor pattern, based on the (timestamp, ID) pair.
|
|
|
|
func (r *Repository) ListTransactionsByAddress(
|
|
|
|
ctx context.Context,
|
2023-07-18 05:54:52 -07:00
|
|
|
address string,
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
pagination *pagination.Pagination,
|
2023-07-12 08:51:52 -07:00
|
|
|
) ([]TransactionDto, error) {
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
|
|
|
|
// Build the aggregation pipeline
|
|
|
|
var pipeline mongo.Pipeline
|
|
|
|
{
|
2023-07-18 05:54:52 -07:00
|
|
|
// filter transactions by destination address
|
2023-07-28 08:27:48 -07:00
|
|
|
{
|
|
|
|
const fieldName = "standardizedProperties.toAddress"
|
|
|
|
|
|
|
|
// If the address is non-EVM, it could be case sensitive (i.e. Solana), so we can't alter it.
|
|
|
|
var nonEvmFilter = bson.D{{fieldName, bson.M{"$eq": address}}}
|
|
|
|
|
|
|
|
// If the address is EVM, we must normalize it to the format used in the database,
|
|
|
|
// which is a 0x prefix and all lowercase characters.
|
|
|
|
var evmFilter bson.D
|
|
|
|
if utils.StartsWith0x(address) {
|
|
|
|
evmFilter = bson.D{{fieldName, bson.M{"$eq": strings.ToLower(address)}}}
|
|
|
|
} else {
|
|
|
|
evmFilter = bson.D{{fieldName, bson.M{"$eq": "0x" + strings.ToLower(address)}}}
|
|
|
|
}
|
|
|
|
|
|
|
|
pipeline = append(pipeline, bson.D{{"$match", bson.D{{"$or", bson.A{nonEvmFilter, evmFilter}}}}})
|
|
|
|
}
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
|
|
|
|
// specify sorting criteria
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$sort", bson.D{bson.E{"indexedAt", -1}}},
|
|
|
|
})
|
|
|
|
|
|
|
|
// left outer join on the `transferPrices` collection
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$lookup", bson.D{
|
|
|
|
{"from", "transferPrices"},
|
|
|
|
{"localField", "_id"},
|
|
|
|
{"foreignField", "_id"},
|
|
|
|
{"as", "transferPrices"},
|
|
|
|
}},
|
|
|
|
})
|
|
|
|
|
|
|
|
// left outer join on the `vaas` collection
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$lookup", bson.D{
|
|
|
|
{"from", "vaas"},
|
|
|
|
{"localField", "_id"},
|
|
|
|
{"foreignField", "_id"},
|
|
|
|
{"as", "vaas"},
|
|
|
|
}},
|
|
|
|
})
|
|
|
|
|
|
|
|
// left outer join on the `vaaIdTxHash` collection
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$lookup", bson.D{
|
|
|
|
{"from", "vaaIdTxHash"},
|
|
|
|
{"localField", "_id"},
|
|
|
|
{"foreignField", "_id"},
|
|
|
|
{"as", "vaaIdTxHash"},
|
|
|
|
}},
|
|
|
|
})
|
|
|
|
|
|
|
|
// left outer join on the `parsedVaa` collection
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$lookup", bson.D{
|
|
|
|
{"from", "parsedVaa"},
|
|
|
|
{"localField", "_id"},
|
|
|
|
{"foreignField", "_id"},
|
|
|
|
{"as", "parsedVaa"},
|
|
|
|
}},
|
|
|
|
})
|
|
|
|
|
|
|
|
// left outer join on the `globalTransactions` collection
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$lookup", bson.D{
|
|
|
|
{"from", "globalTransactions"},
|
|
|
|
{"localField", "_id"},
|
|
|
|
{"foreignField", "_id"},
|
|
|
|
{"as", "globalTransactions"},
|
|
|
|
}},
|
|
|
|
})
|
|
|
|
|
|
|
|
// add nested fields
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$addFields", bson.D{
|
|
|
|
{"txHash", bson.M{"$arrayElemAt": []interface{}{"$vaaIdTxHash.txHash", 0}}},
|
|
|
|
{"timestamp", bson.M{"$arrayElemAt": []interface{}{"$vaas.timestamp", 0}}},
|
2023-07-12 08:51:52 -07:00
|
|
|
{"payload", bson.M{"$arrayElemAt": []interface{}{"$parsedVaa.parsedPayload", 0}}},
|
|
|
|
{"standardizedProperties", bson.M{"$arrayElemAt": []interface{}{"$parsedVaa.standardizedProperties", 0}}},
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
{"symbol", bson.M{"$arrayElemAt": []interface{}{"$transferPrices.symbol", 0}}},
|
|
|
|
{"usdAmount", bson.M{"$arrayElemAt": []interface{}{"$transferPrices.usdAmount", 0}}},
|
|
|
|
{"tokenAmount", bson.M{"$arrayElemAt": []interface{}{"$transferPrices.tokenAmount", 0}}},
|
|
|
|
}},
|
|
|
|
})
|
|
|
|
|
2023-09-25 12:50:16 -07:00
|
|
|
// Sorting criteria
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$sort", bson.D{bson.E{"timestamp", pagination.GetSortInt()}}},
|
|
|
|
})
|
|
|
|
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
// Unset unused fields
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$unset", []interface{}{"transferPrices", "vaas", "vaaTxIdHash", "parsedVaa"}},
|
|
|
|
})
|
|
|
|
|
|
|
|
// Skip initial results
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$skip", pagination.Skip},
|
|
|
|
})
|
|
|
|
|
|
|
|
// Limit size of results
|
|
|
|
pipeline = append(pipeline, bson.D{
|
|
|
|
{"$limit", pagination.Limit},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Execute the aggregation pipeline
|
|
|
|
cur, err := r.collections.parsedVaa.Aggregate(ctx, pipeline)
|
|
|
|
if err != nil {
|
|
|
|
r.logger.Error("failed execute aggregation pipeline", zap.Error(err))
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read results from cursor
|
2023-07-12 08:51:52 -07:00
|
|
|
var documents []TransactionDto
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
err = cur.All(ctx, &documents)
|
|
|
|
if err != nil {
|
|
|
|
r.logger.Error("failed to decode cursor", zap.Error(err))
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2023-07-12 08:51:52 -07:00
|
|
|
return documents, nil
|
Add endpoint `GET /api/v1/transactions` (#388)
### Summary
Tracking issue: https://github.com/wormhole-foundation/wormhole-explorer/issues/385
This pull request implements a new endpoint, `GET /api/v1/transactions`, which will be consumed by the wormhole explorer UI.
The endpoint returns a paginated list of transactions, in which each element contains a brief overview of the transaction (ID, txHash, status, etc.).
It exposes offset-based pagination via the parameters `page` and `pageSize`. Also, results can be obtained for a specific address by using the `address` query parameter.
The response model looks like this:
```json
{
"transactions": [
{
"id": "1/5ec18c34b47c63d17ab43b07b9b2319ea5ee2d163bce2e467000174e238c8e7f/12965",
"timestamp": "2023-06-08T19:30:19Z",
"txHash": "a302c4ab2d6b9a6003951d2e91f8fdbb83cfa20f6ffb588b95ef0290aab37066",
"originChain": 1,
"status": "ongoing"
},
{
"id": "22/0000000000000000000000000000000000000000000000000000000000000001/18308",
"timestamp": "2023-06-08T19:17:14Z",
"txHash": "00000000000000000000000000000000000000000000000000000000000047e7",
"originChain": 22,
"destinationAddress": "0x00000000000000000000000067e8a40816a983fbe3294aaebd0cc2391815b86b",
"destinationChain": 5,
"tokenAmount": "0.12",
"usdAmount": "0.12012",
"symbol": "USDC",
"status": "completed"
},
...
]
}
```
### Limitations of the current implementation
1. Doesn't return the total number of results (this may result in a performance issue when we filter by address)
2. Can only filter by receiver address (we don't have sender information in the database yet)
2023-06-12 07:43:48 -07:00
|
|
|
}
|