package consumer

import (
	"context"
	"fmt"
	"time"

	"github.com/pkg/errors"
	"github.com/wormhole-foundation/wormhole-explorer/api/handlers/vaa"
	"github.com/wormhole-foundation/wormhole-explorer/common/domain"
	"github.com/wormhole-foundation/wormhole-explorer/txtracker/chains"
	sdk "github.com/wormhole-foundation/wormhole/sdk/vaa"
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
	"go.uber.org/zap"
)

// Repository exposes operations over the `globalTransactions` collection.
type Repository struct {
	logger             *zap.Logger
	globalTransactions *mongo.Collection
	vaas               *mongo.Collection
}

// NewRepository creates a new repository.
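//
// A minimal usage sketch, assuming an existing *mongo.Client (the database
// name below is illustrative):
//
//	db := client.Database("wormhole-explorer")
//	repo := NewRepository(logger, db)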
func NewRepository(logger *zap.Logger, db *mongo.Database) *Repository {
	r := Repository{
		logger:             logger,
		globalTransactions: db.Collection("globalTransactions"),
		vaas:               db.Collection("vaas"),
	}
	return &r
}

// UpsertDocumentParams contains the parameters for the UpsertDocument method.
type UpsertDocumentParams struct {
	VaaId    string
	ChainId  sdk.ChainID
	TxHash   string
	TxDetail *chains.TxDetail
	TxStatus domain.SourceTxStatus
}
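
// UpsertDocument writes the source transaction (`originTx`) section of a
// document in the `globalTransactions` collection, creating the document if
// it doesn't already exist.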
func (r *Repository) UpsertDocument(ctx context.Context, params *UpsertDocumentParams) error {

	fields := bson.D{
		{Key: "status", Value: params.TxStatus},
	}

	if params.TxDetail != nil {
		fields = append(fields, primitive.E{Key: "nativeTxHash", Value: params.TxDetail.NativeTxHash})
	}

	update := bson.D{
		{
			Key: "$set",
			Value: bson.D{
				{
					Key:   "originTx",
					Value: fields,
				},
			},
		},
	}
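
	// SetUpsert(true) makes UpdateByID insert a new document when no document
	// with the given _id exists, instead of matching zero documents.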
	opts := options.Update().SetUpsert(true)

	_, err := r.globalTransactions.UpdateByID(ctx, params.VaaId, update, opts)
	if err != nil {
		return fmt.Errorf("failed to upsert source tx information: %w", err)
	}

	return nil
}

// CountDocumentsByTimeRange returns the number of documents that match the given time range.
func (r *Repository) CountDocumentsByTimeRange(
	ctx context.Context,
	timeAfter time.Time,
	timeBefore time.Time,
) (uint64, error) {

	// Build the aggregation pipeline
	var pipeline mongo.Pipeline
	{
		// filter by time range
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{
				{"timestamp", bson.D{{"$gte", timeAfter}}},
			}},
		})
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{
				{"timestamp", bson.D{{"$lte", timeBefore}}},
			}},
		})

		// Count the number of results
		pipeline = append(pipeline, bson.D{
			{"$count", "numDocuments"},
		})
	}

	// Execute the aggregation pipeline
	cur, err := r.vaas.Aggregate(ctx, pipeline)
	if err != nil {
		r.logger.Error("failed to execute aggregation pipeline", zap.Error(err))
		return 0, err
	}

	// Read results from cursor
	var results []struct {
		NumDocuments uint64 `bson:"numDocuments"`
	}
	err = cur.All(ctx, &results)
	if err != nil {
		r.logger.Error("failed to decode cursor", zap.Error(err))
		return 0, err
	}
	if len(results) == 0 {
		return 0, nil
	}
	if len(results) > 1 {
		r.logger.Error("too many results", zap.Int("numResults", len(results)))
		return 0, fmt.Errorf("expected at most one result from aggregation pipeline, found %d", len(results))
	}

	return results[0].NumDocuments, nil
}

// CountIncompleteDocuments returns the number of documents that have destTx data, but don't have sourceTx data.
func (r *Repository) CountIncompleteDocuments(ctx context.Context) (uint64, error) {

	// Build the aggregation pipeline
	var pipeline mongo.Pipeline
	{
		// Look up transactions that either:
		// 1. have not been processed
		// 2. have been processed, but encountered an internal error
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{
				{"$or", bson.A{
					bson.D{{"originTx", bson.D{{"$exists", false}}}},
					bson.D{{"originTx.status", bson.M{"$eq": domain.SourceTxStatusInternalError}}},
				}},
			}},
		})

		// Count the number of results
		pipeline = append(pipeline, bson.D{
			{"$count", "numDocuments"},
		})
	}

	// Execute the aggregation pipeline
	cur, err := r.globalTransactions.Aggregate(ctx, pipeline)
	if err != nil {
		r.logger.Error("failed to execute aggregation pipeline", zap.Error(err))
		return 0, err
	}

	// Read results from cursor
	var results []struct {
		NumDocuments uint64 `bson:"numDocuments"`
	}
	err = cur.All(ctx, &results)
	if err != nil {
		r.logger.Error("failed to decode cursor", zap.Error(err))
		return 0, err
	}
	if len(results) == 0 {
		return 0, nil
	}
	if len(results) > 1 {
		r.logger.Error("too many results", zap.Int("numResults", len(results)))
		return 0, fmt.Errorf("expected at most one result from aggregation pipeline, found %d", len(results))
	}

	return results[0].NumDocuments, nil
}
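
// GlobalTransaction is the result shape returned by the queries below: a
// document ID paired with the VAA documents associated with it.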
type GlobalTransaction struct {
	Id   string       `bson:"_id"`
	Vaas []vaa.VaaDoc `bson:"vaas"`
}

// GetDocumentsByTimeRange returns a batch of documents within the given time range,
// sorted by timestamp (descending) and _id (ascending).
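//
// Results are paged with a keyset cursor: pass the _id and timestamp of the
// last document from the previous page as lastId and lastTimestamp (nil on
// the first call) to fetch the next page.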
func (r *Repository) GetDocumentsByTimeRange(
	ctx context.Context,
	lastId string,
	lastTimestamp *time.Time,
	limit uint,
	timeAfter time.Time,
	timeBefore time.Time,
) ([]GlobalTransaction, error) {

	// Build the aggregation pipeline
	var pipeline mongo.Pipeline
	{
		// Sort by (timestamp desc, _id asc) so that results can be paged with a keyset cursor
		pipeline = append(pipeline, bson.D{
			{"$sort", bson.D{
				bson.E{"timestamp", -1},
				bson.E{"_id", 1},
			}},
		})

		// filter out already processed documents
		//
		// We use the (timestamp, _id) pair as a pagination cursor
		if lastTimestamp != nil {
			pipeline = append(pipeline, bson.D{
				{"$match", bson.D{
					{"$or", bson.A{
						bson.D{{"timestamp", bson.M{"$lt": *lastTimestamp}}},
						bson.D{{"$and", bson.A{
							bson.D{{"timestamp", bson.M{"$eq": *lastTimestamp}}},
							bson.D{{"_id", bson.M{"$gt": lastId}}},
						}}},
					}},
				}},
			})
		}

		// filter by time range
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{
				{"timestamp", bson.D{{"$gte", timeAfter}}},
			}},
		})
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{
				{"timestamp", bson.D{{"$lte", timeBefore}}},
			}},
		})

		// Limit size of results
		pipeline = append(pipeline, bson.D{
			{"$limit", limit},
		})
	}

	// Execute the aggregation pipeline
	cur, err := r.vaas.Aggregate(ctx, pipeline)
	if err != nil {
		r.logger.Error("failed to execute aggregation pipeline", zap.Error(err))
		return nil, errors.WithStack(err)
	}

	// Read results from cursor
	var documents []vaa.VaaDoc
	err = cur.All(ctx, &documents)
	if err != nil {
		r.logger.Error("failed to decode cursor", zap.Error(err))
		return nil, errors.WithStack(err)
	}

	// Build the result
	var globalTransactions []GlobalTransaction
	for i := range documents {
		globalTransaction := GlobalTransaction{
			Id:   documents[i].ID,
			Vaas: []vaa.VaaDoc{documents[i]},
		}
		globalTransactions = append(globalTransactions, globalTransaction)
	}

	return globalTransactions, nil
}

// GetIncompleteDocuments returns a batch of documents that are missing source
// transaction (originTx) data, joined with their associated VAAs.
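//
// Note: lastTimestamp is currently unused; pagination in this query relies on
// the _id field only.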
func (r *Repository) GetIncompleteDocuments(
	ctx context.Context,
	lastId string,
	lastTimestamp *time.Time,
	limit uint,
) ([]GlobalTransaction, error) {

	// Build the aggregation pipeline
	var pipeline mongo.Pipeline
	{
		// Specify sorting criteria
		pipeline = append(pipeline, bson.D{
			{"$sort", bson.D{bson.E{"_id", 1}}},
		})

		// filter out already processed documents
		//
		// We use the _id field as a pagination cursor
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{{"_id", bson.M{"$gt": lastId}}}},
		})

		// Look up transactions that either:
		// 1. have not been processed
		// 2. have been processed, but encountered an internal error
		pipeline = append(pipeline, bson.D{
			{"$match", bson.D{
				{"$or", bson.A{
					bson.D{{"originTx", bson.D{{"$exists", false}}}},
					bson.D{{"originTx.status", bson.M{"$eq": domain.SourceTxStatusInternalError}}},
				}},
			}},
		})

		// Left join on the VAA collection
		pipeline = append(pipeline, bson.D{
			{"$lookup", bson.D{
				{"from", "vaas"},
				{"localField", "_id"},
				{"foreignField", "_id"},
				{"as", "vaas"},
			}},
		})

		// Limit size of results
		pipeline = append(pipeline, bson.D{
			{"$limit", limit},
		})
	}

	// Execute the aggregation pipeline
	cur, err := r.globalTransactions.Aggregate(ctx, pipeline)
	if err != nil {
		r.logger.Error("failed to execute aggregation pipeline", zap.Error(err))
		return nil, errors.WithStack(err)
	}

	// Read results from cursor
	var documents []GlobalTransaction
	err = cur.All(ctx, &documents)
	if err != nil {
		r.logger.Error("failed to decode cursor", zap.Error(err))
		return nil, errors.WithStack(err)
	}

	return documents, nil
}