Added configurable log levels

This commit is contained in:
Matt Johnstone 2024-10-25 11:50:16 +02:00
parent 888d065acf
commit c9fabdecd4
No known key found for this signature in database
GPG Key ID: BE985FBB9BE7D3BB
5 changed files with 70 additions and 8 deletions

View File

@ -3,6 +3,7 @@ package main
import (
"flag"
"fmt"
"github.com/asymmetric-research/solana_exporter/pkg/slog"
"time"
)
@ -38,6 +39,17 @@ func NewExporterConfig(
comprehensiveSlotTracking bool,
monitorBlockSizes bool,
) *ExporterConfig {
logger := slog.Get()
logger.Infow(
"Setting up export config with ",
"httpTimeout", httpTimeout,
"rpcUrl", rpcUrl,
"listenAddress", listenAddress,
"nodeKeys", nodeKeys,
"balanceAddresses", balanceAddresses,
"comprehensiveSlotTracking", comprehensiveSlotTracking,
"monitorBlockSizes", monitorBlockSizes,
)
return &ExporterConfig{
HttpTimeout: time.Duration(httpTimeout) * time.Second,
RpcUrl: rpcUrl,

View File

@ -26,6 +26,7 @@ func (c *GaugeDesc) MustNewConstMetric(value float64, labels ...string) promethe
if len(labels) != len(c.VariableLabels) {
logger.Fatalf("Provided labels (%v) do not match %s labels (%v)", labels, c.Name, c.VariableLabels)
}
logger.Debugf("Emitting %v to %s(%v)", value, labels, c.Name)
return prometheus.MustNewConstMetric(c.Desc, prometheus.GaugeValue, value, labels...)
}

View File

@ -149,6 +149,7 @@ func (c *SolanaCollector) Describe(ch chan<- *prometheus.Desc) {
}
func (c *SolanaCollector) collectVoteAccounts(ctx context.Context, ch chan<- prometheus.Metric) {
c.logger.Info("Collecting vote accounts...")
voteAccounts, err := c.rpcClient.GetVoteAccounts(ctx, rpc.CommitmentConfirmed, nil)
if err != nil {
c.logger.Errorf("failed to get vote accounts: %v", err)
@ -176,11 +177,13 @@ func (c *SolanaCollector) collectVoteAccounts(ctx context.Context, ch chan<- pro
for _, account := range voteAccounts.Delinquent {
ch <- c.ValidatorDelinquent.MustNewConstMetric(1, account.VotePubkey, account.NodePubkey)
}
c.logger.Info("Vote accounts collected.")
}
func (c *SolanaCollector) collectVersion(ctx context.Context, ch chan<- prometheus.Metric) {
c.logger.Info("Collecting version...")
version, err := c.rpcClient.GetVersion(ctx)
if err != nil {
c.logger.Errorf("failed to get version: %v", err)
ch <- c.NodeVersion.NewInvalidMetric(err)
@ -188,10 +191,11 @@ func (c *SolanaCollector) collectVersion(ctx context.Context, ch chan<- promethe
}
ch <- c.NodeVersion.MustNewConstMetric(1, version)
c.logger.Info("Version collected.")
}
func (c *SolanaCollector) collectMinimumLedgerSlot(ctx context.Context, ch chan<- prometheus.Metric) {
c.logger.Info("Collecting minimum ledger slot...")
slot, err := c.rpcClient.GetMinimumLedgerSlot(ctx)
if err != nil {
c.logger.Errorf("failed to get minimum lidger slot: %v", err)
ch <- c.NodeMinimumLedgerSlot.NewInvalidMetric(err)
@ -199,10 +203,11 @@ func (c *SolanaCollector) collectMinimumLedgerSlot(ctx context.Context, ch chan<
}
ch <- c.NodeMinimumLedgerSlot.MustNewConstMetric(float64(*slot), c.identity)
c.logger.Info("Minimum ledger slot collected.")
}
func (c *SolanaCollector) collectFirstAvailableBlock(ctx context.Context, ch chan<- prometheus.Metric) {
c.logger.Info("Collecting first available block...")
block, err := c.rpcClient.GetFirstAvailableBlock(ctx)
if err != nil {
c.logger.Errorf("failed to get first available block: %v", err)
ch <- c.NodeFirstAvailableBlock.NewInvalidMetric(err)
@ -210,9 +215,11 @@ func (c *SolanaCollector) collectFirstAvailableBlock(ctx context.Context, ch cha
}
ch <- c.NodeFirstAvailableBlock.MustNewConstMetric(float64(*block), c.identity)
c.logger.Info("First available block collected.")
}
func (c *SolanaCollector) collectBalances(ctx context.Context, ch chan<- prometheus.Metric) {
c.logger.Info("Collecting balances...")
balances, err := FetchBalances(ctx, c.rpcClient, c.balanceAddresses)
if err != nil {
c.logger.Errorf("failed to get balances: %v", err)
@ -223,9 +230,11 @@ func (c *SolanaCollector) collectBalances(ctx context.Context, ch chan<- prometh
for address, balance := range balances {
ch <- c.AccountBalances.MustNewConstMetric(balance, address)
}
c.logger.Info("Balances collected.")
}
func (c *SolanaCollector) collectHealth(ctx context.Context, ch chan<- prometheus.Metric) {
c.logger.Info("Collecting health...")
var (
isHealthy = 1
numSlotsBehind int64
@ -260,11 +269,12 @@ func (c *SolanaCollector) collectHealth(ctx context.Context, ch chan<- prometheu
ch <- c.NodeIsHealthy.MustNewConstMetric(float64(isHealthy), c.identity)
ch <- c.NodeNumSlotsBehind.MustNewConstMetric(float64(numSlotsBehind), c.identity)
c.logger.Info("Health collected.")
return
}
func (c *SolanaCollector) Collect(ch chan<- prometheus.Metric) {
c.logger.Info("========== BEGIN COLLECTION ==========")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@ -274,6 +284,8 @@ func (c *SolanaCollector) Collect(ch chan<- prometheus.Metric) {
c.collectHealth(ctx, ch)
c.collectMinimumLedgerSlot(ctx, ch)
c.collectFirstAvailableBlock(ctx, ch)
c.logger.Info("=========== END COLLECTION ===========")
}
func main() {

View File

@ -63,6 +63,14 @@ func NewSlotWatcher(
monitorBlockSizes bool,
) *SlotWatcher {
logger := slog.Get()
logger.Infow(
"Creating slot watcher with ",
"nodekeys", nodekeys,
"votekeys", votekeys,
"identity", identity,
"comprehensiveSlotTracking", comprehensiveSlotTracking,
"monitorBlockSizes", monitorBlockSizes,
)
watcher := SlotWatcher{
client: client,
logger: logger,
@ -141,7 +149,8 @@ func NewSlotWatcher(
[]string{IdentityLabel},
),
}
// register:
// register
logger.Info("Registering slot watcher metrics:")
for _, collector := range []prometheus.Collector{
watcher.TotalTransactionsMetric,
watcher.SlotHeightMetric,
@ -174,7 +183,7 @@ func (c *SlotWatcher) WatchSlots(ctx context.Context, pace time.Duration) {
ticker := time.NewTicker(pace)
defer ticker.Stop()
c.logger.Infof("Starting slot watcher")
c.logger.Infof("Starting slot watcher, running every %v", pace)
for {
select {
@ -281,6 +290,7 @@ func (c *SlotWatcher) trackEpoch(ctx context.Context, epoch *rpc.EpochInfo) {
// closeCurrentEpoch finishes bookkeeping for the epoch we are leaving: it
// advances the slot watermark to the last slot we saw in the old epoch, then
// hands tracking over to the new epoch via trackEpoch.
func (c *SlotWatcher) closeCurrentEpoch(ctx context.Context, newEpoch *rpc.EpochInfo) {
	c.logger.Infof("Closing current epoch %v, moving into epoch %v", c.currentEpoch, newEpoch.Epoch)

	// Flush any slots still pending in the outgoing epoch before switching over.
	c.moveSlotWatermark(ctx, c.lastSlot)
	c.trackEpoch(ctx, newEpoch)
}

View File

@ -1,8 +1,11 @@
package slog
import (
"fmt"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"os"
"strings"
)
var log *zap.SugaredLogger
@ -11,10 +14,14 @@ var log *zap.SugaredLogger
// Init builds the package-level sugared logger. It uses zap's production
// defaults with ISO8601 timestamps, and sets the minimum level from the
// LOG_LEVEL environment variable (defaulting to info). It panics if the
// logger cannot be constructed, since nothing can run without logging.
func Init() {
	config := zap.NewProductionConfig()
	// Use human-readable ISO8601 timestamps instead of epoch floats.
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	config.Level = zap.NewAtomicLevelAt(getEnvLogLevel())
	logger, err := config.Build()
	if err != nil {
		// Wrap with %w so callers recovering the panic can errors.Unwrap it.
		panic(fmt.Errorf("error initializing logger: %w", err))
	}
	log = logger.Sugar()
}
@ -27,3 +34,23 @@ func Get() *zap.SugaredLogger {
// Sync flushes any buffered log entries from the package-level logger.
// Callers should invoke it (typically via defer) before process exit.
func Sync() error {
return log.Sync()
}
// getEnvLogLevel maps the LOG_LEVEL environment variable to a zapcore.Level.
// Matching is case-insensitive. When the variable is unset it silently
// defaults to info; an unrecognised value also defaults to info but prints a
// warning (to stdout, since the logger is not yet configured at this point).
func getEnvLogLevel() zapcore.Level {
	level, ok := os.LookupEnv("LOG_LEVEL")
	if !ok {
		return zapcore.InfoLevel
	}
	switch strings.ToLower(level) {
	case "debug":
		return zapcore.DebugLevel
	case "info":
		return zapcore.InfoLevel
	case "warn":
		return zapcore.WarnLevel
	case "error":
		return zapcore.ErrorLevel
	default:
		// fmt.Printf replaces the original fmt.Println(fmt.Sprintf(...)); output is identical.
		fmt.Printf("Unrecognised 'LOG_LEVEL' environment variable '%s', using 'info'\n", level)
		return zapcore.InfoLevel
	}
}