Implement skip rate tracking

This commit is contained in:
Leo 2021-01-03 23:58:24 +01:00
parent a9ec76867e
commit a87b361b9a
11 changed files with 563 additions and 31 deletions

View File

@ -2,13 +2,14 @@ package main
import (
"context"
"flag"
"github.com/certusone/solana_exporter/pkg/rpc"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"log"
"net/http"
"os"
"time"
"k8s.io/klog/v2"
)
const (
@ -16,21 +17,16 @@ const (
)
var (
listenAddr = os.Getenv("LISTEN_ADDR")
solanaRPCAddr = os.Getenv("SOLANA_RPC_ADDR")
rpcAddr = flag.String("rpcURI", "", "Solana RPC URI (including protocol and path)")
addr = flag.String("addr", ":8080", "Listen address")
)
func init() {
if solanaRPCAddr == "" {
log.Fatal("Please specify SOLANA_RPC_ADDR")
}
if listenAddr == "" {
listenAddr = ":8080"
}
klog.InitFlags(nil)
}
type solanaCollector struct {
rpcClient *rpc.RpcClient
rpcClient *rpc.RPCClient
totalValidatorsDesc *prometheus.Desc
validatorActivatedStake *prometheus.Desc
@ -97,7 +93,7 @@ func (c *solanaCollector) Collect(ch chan<- prometheus.Metric) {
ctx, cancel := context.WithTimeout(context.Background(), httpTimeout)
defer cancel()
accs, err := c.rpcClient.GetVoteAccounts(ctx)
accs, err := c.rpcClient.GetVoteAccounts(ctx, rpc.CommitmentRecent)
if err != nil {
ch <- prometheus.NewInvalidMetric(c.totalValidatorsDesc, err)
ch <- prometheus.NewInvalidMetric(c.validatorActivatedStake, err)
@ -110,8 +106,19 @@ func (c *solanaCollector) Collect(ch chan<- prometheus.Metric) {
}
func main() {
collector := NewSolanaCollector(solanaRPCAddr)
flag.Parse()
if *rpcAddr == "" {
klog.Fatal("Please specify -rpcURI")
}
collector := NewSolanaCollector(*rpcAddr)
go collector.WatchSlots()
prometheus.MustRegister(collector)
http.Handle("/metrics", promhttp.Handler())
log.Fatal(http.ListenAndServe(listenAddr, nil))
klog.Infof("listening on %s", *addr)
klog.Fatal(http.ListenAndServe(*addr, nil))
}

View File

@ -0,0 +1,186 @@
package main
import (
"context"
"fmt"
"github.com/certusone/solana_exporter/pkg/rpc"
"github.com/prometheus/client_golang/prometheus"
"k8s.io/klog/v2"
"time"
)
const (
	// slotPacerSchedule is how often the slot watcher polls the node for epoch info.
	slotPacerSchedule = 1 * time.Second
)

var (
	// totalTransactionsTotal mirrors the node-reported transaction count.
	totalTransactionsTotal = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "solana_confirmed_transactions_total",
		Help: "Total number of transactions processed since genesis (max confirmation)",
	})
	// confirmedSlotHeight is the absolute slot last seen by the watcher.
	confirmedSlotHeight = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "solana_confirmed_slot_height",
		Help: "Last confirmed slot height processed by watcher routine (max confirmation)",
	})
	currentEpochNumber = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "solana_confirmed_epoch_number",
		Help: "Current epoch (max confirmation)",
	})
	epochFirstSlot = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "solana_confirmed_epoch_first_slot",
		Help: "Current epoch's first slot (max confirmation)",
	})
	epochLastSlot = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "solana_confirmed_epoch_last_slot",
		Help: "Current epoch's last slot (max confirmation)",
	})
	// leaderSlotsTotal counts leader slots per validator nodekey, labeled
	// status="valid" (block produced) or status="skipped" (see WatchSlots).
	leaderSlotsTotal = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "solana_leader_slots_total",
			Help: "Number of leader slots per leader, grouped by skip status (max confirmation)",
		},
		[]string{"status", "nodekey"})
)
// init registers the slot-watcher metrics with the default Prometheus registry.
func init() {
	// MustRegister is variadic; register everything in one call.
	prometheus.MustRegister(
		totalTransactionsTotal,
		confirmedSlotHeight,
		currentEpochNumber,
		epochFirstSlot,
		epochLastSlot,
		leaderSlotsTotal,
	)
}
// WatchSlots polls the node every slotPacerSchedule, exports epoch/slot
// gauges, and attributes each slot since the last poll to its scheduled
// leader as either "valid" (block confirmed) or "skipped". It never returns
// and is intended to run as a goroutine.
func (c *solanaCollector) WatchSlots() {
	var (
		// Current mapping of relative slot numbers to leader public keys.
		epochSlots map[int64]string
		// Current epoch number corresponding to epochSlots.
		epochNumber int64
		// Last slot number we generated ticks for.
		watermark int64
	)

	ticker := time.NewTicker(slotPacerSchedule)

	for {
		<-ticker.C

		// Get current slot height and epoch info.
		ctx, cancel := context.WithTimeout(context.Background(), httpTimeout)
		info, err := c.rpcClient.GetEpochInfo(ctx, rpc.CommitmentMax)
		if err != nil {
			// FIX: message previously read "failed to fetch info info".
			klog.Infof("failed to fetch epoch info, retrying: %v", err)
			cancel()
			continue
		}
		cancel()

		// Calculate first and last slot in epoch.
		firstSlot := info.AbsoluteSlot - info.SlotIndex
		lastSlot := firstSlot + info.SlotsInEpoch

		totalTransactionsTotal.Set(float64(info.TransactionCount))
		confirmedSlotHeight.Set(float64(info.AbsoluteSlot))
		currentEpochNumber.Set(float64(info.Epoch))
		epochFirstSlot.Set(float64(firstSlot))
		epochLastSlot.Set(float64(lastSlot))

		// Check whether we need to fetch a new leader schedule.
		if epochNumber != info.Epoch {
			klog.Infof("new epoch at slot %d: %d (previous: %d)", firstSlot, info.Epoch, epochNumber)

			epochSlots, err = c.fetchLeaderSlots(firstSlot)
			if err != nil {
				klog.Errorf("failed to request leader schedule, retrying: %v", err)
				continue
			}
			klog.V(1).Infof("%d leader slots in epoch %d", len(epochSlots), info.Epoch)

			epochNumber = info.Epoch

			// Reset watermark to current offset on new epoch (we do not backfill slots we missed at startup).
			watermark = info.SlotIndex
		} else {
			// FIX: this log line previously sat inside the new-epoch branch,
			// where it contradicted the "new epoch" log right above it.
			klog.V(1).Infof("we're still in epoch %d, not fetching leader schedule", info.Epoch)
		}

		// Nothing new to process. This also covers the iteration right after
		// an epoch switch (watermark was just reset to the current offset),
		// which previously issued a getConfirmedBlocks call with an inverted
		// range (end < start).
		if watermark == info.SlotIndex {
			klog.Infof("slot has not advanced at %d, skipping", info.AbsoluteSlot)
			continue
		}

		klog.Infof("confirmed slot %d (offset %d, +%d), epoch %d (from slot %d to %d, %d remaining)",
			info.AbsoluteSlot, info.SlotIndex, info.SlotIndex-watermark, info.Epoch, firstSlot, lastSlot, lastSlot-info.AbsoluteSlot)

		// Get list of confirmed blocks since the last request. This is totally undocumented, but the result won't
		// contain missed blocks, allowing us to figure out block production success rate.
		rangeStart := firstSlot + watermark
		rangeEnd := firstSlot + info.SlotIndex - 1

		ctx, cancel = context.WithTimeout(context.Background(), httpTimeout)
		cfm, err := c.rpcClient.GetConfirmedBlocks(ctx, rangeStart, rangeEnd)
		if err != nil {
			klog.Errorf("failed to request confirmed blocks at %d, retrying: %v", watermark, err)
			cancel()
			continue
		}
		cancel()

		klog.Infof("confirmed blocks: %d -> %d: %v", rangeStart, rangeEnd, cfm)

		// Index the confirmed block list once for O(1) membership checks,
		// instead of the previous linear scan per slot.
		confirmed := make(map[int64]struct{}, len(cfm))
		for _, s := range cfm {
			confirmed[s] = struct{}{}
		}

		// Figure out leaders for each block in range.
		for i := watermark; i < info.SlotIndex; i++ {
			leader, ok := epochSlots[i]
			abs := firstSlot + i
			if !ok {
				// This cannot happen with a well-behaved node and is a programming error in either Solana or the exporter.
				klog.Fatalf("slot %d (offset %d) missing from epoch %d leader schedule",
					abs, i, info.Epoch)
			}

			// Check if block was included in getConfirmedBlocks output, otherwise, it was skipped.
			_, present := confirmed[abs]

			var skipped, label string
			if present {
				skipped = "(valid)"
				label = "valid"
			} else {
				skipped = "(SKIPPED)"
				label = "skipped"
			}

			leaderSlotsTotal.With(prometheus.Labels{"status": label, "nodekey": leader}).Add(1)
			klog.V(1).Infof("slot %d (offset %d) with leader %s %s", abs, i, leader, skipped)
		}

		watermark = info.SlotIndex
	}
}
// fetchLeaderSlots retrieves the leader schedule for the epoch containing
// epochSlot and flattens it into a map from relative slot offset (within the
// epoch) to the leader's node public key.
func (c *solanaCollector) fetchLeaderSlots(epochSlot int64) (map[int64]string, error) {
	sch, err := c.rpcClient.GetLeaderSchedule(context.Background(), epochSlot)
	if err != nil {
		return nil, fmt.Errorf("failed to get leader schedule: %w", err)
	}

	slots := make(map[int64]string)
	// Avoid shadowing the outer `sch` with the per-key slice (was `sch` too),
	// and drop the redundant int64() conversion (offsets are already int64).
	for pk, offsets := range sch {
		for _, offset := range offsets {
			slots[offset] = pk
		}
	}
	// err is known to be nil here; return nil explicitly rather than reusing it.
	return slots, nil
}

5
go.mod
View File

@ -2,4 +2,7 @@ module github.com/certusone/solana_exporter
go 1.13
require github.com/prometheus/client_golang v1.4.0
require (
github.com/prometheus/client_golang v1.4.0
k8s.io/klog/v2 v2.4.0
)

7
go.sum
View File

@ -14,6 +14,8 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -21,6 +23,7 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
@ -74,8 +77,10 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -84,3 +89,5 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=

109
hack/rpc.http Normal file
View File

@ -0,0 +1,109 @@
// https://docs.solana.com/developing/clients/jsonrpc-api
###
POST https://tds-rpc.certus.one
Content-Type: application/json
{
"jsonrpc": "2.0",
"id": 1,
"method": "getVoteAccounts"
}
###
POST https://tds-rpc.certus.one
Content-Type: application/json
{
"jsonrpc": "2.0",
"id": 1,
"method": "getLeaderSchedule",
"params": [56252256]
}
###
POST https://tds-rpc.certus.one
Content-Type: application/json
{
"jsonrpc": "2.0",
"id": 1,
"method": "getSlotLeader"
}
###
POST https://tds-rpc.certus.one
Content-Type: application/json
{
"jsonrpc": "2.0",
"id": 1,
"method": "getEpochInfo",
"params": ["max"]
}
###
POST https://tds-rpc.certus.one
Content-Type: application/json
{
"jsonrpc": "2.0",
"id": 1,
"method": "getConfirmedBlocks",
"params": [56610214]
}
###
POST https://testnet.solana.com
Content-Type: application/json
{
"jsonrpc": "2.0",
"id": 1,
"method": "getConfirmedBlock",
"params": [56600576, "jsonParsed"]
}
###
POST https://testnet.solana.com
Content-Type: application/json
{
"jsonrpc": "2.0",
"id": 1,
"method": "getBlockTime",
"params": [56611148]
}
###
POST https://tds-rpc.certus.one
Content-Type: application/json
{
"jsonrpc": "2.0",
"id": 1,
"method": "getSlot"
}
###
POST https://tds-rpc.certus.one
Content-Type: application/json
{
"jsonrpc": "2.0",
"id": 1,
"method": "getConfirmedBlocks",
"params": [
5,
10
]
}

36
pkg/rpc/blocktime.go Normal file
View File

@ -0,0 +1,36 @@
package rpc
import (
"context"
"encoding/json"
"fmt"
"k8s.io/klog/v2"
)
type (
	// GetBlockTimeResponse is the JSON-RPC envelope for getBlockTime.
	// Result is the block time value returned by the node (see the linked
	// RPC documentation for its exact semantics).
	GetBlockTimeResponse struct {
		Result int64 `json:"result"`
		Error rpcError `json:"error"`
	}
)
// https://docs.solana.com/developing/clients/jsonrpc-api#getblocktime
// GetBlockTime asks the node for the block time of the given slot.
//
// https://docs.solana.com/developing/clients/jsonrpc-api#getblocktime
func (c *RPCClient) GetBlockTime(ctx context.Context, slot int64) (int64, error) {
	payload := formatRPCRequest("getBlockTime", []interface{}{slot})
	body, err := c.rpcRequest(ctx, payload)
	if err != nil {
		return 0, fmt.Errorf("RPC call failed: %w", err)
	}
	klog.V(2).Infof("getBlockTime response: %v", string(body))

	resp := GetBlockTimeResponse{}
	if decodeErr := json.Unmarshal(body, &resp); decodeErr != nil {
		return 0, fmt.Errorf("failed to decode response body: %w", decodeErr)
	}
	if code := resp.Error.Code; code != 0 {
		return 0, fmt.Errorf("RPC error: %d %v", code, resp.Error.Message)
	}
	return resp.Result, nil
}

View File

@ -3,18 +3,51 @@ package rpc
import (
"bytes"
"context"
"fmt"
"encoding/json"
"io"
"io/ioutil"
"k8s.io/klog/v2"
"net/http"
)
type RpcClient struct {
httpClient http.Client
rpcAddr string
type (
RPCClient struct {
httpClient http.Client
rpcAddr string
}
rpcError struct {
Message string `json:"message"`
Code int64 `json:"id"`
}
rpcRequest struct {
Version string `json:"jsonrpc"`
ID int `json:"id"`
Method string `json:"method"`
Params []interface{} `json:"params"`
}
Commitment string
)
// MarshalJSON encodes the commitment level as the JSON-RPC parameter object
// {"commitment": "<level>"}.
func (c Commitment) MarshalJSON() ([]byte, error) {
	type commitmentParam struct {
		Commitment string `json:"commitment"`
	}
	return json.Marshal(commitmentParam{Commitment: string(c)})
}
func NewRPCClient(rpcAddr string) *RpcClient {
c := &RpcClient{
const (
// Most recent block confirmed by supermajority of the cluster as having reached maximum lockout.
CommitmentMax Commitment = "max"
// Most recent block having reached maximum lockout on this node.
CommitmentRoot Commitment = "root"
// Most recent block that has been voted on by supermajority of the cluster (optimistic confirmation).
CommitmentSingleGossiper Commitment = "singleGossip"
// The node will query its most recent block. Note that the block may not be complete.
CommitmentRecent Commitment = "recent"
)
func NewRPCClient(rpcAddr string) *RPCClient {
c := &RPCClient{
httpClient: http.Client{},
rpcAddr: rpcAddr,
}
@ -22,8 +55,25 @@ func NewRPCClient(rpcAddr string) *RpcClient {
return c
}
func (c *RpcClient) rpcRequest(ctx context.Context, body []byte) ([]byte, error) {
req, err := http.NewRequestWithContext(ctx, "POST", c.rpcAddr, bytes.NewBuffer(body))
// formatRPCRequest serializes a JSON-RPC 2.0 call for the given method and
// params into a reader suitable for an HTTP request body.
func formatRPCRequest(method string, params []interface{}) io.Reader {
	request := rpcRequest{
		Version: "2.0",
		ID:      1,
		Method:  method,
		Params:  params,
	}

	payload, err := json.Marshal(&request)
	if err != nil {
		// Marshaling this plain struct cannot fail at runtime; treat any
		// failure as a programming error.
		panic(err)
	}
	klog.V(2).Infof("jsonrpc request: %s", string(payload))

	return bytes.NewReader(payload)
}
func (c *RPCClient) rpcRequest(ctx context.Context, data io.Reader) ([]byte, error) {
req, err := http.NewRequestWithContext(ctx, "POST", c.rpcAddr, data)
if err != nil {
panic(err)
}
@ -31,13 +81,13 @@ func (c *RpcClient) rpcRequest(ctx context.Context, body []byte) ([]byte, error)
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, fmt.Errorf("RPC call failed: %w", err)
return nil, err
}
defer resp.Body.Close()
body, err = ioutil.ReadAll(resp.Body)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response: %w", err)
return nil, err
}
return body, nil

View File

@ -0,0 +1,36 @@
package rpc
import (
"context"
"encoding/json"
"fmt"
"k8s.io/klog/v2"
)
type (
	// GetConfirmedBlocksResponse is the JSON-RPC envelope for
	// getConfirmedBlocks. Result lists the slots in the queried range for
	// which a confirmed block exists.
	GetConfirmedBlocksResponse struct {
		Result []int64 `json:"result"`
		Error rpcError `json:"error"`
	}
)
// https://docs.solana.com/developing/clients/jsonrpc-api#getconfirmedblocks
// GetConfirmedBlocks returns the slots in [startSlot, endSlot] that have a
// confirmed block; slots absent from the result were skipped.
//
// https://docs.solana.com/developing/clients/jsonrpc-api#getconfirmedblocks
func (c *RPCClient) GetConfirmedBlocks(ctx context.Context, startSlot, endSlot int64) ([]int64, error) {
	body, err := c.rpcRequest(ctx, formatRPCRequest("getConfirmedBlocks", []interface{}{startSlot, endSlot}))
	if err != nil {
		return nil, fmt.Errorf("RPC call failed: %w", err)
	}

	// FIX: log line was mislabeled "getBlockTime response" (copy-paste).
	klog.V(2).Infof("getConfirmedBlocks response: %v", string(body))

	var resp GetConfirmedBlocksResponse
	if err = json.Unmarshal(body, &resp); err != nil {
		return nil, fmt.Errorf("failed to decode response body: %w", err)
	}

	if resp.Error.Code != 0 {
		return nil, fmt.Errorf("RPC error: %d %v", resp.Error.Code, resp.Error.Message)
	}

	return resp.Result, nil
}

51
pkg/rpc/epochinfo.go Normal file
View File

@ -0,0 +1,51 @@
package rpc
import (
"context"
"encoding/json"
"fmt"
"k8s.io/klog/v2"
)
type (
	// EpochInfo is the getEpochInfo result payload.
	EpochInfo struct {
		// Current absolute slot in epoch
		AbsoluteSlot int64 `json:"absoluteSlot"`
		// Current block height
		BlockHeight int64 `json:"blockHeight"`
		// Current epoch number
		Epoch int64 `json:"epoch"`
		// Current slot relative to the start of the current epoch
		SlotIndex int64 `json:"slotIndex"`
		// Number of slots in this epoch
		SlotsInEpoch int64 `json:"slotsInEpoch"`
		// Transaction count reported by the node — presumably the total since
		// genesis; TODO confirm semantics against the RPC documentation.
		TransactionCount int64 `json:"transactionCount"`
	}

	// GetEpochInfoResponse is the JSON-RPC envelope for getEpochInfo.
	GetEpochInfoResponse struct {
		Result EpochInfo `json:"result"`
		Error rpcError `json:"error"`
	}
)
// https://docs.solana.com/developing/clients/jsonrpc-api#getepochinfo
// GetEpochInfo fetches epoch progress information from the node at the
// requested commitment level.
//
// https://docs.solana.com/developing/clients/jsonrpc-api#getepochinfo
func (c *RPCClient) GetEpochInfo(ctx context.Context, commitment Commitment) (*EpochInfo, error) {
	payload := formatRPCRequest("getEpochInfo", []interface{}{commitment})
	body, err := c.rpcRequest(ctx, payload)
	if err != nil {
		return nil, fmt.Errorf("RPC call failed: %w", err)
	}
	klog.V(2).Infof("epoch info response: %v", string(body))

	resp := GetEpochInfoResponse{}
	if decodeErr := json.Unmarshal(body, &resp); decodeErr != nil {
		return nil, fmt.Errorf("failed to decode response body: %w", decodeErr)
	}
	if code := resp.Error.Code; code != 0 {
		return nil, fmt.Errorf("RPC error: %d %v", code, resp.Error.Message)
	}
	return &resp.Result, nil
}

38
pkg/rpc/leaderschedule.go Normal file
View File

@ -0,0 +1,38 @@
package rpc
import (
"context"
"encoding/json"
"fmt"
"k8s.io/klog/v2"
)
type (
	// LeaderSchedule maps a validator node public key to the slot offsets
	// (relative to the epoch's first slot) that validator leads.
	LeaderSchedule map[string][]int64

	// GetLeaderScheduleResponse is the JSON-RPC envelope for getLeaderSchedule.
	GetLeaderScheduleResponse struct {
		Result LeaderSchedule `json:"result"`
		Error rpcError `json:"error"`
	}
)
// https://docs.solana.com/developing/clients/jsonrpc-api#getleaderschedule
// GetLeaderSchedule fetches the leader schedule for the epoch that contains
// epochSlot.
//
// https://docs.solana.com/developing/clients/jsonrpc-api#getleaderschedule
func (c *RPCClient) GetLeaderSchedule(ctx context.Context, epochSlot int64) (LeaderSchedule, error) {
	payload := formatRPCRequest("getLeaderSchedule", []interface{}{epochSlot})
	body, err := c.rpcRequest(ctx, payload)
	if err != nil {
		return nil, fmt.Errorf("RPC call failed: %w", err)
	}
	klog.V(3).Infof("getLeaderSchedule response: %v", string(body))

	resp := GetLeaderScheduleResponse{}
	if decodeErr := json.Unmarshal(body, &resp); decodeErr != nil {
		return nil, fmt.Errorf("failed to decode response body: %w", decodeErr)
	}
	if code := resp.Error.Code; code != 0 {
		return nil, fmt.Errorf("RPC error: %d %v", code, resp.Error.Message)
	}
	return resp.Result, nil
}

View File

@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"k8s.io/klog/v2"
)
type (
@ -23,19 +24,27 @@ type (
Current []VoteAccount `json:"current"`
Delinquent []VoteAccount `json:"delinquent"`
} `json:"result"`
Error rpcError `json:"error"`
}
)
func (c *RpcClient) GetVoteAccounts(ctx context.Context) (*GetVoteAccountsResponse, error) {
body, err := c.rpcRequest(ctx, []byte(`{"jsonrpc":"2.0","id":1, "method":"getVoteAccounts", "params":[{"commitment":"recent"}]}`))
// https://docs.solana.com/developing/clients/jsonrpc-api#getvoteaccounts
func (c *RPCClient) GetVoteAccounts(ctx context.Context, commitment Commitment) (*GetVoteAccountsResponse, error) {
body, err := c.rpcRequest(ctx, formatRPCRequest("getVoteAccounts", []interface{}{commitment}))
if err != nil {
return nil, fmt.Errorf("RPC call failed: %w", err)
}
var voteAccounts GetVoteAccountsResponse
if err = json.Unmarshal(body, &voteAccounts); err != nil {
klog.V(3).Infof("getVoteAccounts response: %v", string(body))
var resp GetVoteAccountsResponse
if err = json.Unmarshal(body, &resp); err != nil {
return nil, fmt.Errorf("failed to decode response body: %w", err)
}
return &voteAccounts, nil
if resp.Error.Code != 0 {
return nil, fmt.Errorf("RPC error: %d %v", resp.Error.Code, resp.Error.Message)
}
return &resp, nil
}