blockstore: accurately encode entry batches as YAML

Updates blockstore dump YAML schema to reflect that
slices of shreds map to slices of entries.

Adds a new "entry_batches" wrapper list that annotates
each slice of entries with shred range and encoded size.
This commit is contained in:
Richard Patel 2022-09-11 08:56:03 +02:00
parent b902a03a05
commit d7cd878bc9
4 changed files with 86 additions and 32 deletions

View File

@ -0,0 +1,64 @@
package yaml
import (
"encoding/base64"
"encoding/json"
"github.com/certusone/radiance/pkg/blockstore"
"github.com/certusone/radiance/pkg/shred"
)
// entryBatch is a YAML-friendly version of blockstore.Entries.
type entryBatch struct {
	// Shreds lists the indexes of the data shreds that carried this batch
	// (rendered in YAML flow style, e.g. [0, 1, 2]).
	Shreds []uint32 `yaml:"shreds,flow"`
	// EncodedSize is the byte length of the raw serialized batch payload;
	// omitted from output when zero.
	EncodedSize int `yaml:"encoded_size,omitempty"`
	// Entries holds the decoded entries of this batch.
	Entries []entry `yaml:"entries"`
}
// makeEntryBatch converts a blockstore.Entries batch into its YAML-friendly
// representation. withTxs controls whether transaction bodies are included
// in each entry.
func makeEntryBatch(b *blockstore.Entries, withTxs bool) entryBatch {
	batch := entryBatch{
		Entries:     make([]entry, 0, len(b.Entries)),
		Shreds:      make([]uint32, 0, len(b.Shreds)),
		EncodedSize: len(b.Raw),
	}
	// Index-based iteration avoids copying each entry before conversion.
	for i := range b.Entries {
		batch.Entries = append(batch.Entries, makeEntry(&b.Entries[i], withTxs))
	}
	for _, s := range b.Shreds {
		batch.Shreds = append(batch.Shreds, s.CommonHeader().Index)
	}
	return batch
}
// entry is a YAML-friendly version of shred.Entry.
type entry struct {
	// NumHashes is the PoH hash count recorded for this entry.
	NumHashes uint64 `yaml:"num_hashes"`
	// Hash is the entry hash, base64 (std) encoded.
	Hash string `yaml:"hash"`
	// NumTxns is the number of transactions in the entry.
	NumTxns int `yaml:"num_txns"`
	// Txns holds JSON-round-tripped transaction bodies; nil (and thus
	// omitted) unless transaction dumping was requested.
	Txns []any `yaml:"txns,omitempty"`
}
// makeEntry converts a shred.Entry into its YAML-friendly form. When
// withTxs is true, transaction bodies are attached as dynamic values;
// otherwise only the transaction count is reported.
func makeEntry(e *shred.Entry, withTxs bool) entry {
	out := entry{
		NumHashes: e.NumHashes,
		Hash:      base64.StdEncoding.EncodeToString(e.Hash[:]),
		NumTxns:   len(e.Txns),
	}
	if !withTxs {
		return out
	}
	// Hacky and slow serializer to make txn YAML output tolerable.
	//
	// The main problem is that the YAML serializer formats byte slices as
	// arrays, whereas the JSON serializer outputs base64 strings, which is
	// what we want.
	//
	// Round-tripping each txn through JSON effectively creates a dynamic
	// data structure out of the strongly typed Txn, replacing all byte
	// slices with strings.
	out.Txns = make([]any, len(e.Txns))
	for i, txn := range e.Txns {
		// Errors are deliberately ignored: a failed round-trip leaves a
		// nil placeholder instead of aborting the dump.
		txJSON, _ := json.Marshal(txn)
		_ = json.Unmarshal(txJSON, &out.Txns[i])
	}
	return out
}

View File

@ -26,6 +26,7 @@ var (
flagSlots = flags.String("slots", "", "Slots to dump")
flagEntries = flags.Bool("entries", false, "Also dump slot entries")
flagShreds = flags.Bool("shreds", false, "Also dump shreds")
flagTxns = flags.Bool("txs", false, "Also dump transactions")
)
func init() {
@ -136,11 +137,16 @@ func dumpDataEntries(db *blockstore.DB, meta *blockstore.SlotMeta) {
return
}
fmt.Println(" entries:")
yamlEntries := make([]entryBatch, len(entries))
for i, x := range entries {
yamlEntries[i] = makeEntryBatch(&x, *flagTxns)
}
fmt.Println(" entry_batches:")
enc := newYAMLPrinter(3)
defer enc.Close()
if err := enc.Encode(entries); err != nil {
if err := enc.Encode(yamlEntries); err != nil {
panic(err.Error())
}
}

View File

@ -57,7 +57,13 @@ func sliceSortedByRange[T constraints.Ordered](list []T, start T, stop T) []T {
return list
}
func (d *DB) GetEntries(meta *SlotMeta) ([]shred.Entry, error) {
// Entries groups the decoded entries of one entry batch together with the
// raw serialized payload and the data shreds that carried them.
type Entries struct {
	// Entries are the decoded ledger entries of this batch.
	Entries []shred.Entry
	// Raw is the serialized byte payload the entries were decoded from.
	Raw []byte
	// Shreds are the data shreds that made up this batch.
	Shreds []shred.Shred
}
func (d *DB) GetEntries(meta *SlotMeta) ([]Entries, error) {
shreds, err := d.GetDataShreds(meta.Slot, 0, uint32(meta.Received))
if err != nil {
return nil, err
@ -66,8 +72,7 @@ func (d *DB) GetEntries(meta *SlotMeta) ([]shred.Entry, error) {
}
// DataShredsToEntries reassembles shreds to entries containing transactions.
func DataShredsToEntries(meta *SlotMeta, shreds []shred.Shred) ([]shred.Entry, error) {
var entries []shred.Entry
func DataShredsToEntries(meta *SlotMeta, shreds []shred.Shred) (entries []Entries, err error) {
ranges := meta.entryRanges()
for _, r := range ranges {
parts := shreds[r.startIdx : r.endIdx+1]
@ -84,7 +89,11 @@ func DataShredsToEntries(meta *SlotMeta, shreds []shred.Shred) ([]shred.Entry, e
return nil, fmt.Errorf("cannot decode entry at %d:[%d-%d]: %w",
meta.Slot, r.startIdx, r.endIdx, err)
}
entries = append(entries, subEntries.Entries...)
entries = append(entries, Entries{
Entries: subEntries.Entries,
Raw: entryBytes[:dec.Position()],
Shreds: parts,
})
}
return entries, nil
}

View File

@ -1,10 +1,6 @@
package shred
import (
"encoding/json"
"github.com/gagliardetto/solana-go"
)
import "github.com/gagliardetto/solana-go"
type Shred interface {
CommonHeader() *CommonHeader
@ -74,24 +70,3 @@ type Entry struct {
NumTxns uint64 `bin:"sizeof=Txns"`
Txns []solana.Transaction
}
// MarshalYAML implements custom YAML marshaling for Entry.
//
// Returns an anonymous struct that the YAML encoder serializes in place of
// Entry, or an error if a transaction cannot be marshaled to JSON.
func (e Entry) MarshalYAML() (any, error) {
	// Hacky and slow serializer to make YAML output tolerable
	//
	// Round-trip each txn through JSON so byte slices render as base64
	// strings rather than YAML integer arrays.
	txJSONs := make([]any, len(e.Txns))
	for i, txn := range e.Txns {
		txJSON, err := json.Marshal(txn)
		if err != nil {
			return nil, err
		}
		// Unmarshal into `any` yields a dynamic map/slice structure;
		// error deliberately ignored since the input was just produced
		// by json.Marshal.
		_ = json.Unmarshal(txJSON, &txJSONs[i])
	}
	return struct {
		NumHashes uint64 `yaml:"num_hashes"`
		Hash      string `yaml:"hash"`
		Txns      []any  `yaml:"txns"`
	}{
		NumHashes: e.NumHashes,
		Hash:      e.Hash.String(),
		Txns:      txJSONs,
	}, nil
}