feat(orm): add ORM Table and Indexes (#10670)
## Description

Closes: #10729

Includes:

* table, auto-increment table, and singleton `Table` implementations
* primary key, index and unique index `Index` implementations
* store wrappers based on tm-db but that could be retargeted to the new ADR 040 db which separates index and commitment stores, with a debug wrapper
* streaming JSON import and export
* full logical decoding (and encoding)

---

### Author Checklist

*All items are required. Please add a note to the item if the item is not applicable and please add links to any relevant follow up issues.*

I have...

- [x] included the correct [type prefix](https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json) in the PR title
- [ ] added `!` to the type prefix if API or client breaking change
- [x] targeted the correct branch (see [PR Targeting](https://github.com/cosmos/cosmos-sdk/blob/master/CONTRIBUTING.md#pr-targeting))
- [x] provided a link to the relevant issue or specification
- [ ] followed the guidelines for [building modules](https://github.com/cosmos/cosmos-sdk/blob/master/docs/building-modules)
- [x] included the necessary unit and integration [tests](https://github.com/cosmos/cosmos-sdk/blob/master/CONTRIBUTING.md#testing)
- [ ] added a changelog entry to `CHANGELOG.md`
- [x] included comments for [documenting Go code](https://blog.golang.org/godoc)
- [x] updated the relevant documentation or specification
- [x] reviewed "Files changed" and left comments if necessary
- [ ] confirmed all CI checks have passed

### Reviewers Checklist

*All items are required. Please add a note if the item is not applicable and please add your handle next to the items reviewed if you only reviewed selected items.*

I have...

- [ ] confirmed the correct [type prefix](https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json) in the PR title
- [ ] confirmed `!` in the type prefix if API or client breaking change
- [ ] confirmed all author checklist items have been addressed
- [ ] reviewed state machine logic
- [ ] reviewed API design and naming
- [ ] reviewed documentation is accurate
- [ ] reviewed tests and test coverage
- [ ] manually tested (if applicable)
Parent: 033e5f3076
Commit: 531bf50845
@@ -2,13 +2,14 @@ package groupv1beta1
 
 import (
     fmt "fmt"
+    io "io"
+    reflect "reflect"
+    sync "sync"
+
     runtime "github.com/cosmos/cosmos-proto/runtime"
     protoreflect "google.golang.org/protobuf/reflect/protoreflect"
     protoiface "google.golang.org/protobuf/runtime/protoiface"
     protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-    io "io"
-    reflect "reflect"
-    sync "sync"
 )
 
 var _ protoreflect.List = (*_GenesisState_2_list)(nil)
@@ -0,0 +1,42 @@
+package encodeutil
+
+import (
+    "bytes"
+    "encoding/binary"
+    "io"
+
+    "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// SkipPrefix skips the provided prefix in the reader or returns an error.
+// This is used for efficient logical decoding of keys.
+func SkipPrefix(r *bytes.Reader, prefix []byte) error {
+    n := len(prefix)
+    if n > 0 {
+        // we skip checking the prefix for performance reasons because we assume
+        // that it was checked by the caller
+        _, err := r.Seek(int64(n), io.SeekCurrent)
+        return err
+    }
+    return nil
+}
+
+// AppendVarUInt32 creates a new key prefix, by encoding and appending a
+// var-uint32 to the provided prefix.
+func AppendVarUInt32(prefix []byte, x uint32) []byte {
+    prefixLen := len(prefix)
+    res := make([]byte, prefixLen+binary.MaxVarintLen32)
+    copy(res, prefix)
+    n := binary.PutUvarint(res[prefixLen:], uint64(x))
+    return res[:prefixLen+n]
+}
+
+// ValuesOf takes the arguments and converts them to protoreflect.Value's.
+func ValuesOf(values ...interface{}) []protoreflect.Value {
+    n := len(values)
+    res := make([]protoreflect.Value, n)
+    for i := 0; i < n; i++ {
+        res[i] = protoreflect.ValueOf(values[i])
+    }
+    return res
+}
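A minimal sketch of how these new `encodeutil` helpers fit together when building and decoding prefixed keys. The table and index IDs and the trailing field bytes below are illustrative placeholders, not values used by the SDK:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/cosmos/cosmos-sdk/orm/encoding/encodeutil"
)

func main() {
	// Build a table prefix, then a per-index prefix, by appending var-uint32 IDs.
	tablePrefix := encodeutil.AppendVarUInt32(nil, 7)         // hypothetical table ID
	indexPrefix := encodeutil.AppendVarUInt32(tablePrefix, 2) // hypothetical index ID

	// Convert plain Go values into protoreflect.Values, e.g. for use as key values.
	values := encodeutil.ValuesOf(uint32(1), "abc")
	fmt.Println(len(values)) // 2

	// When decoding a stored key, skip the known prefix before reading field values.
	key := append(append([]byte{}, indexPrefix...), 0x01, 0x02) // prefix + encoded fields
	r := bytes.NewReader(key)
	if err := encodeutil.SkipPrefix(r, indexPrefix); err != nil {
		panic(err)
	}
	rest := make([]byte, r.Len())
	_, _ = r.Read(rest)
	fmt.Printf("%x\n", rest) // 0102
}
```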
@@ -19,10 +19,30 @@ type EntryCodec interface {
 type IndexCodec interface {
     EntryCodec
 
+    // MessageType returns the message type this index codec applies to.
+    MessageType() protoreflect.MessageType
+
+    // GetFieldNames returns the field names in the key of this index.
+    GetFieldNames() []protoreflect.Name
+
     // DecodeIndexKey decodes a kv-pair into index-fields and primary-key field
     // values. These fields may or may not overlap depending on the index.
     DecodeIndexKey(k, v []byte) (indexFields, primaryKey []protoreflect.Value, err error)
 
     // EncodeKVFromMessage encodes a kv-pair for the index from a message.
     EncodeKVFromMessage(message protoreflect.Message) (k, v []byte, err error)
+
+    // CompareKeys compares the provided values which must correspond to the
+    // fields in this key. Prefix keys of different lengths are supported but the
+    // function will panic if either array is too long. A negative value is returned
+    // if values1 is less than values2, 0 is returned if the two arrays are equal,
+    // and a positive value is returned if values2 is greater.
+    CompareKeys(key1, key2 []protoreflect.Value) int
+
+    // EncodeKeyFromMessage encodes the key part of this index and returns both
+    // index values and encoded key.
+    EncodeKeyFromMessage(message protoreflect.Message) (keyValues []protoreflect.Value, key []byte, err error)
+
+    // IsFullyOrdered returns true if all fields in the key are also ordered.
+    IsFullyOrdered() bool
 }
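A rough sketch of how a consumer such as a logical decoder might use the expanded `IndexCodec` interface above. The helper function and package name are hypothetical; only the interface methods (`DecodeIndexKey`, `GetFieldNames`, `MessageType`) come from the change itself:

```go
package ormkvexample

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
)

// decodeIndexedFields decodes a raw kv-pair taken from an index store and
// reports which index it belongs to and which primary key it points at.
func decodeIndexedFields(codec ormkv.IndexCodec, k, v []byte) error {
	indexFields, primaryKey, err := codec.DecodeIndexKey(k, v)
	if err != nil {
		return err
	}
	fmt.Printf("index on %v of %s\n", codec.GetFieldNames(), codec.MessageType().Descriptor().FullName())
	fmt.Println("index values:", indexFields)
	fmt.Println("primary key :", primaryKey)
	return nil
}
```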
@@ -42,18 +42,12 @@ func (p *PrimaryKeyEntry) GetTableName() protoreflect.FullName {
 }
 
 func (p *PrimaryKeyEntry) String() string {
-    msg := p.Value
-    msgStr := "_"
-    if msg != nil {
-        msgBz, err := protojson.Marshal(msg)
-        if err == nil {
-            msgStr = string(msgBz)
-        } else {
-            msgStr = fmt.Sprintf("ERR:%v", err)
-        }
+    if p.Value == nil {
+        return fmt.Sprintf("PK %s %s -> _", p.TableName, fmtValues(p.Key))
+    } else {
+        return fmt.Sprintf("PK %s %s -> %s", p.TableName, fmtValues(p.Key), p.Value)
     }
-    return fmt.Sprintf("PK:%s/%s:%s", p.TableName, fmtValues(p.Key), msgStr)
 }
 
 func fmtValues(values []protoreflect.Value) string {
     if len(values) == 0 {
@@ -109,7 +103,7 @@ func (i *IndexKeyEntry) GetTableName() protoreflect.FullName {
 func (i *IndexKeyEntry) doNotImplement() {}
 
 func (i *IndexKeyEntry) string() string {
-    return fmt.Sprintf("%s/%s:%s:%s", i.TableName, fmtFields(i.Fields), fmtValues(i.IndexValues), fmtValues(i.PrimaryKey))
+    return fmt.Sprintf("%s %s : %s -> %s", i.TableName, fmtFields(i.Fields), fmtValues(i.IndexValues), fmtValues(i.PrimaryKey))
 }
 
 func fmtFields(fields []protoreflect.Name) string {
@@ -122,10 +116,10 @@ func fmtFields(fields []protoreflect.Name) string {
 
 func (i *IndexKeyEntry) String() string {
     if i.IsUnique {
-        return fmt.Sprintf("UNIQ:%s", i.string())
+        return fmt.Sprintf("UNIQ %s", i.string())
     } else {
 
-        return fmt.Sprintf("IDX:%s", i.string())
+        return fmt.Sprintf("IDX %s", i.string())
     }
 }
 
@@ -146,7 +140,7 @@ func (s *SeqEntry) GetTableName() protoreflect.FullName {
 func (s *SeqEntry) doNotImplement() {}
 
 func (s *SeqEntry) String() string {
-    return fmt.Sprintf("SEQ:%s:%d", s.TableName, s.Value)
+    return fmt.Sprintf("SEQ %s %d", s.TableName, s.Value)
 }
 
 var _, _, _ Entry = &PrimaryKeyEntry{}, &IndexKeyEntry{}, &SeqEntry{}
@@ -4,33 +4,31 @@ import (
     "testing"
 
     "google.golang.org/protobuf/reflect/protoreflect"
 
-    "github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
-
     "gotest.tools/v3/assert"
 
+    "github.com/cosmos/cosmos-sdk/orm/encoding/encodeutil"
+    "github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
     "github.com/cosmos/cosmos-sdk/orm/internal/testpb"
-    "github.com/cosmos/cosmos-sdk/orm/internal/testutil"
 )
 
-var aFullName = (&testpb.A{}).ProtoReflect().Descriptor().FullName()
+var aFullName = (&testpb.ExampleTable{}).ProtoReflect().Descriptor().FullName()
 
 func TestPrimaryKeyEntry(t *testing.T) {
     entry := &ormkv.PrimaryKeyEntry{
         TableName: aFullName,
-        Key:       testutil.ValuesOf(uint32(1), "abc"),
-        Value:     &testpb.A{I32: -1},
+        Key:       encodeutil.ValuesOf(uint32(1), "abc"),
+        Value:     &testpb.ExampleTable{I32: -1},
     }
-    assert.Equal(t, `PK:testpb.A/1/"abc":{"i32":-1}`, entry.String())
+    assert.Equal(t, `PK testpb.ExampleTable 1/"abc" -> i32:-1`, entry.String())
     assert.Equal(t, aFullName, entry.GetTableName())
 
     // prefix key
     entry = &ormkv.PrimaryKeyEntry{
         TableName: aFullName,
-        Key:       testutil.ValuesOf(uint32(1), "abc"),
+        Key:       encodeutil.ValuesOf(uint32(1), "abc"),
         Value:     nil,
     }
-    assert.Equal(t, `PK:testpb.A/1/"abc":_`, entry.String())
+    assert.Equal(t, `PK testpb.ExampleTable 1/"abc" -> _`, entry.String())
     assert.Equal(t, aFullName, entry.GetTableName())
 }
@@ -39,20 +37,20 @@ func TestIndexKeyEntry(t *testing.T) {
         TableName:   aFullName,
         Fields:      []protoreflect.Name{"u32", "i32", "str"},
         IsUnique:    false,
-        IndexValues: testutil.ValuesOf(uint32(10), int32(-1), "abc"),
-        PrimaryKey:  testutil.ValuesOf("abc", int32(-1)),
+        IndexValues: encodeutil.ValuesOf(uint32(10), int32(-1), "abc"),
+        PrimaryKey:  encodeutil.ValuesOf("abc", int32(-1)),
     }
-    assert.Equal(t, `IDX:testpb.A/u32/i32/str:10/-1/"abc":"abc"/-1`, entry.String())
+    assert.Equal(t, `IDX testpb.ExampleTable u32/i32/str : 10/-1/"abc" -> "abc"/-1`, entry.String())
     assert.Equal(t, aFullName, entry.GetTableName())
 
     entry = &ormkv.IndexKeyEntry{
         TableName:   aFullName,
         Fields:      []protoreflect.Name{"u32"},
         IsUnique:    true,
-        IndexValues: testutil.ValuesOf(uint32(10)),
-        PrimaryKey:  testutil.ValuesOf("abc", int32(-1)),
+        IndexValues: encodeutil.ValuesOf(uint32(10)),
+        PrimaryKey:  encodeutil.ValuesOf("abc", int32(-1)),
     }
-    assert.Equal(t, `UNIQ:testpb.A/u32:10:"abc"/-1`, entry.String())
+    assert.Equal(t, `UNIQ testpb.ExampleTable u32 : 10 -> "abc"/-1`, entry.String())
     assert.Equal(t, aFullName, entry.GetTableName())
 
     // prefix key
@@ -60,9 +58,9 @@ func TestIndexKeyEntry(t *testing.T) {
         TableName:   aFullName,
         Fields:      []protoreflect.Name{"u32", "i32", "str"},
         IsUnique:    false,
-        IndexValues: testutil.ValuesOf(uint32(10), int32(-1)),
+        IndexValues: encodeutil.ValuesOf(uint32(10), int32(-1)),
     }
-    assert.Equal(t, `IDX:testpb.A/u32/i32/str:10/-1:_`, entry.String())
+    assert.Equal(t, `IDX testpb.ExampleTable u32/i32/str : 10/-1 -> _`, entry.String())
     assert.Equal(t, aFullName, entry.GetTableName())
 
     // prefix key
@@ -70,8 +68,8 @@ func TestIndexKeyEntry(t *testing.T) {
         TableName:   aFullName,
         Fields:      []protoreflect.Name{"str", "i32"},
         IsUnique:    true,
-        IndexValues: testutil.ValuesOf("abc", int32(1)),
+        IndexValues: encodeutil.ValuesOf("abc", int32(1)),
     }
-    assert.Equal(t, `UNIQ:testpb.A/str/i32:"abc"/1:_`, entry.String())
+    assert.Equal(t, `UNIQ testpb.ExampleTable str/i32 : "abc"/1 -> _`, entry.String())
     assert.Equal(t, aFullName, entry.GetTableName())
 }
@@ -12,7 +12,6 @@ import (
 // IndexKeyCodec is the codec for (non-unique) index keys.
 type IndexKeyCodec struct {
     *KeyCodec
-    tableName    protoreflect.FullName
     pkFieldOrder []int
 }
 
@@ -20,7 +19,15 @@ var _ IndexCodec = &IndexKeyCodec{}
 
 // NewIndexKeyCodec creates a new IndexKeyCodec with an optional prefix for the
 // provided message descriptor, index and primary key fields.
-func NewIndexKeyCodec(prefix []byte, messageDescriptor protoreflect.MessageDescriptor, indexFields, primaryKeyFields []protoreflect.Name) (*IndexKeyCodec, error) {
+func NewIndexKeyCodec(prefix []byte, messageType protoreflect.MessageType, indexFields, primaryKeyFields []protoreflect.Name) (*IndexKeyCodec, error) {
+    if len(indexFields) == 0 {
+        return nil, ormerrors.InvalidTableDefinition.Wrapf("index fields are empty")
+    }
+
+    if len(primaryKeyFields) == 0 {
+        return nil, ormerrors.InvalidTableDefinition.Wrapf("primary key fields are empty")
+    }
+
     indexFieldMap := map[protoreflect.Name]int{}
 
     keyFields := make([]protoreflect.Name, 0, len(indexFields)+len(primaryKeyFields))
@@ -43,7 +50,7 @@ func NewIndexKeyCodec(prefix []byte, messageDescriptor protoreflect.MessageDescr
         k++
     }
 
-    cdc, err := NewKeyCodec(prefix, messageDescriptor, keyFields)
+    cdc, err := NewKeyCodec(prefix, messageType, keyFields)
     if err != nil {
         return nil, err
     }
@@ -51,13 +58,12 @@ func NewIndexKeyCodec(prefix []byte, messageDescriptor protoreflect.MessageDescr
     return &IndexKeyCodec{
         KeyCodec:     cdc,
         pkFieldOrder: pkFieldOrder,
-        tableName:    messageDescriptor.FullName(),
     }, nil
 }
 
 func (cdc IndexKeyCodec) DecodeIndexKey(k, _ []byte) (indexFields, primaryKey []protoreflect.Value, err error) {
 
-    values, err := cdc.Decode(bytes.NewReader(k))
+    values, err := cdc.DecodeKey(bytes.NewReader(k))
     // got prefix key
     if err == io.EOF {
         return values, nil, nil
@@ -87,7 +93,7 @@ func (cdc IndexKeyCodec) DecodeEntry(k, v []byte) (Entry, error) {
     }
 
     return &IndexKeyEntry{
-        TableName:   cdc.tableName,
+        TableName:   cdc.messageType.Descriptor().FullName(),
         Fields:      cdc.fieldNames,
         IndexValues: idxValues,
         PrimaryKey:  pk,
@@ -100,21 +106,19 @@ func (cdc IndexKeyCodec) EncodeEntry(entry Entry) (k, v []byte, err error) {
         return nil, nil, ormerrors.BadDecodeEntry
     }
 
-    if indexEntry.TableName != cdc.tableName {
+    if indexEntry.TableName != cdc.messageType.Descriptor().FullName() {
         return nil, nil, ormerrors.BadDecodeEntry
     }
 
-    bz, err := cdc.KeyCodec.Encode(indexEntry.IndexValues)
+    bz, err := cdc.KeyCodec.EncodeKey(indexEntry.IndexValues)
     if err != nil {
         return nil, nil, err
     }
 
-    return bz, sentinel, nil
+    return bz, []byte{}, nil
 }
 
-var sentinel = []byte{0}
-
 func (cdc IndexKeyCodec) EncodeKVFromMessage(message protoreflect.Message) (k, v []byte, err error) {
-    _, k, err = cdc.EncodeFromMessage(message)
-    return k, sentinel, err
+    _, k, err = cdc.EncodeKeyFromMessage(message)
+    return k, []byte{}, err
 }
@@ -18,20 +18,20 @@ func TestIndexKeyCodec(t *testing.T) {
         idxPartCdc := testutil.TestKeyCodecGen(1, 5).Draw(t, "idxPartCdc").(testutil.TestKeyCodec)
         pkCodec := testutil.TestKeyCodecGen(1, 5).Draw(t, "pkCdc").(testutil.TestKeyCodec)
         prefix := rapid.SliceOfN(rapid.Byte(), 0, 5).Draw(t, "prefix").([]byte)
-        desc := (&testpb.A{}).ProtoReflect().Descriptor()
+        messageType := (&testpb.ExampleTable{}).ProtoReflect().Type()
         indexKeyCdc, err := ormkv.NewIndexKeyCodec(
             prefix,
-            desc,
+            messageType,
             idxPartCdc.Codec.GetFieldNames(),
             pkCodec.Codec.GetFieldNames(),
         )
         assert.NilError(t, err)
         for i := 0; i < 100; i++ {
-            a := testutil.GenA.Draw(t, fmt.Sprintf("a%d", i)).(*testpb.A)
-            key := indexKeyCdc.GetValues(a.ProtoReflect())
-            pk := pkCodec.Codec.GetValues(a.ProtoReflect())
+            a := testutil.GenA.Draw(t, fmt.Sprintf("a%d", i)).(*testpb.ExampleTable)
+            key := indexKeyCdc.GetKeyValues(a.ProtoReflect())
+            pk := pkCodec.Codec.GetKeyValues(a.ProtoReflect())
             idx1 := &ormkv.IndexKeyEntry{
-                TableName:   desc.FullName(),
+                TableName:   messageType.Descriptor().FullName(),
                 Fields:      indexKeyCdc.GetFieldNames(),
                 IsUnique:    false,
                 IndexValues: key,
@@ -48,16 +48,16 @@ func TestIndexKeyCodec(t *testing.T) {
             entry2, err := indexKeyCdc.DecodeEntry(k, v)
             assert.NilError(t, err)
             idx2 := entry2.(*ormkv.IndexKeyEntry)
-            assert.Equal(t, 0, indexKeyCdc.CompareValues(idx1.IndexValues, idx2.IndexValues))
-            assert.Equal(t, 0, pkCodec.Codec.CompareValues(idx1.PrimaryKey, idx2.PrimaryKey))
+            assert.Equal(t, 0, indexKeyCdc.CompareKeys(idx1.IndexValues, idx2.IndexValues))
+            assert.Equal(t, 0, pkCodec.Codec.CompareKeys(idx1.PrimaryKey, idx2.PrimaryKey))
             assert.Equal(t, false, idx2.IsUnique)
-            assert.Equal(t, desc.FullName(), idx2.TableName)
+            assert.Equal(t, messageType.Descriptor().FullName(), idx2.TableName)
             assert.DeepEqual(t, idx1.Fields, idx2.Fields)
 
             idxFields, pk2, err := indexKeyCdc.DecodeIndexKey(k, v)
             assert.NilError(t, err)
-            assert.Equal(t, 0, indexKeyCdc.CompareValues(key, idxFields))
-            assert.Equal(t, 0, pkCodec.Codec.CompareValues(pk, pk2))
+            assert.Equal(t, 0, indexKeyCdc.CompareKeys(key, idxFields))
+            assert.Equal(t, 0, pkCodec.Codec.CompareKeys(pk, pk2))
         }
     })
 }
@@ -8,6 +8,7 @@ import (
 
     "google.golang.org/protobuf/reflect/protoreflect"
 
+    "github.com/cosmos/cosmos-sdk/orm/encoding/encodeutil"
     "github.com/cosmos/cosmos-sdk/orm/encoding/ormfield"
 )
 
@@ -22,11 +23,12 @@ type KeyCodec struct {
     fieldDescriptors []protoreflect.FieldDescriptor
     fieldNames       []protoreflect.Name
     fieldCodecs      []ormfield.Codec
+    messageType      protoreflect.MessageType
 }
 
 // NewKeyCodec returns a new KeyCodec with an optional prefix for the provided
 // message descriptor and fields.
-func NewKeyCodec(prefix []byte, messageDescriptor protoreflect.MessageDescriptor, fieldNames []protoreflect.Name) (*KeyCodec, error) {
+func NewKeyCodec(prefix []byte, messageType protoreflect.MessageType, fieldNames []protoreflect.Name) (*KeyCodec, error) {
     n := len(fieldNames)
     fieldCodecs := make([]ormfield.Codec, n)
     fieldDescriptors := make([]protoreflect.FieldDescriptor, n)
@@ -35,7 +37,7 @@ func NewKeyCodec(prefix []byte, messageDescriptor protoreflect.MessageDescriptor
         i         int
     }
     fixedSize := 0
-    messageFields := messageDescriptor.Fields()
+    messageFields := messageType.Descriptor().Fields()
 
     for i := 0; i < n; i++ {
         nonTerminal := i != n-1
@@ -63,15 +65,16 @@ func NewKeyCodec(prefix []byte, messageDescriptor protoreflect.MessageDescriptor
         prefix:           prefix,
         fixedSize:        fixedSize,
         variableSizers:   variableSizers,
+        messageType:      messageType,
     }, nil
 }
 
-// Encode encodes the values assuming that they correspond to the fields
+// EncodeKey encodes the values assuming that they correspond to the fields
 // specified for the key. If the array of values is shorter than the
 // number of fields in the key, a partial "prefix" key will be encoded
 // which can be used for constructing a prefix iterator.
-func (cdc *KeyCodec) Encode(values []protoreflect.Value) ([]byte, error) {
-    sz, err := cdc.ComputeBufferSize(values)
+func (cdc *KeyCodec) EncodeKey(values []protoreflect.Value) ([]byte, error) {
+    sz, err := cdc.ComputeKeyBufferSize(values)
     if err != nil {
         return nil, err
     }
@@ -94,8 +97,8 @@ func (cdc *KeyCodec) Encode(values []protoreflect.Value) ([]byte, error) {
     return w.Bytes(), nil
 }
 
-// GetValues extracts the values specified by the key fields from the message.
-func (cdc *KeyCodec) GetValues(message protoreflect.Message) []protoreflect.Value {
+// GetKeyValues extracts the values specified by the key fields from the message.
+func (cdc *KeyCodec) GetKeyValues(message protoreflect.Message) []protoreflect.Value {
     res := make([]protoreflect.Value, len(cdc.fieldDescriptors))
     for i, f := range cdc.fieldDescriptors {
         res[i] = message.Get(f)
@@ -103,11 +106,11 @@ func (cdc *KeyCodec) GetValues(message protoreflect.Message) []protoreflect.Valu
     return res
 }
 
-// Decode decodes the values in the key specified by the reader. If the
+// DecodeKey decodes the values in the key specified by the reader. If the
 // provided key is a prefix key, the values that could be decoded will
 // be returned with io.EOF as the error.
-func (cdc *KeyCodec) Decode(r *bytes.Reader) ([]protoreflect.Value, error) {
-    if err := skipPrefix(r, cdc.prefix); err != nil {
+func (cdc *KeyCodec) DecodeKey(r *bytes.Reader) ([]protoreflect.Value, error) {
+    if err := encodeutil.SkipPrefix(r, cdc.prefix); err != nil {
        return nil, err
     }
 
@@ -125,10 +128,10 @@ func (cdc *KeyCodec) Decode(r *bytes.Reader) ([]protoreflect.Value, error) {
     return values, nil
 }
 
-// EncodeFromMessage combines GetValues and Encode.
-func (cdc *KeyCodec) EncodeFromMessage(message protoreflect.Message) ([]protoreflect.Value, []byte, error) {
-    values := cdc.GetValues(message)
-    bz, err := cdc.Encode(values)
+// EncodeKeyFromMessage combines GetKeyValues and EncodeKey.
+func (cdc *KeyCodec) EncodeKeyFromMessage(message protoreflect.Message) ([]protoreflect.Value, []byte, error) {
+    values := cdc.GetKeyValues(message)
+    bz, err := cdc.EncodeKey(values)
     return values, bz, err
 }
 
@@ -142,12 +145,12 @@ func (cdc *KeyCodec) IsFullyOrdered() bool {
     return true
 }
 
-// CompareValues compares the provided values which must correspond to the
+// CompareKeys compares the provided values which must correspond to the
 // fields in this key. Prefix keys of different lengths are supported but the
 // function will panic if either array is too long. A negative value is returned
 // if values1 is less than values2, 0 is returned if the two arrays are equal,
 // and a positive value is returned if values2 is greater.
-func (cdc *KeyCodec) CompareValues(values1, values2 []protoreflect.Value) int {
+func (cdc *KeyCodec) CompareKeys(values1, values2 []protoreflect.Value) int {
     j := len(values1)
     k := len(values2)
     n := j
@@ -178,9 +181,9 @@ func (cdc *KeyCodec) CompareValues(values1, values2 []protoreflect.Value) int {
     }
 }
 
-// ComputeBufferSize computes the required buffer size for the provided values
+// ComputeKeyBufferSize computes the required buffer size for the provided values
 // which can represent a full or prefix key.
-func (cdc KeyCodec) ComputeBufferSize(values []protoreflect.Value) (int, error) {
+func (cdc KeyCodec) ComputeKeyBufferSize(values []protoreflect.Value) (int, error) {
     size := cdc.fixedSize
     n := len(values)
     for _, sz := range cdc.variableSizers {
@@ -198,10 +201,10 @@ func (cdc KeyCodec) ComputeBufferSize(values []protoreflect.Value) (int, error)
     return size, nil
 }
 
-// SetValues sets the provided values on the message which must correspond
+// SetKeyValues sets the provided values on the message which must correspond
 // exactly to the field descriptors for this key. Prefix keys aren't
 // supported.
-func (cdc *KeyCodec) SetValues(message protoreflect.Message, values []protoreflect.Value) {
+func (cdc *KeyCodec) SetKeyValues(message protoreflect.Message, values []protoreflect.Value) {
     for i, f := range cdc.fieldDescriptors {
         message.Set(f, values[i])
     }
@@ -284,6 +287,13 @@ func (cdc *KeyCodec) GetFieldNames() []protoreflect.Name {
     return cdc.fieldNames
 }
 
+// Prefix returns the prefix applied to keys in this codec before any field
+// values are encoded.
 func (cdc *KeyCodec) Prefix() []byte {
     return cdc.prefix
 }
+
+// MessageType returns the message type of fields in this key.
+func (cdc *KeyCodec) MessageType() protoreflect.MessageType {
+    return cdc.messageType
+}
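A minimal sketch of the renamed `KeyCodec` round trip (`EncodeKey`/`DecodeKey`/`CompareKeys`). The message type `testpb.ExampleTable` and the field names come from the test files in this change; note that `testpb` is the ORM's internal test-proto package, so outside the SDK module any table message would stand in for it:

```go
package main

import (
	"bytes"
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"

	"github.com/cosmos/cosmos-sdk/orm/encoding/encodeutil"
	"github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
	"github.com/cosmos/cosmos-sdk/orm/internal/testpb"
)

func main() {
	// Build a key codec over the u32, str and i32 fields of the test message.
	cdc, err := ormkv.NewKeyCodec(nil,
		(&testpb.ExampleTable{}).ProtoReflect().Type(),
		[]protoreflect.Name{"u32", "str", "i32"})
	if err != nil {
		panic(err)
	}

	// Encode a full key and decode it back; CompareKeys reports 0 for equal keys.
	values := encodeutil.ValuesOf(uint32(4), "abc", int32(1))
	bz, err := cdc.EncodeKey(values)
	if err != nil {
		panic(err)
	}
	decoded, err := cdc.DecodeKey(bytes.NewReader(bz))
	if err != nil {
		panic(err)
	}
	fmt.Println(cdc.CompareKeys(values, decoded)) // 0
}
```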
@@ -5,14 +5,13 @@ import (
     "io"
     "testing"
 
-    "github.com/cosmos/cosmos-sdk/orm/internal/testpb"
-
-    "github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
-
     "google.golang.org/protobuf/reflect/protoreflect"
     "gotest.tools/v3/assert"
     "pgregory.net/rapid"
 
+    "github.com/cosmos/cosmos-sdk/orm/encoding/encodeutil"
+    "github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
+    "github.com/cosmos/cosmos-sdk/orm/internal/testpb"
     "github.com/cosmos/cosmos-sdk/orm/internal/testutil"
 )
 
@@ -29,24 +28,24 @@ func TestKeyCodec(t *testing.T) {
                 keyValues2 := key.Draw(t, "values2")
                 bz2 := assertEncDecKey(t, key, keyValues2)
                 // bytes comparison should equal comparison of values
-                assert.Equal(t, key.Codec.CompareValues(keyValues, keyValues2), bytes.Compare(bz1, bz2))
+                assert.Equal(t, key.Codec.CompareKeys(keyValues, keyValues2), bytes.Compare(bz1, bz2))
             }
         }
     })
 }
 
 func assertEncDecKey(t *rapid.T, key testutil.TestKeyCodec, keyValues []protoreflect.Value) []byte {
-    bz, err := key.Codec.Encode(keyValues)
+    bz, err := key.Codec.EncodeKey(keyValues)
     assert.NilError(t, err)
-    keyValues2, err := key.Codec.Decode(bytes.NewReader(bz))
+    keyValues2, err := key.Codec.DecodeKey(bytes.NewReader(bz))
     assert.NilError(t, err)
-    assert.Equal(t, 0, key.Codec.CompareValues(keyValues, keyValues2))
+    assert.Equal(t, 0, key.Codec.CompareKeys(keyValues, keyValues2))
     return bz
 }
 
 func TestCompareValues(t *testing.T) {
     cdc, err := ormkv.NewKeyCodec(nil,
-        (&testpb.A{}).ProtoReflect().Descriptor(),
+        (&testpb.ExampleTable{}).ProtoReflect().Type(),
         []protoreflect.Name{"u32", "str", "i32"})
     assert.NilError(t, err)
 
@@ -59,113 +58,113 @@ func TestCompareValues(t *testing.T) {
     }{
         {
             "eq",
-            testutil.ValuesOf(uint32(0), "abc", int32(-3)),
-            testutil.ValuesOf(uint32(0), "abc", int32(-3)),
+            encodeutil.ValuesOf(uint32(0), "abc", int32(-3)),
+            encodeutil.ValuesOf(uint32(0), "abc", int32(-3)),
             0,
             false,
         },
         {
             "eq prefix 0",
-            testutil.ValuesOf(),
-            testutil.ValuesOf(),
+            encodeutil.ValuesOf(),
+            encodeutil.ValuesOf(),
             0,
             false,
         },
         {
             "eq prefix 1",
-            testutil.ValuesOf(uint32(0)),
-            testutil.ValuesOf(uint32(0)),
+            encodeutil.ValuesOf(uint32(0)),
+            encodeutil.ValuesOf(uint32(0)),
             0,
             false,
         },
         {
             "eq prefix 2",
-            testutil.ValuesOf(uint32(0), "abc"),
-            testutil.ValuesOf(uint32(0), "abc"),
+            encodeutil.ValuesOf(uint32(0), "abc"),
+            encodeutil.ValuesOf(uint32(0), "abc"),
             0,
             false,
         },
         {
             "lt1",
-            testutil.ValuesOf(uint32(0), "abc", int32(-3)),
-            testutil.ValuesOf(uint32(1), "abc", int32(-3)),
+            encodeutil.ValuesOf(uint32(0), "abc", int32(-3)),
+            encodeutil.ValuesOf(uint32(1), "abc", int32(-3)),
             -1,
             true,
         },
         {
             "lt2",
-            testutil.ValuesOf(uint32(1), "abb", int32(-3)),
-            testutil.ValuesOf(uint32(1), "abc", int32(-3)),
+            encodeutil.ValuesOf(uint32(1), "abb", int32(-3)),
+            encodeutil.ValuesOf(uint32(1), "abc", int32(-3)),
             -1,
             true,
         },
         {
             "lt3",
-            testutil.ValuesOf(uint32(1), "abb", int32(-4)),
-            testutil.ValuesOf(uint32(1), "abb", int32(-3)),
+            encodeutil.ValuesOf(uint32(1), "abb", int32(-4)),
+            encodeutil.ValuesOf(uint32(1), "abb", int32(-3)),
             -1,
             true,
         },
         {
             "less prefix 0",
-            testutil.ValuesOf(),
-            testutil.ValuesOf(uint32(1), "abb", int32(-4)),
+            encodeutil.ValuesOf(),
+            encodeutil.ValuesOf(uint32(1), "abb", int32(-4)),
             -1,
             true,
         },
         {
             "less prefix 1",
-            testutil.ValuesOf(uint32(1)),
-            testutil.ValuesOf(uint32(1), "abb", int32(-4)),
+            encodeutil.ValuesOf(uint32(1)),
+            encodeutil.ValuesOf(uint32(1), "abb", int32(-4)),
             -1,
             true,
         },
         {
             "less prefix 2",
-            testutil.ValuesOf(uint32(1), "abb"),
-            testutil.ValuesOf(uint32(1), "abb", int32(-4)),
+            encodeutil.ValuesOf(uint32(1), "abb"),
+            encodeutil.ValuesOf(uint32(1), "abb", int32(-4)),
             -1,
             true,
         },
         {
             "gt1",
-            testutil.ValuesOf(uint32(2), "abb", int32(-4)),
-            testutil.ValuesOf(uint32(1), "abb", int32(-4)),
+            encodeutil.ValuesOf(uint32(2), "abb", int32(-4)),
+            encodeutil.ValuesOf(uint32(1), "abb", int32(-4)),
             1,
             false,
         },
         {
             "gt2",
-            testutil.ValuesOf(uint32(2), "abc", int32(-4)),
-            testutil.ValuesOf(uint32(2), "abb", int32(-4)),
+            encodeutil.ValuesOf(uint32(2), "abc", int32(-4)),
+            encodeutil.ValuesOf(uint32(2), "abb", int32(-4)),
             1,
             false,
         },
         {
             "gt3",
-            testutil.ValuesOf(uint32(2), "abc", int32(1)),
-            testutil.ValuesOf(uint32(2), "abc", int32(-3)),
+            encodeutil.ValuesOf(uint32(2), "abc", int32(1)),
+            encodeutil.ValuesOf(uint32(2), "abc", int32(-3)),
             1,
             false,
         },
         {
             "gt prefix 0",
-            testutil.ValuesOf(uint32(2), "abc", int32(-3)),
-            testutil.ValuesOf(),
+            encodeutil.ValuesOf(uint32(2), "abc", int32(-3)),
+            encodeutil.ValuesOf(),
             1,
             true,
         },
         {
             "gt prefix 1",
-            testutil.ValuesOf(uint32(2), "abc", int32(-3)),
-            testutil.ValuesOf(uint32(2)),
+            encodeutil.ValuesOf(uint32(2), "abc", int32(-3)),
+            encodeutil.ValuesOf(uint32(2)),
             1,
             true,
         },
         {
             "gt prefix 2",
-            testutil.ValuesOf(uint32(2), "abc", int32(-3)),
-            testutil.ValuesOf(uint32(2), "abc"),
+            encodeutil.ValuesOf(uint32(2), "abc", int32(-3)),
+            encodeutil.ValuesOf(uint32(2), "abc"),
             1,
             true,
         },
@@ -174,7 +173,7 @@ func TestCompareValues(t *testing.T) {
         t.Run(test.name, func(t *testing.T) {
             assert.Equal(
                 t, test.expect,
-                cdc.CompareValues(test.values1, test.values2),
+                cdc.CompareKeys(test.values1, test.values2),
             )
             // CheckValidRangeIterationKeys should give comparable results
             err := cdc.CheckValidRangeIterationKeys(test.values1, test.values2)
@@ -189,7 +188,7 @@ func TestCompareValues(t *testing.T) {
 
 func TestDecodePrefixKey(t *testing.T) {
     cdc, err := ormkv.NewKeyCodec(nil,
-        (&testpb.A{}).ProtoReflect().Descriptor(),
+        (&testpb.ExampleTable{}).ProtoReflect().Type(),
         []protoreflect.Name{"u32", "str", "bz", "i32"})
 
     assert.NilError(t, err)
@@ -199,23 +198,23 @@ func TestDecodePrefixKey(t *testing.T) {
     }{
         {
             "1",
-            testutil.ValuesOf(uint32(5), "abc"),
+            encodeutil.ValuesOf(uint32(5), "abc"),
         },
     }
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            bz, err := cdc.Encode(test.values)
+            bz, err := cdc.EncodeKey(test.values)
             assert.NilError(t, err)
-            values, err := cdc.Decode(bytes.NewReader(bz))
+            values, err := cdc.DecodeKey(bytes.NewReader(bz))
             assert.ErrorType(t, err, io.EOF)
-            assert.Equal(t, 0, cdc.CompareValues(test.values, values))
+            assert.Equal(t, 0, cdc.CompareKeys(test.values, values))
         })
     }
 }
 
 func TestValidRangeIterationKeys(t *testing.T) {
     cdc, err := ormkv.NewKeyCodec(nil,
-        (&testpb.A{}).ProtoReflect().Descriptor(),
+        (&testpb.ExampleTable{}).ProtoReflect().Type(),
         []protoreflect.Name{"u32", "str", "bz", "i32"})
     assert.NilError(t, err)
 
@@ -227,62 +226,62 @@ func TestValidRangeIterationKeys(t *testing.T) {
     }{
         {
             "1 eq",
-            testutil.ValuesOf(uint32(0)),
-            testutil.ValuesOf(uint32(0)),
+            encodeutil.ValuesOf(uint32(0)),
+            encodeutil.ValuesOf(uint32(0)),
             true,
         },
         {
             "1 lt",
-            testutil.ValuesOf(uint32(0)),
-            testutil.ValuesOf(uint32(1)),
+            encodeutil.ValuesOf(uint32(0)),
+            encodeutil.ValuesOf(uint32(1)),
             false,
         },
         {
             "1 gt",
-            testutil.ValuesOf(uint32(1)),
-            testutil.ValuesOf(uint32(0)),
+            encodeutil.ValuesOf(uint32(1)),
+            encodeutil.ValuesOf(uint32(0)),
             true,
         },
         {
             "1,2 lt",
-            testutil.ValuesOf(uint32(0)),
-            testutil.ValuesOf(uint32(0), "abc"),
+            encodeutil.ValuesOf(uint32(0)),
+            encodeutil.ValuesOf(uint32(0), "abc"),
             false,
         },
         {
             "1,2 gt",
-            testutil.ValuesOf(uint32(0), "abc"),
-            testutil.ValuesOf(uint32(0)),
+            encodeutil.ValuesOf(uint32(0), "abc"),
+            encodeutil.ValuesOf(uint32(0)),
             false,
         },
         {
             "1,2,3",
-            testutil.ValuesOf(uint32(0)),
-            testutil.ValuesOf(uint32(0), "abc", []byte{1, 2}),
+            encodeutil.ValuesOf(uint32(0)),
+            encodeutil.ValuesOf(uint32(0), "abc", []byte{1, 2}),
             true,
         },
         {
             "1,2,3,4 lt",
-            testutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(-1)),
-            testutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(1)),
+            encodeutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(-1)),
+            encodeutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(1)),
             false,
         },
         {
             "too long",
-            testutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(-1)),
-            testutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(1), int32(1)),
+            encodeutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(-1)),
+            encodeutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(1), int32(1)),
             true,
         },
         {
             "1,2,3,4 eq",
-            testutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(1)),
-            testutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(1)),
+            encodeutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(1)),
+            encodeutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(1)),
             true,
         },
         {
             "1,2,3,4 bz err",
-            testutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(-1)),
-            testutil.ValuesOf(uint32(0), "abc", []byte{1, 2, 3}, int32(1)),
+            encodeutil.ValuesOf(uint32(0), "abc", []byte{1, 2}, int32(-1)),
+            encodeutil.ValuesOf(uint32(0), "abc", []byte{1, 2, 3}, int32(1)),
             true,
         },
     }
@@ -300,19 +299,19 @@ func TestValidRangeIterationKeys(t *testing.T) {
 
 func TestGetSet(t *testing.T) {
     cdc, err := ormkv.NewKeyCodec(nil,
-        (&testpb.A{}).ProtoReflect().Descriptor(),
+        (&testpb.ExampleTable{}).ProtoReflect().Type(),
        []protoreflect.Name{"u32", "str", "i32"})
     assert.NilError(t, err)
 
-    var a testpb.A
-    values := testutil.ValuesOf(uint32(4), "abc", int32(1))
-    cdc.SetValues(a.ProtoReflect(), values)
-    values2 := cdc.GetValues(a.ProtoReflect())
-    assert.Equal(t, 0, cdc.CompareValues(values, values2))
-    bz, err := cdc.Encode(values)
+    var a testpb.ExampleTable
+    values := encodeutil.ValuesOf(uint32(4), "abc", int32(1))
+    cdc.SetKeyValues(a.ProtoReflect(), values)
+    values2 := cdc.GetKeyValues(a.ProtoReflect())
+    assert.Equal(t, 0, cdc.CompareKeys(values, values2))
+    bz, err := cdc.EncodeKey(values)
     assert.NilError(t, err)
-    values3, bz2, err := cdc.EncodeFromMessage(a.ProtoReflect())
+    values3, bz2, err := cdc.EncodeKeyFromMessage(a.ProtoReflect())
     assert.NilError(t, err)
-    assert.Equal(t, 0, cdc.CompareValues(values, values3))
+    assert.Equal(t, 0, cdc.CompareKeys(values, values3))
     assert.Assert(t, bytes.Equal(bz, bz2))
 }
@@ -14,21 +14,21 @@ import (
 // PrimaryKeyCodec is the codec for primary keys.
 type PrimaryKeyCodec struct {
     *KeyCodec
-    msgType          protoreflect.MessageType
     unmarshalOptions proto.UnmarshalOptions
 }
 
+var _ IndexCodec = &PrimaryKeyCodec{}
+
 // NewPrimaryKeyCodec creates a new PrimaryKeyCodec for the provided msg and
 // fields, with an optional prefix and unmarshal options.
 func NewPrimaryKeyCodec(prefix []byte, msgType protoreflect.MessageType, fieldNames []protoreflect.Name, unmarshalOptions proto.UnmarshalOptions) (*PrimaryKeyCodec, error) {
-    keyCodec, err := NewKeyCodec(prefix, msgType.Descriptor(), fieldNames)
+    keyCodec, err := NewKeyCodec(prefix, msgType, fieldNames)
     if err != nil {
         return nil, err
     }
 
     return &PrimaryKeyCodec{
         KeyCodec:         keyCodec,
-        msgType:          msgType,
         unmarshalOptions: unmarshalOptions,
     }, nil
 }
@@ -36,7 +36,7 @@ func NewPrimaryKeyCodec(prefix []byte, msgType protoreflect.MessageType, fieldNa
 var _ IndexCodec = PrimaryKeyCodec{}
 
 func (p PrimaryKeyCodec) DecodeIndexKey(k, _ []byte) (indexFields, primaryKey []protoreflect.Value, err error) {
-    indexFields, err = p.Decode(bytes.NewReader(k))
+    indexFields, err = p.DecodeKey(bytes.NewReader(k))
 
     // got prefix key
     if err == io.EOF {
@@ -55,16 +55,21 @@ func (p PrimaryKeyCodec) DecodeIndexKey(k, _ []byte) (indexFields, primaryKey []
 }
 
 func (p PrimaryKeyCodec) DecodeEntry(k, v []byte) (Entry, error) {
-    values, err := p.Decode(bytes.NewReader(k))
-    if err != nil {
+    values, err := p.DecodeKey(bytes.NewReader(k))
+    if err == io.EOF {
+        return &PrimaryKeyEntry{
+            TableName: p.messageType.Descriptor().FullName(),
+            Key:       values,
+        }, nil
+    } else if err != nil {
         return nil, err
     }
 
-    msg := p.msgType.New().Interface()
+    msg := p.messageType.New().Interface()
     err = p.Unmarshal(values, v, msg)
 
     return &PrimaryKeyEntry{
-        TableName: p.msgType.Descriptor().FullName(),
+        TableName: p.messageType.Descriptor().FullName(),
         Key:       values,
         Value:     msg,
     }, err
@@ -76,15 +81,15 @@ func (p PrimaryKeyCodec) EncodeEntry(entry Entry) (k, v []byte, err error) {
         return nil, nil, ormerrors.BadDecodeEntry.Wrapf("expected %T, got %T", &PrimaryKeyEntry{}, entry)
     }
 
-    if pkEntry.TableName != p.msgType.Descriptor().FullName() {
+    if pkEntry.TableName != p.messageType.Descriptor().FullName() {
         return nil, nil, ormerrors.BadDecodeEntry.Wrapf(
             "wrong table name, got %s, expected %s",
             pkEntry.TableName,
-            p.msgType.Descriptor().FullName(),
+            p.messageType.Descriptor().FullName(),
         )
     }
 
-    k, err = p.KeyCodec.Encode(pkEntry.Key)
+    k, err = p.KeyCodec.EncodeKey(pkEntry.Key)
     if err != nil {
         return nil, nil, err
     }
@@ -104,7 +109,7 @@ func (p PrimaryKeyCodec) marshal(key []protoreflect.Value, message proto.Message
     }
 
     // set the primary key values again returning the message to its original state
-    p.SetValues(message.ProtoReflect(), key)
+    p.SetKeyValues(message.ProtoReflect(), key)
 
     return v, nil
 }
@@ -122,12 +127,12 @@ func (p *PrimaryKeyCodec) Unmarshal(key []protoreflect.Value, value []byte, mess
     }
 
     // rehydrate primary key
-    p.SetValues(message.ProtoReflect(), key)
+    p.SetKeyValues(message.ProtoReflect(), key)
     return nil
 }
 
 func (p PrimaryKeyCodec) EncodeKVFromMessage(message protoreflect.Message) (k, v []byte, err error) {
-    ks, k, err := p.KeyCodec.EncodeFromMessage(message)
+    ks, k, err := p.KeyCodec.EncodeKeyFromMessage(message)
     if err != nil {
         return nil, nil, err
     }
@@ -20,14 +20,14 @@ func TestPrimaryKeyCodec(t *testing.T) {
         keyCodec := testutil.TestKeyCodecGen(0, 5).Draw(t, "keyCodec").(testutil.TestKeyCodec)
         pkCodec, err := ormkv.NewPrimaryKeyCodec(
             keyCodec.Codec.Prefix(),
-            (&testpb.A{}).ProtoReflect().Type(),
+            (&testpb.ExampleTable{}).ProtoReflect().Type(),
             keyCodec.Codec.GetFieldNames(),
             proto.UnmarshalOptions{},
         )
         assert.NilError(t, err)
         for i := 0; i < 100; i++ {
-            a := testutil.GenA.Draw(t, fmt.Sprintf("a%d", i)).(*testpb.A)
-            key := keyCodec.Codec.GetValues(a.ProtoReflect())
+            a := testutil.GenA.Draw(t, fmt.Sprintf("a%d", i)).(*testpb.ExampleTable)
+            key := keyCodec.Codec.GetKeyValues(a.ProtoReflect())
             pk1 := &ormkv.PrimaryKeyEntry{
                 TableName: aFullName,
                 Key:       key,
@@ -44,16 +44,16 @@ func TestPrimaryKeyCodec(t *testing.T) {
             entry2, err := pkCodec.DecodeEntry(k, v)
             assert.NilError(t, err)
             pk2 := entry2.(*ormkv.PrimaryKeyEntry)
-            assert.Equal(t, 0, pkCodec.CompareValues(pk1.Key, pk2.Key))
+            assert.Equal(t, 0, pkCodec.CompareKeys(pk1.Key, pk2.Key))
             assert.DeepEqual(t, pk1.Value, pk2.Value, protocmp.Transform())
 
             idxFields, pk3, err := pkCodec.DecodeIndexKey(k, v)
             assert.NilError(t, err)
-            assert.Equal(t, 0, pkCodec.CompareValues(pk1.Key, pk3))
-            assert.Equal(t, 0, pkCodec.CompareValues(pk1.Key, idxFields))
+            assert.Equal(t, 0, pkCodec.CompareKeys(pk1.Key, pk3))
+            assert.Equal(t, 0, pkCodec.CompareKeys(pk1.Key, idxFields))
 
             pkCodec.ClearValues(a.ProtoReflect())
-            pkCodec.SetValues(a.ProtoReflect(), pk1.Key)
+            pkCodec.SetKeyValues(a.ProtoReflect(), pk1.Key)
             assert.DeepEqual(t, a, pk2.Value, protocmp.Transform())
         }
     })
@@ -11,13 +11,13 @@ import (

 // SeqCodec is the codec for auto-incrementing uint64 primary key sequences.
 type SeqCodec struct {
-	tableName protoreflect.FullName
+	messageType protoreflect.FullName
 	prefix []byte
 }

 // NewSeqCodec creates a new SeqCodec.
-func NewSeqCodec(tableName protoreflect.FullName, prefix []byte) *SeqCodec {
-	return &SeqCodec{tableName: tableName, prefix: prefix}
+func NewSeqCodec(messageType protoreflect.MessageType, prefix []byte) *SeqCodec {
+	return &SeqCodec{messageType: messageType.Descriptor().FullName(), prefix: prefix}
 }

 var _ EntryCodec = &SeqCodec{}

@@ -33,7 +33,7 @@ func (s SeqCodec) DecodeEntry(k, v []byte) (Entry, error) {
 	}

 	return &SeqEntry{
-		TableName: s.tableName,
+		TableName: s.messageType,
 		Value: x,
 	}, nil
 }

@@ -44,7 +44,7 @@ func (s SeqCodec) EncodeEntry(entry Entry) (k, v []byte, err error) {
 		return nil, nil, ormerrors.BadDecodeEntry
 	}

-	if seqEntry.TableName != s.tableName {
+	if seqEntry.TableName != s.messageType {
 		return nil, nil, ormerrors.BadDecodeEntry
 	}

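With this change the sequence codec is constructed from a protoreflect.MessageType rather than a bare table name, and the descriptor's full name is what decoded SeqEntry values carry as their TableName. A minimal construction sketch mirroring the updated test below; imports are elided and seqPrefix is a hypothetical []byte prefix:

typ := (&testpb.ExampleTable{}).ProtoReflect().Type()
cdc := ormkv.NewSeqCodec(typ, seqPrefix) // seqPrefix: assumed prefix bytes for the sequence key

// As the test asserts, decoding a nil stored value must not error.
seq, err := cdc.DecodeValue(nil)
_, _ = seq, err
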
@@ -15,8 +15,9 @@ import (
 func TestSeqCodec(t *testing.T) {
 	rapid.Check(t, func(t *rapid.T) {
 		prefix := rapid.SliceOfN(rapid.Byte(), 0, 5).Draw(t, "prefix").([]byte)
-		tableName := (&testpb.A{}).ProtoReflect().Descriptor().FullName()
-		cdc := ormkv.NewSeqCodec(tableName, prefix)
+		typ := (&testpb.ExampleTable{}).ProtoReflect().Type()
+		tableName := typ.Descriptor().FullName()
+		cdc := ormkv.NewSeqCodec(typ, prefix)

 		seq, err := cdc.DecodeValue(nil)
 		assert.NilError(t, err)

@@ -11,7 +11,6 @@ import (

 // UniqueKeyCodec is the codec for unique indexes.
 type UniqueKeyCodec struct {
-	tableName protoreflect.FullName
 	pkFieldOrder []struct {
 		inKey bool
 		i int
@@ -20,10 +19,20 @@ type UniqueKeyCodec struct {
 	valueCodec *KeyCodec
 }

+var _ IndexCodec = &UniqueKeyCodec{}
+
 // NewUniqueKeyCodec creates a new UniqueKeyCodec with an optional prefix for the
 // provided message descriptor, index and primary key fields.
-func NewUniqueKeyCodec(prefix []byte, messageDescriptor protoreflect.MessageDescriptor, indexFields, primaryKeyFields []protoreflect.Name) (*UniqueKeyCodec, error) {
-	keyCodec, err := NewKeyCodec(prefix, messageDescriptor, indexFields)
+func NewUniqueKeyCodec(prefix []byte, messageType protoreflect.MessageType, indexFields, primaryKeyFields []protoreflect.Name) (*UniqueKeyCodec, error) {
+	if len(indexFields) == 0 {
+		return nil, ormerrors.InvalidTableDefinition.Wrapf("index fields are empty")
+	}
+
+	if len(primaryKeyFields) == 0 {
+		return nil, ormerrors.InvalidTableDefinition.Wrapf("primary key fields are empty")
+	}
+
+	keyCodec, err := NewKeyCodec(prefix, messageType, indexFields)
 	if err != nil {
 		return nil, err
 	}
@@ -55,23 +64,20 @@ func NewUniqueKeyCodec(prefix []byte, messageDescriptor protoreflect.MessageDesc
 		}
 	}

-	valueCodec, err := NewKeyCodec(nil, messageDescriptor, valueFields)
+	valueCodec, err := NewKeyCodec(nil, messageType, valueFields)
 	if err != nil {
 		return nil, err
 	}

 	return &UniqueKeyCodec{
-		tableName: messageDescriptor.FullName(),
 		pkFieldOrder: pkFieldOrder,
 		keyCodec: keyCodec,
 		valueCodec: valueCodec,
 	}, nil
 }

-var _ IndexCodec = &UniqueKeyCodec{}
-
 func (u UniqueKeyCodec) DecodeIndexKey(k, v []byte) (indexFields, primaryKey []protoreflect.Value, err error) {
-	ks, err := u.keyCodec.Decode(bytes.NewReader(k))
+	ks, err := u.keyCodec.DecodeKey(bytes.NewReader(k))

 	// got prefix key
 	if err == io.EOF {
@@ -85,7 +91,7 @@ func (u UniqueKeyCodec) DecodeIndexKey(k, v []byte) (indexFields, primaryKey []p
 		return ks, nil, err
 	}

-	vs, err := u.valueCodec.Decode(bytes.NewReader(v))
+	vs, err := u.valueCodec.DecodeKey(bytes.NewReader(v))
 	if err != nil {
 		return nil, nil, err
 	}
@@ -117,7 +123,7 @@ func (u UniqueKeyCodec) DecodeEntry(k, v []byte) (Entry, error) {
 	}

 	return &IndexKeyEntry{
-		TableName: u.tableName,
+		TableName: u.MessageType().Descriptor().FullName(),
 		Fields: u.keyCodec.fieldNames,
 		IsUnique: true,
 		IndexValues: idxVals,
@@ -130,7 +136,7 @@ func (u UniqueKeyCodec) EncodeEntry(entry Entry) (k, v []byte, err error) {
 	if !ok {
 		return nil, nil, ormerrors.BadDecodeEntry
 	}
-	k, err = u.keyCodec.Encode(indexEntry.IndexValues)
+	k, err = u.keyCodec.EncodeKey(indexEntry.IndexValues)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -155,16 +161,44 @@ func (u UniqueKeyCodec) EncodeEntry(entry Entry) (k, v []byte, err error) {
 		}
 	}

-	v, err = u.valueCodec.Encode(values)
+	v, err = u.valueCodec.EncodeKey(values)
 	return k, v, err
 }

 func (u UniqueKeyCodec) EncodeKVFromMessage(message protoreflect.Message) (k, v []byte, err error) {
-	_, k, err = u.keyCodec.EncodeFromMessage(message)
+	_, k, err = u.keyCodec.EncodeKeyFromMessage(message)
 	if err != nil {
 		return nil, nil, err
 	}

-	_, v, err = u.valueCodec.EncodeFromMessage(message)
+	_, v, err = u.valueCodec.EncodeKeyFromMessage(message)
 	return k, v, err
 }

+func (u UniqueKeyCodec) GetFieldNames() []protoreflect.Name {
+	return u.keyCodec.GetFieldNames()
+}
+
+func (u UniqueKeyCodec) GetKeyCodec() *KeyCodec {
+	return u.keyCodec
+}
+
+func (u UniqueKeyCodec) GetValueCodec() *KeyCodec {
+	return u.valueCodec
+}
+
+func (u UniqueKeyCodec) CompareKeys(key1, key2 []protoreflect.Value) int {
+	return u.keyCodec.CompareKeys(key1, key2)
+}
+
+func (u UniqueKeyCodec) EncodeKeyFromMessage(message protoreflect.Message) (keyValues []protoreflect.Value, key []byte, err error) {
+	return u.keyCodec.EncodeKeyFromMessage(message)
+}
+
+func (u UniqueKeyCodec) IsFullyOrdered() bool {
+	return u.keyCodec.IsFullyOrdered()
+}
+
+func (u UniqueKeyCodec) MessageType() protoreflect.MessageType {
+	return u.keyCodec.messageType
+}

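For reference, a sketch of constructing the codec with the new MessageType-based signature; the field names here are placeholders rather than the real testpb schema, and imports are elided:

msgType := (&testpb.ExampleTable{}).ProtoReflect().Type()
uniqCdc, err := ormkv.NewUniqueKeyCodec(
	indexPrefix,                // optional []byte prefix for this index (assumed)
	msgType,                    // the table's message type
	[]protoreflect.Name{"u32"}, // unique index fields (placeholder name)
	[]protoreflect.Name{"id"},  // primary key fields (placeholder name)
)
if err != nil {
	// the new guards reject empty index or primary key field lists up front
	panic(err)
}
_ = uniqCdc
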
@@ -17,20 +17,20 @@ func TestUniqueKeyCodec(t *testing.T) {
 	rapid.Check(t, func(t *rapid.T) {
 		keyCodec := testutil.TestKeyCodecGen(1, 5).Draw(t, "keyCodec").(testutil.TestKeyCodec)
 		pkCodec := testutil.TestKeyCodecGen(1, 5).Draw(t, "primaryKeyCodec").(testutil.TestKeyCodec)
-		desc := (&testpb.A{}).ProtoReflect().Descriptor()
+		messageType := (&testpb.ExampleTable{}).ProtoReflect().Type()
 		uniqueKeyCdc, err := ormkv.NewUniqueKeyCodec(
 			keyCodec.Codec.Prefix(),
-			desc,
+			messageType,
 			keyCodec.Codec.GetFieldNames(),
 			pkCodec.Codec.GetFieldNames(),
 		)
 		assert.NilError(t, err)
 		for i := 0; i < 100; i++ {
-			a := testutil.GenA.Draw(t, fmt.Sprintf("a%d", i)).(*testpb.A)
-			key := keyCodec.Codec.GetValues(a.ProtoReflect())
-			pk := pkCodec.Codec.GetValues(a.ProtoReflect())
+			a := testutil.GenA.Draw(t, fmt.Sprintf("a%d", i)).(*testpb.ExampleTable)
+			key := keyCodec.Codec.GetKeyValues(a.ProtoReflect())
+			pk := pkCodec.Codec.GetKeyValues(a.ProtoReflect())
 			uniq1 := &ormkv.IndexKeyEntry{
-				TableName: desc.FullName(),
+				TableName: messageType.Descriptor().FullName(),
 				Fields: keyCodec.Codec.GetFieldNames(),
 				IsUnique: true,
 				IndexValues: key,
@@ -47,16 +47,16 @@ func TestUniqueKeyCodec(t *testing.T) {
 			entry2, err := uniqueKeyCdc.DecodeEntry(k, v)
 			assert.NilError(t, err)
 			uniq2 := entry2.(*ormkv.IndexKeyEntry)
-			assert.Equal(t, 0, keyCodec.Codec.CompareValues(uniq1.IndexValues, uniq2.IndexValues))
-			assert.Equal(t, 0, pkCodec.Codec.CompareValues(uniq1.PrimaryKey, uniq2.PrimaryKey))
+			assert.Equal(t, 0, keyCodec.Codec.CompareKeys(uniq1.IndexValues, uniq2.IndexValues))
+			assert.Equal(t, 0, pkCodec.Codec.CompareKeys(uniq1.PrimaryKey, uniq2.PrimaryKey))
 			assert.Equal(t, true, uniq2.IsUnique)
-			assert.Equal(t, desc.FullName(), uniq2.TableName)
+			assert.Equal(t, messageType.Descriptor().FullName(), uniq2.TableName)
 			assert.DeepEqual(t, uniq1.Fields, uniq2.Fields)

 			idxFields, pk2, err := uniqueKeyCdc.DecodeIndexKey(k, v)
 			assert.NilError(t, err)
-			assert.Equal(t, 0, keyCodec.Codec.CompareValues(key, idxFields))
-			assert.Equal(t, 0, pkCodec.Codec.CompareValues(pk, pk2))
+			assert.Equal(t, 0, keyCodec.Codec.CompareKeys(key, idxFields))
+			assert.Equal(t, 0, pkCodec.Codec.CompareKeys(pk, pk2))
 		}
 	})
 }

@@ -1,17 +0,0 @@
-package ormkv
-
-import (
-	"bytes"
-	"io"
-)
-
-func skipPrefix(r *bytes.Reader, prefix []byte) error {
-	n := len(prefix)
-	if n > 0 {
-		// we skip checking the prefix for performance reasons because we assume
-		// that it was checked by the caller
-		_, err := r.Seek(int64(n), io.SeekCurrent)
-		return err
-	}
-	return nil
-}

orm/go.mod (23 changes)
@@ -4,18 +4,37 @@ go 1.17

 require (
 	github.com/cosmos/cosmos-proto v1.0.0-alpha6
-	github.com/cosmos/cosmos-sdk/api v0.1.0-alpha1
+	github.com/cosmos/cosmos-sdk/api v0.1.0-alpha2
 	github.com/cosmos/cosmos-sdk/errors v1.0.0-beta.2
+	github.com/tendermint/tm-db v0.6.6
 	google.golang.org/protobuf v1.27.1
 	gotest.tools/v3 v3.1.0
 	pgregory.net/rapid v0.4.7
 )

 require (
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/DataDog/zstd v1.4.5 // indirect
+	github.com/cespare/xxhash v1.1.0 // indirect
+	github.com/dgraph-io/badger/v2 v2.2007.2 // indirect
+	github.com/dgraph-io/ristretto v0.0.3 // indirect
+	github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
+	github.com/dustin/go-humanize v1.0.0 // indirect
+	github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
+	github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
+	github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3 // indirect
+	github.com/google/btree v1.0.0 // indirect
 	github.com/google/go-cmp v0.5.5 // indirect
+	github.com/jmhodges/levigo v1.0.0 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
+	github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca // indirect
+	github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect
+	go.etcd.io/bbolt v1.3.6 // indirect
+	golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f // indirect
+	golang.org/x/sys v0.0.0-20210903071746-97244b99971b // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
 )

 replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1

orm/go.sum (233 changes)
@@ -1,56 +1,283 @@
[go.sum regenerated: checksum entries added for github.com/tendermint/tm-db v0.6.6 and its indirect dependencies (badger, ristretto, goleveldb, gorocksdb, bbolt, etc.); generated lock-file diff omitted]

@@ -0,0 +1,40 @@
+package listinternal
+
+import (
+	"fmt"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// Options is the internal list options struct.
+type Options struct {
+	Start, End, Prefix []protoreflect.Value
+	Reverse bool
+	Cursor []byte
+}
+
+func (o Options) Validate() error {
+	if o.Start != nil || o.End != nil {
+		if o.Prefix != nil {
+			return fmt.Errorf("can either use Start/End or Prefix, not both")
+		}
+	}
+
+	return nil
+}
+
+type Option interface {
+	apply(*Options)
+}
+
+type FuncOption func(*Options)
+
+func (f FuncOption) apply(options *Options) {
+	f(options)
+}
+
+func ApplyOptions(opts *Options, funcOpts []Option) {
+	for _, opt := range funcOpts {
+		opt.apply(opts)
+	}
+}

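A sketch of how these functional options are meant to be consumed; the reverse option defined here is illustrative only (this file only defines Options, Option, FuncOption and ApplyOptions), and imports are elided:

// A caller-facing option can be expressed as a FuncOption...
reverse := listinternal.FuncOption(func(o *listinternal.Options) {
	o.Reverse = true
})

// ...collected into a slice of Option values and applied before validation.
opts := &listinternal.Options{}
listinternal.ApplyOptions(opts, []listinternal.Option{reverse})
if err := opts.Validate(); err != nil {
	panic(err) // e.g. combining Start/End with Prefix is rejected
}
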
@@ -0,0 +1,247 @@
+package testkv
+
+import (
+	"fmt"
+
+	"google.golang.org/protobuf/proto"
+
+	"github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
+	"github.com/cosmos/cosmos-sdk/orm/model/kv"
+	"github.com/cosmos/cosmos-sdk/orm/model/ormtable"
+)
+
+// Debugger is an interface that handles debug info from the debug store wrapper.
+type Debugger interface {
+
+	// Log logs a single log message.
+	Log(string)
+
+	// Decode decodes a key-value entry into a debug string.
+	Decode(key, value []byte) string
+}
+
+// NewDebugBackend wraps both stores from a Backend with a debugger.
+func NewDebugBackend(backend ormtable.Backend, debugger Debugger) ormtable.Backend {
+	return ormtable.NewBackend(ormtable.BackendOptions{
+		CommitmentStore: NewDebugStore(backend.CommitmentStore(), debugger, "commit"),
+		IndexStore: NewDebugStore(backend.IndexStore(), debugger, "index"),
+		Hooks: debugHooks{debugger: debugger, hooks: backend.Hooks()},
+	})
+}
+
+type debugStore struct {
+	store kv.Store
+	debugger Debugger
+	storeName string
+}
+
+// NewDebugStore wraps the store with the debugger instance returning a debug store wrapper.
+func NewDebugStore(store kv.Store, debugger Debugger, storeName string) kv.Store {
+	return &debugStore{store: store, debugger: debugger, storeName: storeName}
+}
+
+func (t debugStore) Get(key []byte) ([]byte, error) {
+	val, err := t.store.Get(key)
+	if err != nil {
+		if t.debugger != nil {
+			t.debugger.Log(fmt.Sprintf("ERR on GET %s: %v", t.debugger.Decode(key, nil), err))
+		}
+		return nil, err
+	}
+	if t.debugger != nil {
+		t.debugger.Log(fmt.Sprintf("GET %x %x", key, val))
+		t.debugger.Log(fmt.Sprintf(" %s", t.debugger.Decode(key, val)))
+	}
+	return val, nil
+}
+
+func (t debugStore) Has(key []byte) (bool, error) {
+	has, err := t.store.Has(key)
+	if err != nil {
+		if t.debugger != nil {
+			t.debugger.Log(fmt.Sprintf("ERR on HAS %s: %v", t.debugger.Decode(key, nil), err))
+		}
+		return has, err
+	}
+	if t.debugger != nil {
+		t.debugger.Log(fmt.Sprintf("HAS %x", key))
+		t.debugger.Log(fmt.Sprintf(" %s", t.debugger.Decode(key, nil)))
+	}
+	return has, nil
+}
+
+func (t debugStore) Iterator(start, end []byte) (kv.Iterator, error) {
+	if t.debugger != nil {
+		t.debugger.Log(fmt.Sprintf("ITERATOR %x -> %x", start, end))
+	}
+	it, err := t.store.Iterator(start, end)
+	if err != nil {
+		return nil, err
+	}
+	return &debugIterator{
+		iterator: it,
+		storeName: t.storeName,
+		debugger: t.debugger,
+	}, nil
+}
+
+func (t debugStore) ReverseIterator(start, end []byte) (kv.Iterator, error) {
+	if t.debugger != nil {
+		t.debugger.Log(fmt.Sprintf("ITERATOR %x <- %x", start, end))
+	}
+	it, err := t.store.ReverseIterator(start, end)
+	if err != nil {
+		return nil, err
+	}
+	return &debugIterator{
+		iterator: it,
+		storeName: t.storeName,
+		debugger: t.debugger,
+	}, nil
+}
+
+func (t debugStore) Set(key, value []byte) error {
+	if t.debugger != nil {
+		t.debugger.Log(fmt.Sprintf("SET %x %x", key, value))
+		t.debugger.Log(fmt.Sprintf(" %s", t.debugger.Decode(key, value)))
+	}
+	err := t.store.Set(key, value)
+	if err != nil {
+		if t.debugger != nil {
+			t.debugger.Log(fmt.Sprintf("ERR on SET %s: %v", t.debugger.Decode(key, value), err))
+		}
+		return err
+	}
+	return nil
+}
+
+func (t debugStore) Delete(key []byte) error {
+	if t.debugger != nil {
+		t.debugger.Log(fmt.Sprintf("DEL %x", key))
+		t.debugger.Log(fmt.Sprintf("DEL %s", t.debugger.Decode(key, nil)))
+	}
+	err := t.store.Delete(key)
+	if err != nil {
+		if t.debugger != nil {
+			t.debugger.Log(fmt.Sprintf("ERR on SET %s: %v", t.debugger.Decode(key, nil), err))
+		}
+		return err
+	}
+	return nil
+}
+
+var _ kv.Store = &debugStore{}
+
+type debugIterator struct {
+	iterator kv.Iterator
+	storeName string
+	debugger Debugger
+}
+
+func (d debugIterator) Domain() (start []byte, end []byte) {
+	start, end = d.iterator.Domain()
+	d.debugger.Log(fmt.Sprintf(" DOMAIN %x -> %x", start, end))
+	return start, end
+}
+
+func (d debugIterator) Valid() bool {
+	valid := d.iterator.Valid()
+	d.debugger.Log(fmt.Sprintf(" VALID %t", valid))
+	return valid
+}
+
+func (d debugIterator) Next() {
+	d.debugger.Log(" NEXT")
+	d.iterator.Next()
+}
+
+func (d debugIterator) Key() (key []byte) {
+	key = d.iterator.Key()
+	value := d.iterator.Value()
+	d.debugger.Log(fmt.Sprintf(" KEY %x %x", key, value))
+	d.debugger.Log(fmt.Sprintf(" %s", d.debugger.Decode(key, value)))
+	return key
+}
+
+func (d debugIterator) Value() (value []byte) {
+	return d.iterator.Value()
+}
+
+func (d debugIterator) Error() error {
+	err := d.iterator.Error()
+	d.debugger.Log(fmt.Sprintf(" ERR %+v", err))
+	return err
+}
+
+func (d debugIterator) Close() error {
+	d.debugger.Log(" CLOSE")
+	return d.iterator.Close()
+}
+
+var _ kv.Iterator = &debugIterator{}
+
+// EntryCodecDebugger is a Debugger instance that uses an EntryCodec and Print
+// function for debugging.
+type EntryCodecDebugger struct {
+	EntryCodec ormkv.EntryCodec
+	Print func(string)
+}
+
+func (d *EntryCodecDebugger) Log(s string) {
+	if d.Print != nil {
+		d.Print(s)
+	} else {
+		fmt.Println(s)
+	}
+}
+
+func (d *EntryCodecDebugger) Decode(key, value []byte) string {
+	entry, err := d.EntryCodec.DecodeEntry(key, value)
+	if err != nil {
+		return fmt.Sprintf("ERR:%v", err)
+	}
+
+	return entry.String()
+}
+
+type debugHooks struct {
+	debugger Debugger
+	hooks ormtable.Hooks
+}
+
+func (d debugHooks) OnInsert(message proto.Message) error {
+	d.debugger.Log(fmt.Sprintf(
+		"ORM INSERT %s %s",
+		message.ProtoReflect().Descriptor().FullName(),
+		message,
+	))
+	if d.hooks != nil {
+		return d.hooks.OnInsert(message)
+	}
+	return nil
+}
+
+func (d debugHooks) OnUpdate(existing, new proto.Message) error {
+	d.debugger.Log(fmt.Sprintf(
+		"ORM UPDATE %s %s -> %s",
+		existing.ProtoReflect().Descriptor().FullName(),
+		existing,
+		new,
+	))
+	if d.hooks != nil {
+		return d.hooks.OnUpdate(existing, new)
+	}
+	return nil
+}
+
+func (d debugHooks) OnDelete(message proto.Message) error {
+	d.debugger.Log(fmt.Sprintf(
+		"ORM DELETE %s %s",
+		message.ProtoReflect().Descriptor().FullName(),
+		message,
+	))
+	if d.hooks != nil {
+		return d.hooks.OnDelete(message)
+	}
+	return nil
+}

@ -0,0 +1,27 @@
package testkv

import (
	dbm "github.com/tendermint/tm-db"

	"github.com/cosmos/cosmos-sdk/orm/model/ormtable"
)

// NewSplitMemBackend returns a Backend instance
// which uses two separate memory stores to simulate behavior when there
// are really two separate backing stores.
func NewSplitMemBackend() ormtable.Backend {
	return ormtable.NewBackend(ormtable.BackendOptions{
		CommitmentStore: dbm.NewMemDB(),
		IndexStore:      dbm.NewMemDB(),
	})
}

// NewSharedMemBackend returns a Backend instance
// which uses a single backing memory store to simulate legacy scenarios
// where only a single KV-store is available to modules.
func NewSharedMemBackend() ormtable.Backend {
	return ormtable.NewBackend(ormtable.BackendOptions{
		CommitmentStore: dbm.NewMemDB(),
		// commit store is automatically used as the index store
	})
}
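Both helpers are consumed the same way by tests: the backend is wrapped into a context that tables read it back out of. A minimal illustrative sketch (not part of the diff), using the helpers above together with the ormtable wrapper introduced later in this change:

	// Wire a test backend into a context usable by ORM tables.
	backend := testkv.NewSplitMemBackend() // or testkv.NewSharedMemBackend()
	ctx := ormtable.WrapContextDefault(backend)
	_ = ctx // pass ctx to table.Save, table.ExportJSON, etc.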
@ -8,15 +8,16 @@ import "cosmos/orm/v1alpha1/orm.proto";

 option go_package = "github.com/cosmos/cosmos-sdk/orm/internal/testpb";

-message A {
+message ExampleTable {
   option (cosmos.orm.v1alpha1.table) = {
     id: 1;
     primary_key: {
-      fields: "u32,u64,str"
+      fields: "u32,i64,str"
     }
     index:{
       id: 1;
       fields:"u64,str"
+      unique: true
     }
     index:{
       id: 2;
@ -49,10 +50,15 @@ message A {
   // Invalid key fields:
   repeated uint32 repeated = 17;
   map<string, uint32> map = 18;
-  B msg = 19;
+  ExampleMessage msg = 19;
   oneof sum {
     uint32 oneof = 20;
   }
+
+  message ExampleMessage {
+    string foo = 1;
+    int32 bar = 2;
+  }
 }

 enum Enum {
@ -63,20 +69,28 @@ enum Enum {
   ENUM_NEG_THREE = -3;
 }

-message B {
-  option (cosmos.orm.v1alpha1.singleton) = {id: 2};
-  string x = 1;
-}
-
-message C {
+message ExampleAutoIncrementTable {
   option (cosmos.orm.v1alpha1.table) = {
     id: 3
     primary_key:{
       fields:"id"
       auto_increment: true
     }
+    index:{
+      id: 1
+      fields:"x"
+      unique: true
+    }
   };

   uint64 id = 1;
   string x = 2;
+  int32 y = 3;
+}
+
+message ExampleSingleton {
+  option (cosmos.orm.v1alpha1.singleton) = {id: 2};
+  string foo = 1;
+  int32 bar = 2;
 }
File diff suppressed because it is too large
@ -4,8 +4,9 @@ import (
 	"fmt"
 	"strings"

-	"google.golang.org/protobuf/reflect/protoreflect"
 	"google.golang.org/protobuf/types/known/durationpb"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
 	"google.golang.org/protobuf/types/known/timestamppb"
 	"pgregory.net/rapid"

@ -14,7 +15,7 @@ import (
 	"github.com/cosmos/cosmos-sdk/orm/internal/testpb"
 )

-// TestFieldSpec defines a test field against the testpb.A message.
+// TestFieldSpec defines a test field against the testpb.ExampleTable message.
 type TestFieldSpec struct {
 	FieldName protoreflect.Name
 	Gen       *rapid.Generator
@ -78,19 +79,23 @@ var TestFieldSpecs = []TestFieldSpec{
 	},
 	{
 		"ts",
-		rapid.ArrayOf(2, rapid.Int64()).Map(func(xs [2]int64) protoreflect.Message {
+		rapid.Custom(func(t *rapid.T) protoreflect.Message {
+			seconds := rapid.Int64Range(-9999999999, 9999999999).Draw(t, "seconds").(int64)
+			nanos := rapid.Int32Range(0, 999999999).Draw(t, "nanos").(int32)
 			return (&timestamppb.Timestamp{
-				Seconds: xs[0],
-				Nanos:   int32(xs[1]),
+				Seconds: seconds,
+				Nanos:   nanos,
 			}).ProtoReflect()
 		}),
 	},
 	{
 		"dur",
-		rapid.ArrayOf(2, rapid.Int64()).Map(func(xs [2]int64) protoreflect.Message {
+		rapid.Custom(func(t *rapid.T) protoreflect.Message {
+			seconds := rapid.Int64Range(0, 315576000000).Draw(t, "seconds").(int64)
+			nanos := rapid.Int32Range(0, 999999999).Draw(t, "nanos").(int32)
 			return (&durationpb.Duration{
-				Seconds: xs[0],
-				Nanos:   int32(xs[1]),
+				Seconds: seconds,
+				Nanos:   nanos,
 			}).ProtoReflect()
 		}),
 	},
@ -111,7 +116,7 @@ func MakeTestCodec(fname protoreflect.Name, nonTerminal bool) (ormfield.Codec, e
 }

 func GetTestField(fname protoreflect.Name) protoreflect.FieldDescriptor {
-	a := &testpb.A{}
+	a := &testpb.ExampleTable{}
 	return a.ProtoReflect().Descriptor().Fields().ByName(fname)
 }

@ -147,8 +152,8 @@ func TestKeyCodecGen(minLen, maxLen int) *rapid.Generator {

 		prefix := rapid.SliceOfN(rapid.Byte(), 0, 5).Draw(t, "prefix").([]byte)

-		desc := (&testpb.A{}).ProtoReflect().Descriptor()
-		cdc, err := ormkv.NewKeyCodec(prefix, desc, fields)
+		msgType := (&testpb.ExampleTable{}).ProtoReflect().Type()
+		cdc, err := ormkv.NewKeyCodec(prefix, msgType, fields)
 		if err != nil {
 			panic(err)
 		}
@ -169,8 +174,8 @@ func (k TestKeyCodec) Draw(t *rapid.T, id string) []protoreflect.Value {
 	return keyValues
 }

-var GenA = rapid.Custom(func(t *rapid.T) *testpb.A {
-	a := &testpb.A{}
+var GenA = rapid.Custom(func(t *rapid.T) *testpb.ExampleTable {
+	a := &testpb.ExampleTable{}
 	ref := a.ProtoReflect()
 	for _, spec := range TestFieldSpecs {
 		field := GetTestField(spec.FieldName)
@ -179,12 +184,3 @@ var GenA = rapid.Custom(func(t *rapid.T) *testpb.A {
 	}
 	return a
 })
-
-func ValuesOf(values ...interface{}) []protoreflect.Value {
-	n := len(values)
-	res := make([]protoreflect.Value, n)
-	for i := 0; i < n; i++ {
-		res[i] = protoreflect.ValueOf(values[i])
-	}
-	return res
-}
@ -0,0 +1,3 @@
// Package model contains packages which define ORM data "model" types
// such as tables, indexes, and schemas.
package model
@ -0,0 +1,50 @@
// Package kv defines the abstract interfaces which ORM tables and indexes
// use for reading and writing data against a KV-store backend.
package kv

import (
	dbm "github.com/tendermint/tm-db"
)

// ReadonlyStore is an interface for readonly access to a kv-store.
type ReadonlyStore interface {
	// Get fetches the value of the given key, or nil if it does not exist.
	// CONTRACT: key, value readonly []byte
	Get(key []byte) ([]byte, error)

	// Has checks if a key exists.
	// CONTRACT: key, value readonly []byte
	Has(key []byte) (bool, error)

	// Iterator returns an iterator over a domain of keys, in ascending order. The caller must call
	// Close when done. End is exclusive, and start must be less than end. A nil start iterates
	// from the first key, and a nil end iterates to the last key (inclusive). Empty keys are not
	// valid.
	// CONTRACT: No writes may happen within a domain while an iterator exists over it.
	// CONTRACT: start, end readonly []byte
	Iterator(start, end []byte) (Iterator, error)

	// ReverseIterator returns an iterator over a domain of keys, in descending order. The caller
	// must call Close when done. End is exclusive, and start must be less than end. A nil end
	// iterates from the last key (inclusive), and a nil start iterates to the first key (inclusive).
	// Empty keys are not valid.
	// CONTRACT: No writes may happen within a domain while an iterator exists over it.
	// CONTRACT: start, end readonly []byte
	ReverseIterator(start, end []byte) (Iterator, error)
}

// Iterator aliases github.com/tendermint/tm-db.Iterator.
type Iterator = dbm.Iterator

// Store is an interface for writing to a kv-store.
type Store interface {
	ReadonlyStore

	// Set sets the value for the given key, replacing it if it already exists.
	// CONTRACT: key, value readonly []byte
	Set(key, value []byte) error

	// Delete deletes the key, or does nothing if the key does not exist.
	// CONTRACT: key readonly []byte
	Delete(key []byte) error
}
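Because these method sets mirror tm-db, any tm-db database should satisfy them directly, which is how the testkv memory backends above are able to plug a MemDB straight into BackendOptions. A small illustrative sketch (assumed, not part of the diff):

	// Compile-time checks that a tm-db in-memory DB can back both interfaces.
	var (
		_ Store         = dbm.NewMemDB()
		_ ReadonlyStore = dbm.NewMemDB()
	)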
@ -0,0 +1,77 @@
// Package ormlist defines options for listing items from ORM indexes.
package ormlist

import (
	"github.com/cosmos/cosmos-sdk/orm/encoding/encodeutil"
	"github.com/cosmos/cosmos-sdk/orm/internal/listinternal"
)

// Option represents a list option.
type Option = listinternal.Option

// Start defines the values to use to start range iteration. It cannot be
// combined with Prefix.
//
// Values must correspond in type to the index's fields and the number of values
// provided cannot exceed the number of fields in the index, although fewer
// values can be provided.
//
// Range iteration can only be done for start and end values which are
// well-ordered, meaning that any unordered components must be equal. Ex.
// the bytes type is considered unordered, so if a range iterator is created
// over an index with a bytes field, both start and end must have the same
// value for bytes.
func Start(values ...interface{}) Option {
	return listinternal.FuncOption(func(options *listinternal.Options) {
		options.Start = encodeutil.ValuesOf(values...)
	})
}

// End defines the values to use to end range iteration. It cannot be
// combined with Prefix.
//
// Values must correspond in type to the index's fields and the number of values
// provided cannot exceed the number of fields in the index, although fewer
// values can be provided.
//
// Range iteration can only be done for start and end values which are
// well-ordered, meaning that any unordered components must be equal. Ex.
// the bytes type is considered unordered, so if a range iterator is created
// over an index with a bytes field, both start and end must have the same
// value for bytes.
func End(values ...interface{}) Option {
	return listinternal.FuncOption(func(options *listinternal.Options) {
		options.End = encodeutil.ValuesOf(values...)
	})
}

// Prefix defines values to use for prefix iteration. It cannot be used
// together with Start or End.
//
// Values must correspond in type to the index's fields and the number of values
// provided cannot exceed the number of fields in the index, although fewer
// values can be provided.
func Prefix(values ...interface{}) Option {
	return listinternal.FuncOption(func(options *listinternal.Options) {
		options.Prefix = encodeutil.ValuesOf(values...)
	})
}

// Reverse reverses the direction of iteration. If Reverse is
// provided twice, iteration will happen in the forward direction.
func Reverse() Option {
	return listinternal.FuncOption(func(options *listinternal.Options) {
		options.Reverse = !options.Reverse
	})
}

// Cursor specifies a cursor after which to restart iteration. Cursor values
// are returned by iterators and in pagination results.
func Cursor(cursor CursorT) Option {
	return listinternal.FuncOption(func(options *listinternal.Options) {
		options.Cursor = cursor
	})
}

// CursorT defines a cursor type.
type CursorT []byte
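These options compose freely on a single Iterator call. An illustrative sketch (not part of the diff; it assumes an Index value idx, a wrapped context ctx, and a previously returned cursor lastCursor):

	// Iterate in reverse over the prefix u32 = 7, resuming after lastCursor.
	it, err := idx.Iterator(ctx,
		ormlist.Prefix(uint32(7)),
		ormlist.Reverse(),
		ormlist.Cursor(lastCursor),
	)
	if err != nil {
		return err
	}
	defer it.Close()
	for it.Next() {
		// read entries via it.GetMessage() or it.UnmarshalMessage(...)
	}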
@ -0,0 +1,218 @@
package ormtable

import (
	"context"
	"encoding/json"
	"fmt"
	"io"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"

	"github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
	"github.com/cosmos/cosmos-sdk/orm/model/kv"
	"github.com/cosmos/cosmos-sdk/orm/types/ormerrors"
)

// autoIncrementTable is a Table implementation for tables with an
// auto-incrementing uint64 primary key.
type autoIncrementTable struct {
	*tableImpl
	autoIncField protoreflect.FieldDescriptor
	seqCodec     *ormkv.SeqCodec
}

func (t autoIncrementTable) Save(ctx context.Context, message proto.Message) error {
	backend, err := t.getBackend(ctx)
	if err != nil {
		return err
	}

	return t.save(backend, message, saveModeDefault)
}

func (t autoIncrementTable) Insert(ctx context.Context, message proto.Message) error {
	backend, err := t.getBackend(ctx)
	if err != nil {
		return err
	}

	return t.save(backend, message, saveModeInsert)
}

func (t autoIncrementTable) Update(ctx context.Context, message proto.Message) error {
	backend, err := t.getBackend(ctx)
	if err != nil {
		return err
	}

	return t.save(backend, message, saveModeUpdate)
}

func (t *autoIncrementTable) save(backend Backend, message proto.Message, mode saveMode) error {
	messageRef := message.ProtoReflect()
	val := messageRef.Get(t.autoIncField).Uint()
	writer := newBatchIndexCommitmentWriter(backend)
	defer writer.Close()

	if val == 0 {
		if mode == saveModeUpdate {
			return ormerrors.PrimaryKeyInvalidOnUpdate
		}

		mode = saveModeInsert
		key, err := t.nextSeqValue(writer.IndexStore())
		if err != nil {
			return err
		}

		messageRef.Set(t.autoIncField, protoreflect.ValueOfUint64(key))
	} else {
		if mode == saveModeInsert {
			return ormerrors.AutoIncrementKeyAlreadySet
		}

		mode = saveModeUpdate
	}

	return t.tableImpl.doSave(writer, message, mode)
}

func (t *autoIncrementTable) curSeqValue(kv kv.ReadonlyStore) (uint64, error) {
	bz, err := kv.Get(t.seqCodec.Prefix())
	if err != nil {
		return 0, err
	}

	return t.seqCodec.DecodeValue(bz)
}

func (t *autoIncrementTable) nextSeqValue(kv kv.Store) (uint64, error) {
	seq, err := t.curSeqValue(kv)
	if err != nil {
		return 0, err
	}

	seq++
	return seq, t.setSeqValue(kv, seq)
}

func (t *autoIncrementTable) setSeqValue(kv kv.Store, seq uint64) error {
	return kv.Set(t.seqCodec.Prefix(), t.seqCodec.EncodeValue(seq))
}

func (t autoIncrementTable) EncodeEntry(entry ormkv.Entry) (k, v []byte, err error) {
	if _, ok := entry.(*ormkv.SeqEntry); ok {
		return t.seqCodec.EncodeEntry(entry)
	}
	return t.tableImpl.EncodeEntry(entry)
}

func (t autoIncrementTable) ValidateJSON(reader io.Reader) error {
	return t.decodeAutoIncJson(nil, reader, func(message proto.Message, maxID uint64) error {
		messageRef := message.ProtoReflect()
		id := messageRef.Get(t.autoIncField).Uint()
		if id > maxID {
			return fmt.Errorf("invalid ID %d, expected a value <= %d", id, maxID)
		}

		if t.customJSONValidator != nil {
			return t.customJSONValidator(message)
		} else {
			return DefaultJSONValidator(message)
		}
	})
}

func (t autoIncrementTable) ImportJSON(ctx context.Context, reader io.Reader) error {
	backend, err := t.getBackend(ctx)
	if err != nil {
		return err
	}

	return t.decodeAutoIncJson(backend, reader, func(message proto.Message, maxID uint64) error {
		messageRef := message.ProtoReflect()
		id := messageRef.Get(t.autoIncField).Uint()
		if id == 0 {
			// we don't have an ID in the JSON, so we call Save to insert and
			// generate one
			return t.save(backend, message, saveModeInsert)
		} else {
			if id > maxID {
				return fmt.Errorf("invalid ID %d, expected a value <= %d", id, maxID)
			}
			// we do have an ID and calling Save will fail because it expects
			// either no ID or SAVE_MODE_UPDATE. So instead we drop one level
			// down and insert using tableImpl which doesn't know about
			// auto-incrementing IDs
			return t.tableImpl.save(backend, message, saveModeInsert)
		}
	})
}

func (t autoIncrementTable) decodeAutoIncJson(backend Backend, reader io.Reader, onMsg func(message proto.Message, maxID uint64) error) error {
	decoder, err := t.startDecodeJson(reader)
	if err != nil {
		return err
	}

	var seq uint64

	return t.doDecodeJson(decoder,
		func(message json.RawMessage) bool {
			err = json.Unmarshal(message, &seq)
			if err == nil {
				// writer is nil during validation
				if backend != nil {
					writer := newBatchIndexCommitmentWriter(backend)
					defer writer.Close()
					err = t.setSeqValue(writer.IndexStore(), seq)
					if err != nil {
						panic(err)
					}
					err = writer.Write()
					if err != nil {
						panic(err)
					}
				}
				return true
			}
			return false
		},
		func(message proto.Message) error {
			return onMsg(message, seq)
		})
}

func (t autoIncrementTable) ExportJSON(ctx context.Context, writer io.Writer) error {
	backend, err := t.getBackend(ctx)
	if err != nil {
		return err
	}

	_, err = writer.Write([]byte("["))
	if err != nil {
		return err
	}

	seq, err := t.curSeqValue(backend.IndexStoreReader())
	if err != nil {
		return err
	}

	bz, err := json.Marshal(seq)
	if err != nil {
		return err
	}
	_, err = writer.Write(bz)
	if err != nil {
		return err
	}

	_, err = writer.Write([]byte(",\n"))
	if err != nil {
		return err
	}

	return t.doExportJSON(ctx, writer)
}
@ -0,0 +1,74 @@
package ormtable_test

import (
	"bytes"
	"context"
	"os"
	"strings"
	"testing"

	"gotest.tools/v3/assert"
	"gotest.tools/v3/golden"

	"github.com/cosmos/cosmos-sdk/orm/internal/testkv"
	"github.com/cosmos/cosmos-sdk/orm/internal/testpb"
	"github.com/cosmos/cosmos-sdk/orm/model/ormtable"
)

func TestAutoIncrementScenario(t *testing.T) {
	table, err := ormtable.Build(ormtable.Options{
		MessageType: (&testpb.ExampleAutoIncrementTable{}).ProtoReflect().Type(),
	})
	assert.NilError(t, err)

	// first run tests with a split index-commitment store
	runAutoIncrementScenario(t, table, ormtable.WrapContextDefault(testkv.NewSplitMemBackend()))

	// now run with shared store and debugging
	debugBuf := &strings.Builder{}
	store := testkv.NewDebugBackend(
		testkv.NewSharedMemBackend(),
		&testkv.EntryCodecDebugger{
			EntryCodec: table,
			Print:      func(s string) { debugBuf.WriteString(s + "\n") },
		},
	)
	runAutoIncrementScenario(t, table, ormtable.WrapContextDefault(store))

	golden.Assert(t, debugBuf.String(), "test_auto_inc.golden")
	checkEncodeDecodeEntries(t, table, store.IndexStoreReader())
}

func runAutoIncrementScenario(t *testing.T, table ormtable.Table, context context.Context) {
	err := table.Save(context, &testpb.ExampleAutoIncrementTable{Id: 5})
	assert.ErrorContains(t, err, "update")

	ex1 := &testpb.ExampleAutoIncrementTable{X: "foo", Y: 5}
	assert.NilError(t, table.Save(context, ex1))
	assert.Equal(t, uint64(1), ex1.Id)

	buf := &bytes.Buffer{}
	assert.NilError(t, table.ExportJSON(context, buf))
	golden.Assert(t, string(buf.Bytes()), "auto_inc_json.golden")

	assert.NilError(t, table.ValidateJSON(bytes.NewReader(buf.Bytes())))
	store2 := ormtable.WrapContextDefault(testkv.NewSplitMemBackend())
	assert.NilError(t, table.ImportJSON(store2, bytes.NewReader(buf.Bytes())))
	assertTablesEqual(t, table, context, store2)
}

func TestBadJSON(t *testing.T) {
	table, err := ormtable.Build(ormtable.Options{
		MessageType: (&testpb.ExampleAutoIncrementTable{}).ProtoReflect().Type(),
	})
	assert.NilError(t, err)

	store := ormtable.WrapContextDefault(testkv.NewSplitMemBackend())
	f, err := os.Open("testdata/bad_auto_inc.json")
	assert.NilError(t, err)
	assert.ErrorContains(t, table.ImportJSON(store, f), "invalid ID")

	f, err = os.Open("testdata/bad_auto_inc2.json")
	assert.NilError(t, err)
	assert.ErrorContains(t, table.ImportJSON(store, f), "invalid ID")
}
@ -0,0 +1,156 @@
package ormtable

import (
	"context"

	"github.com/cosmos/cosmos-sdk/orm/model/kv"
)

// ReadBackend defines the type used for read-only ORM operations.
type ReadBackend interface {
	// CommitmentStoreReader returns the reader for the commitment store.
	CommitmentStoreReader() kv.ReadonlyStore

	// IndexStoreReader returns the reader for the index store.
	IndexStoreReader() kv.ReadonlyStore

	private()
}

// Backend defines the type used for read-write ORM operations.
// Unlike ReadBackend, write access to the underlying kv-store
// is hidden so that this can be fully encapsulated by the ORM.
type Backend interface {
	ReadBackend

	// CommitmentStore returns the merklized commitment store.
	CommitmentStore() kv.Store

	// IndexStore returns the index store if a separate one exists,
	// otherwise it returns the commitment store.
	IndexStore() kv.Store

	// Hooks returns a Hooks instance or nil.
	Hooks() Hooks
}

// ReadBackendOptions defines options for creating a ReadBackend.
// Read context can optionally define two stores - a commitment store
// that is backed by a merkle tree and an index store that isn't.
// If the index store is not defined, the commitment store will be
// used for all operations.
type ReadBackendOptions struct {

	// CommitmentStoreReader is a reader for the commitment store.
	CommitmentStoreReader kv.ReadonlyStore

	// IndexStoreReader is an optional reader for the index store.
	// If it is nil the CommitmentStoreReader will be used.
	IndexStoreReader kv.ReadonlyStore
}

type readBackend struct {
	commitmentReader kv.ReadonlyStore
	indexReader      kv.ReadonlyStore
}

func (r readBackend) CommitmentStoreReader() kv.ReadonlyStore {
	return r.commitmentReader
}

func (r readBackend) IndexStoreReader() kv.ReadonlyStore {
	return r.indexReader
}

func (readBackend) private() {}

// NewReadBackend creates a new ReadBackend.
func NewReadBackend(options ReadBackendOptions) ReadBackend {
	indexReader := options.IndexStoreReader
	if indexReader == nil {
		indexReader = options.CommitmentStoreReader
	}
	return &readBackend{
		commitmentReader: options.CommitmentStoreReader,
		indexReader:      indexReader,
	}
}

type backend struct {
	commitmentStore kv.Store
	indexStore      kv.Store
	hooks           Hooks
}

func (backend) private() {}

func (c backend) CommitmentStoreReader() kv.ReadonlyStore {
	return c.commitmentStore
}

func (c backend) IndexStoreReader() kv.ReadonlyStore {
	return c.indexStore
}

func (c backend) CommitmentStore() kv.Store {
	return c.commitmentStore
}

func (c backend) IndexStore() kv.Store {
	return c.indexStore
}

func (c backend) Hooks() Hooks {
	return c.hooks
}

// BackendOptions defines options for creating a Backend.
// Context can optionally define two stores - a commitment store
// that is backed by a merkle tree and an index store that isn't.
// If the index store is not defined, the commitment store will be
// used for all operations.
type BackendOptions struct {

	// CommitmentStore is the commitment store.
	CommitmentStore kv.Store

	// IndexStore is the optional index store.
	// If it is nil the CommitmentStore will be used.
	IndexStore kv.Store

	// Hooks are optional hooks into ORM insert, update and delete operations.
	Hooks Hooks
}

// NewBackend creates a new Backend.
func NewBackend(options BackendOptions) Backend {
	indexStore := options.IndexStore
	if indexStore == nil {
		indexStore = options.CommitmentStore
	}
	return &backend{
		commitmentStore: options.CommitmentStore,
		indexStore:      indexStore,
		hooks:           options.Hooks,
	}
}

// WrapContextDefault performs the default wrapping of a backend in a context.
// This should be used primarily for testing purposes and production code
// should use some other framework specific wrapping (for instance using
// "store keys").
func WrapContextDefault(backend ReadBackend) context.Context {
	return context.WithValue(context.Background(), defaultContextKey, backend)
}

type contextKeyType string

var defaultContextKey = contextKeyType("backend")

func getBackendDefault(ctx context.Context) (Backend, error) {
	return ctx.Value(defaultContextKey).(Backend), nil
}

func getReadBackendDefault(ctx context.Context) (ReadBackend, error) {
	return ctx.Value(defaultContextKey).(ReadBackend), nil
}
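A Backend is just the commitment/index store pair plus optional hooks, so wiring one up takes only a few lines. An illustrative sketch (not part of the diff) using tm-db memory stores, mirroring the testkv helpers earlier in this change:

	backend := ormtable.NewBackend(ormtable.BackendOptions{
		CommitmentStore: dbm.NewMemDB(), // merklized state in a real app
		IndexStore:      dbm.NewMemDB(), // separate, non-merklized index data (ADR 040 style)
		Hooks:           nil,            // or a Hooks implementation, see hooks.go below
	})
	ctx := ormtable.WrapContextDefault(backend) // tables retrieve the backend from ctx
	_ = ctx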
@ -0,0 +1,96 @@
package ormtable

import "github.com/cosmos/cosmos-sdk/orm/model/kv"

type batchIndexCommitmentWriter struct {
	Backend
	commitmentWriter *batchStoreWriter
	indexWriter      *batchStoreWriter
}

func newBatchIndexCommitmentWriter(store Backend) *batchIndexCommitmentWriter {
	return &batchIndexCommitmentWriter{
		Backend: store,
		// optimal array capacities are estimated here:
		commitmentWriter: &batchStoreWriter{
			ReadonlyStore: store.CommitmentStoreReader(),
			writes:        make([]batchWriterEntry, 0, 2),
		},
		indexWriter: &batchStoreWriter{
			ReadonlyStore: store.IndexStoreReader(),
			writes:        make([]batchWriterEntry, 0, 16),
		},
	}
}

func (w *batchIndexCommitmentWriter) CommitmentStore() kv.Store {
	return w.commitmentWriter
}

func (w *batchIndexCommitmentWriter) IndexStore() kv.Store {
	return w.indexWriter
}

// Write flushes any pending writes.
func (w *batchIndexCommitmentWriter) Write() error {
	err := flushWrites(w.Backend.CommitmentStore(), w.commitmentWriter.writes)
	if err != nil {
		return err
	}

	err = flushWrites(w.Backend.IndexStore(), w.indexWriter.writes)
	if err != nil {
		return err
	}

	// clear writes
	w.Close()

	return err
}

func flushWrites(writer kv.Store, writes []batchWriterEntry) error {
	for _, write := range writes {
		if !write.delete {
			err := writer.Set(write.key, write.value)
			if err != nil {
				return err
			}
		} else {
			err := writer.Delete(write.key)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// Close discards any pending writes and should generally be called using
// a defer statement.
func (w *batchIndexCommitmentWriter) Close() {
	w.commitmentWriter.writes = nil
	w.indexWriter.writes = nil
}

type batchWriterEntry struct {
	key, value []byte
	delete     bool
}

type batchStoreWriter struct {
	kv.ReadonlyStore
	writes []batchWriterEntry
}

func (b *batchStoreWriter) Set(key, value []byte) error {
	b.writes = append(b.writes, batchWriterEntry{key: key, value: value})
	return nil
}

func (b *batchStoreWriter) Delete(key []byte) error {
	b.writes = append(b.writes, batchWriterEntry{key: key, delete: true})
	return nil
}

var _ Backend = &batchIndexCommitmentWriter{}
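The writer buffers Set and Delete calls and only touches the real stores when Write is called, so callers follow a write-then-close pattern; a sketch of that usage, mirroring how autoIncrementTable.save above uses it (assumed variables backend, key, value):

	// Buffer writes, flush them at the end, and rely on the deferred Close
	// to drop anything left unflushed if an error occurs first.
	writer := newBatchIndexCommitmentWriter(backend)
	defer writer.Close()

	if err := writer.IndexStore().Set(key, value); err != nil { // buffered, not yet applied
		return err
	}
	return writer.Write() // applies all buffered writes to the underlying backend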
@ -0,0 +1,296 @@
package ormtable

import (
	"context"
	"fmt"

	"github.com/cosmos/cosmos-sdk/orm/encoding/encodeutil"

	"google.golang.org/protobuf/reflect/protoregistry"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"

	ormv1alpha1 "github.com/cosmos/cosmos-sdk/api/cosmos/orm/v1alpha1"
	"github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
	"github.com/cosmos/cosmos-sdk/orm/types/ormerrors"
)

const (
	primaryKeyId uint32 = 0
	indexIdLimit uint32 = 32768
	seqId               = indexIdLimit
)

// Options are options for building a Table.
type Options struct {
	// Prefix is an optional prefix used to build the table's prefix.
	Prefix []byte

	// MessageType is the protobuf message type of the table.
	MessageType protoreflect.MessageType

	// TableDescriptor is an optional table descriptor to be explicitly used
	// with the table. Generally this should be nil and the table descriptor
	// should be pulled from the table message option. TableDescriptor
	// cannot be used together with SingletonDescriptor.
	TableDescriptor *ormv1alpha1.TableDescriptor

	// SingletonDescriptor is an optional singleton descriptor to be explicitly used.
	// Generally this should be nil and the table descriptor
	// should be pulled from the singleton message option. SingletonDescriptor
	// cannot be used together with TableDescriptor.
	SingletonDescriptor *ormv1alpha1.SingletonDescriptor

	// TypeResolver is an optional type resolver to be used when unmarshaling
	// protobuf messages.
	TypeResolver TypeResolver

	// JSONValidator is an optional validator that can be used for validating
	// messages when using ValidateJSON. If it is nil, DefaultJSONValidator
	// will be used.
	JSONValidator func(proto.Message) error

	// GetBackend is an optional function which retrieves a Backend from the context.
	// If it is nil, the default behavior will be to attempt to retrieve a
	// backend using the method that WrapContextDefault uses. This method
	// can be used to implement things like "store keys" which would allow a
	// table to only be used with a specific backend and to hide direct
	// access to the backend other than through the table interface.
	GetBackend func(context.Context) (Backend, error)

	// GetReadBackend is an optional function which retrieves a ReadBackend from the context.
	// If it is nil, the default behavior will be to attempt to retrieve a
	// backend using the method that WrapContextDefault uses.
	GetReadBackend func(context.Context) (ReadBackend, error)
}

// TypeResolver is an interface that can be used for the proto.UnmarshalOptions.Resolver option.
type TypeResolver interface {
	protoregistry.MessageTypeResolver
	protoregistry.ExtensionTypeResolver
}

// Build builds a Table instance from the provided Options.
func Build(options Options) (Table, error) {
	messageDescriptor := options.MessageType.Descriptor()

	getReadBackend := options.GetReadBackend
	if getReadBackend == nil {
		getReadBackend = getReadBackendDefault
	}
	getBackend := options.GetBackend
	if getBackend == nil {
		getBackend = getBackendDefault
	}

	table := &tableImpl{
		primaryKeyIndex: &primaryKeyIndex{
			indexers:       []indexer{},
			getBackend:     getBackend,
			getReadBackend: getReadBackend,
		},
		indexes:               []Index{},
		indexesByFields:       map[fieldNames]concreteIndex{},
		uniqueIndexesByFields: map[fieldNames]UniqueIndex{},
		entryCodecsById:       map[uint32]ormkv.EntryCodec{},
		typeResolver:          options.TypeResolver,
		customJSONValidator:   options.JSONValidator,
	}

	pkIndex := table.primaryKeyIndex

	tableDesc := options.TableDescriptor
	if tableDesc == nil {
		tableDesc = proto.GetExtension(messageDescriptor.Options(), ormv1alpha1.E_Table).(*ormv1alpha1.TableDescriptor)
	}

	singletonDesc := options.SingletonDescriptor
	if singletonDesc == nil {
		singletonDesc = proto.GetExtension(messageDescriptor.Options(), ormv1alpha1.E_Singleton).(*ormv1alpha1.SingletonDescriptor)
	}

	switch {
	case tableDesc != nil:
		if singletonDesc != nil {
			return nil, ormerrors.InvalidTableDefinition.Wrapf("message %s cannot be declared as both a table and a singleton", messageDescriptor.FullName())
		}
	case singletonDesc != nil:
		if singletonDesc.Id == 0 {
			return nil, ormerrors.InvalidTableId.Wrapf("%s", messageDescriptor.FullName())
		}

		prefix := encodeutil.AppendVarUInt32(options.Prefix, singletonDesc.Id)
		pkCodec, err := ormkv.NewPrimaryKeyCodec(
			prefix,
			options.MessageType,
			nil,
			proto.UnmarshalOptions{Resolver: options.TypeResolver},
		)
		if err != nil {
			return nil, err
		}

		pkIndex.PrimaryKeyCodec = pkCodec
		table.tablePrefix = prefix
		table.tableId = singletonDesc.Id

		return &singleton{table}, nil
	default:
		return nil, ormerrors.InvalidTableDefinition.Wrapf("missing table descriptor for %s", messageDescriptor.FullName())
	}

	tableId := tableDesc.Id
	if tableId == 0 {
		return nil, ormerrors.InvalidTableId.Wrapf("table %s", messageDescriptor.FullName())
	}

	prefix := options.Prefix
	prefix = encodeutil.AppendVarUInt32(prefix, tableId)
	table.tablePrefix = prefix
	table.tableId = tableId

	if tableDesc.PrimaryKey == nil {
		return nil, ormerrors.MissingPrimaryKey.Wrap(string(messageDescriptor.FullName()))
	}

	pkFields := commaSeparatedFieldNames(tableDesc.PrimaryKey.Fields)
	table.primaryKeyIndex.fields = pkFields
	pkFieldNames := pkFields.Names()
	if len(pkFieldNames) == 0 {
		return nil, ormerrors.InvalidTableDefinition.Wrapf("empty primary key fields for %s", messageDescriptor.FullName())
	}

	pkPrefix := encodeutil.AppendVarUInt32(prefix, primaryKeyId)
	pkCodec, err := ormkv.NewPrimaryKeyCodec(
		pkPrefix,
		options.MessageType,
		pkFieldNames,
		proto.UnmarshalOptions{Resolver: options.TypeResolver},
	)
	if err != nil {
		return nil, err
	}

	pkIndex.PrimaryKeyCodec = pkCodec
	table.indexesByFields[pkFields] = pkIndex
	table.uniqueIndexesByFields[pkFields] = pkIndex
	table.entryCodecsById[primaryKeyId] = pkIndex
	table.indexes = append(table.indexes, pkIndex)

	for _, idxDesc := range tableDesc.Index {
		id := idxDesc.Id
		if id == 0 || id >= indexIdLimit {
			return nil, ormerrors.InvalidIndexId.Wrapf("index on table %s with fields %s, invalid id %d", messageDescriptor.FullName(), idxDesc.Fields, id)
		}

		if _, ok := table.entryCodecsById[id]; ok {
			return nil, ormerrors.DuplicateIndexId.Wrapf("id %d on table %s", id, messageDescriptor.FullName())
		}

		idxFields := commaSeparatedFieldNames(idxDesc.Fields)
		idxPrefix := encodeutil.AppendVarUInt32(prefix, id)
		var index concreteIndex

		// altNames contains all the alternative "names" of this index
		altNames := map[fieldNames]bool{idxFields: true}

		if idxDesc.Unique && isNonTrivialUniqueKey(idxFields.Names(), pkFieldNames) {
			uniqCdc, err := ormkv.NewUniqueKeyCodec(
				idxPrefix,
				options.MessageType,
				idxFields.Names(),
				pkFieldNames,
			)
			if err != nil {
				return nil, err
			}
			uniqIdx := &uniqueKeyIndex{
				UniqueKeyCodec: uniqCdc,
				fields:         idxFields,
				primaryKey:     pkIndex,
				getReadBackend: getReadBackend,
			}
			table.uniqueIndexesByFields[idxFields] = uniqIdx
			index = uniqIdx
		} else {
			idxCdc, err := ormkv.NewIndexKeyCodec(
				idxPrefix,
				options.MessageType,
				idxFields.Names(),
				pkFieldNames,
			)
			if err != nil {
				return nil, err
			}
			index = &indexKeyIndex{
				IndexKeyCodec:  idxCdc,
				fields:         idxFields,
				primaryKey:     pkIndex,
				getReadBackend: getReadBackend,
			}

			// non-unique indexes can sometimes be named by several sub-lists of
			// fields and we need to handle all of them. For example consider
			// a primary key for fields "a,b,c" and an index on field "c". Because the
			// rest of the primary key gets appended to the index key, the index for "c"
			// is actually stored as "c,a,b". So this index can be referred to
			// by the fields "c", "c,a", or "c,a,b".
			allFields := index.GetFieldNames()
			allFieldNames := fieldsFromNames(allFields)
			altNames[allFieldNames] = true
			for i := 1; i < len(allFields); i++ {
				altName := fieldsFromNames(allFields[:i])
				if altNames[altName] {
					continue
				}

				// we check by generating a codec for each sub-list of fields,
				// then we see if the full list of fields matches.
				altIdxCdc, err := ormkv.NewIndexKeyCodec(
					idxPrefix,
					options.MessageType,
					allFields[:i],
					pkFieldNames,
				)
				if err != nil {
					return nil, err
				}

				if fieldsFromNames(altIdxCdc.GetFieldNames()) == allFieldNames {
					altNames[altName] = true
				}
			}
		}

		for name := range altNames {
			if _, ok := table.indexesByFields[name]; ok {
				return nil, fmt.Errorf("duplicate index for fields %s", name)
			}

			table.indexesByFields[name] = index
		}

		table.entryCodecsById[id] = index
		table.indexes = append(table.indexes, index)
		table.indexers = append(table.indexers, index.(indexer))
	}

	if tableDesc.PrimaryKey.AutoIncrement {
		autoIncField := pkCodec.GetFieldDescriptors()[0]
		if len(pkFieldNames) != 1 || autoIncField.Kind() != protoreflect.Uint64Kind {
			return nil, ormerrors.InvalidAutoIncrementKey.Wrapf("field %s", autoIncField.FullName())
		}

		seqPrefix := encodeutil.AppendVarUInt32(prefix, seqId)
		seqCodec := ormkv.NewSeqCodec(options.MessageType, seqPrefix)
		table.entryCodecsById[seqId] = seqCodec
		return &autoIncrementTable{
			tableImpl:    table,
			autoIncField: autoIncField,
			seqCodec:     seqCodec,
		}, nil
	}

	return table, nil
}
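Build is the single entry point for all three table kinds (plain table, auto-increment table, singleton); a minimal sketch matching how the tests in this PR construct a table:

	table, err := ormtable.Build(ormtable.Options{
		MessageType: (&testpb.ExampleAutoIncrementTable{}).ProtoReflect().Type(),
		// Prefix, TypeResolver, JSONValidator, GetBackend, etc. are optional.
	})
	if err != nil {
		panic(err) // in the tests: assert.NilError(t, err)
	}
	_ = table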
@ -0,0 +1,3 @@
// Package ormtable defines the interfaces and implementations of tables and
// indexes.
package ormtable
@ -0,0 +1,55 @@
package ormtable

import (
	"strings"

	"google.golang.org/protobuf/reflect/protoreflect"
)

// fieldNames abstractly represents a list of fields with a comparable type which
// can be used as a map key. It is used primarily to lookup indexes.
type fieldNames struct {
	fields string
}

// commaSeparatedFieldNames creates a fieldNames instance from a list of comma-separated
// fields.
func commaSeparatedFieldNames(fields string) fieldNames {
	// normalize cases where there are spaces
	if strings.IndexByte(fields, ' ') >= 0 {
		parts := strings.Split(fields, ",")
		for i, part := range parts {
			parts[i] = strings.TrimSpace(part)
		}
		fields = strings.Join(parts, ",")
	}
	return fieldNames{fields: fields}
}

// fieldsFromNames creates a fieldNames instance from an array of field
// names.
func fieldsFromNames(fnames []protoreflect.Name) fieldNames {
	var names []string
	for _, name := range fnames {
		names = append(names, string(name))
	}
	return fieldNames{fields: strings.Join(names, ",")}
}

// Names returns the array of names this fieldNames instance represents.
func (f fieldNames) Names() []protoreflect.Name {
	if f.fields == "" {
		return nil
	}

	fields := strings.Split(f.fields, ",")
	names := make([]protoreflect.Name, len(fields))
	for i, field := range fields {
		names[i] = protoreflect.Name(field)
	}
	return names
}

func (f fieldNames) String() string {
	return f.fields
}
@ -0,0 +1,43 @@
package ormtable

import (
	"testing"

	"google.golang.org/protobuf/reflect/protoreflect"

	"gotest.tools/v3/assert"
)

func TestFieldNames(t *testing.T) {
	names := []protoreflect.Name{"a", "b", "c"}

	abc := "a,b,c"
	f := commaSeparatedFieldNames(abc)
	assert.Equal(t, fieldNames{abc}, f)
	assert.DeepEqual(t, names, f.Names())
	assert.Equal(t, abc, f.String())

	f = commaSeparatedFieldNames("a, b ,c")
	assert.Equal(t, fieldNames{abc}, f)
	assert.DeepEqual(t, names, f.Names())
	assert.Equal(t, abc, f.String())

	// empty okay
	f = commaSeparatedFieldNames("")
	assert.Equal(t, fieldNames{""}, f)
	assert.Equal(t, 0, len(f.Names()))
	assert.Equal(t, "", f.String())

	f = fieldsFromNames(names)
	assert.Equal(t, fieldNames{abc}, f)
	assert.DeepEqual(t, names, f.Names())
	assert.Equal(t, abc, f.String())

	// empty okay
	f = fieldsFromNames([]protoreflect.Name{})
	assert.Equal(t, fieldNames{""}, f)
	f = fieldsFromNames(nil)
	assert.Equal(t, fieldNames{""}, f)
	assert.Equal(t, 0, len(f.Names()))
	assert.Equal(t, "", f.String())
}
@ -0,0 +1,21 @@
package ormtable

import "google.golang.org/protobuf/proto"

// Hooks defines an interface for table hooks which can intercept
// insert, update and delete operations. Table.Save and Table.Delete methods will
// do a type assertion on kvstore.IndexCommitmentStore and, if the Hooks
// interface is defined, call the appropriate hooks.
type Hooks interface {
	// OnInsert is called before the message is inserted.
	// If error is not nil the operation will fail.
	OnInsert(proto.Message) error

	// OnUpdate is called before the existing message is updated with the new one.
	// If error is not nil the operation will fail.
	OnUpdate(existing, new proto.Message) error

	// OnDelete is called before the message is deleted.
	// If error is not nil the operation will fail.
	OnDelete(proto.Message) error
}
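Any type with these three methods can be passed as BackendOptions.Hooks. An illustrative sketch (the logHooks type is hypothetical, not part of the diff):

	// logHooks logs every write the ORM is about to perform and never rejects it.
	type logHooks struct{}

	func (logHooks) OnInsert(msg proto.Message) error {
		fmt.Printf("insert %s\n", msg.ProtoReflect().Descriptor().FullName())
		return nil
	}

	func (logHooks) OnUpdate(existing, new proto.Message) error {
		fmt.Printf("update %s\n", existing.ProtoReflect().Descriptor().FullName())
		return nil
	}

	func (logHooks) OnDelete(msg proto.Message) error {
		fmt.Printf("delete %s\n", msg.ProtoReflect().Descriptor().FullName())
		return nil
	}

	var _ ormtable.Hooks = logHooks{}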
@ -0,0 +1,57 @@
package ormtable

import (
	"context"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"

	"github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
	"github.com/cosmos/cosmos-sdk/orm/model/kv"
	"github.com/cosmos/cosmos-sdk/orm/model/ormlist"
)

// Index defines an index on a table. Index instances
// are stateless, with all state existing only in the store passed
// to index methods.
type Index interface {

	// Iterator returns an iterator for this index with the provided list options.
	Iterator(ctx context.Context, options ...ormlist.Option) (Iterator, error)

	// MessageType returns the protobuf message type of the index.
	MessageType() protoreflect.MessageType

	// Fields returns the canonical field names of the index.
	Fields() string

	doNotImplement()
}

// concreteIndex is used internally by table implementations.
type concreteIndex interface {
	Index
	ormkv.IndexCodec

	readValueFromIndexKey(context ReadBackend, primaryKey []protoreflect.Value, value []byte, message proto.Message) error
}

// UniqueIndex defines a unique index on a table.
type UniqueIndex interface {
	Index

	// Has returns true if the key values are present in the store for this index.
	Has(context context.Context, keyValues ...interface{}) (found bool, err error)

	// Get retrieves the message if one exists for the provided key values.
	Get(context context.Context, message proto.Message, keyValues ...interface{}) (found bool, err error)

	// DeleteByKey deletes the message if one exists for the provided key values.
	DeleteByKey(context context.Context, keyValues ...interface{}) error
}

type indexer interface {
	onInsert(store kv.Store, message protoreflect.Message) error
	onUpdate(store kv.Store, new, existing protoreflect.Message) error
	onDelete(store kv.Store, message protoreflect.Message) error
}
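Given a UniqueIndex (for example the unique "u64,str" index declared on ExampleTable in the test proto above), lookups take the key values directly in index-field order. An illustrative sketch assuming a UniqueIndex value idx and a wrapped context ctx:

	var msg testpb.ExampleTable
	found, err := idx.Get(ctx, &msg, uint64(12), "foo") // key values in index-field order
	if err != nil {
		return err
	}
	if found {
		// msg is now populated from the primary record referenced by the unique key
	}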
@ -0,0 +1,93 @@
package ormtable

import (
	"context"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"

	"github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
	"github.com/cosmos/cosmos-sdk/orm/model/kv"
	"github.com/cosmos/cosmos-sdk/orm/model/ormlist"
	"github.com/cosmos/cosmos-sdk/orm/types/ormerrors"
)

// indexKeyIndex implements Index for a regular IndexKey.
type indexKeyIndex struct {
	*ormkv.IndexKeyCodec
	fields         fieldNames
	primaryKey     *primaryKeyIndex
	getReadBackend func(context.Context) (ReadBackend, error)
}

func (i indexKeyIndex) Iterator(ctx context.Context, options ...ormlist.Option) (Iterator, error) {
	backend, err := i.getReadBackend(ctx)
	if err != nil {
		return nil, err
	}

	return iterator(backend, backend.IndexStoreReader(), i, i.KeyCodec, options)
}

var _ indexer = &indexKeyIndex{}
var _ Index = &indexKeyIndex{}

func (i indexKeyIndex) doNotImplement() {}

func (i indexKeyIndex) onInsert(store kv.Store, message protoreflect.Message) error {
	k, v, err := i.EncodeKVFromMessage(message)
	if err != nil {
		return err
	}
	return store.Set(k, v)
}

func (i indexKeyIndex) onUpdate(store kv.Store, new, existing protoreflect.Message) error {
	newValues := i.GetKeyValues(new)
	existingValues := i.GetKeyValues(existing)
	if i.CompareKeys(newValues, existingValues) == 0 {
		return nil
	}

	existingKey, err := i.EncodeKey(existingValues)
	if err != nil {
		return err
	}
	err = store.Delete(existingKey)
	if err != nil {
		return err
	}

	newKey, err := i.EncodeKey(newValues)
	if err != nil {
		return err
	}
	return store.Set(newKey, []byte{})
}

func (i indexKeyIndex) onDelete(store kv.Store, message protoreflect.Message) error {
	_, key, err := i.EncodeKeyFromMessage(message)
	if err != nil {
		return err
	}
	return store.Delete(key)
}

func (i indexKeyIndex) readValueFromIndexKey(backend ReadBackend, primaryKey []protoreflect.Value, _ []byte, message proto.Message) error {
	found, err := i.primaryKey.get(backend, message, primaryKey)
	if err != nil {
		return err
	}

	if !found {
		return ormerrors.UnexpectedError.Wrapf("can't find primary key")
	}

	return nil
}

func (p indexKeyIndex) Fields() string {
	return p.fields.String()
}

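A sketch of querying a secondary index by its canonical field names, mirroring the scenario test later in this diff; exampleIndexQuery is an illustrative name, not part of the PR:

	func exampleIndexQuery(ctx context.Context, table ormtable.Table) error {
		idx := table.GetIndex("str,u32") // nil if no index is declared on these fields
		it, err := idx.Iterator(ctx, ormlist.Prefix("abc"))
		if err != nil {
			return err
		}
		defer it.Close()
		for it.Next() {
			msg, err := it.GetMessage()
			if err != nil {
				return err
			}
			_ = msg // values are read back through the primary key, as readValueFromIndexKey does above
		}
		return nil
	}
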
@ -0,0 +1,239 @@
package ormtable

import (
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"

	"github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
	"github.com/cosmos/cosmos-sdk/orm/internal/listinternal"
	"github.com/cosmos/cosmos-sdk/orm/model/kv"
	"github.com/cosmos/cosmos-sdk/orm/model/ormlist"
)

// Iterator defines the interface for iterating over indexes.
type Iterator interface {

	// Next advances the iterator and returns true if a valid entry is found.
	// Next must be called before starting iteration.
	Next() bool

	// Keys returns the current index key and primary key values that the
	// iterator points to.
	Keys() (indexKey, primaryKey []protoreflect.Value, err error)

	// UnmarshalMessage unmarshals the entry the iterator currently points to
	// into the provided proto.Message.
	UnmarshalMessage(proto.Message) error

	// GetMessage retrieves the proto.Message that the iterator currently points
	// to.
	GetMessage() (proto.Message, error)

	// Cursor returns the cursor referencing the current iteration position
	// and can be used to restart iteration right after this position.
	Cursor() ormlist.CursorT

	// Close closes the iterator and must always be called when done using
	// the iterator. The defer keyword should generally be used for this.
	Close()

	doNotImplement()
}

func iterator(
	backend ReadBackend,
	reader kv.ReadonlyStore,
	index concreteIndex,
	codec *ormkv.KeyCodec,
	options []listinternal.Option,
) (Iterator, error) {
	opts := &listinternal.Options{}
	listinternal.ApplyOptions(opts, options)
	if err := opts.Validate(); err != nil {
		return nil, err
	}

	if opts.Start != nil || opts.End != nil {
		err := codec.CheckValidRangeIterationKeys(opts.Start, opts.End)
		if err != nil {
			return nil, err
		}

		startBz, err := codec.EncodeKey(opts.Start)
		if err != nil {
			return nil, err
		}

		endBz, err := codec.EncodeKey(opts.End)
		if err != nil {
			return nil, err
		}

		fullEndKey := len(codec.GetFieldNames()) == len(opts.End)

		return rangeIterator(reader, backend, index, startBz, endBz, fullEndKey, opts)
	} else {
		prefixBz, err := codec.EncodeKey(opts.Prefix)
		if err != nil {
			return nil, err
		}

		return prefixIterator(reader, backend, index, prefixBz, opts)
	}
}

func prefixIterator(iteratorStore kv.ReadonlyStore, backend ReadBackend, index concreteIndex, prefix []byte, options *listinternal.Options) (Iterator, error) {
	if !options.Reverse {
		var start []byte
		if len(options.Cursor) != 0 {
			// must start right after cursor
			start = append(options.Cursor, 0x0)
		} else {
			start = prefix
		}
		end := prefixEndBytes(prefix)
		it, err := iteratorStore.Iterator(start, end)
		if err != nil {
			return nil, err
		}
		return &indexIterator{
			index:    index,
			store:    backend,
			iterator: it,
			started:  false,
		}, nil
	} else {
		var end []byte
		if len(options.Cursor) != 0 {
			// end bytes are already exclusive by default
			end = options.Cursor
		} else {
			end = prefixEndBytes(prefix)
		}
		it, err := iteratorStore.ReverseIterator(prefix, end)
		if err != nil {
			return nil, err
		}

		return &indexIterator{
			index:    index,
			store:    backend,
			iterator: it,
			started:  false,
		}, nil
	}
}

// NOTE: fullEndKey indicates whether the end key contained all the fields of the key;
// if it did, then we need to use inclusive end bytes, otherwise we use prefix end bytes.
func rangeIterator(iteratorStore kv.ReadonlyStore, reader ReadBackend, index concreteIndex, start, end []byte, fullEndKey bool, options *listinternal.Options) (Iterator, error) {
	if !options.Reverse {
		if len(options.Cursor) != 0 {
			start = append(options.Cursor, 0)
		}

		if fullEndKey {
			end = inclusiveEndBytes(end)
		} else {
			end = prefixEndBytes(end)
		}

		it, err := iteratorStore.Iterator(start, end)
		if err != nil {
			return nil, err
		}
		return &indexIterator{
			index:    index,
			store:    reader,
			iterator: it,
			started:  false,
		}, nil
	} else {
		if len(options.Cursor) != 0 {
			end = options.Cursor
		} else {
			if fullEndKey {
				end = inclusiveEndBytes(end)
			} else {
				end = prefixEndBytes(end)
			}
		}
		it, err := iteratorStore.ReverseIterator(start, end)
		if err != nil {
			return nil, err
		}

		return &indexIterator{
			index:    index,
			store:    reader,
			iterator: it,
			started:  false,
		}, nil
	}
}

type indexIterator struct {
	index    concreteIndex
	store    ReadBackend
	iterator kv.Iterator

	indexValues []protoreflect.Value
	primaryKey  []protoreflect.Value
	value       []byte
	started     bool
}

func (i *indexIterator) Next() bool {
	if !i.started {
		i.started = true
	} else {
		i.iterator.Next()
		// invalidate any cached key decoding for the new position
		i.indexValues = nil
	}

	return i.iterator.Valid()
}

func (i *indexIterator) Keys() (indexKey, primaryKey []protoreflect.Value, err error) {
	if i.indexValues != nil {
		return i.indexValues, i.primaryKey, nil
	}

	i.value = i.iterator.Value()
	i.indexValues, i.primaryKey, err = i.index.DecodeIndexKey(i.iterator.Key(), i.value)
	if err != nil {
		return nil, nil, err
	}

	return i.indexValues, i.primaryKey, nil
}

func (i indexIterator) UnmarshalMessage(message proto.Message) error {
	_, pk, err := i.Keys()
	if err != nil {
		return err
	}
	return i.index.readValueFromIndexKey(i.store, pk, i.value, message)
}

func (i *indexIterator) GetMessage() (proto.Message, error) {
	msg := i.index.MessageType().New().Interface()
	err := i.UnmarshalMessage(msg)
	return msg, err
}

func (i indexIterator) Cursor() ormlist.CursorT {
	return i.iterator.Key()
}

func (i indexIterator) Close() {
	err := i.iterator.Close()
	if err != nil {
		panic(err)
	}
}

func (indexIterator) doNotImplement() {}

var _ Iterator = &indexIterator{}

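A sketch of the Iterator contract described above (illustrative function name, not part of the PR): Next must be called before reading, Close must always be called, and Cursor can be captured to resume iteration later via ormlist.Cursor:

	func exampleIterate(ctx context.Context, index ormtable.Index) (last ormlist.CursorT, err error) {
		it, err := index.Iterator(ctx)
		if err != nil {
			return nil, err
		}
		defer it.Close()

		for it.Next() {
			msg, err := it.GetMessage()
			if err != nil {
				return nil, err
			}
			_ = msg
			// last can later be passed as ormlist.Cursor(last) to resume right after this entry
			last = it.Cursor()
		}
		return last, nil
	}
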
@ -0,0 +1,124 @@
package ormtable

import (
	"context"
	"fmt"

	"google.golang.org/protobuf/proto"

	queryv1beta1 "github.com/cosmos/cosmos-sdk/api/cosmos/base/query/v1beta1"
	"github.com/cosmos/cosmos-sdk/orm/model/ormlist"
)

// PaginationRequest is a request to the Paginate function and extends the
// options in query.PageRequest.
type PaginationRequest struct {
	*queryv1beta1.PageRequest

	// Filter is an optional filter function that can be used to filter
	// the results in the underlying iterator and should return true to include
	// an item in the result.
	Filter func(message proto.Message) bool
}

// PaginationResponse is a response from the Paginate function and extends the
// options in query.PageResponse.
type PaginationResponse struct {
	*queryv1beta1.PageResponse

	// HaveMore indicates whether there are more pages.
	HaveMore bool

	// Cursors returns a cursor for each item and can be used to implement
	// GraphQL connection edges.
	Cursors []ormlist.CursorT
}

// Paginate retrieves a "page" of data from the provided index and context.
func Paginate(
	index Index,
	ctx context.Context,
	request *PaginationRequest,
	onItem func(proto.Message),
	options ...ormlist.Option,
) (*PaginationResponse, error) {
	offset := int(request.Offset)
	if len(request.Key) != 0 {
		if offset > 0 {
			return nil, fmt.Errorf("can only specify one of cursor or offset")
		}

		options = append(options, ormlist.Cursor(request.Key))
	}

	if request.Reverse {
		options = append(options, ormlist.Reverse())
	}

	it, err := index.Iterator(ctx, options...)
	if err != nil {
		return nil, err
	}
	defer it.Close()

	limit := int(request.Limit)
	if limit == 0 {
		return nil, fmt.Errorf("limit not specified")
	}

	i := 0
	if offset != 0 {
		for ; i < offset; i++ {
			if !it.Next() {
				return &PaginationResponse{
					PageResponse: &queryv1beta1.PageResponse{Total: uint64(i)},
				}, nil
			}
		}
	}

	haveMore := false
	cursors := make([]ormlist.CursorT, 0, limit)
	done := limit + offset
	for it.Next() {
		if i == done {
			haveMore = true
			if request.CountTotal {
				for {
					i++
					if !it.Next() {
						break
					}
				}
			}
			break
		}

		message, err := it.GetMessage()
		if err != nil {
			return nil, err
		}

		if request.Filter != nil && !request.Filter(message) {
			continue
		}

		i++
		cursors = append(cursors, it.Cursor())
		onItem(message)
	}

	pageRes := &queryv1beta1.PageResponse{}
	if request.CountTotal {
		pageRes.Total = uint64(i)
	}
	n := len(cursors)
	if n != 0 {
		pageRes.NextKey = cursors[n-1]
	}
	return &PaginationResponse{
		PageResponse: pageRes,
		HaveMore:     haveMore,
		Cursors:      cursors,
	}, nil
}

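A sketch of driving Paginate page by page, following the flow in table_test.go later in this diff: the first request sets Limit and CountTotal, subsequent requests pass back NextKey (examplePaginate is an illustrative name):

	func examplePaginate(ctx context.Context, table ormtable.Table) error {
		var items []proto.Message
		onItem := func(msg proto.Message) { items = append(items, msg) }

		res, err := ormtable.Paginate(table, ctx, &ormtable.PaginationRequest{
			PageRequest: &queryv1beta1.PageRequest{Limit: 4, CountTotal: true},
		}, onItem)
		if err != nil {
			return err
		}

		for res.HaveMore {
			res, err = ormtable.Paginate(table, ctx, &ormtable.PaginationRequest{
				PageRequest: &queryv1beta1.PageRequest{Key: res.NextKey, Limit: 4},
			}, onItem)
			if err != nil {
				return err
			}
		}
		return nil
	}
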
@ -0,0 +1,142 @@
package ormtable

import (
	"context"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"

	"github.com/cosmos/cosmos-sdk/orm/encoding/encodeutil"
	"github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
	"github.com/cosmos/cosmos-sdk/orm/model/ormlist"
)

// primaryKeyIndex defines a UniqueIndex for the primary key.
type primaryKeyIndex struct {
	*ormkv.PrimaryKeyCodec
	fields         fieldNames
	indexers       []indexer
	getBackend     func(context.Context) (Backend, error)
	getReadBackend func(context.Context) (ReadBackend, error)
}

func (p primaryKeyIndex) Iterator(ctx context.Context, options ...ormlist.Option) (Iterator, error) {
	backend, err := p.getReadBackend(ctx)
	if err != nil {
		return nil, err
	}

	return iterator(backend, backend.CommitmentStoreReader(), p, p.KeyCodec, options)
}

func (p primaryKeyIndex) doNotImplement() {}

func (p primaryKeyIndex) Has(context context.Context, key ...interface{}) (found bool, err error) {
	ctx, err := p.getReadBackend(context)
	if err != nil {
		return false, err
	}

	keyBz, err := p.EncodeKey(encodeutil.ValuesOf(key...))
	if err != nil {
		return false, err
	}

	return ctx.CommitmentStoreReader().Has(keyBz)
}

func (p primaryKeyIndex) Get(ctx context.Context, message proto.Message, values ...interface{}) (found bool, err error) {
	backend, err := p.getReadBackend(ctx)
	if err != nil {
		return false, err
	}

	return p.get(backend, message, encodeutil.ValuesOf(values...))
}

func (t primaryKeyIndex) DeleteByKey(ctx context.Context, primaryKeyValues ...interface{}) error {
	return t.doDeleteByKey(ctx, encodeutil.ValuesOf(primaryKeyValues...))
}

func (t primaryKeyIndex) doDeleteByKey(ctx context.Context, primaryKeyValues []protoreflect.Value) error {
	backend, err := t.getBackend(ctx)
	if err != nil {
		return err
	}

	pk, err := t.EncodeKey(primaryKeyValues)
	if err != nil {
		return err
	}

	msg := t.MessageType().New().Interface()
	found, err := t.getByKeyBytes(backend, pk, primaryKeyValues, msg)
	if err != nil {
		return err
	}

	if !found {
		return nil
	}

	if hooks := backend.Hooks(); hooks != nil {
		err = hooks.OnDelete(msg)
		if err != nil {
			return err
		}
	}

	// delete object
	writer := newBatchIndexCommitmentWriter(backend)
	defer writer.Close()
	err = writer.CommitmentStore().Delete(pk)
	if err != nil {
		return err
	}

	// clear indexes
	mref := msg.ProtoReflect()
	indexStoreWriter := writer.IndexStore()
	for _, idx := range t.indexers {
		err := idx.onDelete(indexStoreWriter, mref)
		if err != nil {
			return err
		}
	}

	return writer.Write()
}

func (p primaryKeyIndex) get(backend ReadBackend, message proto.Message, values []protoreflect.Value) (found bool, err error) {
	key, err := p.EncodeKey(values)
	if err != nil {
		return false, err
	}

	return p.getByKeyBytes(backend, key, values, message)
}

func (p primaryKeyIndex) getByKeyBytes(store ReadBackend, key []byte, keyValues []protoreflect.Value, message proto.Message) (found bool, err error) {
	bz, err := store.CommitmentStoreReader().Get(key)
	if err != nil {
		return false, err
	}

	if bz == nil {
		return false, nil
	}

	return true, p.Unmarshal(keyValues, bz, message)
}

func (p primaryKeyIndex) readValueFromIndexKey(_ ReadBackend, primaryKey []protoreflect.Value, value []byte, message proto.Message) error {
	return p.Unmarshal(primaryKey, value, message)
}

func (p primaryKeyIndex) Fields() string {
	return p.fields.String()
}

var _ UniqueIndex = &primaryKeyIndex{}

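A sketch of primary-key lookups: key fields are passed as plain Go values in primary-key order, mirroring the scenario test later in this diff (uint32, int64, string for testpb.ExampleTable; examplePrimaryKey is an illustrative name):

	func examplePrimaryKey(ctx context.Context, table ormtable.Table) error {
		var row testpb.ExampleTable
		found, err := table.Get(ctx, &row, uint32(7), int64(-2), "abe")
		if err != nil {
			return err
		}
		if found {
			// DeleteByKey removes the row and clears its index entries
			// through the batch writer shown above
			return table.DeleteByKey(ctx, uint32(7), int64(-2), "abe")
		}
		return nil
	}
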
@ -0,0 +1,93 @@
package ormtable

import (
	"context"
	"encoding/json"
	"io"

	"google.golang.org/protobuf/encoding/protojson"
)

// singleton implements a Table instance for singletons.
type singleton struct {
	*tableImpl
}

func (t singleton) DefaultJSON() json.RawMessage {
	msg := t.MessageType().New().Interface()
	bz, err := t.jsonMarshalOptions().Marshal(msg)
	if err != nil {
		return json.RawMessage("{}")
	}
	return bz
}

func (t singleton) ValidateJSON(reader io.Reader) error {
	bz, err := io.ReadAll(reader)
	if err != nil {
		return err
	}

	msg := t.MessageType().New().Interface()
	err = protojson.Unmarshal(bz, msg)
	if err != nil {
		return err
	}

	if t.customJSONValidator != nil {
		return t.customJSONValidator(msg)
	} else {
		return DefaultJSONValidator(msg)
	}
}

func (t singleton) ImportJSON(ctx context.Context, reader io.Reader) error {
	backend, err := t.getBackend(ctx)
	if err != nil {
		return err
	}

	bz, err := io.ReadAll(reader)
	if err != nil {
		return err
	}

	msg := t.MessageType().New().Interface()
	err = protojson.Unmarshal(bz, msg)
	if err != nil {
		return err
	}

	return t.save(backend, msg, saveModeDefault)
}

func (t singleton) ExportJSON(ctx context.Context, writer io.Writer) error {
	msg := t.MessageType().New().Interface()
	found, err := t.Get(ctx, msg)
	if err != nil {
		return err
	}

	var bz []byte
	if !found {
		bz = t.DefaultJSON()
	} else {
		bz, err = t.jsonMarshalOptions().Marshal(msg)
		if err != nil {
			return err
		}
	}

	_, err = writer.Write(bz)
	return err
}

func (t singleton) jsonMarshalOptions() protojson.MarshalOptions {
	return protojson.MarshalOptions{
		Multiline:       true,
		Indent:          "",
		UseProtoNames:   true,
		EmitUnpopulated: true,
		Resolver:        t.typeResolver,
	}
}

@ -0,0 +1,52 @@
package ormtable_test

import (
	"bytes"
	"testing"

	"google.golang.org/protobuf/testing/protocmp"
	"gotest.tools/v3/assert"

	"github.com/cosmos/cosmos-sdk/orm/internal/testkv"
	"github.com/cosmos/cosmos-sdk/orm/internal/testpb"
	"github.com/cosmos/cosmos-sdk/orm/model/ormtable"
)

func TestSingleton(t *testing.T) {
	val := &testpb.ExampleSingleton{}
	singleton, err := ormtable.Build(ormtable.Options{
		MessageType: val.ProtoReflect().Type(),
	})
	assert.NilError(t, err)
	store := ormtable.WrapContextDefault(testkv.NewSplitMemBackend())

	found, err := singleton.Has(store)
	assert.NilError(t, err)
	assert.Assert(t, !found)
	assert.NilError(t, singleton.Save(store, val))
	found, err = singleton.Has(store)
	assert.NilError(t, err)
	assert.Assert(t, found)

	val.Foo = "abc"
	val.Bar = 3
	assert.NilError(t, singleton.Save(store, val))

	var val2 testpb.ExampleSingleton
	found, err = singleton.Get(store, &val2)
	assert.NilError(t, err)
	assert.DeepEqual(t, val, &val2, protocmp.Transform())

	buf := &bytes.Buffer{}
	assert.NilError(t, singleton.ExportJSON(store, buf))
	assert.NilError(t, singleton.ValidateJSON(bytes.NewReader(buf.Bytes())))
	store2 := ormtable.WrapContextDefault(testkv.NewSplitMemBackend())
	assert.NilError(t, singleton.ImportJSON(store2, bytes.NewReader(buf.Bytes())))

	var val3 testpb.ExampleSingleton
	// read back from the backend the JSON was imported into
	found, err = singleton.Get(store2, &val3)
	assert.NilError(t, err)
	assert.DeepEqual(t, val, &val3, protocmp.Transform())
}

@ -0,0 +1,113 @@
package ormtable

import (
	"context"
	"encoding/json"
	"io"

	"google.golang.org/protobuf/proto"

	"github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
)

// View defines a read-only table.
//
// It exists as a separate interface to support future scenarios where
// tables may be "supported" virtually to provide compatibility between
// systems, for instance to enable backwards compatibility when a major
// migration needs to be performed.
type View interface {
	UniqueIndex

	// GetIndex returns the index referenced by the provided fields if
	// one exists or nil. Note that some concrete indexes can be retrieved by
	// multiple lists of fields.
	GetIndex(fields string) Index

	// GetUniqueIndex returns the unique index referenced by the provided fields if
	// one exists or nil. Note that some concrete indexes can be retrieved by
	// multiple lists of fields.
	GetUniqueIndex(fields string) UniqueIndex

	// Indexes returns all the concrete indexes for the table.
	Indexes() []Index
}

// Table is an abstract interface around a concrete table. Table instances
// are stateless, with all state existing only in the store passed
// to table and index methods.
type Table interface {
	View

	ormkv.EntryCodec

	// Save saves the provided entry in the store, either inserting it or
	// updating it if needed.
	//
	// If the store implements the Hooks interface, the appropriate OnInsert or
	// OnUpdate hook method will be called.
	//
	// Save attempts to be atomic with respect to the underlying store,
	// meaning that either the full save operation is written or the store is
	// left unchanged, unless there is an error with the underlying store.
	Save(context context.Context, message proto.Message) error

	// Insert inserts the provided entry in the store and fails if there is
	// a unique key violation. See Save for more details on behavior.
	Insert(context context.Context, message proto.Message) error

	// Update updates the provided entry in the store and fails if an entry
	// with a matching primary key does not exist. See Save for more details
	// on behavior.
	Update(context context.Context, message proto.Message) error

	// Delete deletes the entry with the provided primary key from the store.
	//
	// If the store implements the Hooks interface, the OnDelete hook method will
	// be called.
	//
	// Delete attempts to be atomic with respect to the underlying store,
	// meaning that either the full delete operation is written or the store is
	// left unchanged, unless there is an error with the underlying store.
	Delete(context context.Context, message proto.Message) error

	// DefaultJSON returns default JSON that can be used as a template for
	// genesis files.
	//
	// For regular tables this is an empty JSON array, but for singletons an
	// empty instance of the singleton is marshaled.
	DefaultJSON() json.RawMessage

	// ValidateJSON validates JSON streamed from the reader.
	ValidateJSON(io.Reader) error

	// ImportJSON imports JSON into the store, streaming one entry at a time.
	// Each table should be imported from a separate JSON file to enable proper
	// streaming.
	//
	// Regular tables should be stored as an array of objects with each object
	// corresponding to a single record in the table.
	//
	// Auto-incrementing tables can optionally have the last sequence value as
	// the first element in the array. If the last sequence value is provided,
	// then each value of the primary key in the file must be <= this last
	// sequence value or omitted entirely. If no last sequence value is
	// provided, no entries should contain the primary key as this will be
	// auto-assigned.
	//
	// Singletons should define a single object and not an array.
	//
	// ImportJSON is not atomic with respect to the underlying store, meaning
	// that in the case of an error, some records may already have been
	// imported. It is assumed that ImportJSON is called in the context of some
	// larger transaction isolation.
	ImportJSON(context.Context, io.Reader) error

	// ExportJSON exports JSON in the format accepted by ImportJSON.
	// Auto-incrementing tables will export the last sequence number as the
	// first element in the JSON array.
	ExportJSON(context.Context, io.Writer) error

	// ID is the ID of this table within the schema of its FileDescriptor.
	ID() uint32
}

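A sketch of the genesis JSON shapes described in the ImportJSON comment, checked through ValidateJSON; the field names here are illustrative proto JSON names assumed for testpb.ExampleTable, and exampleGenesisJSON/strings.NewReader usage is not part of the PR:

	func exampleGenesisJSON(table ormtable.Table) error {
		// regular table: an array of objects, one object per record;
		// a singleton would instead be a single object such as `{}`
		regular := `[{"u32": 4, "i64": -2, "str": "abc"}]`
		return table.ValidateJSON(strings.NewReader(regular))
	}
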
@ -0,0 +1,375 @@
|
||||||
|
package ormtable
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/binary"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/encoding/protojson"
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
|
||||||
|
"github.com/cosmos/cosmos-sdk/orm/encoding/encodeutil"
|
||||||
|
"github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
|
||||||
|
"github.com/cosmos/cosmos-sdk/orm/types/ormerrors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// tableImpl implements Table.
|
||||||
|
type tableImpl struct {
|
||||||
|
*primaryKeyIndex
|
||||||
|
indexes []Index
|
||||||
|
indexesByFields map[fieldNames]concreteIndex
|
||||||
|
uniqueIndexesByFields map[fieldNames]UniqueIndex
|
||||||
|
entryCodecsById map[uint32]ormkv.EntryCodec
|
||||||
|
tablePrefix []byte
|
||||||
|
tableId uint32
|
||||||
|
typeResolver TypeResolver
|
||||||
|
customJSONValidator func(message proto.Message) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) Save(ctx context.Context, message proto.Message) error {
|
||||||
|
backend, err := t.getBackend(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.save(backend, message, saveModeDefault)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) Insert(ctx context.Context, message proto.Message) error {
|
||||||
|
backend, err := t.getBackend(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.save(backend, message, saveModeInsert)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) Update(ctx context.Context, message proto.Message) error {
|
||||||
|
backend, err := t.getBackend(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.save(backend, message, saveModeUpdate)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) save(backend Backend, message proto.Message, mode saveMode) error {
|
||||||
|
writer := newBatchIndexCommitmentWriter(backend)
|
||||||
|
defer writer.Close()
|
||||||
|
return t.doSave(writer, message, mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) doSave(writer *batchIndexCommitmentWriter, message proto.Message, mode saveMode) error {
|
||||||
|
mref := message.ProtoReflect()
|
||||||
|
pkValues, pk, err := t.EncodeKeyFromMessage(mref)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
existing := mref.New().Interface()
|
||||||
|
haveExisting, err := t.getByKeyBytes(writer, pk, pkValues, existing)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if haveExisting {
|
||||||
|
if mode == saveModeInsert {
|
||||||
|
return ormerrors.PrimaryKeyConstraintViolation.Wrapf("%q:%+v", mref.Descriptor().FullName(), pkValues)
|
||||||
|
}
|
||||||
|
|
||||||
|
if hooks := writer.Hooks(); hooks != nil {
|
||||||
|
err = hooks.OnUpdate(existing, message)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if mode == saveModeUpdate {
|
||||||
|
return ormerrors.NotFoundOnUpdate.Wrapf("%q", mref.Descriptor().FullName())
|
||||||
|
}
|
||||||
|
|
||||||
|
if hooks := writer.Hooks(); hooks != nil {
|
||||||
|
err = hooks.OnInsert(message)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// temporarily clear primary key
|
||||||
|
t.ClearValues(mref)
|
||||||
|
|
||||||
|
	// store object
	bz, err := proto.MarshalOptions{Deterministic: true}.Marshal(message)
	if err != nil {
		return err
	}
	err = writer.CommitmentStore().Set(pk, bz)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// set primary key again
|
||||||
|
t.SetKeyValues(mref, pkValues)
|
||||||
|
|
||||||
|
// set indexes
|
||||||
|
indexStoreWriter := writer.IndexStore()
|
||||||
|
if !haveExisting {
|
||||||
|
for _, idx := range t.indexers {
|
||||||
|
err = idx.onInsert(indexStoreWriter, mref)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
existingMref := existing.ProtoReflect()
|
||||||
|
for _, idx := range t.indexers {
|
||||||
|
err = idx.onUpdate(indexStoreWriter, mref, existingMref)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return writer.Write()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) Delete(context context.Context, message proto.Message) error {
|
||||||
|
pk := t.PrimaryKeyCodec.GetKeyValues(message.ProtoReflect())
|
||||||
|
return t.DeleteByKey(context, pk)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) GetIndex(fields string) Index {
|
||||||
|
return t.indexesByFields[commaSeparatedFieldNames(fields)]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) GetUniqueIndex(fields string) UniqueIndex {
|
||||||
|
return t.uniqueIndexesByFields[commaSeparatedFieldNames(fields)]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) Indexes() []Index {
|
||||||
|
return t.indexes
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) DefaultJSON() json.RawMessage {
|
||||||
|
return json.RawMessage("[]")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) decodeJson(reader io.Reader, onMsg func(message proto.Message) error) error {
|
||||||
|
decoder, err := t.startDecodeJson(reader)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.doDecodeJson(decoder, nil, onMsg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) startDecodeJson(reader io.Reader) (*json.Decoder, error) {
|
||||||
|
decoder := json.NewDecoder(reader)
|
||||||
|
token, err := decoder.Token()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if token != json.Delim('[') {
|
||||||
|
return nil, ormerrors.JSONImportError.Wrapf("expected [ got %s", token)
|
||||||
|
}
|
||||||
|
|
||||||
|
return decoder, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// onFirst is called on the first RawMessage and used for auto-increment tables
|
||||||
|
// to decode the sequence in which case it should return true.
|
||||||
|
// onMsg is called on every decoded message
|
||||||
|
func (t tableImpl) doDecodeJson(decoder *json.Decoder, onFirst func(message json.RawMessage) bool, onMsg func(message proto.Message) error) error {
|
||||||
|
unmarshalOptions := protojson.UnmarshalOptions{Resolver: t.typeResolver}
|
||||||
|
|
||||||
|
first := true
|
||||||
|
for decoder.More() {
|
||||||
|
var rawJson json.RawMessage
|
||||||
|
err := decoder.Decode(&rawJson)
|
||||||
|
if err != nil {
|
||||||
|
return ormerrors.JSONImportError.Wrapf("%s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if first {
|
||||||
|
first = false
|
||||||
|
if onFirst != nil {
|
||||||
|
if onFirst(rawJson) {
|
||||||
|
// if onFirst handled this, skip decoding into a proto message
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
msg := t.MessageType().New().Interface()
|
||||||
|
err = unmarshalOptions.Unmarshal(rawJson, msg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = onMsg(msg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
token, err := decoder.Token()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if token != json.Delim(']') {
|
||||||
|
return ormerrors.JSONImportError.Wrapf("expected ] got %s", token)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultJSONValidator is the default validator used when calling
|
||||||
|
// Table.ValidateJSON(). It will call methods with the signature `ValidateBasic() error`
|
||||||
|
// and/or `Validate() error` to validate the message.
|
||||||
|
func DefaultJSONValidator(message proto.Message) error {
|
||||||
|
if v, ok := message.(interface{ ValidateBasic() error }); ok {
|
||||||
|
err := v.ValidateBasic()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := message.(interface{ Validate() error }); ok {
|
||||||
|
err := v.Validate()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) ValidateJSON(reader io.Reader) error {
|
||||||
|
return t.decodeJson(reader, func(message proto.Message) error {
|
||||||
|
if t.customJSONValidator != nil {
|
||||||
|
return t.customJSONValidator(message)
|
||||||
|
} else {
|
||||||
|
return DefaultJSONValidator(message)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) ImportJSON(ctx context.Context, reader io.Reader) error {
|
||||||
|
backend, err := t.getBackend(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.decodeJson(reader, func(message proto.Message) error {
|
||||||
|
return t.save(backend, message, saveModeDefault)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) ExportJSON(context context.Context, writer io.Writer) error {
|
||||||
|
_, err := writer.Write([]byte("["))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.doExportJSON(context, writer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) doExportJSON(ctx context.Context, writer io.Writer) error {
|
||||||
|
marshalOptions := protojson.MarshalOptions{
|
||||||
|
UseProtoNames: true,
|
||||||
|
Resolver: t.typeResolver,
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
it, _ := t.Iterator(ctx)
|
||||||
|
start := true
|
||||||
|
for {
|
||||||
|
found := it.Next()
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
_, err = writer.Write([]byte("]"))
|
||||||
|
return err
|
||||||
|
} else if !start {
|
||||||
|
_, err = writer.Write([]byte(",\n"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
start = false
|
||||||
|
|
||||||
|
msg := t.MessageType().New().Interface()
|
||||||
|
err = it.UnmarshalMessage(msg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
bz, err := marshalOptions.Marshal(msg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = writer.Write(bz)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) DecodeEntry(k, v []byte) (ormkv.Entry, error) {
|
||||||
|
r := bytes.NewReader(k)
|
||||||
|
err := encodeutil.SkipPrefix(r, t.tablePrefix)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
id, err := binary.ReadUvarint(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if id > math.MaxUint32 {
|
||||||
|
return nil, ormerrors.UnexpectedDecodePrefix.Wrapf("uint32 varint id out of range %d", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
idx, ok := t.entryCodecsById[uint32(id)]
|
||||||
|
if !ok {
|
||||||
|
return nil, ormerrors.UnexpectedDecodePrefix.Wrapf("can't find field with id %d", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
return idx.DecodeEntry(k, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) EncodeEntry(entry ormkv.Entry) (k, v []byte, err error) {
|
||||||
|
switch entry := entry.(type) {
|
||||||
|
case *ormkv.PrimaryKeyEntry:
|
||||||
|
return t.PrimaryKeyCodec.EncodeEntry(entry)
|
||||||
|
case *ormkv.IndexKeyEntry:
|
||||||
|
idx, ok := t.indexesByFields[fieldsFromNames(entry.Fields)]
|
||||||
|
if !ok {
|
||||||
|
return nil, nil, ormerrors.BadDecodeEntry.Wrapf("can't find index with fields %s", entry.Fields)
|
||||||
|
}
|
||||||
|
|
||||||
|
return idx.EncodeEntry(entry)
|
||||||
|
default:
|
||||||
|
return nil, nil, ormerrors.BadDecodeEntry.Wrapf("%s", entry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t tableImpl) ID() uint32 {
|
||||||
|
return t.tableId
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ Table = &tableImpl{}
|
||||||
|
|
||||||
|
type saveMode int
|
||||||
|
|
||||||
|
const (
|
||||||
|
saveModeDefault saveMode = iota
|
||||||
|
saveModeInsert
|
||||||
|
saveModeUpdate
|
||||||
|
)
|
|
@ -0,0 +1,692 @@
|
||||||
|
package ormtable_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/proto"
|
||||||
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
"google.golang.org/protobuf/testing/protocmp"
|
||||||
|
"gotest.tools/v3/assert"
|
||||||
|
"gotest.tools/v3/golden"
|
||||||
|
"pgregory.net/rapid"
|
||||||
|
|
||||||
|
queryv1beta1 "github.com/cosmos/cosmos-sdk/api/cosmos/base/query/v1beta1"
|
||||||
|
sdkerrors "github.com/cosmos/cosmos-sdk/errors"
|
||||||
|
"github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
|
||||||
|
"github.com/cosmos/cosmos-sdk/orm/internal/testkv"
|
||||||
|
"github.com/cosmos/cosmos-sdk/orm/internal/testpb"
|
||||||
|
"github.com/cosmos/cosmos-sdk/orm/internal/testutil"
|
||||||
|
"github.com/cosmos/cosmos-sdk/orm/model/kv"
|
||||||
|
"github.com/cosmos/cosmos-sdk/orm/model/ormlist"
|
||||||
|
"github.com/cosmos/cosmos-sdk/orm/model/ormtable"
|
||||||
|
"github.com/cosmos/cosmos-sdk/orm/types/ormerrors"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestScenario(t *testing.T) {
|
||||||
|
table, err := ormtable.Build(ormtable.Options{
|
||||||
|
MessageType: (&testpb.ExampleTable{}).ProtoReflect().Type(),
|
||||||
|
})
|
||||||
|
assert.NilError(t, err)
|
||||||
|
|
||||||
|
// first run tests with a split index-commitment store
|
||||||
|
runTestScenario(t, table, testkv.NewSplitMemBackend())
|
||||||
|
|
||||||
|
// now run tests with a shared index-commitment store
|
||||||
|
|
||||||
|
// we're going to wrap this test in a debug store and save the decoded debug
|
||||||
|
// messages, these will be checked against a golden file at the end of the
|
||||||
|
// test. the golden file can be used for fine-grained debugging of kv-store
|
||||||
|
// layout
|
||||||
|
debugBuf := &strings.Builder{}
|
||||||
|
store := testkv.NewDebugBackend(
|
||||||
|
testkv.NewSharedMemBackend(),
|
||||||
|
&testkv.EntryCodecDebugger{
|
||||||
|
EntryCodec: table,
|
||||||
|
Print: func(s string) { debugBuf.WriteString(s + "\n") },
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
runTestScenario(t, table, store)
|
||||||
|
|
||||||
|
// we're going to store debug data in a golden file to make sure that
|
||||||
|
// logical decoding works successfully
|
||||||
|
// run `go test pkgname -test.update-golden` to update the golden file
|
||||||
|
// see https://pkg.go.dev/gotest.tools/v3/golden for docs
|
||||||
|
golden.Assert(t, debugBuf.String(), "test_scenario.golden")
|
||||||
|
|
||||||
|
checkEncodeDecodeEntries(t, table, store.IndexStoreReader())
|
||||||
|
}
|
||||||
|
|
||||||
|
// check that the ormkv.Entry's decode and encode to the same bytes
|
||||||
|
func checkEncodeDecodeEntries(t *testing.T, table ormtable.Table, store kv.ReadonlyStore) {
|
||||||
|
it, err := store.Iterator(nil, nil)
|
||||||
|
assert.NilError(t, err)
|
||||||
|
for it.Valid() {
|
||||||
|
key := it.Key()
|
||||||
|
value := it.Value()
|
||||||
|
entry, err := table.DecodeEntry(key, value)
|
||||||
|
assert.NilError(t, err)
|
||||||
|
k, v, err := table.EncodeEntry(entry)
|
||||||
|
assert.Assert(t, bytes.Equal(key, k), "%x %x %s", key, k, entry)
|
||||||
|
assert.Assert(t, bytes.Equal(value, v), "%x %x %s", value, v, entry)
|
||||||
|
it.Next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func runTestScenario(t *testing.T, table ormtable.Table, backend ormtable.Backend) {
|
||||||
|
ctx := ormtable.WrapContextDefault(backend)
|
||||||
|
|
||||||
|
// let's create 10 data items we'll use later and give them indexes
|
||||||
|
data := []*testpb.ExampleTable{
|
||||||
|
{U32: 4, I64: -2, Str: "abc", U64: 7}, // 0
|
||||||
|
{U32: 4, I64: -2, Str: "abd", U64: 7}, // 1
|
||||||
|
{U32: 4, I64: -1, Str: "abc", U64: 8}, // 2
|
||||||
|
{U32: 5, I64: -2, Str: "abd", U64: 8}, // 3
|
||||||
|
{U32: 5, I64: -2, Str: "abe", U64: 9}, // 4
|
||||||
|
{U32: 7, I64: -2, Str: "abe", U64: 10}, // 5
|
||||||
|
{U32: 7, I64: -1, Str: "abe", U64: 11}, // 6
|
||||||
|
{U32: 8, I64: -4, Str: "abc", U64: 11}, // 7
|
||||||
|
{U32: 8, I64: 1, Str: "abc", U64: 12}, // 8
|
||||||
|
{U32: 8, I64: 1, Str: "abd", U64: 10}, // 9
|
||||||
|
}
|
||||||
|
|
||||||
|
// let's make a function to match what's in our iterator with what we
|
||||||
|
// expect using indexes in the data array above
|
||||||
|
assertIteratorItems := func(it ormtable.Iterator, xs ...int) {
|
||||||
|
for _, i := range xs {
|
||||||
|
assert.Assert(t, it.Next())
|
||||||
|
msg, err := it.GetMessage()
|
||||||
|
assert.NilError(t, err)
|
||||||
|
//t.Logf("data[%d] %v == %v", i, data[i], msg)
|
||||||
|
assert.DeepEqual(t, data[i], msg, protocmp.Transform())
|
||||||
|
}
|
||||||
|
// make sure the iterator is done
|
||||||
|
assert.Assert(t, !it.Next())
|
||||||
|
}
|
||||||
|
|
||||||
|
// insert one record
|
||||||
|
err := table.Insert(ctx, data[0])
|
||||||
|
// trivial prefix query has one record
|
||||||
|
it, err := table.Iterator(ctx)
|
||||||
|
assert.NilError(t, err)
|
||||||
|
assertIteratorItems(it, 0)
|
||||||
|
|
||||||
|
// insert one record
|
||||||
|
err = table.Insert(ctx, data[1])
|
||||||
|
// trivial prefix query has two records
|
||||||
|
it, err = table.Iterator(ctx)
|
||||||
|
assert.NilError(t, err)
|
||||||
|
assertIteratorItems(it, 0, 1)
|
||||||
|
|
||||||
|
// insert the other records
|
||||||
|
assert.NilError(t, err)
|
||||||
|
for i := 2; i < len(data); i++ {
|
||||||
|
err = table.Insert(ctx, data[i])
|
||||||
|
assert.NilError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// let's do a prefix query on the primary key
|
||||||
|
it, err = table.Iterator(ctx, ormlist.Prefix(uint32(8)))
|
||||||
|
assert.NilError(t, err)
|
||||||
|
assertIteratorItems(it, 7, 8, 9)
|
||||||
|
|
||||||
|
// let's try a reverse prefix query
|
||||||
|
it, err = table.Iterator(ctx, ormlist.Prefix(uint32(4)), ormlist.Reverse())
|
||||||
|
assert.NilError(t, err)
|
||||||
|
defer it.Close()
|
||||||
|
assertIteratorItems(it, 2, 1, 0)
|
||||||
|
|
||||||
|
// let's try a range query
|
||||||
|
it, err = table.Iterator(ctx,
|
||||||
|
ormlist.Start(uint32(4), int64(-1)),
|
||||||
|
ormlist.End(uint32(7)),
|
||||||
|
)
|
||||||
|
assert.NilError(t, err)
|
||||||
|
defer it.Close()
|
||||||
|
assertIteratorItems(it, 2, 3, 4, 5, 6)
|
||||||
|
|
||||||
|
// and another range query
|
||||||
|
it, err = table.Iterator(ctx,
|
||||||
|
ormlist.Start(uint32(5), int64(-3)),
|
||||||
|
ormlist.End(uint32(8), int64(1), "abc"),
|
||||||
|
)
|
||||||
|
assert.NilError(t, err)
|
||||||
|
defer it.Close()
|
||||||
|
assertIteratorItems(it, 3, 4, 5, 6, 7, 8)
|
||||||
|
|
||||||
|
// now a reverse range query on a different index
|
||||||
|
strU32Index := table.GetIndex("str,u32")
|
||||||
|
assert.Assert(t, strU32Index != nil)
|
||||||
|
it, err = strU32Index.Iterator(ctx,
|
||||||
|
ormlist.Start("abc"),
|
||||||
|
ormlist.End("abd"),
|
||||||
|
ormlist.Reverse(),
|
||||||
|
)
|
||||||
|
assertIteratorItems(it, 9, 3, 1, 8, 7, 2, 0)
|
||||||
|
|
||||||
|
// another prefix query forwards
|
||||||
|
it, err = strU32Index.Iterator(ctx, ormlist.Prefix("abe", uint32(7)))
|
||||||
|
assertIteratorItems(it, 5, 6)
|
||||||
|
// and backwards
|
||||||
|
it, err = strU32Index.Iterator(ctx, ormlist.Prefix("abc", uint32(4)), ormlist.Reverse())
|
||||||
|
assertIteratorItems(it, 2, 0)
|
||||||
|
|
||||||
|
// try an unique index
|
||||||
|
u64StrIndex := table.GetUniqueIndex("u64,str")
|
||||||
|
assert.Assert(t, u64StrIndex != nil)
|
||||||
|
found, err := u64StrIndex.Has(ctx, uint64(12), "abc")
|
||||||
|
assert.NilError(t, err)
|
||||||
|
assert.Assert(t, found)
|
||||||
|
var a testpb.ExampleTable
|
||||||
|
found, err = u64StrIndex.Get(ctx, &a, uint64(12), "abc")
|
||||||
|
assert.NilError(t, err)
|
||||||
|
assert.Assert(t, found)
|
||||||
|
assert.DeepEqual(t, data[8], &a, protocmp.Transform())
|
||||||
|
|
||||||
|
// let's try paginating some stuff
|
||||||
|
|
||||||
|
// first create a function to test what we got from pagination
|
||||||
|
assertGotItems := func(items []proto.Message, xs ...int) {
|
||||||
|
n := len(xs)
|
||||||
|
assert.Equal(t, n, len(items))
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
j := xs[i]
|
||||||
|
//t.Logf("data[%d] %v == %v", j, data[j], items[i])
|
||||||
|
assert.DeepEqual(t, data[j], items[i], protocmp.Transform())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// now do some pagination
|
||||||
|
var items []proto.Message
|
||||||
|
onItem := func(item proto.Message) {
|
||||||
|
items = append(items, item)
|
||||||
|
}
|
||||||
|
res, err := ormtable.Paginate(table, ctx, &ormtable.PaginationRequest{
|
||||||
|
PageRequest: &queryv1beta1.PageRequest{
|
||||||
|
Limit: 4,
|
||||||
|
CountTotal: true,
|
||||||
|
}}, onItem)
|
||||||
|
assert.NilError(t, err)
|
||||||
|
assert.Assert(t, res != nil)
|
||||||
|
assert.Equal(t, uint64(10), res.Total)
|
||||||
|
assert.Assert(t, res.NextKey != nil)
|
||||||
|
assert.Assert(t, res.HaveMore)
|
||||||
|
assert.Equal(t, 4, len(res.Cursors))
|
||||||
|
assertGotItems(items, 0, 1, 2, 3)
|
||||||
|
|
||||||
|
// read another page
|
||||||
|
items = nil
|
||||||
|
res, err = ormtable.Paginate(table, ctx, &ormtable.PaginationRequest{
|
||||||
|
PageRequest: &queryv1beta1.PageRequest{
|
||||||
|
Key: res.NextKey,
|
||||||
|
Limit: 4,
|
||||||
|
}}, onItem)
|
||||||
|
assert.NilError(t, err)
|
||||||
|
assert.Assert(t, res != nil)
|
||||||
|
assert.Assert(t, res.NextKey != nil)
|
||||||
|
assert.Assert(t, res.HaveMore)
|
||||||
|
assert.Equal(t, 4, len(res.Cursors))
|
||||||
|
assertGotItems(items, 4, 5, 6, 7)
|
||||||
|
|
||||||
|
// and the last page
|
||||||
|
items = nil
|
||||||
|
res, err = ormtable.Paginate(table, ctx, &ormtable.PaginationRequest{
|
||||||
|
PageRequest: &queryv1beta1.PageRequest{
|
||||||
|
Key: res.NextKey,
|
||||||
|
Limit: 4,
|
||||||
|
}}, onItem)
|
||||||
|
assert.NilError(t, err)
|
||||||
|
assert.Assert(t, res != nil)
|
||||||
|
assert.Assert(t, res.NextKey != nil)
|
||||||
|
assert.Assert(t, !res.HaveMore)
|
||||||
|
assert.Equal(t, 2, len(res.Cursors))
|
||||||
|
assertGotItems(items, 8, 9)
|
||||||
|
|
||||||
|
// let's go backwards
|
||||||
|
items = nil
|
||||||
|
res, err = ormtable.Paginate(table, ctx, &ormtable.PaginationRequest{
|
||||||
|
PageRequest: &queryv1beta1.PageRequest{
|
||||||
|
Limit: 2,
|
||||||
|
CountTotal: true,
|
||||||
|
Reverse: true,
|
||||||
|
}}, onItem)
|
||||||
|
assert.NilError(t, err)
|
||||||
|
assert.Assert(t, res != nil)
|
||||||
|
assert.Assert(t, res.NextKey != nil)
|
||||||
|
assert.Equal(t, uint64(10), res.Total)
|
||||||
|
assert.Assert(t, res.HaveMore)
|
||||||
|
assert.Equal(t, 2, len(res.Cursors))
|
||||||
|
assertGotItems(items, 9, 8)
|
||||||
|
|
||||||
|
// a bit more
|
||||||
|
items = nil
|
||||||
|
res, err = ormtable.Paginate(table, ctx, &ormtable.PaginationRequest{
|
||||||
|
PageRequest: &queryv1beta1.PageRequest{
|
||||||
|
Key: res.NextKey,
|
||||||
|
Limit: 2,
|
||||||
|
Reverse: true,
|
||||||
|
}}, onItem)
|
||||||
|
assert.NilError(t, err)
|
||||||
|
assert.Assert(t, res != nil)
|
||||||
|
assert.Assert(t, res.NextKey != nil)
|
||||||
|
assert.Assert(t, res.HaveMore)
|
||||||
|
assert.Equal(t, 2, len(res.Cursors))
|
||||||
|
assertGotItems(items, 7, 6)
|
||||||
|
|
||||||
|
// range query
|
||||||
|
items = nil
|
||||||
|
res, err = ormtable.Paginate(table, ctx,
|
||||||
|
&ormtable.PaginationRequest{
|
||||||
|
PageRequest: &queryv1beta1.PageRequest{
|
||||||
|
Limit: 10,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
onItem,
|
||||||
|
ormlist.Start(uint32(4), int64(-1), "abc"),
|
||||||
|
ormlist.End(uint32(7), int64(-2), "abe"),
|
||||||
|
)
|
||||||
|
assert.NilError(t, err)
|
||||||
|
assert.Assert(t, res != nil)
|
||||||
|
assert.Assert(t, !res.HaveMore)
|
||||||
|
assert.Equal(t, 4, len(res.Cursors))
|
||||||
|
assertGotItems(items, 2, 3, 4, 5)
|
||||||
|
|
||||||
|
// let's try an offset
|
||||||
|
items = nil
|
||||||
|
res, err = ormtable.Paginate(table, ctx, &ormtable.PaginationRequest{
|
||||||
|
PageRequest: &queryv1beta1.PageRequest{
|
||||||
|
Limit: 2,
|
||||||
|
CountTotal: true,
|
||||||
|
Offset: 3,
|
||||||
|
}}, onItem)
|
||||||
|
assert.NilError(t, err)
|
||||||
|
assert.Assert(t, res != nil)
|
||||||
|
assert.Assert(t, res.NextKey != nil)
|
||||||
|
assert.Equal(t, uint64(10), res.Total)
|
||||||
|
	assert.Assert(t, res.HaveMore)
	assert.Equal(t, 2, len(res.Cursors))
	assertGotItems(items, 3, 4)

	// and reverse
	items = nil
	res, err = ormtable.Paginate(table, ctx, &ormtable.PaginationRequest{
		PageRequest: &queryv1beta1.PageRequest{
			Limit:      3,
			CountTotal: true,
			Offset:     5,
			Reverse:    true,
		}}, onItem)
	assert.NilError(t, err)
	assert.Assert(t, res != nil)
	assert.Assert(t, res.NextKey != nil)
	assert.Equal(t, uint64(10), res.Total)
	assert.Assert(t, res.HaveMore)
	assert.Equal(t, 3, len(res.Cursors))
	assertGotItems(items, 4, 3, 2)

	// now an offset that's slightly too big
	items = nil
	res, err = ormtable.Paginate(table, ctx, &ormtable.PaginationRequest{
		PageRequest: &queryv1beta1.PageRequest{
			Limit:      1,
			CountTotal: true,
			Offset:     10,
		}}, onItem)
	assert.NilError(t, err)
	assert.Equal(t, 0, len(items))
	assert.Assert(t, !res.HaveMore)
	assert.Equal(t, uint64(10), res.Total)

	// another offset that's too big
	items = nil
	res, err = ormtable.Paginate(table, ctx, &ormtable.PaginationRequest{
		PageRequest: &queryv1beta1.PageRequest{
			Limit:      1,
			CountTotal: true,
			Offset:     14,
		}}, onItem)
	assert.NilError(t, err)
	assert.Equal(t, 0, len(items))
	assert.Assert(t, !res.HaveMore)
	assert.Equal(t, uint64(10), res.Total)

	// now let's update some things
	for i := 0; i < 5; i++ {
		data[i].U64 = data[i].U64 * 2
		data[i].Bz = []byte(data[i].Str)
		err = table.Update(ctx, data[i])
		assert.NilError(t, err)
	}
	it, err = table.Iterator(ctx)
	assert.NilError(t, err)
	// we should still get everything in the same order
	assertIteratorItems(it, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9)

	// let's use SAVE_MODE_DEFAULT and add something
	data = append(data, &testpb.ExampleTable{U32: 9})
	err = table.Save(ctx, data[10])
	assert.NilError(t, err)
	found, err = table.Get(ctx, &a, uint32(9), int64(0), "")
	assert.NilError(t, err)
	assert.Assert(t, found)
	assert.DeepEqual(t, data[10], &a, protocmp.Transform())
	// and update it
	data[10].B = true
	assert.NilError(t, table.Save(ctx, data[10]))
	found, err = table.Get(ctx, &a, uint32(9), int64(0), "")
	assert.NilError(t, err)
	assert.Assert(t, found)
	assert.DeepEqual(t, data[10], &a, protocmp.Transform())
	// and iterate
	it, err = table.Iterator(ctx)
	assert.NilError(t, err)
	assertIteratorItems(it, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)

	// let's export and import JSON and use a read-only backend
	buf := &bytes.Buffer{}
	readBackend := ormtable.NewReadBackend(ormtable.ReadBackendOptions{
		CommitmentStoreReader: backend.CommitmentStoreReader(),
		IndexStoreReader:      backend.IndexStoreReader(),
	})
	assert.NilError(t, table.ExportJSON(ormtable.WrapContextDefault(readBackend), buf))
	assert.NilError(t, table.ValidateJSON(bytes.NewReader(buf.Bytes())))
	store2 := ormtable.WrapContextDefault(testkv.NewSplitMemBackend())
	assert.NilError(t, table.ImportJSON(store2, bytes.NewReader(buf.Bytes())))
	assertTablesEqual(t, table, ctx, store2)

	// let's delete item 5
	key5 := []interface{}{uint32(7), int64(-2), "abe"}
	err = table.DeleteByKey(ctx, key5...)
	assert.NilError(t, err)
	// it should be gone
	found, err = table.Has(ctx, key5...)
	assert.NilError(t, err)
	assert.Assert(t, !found)
	// and missing from the iterator
	it, err = table.Iterator(ctx)
	assert.NilError(t, err)
	assertIteratorItems(it, 0, 1, 2, 3, 4, 6, 7, 8, 9, 10)
}

func TestRandomTableData(t *testing.T) {
	testTable(t, TableDataGen(testutil.GenA, 100).Example().(*TableData))
}
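
// The property-based test above drives testTable with generated data:
// TableDataGen (further below) keeps drawing messages from the supplied rapid
// generator, skipping draws that collide on the primary key or a unique key,
// until it has inserted n distinct rows into a fresh in-memory backend.
// testTable then treats that slice as the expected contents of the table and
// checks every index against it.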

func testTable(t *testing.T, tableData *TableData) {
	for _, index := range tableData.table.Indexes() {
		indexModel := &IndexModel{
			TableData: tableData,
			index:     index.(TestIndex),
		}
		sort.Sort(indexModel)
		if _, ok := index.(ormtable.UniqueIndex); ok {
			testUniqueIndex(t, indexModel)
		}
		testIndex(t, indexModel)
	}
}

func testUniqueIndex(t *testing.T, model *IndexModel) {
	index := model.index.(ormtable.UniqueIndex)
	t.Logf("testing unique index %T %s", index, index.Fields())
	for i := 0; i < len(model.data); i++ {
		x := model.data[i]
		ks, _, err := index.(ormkv.IndexCodec).EncodeKeyFromMessage(x.ProtoReflect())
		assert.NilError(t, err)

		values := protoValuesToInterfaces(ks)

		found, err := index.Has(model.context, values...)
		assert.NilError(t, err)
		assert.Assert(t, found)

		msg := model.table.MessageType().New().Interface()
		found, err = index.Get(model.context, msg, values...)
		assert.NilError(t, err)
		assert.Assert(t, found)
		assert.DeepEqual(t, x, msg, protocmp.Transform())
	}
}
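
// testIndex checks an index against the sorted in-memory model: for fully
// ordered indexes it compares forward, reverse, and randomly chosen
// sub-range iteration (ormlist.Start/End) with the expected slice; for
// unordered indexes it only checks that iteration returns the same set of
// messages, re-sorting the result with IndexModel before comparing.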
func testIndex(t *testing.T, model *IndexModel) {
	index := model.index
	if index.IsFullyOrdered() {
		t.Logf("testing index %T %s", index, index.Fields())

		it, err := model.index.Iterator(model.context)
		assert.NilError(t, err)
		checkIteratorAgainstSlice(t, it, model.data)

		it, err = model.index.Iterator(model.context, ormlist.Reverse())
		assert.NilError(t, err)
		checkIteratorAgainstSlice(t, it, reverseData(model.data))

		rapid.Check(t, func(t *rapid.T) {
			i := rapid.IntRange(0, len(model.data)-2).Draw(t, "i").(int)
			j := rapid.IntRange(i+1, len(model.data)-1).Draw(t, "j").(int)

			start, _, err := model.index.(ormkv.IndexCodec).EncodeKeyFromMessage(model.data[i].ProtoReflect())
			assert.NilError(t, err)
			end, _, err := model.index.(ormkv.IndexCodec).EncodeKeyFromMessage(model.data[j].ProtoReflect())
			assert.NilError(t, err)

			startVals := protoValuesToInterfaces(start)
			endVals := protoValuesToInterfaces(end)

			it, err = model.index.Iterator(model.context, ormlist.Start(startVals...), ormlist.End(endVals...))
			assert.NilError(t, err)
			checkIteratorAgainstSlice(t, it, model.data[i:j+1])

			it, err = model.index.Iterator(model.context, ormlist.Start(startVals...), ormlist.End(endVals...), ormlist.Reverse())
			assert.NilError(t, err)
			checkIteratorAgainstSlice(t, it, reverseData(model.data[i:j+1]))
		})
	} else {
		t.Logf("testing unordered index %T %s", index, index.Fields())

		// get all the data
		it, err := model.index.Iterator(model.context)
		assert.NilError(t, err)
		var data2 []proto.Message
		for it.Next() {
			msg, err := it.GetMessage()
			assert.NilError(t, err)
			data2 = append(data2, msg)
		}
		assert.Equal(t, len(model.data), len(data2))

		// sort it
		model2 := &IndexModel{
			TableData: &TableData{
				table:   model.table,
				data:    data2,
				context: model.context,
			},
			index: model.index,
		}
		sort.Sort(model2)

		// compare
		for i := 0; i < len(data2); i++ {
			assert.DeepEqual(t, model.data[i], data2[i], protocmp.Transform())
		}
	}

}

func reverseData(data []proto.Message) []proto.Message {
	n := len(data)
	reverse := make([]proto.Message, n)
	for i := 0; i < n; i++ {
		reverse[n-i-1] = data[i]
	}
	return reverse
}

func checkIteratorAgainstSlice(t assert.TestingT, iterator ormtable.Iterator, data []proto.Message) {
	i := 0
	for iterator.Next() {
		if i >= len(data) {
			for iterator.Next() {
				i++
			}
			t.Log(fmt.Sprintf("too many elements in iterator, len(data) = %d, i = %d", len(data), i))
			t.FailNow()
		}
		msg, err := iterator.GetMessage()
		assert.NilError(t, err)
		assert.DeepEqual(t, data[i], msg, protocmp.Transform())
		i++
	}
}

func TableDataGen(elemGen *rapid.Generator, n int) *rapid.Generator {
	return rapid.Custom(func(t *rapid.T) *TableData {
		prefix := rapid.SliceOfN(rapid.Byte(), 0, 5).Draw(t, "prefix").([]byte)
		message := elemGen.Draw(t, "message").(proto.Message)
		table, err := ormtable.Build(ormtable.Options{
			Prefix:      prefix,
			MessageType: message.ProtoReflect().Type(),
		})
		if err != nil {
			panic(err)
		}

		data := make([]proto.Message, n)
		store := ormtable.WrapContextDefault(testkv.NewSplitMemBackend())

		for i := 0; i < n; {
			message = elemGen.Draw(t, fmt.Sprintf("message[%d]", i)).(proto.Message)
			err := table.Insert(store, message)
			if sdkerrors.IsOf(err, ormerrors.PrimaryKeyConstraintViolation, ormerrors.UniqueKeyViolation) {
				continue
			} else if err != nil {
				panic(err)
			}
			data[i] = message
			i++
		}

		return &TableData{
			data:    data,
			table:   table,
			context: store,
		}
	})
}

type TableData struct {
	table   ormtable.Table
	data    []proto.Message
	context context.Context
}

type IndexModel struct {
	*TableData
	index TestIndex
}

// TestIndex exposes methods that all index implementations expose publicly
// but on private structs because they are intended only to be used for testing.
type TestIndex interface {
	ormtable.Index

	// CompareKeys compares the two keys against the underlying IndexCodec, returning a
	// negative value if key1 is less than key2, 0 if they are equal, and a
	// positive value otherwise.
	CompareKeys(key1, key2 []protoreflect.Value) int

	// IsFullyOrdered returns true if all of the fields in the index are
	// considered "well-ordered" in terms of sorted iteration.
	IsFullyOrdered() bool
}

func (m *IndexModel) Len() int {
	return len(m.data)
}

func (m *IndexModel) Less(i, j int) bool {
	is, _, err := m.index.(ormkv.IndexCodec).EncodeKeyFromMessage(m.data[i].ProtoReflect())
	if err != nil {
		panic(err)
	}
	js, _, err := m.index.(ormkv.IndexCodec).EncodeKeyFromMessage(m.data[j].ProtoReflect())
	if err != nil {
		panic(err)
	}
	return m.index.CompareKeys(is, js) < 0
}

func (m *IndexModel) Swap(i, j int) {
	x := m.data[i]
	m.data[i] = m.data[j]
	m.data[j] = x
}

var _ sort.Interface = &IndexModel{}

func TestJSONExportImport(t *testing.T) {
	table, err := ormtable.Build(ormtable.Options{
		MessageType: (&testpb.ExampleTable{}).ProtoReflect().Type(),
	})
	assert.NilError(t, err)
	store := ormtable.WrapContextDefault(testkv.NewSplitMemBackend())

	for i := 0; i < 100; {
		x := testutil.GenA.Example().(proto.Message)
		err = table.Insert(store, x)
		if sdkerrors.IsOf(err, ormerrors.PrimaryKeyConstraintViolation, ormerrors.UniqueKeyViolation) {
			continue
		} else {
			assert.NilError(t, err)
		}
		i++
	}

	buf := &bytes.Buffer{}
	assert.NilError(t, table.ExportJSON(store, buf))

	assert.NilError(t, table.ValidateJSON(bytes.NewReader(buf.Bytes())))

	store2 := ormtable.WrapContextDefault(testkv.NewSplitMemBackend())
	assert.NilError(t, table.ImportJSON(store2, bytes.NewReader(buf.Bytes())))

	assertTablesEqual(t, table, store, store2)
}

func assertTablesEqual(t assert.TestingT, table ormtable.Table, ctx, ctx2 context.Context) {
	it, err := table.Iterator(ctx)
	assert.NilError(t, err)
	it2, err := table.Iterator(ctx2)
	assert.NilError(t, err)

	for {
		have := it.Next()
		have2 := it2.Next()
		assert.Equal(t, have, have2)
		if !have {
			break
		}

		msg1, err := it.GetMessage()
		assert.NilError(t, err)
		msg2, err := it2.GetMessage()
		assert.NilError(t, err)

		assert.DeepEqual(t, msg1, msg2, protocmp.Transform())
	}
}

func protoValuesToInterfaces(ks []protoreflect.Value) []interface{} {
	values := make([]interface{}, len(ks))
	for i := 0; i < len(ks); i++ {
		values[i] = ks[i].Interface()
	}

	return values
}

@ -0,0 +1,2 @@
[1,
{"id":"1","x":"foo","y":5}]
@ -0,0 +1,2 @@
[1,
{"id":"2","x":"foo","y":5}]
@ -0,0 +1 @@
[{"id":"1","x":"foo","y":5}]
@ -0,0 +1,31 @@
GET 03000000000000000005
PK testpb.ExampleAutoIncrementTable 5 -> id:5
GET 03808002
SEQ testpb.ExampleAutoIncrementTable 0
GET 03000000000000000001
PK testpb.ExampleAutoIncrementTable 1 -> id:1
ORM INSERT testpb.ExampleAutoIncrementTable id:1 x:"foo" y:5
HAS 0301666f6f
ERR:EOF
SET 03000000000000000001 1203666f6f1805
PK testpb.ExampleAutoIncrementTable 1 -> id:1 x:"foo" y:5
SET 03808002 01
SEQ testpb.ExampleAutoIncrementTable 1
SET 0301666f6f 0000000000000001
UNIQ testpb.ExampleAutoIncrementTable x : "foo" -> 1
GET 03808002 01
SEQ testpb.ExampleAutoIncrementTable 1
ITERATOR 0300 -> 0301
VALID true
KEY 03000000000000000001 1203666f6f1805
PK testpb.ExampleAutoIncrementTable 1 -> id:1 x:"foo" y:5
NEXT
VALID false
ITERATOR 0300 -> 0301
VALID true
KEY 03000000000000000001 1203666f6f1805
PK testpb.ExampleAutoIncrementTable 1 -> id:1 x:"foo" y:5
KEY 03000000000000000001 1203666f6f1805
PK testpb.ExampleAutoIncrementTable 1 -> id:1 x:"foo" y:5
NEXT
VALID false
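
The trace above (and the longer one that follows) pairs each raw store operation
(GET/HAS/SET/DEL/ITERATOR) with its logical decoding (the PK/SEQ/UNIQ/IDX lines).
Read against those decodings, a key such as 0100000000047ffffffffffffffe616263
appears to break down as a one-byte table prefix (01 for testpb.ExampleTable,
03 for testpb.ExampleAutoIncrementTable), a one-byte index id (00 for the primary
key, 01 and up for the other indexes), and then the key fields in order: u32 4 as
big-endian 00000004, i64 -2 as 7ffffffffffffffe with the sign bit flipped so byte
order matches numeric order, and "abc" as 616263.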
@ -0,0 +1,923 @@
|
||||||
|
GET 0100000000047ffffffffffffffe616263
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 str:"abc" i64:-2
|
||||||
|
ORM INSERT testpb.ExampleTable u32:4 u64:7 str:"abc" i64:-2
|
||||||
|
HAS 01010000000000000007616263
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000047ffffffffffffffe616263 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:7 str:"abc" i64:-2
|
||||||
|
SET 01010000000000000007616263 000000047ffffffffffffffe
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 7/"abc" -> 4/-2/"abc"
|
||||||
|
SET 010261626300000000047ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abc"/4/-2 -> 4/-2/"abc"
|
||||||
|
SET 01030061626300000000047ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abc"/4/-2 -> 4/-2/"abc"
|
||||||
|
ITERATOR 0100 -> 0101
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616263 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:7 str:"abc" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
GET 0100000000047ffffffffffffffe616264
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 str:"abd" i64:-2
|
||||||
|
ORM INSERT testpb.ExampleTable u32:4 u64:7 str:"abd" i64:-2
|
||||||
|
HAS 01010000000000000007616264
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000047ffffffffffffffe616264 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:7 str:"abd" i64:-2
|
||||||
|
SET 01010000000000000007616264 000000047ffffffffffffffe
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 7/"abd" -> 4/-2/"abd"
|
||||||
|
SET 010261626400000000047ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abd"/4/-2 -> 4/-2/"abd"
|
||||||
|
SET 01030061626400000000047ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abd"/4/-2 -> 4/-2/"abd"
|
||||||
|
ITERATOR 0100 -> 0101
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616263 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:7 str:"abc" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616264 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:7 str:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
GET 0100000000047fffffffffffffff616263
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 str:"abc" i64:-1
|
||||||
|
ORM INSERT testpb.ExampleTable u32:4 u64:8 str:"abc" i64:-1
|
||||||
|
HAS 01010000000000000008616263
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000047fffffffffffffff616263 1008
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:8 str:"abc" i64:-1
|
||||||
|
SET 01010000000000000008616263 000000047fffffffffffffff
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 8/"abc" -> 4/-1/"abc"
|
||||||
|
SET 010261626300000000047fffffffffffffff
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abc"/4/-1 -> 4/-1/"abc"
|
||||||
|
SET 01030061626300000000047fffffffffffffff
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abc"/4/-1 -> 4/-1/"abc"
|
||||||
|
GET 0100000000057ffffffffffffffe616264
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 str:"abd" i64:-2
|
||||||
|
ORM INSERT testpb.ExampleTable u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
HAS 01010000000000000008616264
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000057ffffffffffffffe616264 1008
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
SET 01010000000000000008616264 000000057ffffffffffffffe
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 8/"abd" -> 5/-2/"abd"
|
||||||
|
SET 010261626400000000057ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abd"/5/-2 -> 5/-2/"abd"
|
||||||
|
SET 01030061626400000000057ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abd"/5/-2 -> 5/-2/"abd"
|
||||||
|
GET 0100000000057ffffffffffffffe616265
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 str:"abe" i64:-2
|
||||||
|
ORM INSERT testpb.ExampleTable u32:5 u64:9 str:"abe" i64:-2
|
||||||
|
HAS 01010000000000000009616265
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000057ffffffffffffffe616265 1009
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:9 str:"abe" i64:-2
|
||||||
|
SET 01010000000000000009616265 000000057ffffffffffffffe
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 9/"abe" -> 5/-2/"abe"
|
||||||
|
SET 010261626500000000057ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abe"/5/-2 -> 5/-2/"abe"
|
||||||
|
SET 01030061626500000000057ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abe"/5/-2 -> 5/-2/"abe"
|
||||||
|
GET 0100000000077ffffffffffffffe616265
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 str:"abe" i64:-2
|
||||||
|
ORM INSERT testpb.ExampleTable u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
HAS 0101000000000000000a616265
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
SET 0101000000000000000a616265 000000077ffffffffffffffe
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 10/"abe" -> 7/-2/"abe"
|
||||||
|
SET 010261626500000000077ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abe"/7/-2 -> 7/-2/"abe"
|
||||||
|
SET 01030061626500000000077ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abe"/7/-2 -> 7/-2/"abe"
|
||||||
|
GET 0100000000077fffffffffffffff616265
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 str:"abe" i64:-1
|
||||||
|
ORM INSERT testpb.ExampleTable u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
HAS 0101000000000000000b616265
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
SET 0101000000000000000b616265 000000077fffffffffffffff
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 11/"abe" -> 7/-1/"abe"
|
||||||
|
SET 010261626500000000077fffffffffffffff
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abe"/7/-1 -> 7/-1/"abe"
|
||||||
|
SET 01030061626500000000077fffffffffffffff
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abe"/7/-1 -> 7/-1/"abe"
|
||||||
|
GET 0100000000087ffffffffffffffc616263
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 str:"abc" i64:-4
|
||||||
|
ORM INSERT testpb.ExampleTable u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
HAS 0101000000000000000b616263
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
SET 0101000000000000000b616263 000000087ffffffffffffffc
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 11/"abc" -> 8/-4/"abc"
|
||||||
|
SET 010261626300000000087ffffffffffffffc
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abc"/8/-4 -> 8/-4/"abc"
|
||||||
|
SET 01030061626300000000087ffffffffffffffc
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abc"/8/-4 -> 8/-4/"abc"
|
||||||
|
GET 0100000000088000000000000001616263
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 str:"abc" i64:1
|
||||||
|
ORM INSERT testpb.ExampleTable u32:8 u64:12 str:"abc" i64:1
|
||||||
|
HAS 0101000000000000000c616263
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
SET 0101000000000000000c616263 000000088000000000000001
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 12/"abc" -> 8/1/"abc"
|
||||||
|
SET 010261626300000000088000000000000001
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abc"/8/1 -> 8/1/"abc"
|
||||||
|
SET 01030061626300000000088000000000000001
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abc"/8/1 -> 8/1/"abc"
|
||||||
|
GET 0100000000088000000000000001616264
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 str:"abd" i64:1
|
||||||
|
ORM INSERT testpb.ExampleTable u32:8 u64:10 str:"abd" i64:1
|
||||||
|
HAS 0101000000000000000a616264
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000088000000000000001616264 100a
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 u64:10 str:"abd" i64:1
|
||||||
|
SET 0101000000000000000a616264 000000088000000000000001
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 10/"abd" -> 8/1/"abd"
|
||||||
|
SET 010261626400000000088000000000000001
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abd"/8/1 -> 8/1/"abd"
|
||||||
|
SET 01030061626400000000088000000000000001
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abd"/8/1 -> 8/1/"abd"
|
||||||
|
ITERATOR 010000000008 -> 010000000009
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616264 100a
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 u64:10 str:"abd" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
ITERATOR 010000000004 <- 010000000005
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047fffffffffffffff616263 1008
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:8 str:"abc" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616264 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:7 str:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616263 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:7 str:"abc" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
ITERATOR 0100000000047fffffffffffffff -> 010000000008
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047fffffffffffffff616263 1008
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:8 str:"abc" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 1008
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 1009
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:9 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
ITERATOR 0100000000057ffffffffffffffd -> 010000000008800000000000000161626300
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 1008
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 1009
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:9 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
ITERATOR 010261626300 <- 010261626401
|
||||||
|
VALID true
|
||||||
|
KEY 010261626400000000088000000000000001
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abd"/8/1 -> 8/1/"abd"
|
||||||
|
GET 0100000000088000000000000001616264 100a
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 u64:10 str:"abd" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 010261626400000000057ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abd"/5/-2 -> 5/-2/"abd"
|
||||||
|
GET 0100000000057ffffffffffffffe616264 1008
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 010261626400000000047ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abd"/4/-2 -> 4/-2/"abd"
|
||||||
|
GET 0100000000047ffffffffffffffe616264 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:7 str:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 010261626300000000088000000000000001
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abc"/8/1 -> 8/1/"abc"
|
||||||
|
GET 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 010261626300000000087ffffffffffffffc
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abc"/8/-4 -> 8/-4/"abc"
|
||||||
|
GET 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 010261626300000000047fffffffffffffff
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abc"/4/-1 -> 4/-1/"abc"
|
||||||
|
GET 0100000000047fffffffffffffff616263 1008
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:8 str:"abc" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 010261626300000000047ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abc"/4/-2 -> 4/-2/"abc"
|
||||||
|
GET 0100000000047ffffffffffffffe616263 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:7 str:"abc" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
ITERATOR 01026162650000000007 -> 01026162650000000008
|
||||||
|
VALID true
|
||||||
|
KEY 010261626500000000077ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abe"/7/-2 -> 7/-2/"abe"
|
||||||
|
GET 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 010261626500000000077fffffffffffffff
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abe"/7/-1 -> 7/-1/"abe"
|
||||||
|
GET 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
ITERATOR 01026162630000000004 <- 01026162630000000005
|
||||||
|
VALID true
|
||||||
|
KEY 010261626300000000047fffffffffffffff
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abc"/4/-1 -> 4/-1/"abc"
|
||||||
|
GET 0100000000047fffffffffffffff616263 1008
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:8 str:"abc" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 010261626300000000047ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : "abc"/4/-2 -> 4/-2/"abc"
|
||||||
|
GET 0100000000047ffffffffffffffe616263 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:7 str:"abc" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
HAS 0101000000000000000c616263
|
||||||
|
ERR:EOF
|
||||||
|
GET 0101000000000000000c616263 000000088000000000000001
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 12/"abc" -> 8/1/"abc"
|
||||||
|
GET 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
ITERATOR 0100 -> 0101
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616263 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:7 str:"abc" i64:-2
|
||||||
|
KEY 0100000000047ffffffffffffffe616263 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:7 str:"abc" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616264 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:7 str:"abd" i64:-2
|
||||||
|
KEY 0100000000047ffffffffffffffe616264 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:7 str:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047fffffffffffffff616263 1008
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:8 str:"abc" i64:-1
|
||||||
|
KEY 0100000000047fffffffffffffff616263 1008
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:8 str:"abc" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 1008
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 1008
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
CLOSE
|
||||||
|
ITERATOR 0100000000057ffffffffffffffe61626400 -> 0101
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 1009
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:9 str:"abe" i64:-2
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 1009
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:9 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
KEY 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
KEY 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
KEY 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
CLOSE
|
||||||
|
ITERATOR 0100000000087ffffffffffffffc61626300 -> 0101
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
KEY 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616264 100a
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 u64:10 str:"abd" i64:1
|
||||||
|
KEY 0100000000088000000000000001616264 100a
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 u64:10 str:"abd" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
CLOSE
|
||||||
|
ITERATOR 0100 <- 0101
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616264 100a
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 u64:10 str:"abd" i64:1
|
||||||
|
KEY 0100000000088000000000000001616264 100a
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 u64:10 str:"abd" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
KEY 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
CLOSE
|
||||||
|
ITERATOR 0100 <- 0100000000088000000000000001616263
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
KEY 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
KEY 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
CLOSE
|
||||||
|
ITERATOR 0100000000047fffffffffffffff616263 -> 0100000000077ffffffffffffffe61626500
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047fffffffffffffff616263 1008
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:8 str:"abc" i64:-1
|
||||||
|
KEY 0100000000047fffffffffffffff616263 1008
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:8 str:"abc" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 1008
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 1008
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 1009
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:9 str:"abe" i64:-2
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 1009
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:9 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
KEY 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
CLOSE
|
||||||
|
ITERATOR 0100 -> 0101
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 1008
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 1008
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 1009
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:9 str:"abe" i64:-2
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 1009
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:9 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
CLOSE
|
||||||
|
ITERATOR 0100 <- 0101
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 1009
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:9 str:"abe" i64:-2
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 1009
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:9 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 1008
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 1008
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047fffffffffffffff616263 1008
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:8 str:"abc" i64:-1
|
||||||
|
KEY 0100000000047fffffffffffffff616263 1008
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:8 str:"abc" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
CLOSE
|
||||||
|
ITERATOR 0100 -> 0101
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
CLOSE
|
||||||
|
ITERATOR 0100 -> 0101
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
CLOSE
|
||||||
|
GET 0100000000047ffffffffffffffe616263 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:7 str:"abc" i64:-2
|
||||||
|
ORM UPDATE testpb.ExampleTable u32:4 u64:7 str:"abc" i64:-2 -> u32:4 u64:14 str:"abc" bz:"abc" i64:-2
|
||||||
|
HAS 0101000000000000000e616263
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000047ffffffffffffffe616263 100e2203616263
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:14 str:"abc" bz:"abc" i64:-2
|
||||||
|
DEL 01010000000000000007616263
|
||||||
|
DEL ERR:EOF
|
||||||
|
SET 0101000000000000000e616263 000000047ffffffffffffffe
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 14/"abc" -> 4/-2/"abc"
|
||||||
|
DEL 01030061626300000000047ffffffffffffffe
|
||||||
|
DEL IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abc"/4/-2 -> 4/-2/"abc"
|
||||||
|
SET 01030361626361626300000000047ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : "YWJj"/"abc"/4/-2 -> 4/-2/"abc"
|
||||||
|
GET 0100000000047ffffffffffffffe616264 1007
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:7 str:"abd" i64:-2
|
||||||
|
ORM UPDATE testpb.ExampleTable u32:4 u64:7 str:"abd" i64:-2 -> u32:4 u64:14 str:"abd" bz:"abd" i64:-2
|
||||||
|
HAS 0101000000000000000e616264
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000047ffffffffffffffe616264 100e2203616264
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:14 str:"abd" bz:"abd" i64:-2
|
||||||
|
DEL 01010000000000000007616264
|
||||||
|
DEL ERR:EOF
|
||||||
|
SET 0101000000000000000e616264 000000047ffffffffffffffe
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 14/"abd" -> 4/-2/"abd"
|
||||||
|
DEL 01030061626400000000047ffffffffffffffe
|
||||||
|
DEL IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abd"/4/-2 -> 4/-2/"abd"
|
||||||
|
SET 01030361626461626400000000047ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : "YWJk"/"abd"/4/-2 -> 4/-2/"abd"
|
||||||
|
GET 0100000000047fffffffffffffff616263 1008
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:8 str:"abc" i64:-1
|
||||||
|
ORM UPDATE testpb.ExampleTable u32:4 u64:8 str:"abc" i64:-1 -> u32:4 u64:16 str:"abc" bz:"abc" i64:-1
|
||||||
|
HAS 01010000000000000010616263
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000047fffffffffffffff616263 10102203616263
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:16 str:"abc" bz:"abc" i64:-1
|
||||||
|
DEL 01010000000000000008616263
|
||||||
|
DEL ERR:EOF
|
||||||
|
SET 01010000000000000010616263 000000047fffffffffffffff
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 16/"abc" -> 4/-1/"abc"
|
||||||
|
DEL 01030061626300000000047fffffffffffffff
|
||||||
|
DEL IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abc"/4/-1 -> 4/-1/"abc"
|
||||||
|
SET 01030361626361626300000000047fffffffffffffff
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : "YWJj"/"abc"/4/-1 -> 4/-1/"abc"
|
||||||
|
GET 0100000000057ffffffffffffffe616264 1008
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:8 str:"abd" i64:-2
|
||||||
|
ORM UPDATE testpb.ExampleTable u32:5 u64:8 str:"abd" i64:-2 -> u32:5 u64:16 str:"abd" bz:"abd" i64:-2
|
||||||
|
HAS 01010000000000000010616264
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000057ffffffffffffffe616264 10102203616264
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:16 str:"abd" bz:"abd" i64:-2
|
||||||
|
DEL 01010000000000000008616264
|
||||||
|
DEL ERR:EOF
|
||||||
|
SET 01010000000000000010616264 000000057ffffffffffffffe
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 16/"abd" -> 5/-2/"abd"
|
||||||
|
DEL 01030061626400000000057ffffffffffffffe
|
||||||
|
DEL IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abd"/5/-2 -> 5/-2/"abd"
|
||||||
|
SET 01030361626461626400000000057ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : "YWJk"/"abd"/5/-2 -> 5/-2/"abd"
|
||||||
|
GET 0100000000057ffffffffffffffe616265 1009
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:9 str:"abe" i64:-2
|
||||||
|
ORM UPDATE testpb.ExampleTable u32:5 u64:9 str:"abe" i64:-2 -> u32:5 u64:18 str:"abe" bz:"abe" i64:-2
|
||||||
|
HAS 01010000000000000012616265
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000057ffffffffffffffe616265 10122203616265
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:18 str:"abe" bz:"abe" i64:-2
|
||||||
|
DEL 01010000000000000009616265
|
||||||
|
DEL ERR:EOF
|
||||||
|
SET 01010000000000000012616265 000000057ffffffffffffffe
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 18/"abe" -> 5/-2/"abe"
|
||||||
|
DEL 01030061626500000000057ffffffffffffffe
|
||||||
|
DEL IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abe"/5/-2 -> 5/-2/"abe"
|
||||||
|
SET 01030361626561626500000000057ffffffffffffffe
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : "YWJl"/"abe"/5/-2 -> 5/-2/"abe"
|
||||||
|
ITERATOR 0100 -> 0101
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616263 100e2203616263
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:14 str:"abc" bz:"abc" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616264 100e2203616264
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:14 str:"abd" bz:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047fffffffffffffff616263 10102203616263
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:16 str:"abc" bz:"abc" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 10102203616264
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:16 str:"abd" bz:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 10122203616265
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:18 str:"abe" bz:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616264 100a
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 u64:10 str:"abd" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
GET 0100000000098000000000000000
|
||||||
|
PK testpb.ExampleTable 9/0/"" -> u32:9
|
||||||
|
ORM INSERT testpb.ExampleTable u32:9
|
||||||
|
HAS 01010000000000000000
|
||||||
|
ERR:EOF
|
||||||
|
SET 0100000000098000000000000000
|
||||||
|
PK testpb.ExampleTable 9/0/"" -> u32:9
|
||||||
|
SET 01010000000000000000 000000098000000000000000
|
||||||
|
UNIQ testpb.ExampleTable u64/str : 0/"" -> 9/0/""
|
||||||
|
SET 010200000000098000000000000000
|
||||||
|
IDX testpb.ExampleTable str/u32/i64 : ""/9/0 -> 9/0/""
|
||||||
|
SET 01030000000000098000000000000000
|
||||||
|
IDX testpb.ExampleTable bz/str/u32/i64 : ""/""/9/0 -> 9/0/""
|
||||||
|
GET 0100000000098000000000000000
|
||||||
|
PK testpb.ExampleTable 9/0/"" -> u32:9
|
||||||
|
GET 0100000000098000000000000000
|
||||||
|
PK testpb.ExampleTable 9/0/"" -> u32:9
|
||||||
|
ORM UPDATE testpb.ExampleTable u32:9 -> u32:9 b:true
|
||||||
|
SET 0100000000098000000000000000 7801
|
||||||
|
PK testpb.ExampleTable 9/0/"" -> u32:9 b:true
|
||||||
|
GET 0100000000098000000000000000 7801
|
||||||
|
PK testpb.ExampleTable 9/0/"" -> u32:9 b:true
|
||||||
|
ITERATOR 0100 -> 0101
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616263 100e2203616263
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:14 str:"abc" bz:"abc" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616264 100e2203616264
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:14 str:"abd" bz:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047fffffffffffffff616263 10102203616263
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:16 str:"abc" bz:"abc" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 10102203616264
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:16 str:"abd" bz:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 10122203616265
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:18 str:"abe" bz:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616264 100a
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 u64:10 str:"abd" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000098000000000000000 7801
|
||||||
|
PK testpb.ExampleTable 9/0/"" -> u32:9 b:true
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
ITERATOR 0100 -> 0101
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616263 100e2203616263
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:14 str:"abc" bz:"abc" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616264 100e2203616264
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:14 str:"abd" bz:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047fffffffffffffff616263 10102203616263
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:16 str:"abc" bz:"abc" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 10102203616264
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:16 str:"abd" bz:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 10122203616265
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:18 str:"abe" bz:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616264 100a
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 u64:10 str:"abd" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000098000000000000000 7801
|
||||||
|
PK testpb.ExampleTable 9/0/"" -> u32:9 b:true
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
ITERATOR 0100 -> 0101
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616263 100e2203616263
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:14 str:"abc" bz:"abc" i64:-2
|
||||||
|
KEY 0100000000047ffffffffffffffe616263 100e2203616263
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:14 str:"abc" bz:"abc" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616264 100e2203616264
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:14 str:"abd" bz:"abd" i64:-2
|
||||||
|
KEY 0100000000047ffffffffffffffe616264 100e2203616264
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:14 str:"abd" bz:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047fffffffffffffff616263 10102203616263
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:16 str:"abc" bz:"abc" i64:-1
|
||||||
|
KEY 0100000000047fffffffffffffff616263 10102203616263
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:16 str:"abc" bz:"abc" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 10102203616264
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:16 str:"abd" bz:"abd" i64:-2
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 10102203616264
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:16 str:"abd" bz:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 10122203616265
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:18 str:"abe" bz:"abe" i64:-2
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 10122203616265
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:18 str:"abe" bz:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
KEY 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
KEY 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
KEY 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
KEY 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616264 100a
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 u64:10 str:"abd" i64:1
|
||||||
|
KEY 0100000000088000000000000001616264 100a
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 u64:10 str:"abd" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000098000000000000000 7801
|
||||||
|
PK testpb.ExampleTable 9/0/"" -> u32:9 b:true
|
||||||
|
KEY 0100000000098000000000000000 7801
|
||||||
|
PK testpb.ExampleTable 9/0/"" -> u32:9 b:true
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
GET 0100000000077ffffffffffffffe616265 100a
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
ORM DELETE testpb.ExampleTable u32:7 u64:10 str:"abe" i64:-2
|
||||||
|
DEL 0100000000077ffffffffffffffe616265
|
||||||
|
DEL PK testpb.ExampleTable 7/-2/"abe" -> u32:7 str:"abe" i64:-2
|
||||||
|
DEL 0101000000000000000a616265
|
||||||
|
DEL ERR:EOF
|
||||||
|
DEL 010261626500000000077ffffffffffffffe
|
||||||
|
DEL IDX testpb.ExampleTable str/u32/i64 : "abe"/7/-2 -> 7/-2/"abe"
|
||||||
|
DEL 01030061626500000000077ffffffffffffffe
|
||||||
|
DEL IDX testpb.ExampleTable bz/str/u32/i64 : ""/"abe"/7/-2 -> 7/-2/"abe"
|
||||||
|
HAS 0100000000077ffffffffffffffe616265
|
||||||
|
PK testpb.ExampleTable 7/-2/"abe" -> u32:7 str:"abe" i64:-2
|
||||||
|
ITERATOR 0100 -> 0101
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616263 100e2203616263
|
||||||
|
PK testpb.ExampleTable 4/-2/"abc" -> u32:4 u64:14 str:"abc" bz:"abc" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047ffffffffffffffe616264 100e2203616264
|
||||||
|
PK testpb.ExampleTable 4/-2/"abd" -> u32:4 u64:14 str:"abd" bz:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000047fffffffffffffff616263 10102203616263
|
||||||
|
PK testpb.ExampleTable 4/-1/"abc" -> u32:4 u64:16 str:"abc" bz:"abc" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616264 10102203616264
|
||||||
|
PK testpb.ExampleTable 5/-2/"abd" -> u32:5 u64:16 str:"abd" bz:"abd" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000057ffffffffffffffe616265 10122203616265
|
||||||
|
PK testpb.ExampleTable 5/-2/"abe" -> u32:5 u64:18 str:"abe" bz:"abe" i64:-2
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000077fffffffffffffff616265 100b
|
||||||
|
PK testpb.ExampleTable 7/-1/"abe" -> u32:7 u64:11 str:"abe" i64:-1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000087ffffffffffffffc616263 100b
|
||||||
|
PK testpb.ExampleTable 8/-4/"abc" -> u32:8 u64:11 str:"abc" i64:-4
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616263 100c
|
||||||
|
PK testpb.ExampleTable 8/1/"abc" -> u32:8 u64:12 str:"abc" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000088000000000000001616264 100a
|
||||||
|
PK testpb.ExampleTable 8/1/"abd" -> u32:8 u64:10 str:"abd" i64:1
|
||||||
|
NEXT
|
||||||
|
VALID true
|
||||||
|
KEY 0100000000098000000000000000 7801
|
||||||
|
PK testpb.ExampleTable 9/0/"" -> u32:9 b:true
|
||||||
|
NEXT
|
||||||
|
VALID false
|
||||||
|
CLOSE
|
||||||
|
CLOSE
|
||||||
|
CLOSE

@ -0,0 +1,211 @@
package ormtable

import (
	"context"

	"github.com/cosmos/cosmos-sdk/orm/model/kv"
	"github.com/cosmos/cosmos-sdk/orm/model/ormlist"

	"github.com/cosmos/cosmos-sdk/orm/encoding/encodeutil"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"

	"github.com/cosmos/cosmos-sdk/orm/encoding/ormkv"
	"github.com/cosmos/cosmos-sdk/orm/types/ormerrors"
)

type uniqueKeyIndex struct {
	*ormkv.UniqueKeyCodec
	fields         fieldNames
	primaryKey     *primaryKeyIndex
	getReadBackend func(context.Context) (ReadBackend, error)
}
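
// The index store entry for a unique key maps the encoded index fields to the
// primary key fields that are not already contained in the index key, so a
// valid entry may have an empty value; "not found" is therefore signalled by
// a nil value rather than an empty one (see Get and DeleteByKey below).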

func (u uniqueKeyIndex) Iterator(ctx context.Context, options ...ormlist.Option) (Iterator, error) {
	backend, err := u.getReadBackend(ctx)
	if err != nil {
		return nil, err
	}

	return iterator(backend, backend.IndexStoreReader(), u, u.GetKeyCodec(), options)
}

func (u uniqueKeyIndex) doNotImplement() {}

func (u uniqueKeyIndex) Has(ctx context.Context, values ...interface{}) (found bool, err error) {
	backend, err := u.getReadBackend(ctx)
	if err != nil {
		return false, err
	}

	key, err := u.GetKeyCodec().EncodeKey(encodeutil.ValuesOf(values...))
	if err != nil {
		return false, err
	}

	return backend.IndexStoreReader().Has(key)
}

func (u uniqueKeyIndex) Get(ctx context.Context, message proto.Message, keyValues ...interface{}) (found bool, err error) {
	backend, err := u.getReadBackend(ctx)
	if err != nil {
		return false, err
	}

	key, err := u.GetKeyCodec().EncodeKey(encodeutil.ValuesOf(keyValues...))
	if err != nil {
		return false, err
	}

	value, err := backend.IndexStoreReader().Get(key)
	if err != nil {
		return false, err
	}

	// for unique keys, value can be empty and the entry still exists
	if value == nil {
		return false, nil
	}

	_, pk, err := u.DecodeIndexKey(key, value)
	if err != nil {
		return true, err
	}

	return u.primaryKey.get(backend, message, pk)
}
|
||||||
|
|
||||||
|
func (u uniqueKeyIndex) DeleteByKey(ctx context.Context, keyValues ...interface{}) error {
|
||||||
|
backend, err := u.getReadBackend(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
key, err := u.GetKeyCodec().EncodeKey(encodeutil.ValuesOf(keyValues...))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
value, err := backend.IndexStoreReader().Get(key)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// for unique keys, value can be empty and the entry still exists
|
||||||
|
if value == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
_, pk, err := u.DecodeIndexKey(key, value)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return u.primaryKey.doDeleteByKey(ctx, pk)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u uniqueKeyIndex) onInsert(store kv.Store, message protoreflect.Message) error {
|
||||||
|
k, v, err := u.EncodeKVFromMessage(message)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
has, err := store.Has(k)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if has {
|
||||||
|
return ormerrors.UniqueKeyViolation
|
||||||
|
}
|
||||||
|
|
||||||
|
return store.Set(k, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u uniqueKeyIndex) onUpdate(store kv.Store, new, existing protoreflect.Message) error {
|
||||||
|
keyCodec := u.GetKeyCodec()
|
||||||
|
newValues := keyCodec.GetKeyValues(new)
|
||||||
|
existingValues := keyCodec.GetKeyValues(existing)
|
||||||
|
if keyCodec.CompareKeys(newValues, existingValues) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
newKey, err := keyCodec.EncodeKey(newValues)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
has, err := store.Has(newKey)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if has {
|
||||||
|
return ormerrors.UniqueKeyViolation
|
||||||
|
}
|
||||||
|
|
||||||
|
existingKey, err := keyCodec.EncodeKey(existingValues)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = store.Delete(existingKey)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, value, err := u.GetValueCodec().EncodeKeyFromMessage(new)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return store.Set(newKey, value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u uniqueKeyIndex) onDelete(store kv.Store, message protoreflect.Message) error {
|
||||||
|
_, key, err := u.GetKeyCodec().EncodeKeyFromMessage(message)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return store.Delete(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u uniqueKeyIndex) readValueFromIndexKey(store ReadBackend, primaryKey []protoreflect.Value, _ []byte, message proto.Message) error {
|
||||||
|
found, err := u.primaryKey.get(store, message, primaryKey)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return ormerrors.UnexpectedError.Wrapf("can't find primary key")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p uniqueKeyIndex) Fields() string {
|
||||||
|
return p.fields.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ indexer = &uniqueKeyIndex{}
|
||||||
|
var _ UniqueIndex = &uniqueKeyIndex{}
|
||||||
|
|
||||||
|
// isNonTrivialUniqueKey checks if unique key fields are non-trivial, meaning that they
|
||||||
|
// don't contain the full primary key. If they contain the full primary key, then
|
||||||
|
// we can just use a regular index because there is no new unique constraint.
|
||||||
|
func isNonTrivialUniqueKey(fields []protoreflect.Name, primaryKeyFields []protoreflect.Name) bool {
|
||||||
|
have := map[protoreflect.Name]bool{}
|
||||||
|
for _, field := range fields {
|
||||||
|
have[field] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, field := range primaryKeyFields {
|
||||||
|
if !have[field] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
|
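The isNonTrivialUniqueKey helper above decides whether a unique index adds a real constraint beyond the primary key. A small illustration follows; the wrapper function name is hypothetical, and the field names are borrowed from testpb.ExampleTable, whose primary key is u32/i64/str.

```go
package ormtable

import "google.golang.org/protobuf/reflect/protoreflect"

// exampleNonTrivialUniqueKey is a hypothetical helper showing how the check behaves.
func exampleNonTrivialUniqueKey() {
	pk := []protoreflect.Name{"u32", "i64", "str"}

	// "u64" does not contain the whole primary key, so a unique index on it
	// enforces a genuinely new uniqueness constraint.
	_ = isNonTrivialUniqueKey([]protoreflect.Name{"u64"}, pk) // true

	// "str"/"u32"/"i64" covers the full primary key, so uniqueness is already
	// guaranteed and a regular (non-unique) index would suffice.
	_ = isNonTrivialUniqueKey([]protoreflect.Name{"str", "u32", "i64"}, pk) // false
}
```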
@ -0,0 +1,35 @@
package ormtable

// prefixEndBytes returns the []byte that would end a
// range query for all []byte with a certain prefix
// Deals with last byte of prefix being FF without overflowing
func prefixEndBytes(prefix []byte) []byte {
	if len(prefix) == 0 {
		return nil
	}

	end := make([]byte, len(prefix))
	copy(end, prefix)

	for {
		if end[len(end)-1] != byte(255) {
			end[len(end)-1]++
			break
		}

		end = end[:len(end)-1]

		if len(end) == 0 {
			end = nil
			break
		}
	}

	return end
}

// inclusiveEndBytes returns the []byte that would end a
// range query such that the input would be included
func inclusiveEndBytes(inclusiveBytes []byte) []byte {
	return append(inclusiveBytes, byte(0x00))
}
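prefixEndBytes and inclusiveEndBytes above compute the exclusive and inclusive upper bounds used for range iteration. A short, hypothetical helper (the name exampleRangeBounds and the sample inputs are for illustration only) spells out the outputs implied by the code above:

```go
package ormtable

import "fmt"

// exampleRangeBounds prints the range bounds produced for a few sample prefixes.
func exampleRangeBounds() {
	fmt.Printf("%x\n", prefixEndBytes([]byte{0x01, 0x02}))    // 0103: last byte incremented
	fmt.Printf("%x\n", prefixEndBytes([]byte{0x01, 0xff}))    // 02: trailing 0xff dropped, previous byte incremented
	fmt.Printf("%x\n", prefixEndBytes([]byte{0xff, 0xff}))    // (empty): all 0xff, nil means "iterate to the end"
	fmt.Printf("%x\n", inclusiveEndBytes([]byte{0x01, 0x02})) // 010200: appending 0x00 makes the bound inclusive
}
```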
@ -11,7 +11,7 @@ var (
 	DuplicateKeyField             = errors.New(codespace, 4, "duplicate field in key")
 	FieldNotFound                 = errors.New(codespace, 5, "field not found")
 	InvalidAutoIncrementKey       = errors.New(codespace, 6, "an auto-increment primary key must specify a single uint64 field")
-	InvalidIndexId                = errors.New(codespace, 7, "invalid or missing index id, need a non-zero value")
+	InvalidIndexId                = errors.New(codespace, 7, "invalid or missing index id, need a value >= 0 and < 32768")
 	DuplicateIndexId              = errors.New(codespace, 8, "duplicate index id")
 	PrimaryKeyConstraintViolation = errors.New(codespace, 9, "object with primary key already exists")
 	NotFoundOnUpdate              = errors.New(codespace, 10, "can't update object which doesn't exist")
@ -28,4 +28,6 @@ var (
 	UnexpectedError           = errors.New(codespace, 21, "unexpected error")
 	InvalidRangeIterationKeys = errors.New(codespace, 22, "invalid range iteration keys")
 	JSONImportError           = errors.New(codespace, 23, "json import error")
+	UniqueKeyViolation        = errors.New(codespace, 24, "unique key violation")
+	InvalidTableDefinition    = errors.New(codespace, 25, "invalid table definition")
 )
@ -88,7 +88,8 @@ message SecondaryIndexDescriptor {
   string fields = 1;
 
   // id is a non-zero integer ID that must be unique within the indexes for this
-  // table. It may be deprecated in the future when this can be auto-generated.
+  // table and less than 32768. It may be deprecated in the future when this can
+  // be auto-generated.
   uint32 id = 2;
 
   // unique specifies that this an unique index.