feat: ADR-040: Implement KV Store with decoupled storage and SMT (#9892)

## Description

Resolves: https://github.com/cosmos/cosmos-sdk/issues/10117

Implements a `CommitKVStore` which separates the concerns of state storage and state commitment according to [ADR-040](https://github.com/cosmos/cosmos-sdk/blob/eb7d939f86/docs/architecture/adr-040-storage-and-smt-state-commitments.md).
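
A minimal usage sketch of the new store (assuming the `store/v2/flat` and `db/memdb` packages added in this PR; error handling trimmed):

```go
package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/db/memdb"
	"github.com/cosmos/cosmos-sdk/store/v2/flat"
)

func main() {
	// Storage and state commitment share one backing DB when MerkleDB is nil.
	store, err := flat.NewStore(memdb.NewDB(), flat.DefaultStoreConfig)
	if err != nil {
		panic(err)
	}
	defer store.Close()

	// Writes update the storage bucket and the SMT commitment together.
	store.Set([]byte("hello"), []byte("goodbye"))
	fmt.Printf("value: %s\n", store.Get([]byte("hello")))

	// Commit persists a new version and returns its ID and Merkle root.
	id := store.Commit()
	fmt.Printf("version %d, root %X\n", id.Version, id.Hash)
}
```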

---

### Author Checklist

*All items are required. Please add a note to the item if the item is not applicable and
please add links to any relevant follow up issues.*

I have...

- [x] included the correct [type prefix](https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json) in the PR title
- [ ] added `!` to the type prefix if API or client breaking change
- [x] targeted the correct branch (see [PR Targeting](https://github.com/cosmos/cosmos-sdk/blob/master/CONTRIBUTING.md#pr-targeting))
- [x] provided a link to the relevant issue or specification
- [ ] followed the guidelines for [building modules](https://github.com/cosmos/cosmos-sdk/blob/master/docs/building-modules) - n/a
- [x] included the necessary unit and integration [tests](https://github.com/cosmos/cosmos-sdk/blob/master/CONTRIBUTING.md#testing)
- [x] added a changelog entry to `CHANGELOG.md`
- [x] included comments for [documenting Go code](https://blog.golang.org/godoc)
- [x] updated the relevant documentation or specification
- [x] reviewed "Files changed" and left comments if necessary
- [ ] confirmed all CI checks have passed

### Reviewers Checklist

*All items are required. Please add a note if the item is not applicable and please add
your handle next to the items reviewed if you only reviewed selected items.*

I have...

- [ ] confirmed the correct [type prefix](https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json) in the PR title
- [ ] confirmed `!` in the type prefix if API or client breaking change
- [ ] confirmed all author checklist items have been addressed 
- [ ] reviewed state machine logic
- [ ] reviewed API design and naming
- [ ] reviewed documentation is accurate
- [ ] reviewed tests and test coverage
- [ ] manually tested (if applicable)
Roy Crihfield, 2021-10-19 19:58:06 +08:00, committed by GitHub
commit 85eed1f359 (parent 678986251a)
24 changed files with 1979 additions and 43 deletions

CHANGELOG.md

@ -200,6 +200,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
* [\#9848](https://github.com/cosmos/cosmos-sdk/pull/9848) ADR-040: Implement BadgerDB backend
* [\#9851](https://github.com/cosmos/cosmos-sdk/pull/9851) ADR-040: Implement RocksDB backend
* [\#10308](https://github.com/cosmos/cosmos-sdk/pull/10308) ADR-040: Implement DBConnection.Revert
+* [\#9892](https://github.com/cosmos/cosmos-sdk/pull/9892) ADR-040: KV Store with decoupled storage and state commitment
### Client Breaking Changes

db/adapter.go (new file)

@ -0,0 +1,23 @@
package db
type readerRWAdapter struct{ DBReader }
// ReaderAsReadWriter returns a ReadWriter that forwards to a reader and errors if writes are
// attempted. Can be used to pass a Reader when a ReadWriter is expected
// but no writes will actually occur.
func ReaderAsReadWriter(r DBReader) DBReadWriter {
return readerRWAdapter{r}
}
func (readerRWAdapter) Set([]byte, []byte) error {
return ErrReadOnly
}
func (readerRWAdapter) Delete([]byte) error {
return ErrReadOnly
}
func (rw readerRWAdapter) Commit() error {
rw.Discard()
return nil
}
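
For illustration, a hedged sketch of how this adapter can hand a read-only view to code that expects a `DBReadWriter` (the version argument and key are hypothetical; `ReaderAt` and `ErrReadOnly` come from the `db` package as shown in this PR):

```go
package example

import (
	"errors"
	"fmt"

	dbm "github.com/cosmos/cosmos-sdk/db"
)

// readAtVersion wraps a read-only view so it satisfies DBReadWriter;
// reads are forwarded, while writes fail with ErrReadOnly.
func readAtVersion(db dbm.DBConnection, version uint64, key []byte) ([]byte, error) {
	view, err := db.ReaderAt(version)
	if err != nil {
		return nil, err
	}
	defer view.Discard()

	rw := dbm.ReaderAsReadWriter(view)
	if err := rw.Set(key, []byte("x")); !errors.Is(err, dbm.ErrReadOnly) {
		return nil, fmt.Errorf("expected ErrReadOnly, got: %v", err)
	}
	return rw.Get(key) // forwarded to the underlying reader
}
```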

db/dbtest/util.go (new file)

@ -0,0 +1,52 @@
package dbtest
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/cosmos/cosmos-sdk/db"
)
func AssertNext(t *testing.T, itr dbm.Iterator, expected bool) {
t.Helper()
require.Equal(t, expected, itr.Next())
}
func AssertDomain(t *testing.T, itr dbm.Iterator, start, end []byte) {
t.Helper()
ds, de := itr.Domain()
assert.Equal(t, start, ds, "checkDomain domain start incorrect")
assert.Equal(t, end, de, "checkDomain domain end incorrect")
}
func AssertItem(t *testing.T, itr dbm.Iterator, key, value []byte) {
t.Helper()
assert.Exactly(t, itr.Key(), key)
assert.Exactly(t, itr.Value(), value)
}
func AssertInvalid(t *testing.T, itr dbm.Iterator) {
t.Helper()
AssertNext(t, itr, false)
AssertKeyPanics(t, itr)
AssertValuePanics(t, itr)
}
func AssertKeyPanics(t *testing.T, itr dbm.Iterator) {
t.Helper()
assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't")
}
func AssertValue(t *testing.T, db dbm.DBReader, key, valueWanted []byte) {
t.Helper()
valueGot, err := db.Get(key)
assert.NoError(t, err)
assert.Equal(t, valueWanted, valueGot)
}
func AssertValuePanics(t *testing.T, itr dbm.Iterator) {
t.Helper()
assert.Panics(t, func() { itr.Value() })
}


@ -18,7 +18,7 @@ func ValidateKv(key, value []byte) error {
func CombineErrors(ret error, also error, desc string) error {
if also != nil {
if ret != nil {
-ret = fmt.Errorf("%w; %v: %v", ret, desc, also)
+ret = fmt.Errorf("%w; %s: %v", ret, desc, also)
} else {
ret = also
}

db/prefix/prefix.go (new file)

@ -0,0 +1,159 @@
package prefix
import (
dbm "github.com/cosmos/cosmos-sdk/db"
)
// Prefix Reader/Writer lets you namespace multiple DBs within a single DB.
type prefixR struct {
db dbm.DBReader
prefix []byte
}
type prefixRW struct {
db dbm.DBReadWriter
prefix []byte
}
var _ dbm.DBReader = (*prefixR)(nil)
var _ dbm.DBReadWriter = (*prefixRW)(nil)
func NewPrefixReader(db dbm.DBReader, prefix []byte) prefixR {
return prefixR{
prefix: prefix,
db: db,
}
}
func NewPrefixReadWriter(db dbm.DBReadWriter, prefix []byte) prefixRW {
return prefixRW{
prefix: prefix,
db: db,
}
}
func prefixed(prefix, key []byte) []byte {
return append(prefix, key...)
}
// Get implements DBReader.
func (pdb prefixR) Get(key []byte) ([]byte, error) {
if len(key) == 0 {
return nil, dbm.ErrKeyEmpty
}
return pdb.db.Get(prefixed(pdb.prefix, key))
}
// Has implements DBReader.
func (pdb prefixR) Has(key []byte) (bool, error) {
if len(key) == 0 {
return false, dbm.ErrKeyEmpty
}
return pdb.db.Has(prefixed(pdb.prefix, key))
}
// Iterator implements DBReader.
func (pdb prefixR) Iterator(start, end []byte) (dbm.Iterator, error) {
if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
return nil, dbm.ErrKeyEmpty
}
var pend []byte
if end == nil {
pend = cpIncr(pdb.prefix)
} else {
pend = prefixed(pdb.prefix, end)
}
itr, err := pdb.db.Iterator(prefixed(pdb.prefix, start), pend)
if err != nil {
return nil, err
}
return newPrefixIterator(pdb.prefix, start, end, itr), nil
}
// ReverseIterator implements DBReader.
func (pdb prefixR) ReverseIterator(start, end []byte) (dbm.Iterator, error) {
if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
return nil, dbm.ErrKeyEmpty
}
var pend []byte
if end == nil {
pend = cpIncr(pdb.prefix)
} else {
pend = prefixed(pdb.prefix, end)
}
ritr, err := pdb.db.ReverseIterator(prefixed(pdb.prefix, start), pend)
if err != nil {
return nil, err
}
return newPrefixIterator(pdb.prefix, start, end, ritr), nil
}
// Discard implements DBReader.
func (pdb prefixR) Discard() error { return pdb.db.Discard() }
// Set implements DBReadWriter.
func (pdb prefixRW) Set(key []byte, value []byte) error {
if len(key) == 0 {
return dbm.ErrKeyEmpty
}
return pdb.db.Set(prefixed(pdb.prefix, key), value)
}
// Delete implements DBReadWriter.
func (pdb prefixRW) Delete(key []byte) error {
if len(key) == 0 {
return dbm.ErrKeyEmpty
}
return pdb.db.Delete(prefixed(pdb.prefix, key))
}
// Get implements DBReadWriter.
func (pdb prefixRW) Get(key []byte) ([]byte, error) {
return NewPrefixReader(pdb.db, pdb.prefix).Get(key)
}
// Has implements DBReadWriter.
func (pdb prefixRW) Has(key []byte) (bool, error) {
return NewPrefixReader(pdb.db, pdb.prefix).Has(key)
}
// Iterator implements DBReadWriter.
func (pdb prefixRW) Iterator(start, end []byte) (dbm.Iterator, error) {
return NewPrefixReader(pdb.db, pdb.prefix).Iterator(start, end)
}
// ReverseIterator implements DBReadWriter.
func (pdb prefixRW) ReverseIterator(start, end []byte) (dbm.Iterator, error) {
return NewPrefixReader(pdb.db, pdb.prefix).ReverseIterator(start, end)
}
// Close implements DBReadWriter.
func (pdb prefixRW) Commit() error { return pdb.db.Commit() }
// Discard implements DBReadWriter.
func (pdb prefixRW) Discard() error { return pdb.db.Discard() }
// Returns a slice of the same length (big endian), but incremented by one.
// Returns nil on overflow (e.g. if bz bytes are all 0xFF)
// CONTRACT: len(bz) > 0
func cpIncr(bz []byte) (ret []byte) {
if len(bz) == 0 {
panic("cpIncr expects non-zero bz length")
}
ret = make([]byte, len(bz))
copy(ret, bz)
for i := len(bz) - 1; i >= 0; i-- {
if ret[i] < byte(0xFF) {
ret[i]++
return
}
ret[i] = byte(0x00)
if i == 0 {
// Overflow
return nil
}
}
return nil
}
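
A short sketch of the namespacing this provides (assuming the `db/memdb` backend used elsewhere in this PR; the prefixes and keys are made up for illustration):

```go
package example

import (
	"github.com/cosmos/cosmos-sdk/db/memdb"
	"github.com/cosmos/cosmos-sdk/db/prefix"
)

// namespaces shows two prefixed read-writers sharing one transaction without
// seeing each other's keys: the raw keys are "user/alice" and "post/alice".
func namespaces() ([]byte, error) {
	db := memdb.NewDB()
	txn := db.ReadWriter()

	users := prefix.NewPrefixReadWriter(txn, []byte("user/"))
	posts := prefix.NewPrefixReadWriter(txn, []byte("post/"))

	if err := users.Set([]byte("alice"), []byte("1")); err != nil {
		txn.Discard()
		return nil, err
	}
	if err := posts.Set([]byte("alice"), []byte("2")); err != nil {
		txn.Discard()
		return nil, err
	}

	v, err := users.Get([]byte("alice")) // "1", not "2"
	if err != nil {
		txn.Discard()
		return nil, err
	}
	return v, txn.Commit()
}
```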


@ -0,0 +1,112 @@
package prefix
import (
"bytes"
"fmt"
dbm "github.com/cosmos/cosmos-sdk/db"
)
// IteratePrefix is a convenience function for iterating over a key domain
// restricted by prefix.
func IteratePrefix(db dbm.DBReader, prefix []byte) (dbm.Iterator, error) {
var start, end []byte
if len(prefix) != 0 {
start = prefix
end = cpIncr(prefix)
}
itr, err := db.Iterator(start, end)
if err != nil {
return nil, err
}
return itr, nil
}
// Strips prefix while iterating from Iterator.
type prefixDBIterator struct {
prefix []byte
start []byte
end []byte
source dbm.Iterator
err error
}
var _ dbm.Iterator = (*prefixDBIterator)(nil)
func newPrefixIterator(prefix, start, end []byte, source dbm.Iterator) *prefixDBIterator {
return &prefixDBIterator{
prefix: prefix,
start: start,
end: end,
source: source,
}
}
// Domain implements Iterator.
func (itr *prefixDBIterator) Domain() (start, end []byte) {
return itr.start, itr.end
}
func (itr *prefixDBIterator) valid() bool {
if itr.err != nil {
return false
}
key := itr.source.Key()
if len(key) < len(itr.prefix) || !bytes.Equal(key[:len(itr.prefix)], itr.prefix) {
itr.err = fmt.Errorf("received invalid key from backend: %x (expected prefix %x)",
key, itr.prefix)
return false
}
return true
}
// Next implements Iterator.
func (itr *prefixDBIterator) Next() bool {
if !itr.source.Next() {
return false
}
key := itr.source.Key()
if !bytes.HasPrefix(key, itr.prefix) {
return false
}
// Empty keys are not allowed, so if a key exists in the database that exactly matches the
// prefix we need to skip it.
if bytes.Equal(key, itr.prefix) {
return itr.Next()
}
return true
}
// Key implements Iterator.
func (itr *prefixDBIterator) Key() []byte {
itr.assertIsValid()
key := itr.source.Key()
return key[len(itr.prefix):] // we have checked the key in Valid()
}
// Value implements Iterator.
func (itr *prefixDBIterator) Value() []byte {
itr.assertIsValid()
return itr.source.Value()
}
// Error implements Iterator.
func (itr *prefixDBIterator) Error() error {
if err := itr.source.Error(); err != nil {
return err
}
return itr.err
}
// Close implements Iterator.
func (itr *prefixDBIterator) Close() error {
return itr.source.Close()
}
func (itr *prefixDBIterator) assertIsValid() {
if !itr.valid() {
panic("iterator is invalid")
}
}

db/prefix/prefix_test.go (new file)

@ -0,0 +1,157 @@
package prefix_test
import (
"testing"
"github.com/stretchr/testify/require"
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/dbtest"
"github.com/cosmos/cosmos-sdk/db/memdb"
pfx "github.com/cosmos/cosmos-sdk/db/prefix"
)
func fillDBWithStuff(t *testing.T, dbw dbm.DBWriter) {
// Under "key" prefix
require.NoError(t, dbw.Set([]byte("key"), []byte("value")))
require.NoError(t, dbw.Set([]byte("key1"), []byte("value1")))
require.NoError(t, dbw.Set([]byte("key2"), []byte("value2")))
require.NoError(t, dbw.Set([]byte("key3"), []byte("value3")))
require.NoError(t, dbw.Set([]byte("something"), []byte("else")))
require.NoError(t, dbw.Set([]byte("k"), []byte("val")))
require.NoError(t, dbw.Set([]byte("ke"), []byte("valu")))
require.NoError(t, dbw.Set([]byte("kee"), []byte("valuu")))
require.NoError(t, dbw.Commit())
}
func mockDBWithStuff(t *testing.T) dbm.DBConnection {
db := memdb.NewDB()
fillDBWithStuff(t, db.Writer())
return db
}
func makePrefixReader(t *testing.T, db dbm.DBConnection, pre []byte) dbm.DBReader {
view := db.Reader()
require.NotNil(t, view)
return pfx.NewPrefixReader(view, pre)
}
func TestPrefixDBSimple(t *testing.T) {
pdb := makePrefixReader(t, mockDBWithStuff(t), []byte("key"))
dbtest.AssertValue(t, pdb, []byte("key"), nil)
dbtest.AssertValue(t, pdb, []byte("key1"), nil)
dbtest.AssertValue(t, pdb, []byte("1"), []byte("value1"))
dbtest.AssertValue(t, pdb, []byte("key2"), nil)
dbtest.AssertValue(t, pdb, []byte("2"), []byte("value2"))
dbtest.AssertValue(t, pdb, []byte("key3"), nil)
dbtest.AssertValue(t, pdb, []byte("3"), []byte("value3"))
dbtest.AssertValue(t, pdb, []byte("something"), nil)
dbtest.AssertValue(t, pdb, []byte("k"), nil)
dbtest.AssertValue(t, pdb, []byte("ke"), nil)
dbtest.AssertValue(t, pdb, []byte("kee"), nil)
}
func TestPrefixDBIterator1(t *testing.T) {
pdb := makePrefixReader(t, mockDBWithStuff(t), []byte("key"))
itr, err := pdb.Iterator(nil, nil)
require.NoError(t, err)
dbtest.AssertDomain(t, itr, nil, nil)
dbtest.AssertNext(t, itr, true)
dbtest.AssertItem(t, itr, []byte("1"), []byte("value1"))
dbtest.AssertNext(t, itr, true)
dbtest.AssertItem(t, itr, []byte("2"), []byte("value2"))
dbtest.AssertNext(t, itr, true)
dbtest.AssertItem(t, itr, []byte("3"), []byte("value3"))
dbtest.AssertNext(t, itr, false)
dbtest.AssertInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator1(t *testing.T) {
pdb := makePrefixReader(t, mockDBWithStuff(t), []byte("key"))
itr, err := pdb.ReverseIterator(nil, nil)
require.NoError(t, err)
dbtest.AssertDomain(t, itr, nil, nil)
dbtest.AssertNext(t, itr, true)
dbtest.AssertItem(t, itr, []byte("3"), []byte("value3"))
dbtest.AssertNext(t, itr, true)
dbtest.AssertItem(t, itr, []byte("2"), []byte("value2"))
dbtest.AssertNext(t, itr, true)
dbtest.AssertItem(t, itr, []byte("1"), []byte("value1"))
dbtest.AssertNext(t, itr, false)
dbtest.AssertInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator5(t *testing.T) {
pdb := makePrefixReader(t, mockDBWithStuff(t), []byte("key"))
itr, err := pdb.ReverseIterator([]byte("1"), nil)
require.NoError(t, err)
dbtest.AssertDomain(t, itr, []byte("1"), nil)
dbtest.AssertNext(t, itr, true)
dbtest.AssertItem(t, itr, []byte("3"), []byte("value3"))
dbtest.AssertNext(t, itr, true)
dbtest.AssertItem(t, itr, []byte("2"), []byte("value2"))
dbtest.AssertNext(t, itr, true)
dbtest.AssertItem(t, itr, []byte("1"), []byte("value1"))
dbtest.AssertNext(t, itr, false)
dbtest.AssertInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator6(t *testing.T) {
pdb := makePrefixReader(t, mockDBWithStuff(t), []byte("key"))
itr, err := pdb.ReverseIterator([]byte("2"), nil)
require.NoError(t, err)
dbtest.AssertDomain(t, itr, []byte("2"), nil)
dbtest.AssertNext(t, itr, true)
dbtest.AssertItem(t, itr, []byte("3"), []byte("value3"))
dbtest.AssertNext(t, itr, true)
dbtest.AssertItem(t, itr, []byte("2"), []byte("value2"))
dbtest.AssertNext(t, itr, false)
dbtest.AssertInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator7(t *testing.T) {
pdb := makePrefixReader(t, mockDBWithStuff(t), []byte("key"))
itr, err := pdb.ReverseIterator(nil, []byte("2"))
require.NoError(t, err)
dbtest.AssertDomain(t, itr, nil, []byte("2"))
dbtest.AssertNext(t, itr, true)
dbtest.AssertItem(t, itr, []byte("1"), []byte("value1"))
dbtest.AssertNext(t, itr, false)
dbtest.AssertInvalid(t, itr)
itr.Close()
}
func TestPrefixDBViewVersion(t *testing.T) {
prefix := []byte("key")
db := memdb.NewDB()
fillDBWithStuff(t, db.Writer())
id, err := db.SaveNextVersion()
require.NoError(t, err)
pdb := pfx.NewPrefixReadWriter(db.ReadWriter(), prefix)
pdb.Set([]byte("1"), []byte("newvalue1"))
pdb.Delete([]byte("2"))
pdb.Set([]byte("4"), []byte("newvalue4"))
pdb.Discard()
dbview, err := db.ReaderAt(id)
require.NotNil(t, dbview)
require.NoError(t, err)
view := pfx.NewPrefixReader(dbview, prefix)
require.NotNil(t, view)
defer view.Discard()
dbtest.AssertValue(t, view, []byte("1"), []byte("value1"))
dbtest.AssertValue(t, view, []byte("2"), []byte("value2"))
dbtest.AssertValue(t, view, []byte("4"), nil)
}


@ -107,6 +107,8 @@ type DBReader interface {
// DBWriter is a write-only transaction interface.
// It is safe for concurrent writes, following an optimistic (OCC) strategy, detecting any write
// conflicts and returning an error on commit, rather than locking the DB.
+// Callers must call Commit or Discard when done with the transaction.
+//
// This can be used to wrap a write-optimized batch object if provided by the backend implementation.
type DBWriter interface {
// Set sets the value for the given key, replacing it if it already exists.
@ -136,29 +138,32 @@ type DBReadWriter interface {
//
// Callers must make sure the iterator is valid before calling any methods on it, otherwise
// these methods will panic. This is in part caused by most backend databases using this convention.
+// Note that the iterator is invalid on construction: Next() must be called to initialize it to its
+// starting position.
//
// As with DBReader, keys and values should be considered read-only, and must be copied before they are
// modified.
//
// Typical usage:
//
// var itr Iterator = ...
// defer itr.Close()
//
-// for ; itr.Valid(); itr.Next() {
+// for itr.Next() {
// k, v := itr.Key(); itr.Value()
// ...
// }
// if err := itr.Error(); err != nil {
// ...
// }
type Iterator interface {
// Domain returns the start (inclusive) and end (exclusive) limits of the iterator.
// CONTRACT: start, end readonly []byte
Domain() (start []byte, end []byte)
// Next moves the iterator to the next key in the database, as defined by order of iteration;
-// returns whether the iterator is valid. Once invalid, it remains invalid forever.
+// returns whether the iterator is valid.
+// Once this function returns false, the iterator remains invalid forever.
Next() bool
// Key returns the key at the current position. Panics if the iterator is invalid.


@ -2,7 +2,6 @@ package db
import (
"fmt"
-"math"
)
// VersionManager encapsulates the current valid versions of a DB and computes
@ -18,10 +17,9 @@ var _ VersionSet = (*VersionManager)(nil)
func NewVersionManager(versions []uint64) *VersionManager {
vmap := make(map[uint64]struct{})
var init, last uint64
-init = math.MaxUint64
for _, ver := range versions {
vmap[ver] = struct{}{}
-if ver < init {
+if init == 0 || ver < init {
init = ver
}
if ver > last {
@ -46,16 +44,11 @@ func (vm *VersionManager) Initial() uint64 {
return vm.initial
}
-func (vm *VersionManager) Next() uint64 {
-return vm.Last() + 1
-}
func (vm *VersionManager) Save(target uint64) (uint64, error) {
-next := vm.Next()
+next := vm.Last() + 1
if target == 0 {
target = next
-}
-if target < next {
+} else if target < next {
return 0, fmt.Errorf(
"target version cannot be less than next sequential version (%v < %v)", target, next)
}
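
A hedged sketch of the revised `Save` semantics (assuming, as the tests below suggest, that `Save` records the chosen version and returns it):

```go
package example

import (
	"errors"

	dbm "github.com/cosmos/cosmos-sdk/db"
)

// saveVersions: a zero target picks the next sequential version, while an
// explicit target below the next sequential version is rejected.
func saveVersions() error {
	vm := dbm.NewVersionManager(nil)

	id1, err := vm.Save(0) // first sequential version (1)
	if err != nil {
		return err
	}
	if _, err := vm.Save(id1); err == nil { // stale target: must error
		return errors.New("expected an error for a stale target")
	}
	_, err = vm.Save(0) // next sequential version (2)
	return err
}
```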


@ -17,10 +17,10 @@ func TestVersionManager(t *testing.T) {
require.True(t, vm.Equal(vm))
require.False(t, vm.Exists(0))
-id, err := vm.Save(0)
+id1, err := vm.Save(0)
require.NoError(t, err)
-require.Equal(t, uint64(1), id)
-require.True(t, vm.Exists(id))
+require.Equal(t, uint64(1), id1)
+require.True(t, vm.Exists(id1))
id2, err := vm.Save(0)
require.NoError(t, err)
require.True(t, vm.Exists(id2))
@ -28,17 +28,17 @@ func TestVersionManager(t *testing.T) {
require.NoError(t, err)
require.True(t, vm.Exists(id3))
-id, err = vm.Save(id) // can't save existing id
+_, err = vm.Save(id1) // can't save existing id
require.Error(t, err)
-id, err = vm.Save(0)
+id4, err := vm.Save(0)
require.NoError(t, err)
-require.True(t, vm.Exists(id))
-vm.Delete(id)
-require.False(t, vm.Exists(id))
-vm.Delete(1)
-require.False(t, vm.Exists(1))
+require.True(t, vm.Exists(id4))
+vm.Delete(id4)
+require.False(t, vm.Exists(id4))
+vm.Delete(id1)
+require.False(t, vm.Exists(id1))
require.Equal(t, id2, vm.Initial())
require.Equal(t, id3, vm.Last())
@ -50,9 +50,9 @@ func TestVersionManager(t *testing.T) {
require.Equal(t, []uint64{id2, id3}, all)
vmc := vm.Copy()
-id, err = vmc.Save(0)
+id5, err := vmc.Save(0)
require.NoError(t, err)
-require.False(t, vm.Exists(id)) // true copy is made
+require.False(t, vm.Exists(id5)) // true copy is made
vm2 := dbm.NewVersionManager([]uint64{id2, id3})
require.True(t, vm.Equal(vm2))


@ -222,6 +222,26 @@ When `Store.{Get, Set}()` is called, the store forwards the call to its parent,
When `Store.Iterator()` is called, it does not simply prefix the `Store.prefix`, since it does not work as intended. In that case, some of the elements are traversed even if they do not start with the prefix.
+## New Store package (`store/v2`)
+The SDK is in the process of transitioning to use the types listed here as the default interface for state storage. At the time of writing, these cannot be used within an application and are not directly compatible with the `CommitMultiStore` and related types.
+### `BasicKVStore` interface
+An interface providing only the basic CRUD functionality (`Get`, `Set`, `Has`, and `Delete` methods), without iteration or caching. This is used to partially expose components of a larger store, such as a `flat.Store`.
+### Flat Store
+`flat.Store` is the new default persistent store, which internally decouples the concerns of state storage and commitment scheme. Values are stored directly in the backing key-value database (the "storage" bucket), while the value's hash is mapped in a separate store which is able to generate a cryptographic commitment (the "state commitment" bucket, implemented with `smt.Store`).
+This can optionally be constructed to use different backend databases for each bucket.
+<!-- TODO: add link +++ https://github.com/cosmos/cosmos-sdk/blob/v0.44.0/store/v2/flat/store.go -->
+### SMT Store
+A `BasicKVStore` which is used to partially expose functions of an underlying store (for instance, to allow access to the commitment store in `flat.Store`).
## Next {hide}
Learn about [encoding](./encoding.md) {hide}
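
A hedged construction sketch of the split-database configuration described above (in-memory backends are used purely for illustration):

```go
package example

import (
	"github.com/cosmos/cosmos-sdk/db/memdb"
	"github.com/cosmos/cosmos-sdk/store/types"
	"github.com/cosmos/cosmos-sdk/store/v2/flat"
)

// newSplitStore backs the storage bucket and the state-commitment (SMT)
// bucket with separate DBs; leaving MerkleDB nil would co-locate them.
func newSplitStore() (*flat.Store, error) {
	opts := flat.StoreConfig{
		Pruning:  types.PruneNothing,
		MerkleDB: memdb.NewDB(),
	}
	return flat.NewStore(memdb.NewDB(), opts)
}
```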

go.mod

@ -11,6 +11,7 @@ require (
github.com/confio/ics23/go v0.6.6
github.com/cosmos/btcutil v1.0.4
github.com/cosmos/cosmos-proto v0.0.0-20210914142853-23ed61ac79ce
+github.com/cosmos/cosmos-sdk/db v0.0.0
github.com/cosmos/go-bip39 v1.0.0
github.com/cosmos/iavl v0.17.1
github.com/cosmos/ledger-cosmos-go v0.11.1
@ -27,6 +28,7 @@ require (
github.com/improbable-eng/grpc-web v0.14.1
github.com/jhump/protoreflect v1.10.1
github.com/kr/text v0.2.0 // indirect
+github.com/lazyledger/smt v0.2.1-0.20210709230900-03ea40719554
github.com/magiconair/properties v1.8.5
github.com/mattn/go-isatty v0.0.14
github.com/onsi/ginkgo v1.16.4 // indirect
@ -67,7 +69,7 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect
github.com/dgraph-io/badger/v2 v2.2007.2 // indirect
-github.com/dgraph-io/ristretto v0.0.3 // indirect
+github.com/dgraph-io/ristretto v0.1.0 // indirect
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b // indirect
@ -76,6 +78,7 @@ require (
github.com/go-kit/kit v0.10.0 // indirect
github.com/go-logfmt/logfmt v0.5.0 // indirect
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
github.com/golang/snappy v0.0.3 // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/orderedcode v0.0.1 // indirect
@ -88,7 +91,7 @@ require (
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmhodges/levigo v1.0.0 // indirect
github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d // indirect
-github.com/klauspost/compress v1.11.7 // indirect
+github.com/klauspost/compress v1.12.3 // indirect
github.com/lib/pq v1.2.0 // indirect
github.com/libp2p/go-buffer-pool v0.0.2 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
@ -128,3 +131,5 @@ replace google.golang.org/grpc => google.golang.org/grpc v1.33.2
replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1
replace github.com/99designs/keyring => github.com/cosmos/keyring v1.1.7-0.20210622111912-ef00f8ac3d76
+replace github.com/cosmos/cosmos-sdk/db => ./db

go.sum

@ -221,9 +221,11 @@ github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFM
github.com/dgraph-io/badger/v2 v2.2007.1/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k=
github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE=
+github.com/dgraph-io/badger/v3 v3.2103.1/go.mod h1:dULbq6ehJ5K0cGW/1TQ9iSfUk0gbSiToDWmWmTsJ53E=
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
-github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI=
github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
+github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI=
+github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
@ -314,6 +316,7 @@ github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/gateway v1.1.0 h1:u0SuhL9+Il+UbjM9VIE3ntfRujKbvVpFvNB4HbjeVQ0=
github.com/gogo/gateway v1.1.0/go.mod h1:S7rR8FRQyG3QFESeSv4l2WnsyzlCLG0CzBbUUo/mbic=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -356,6 +359,7 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/flatbuffers v1.12.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@ -522,8 +526,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
@ -536,6 +541,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/lazyledger/smt v0.2.1-0.20210709230900-03ea40719554 h1:nDOkLO7klmnEw1s4AyKt1Arvpgyh33uj1JmkYlJaDsk=
+github.com/lazyledger/smt v0.2.1-0.20210709230900-03ea40719554/go.mod h1:9+Pb2/tg1PvEgW7aFx4bFhDE4bvbI03zuJ8kb7nJ9Jc=
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=

internal/util.go (new file)

@ -0,0 +1,14 @@
package util
import "fmt"
func CombineErrors(ret error, also error, desc string) error {
if also != nil {
if ret != nil {
ret = fmt.Errorf("%w; %v: %v", ret, desc, also)
} else {
ret = also
}
}
return ret
}


@ -4,6 +4,7 @@ import (
"github.com/tendermint/tendermint/crypto/merkle"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
+"github.com/cosmos/cosmos-sdk/store/v2/smt"
)
// RequireProof returns whether proof is required for the subpath.
@ -25,3 +26,10 @@ func DefaultProofRuntime() (prt *merkle.ProofRuntime) {
prt.RegisterOpDecoder(storetypes.ProofOpSimpleMerkleCommitment, storetypes.CommitmentOpDecoder)
return
}
+// SMTProofRuntime returns a ProofRuntime for sparse merkle trees.
+func SMTProofRuntime() (prt *merkle.ProofRuntime) {
+prt = merkle.NewProofRuntime()
+prt.RegisterOpDecoder(smt.ProofType, smt.ProofDecoder)
+return prt
+}


@ -620,7 +620,7 @@ func (rs *Store) SetInitialVersion(version int64) error {
// If the store is wrapped with an inter-block cache, we must first unwrap
// it to get the underlying IAVL store.
store = rs.GetCommitKVStore(key)
-store.(*iavl.Store).SetInitialVersion(version)
+store.(types.StoreWithInitialVersion).SetInitialVersion(version)
}
}


@ -186,10 +186,8 @@ type CommitMultiStore interface {
//---------subsp-------------------------------
// KVStore
-// KVStore is a simple interface to get/set data
-type KVStore interface {
-Store
+// BasicKVStore is a simple interface to get/set data
+type BasicKVStore interface {
// Get returns nil iff key doesn't exist. Panics on nil key.
Get(key []byte) []byte
@ -201,6 +199,12 @@ type KVStore interface {
// Delete deletes the key. Panics on nil key.
Delete(key []byte)
+}
+// KVStore additionally provides iteration and deletion
+type KVStore interface {
+Store
+BasicKVStore
// Iterator over a domain of keys in ascending order. End is exclusive.
// Start must be less than end, or the Iterator is invalid.
@ -289,6 +293,9 @@ const (
StoreTypeIAVL
StoreTypeTransient
StoreTypeMemory
+StoreTypeSMT
+StoreTypeDecoupled
+StoreTypePersistent = StoreTypeDecoupled
)
func (st StoreType) String() string {
@ -307,6 +314,12 @@ func (st StoreType) String() string {
case StoreTypeMemory:
return "StoreTypeMemory"
+case StoreTypeSMT:
+return "StoreTypeSMT"
+case StoreTypeDecoupled:
+return "StoreTypeDecoupled"
}
return "unknown store type"

store/v2/flat/store.go (new file)

@ -0,0 +1,479 @@
package flat
import (
"crypto/sha256"
"errors"
"fmt"
"io"
"math"
"sync"
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/prefix"
abci "github.com/tendermint/tendermint/abci/types"
util "github.com/cosmos/cosmos-sdk/internal"
"github.com/cosmos/cosmos-sdk/store/cachekv"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/tracekv"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/store/v2/smt"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/kv"
)
var (
_ types.KVStore = (*Store)(nil)
_ types.CommitKVStore = (*Store)(nil)
_ types.Queryable = (*Store)(nil)
)
var (
merkleRootKey = []byte{0} // Key for root hash of Merkle tree
dataPrefix = []byte{1} // Prefix for state mappings
indexPrefix = []byte{2} // Prefix for Store reverse index
merkleNodePrefix = []byte{3} // Prefix for Merkle tree nodes
merkleValuePrefix = []byte{4} // Prefix for Merkle value mappings
)
var (
ErrVersionDoesNotExist = errors.New("version does not exist")
ErrMaximumHeight = errors.New("maximum block height reached")
)
type StoreConfig struct {
// Version pruning options for backing DBs.
Pruning types.PruningOptions
// The backing DB to use for the state commitment Merkle tree data.
// If nil, Merkle data is stored in the state storage DB under a separate prefix.
MerkleDB dbm.DBConnection
InitialVersion uint64
}
// Store is a CommitKVStore which handles state storage and commitments as separate concerns,
// optionally using separate backing key-value DBs for each.
// Allows synchronized R/W access by locking.
type Store struct {
stateDB dbm.DBConnection
stateTxn dbm.DBReadWriter
dataTxn dbm.DBReadWriter
merkleTxn dbm.DBReadWriter
indexTxn dbm.DBReadWriter
// State commitment (SC) KV store for current version
merkleStore *smt.Store
opts StoreConfig
mtx sync.RWMutex
}
var DefaultStoreConfig = StoreConfig{Pruning: types.PruneDefault, MerkleDB: nil}
// NewStore creates a new Store, or loads one if db contains existing data.
func NewStore(db dbm.DBConnection, opts StoreConfig) (ret *Store, err error) {
versions, err := db.Versions()
if err != nil {
return
}
loadExisting := false
// If the DB is not empty, attempt to load existing data
if saved := versions.Count(); saved != 0 {
if opts.InitialVersion != 0 && versions.Last() < opts.InitialVersion {
return nil, fmt.Errorf("latest saved version is less than initial version: %v < %v",
versions.Last(), opts.InitialVersion)
}
loadExisting = true
}
err = db.Revert()
if err != nil {
return
}
stateTxn := db.ReadWriter()
defer func() {
if err != nil {
err = util.CombineErrors(err, stateTxn.Discard(), "stateTxn.Discard also failed")
}
}()
merkleTxn := stateTxn
if opts.MerkleDB != nil {
var mversions dbm.VersionSet
mversions, err = opts.MerkleDB.Versions()
if err != nil {
return
}
// Version sets of each DB must match
if !versions.Equal(mversions) {
err = fmt.Errorf("Storage and Merkle DB have different version history")
return
}
err = opts.MerkleDB.Revert()
if err != nil {
return
}
merkleTxn = opts.MerkleDB.ReadWriter()
}
var merkleStore *smt.Store
if loadExisting {
var root []byte
root, err = stateTxn.Get(merkleRootKey)
if err != nil {
return
}
if root == nil {
err = fmt.Errorf("could not get root of SMT")
return
}
merkleStore = loadSMT(merkleTxn, root)
} else {
merkleNodes := prefix.NewPrefixReadWriter(merkleTxn, merkleNodePrefix)
merkleValues := prefix.NewPrefixReadWriter(merkleTxn, merkleValuePrefix)
merkleStore = smt.NewStore(merkleNodes, merkleValues)
}
return &Store{
stateDB: db,
stateTxn: stateTxn,
dataTxn: prefix.NewPrefixReadWriter(stateTxn, dataPrefix),
indexTxn: prefix.NewPrefixReadWriter(stateTxn, indexPrefix),
merkleTxn: merkleTxn,
merkleStore: merkleStore,
opts: opts,
}, nil
}
func (s *Store) Close() error {
err := s.stateTxn.Discard()
if s.opts.MerkleDB != nil {
err = util.CombineErrors(err, s.merkleTxn.Discard(), "merkleTxn.Discard also failed")
}
return err
}
// Get implements KVStore.
func (s *Store) Get(key []byte) []byte {
s.mtx.RLock()
defer s.mtx.RUnlock()
val, err := s.dataTxn.Get(key)
if err != nil {
panic(err)
}
return val
}
// Has implements KVStore.
func (s *Store) Has(key []byte) bool {
s.mtx.RLock()
defer s.mtx.RUnlock()
has, err := s.dataTxn.Has(key)
if err != nil {
panic(err)
}
return has
}
// Set implements KVStore.
func (s *Store) Set(key, value []byte) {
s.mtx.Lock()
defer s.mtx.Unlock()
err := s.dataTxn.Set(key, value)
if err != nil {
panic(err)
}
s.merkleStore.Set(key, value)
khash := sha256.Sum256(key)
err = s.indexTxn.Set(khash[:], key)
if err != nil {
panic(err)
}
}
// Delete implements KVStore.
func (s *Store) Delete(key []byte) {
khash := sha256.Sum256(key)
s.mtx.Lock()
defer s.mtx.Unlock()
s.merkleStore.Delete(key)
_ = s.indexTxn.Delete(khash[:])
_ = s.dataTxn.Delete(key)
}
type contentsIterator struct {
dbm.Iterator
valid bool
}
func newIterator(source dbm.Iterator) *contentsIterator {
ret := &contentsIterator{Iterator: source}
ret.Next()
return ret
}
func (it *contentsIterator) Next() { it.valid = it.Iterator.Next() }
func (it *contentsIterator) Valid() bool { return it.valid }
// Iterator implements KVStore.
func (s *Store) Iterator(start, end []byte) types.Iterator {
iter, err := s.dataTxn.Iterator(start, end)
if err != nil {
panic(err)
}
return newIterator(iter)
}
// ReverseIterator implements KVStore.
func (s *Store) ReverseIterator(start, end []byte) types.Iterator {
iter, err := s.dataTxn.ReverseIterator(start, end)
if err != nil {
panic(err)
}
return newIterator(iter)
}
// GetStoreType implements Store.
func (s *Store) GetStoreType() types.StoreType {
return types.StoreTypeDecoupled
}
// Commit implements Committer.
func (s *Store) Commit() types.CommitID {
versions, err := s.stateDB.Versions()
if err != nil {
panic(err)
}
target := versions.Last() + 1
if target > math.MaxInt64 {
panic(ErrMaximumHeight)
}
// Fast forward to initial version if needed
if s.opts.InitialVersion != 0 && target < s.opts.InitialVersion {
target = s.opts.InitialVersion
}
cid, err := s.commit(target)
if err != nil {
panic(err)
}
previous := cid.Version - 1
if s.opts.Pruning.KeepEvery != 1 && s.opts.Pruning.Interval != 0 && cid.Version%int64(s.opts.Pruning.Interval) == 0 {
// The range of newly prunable versions
lastPrunable := previous - int64(s.opts.Pruning.KeepRecent)
firstPrunable := lastPrunable - int64(s.opts.Pruning.Interval)
for version := firstPrunable; version <= lastPrunable; version++ {
if s.opts.Pruning.KeepEvery == 0 || version%int64(s.opts.Pruning.KeepEvery) != 0 {
s.stateDB.DeleteVersion(uint64(version))
if s.opts.MerkleDB != nil {
s.opts.MerkleDB.DeleteVersion(uint64(version))
}
}
}
}
return *cid
}
func (s *Store) commit(target uint64) (id *types.CommitID, err error) {
root := s.merkleStore.Root()
err = s.stateTxn.Set(merkleRootKey, root)
if err != nil {
return
}
err = s.stateTxn.Commit()
if err != nil {
return
}
defer func() {
if err != nil {
err = util.CombineErrors(err, s.stateDB.Revert(), "stateDB.Revert also failed")
}
}()
err = s.stateDB.SaveVersion(target)
if err != nil {
return
}
stateTxn := s.stateDB.ReadWriter()
defer func() {
if err != nil {
err = util.CombineErrors(err, stateTxn.Discard(), "stateTxn.Discard also failed")
}
}()
merkleTxn := stateTxn
// If DBs are not separate, Merkle state has been committed & snapshotted
if s.opts.MerkleDB != nil {
defer func() {
if err != nil {
if delerr := s.stateDB.DeleteVersion(target); delerr != nil {
err = fmt.Errorf("%w: commit rollback failed: %v", err, delerr)
}
}
}()
err = s.merkleTxn.Commit()
if err != nil {
return
}
defer func() {
if err != nil {
err = util.CombineErrors(err, s.opts.MerkleDB.Revert(), "merkleDB.Revert also failed")
}
}()
err = s.opts.MerkleDB.SaveVersion(target)
if err != nil {
return
}
merkleTxn = s.opts.MerkleDB.ReadWriter()
}
s.stateTxn = stateTxn
s.dataTxn = prefix.NewPrefixReadWriter(stateTxn, dataPrefix)
s.indexTxn = prefix.NewPrefixReadWriter(stateTxn, indexPrefix)
s.merkleTxn = merkleTxn
s.merkleStore = loadSMT(merkleTxn, root)
return &types.CommitID{Version: int64(target), Hash: root}, nil
}
// LastCommitID implements Committer.
func (s *Store) LastCommitID() types.CommitID {
versions, err := s.stateDB.Versions()
if err != nil {
panic(err)
}
last := versions.Last()
if last == 0 {
return types.CommitID{}
}
// Latest Merkle root is the one currently stored
hash, err := s.stateTxn.Get(merkleRootKey)
if err != nil {
panic(err)
}
return types.CommitID{Version: int64(last), Hash: hash}
}
func (s *Store) GetPruning() types.PruningOptions { return s.opts.Pruning }
func (s *Store) SetPruning(po types.PruningOptions) { s.opts.Pruning = po }
// Query implements ABCI interface, allows queries.
//
// by default we will return from (latest height -1),
// as we will have merkle proofs immediately (header height = data height + 1)
// If latest-1 is not present, use latest (which must be present)
// if you care to have the latest data to see a tx results, you must
// explicitly set the height you want to see
func (s *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
if len(req.Data) == 0 {
return sdkerrors.QueryResult(sdkerrors.Wrap(sdkerrors.ErrTxDecode, "query cannot be zero length"), false)
}
// if height is 0, use the latest height
height := req.Height
if height == 0 {
versions, err := s.stateDB.Versions()
if err != nil {
return sdkerrors.QueryResult(errors.New("failed to get version info"), false)
}
latest := versions.Last()
if versions.Exists(latest - 1) {
height = int64(latest - 1)
} else {
height = int64(latest)
}
}
if height < 0 {
return sdkerrors.QueryResult(fmt.Errorf("height overflow: %v", height), false)
}
res.Height = height
switch req.Path {
case "/key":
var err error
res.Key = req.Data // data holds the key bytes
dbr, err := s.stateDB.ReaderAt(uint64(height))
if err != nil {
if errors.Is(err, dbm.ErrVersionDoesNotExist) {
err = sdkerrors.ErrInvalidHeight
}
return sdkerrors.QueryResult(err, false)
}
defer dbr.Discard()
contents := prefix.NewPrefixReader(dbr, dataPrefix)
res.Value, err = contents.Get(res.Key)
if err != nil {
return sdkerrors.QueryResult(err, false)
}
if !req.Prove {
break
}
merkleView := dbr
if s.opts.MerkleDB != nil {
merkleView, err = s.opts.MerkleDB.ReaderAt(uint64(height))
if err != nil {
return sdkerrors.QueryResult(
fmt.Errorf("version exists in state DB but not Merkle DB: %v", height), false)
}
defer merkleView.Discard()
}
root, err := dbr.Get(merkleRootKey)
if err != nil {
return sdkerrors.QueryResult(err, false)
}
if root == nil {
return sdkerrors.QueryResult(errors.New("Merkle root hash not found"), false)
}
merkleStore := loadSMT(dbm.ReaderAsReadWriter(merkleView), root)
res.ProofOps, err = merkleStore.GetProof(res.Key)
if err != nil {
return sdkerrors.QueryResult(fmt.Errorf("Merkle proof creation failed for key: %v", res.Key), false)
}
case "/subspace":
pairs := kv.Pairs{
Pairs: make([]kv.Pair, 0),
}
subspace := req.Data
res.Key = subspace
iterator := s.Iterator(subspace, types.PrefixEndBytes(subspace))
for ; iterator.Valid(); iterator.Next() {
pairs.Pairs = append(pairs.Pairs, kv.Pair{Key: iterator.Key(), Value: iterator.Value()})
}
iterator.Close()
bz, err := pairs.Marshal()
if err != nil {
panic(fmt.Errorf("failed to marshal KV pairs: %w", err))
}
res.Value = bz
default:
return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unexpected query path: %v", req.Path), false)
}
return res
}
func loadSMT(merkleTxn dbm.DBReadWriter, root []byte) *smt.Store {
merkleNodes := prefix.NewPrefixReadWriter(merkleTxn, merkleNodePrefix)
merkleValues := prefix.NewPrefixReadWriter(merkleTxn, merkleValuePrefix)
return smt.LoadStore(merkleNodes, merkleValues, root)
}
func (st *Store) CacheWrap() types.CacheWrap {
return cachekv.NewStore(st)
}
func (st *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
return cachekv.NewStore(tracekv.NewStore(st, w, tc))
}
func (st *Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
return cachekv.NewStore(listenkv.NewStore(st, storeKey, listeners))
}
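
A hedged end-to-end sketch of the store above: commit a version, then read the key back through the ABCI `Query` path with a Merkle proof (field names follow the Tendermint `abci.RequestQuery` type imported by this file):

```go
package example

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"

	"github.com/cosmos/cosmos-sdk/db/memdb"
	"github.com/cosmos/cosmos-sdk/store/v2/flat"
)

// commitAndQuery commits one version, then queries the committed height,
// asking the SMT bucket for a proof against the stored Merkle root.
func commitAndQuery() error {
	store, err := flat.NewStore(memdb.NewDB(), flat.DefaultStoreConfig)
	if err != nil {
		return err
	}
	defer store.Close()

	store.Set([]byte("hello"), []byte("goodbye"))
	id := store.Commit()

	res := store.Query(abci.RequestQuery{
		Path:   "/key",
		Data:   []byte("hello"),
		Height: id.Version,
		Prove:  true,
	})
	if res.ProofOps == nil {
		return fmt.Errorf("no proof returned at height %d", res.Height)
	}
	fmt.Printf("value %q at height %d\n", res.Value, res.Height)
	return nil
}
```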

store/v2/flat/store_test.go (new file)

@ -0,0 +1,586 @@
package flat
import (
"errors"
"math"
"testing"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/memdb"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/types/kv"
)
var (
cacheSize = 100
alohaData = map[string]string{
"hello": "goodbye",
"aloha": "shalom",
}
)
func newStoreWithData(t *testing.T, db dbm.DBConnection, storeData map[string]string) *Store {
store, err := NewStore(db, DefaultStoreConfig)
require.NoError(t, err)
for k, v := range storeData {
store.Set([]byte(k), []byte(v))
}
return store
}
func newAlohaStore(t *testing.T, db dbm.DBConnection) *Store {
return newStoreWithData(t, db, alohaData)
}
func TestGetSetHasDelete(t *testing.T) {
store := newAlohaStore(t, memdb.NewDB())
key := "hello"
exists := store.Has([]byte(key))
require.True(t, exists)
require.EqualValues(t, []byte(alohaData[key]), store.Get([]byte(key)))
value2 := "notgoodbye"
store.Set([]byte(key), []byte(value2))
require.EqualValues(t, value2, store.Get([]byte(key)))
store.Delete([]byte(key))
exists = store.Has([]byte(key))
require.False(t, exists)
require.Panics(t, func() { store.Get(nil) }, "Get(nil key) should panic")
require.Panics(t, func() { store.Get([]byte{}) }, "Get(empty key) should panic")
require.Panics(t, func() { store.Has(nil) }, "Has(nil key) should panic")
require.Panics(t, func() { store.Has([]byte{}) }, "Has(empty key) should panic")
require.Panics(t, func() { store.Set(nil, []byte("value")) }, "Set(nil key) should panic")
require.Panics(t, func() { store.Set([]byte{}, []byte("value")) }, "Set(empty key) should panic")
require.Panics(t, func() { store.Set([]byte("key"), nil) }, "Set(nil value) should panic")
store.indexTxn = rwCrudFails{store.indexTxn}
require.Panics(t, func() { store.Set([]byte("key"), []byte("value")) },
"Set() when index fails should panic")
}
func TestConstructors(t *testing.T) {
db := memdb.NewDB()
store := newAlohaStore(t, db)
store.Commit()
require.NoError(t, store.Close())
store, err := NewStore(db, DefaultStoreConfig)
require.NoError(t, err)
value := store.Get([]byte("hello"))
require.Equal(t, []byte("goodbye"), value)
require.NoError(t, store.Close())
// Loading with an initial version beyond the lowest should error
opts := StoreConfig{InitialVersion: 5, Pruning: types.PruneNothing}
store, err = NewStore(db, opts)
require.Error(t, err)
db.Close()
store, err = NewStore(dbVersionsFails{memdb.NewDB()}, DefaultStoreConfig)
require.Error(t, err)
store, err = NewStore(db, StoreConfig{MerkleDB: dbVersionsFails{memdb.NewDB()}})
require.Error(t, err)
// can't use a DB with open writers
db = memdb.NewDB()
merkledb := memdb.NewDB()
w := db.Writer()
store, err = NewStore(db, DefaultStoreConfig)
require.Error(t, err)
w.Discard()
w = merkledb.Writer()
store, err = NewStore(db, StoreConfig{MerkleDB: merkledb})
require.Error(t, err)
w.Discard()
// can't use DBs with different version history
merkledb.SaveNextVersion()
store, err = NewStore(db, StoreConfig{MerkleDB: merkledb})
require.Error(t, err)
merkledb.Close()
// can't load existing store when we can't access the latest Merkle root hash
store, err = NewStore(db, DefaultStoreConfig)
require.NoError(t, err)
store.Commit()
require.NoError(t, store.Close())
// because the root is missing
w = db.Writer()
w.Delete(merkleRootKey)
w.Commit()
db.SaveNextVersion()
store, err = NewStore(db, DefaultStoreConfig)
require.Error(t, err)
// or, because of an error
store, err = NewStore(dbRWCrudFails{db}, DefaultStoreConfig)
require.Error(t, err)
}
func TestIterators(t *testing.T) {
store := newStoreWithData(t, memdb.NewDB(), map[string]string{
string([]byte{0x00}): "0",
string([]byte{0x00, 0x00}): "0 0",
string([]byte{0x00, 0x01}): "0 1",
string([]byte{0x00, 0x02}): "0 2",
string([]byte{0x01}): "1",
})
var testCase = func(t *testing.T, iter types.Iterator, expected []string) {
var i int
for i = 0; iter.Valid(); iter.Next() {
expectedValue := expected[i]
value := iter.Value()
require.EqualValues(t, string(value), expectedValue)
i++
}
require.Equal(t, len(expected), i)
}
testCase(t, store.Iterator(nil, nil),
[]string{"0", "0 0", "0 1", "0 2", "1"})
testCase(t, store.Iterator([]byte{0}, nil),
[]string{"0", "0 0", "0 1", "0 2", "1"})
testCase(t, store.Iterator([]byte{0}, []byte{0, 1}),
[]string{"0", "0 0"})
testCase(t, store.Iterator([]byte{0}, []byte{1}),
[]string{"0", "0 0", "0 1", "0 2"})
testCase(t, store.Iterator([]byte{0, 1}, []byte{1}),
[]string{"0 1", "0 2"})
testCase(t, store.Iterator(nil, []byte{1}),
[]string{"0", "0 0", "0 1", "0 2"})
testCase(t, store.Iterator([]byte{0}, []byte{0}), []string{}) // start = end
testCase(t, store.Iterator([]byte{1}, []byte{0}), []string{}) // start > end
testCase(t, store.ReverseIterator(nil, nil),
[]string{"1", "0 2", "0 1", "0 0", "0"})
testCase(t, store.ReverseIterator([]byte{0}, nil),
[]string{"1", "0 2", "0 1", "0 0", "0"})
testCase(t, store.ReverseIterator([]byte{0}, []byte{0, 1}),
[]string{"0 0", "0"})
testCase(t, store.ReverseIterator([]byte{0}, []byte{1}),
[]string{"0 2", "0 1", "0 0", "0"})
testCase(t, store.ReverseIterator([]byte{0, 1}, []byte{1}),
[]string{"0 2", "0 1"})
testCase(t, store.ReverseIterator(nil, []byte{1}),
[]string{"0 2", "0 1", "0 0", "0"})
testCase(t, store.ReverseIterator([]byte{0}, []byte{0}), []string{}) // start = end
testCase(t, store.ReverseIterator([]byte{1}, []byte{0}), []string{}) // start > end
testCase(t, types.KVStorePrefixIterator(store, []byte{0}),
[]string{"0", "0 0", "0 1", "0 2"})
testCase(t, types.KVStoreReversePrefixIterator(store, []byte{0}),
[]string{"0 2", "0 1", "0 0", "0"})
require.Panics(t, func() { store.Iterator([]byte{}, nil) }, "Iterator(empty key) should panic")
require.Panics(t, func() { store.Iterator(nil, []byte{}) }, "Iterator(empty key) should panic")
require.Panics(t, func() { store.ReverseIterator([]byte{}, nil) }, "Iterator(empty key) should panic")
require.Panics(t, func() { store.ReverseIterator(nil, []byte{}) }, "Iterator(empty key) should panic")
}
func TestCommit(t *testing.T) {
testBasic := func(opts StoreConfig) {
// Sanity test for Merkle hashing
store, err := NewStore(memdb.NewDB(), opts)
require.NoError(t, err)
require.Zero(t, store.LastCommitID())
idNew := store.Commit()
store.Set([]byte{0}, []byte{0})
idOne := store.Commit()
require.Equal(t, idNew.Version+1, idOne.Version)
require.NotEqual(t, idNew.Hash, idOne.Hash)
// Hash of emptied store is same as new store
store.Delete([]byte{0})
idEmptied := store.Commit()
require.Equal(t, idNew.Hash, idEmptied.Hash)
previd := idEmptied
for i := byte(1); i < 5; i++ {
store.Set([]byte{i}, []byte{i})
id := store.Commit()
lastid := store.LastCommitID()
require.Equal(t, id.Hash, lastid.Hash)
require.Equal(t, id.Version, lastid.Version)
require.NotEqual(t, previd.Hash, id.Hash)
require.NotEqual(t, previd.Version, id.Version)
}
}
testBasic(StoreConfig{Pruning: types.PruneNothing})
testBasic(StoreConfig{Pruning: types.PruneNothing, MerkleDB: memdb.NewDB()})
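// testFailedCommit writes a value, expects Commit to panic, and then checks that
// neither backing DB recorded a new version and that the write is not visible
// when the store is reopened.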
testFailedCommit := func(t *testing.T, store *Store, db dbm.DBConnection) {
opts := store.opts
if db == nil {
db = store.stateDB
}
store.Set([]byte{0}, []byte{0})
require.Panics(t, func() { store.Commit() })
require.NoError(t, store.Close())
versions, _ := db.Versions()
require.Equal(t, 0, versions.Count())
if opts.MerkleDB != nil {
versions, _ = opts.MerkleDB.Versions()
require.Equal(t, 0, versions.Count())
}
store, err := NewStore(db, opts)
require.NoError(t, err)
require.Nil(t, store.Get([]byte{0}))
require.NoError(t, store.Close())
}
// Ensure storage commit is rolled back in each failure case
t.Run("recover after failed Commit", func(t *testing.T) {
store, err := NewStore(
dbRWCommitFails{memdb.NewDB()},
StoreConfig{Pruning: types.PruneNothing})
require.NoError(t, err)
testFailedCommit(t, store, nil)
})
t.Run("recover after failed SaveVersion", func(t *testing.T) {
store, err := NewStore(
dbSaveVersionFails{memdb.NewDB()},
StoreConfig{Pruning: types.PruneNothing})
require.NoError(t, err)
testFailedCommit(t, store, nil)
})
t.Run("recover after failed MerkleDB Commit", func(t *testing.T) {
store, err := NewStore(memdb.NewDB(),
StoreConfig{MerkleDB: dbRWCommitFails{memdb.NewDB()}, Pruning: types.PruneNothing})
require.NoError(t, err)
testFailedCommit(t, store, nil)
})
t.Run("recover after failed MerkleDB SaveVersion", func(t *testing.T) {
store, err := NewStore(memdb.NewDB(),
StoreConfig{MerkleDB: dbSaveVersionFails{memdb.NewDB()}, Pruning: types.PruneNothing})
require.NoError(t, err)
testFailedCommit(t, store, nil)
})
t.Run("recover after stateDB.Versions error triggers failure", func(t *testing.T) {
db := memdb.NewDB()
store, err := NewStore(db, DefaultStoreConfig)
require.NoError(t, err)
store.stateDB = dbVersionsFails{store.stateDB}
testFailedCommit(t, store, db)
})
t.Run("recover after stateTxn.Set error triggers failure", func(t *testing.T) {
store, err := NewStore(memdb.NewDB(), DefaultStoreConfig)
require.NoError(t, err)
store.stateTxn = rwCrudFails{store.stateTxn}
testFailedCommit(t, store, nil)
})
t.Run("stateDB.DeleteVersion error triggers failure", func(t *testing.T) {
store, err := NewStore(memdb.NewDB(), StoreConfig{MerkleDB: memdb.NewDB()})
require.NoError(t, err)
store.merkleTxn = rwCommitFails{store.merkleTxn}
store.stateDB = dbDeleteVersionFails{store.stateDB}
require.Panics(t, func() { store.Commit() })
})
t.Run("height overflow triggers failure", func(t *testing.T) {
store, err := NewStore(memdb.NewDB(),
StoreConfig{InitialVersion: math.MaxInt64, Pruning: types.PruneNothing})
require.NoError(t, err)
require.Equal(t, int64(math.MaxInt64), store.Commit().Version)
require.Panics(t, func() { store.Commit() })
require.Equal(t, int64(math.MaxInt64), store.LastCommitID().Version) // version history not modified
})
// setting initial version
store, err := NewStore(memdb.NewDB(),
StoreConfig{InitialVersion: 5, Pruning: types.PruneNothing, MerkleDB: memdb.NewDB()})
require.NoError(t, err)
require.Equal(t, int64(5), store.Commit().Version)
store, err = NewStore(memdb.NewDB(), StoreConfig{MerkleDB: memdb.NewDB()})
require.NoError(t, err)
store.Commit()
store.stateDB = dbVersionsFails{store.stateDB}
require.Panics(t, func() { store.LastCommitID() })
store, err = NewStore(memdb.NewDB(), StoreConfig{MerkleDB: memdb.NewDB()})
require.NoError(t, err)
store.Commit()
store.stateTxn = rwCrudFails{store.stateTxn}
require.Panics(t, func() { store.LastCommitID() })
}
func sliceToSet(slice []uint64) map[uint64]struct{} {
res := make(map[uint64]struct{})
for _, x := range slice {
res[x] = struct{}{}
}
return res
}
func TestPruning(t *testing.T) {
// Save versions up to 10 and verify pruning at final commit
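// Note: PruningOptions is constructed positionally below; the fields are assumed
// to be {KeepRecent, KeepEvery, Interval}, consistent with the expected kept sets.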
testCases := []struct {
types.PruningOptions
kept []uint64
}{
{types.PruningOptions{2, 4, 10}, []uint64{4, 8, 9, 10}},
{types.PruningOptions{0, 4, 10}, []uint64{4, 8, 10}},
{types.PruneEverything, []uint64{10}},
{types.PruneNothing, []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
}
for tci, tc := range testCases {
dbs := []dbm.DBConnection{memdb.NewDB(), memdb.NewDB()}
store, err := NewStore(dbs[0], StoreConfig{Pruning: tc.PruningOptions, MerkleDB: dbs[1]})
require.NoError(t, err)
for i := byte(1); i <= 10; i++ {
store.Set([]byte{i}, []byte{i})
cid := store.Commit()
latest := uint64(i)
require.Equal(t, latest, uint64(cid.Version))
}
for _, db := range dbs {
versions, err := db.Versions()
require.NoError(t, err)
kept := sliceToSet(tc.kept)
for v := uint64(1); v <= 10; v++ {
_, has := kept[v]
require.Equal(t, has, versions.Exists(v), "Version = %v; tc #%d", v, tci)
}
}
}
// Test pruning interval
// Save up to 20th version while checking history at specific version checkpoints
opts := types.PruningOptions{0, 5, 10}
testCheckPoints := map[uint64][]uint64{
5: []uint64{1, 2, 3, 4, 5},
10: []uint64{5, 10},
15: []uint64{5, 10, 11, 12, 13, 14, 15},
20: []uint64{5, 10, 15, 20},
}
db := memdb.NewDB()
store, err := NewStore(db, StoreConfig{Pruning: opts})
require.NoError(t, err)
for i := byte(1); i <= 20; i++ {
store.Set([]byte{i}, []byte{i})
cid := store.Commit()
latest := uint64(i)
require.Equal(t, latest, uint64(cid.Version))
kept, has := testCheckPoints[latest]
if !has {
continue
}
versions, err := db.Versions()
require.NoError(t, err)
keptMap := sliceToSet(kept)
for v := uint64(1); v <= latest; v++ {
_, has := keptMap[v]
require.Equal(t, has, versions.Exists(v), "Version = %v; tc #%d", v, i)
}
}
}
func TestQuery(t *testing.T) {
store := newStoreWithData(t, memdb.NewDB(), nil)
k1, v1 := []byte("key1"), []byte("val1")
k2, v2 := []byte("key2"), []byte("val2")
v3 := []byte("val3")
ksub := []byte("key")
KVs0 := kv.Pairs{}
KVs1 := kv.Pairs{
Pairs: []kv.Pair{
{Key: k1, Value: v1},
{Key: k2, Value: v2},
},
}
KVs2 := kv.Pairs{
Pairs: []kv.Pair{
{Key: k1, Value: v3},
{Key: k2, Value: v2},
},
}
valExpSubEmpty, err := KVs0.Marshal()
require.NoError(t, err)
valExpSub1, err := KVs1.Marshal()
require.NoError(t, err)
valExpSub2, err := KVs2.Marshal()
require.NoError(t, err)
cid := store.Commit()
ver := cid.Version
query := abci.RequestQuery{Path: "/key", Data: k1, Height: ver}
querySub := abci.RequestQuery{Path: "/subspace", Data: ksub, Height: ver}
// query subspace before anything set
qres := store.Query(querySub)
require.True(t, qres.IsOK())
require.Equal(t, valExpSubEmpty, qres.Value)
// set data
store.Set(k1, v1)
store.Set(k2, v2)
// set data without commit, doesn't show up
qres = store.Query(query)
require.True(t, qres.IsOK())
require.Nil(t, qres.Value)
// commit it, but still don't see on old version
cid = store.Commit()
qres = store.Query(query)
require.True(t, qres.IsOK())
require.Nil(t, qres.Value)
// but yes on the new version
query.Height = cid.Version
qres = store.Query(query)
require.True(t, qres.IsOK())
require.Equal(t, v1, qres.Value)
// and for the subspace
qres = store.Query(querySub)
require.True(t, qres.IsOK())
require.Equal(t, valExpSub1, qres.Value)
// modify
store.Set(k1, v3)
cid = store.Commit()
// query will return old values, as height is fixed
qres = store.Query(query)
require.True(t, qres.IsOK())
require.Equal(t, v1, qres.Value)
// update to latest in the query and we are happy
query.Height = cid.Version
qres = store.Query(query)
require.True(t, qres.IsOK())
require.Equal(t, v3, qres.Value)
query2 := abci.RequestQuery{Path: "/key", Data: k2, Height: cid.Version}
qres = store.Query(query2)
require.True(t, qres.IsOK())
require.Equal(t, v2, qres.Value)
// and for the subspace
qres = store.Query(querySub)
require.True(t, qres.IsOK())
require.Equal(t, valExpSub2, qres.Value)
// default (height 0) will show latest -1
query0 := abci.RequestQuery{Path: "/key", Data: k1}
qres = store.Query(query0)
require.True(t, qres.IsOK())
require.Equal(t, v1, qres.Value)
// querying an empty store will fail
store2, err := NewStore(memdb.NewDB(), DefaultStoreConfig)
require.NoError(t, err)
qres = store2.Query(query0)
require.True(t, qres.IsErr())
// default shows latest, if latest-1 does not exist
store2.Set(k1, v1)
store2.Commit()
qres = store2.Query(query0)
require.True(t, qres.IsOK())
require.Equal(t, v1, qres.Value)
store2.Close()
// artificial error cases for coverage (should never happen with defined usage)
// ensure that height overflow triggers an error
require.NoError(t, err)
store2.stateDB = dbVersionsIs{store2.stateDB, dbm.NewVersionManager([]uint64{uint64(math.MaxInt64) + 1})}
qres = store2.Query(query0)
require.True(t, qres.IsErr())
// failure to access versions triggers an error
store2.stateDB = dbVersionsFails{store.stateDB}
qres = store2.Query(query0)
require.True(t, qres.IsErr())
store2.Close()
// query with a nil or empty key fails
badquery := abci.RequestQuery{Path: "/key", Data: []byte{}}
qres = store.Query(badquery)
require.True(t, qres.IsErr())
badquery.Data = nil
qres = store.Query(badquery)
require.True(t, qres.IsErr())
// querying an invalid height will fail
badquery = abci.RequestQuery{Path: "/key", Data: k1, Height: store.LastCommitID().Version + 1}
qres = store.Query(badquery)
require.True(t, qres.IsErr())
// or an invalid path
badquery = abci.RequestQuery{Path: "/badpath", Data: k1}
qres = store.Query(badquery)
require.True(t, qres.IsErr())
// test that proofs are generated with single and separate DBs
testProve := func() {
queryProve0 := abci.RequestQuery{Path: "/key", Data: k1, Prove: true}
store.Query(queryProve0)
qres = store.Query(queryProve0)
require.True(t, qres.IsOK())
require.Equal(t, v1, qres.Value)
require.NotNil(t, qres.ProofOps)
}
testProve()
store.Close()
store, err = NewStore(memdb.NewDB(), StoreConfig{MerkleDB: memdb.NewDB()})
require.NoError(t, err)
store.Set(k1, v1)
store.Commit()
testProve()
store.Close()
}
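// Test doubles for failure injection: each type below embeds a working
// implementation and overrides a single method to return an error, exercising
// the store's error-handling paths.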
type dbDeleteVersionFails struct{ dbm.DBConnection }
type dbRWCommitFails struct{ *memdb.MemDB }
type dbRWCrudFails struct{ dbm.DBConnection }
type dbSaveVersionFails struct{ *memdb.MemDB }
type dbVersionsIs struct {
dbm.DBConnection
vset dbm.VersionSet
}
type dbVersionsFails struct{ dbm.DBConnection }
type rwCommitFails struct{ dbm.DBReadWriter }
type rwCrudFails struct{ dbm.DBReadWriter }
func (dbVersionsFails) Versions() (dbm.VersionSet, error) { return nil, errors.New("dbVersionsFails") }
func (db dbVersionsIs) Versions() (dbm.VersionSet, error) { return db.vset, nil }
func (db dbRWCrudFails) ReadWriter() dbm.DBReadWriter {
return rwCrudFails{db.DBConnection.ReadWriter()}
}
func (dbSaveVersionFails) SaveVersion(uint64) error { return errors.New("dbSaveVersionFails") }
func (dbDeleteVersionFails) DeleteVersion(uint64) error { return errors.New("dbDeleteVersionFails") }
func (tx rwCommitFails) Commit() error {
tx.Discard()
return errors.New("rwCommitFails")
}
func (db dbRWCommitFails) ReadWriter() dbm.DBReadWriter {
return rwCommitFails{db.MemDB.ReadWriter()}
}
func (rwCrudFails) Get([]byte) ([]byte, error) { return nil, errors.New("rwCrudFails.Get") }
func (rwCrudFails) Has([]byte) (bool, error) { return false, errors.New("rwCrudFails.Has") }
func (rwCrudFails) Set([]byte, []byte) error { return errors.New("rwCrudFails.Set") }
func (rwCrudFails) Delete([]byte) error { return errors.New("rwCrudFails.Delete") }

store/v2/smt/proof.go Normal file
@ -0,0 +1,93 @@
package smt
import (
"bytes"
"crypto/sha256"
"encoding/gob"
"hash"
"github.com/cosmos/cosmos-sdk/store/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/lazyledger/smt"
"github.com/tendermint/tendermint/crypto/merkle"
tmmerkle "github.com/tendermint/tendermint/proto/tendermint/crypto"
)
type HasherType byte
const (
SHA256 HasherType = iota
)
const (
ProofType = "smt"
)
type ProofOp struct {
Root []byte
Key []byte
Hasher HasherType
Proof smt.SparseMerkleProof
}
var _ merkle.ProofOperator = (*ProofOp)(nil)
// NewProofOp returns a ProofOp for a SparseMerkleProof.
func NewProofOp(root, key []byte, hasher HasherType, proof smt.SparseMerkleProof) *ProofOp {
return &ProofOp{
Root: root,
Key: key,
Hasher: hasher,
Proof: proof,
}
}
func (p *ProofOp) Run(args [][]byte) ([][]byte, error) {
switch len(args) {
case 0: // non-membership proof
if !smt.VerifyProof(p.Proof, p.Root, p.Key, []byte{}, getHasher(p.Hasher)) {
return nil, sdkerrors.Wrapf(types.ErrInvalidProof, "proof did not verify absence of key: %s", p.Key)
}
case 1: // membership proof
if !smt.VerifyProof(p.Proof, p.Root, p.Key, args[0], getHasher(p.Hasher)) {
return nil, sdkerrors.Wrapf(types.ErrInvalidProof, "proof did not verify existence of key %s with given value %x", p.Key, args[0])
}
default:
return nil, sdkerrors.Wrapf(types.ErrInvalidProof, "args must be length 0 or 1, got: %d", len(args))
}
return [][]byte{p.Root}, nil
}
func (p *ProofOp) GetKey() []byte {
return p.Key
}
func (p *ProofOp) ProofOp() tmmerkle.ProofOp {
var data bytes.Buffer
enc := gob.NewEncoder(&data)
enc.Encode(p)
return tmmerkle.ProofOp{
Type: "smt",
Key: p.Key,
Data: data.Bytes(),
}
}
func ProofDecoder(pop tmmerkle.ProofOp) (merkle.ProofOperator, error) {
dec := gob.NewDecoder(bytes.NewBuffer(pop.Data))
var proof ProofOp
err := dec.Decode(&proof)
if err != nil {
return nil, err
}
return &proof, nil
}
func getHasher(hasher HasherType) hash.Hash {
switch hasher {
case SHA256:
return sha256.New()
default:
return nil
}
}
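
For context on how these operators are meant to be consumed: a ProofOps payload obtained from a query with Prove: true can be verified client-side by decoding each op with ProofDecoder and replaying it against the claimed value; Run returns the root the proof commits to, which is then compared with a trusted root. A minimal sketch (verifyMembership is an illustrative helper, not part of this change):

package smt_test

import (
	"bytes"
	"fmt"

	smtstore "github.com/cosmos/cosmos-sdk/store/v2/smt"
	tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
)

// verifyMembership checks that ops proves value under key against trustedRoot.
func verifyMembership(ops *tmcrypto.ProofOps, trustedRoot, key, value []byte) error {
	for _, pop := range ops.Ops {
		if pop.Type != smtstore.ProofType || !bytes.Equal(pop.Key, key) {
			continue
		}
		op, err := smtstore.ProofDecoder(pop)
		if err != nil {
			return err
		}
		// Run errors out if the proof does not verify; otherwise it returns
		// the root the proof commits to.
		roots, err := op.Run([][]byte{value})
		if err != nil {
			return err
		}
		if !bytes.Equal(roots[0], trustedRoot) {
			return fmt.Errorf("proof verified against root %X, expected %X", roots[0], trustedRoot)
		}
		return nil
	}
	return fmt.Errorf("no SMT proof op found for key %X", key)
}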


@ -0,0 +1,68 @@
package smt_test
import (
"crypto/sha256"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/db/memdb"
smtstore "github.com/cosmos/cosmos-sdk/store/v2/smt"
"github.com/lazyledger/smt"
)
func TestProofOpInterface(t *testing.T) {
hasher := sha256.New()
tree := smt.NewSparseMerkleTree(memdb.NewDB().ReadWriter(), memdb.NewDB().ReadWriter(), hasher)
key := []byte("foo")
value := []byte("bar")
root, err := tree.Update(key, value)
require.NoError(t, err)
require.NotEmpty(t, root)
proof, err := tree.Prove(key)
require.True(t, smt.VerifyProof(proof, root, key, value, hasher))
storeProofOp := smtstore.NewProofOp(root, key, smtstore.SHA256, proof)
require.NotNil(t, storeProofOp)
// inclusion proof
r, err := storeProofOp.Run([][]byte{value})
assert.NoError(t, err)
assert.NotEmpty(t, r)
assert.Equal(t, root, r[0])
// inclusion proof - wrong value - should fail
r, err = storeProofOp.Run([][]byte{key})
assert.Error(t, err)
assert.Empty(t, r)
// exclusion proof - should fail
r, err = storeProofOp.Run([][]byte{})
assert.Error(t, err)
assert.Empty(t, r)
// invalid request - should fail
r, err = storeProofOp.Run([][]byte{key, key})
assert.Error(t, err)
assert.Empty(t, r)
// encode
tmProofOp := storeProofOp.ProofOp()
assert.NotNil(t, tmProofOp)
assert.Equal(t, smtstore.ProofType, tmProofOp.Type)
assert.Equal(t, key, tmProofOp.Key, key)
assert.NotEmpty(t, tmProofOp.Data)
// decode
decoded, err := smtstore.ProofDecoder(tmProofOp)
assert.NoError(t, err)
assert.NotNil(t, decoded)
assert.Equal(t, key, decoded.GetKey())
// run proof after decoding
r, err = decoded.Run([][]byte{value})
assert.NoError(t, err)
assert.NotEmpty(t, r)
assert.Equal(t, root, r[0])
}

store/v2/smt/store.go Normal file
@ -0,0 +1,99 @@
package smt
import (
"crypto/sha256"
"errors"
"github.com/cosmos/cosmos-sdk/store/types"
tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
"github.com/lazyledger/smt"
)
var (
_ types.BasicKVStore = (*Store)(nil)
)
var (
errKeyEmpty error = errors.New("key is empty or nil")
errValueNil error = errors.New("value is nil")
)
// Store implements types.BasicKVStore, backed by a Sparse Merkle Tree for state commitment.
type Store struct {
tree *smt.SparseMerkleTree
}
func NewStore(nodes, values smt.MapStore) *Store {
return &Store{
tree: smt.NewSparseMerkleTree(nodes, values, sha256.New()),
}
}
func LoadStore(nodes, values smt.MapStore, root []byte) *Store {
return &Store{
tree: smt.ImportSparseMerkleTree(nodes, values, sha256.New(), root),
}
}
func (s *Store) GetProof(key []byte) (*tmcrypto.ProofOps, error) {
proof, err := s.tree.Prove(key)
if err != nil {
return nil, err
}
op := NewProofOp(s.tree.Root(), key, SHA256, proof)
return &tmcrypto.ProofOps{Ops: []tmcrypto.ProofOp{op.ProofOp()}}, nil
}
func (s *Store) Root() []byte { return s.tree.Root() }
// BasicKVStore interface below:
// Get returns nil iff the key doesn't exist. Panics on a nil or empty key.
func (s *Store) Get(key []byte) []byte {
if len(key) == 0 {
panic(errKeyEmpty)
}
val, err := s.tree.Get(key)
if err != nil {
panic(err)
}
return val
}
// Has checks if a key exists. Panics on a nil or empty key.
func (s *Store) Has(key []byte) bool {
if len(key) == 0 {
panic(errKeyEmpty)
}
has, err := s.tree.Has(key)
if err != nil {
panic(err)
}
return has
}
// Set sets the value for the key. Panics on a nil or empty key, or a nil value.
func (s *Store) Set(key []byte, value []byte) {
if len(key) == 0 {
panic(errKeyEmpty)
}
if value == nil {
panic(errValueNil)
}
_, err := s.tree.Update(key, value)
if err != nil {
panic(err)
}
}
// Delete deletes the key. Panics on a nil or empty key.
func (s *Store) Delete(key []byte) {
if len(key) == 0 {
panic(errKeyEmpty)
}
_, err := s.tree.Delete(key)
if err != nil {
panic(err)
}
}
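
As a usage sketch of this store (using the in-memory smt.NewSimpleMap backends that the tests below also use; names are illustrative): only the root returned by Root() needs to be persisted per version, and LoadStore reopens the tree at that commitment.

package smt_test

import (
	"fmt"

	smtstore "github.com/cosmos/cosmos-sdk/store/v2/smt"
	"github.com/lazyledger/smt"
)

// ExampleStore shows the decoupled layout from ADR-040: tree nodes and leaf
// values live in separate MapStores, and only the SMT root is committed.
func ExampleStore() {
	nodes, values := smt.NewSimpleMap(), smt.NewSimpleMap()
	s := smtstore.NewStore(nodes, values)
	s.Set([]byte("key"), []byte("value"))
	root := s.Root() // commitment over the current contents

	// Reopen the same backing stores at the saved root.
	s = smtstore.LoadStore(nodes, values, root)
	fmt.Println(s.Has([]byte("key")))
	// Output: true
}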


@ -0,0 +1,42 @@
package smt_test
import (
"testing"
"github.com/stretchr/testify/assert"
store "github.com/cosmos/cosmos-sdk/store/v2/smt"
"github.com/lazyledger/smt"
)
func TestGetSetHasDelete(t *testing.T) {
s := store.NewStore(smt.NewSimpleMap(), smt.NewSimpleMap())
s.Set([]byte("foo"), []byte("bar"))
assert.Equal(t, []byte("bar"), s.Get([]byte("foo")))
assert.Equal(t, true, s.Has([]byte("foo")))
s.Delete([]byte("foo"))
assert.Equal(t, false, s.Has([]byte("foo")))
assert.Panics(t, func() { s.Get(nil) }, "Get(nil key) should panic")
assert.Panics(t, func() { s.Get([]byte{}) }, "Get(empty key) should panic")
assert.Panics(t, func() { s.Has(nil) }, "Has(nil key) should panic")
assert.Panics(t, func() { s.Has([]byte{}) }, "Has(empty key) should panic")
assert.Panics(t, func() { s.Set(nil, []byte("value")) }, "Set(nil key) should panic")
assert.Panics(t, func() { s.Set([]byte{}, []byte("value")) }, "Set(empty key) should panic")
assert.Panics(t, func() { s.Set([]byte("key"), nil) }, "Set(nil value) should panic")
}
func TestLoadStore(t *testing.T) {
nodes, values := smt.NewSimpleMap(), smt.NewSimpleMap()
s := store.NewStore(nodes, values)
s.Set([]byte{0}, []byte{0})
s.Set([]byte{1}, []byte{1})
s.Delete([]byte{1})
root := s.Root()
s = store.LoadStore(nodes, values, root)
assert.Equal(t, []byte{0}, s.Get([]byte{0}))
assert.False(t, s.Has([]byte{1}))
}


@ -10,7 +10,7 @@ import (
 const (
 	// SuccessABCICode declares an ABCI response use 0 to signal that the
 	// processing was successful and no error is returned.
-	SuccessABCICode = 0
+	SuccessABCICode uint32 = 0
 	// All unclassified errors that do not provide an ABCI code are clubbed
 	// under an internal error code and a generic message instead of