feat: ADR-040: Add `RootStore` implementation (#10430)

## Description

Part of: https://github.com/cosmos/cosmos-sdk/issues/10192

Introduces a new `RootStore` type in the `store/v2` package and an implementation, without yet replacing the `MultiStore` or refactoring its use within the SDK (which will happen in the follow up: https://github.com/cosmos/cosmos-sdk/pull/10174).
Specified by [ADR-040](1326fa2a7d/docs/architecture/adr-040-storage-and-smt-state-commitments.md).

Fixes https://github.com/cosmos/cosmos-sdk/issues/10651
Fixes https://github.com/cosmos/cosmos-sdk/issues/10263

---

### Author Checklist

*All items are required. Please add a note to the item if the item is not applicable and
please add links to any relevant follow up issues.*

I have...

- [x] included the correct [type prefix](https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json) in the PR title
- [ ] added `!` to the type prefix if API or client breaking change
- [x] targeted the correct branch (see [PR Targeting](https://github.com/cosmos/cosmos-sdk/blob/master/CONTRIBUTING.md#pr-targeting))
- [x] provided a link to the relevant issue or specification
- [ ] followed the guidelines for [building modules](https://github.com/cosmos/cosmos-sdk/blob/master/docs/building-modules)
- [x] included the necessary unit and integration [tests](https://github.com/cosmos/cosmos-sdk/blob/master/CONTRIBUTING.md#testing)
- [x] added a changelog entry to `CHANGELOG.md`
- [x] included comments for [documenting Go code](https://blog.golang.org/godoc)
- [x] updated the relevant documentation or specification
- [ ] reviewed "Files changed" and left comments if necessary
- [ ] confirmed all CI checks have passed

### Reviewers Checklist

*All items are required. Please add a note if the item is not applicable and please add
your handle next to the items reviewed if you only reviewed selected items.*

I have...

- [ ] confirmed the correct [type prefix](https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json) in the PR title
- [ ] confirmed `!` in the type prefix if API or client breaking change
- [ ] confirmed all author checklist items have been addressed 
- [ ] reviewed state machine logic
- [ ] reviewed API design and naming
- [ ] reviewed documentation is accurate
- [ ] reviewed tests and test coverage
- [ ] manually tested (if applicable)
This commit is contained in:
Roy Crihfield 2021-12-16 21:09:57 +08:00 committed by GitHub
parent ff590d198d
commit 109bc9422c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
35 changed files with 3708 additions and 1117 deletions

View File

@ -57,6 +57,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
* [\#10561](https://github.com/cosmos/cosmos-sdk/pull/10561) Add configurable IAVL cache size to app.toml
* [\#10507](https://github.com/cosmos/cosmos-sdk/pull/10507) Add middleware for tx priority.
* [\#10311](https://github.com/cosmos/cosmos-sdk/pull/10311) Adds cli to use tips transactions. It adds an `--aux` flag to all CLI tx commands to generate the aux signer data (with optional tip), and a new `tx aux-to-fee` subcommand to let the fee payer gather aux signer data and broadcast the tx
* [\#10430](https://github.com/cosmos/cosmos-sdk/pull/10430) ADR-040: Add store/v2 `MultiStore` implementation
### API Breaking Changes

View File

@ -14,6 +14,9 @@ HTTPS_GIT := https://github.com/cosmos/cosmos-sdk.git
DOCKER := $(shell which docker)
DOCKER_BUF := $(DOCKER) run --rm -v $(CURDIR):/workspace --workdir /workspace bufbuild/buf:1.0.0-rc8
PROJECT_NAME = $(shell git remote get-url origin | xargs basename -s .git)
# RocksDB is a native dependency, so we don't assume the library is installed.
# Instead, it must be explicitly enabled and we warn when it is not.
ENABLE_ROCKSDB ?= false
export GO111MODULE = on
@ -61,6 +64,13 @@ ldflags = -X github.com/cosmos/cosmos-sdk/version.Name=sim \
-X "github.com/cosmos/cosmos-sdk/version.BuildTags=$(build_tags_comma_sep)" \
-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(TMVERSION)
ifeq ($(ENABLE_ROCKSDB),true)
BUILD_TAGS += rocksdb_build
test_tags += rocksdb_build
else
$(warning RocksDB support is disabled; to build and test with RocksDB support, set ENABLE_ROCKSDB=true)
endif
# DB backend selection
ifeq (cleveldb,$(findstring cleveldb,$(COSMOS_BUILD_OPTIONS)))
ldflags += -X github.com/cosmos/cosmos-sdk/types.DBBackend=cleveldb
@ -71,6 +81,9 @@ ifeq (badgerdb,$(findstring badgerdb,$(COSMOS_BUILD_OPTIONS)))
endif
# handle rocksdb
ifeq (rocksdb,$(findstring rocksdb,$(COSMOS_BUILD_OPTIONS)))
ifneq ($(ENABLE_ROCKSDB),true)
$(error Cannot use RocksDB backend unless ENABLE_ROCKSDB=true)
endif
CGO_ENABLED=1
BUILD_TAGS += rocksdb
ldflags += -X github.com/cosmos/cosmos-sdk/types.DBBackend=rocksdb
@ -132,6 +145,7 @@ mockgen_cmd=go run github.com/golang/mock/mockgen
mocks: $(MOCKS_DIR)
$(mockgen_cmd) -source=client/account_retriever.go -package mocks -destination tests/mocks/account_retriever.go
$(mockgen_cmd) -package mocks -destination tests/mocks/tendermint_tm_db_DB.go github.com/tendermint/tm-db DB
$(mockgen_cmd) -source db/types.go -package mocks -destination tests/mocks/db/types.go
$(mockgen_cmd) -source=types/module/module.go -package mocks -destination tests/mocks/types_module_module.go
$(mockgen_cmd) -source=types/invariant.go -package mocks -destination tests/mocks/types_invariant.go
$(mockgen_cmd) -source=types/router.go -package mocks -destination tests/mocks/types_router.go
@ -190,7 +204,7 @@ build-docs:
cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
cp ~/output/$${path_prefix}/index.html ~/output ; \
done < versions ;
.PHONY: build-docs
###############################################################################
@ -206,22 +220,24 @@ TEST_TARGETS := test-unit test-unit-amino test-unit-proto test-ledger-mock test-
# Test runs-specific rules. To add a new test target, just add
# a new rule, customise ARGS or TEST_PACKAGES ad libitum, and
# append the new rule to the TEST_TARGETS list.
test-unit: ARGS=-tags='cgo ledger test_ledger_mock norace'
test-unit-amino: ARGS=-tags='ledger test_ledger_mock test_amino norace'
test-ledger: ARGS=-tags='cgo ledger norace'
test-ledger-mock: ARGS=-tags='ledger test_ledger_mock norace'
test-race: ARGS=-race -tags='cgo ledger test_ledger_mock'
test-unit: test_tags += cgo ledger test_ledger_mock norace
test-unit-amino: test_tags += ledger test_ledger_mock test_amino norace
test-ledger: test_tags += cgo ledger norace
test-ledger-mock: test_tags += ledger test_ledger_mock norace
test-race: test_tags += cgo ledger test_ledger_mock
test-race: ARGS=-race
test-race: TEST_PACKAGES=$(PACKAGES_NOSIMULATION)
$(TEST_TARGETS): run-tests
# check-* compiles and collects tests without running them
# note: go test -c doesn't support multiple packages yet (https://github.com/golang/go/issues/15513)
CHECK_TEST_TARGETS := check-test-unit check-test-unit-amino
check-test-unit: ARGS=-tags='cgo ledger test_ledger_mock norace'
check-test-unit-amino: ARGS=-tags='ledger test_ledger_mock test_amino norace'
check-test-unit: test_tags += cgo ledger test_ledger_mock norace
check-test-unit-amino: test_tags += ledger test_ledger_mock test_amino norace
$(CHECK_TEST_TARGETS): EXTRA_ARGS=-run=none
$(CHECK_TEST_TARGETS): run-tests
ARGS += -tags "$(test_tags)"
SUB_MODULES = $(shell find . -type f -name 'go.mod' -print0 | xargs -0 -n1 dirname | sort)
CURRENT_DIR = $(shell pwd)
run-tests:
@ -486,7 +502,7 @@ localnet-build-dlv:
localnet-build-nodes:
$(DOCKER) run --rm -v $(CURDIR)/.testnets:/data cosmossdk/simd \
testnet init-files --v 4 -o /data --starting-ip-address 192.168.10.2 --keyring-backend=test
testnet init-files --v 4 -o /data --starting-ip-address 192.168.10.2 --keyring-backend=test
docker-compose up -d
localnet-stop:

View File

@ -31,6 +31,6 @@ require (
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)
// FIXME: gorocksdb bindings for OptimisticTransactionDB are not merged upstream, so we use a fork
// Note: gorocksdb bindings for OptimisticTransactionDB are not merged upstream, so we use a fork
// See https://github.com/tecbot/gorocksdb/pull/216
replace github.com/tecbot/gorocksdb => github.com/roysc/gorocksdb v1.1.1
replace github.com/tecbot/gorocksdb => github.com/cosmos/gorocksdb v1.1.1

View File

@ -14,6 +14,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/cosmos/gorocksdb v1.1.1 h1:N0OqpEKXgsi2qtDm8T1+AlNMXkTm6s1jowYf7/4pH5I=
github.com/cosmos/gorocksdb v1.1.1/go.mod h1:b/U29r/CtguX3TF7mKG1Jjn4APDqh4wECshxXdiWHpA=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@ -99,8 +101,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/roysc/gorocksdb v1.1.1 h1:5qKNwi7V/AchRMjyVf5TMCcZP70ro+VyaRmQxzpRvd4=
github.com/roysc/gorocksdb v1.1.1/go.mod h1:b/U29r/CtguX3TF7mKG1Jjn4APDqh4wECshxXdiWHpA=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=

View File

@ -25,7 +25,9 @@ const (
//
// Versioning is implemented by maintaining references to copy-on-write clones of the backing btree.
//
// TODO: Currently transactions do not detect write conflicts, so writers cannot be used concurrently.
// Note: Currently, transactions do not detect write conflicts, so multiple writers cannot be
// safely committed to overlapping domains. Because of this, the number of open writers is
// limited to 1.
type MemDB struct {
btree *btree.BTree // Main contents
mtx sync.RWMutex // Guards version history

View File

@ -1,23 +1,35 @@
// Prefixed DB reader/writer types let you namespace multiple DBs within a single DB.
package prefix
import (
dbm "github.com/cosmos/cosmos-sdk/db"
)
// Prefix Reader/Writer lets you namespace multiple DBs within a single DB.
// prefixed Reader
type prefixR struct {
db dbm.DBReader
prefix []byte
}
// prefixed ReadWriter
type prefixRW struct {
db dbm.DBReadWriter
prefix []byte
}
// prefixed Writer
type prefixW struct {
db dbm.DBWriter
prefix []byte
}
var _ dbm.DBReader = (*prefixR)(nil)
var _ dbm.DBReadWriter = (*prefixRW)(nil)
var _ dbm.DBWriter = (*prefixW)(nil)
// NewPrefixReader returns a DBReader that only has access to the subset of DB keys
// that contain the given prefix.
func NewPrefixReader(db dbm.DBReader, prefix []byte) prefixR {
return prefixR{
prefix: prefix,
@ -25,6 +37,8 @@ func NewPrefixReader(db dbm.DBReader, prefix []byte) prefixR {
}
}
// NewPrefixReadWriter returns a DBReadWriter that only has access to the subset of DB keys
// that contain the given prefix.
func NewPrefixReadWriter(db dbm.DBReadWriter, prefix []byte) prefixRW {
return prefixRW{
prefix: prefix,
@ -32,8 +46,17 @@ func NewPrefixReadWriter(db dbm.DBReadWriter, prefix []byte) prefixRW {
}
}
// NewPrefixWriter returns a DBWriter that writes only to the subset of DB keys
// that contain the given prefix.
func NewPrefixWriter(db dbm.DBWriter, prefix []byte) prefixW {
	return prefixW{
		prefix: prefix,
		db: db,
	}
}
func prefixed(prefix, key []byte) []byte {
return append(prefix, key...)
return append(cp(prefix), key...)
}
// Get implements DBReader.
@ -135,15 +158,42 @@ func (pdb prefixRW) Commit() error { return pdb.db.Commit() }
// Discard implements DBReadWriter.
func (pdb prefixRW) Discard() error { return pdb.db.Discard() }
// Returns a slice of the same length (big endian), but incremented by one.
// Set implements DBWriter.
func (pdb prefixW) Set(key []byte, value []byte) error {
	if len(key) == 0 {
		return dbm.ErrKeyEmpty
	}
	return pdb.db.Set(prefixed(pdb.prefix, key), value)
}
// Delete implements DBWriter.
func (pdb prefixW) Delete(key []byte) error {
	if len(key) == 0 {
		return dbm.ErrKeyEmpty
	}
	return pdb.db.Delete(prefixed(pdb.prefix, key))
}
// Commit implements DBWriter.
func (pdb prefixW) Commit() error { return pdb.db.Commit() }
// Discard implements DBWriter.
func (pdb prefixW) Discard() error { return pdb.db.Discard() }
// cp returns a fresh copy of bz; mutating the result never affects the input.
// A nil input yields an empty, non-nil slice.
func cp(bz []byte) []byte {
	out := make([]byte, len(bz))
	copy(out, bz)
	return out
}
// Returns a new slice of the same length (big endian), but incremented by one.
// Returns nil on overflow (e.g. if bz bytes are all 0xFF)
// CONTRACT: len(bz) > 0
func cpIncr(bz []byte) (ret []byte) {
if len(bz) == 0 {
panic("cpIncr expects non-zero bz length")
}
ret = make([]byte, len(bz))
copy(ret, bz)
ret = cp(bz)
for i := len(bz) - 1; i >= 0; i-- {
if ret[i] < byte(0xFF) {
ret[i]++

View File

@ -1,3 +1,5 @@
//go:build rocksdb_build
package rocksdb
import (

View File

@ -1,3 +1,5 @@
//go:build rocksdb_build
package rocksdb
import (

View File

@ -1,7 +1,10 @@
//go:build rocksdb_build
package rocksdb
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
@ -46,18 +49,29 @@ func TestRevertRecovery(t *testing.T) {
dir := t.TempDir()
db, err := NewDB(dir)
require.NoError(t, err)
_, err = db.SaveNextVersion()
require.NoError(t, err)
txn := db.Writer()
require.NoError(t, txn.Set([]byte{1}, []byte{1}))
require.NoError(t, txn.Commit())
_, err = db.SaveNextVersion()
require.NoError(t, err)
txn = db.Writer()
require.NoError(t, txn.Set([]byte{2}, []byte{2}))
require.NoError(t, txn.Commit())
// make checkpoints dir temporarily unreadable to trigger an error
require.NoError(t, os.Chmod(db.checkpointsDir(), 0000))
// move checkpoints dir temporarily to trigger an error
hideDir := filepath.Join(dir, "hide_checkpoints")
require.NoError(t, os.Rename(db.checkpointsDir(), hideDir))
require.Error(t, db.Revert())
require.NoError(t, os.Rename(hideDir, db.checkpointsDir()))
require.NoError(t, os.Chmod(db.checkpointsDir(), 0755))
db, err = NewDB(dir)
require.NoError(t, err)
view := db.Reader()
val, err := view.Get([]byte{1})
require.NoError(t, err)
require.Equal(t, []byte{1}, val)
val, err = view.Get([]byte{2})
require.NoError(t, err)
require.Nil(t, val)
view.Discard()
}

View File

@ -1,3 +1,5 @@
//go:build rocksdb_build
package rocksdb
import (

View File

@ -232,26 +232,48 @@ Additional information about state streaming configuration can be found in the [
When `KVStore.Set` or `KVStore.Delete` methods are called, `listenkv.Store` automatically writes the operations to the set of `Store.listeners`.
## New Store package (`store/v2`)
# New Store package (`store/v2`)
The SDK is in the process of transitioning to use the types listed here as the default interface for state storage. At the time of writing, these cannot be used within an application and are not directly compatible with the `CommitMultiStore` and related types.
### `BasicKVStore` interface
These types use the new `db` sub-module of Cosmos-SDK (`github.com/cosmos/cosmos-sdk/db`), rather than `tmdb` (`github.com/tendermint/tm-db`).
An interface providing only the basic CRUD functionality (`Get`, `Set`, `Has`, and `Delete` methods), without iteration or caching. This is used to partially expose components of a larger store, such as a `flat.Store`.
See [ADR-040](../architecture/adr-040-storage-and-smt-state-commitments.md) for the motivations and design specifications of the change.
### Flat Store
## `BasicKVStore` interface
`flat.Store` is the new default persistent store, which internally decouples the concerns of state storage and commitment scheme. Values are stored directly in the backing key-value database (the "storage" bucket), while the value's hash is mapped in a separate store which is able to generate a cryptographic commitment (the "state commitment" bucket, implemented with `smt.Store`).
An interface providing only the basic CRUD functionality (`Get`, `Set`, `Has`, and `Delete` methods), without iteration or caching. This is used to partially expose components of a larger store, such as a `root.Store`.
This can optionally be constructed to use different backend databases for each bucket.
## MultiStore
<!-- TODO: add link +++ https://github.com/cosmos/cosmos-sdk/blob/v0.44.0/store/v2/flat/store.go -->
This is the new interface (or, set of interfaces) for the main client store, replacing the role of `store/types.MultiStore` (v1). There are a few significant differences in behavior compared with v1:
* Commits are atomic and are performed on the entire store state; individual substores cannot be committed separately and cannot have different version numbers.
* The store's current version and version history track that of the backing `db.DBConnection`. Past versions are accessible read-only.
* The set of valid substores is defined at initialization and cannot be updated dynamically in an existing store instance.
### SMT Store
### `CommitMultiStore`
A `BasicKVStore` which is used to partially expose functions of an underlying store (for instance, to allow access to the commitment store in `flat.Store`).
This is the main interface for persistent application state, analogous to the original `CommitMultiStore`.
* Past version views are accessed with `GetVersion`, which returns a `BasicMultiStore`.
* Substores are accessed with `GetKVStore`. Trying to get a substore that was not defined at initialization will cause a panic.
* `Close` must be called to release the DB resources being used by the store.
## Next {hide}
### `BasicMultiStore`
A minimal interface that only allows accessing substores. Note: substores returned by `BasicMultiStore.GetKVStore` are read-only and will panic on `Set` or `Delete` calls.
### Implementation (`root.Store`)
The canonical implementation of `MultiStore` is in `store/v2/root`. It internally decouples the concerns of state storage and state commitment: values are stored in, and read directly from, the backing key-value database (state storage, or *SS*), but are also mapped in a logically separate database which generates cryptographic proofs (for state-commitment or *SC*).
The state-commitment component of each substore is implemented as an independent `smt.Store` (see below). Internally, each substore is allocated in a logically separate partition within the same backing DB, such that commits apply to the state of all substores. Therefore, views of past versions also include the state of all substores (including *SS* and *SC* data).
This store can optionally be configured to use a different backend database instance for *SC* (e.g., `badgerdb` for the state storage DB and `memdb` for the state-commitment DB; see `StoreConfig.StateCommitmentDB`).
## SMT Store
`store/v2/smt.Store` maps values into a Sparse Merkle Tree (SMT), and supports a `BasicKVStore` interface as well as methods for cryptographic proof generation.
# Next {hide}
Learn about [encoding](./encoding.md) {hide}

View File

@ -0,0 +1,24 @@
package db
import (
dbm "github.com/cosmos/cosmos-sdk/db"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
)
// Compile-time check that the adapter satisfies the store iterator interface.
// (The previous form, `var _ = (*storetypes.Iterator)(nil)`, assigned a nil
// pointer to blank and asserted nothing.)
var _ storetypes.Iterator = (*dbAsStoreIter)(nil)

// dbAsStoreIter adapts a dbm.Iterator (whose Next returns a validity flag)
// to the (store/types).Iterator interface (which has a separate Valid method).
type dbAsStoreIter struct {
	dbm.Iterator
	valid bool
}

// DBToStoreIterator returns an iterator wrapping the given iterator so that it satisfies the
// (store/types).Iterator interface.
func DBToStoreIterator(source dbm.Iterator) *dbAsStoreIter {
	ret := &dbAsStoreIter{Iterator: source}
	// The DB iterator must be primed before it can access the first element,
	// because Next also returns the validity status.
	ret.Next()
	return ret
}

// Next advances the wrapped iterator and caches whether it remains valid.
func (it *dbAsStoreIter) Next() { it.valid = it.Iterator.Next() }

// Valid reports whether the iterator is positioned at a valid entry.
func (it *dbAsStoreIter) Valid() bool { return it.valid }

View File

@ -21,7 +21,7 @@ execute_mod_tests() {
echo ">>> running $go_mod tests"
cd $mod_dir;
go test -mod=readonly -timeout 30m -coverprofile=${root_dir}/${coverage_file}.tmp -covermode=atomic -tags='norace ledger test_ledger_mock' ./...
go test -mod=readonly -timeout 30m -coverprofile=${root_dir}/${coverage_file}.tmp -covermode=atomic -tags='norace ledger test_ledger_mock rocksdb_build' ./...
local ret=$?
echo "test return: " $ret;
cd -;

View File

@ -13,7 +13,7 @@ type Store struct {
### Get
`Store.Get()` checks `Store.cache` first in order to find if there is any cached value associated with the key. If the value exists, the function returns it. If not, the function calls `Store.parent.Get()`, sets the key-value pair to the `Store.cache`, and returns it.
`Store.Get()` checks `Store.cache` first in order to find if there is any cached value associated with the key. If the value exists, the function returns it. If not, the function calls `Store.parent.Get()`, sets the key-value pair in the `Store.cache`, and returns it.
### Set

View File

@ -6,8 +6,8 @@ import (
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/store/transient"
"github.com/cosmos/cosmos-sdk/store/types"
types "github.com/cosmos/cosmos-sdk/store/v2"
"github.com/cosmos/cosmos-sdk/store/v2/transient"
)
var k, v = []byte("hello"), []byte("world")

View File

@ -297,8 +297,7 @@ const (
StoreTypeTransient
StoreTypeMemory
StoreTypeSMT
StoreTypeDecoupled
StoreTypePersistent = StoreTypeDecoupled
StoreTypePersistent
)
func (st StoreType) String() string {
@ -321,8 +320,8 @@ func (st StoreType) String() string {
case StoreTypeSMT:
return "StoreTypeSMT"
case StoreTypeDecoupled:
return "StoreTypeDecoupled"
case StoreTypePersistent:
return "StoreTypePersistent"
}
return "unknown store type"

View File

@ -0,0 +1,92 @@
package dbadapter
import (
"io"
dbm "github.com/cosmos/cosmos-sdk/db"
dbutil "github.com/cosmos/cosmos-sdk/internal/db"
"github.com/cosmos/cosmos-sdk/store/cachekv"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/tracekv"
"github.com/cosmos/cosmos-sdk/store/types"
)
// Interface conformance check.
var _ types.KVStore = Store{}

// Store wraps a dbm.DBReadWriter so that it satisfies the types.KVStore
// interface. DB errors, which KVStore methods cannot return, surface as panics.
type Store struct {
	DB dbm.DBReadWriter
}

// Get wraps the underlying DB's Get method, panicking on error.
func (s Store) Get(key []byte) []byte {
	value, err := s.DB.Get(key)
	if err != nil {
		panic(err)
	}
	return value
}

// Has wraps the underlying DB's Has method, panicking on error.
func (s Store) Has(key []byte) bool {
	found, err := s.DB.Has(key)
	if err != nil {
		panic(err)
	}
	return found
}

// Set wraps the underlying DB's Set method, panicking on error.
// The key must pass types.AssertValidKey.
func (s Store) Set(key, value []byte) {
	types.AssertValidKey(key)
	if err := s.DB.Set(key, value); err != nil {
		panic(err)
	}
}

// Delete wraps the underlying DB's Delete method, panicking on error.
func (s Store) Delete(key []byte) {
	if err := s.DB.Delete(key); err != nil {
		panic(err)
	}
}

// Iterator wraps the underlying DB's Iterator method, panicking on error.
func (s Store) Iterator(start, end []byte) types.Iterator {
	source, err := s.DB.Iterator(start, end)
	if err != nil {
		panic(err)
	}
	return dbutil.DBToStoreIterator(source)
}

// ReverseIterator wraps the underlying DB's ReverseIterator method, panicking on error.
func (s Store) ReverseIterator(start, end []byte) types.Iterator {
	source, err := s.DB.ReverseIterator(start, end)
	if err != nil {
		panic(err)
	}
	return dbutil.DBToStoreIterator(source)
}

// GetStoreType returns the type of the store.
func (s Store) GetStoreType() types.StoreType {
	return types.StoreTypeDB
}

// CacheWrap branches the underlying store.
func (s Store) CacheWrap() types.CacheWrap {
	return cachekv.NewStore(s)
}

// CacheWrapWithTrace implements KVStore.
func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
	return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}

// CacheWrapWithListeners implements the CacheWrapper interface.
func (s Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
	return cachekv.NewStore(listenkv.NewStore(s, storeKey, listeners))
}

View File

@ -0,0 +1,105 @@
package dbadapter_test
import (
"bytes"
"errors"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/store/cachekv"
types "github.com/cosmos/cosmos-sdk/store/v2"
"github.com/cosmos/cosmos-sdk/store/v2/dbadapter"
mocks "github.com/cosmos/cosmos-sdk/tests/mocks/db"
)
var errFoo = errors.New("dummy")
// TestAccessors exercises the panic-on-error accessor wrappers of
// dbadapter.Store against a mocked DBReadWriter.
// Fix: removed the tautological `require.Equal(t, []byte{1, 2, 3}, []byte{1, 2, 3})`,
// which compared a literal with itself and asserted nothing.
func TestAccessors(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	mockDB := mocks.NewMockDBReadWriter(mockCtrl)
	store := dbadapter.Store{mockDB}
	key := []byte("test")
	value := []byte("testvalue")
	// Invalid keys must be rejected before reaching the DB.
	require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic")
	require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")
	require.Equal(t, types.StoreTypeDB, store.GetStoreType())
	retFoo := []byte("xxx")
	mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(retFoo, nil)
	require.True(t, bytes.Equal(retFoo, store.Get(key)))
	// DB errors must surface as panics.
	mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(nil, errFoo)
	require.Panics(t, func() { store.Get(key) })
	mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(true, nil)
	require.True(t, store.Has(key))
	mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, nil)
	require.False(t, store.Has(key))
	mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, errFoo)
	require.Panics(t, func() { store.Has(key) })
	mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(nil)
	require.NotPanics(t, func() { store.Set(key, value) })
	mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(errFoo)
	require.Panics(t, func() { store.Set(key, value) })
	mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(nil)
	require.NotPanics(t, func() { store.Delete(key) })
	mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(errFoo)
	require.Panics(t, func() { store.Delete(key) })
}
// TestIterators checks that iterator-construction errors panic, and that a
// successfully constructed iterator exposes the mocked key/value pair.
func TestIterators(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	mockDB := mocks.NewMockDBReadWriter(mockCtrl)
	store := dbadapter.Store{mockDB}
	key := []byte("test")
	value := []byte("testvalue")
	start, end := key, []byte("test_end")
	// DB errors from Iterator/ReverseIterator must surface as panics.
	mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo)
	require.Panics(t, func() { store.Iterator(start, end) })
	mockDB.EXPECT().ReverseIterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo)
	require.Panics(t, func() { store.ReverseIterator(start, end) })
	// The adapter primes the iterator with exactly one Next call (hence Times(1)),
	// then Key/Value read the current entry.
	mockIter := mocks.NewMockIterator(mockCtrl)
	mockIter.EXPECT().Next().Times(1).Return(true)
	mockIter.EXPECT().Key().Times(1).Return(key)
	mockIter.EXPECT().Value().Times(1).Return(value)
	mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(mockIter, nil)
	iter := store.Iterator(start, end)
	require.Equal(t, key, iter.Key())
	require.Equal(t, value, iter.Value())
}
// TestCacheWraps checks that each cache-wrap variant yields a cachekv.Store.
// Fix: added the missing `defer mockCtrl.Finish()` so mock expectations are
// verified, consistent with the other tests in this file.
func TestCacheWraps(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	mockDB := mocks.NewMockDBReadWriter(mockCtrl)
	store := dbadapter.Store{mockDB}
	cacheWrapper := store.CacheWrap()
	require.IsType(t, &cachekv.Store{}, cacheWrapper)
	cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil)
	require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
	cacheWrappedWithListeners := store.CacheWrapWithListeners(nil, nil)
	require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners)
}

View File

@ -1,479 +0,0 @@
package flat
import (
"crypto/sha256"
"errors"
"fmt"
"io"
"math"
"sync"
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/prefix"
abci "github.com/tendermint/tendermint/abci/types"
util "github.com/cosmos/cosmos-sdk/internal"
"github.com/cosmos/cosmos-sdk/store/cachekv"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/tracekv"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/store/v2/smt"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/kv"
)
// Interface conformance checks.
var (
	_ types.KVStore = (*Store)(nil)
	_ types.CommitKVStore = (*Store)(nil)
	_ types.Queryable = (*Store)(nil)
)
// Fixed key/prefix bytes partitioning the backing DB into logical buckets.
var (
	merkleRootKey = []byte{0} // Key for root hash of Merkle tree
	dataPrefix = []byte{1} // Prefix for state mappings
	indexPrefix = []byte{2} // Prefix for Store reverse index
	merkleNodePrefix = []byte{3} // Prefix for Merkle tree nodes
	merkleValuePrefix = []byte{4} // Prefix for Merkle value mappings
)
var (
	// ErrVersionDoesNotExist is returned when requesting a version absent from the DB.
	ErrVersionDoesNotExist = errors.New("version does not exist")
	// ErrMaximumHeight signals that the next commit would exceed the maximum block height.
	ErrMaximumHeight = errors.New("maximum block height reached")
)
// StoreConfig carries options for constructing a Store via NewStore.
type StoreConfig struct {
	// Version pruning options for backing DBs.
	Pruning types.PruningOptions
	// The backing DB to use for the state commitment Merkle tree data.
	// If nil, Merkle data is stored in the state storage DB under a separate prefix.
	MerkleDB dbm.DBConnection
	// InitialVersion, if nonzero, is the minimum version number for the first commit.
	InitialVersion uint64
}
// Store is a CommitKVStore which handles state storage and commitments as separate concerns,
// optionally using separate backing key-value DBs for each.
// Allows synchronized R/W access by locking.
type Store struct {
	stateDB dbm.DBConnection // backing DB connection for state storage
	stateTxn dbm.DBReadWriter // open transaction on stateDB
	dataTxn dbm.DBReadWriter // stateTxn view prefixed with dataPrefix (state mappings)
	merkleTxn dbm.DBReadWriter // stateTxn, or a MerkleDB transaction when one is configured
	indexTxn dbm.DBReadWriter // stateTxn view prefixed with indexPrefix (key-hash reverse index)
	// State commitment (SC) KV store for current version
	merkleStore *smt.Store
	opts StoreConfig
	mtx sync.RWMutex // guards reads/writes of the transactions above
}
// DefaultStoreConfig uses default pruning and a single shared backing DB.
var DefaultStoreConfig = StoreConfig{Pruning: types.PruneDefault, MerkleDB: nil}
// NewStore creates a new Store, or loads one if db contains existing data.
// It validates version history, reverts any uncommitted changes in the backing
// DB(s), opens the working transactions, and loads or initializes the SMT.
// On error, the already-opened state transaction is discarded.
func NewStore(db dbm.DBConnection, opts StoreConfig) (ret *Store, err error) {
	versions, err := db.Versions()
	if err != nil {
		return
	}
	loadExisting := false
	// If the DB is not empty, attempt to load existing data
	if saved := versions.Count(); saved != 0 {
		if opts.InitialVersion != 0 && versions.Last() < opts.InitialVersion {
			return nil, fmt.Errorf("latest saved version is less than initial version: %v < %v",
				versions.Last(), opts.InitialVersion)
		}
		loadExisting = true
	}
	// Drop any uncommitted data so the working state matches the last saved version.
	err = db.Revert()
	if err != nil {
		return
	}
	stateTxn := db.ReadWriter()
	// Named return `err`: any later failure also discards the open transaction.
	defer func() {
		if err != nil {
			err = util.CombineErrors(err, stateTxn.Discard(), "stateTxn.Discard also failed")
		}
	}()
	// By default Merkle data shares the state transaction; a separate MerkleDB
	// gets its own transaction after its version history is checked.
	merkleTxn := stateTxn
	if opts.MerkleDB != nil {
		var mversions dbm.VersionSet
		mversions, err = opts.MerkleDB.Versions()
		if err != nil {
			return
		}
		// Version sets of each DB must match
		if !versions.Equal(mversions) {
			err = fmt.Errorf("storage and Merkle DB have different version history")
			return
		}
		err = opts.MerkleDB.Revert()
		if err != nil {
			return
		}
		merkleTxn = opts.MerkleDB.ReadWriter()
	}
	// Load the SMT from the stored root hash, or start a fresh one.
	var merkleStore *smt.Store
	if loadExisting {
		var root []byte
		root, err = stateTxn.Get(merkleRootKey)
		if err != nil {
			return
		}
		if root == nil {
			err = fmt.Errorf("could not get root of SMT")
			return
		}
		merkleStore = loadSMT(merkleTxn, root)
	} else {
		merkleNodes := prefix.NewPrefixReadWriter(merkleTxn, merkleNodePrefix)
		merkleValues := prefix.NewPrefixReadWriter(merkleTxn, merkleValuePrefix)
		merkleStore = smt.NewStore(merkleNodes, merkleValues)
	}
	return &Store{
		stateDB: db,
		stateTxn: stateTxn,
		dataTxn: prefix.NewPrefixReadWriter(stateTxn, dataPrefix),
		indexTxn: prefix.NewPrefixReadWriter(stateTxn, indexPrefix),
		merkleTxn: merkleTxn,
		merkleStore: merkleStore,
		opts: opts,
	}, nil
}
// Close discards the store's open transactions, combining any errors from the
// state and (optional) Merkle transactions.
func (s *Store) Close() error {
	ret := s.stateTxn.Discard()
	if s.opts.MerkleDB != nil {
		ret = util.CombineErrors(ret, s.merkleTxn.Discard(), "merkleTxn.Discard also failed")
	}
	return ret
}
// Get implements KVStore; it reads from the data bucket under a read lock
// and panics on a DB error.
func (s *Store) Get(key []byte) []byte {
	s.mtx.RLock()
	defer s.mtx.RUnlock()

	value, err := s.dataTxn.Get(key)
	if err != nil {
		panic(err)
	}
	return value
}
// Has implements KVStore; it checks the data bucket under a read lock
// and panics on a DB error.
func (s *Store) Has(key []byte) bool {
	s.mtx.RLock()
	defer s.mtx.RUnlock()

	found, err := s.dataTxn.Has(key)
	if err != nil {
		panic(err)
	}
	return found
}
// Set implements KVStore.
// Writes key/value to the data transaction, updates the SMT, and records the
// sha256(key) -> key reverse-index entry. Panics on any storage error.
func (s *Store) Set(key, value []byte) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if err := s.dataTxn.Set(key, value); err != nil {
		panic(err)
	}
	s.merkleStore.Set(key, value)
	// Maintain the reverse index from key hash to preimage key.
	keyHash := sha256.Sum256(key)
	if err := s.indexTxn.Set(keyHash[:], key); err != nil {
		panic(err)
	}
}
// Delete implements KVStore.
// Removes key from the SMT, the reverse index, and the data transaction.
// Deletion errors are deliberately ignored (best-effort removal).
func (s *Store) Delete(key []byte) {
	keyHash := sha256.Sum256(key)
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.merkleStore.Delete(key)
	_ = s.indexTxn.Delete(keyHash[:])
	_ = s.dataTxn.Delete(key)
}
// contentsIterator wraps a DB iterator, caching its validity so that Valid can
// be called without advancing, as the types.Iterator contract requires.
type contentsIterator struct {
	dbm.Iterator
	valid bool
}

// newIterator advances the source once so the wrapper starts positioned on the
// first entry (or is immediately invalid when the range is empty).
func newIterator(source dbm.Iterator) *contentsIterator {
	ret := &contentsIterator{Iterator: source}
	ret.Next()
	return ret
}

// Next advances the underlying iterator and caches whether it remains valid.
func (it *contentsIterator) Next() { it.valid = it.Iterator.Next() }

// Valid reports whether the iterator is positioned on an entry.
func (it *contentsIterator) Valid() bool { return it.valid }
// Iterator implements KVStore.
// Returns an ascending iterator over [start, end); panics on a storage error.
func (s *Store) Iterator(start, end []byte) types.Iterator {
	source, err := s.dataTxn.Iterator(start, end)
	if err == nil {
		return newIterator(source)
	}
	panic(err)
}
// ReverseIterator implements KVStore.
// Returns a descending iterator over [start, end); panics on a storage error.
func (s *Store) ReverseIterator(start, end []byte) types.Iterator {
	source, err := s.dataTxn.ReverseIterator(start, end)
	if err == nil {
		return newIterator(source)
	}
	panic(err)
}
// GetStoreType implements Store.
// The flat store identifies itself as StoreTypeDecoupled.
func (s *Store) GetStoreType() types.StoreType {
	return types.StoreTypeDecoupled
}
// Commit implements Committer.
// Commits the current transaction state as the next version (fast-forwarding
// to InitialVersion if configured), then applies the pruning policy to older
// versions. Panics on any failure.
func (s *Store) Commit() types.CommitID {
	versions, err := s.stateDB.Versions()
	if err != nil {
		panic(err)
	}
	target := versions.Last() + 1
	if target > math.MaxInt64 {
		panic(ErrMaximumHeight)
	}
	// Fast forward to initial version if needed
	if s.opts.InitialVersion != 0 && target < s.opts.InitialVersion {
		target = s.opts.InitialVersion
	}
	cid, err := s.commit(target)
	if err != nil {
		panic(err)
	}
	previous := cid.Version - 1
	// Prune only on interval boundaries; KeepEvery == 1 keeps everything.
	if s.opts.Pruning.KeepEvery != 1 && s.opts.Pruning.Interval != 0 && cid.Version%int64(s.opts.Pruning.Interval) == 0 {
		// The range of newly prunable versions
		// NOTE(review): this window spans one full Interval below lastPrunable,
		// so it overlaps the previous pruning pass by one version — confirm
		// this is intentional (re-deleting is harmless but redundant).
		lastPrunable := previous - int64(s.opts.Pruning.KeepRecent)
		firstPrunable := lastPrunable - int64(s.opts.Pruning.Interval)
		for version := firstPrunable; version <= lastPrunable; version++ {
			if s.opts.Pruning.KeepEvery == 0 || version%int64(s.opts.Pruning.KeepEvery) != 0 {
				// DeleteVersion errors are ignored: pruning is best-effort.
				s.stateDB.DeleteVersion(uint64(version))
				if s.opts.MerkleDB != nil {
					s.opts.MerkleDB.DeleteVersion(uint64(version))
				}
			}
		}
	}
	return *cid
}
// commit persists the current state (and Merkle root) as version target, then
// swaps in fresh transactions for the next block. On failure, the stacked
// deferred handlers (run in reverse order) unwind each step that had already
// succeeded, combining any rollback errors into err.
func (s *Store) commit(target uint64) (id *types.CommitID, err error) {
	root := s.merkleStore.Root()
	err = s.stateTxn.Set(merkleRootKey, root)
	if err != nil {
		return
	}
	err = s.stateTxn.Commit()
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			err = util.CombineErrors(err, s.stateDB.Revert(), "stateDB.Revert also failed")
		}
	}()
	err = s.stateDB.SaveVersion(target)
	if err != nil {
		return
	}
	stateTxn := s.stateDB.ReadWriter()
	defer func() {
		if err != nil {
			err = util.CombineErrors(err, stateTxn.Discard(), "stateTxn.Discard also failed")
		}
	}()
	merkleTxn := stateTxn
	// If DBs are not separate, Merkle state has been committed & snapshotted
	if s.opts.MerkleDB != nil {
		// Roll back the already-saved state version if a Merkle step fails.
		defer func() {
			if err != nil {
				if delerr := s.stateDB.DeleteVersion(target); delerr != nil {
					err = fmt.Errorf("%w: commit rollback failed: %v", err, delerr)
				}
			}
		}()
		err = s.merkleTxn.Commit()
		if err != nil {
			return
		}
		defer func() {
			if err != nil {
				err = util.CombineErrors(err, s.opts.MerkleDB.Revert(), "merkleDB.Revert also failed")
			}
		}()
		err = s.opts.MerkleDB.SaveVersion(target)
		if err != nil {
			return
		}
		merkleTxn = s.opts.MerkleDB.ReadWriter()
	}
	// Point the store at the new transactions for subsequent writes.
	s.stateTxn = stateTxn
	s.dataTxn = prefix.NewPrefixReadWriter(stateTxn, dataPrefix)
	s.indexTxn = prefix.NewPrefixReadWriter(stateTxn, indexPrefix)
	s.merkleTxn = merkleTxn
	s.merkleStore = loadSMT(merkleTxn, root)
	return &types.CommitID{Version: int64(target), Hash: root}, nil
}
// LastCommitID implements Committer.
// Reports the most recently committed version and its Merkle root hash, or the
// zero CommitID when nothing has been committed yet. Panics on storage errors.
func (s *Store) LastCommitID() types.CommitID {
	versions, err := s.stateDB.Versions()
	if err != nil {
		panic(err)
	}
	latest := versions.Last()
	if latest == 0 {
		return types.CommitID{}
	}
	// The open transaction always holds the latest committed root hash.
	root, err := s.stateTxn.Get(merkleRootKey)
	if err != nil {
		panic(err)
	}
	return types.CommitID{Version: int64(latest), Hash: root}
}
// GetPruning returns the store's current pruning options.
func (s *Store) GetPruning() types.PruningOptions { return s.opts.Pruning }

// SetPruning replaces the store's pruning options.
func (s *Store) SetPruning(po types.PruningOptions) { s.opts.Pruning = po }
// Query implements the ABCI Queryable interface.
//
// By default we will return from (latest height - 1), as we will have Merkle
// proofs immediately (header height = data height + 1).
// If latest-1 is not present, use latest (which must be present).
// If you care to have the latest data to see a tx's results, you must
// explicitly set the height you want to see.
func (s *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
	if len(req.Data) == 0 {
		return sdkerrors.QueryResult(sdkerrors.Wrap(sdkerrors.ErrTxDecode, "query cannot be zero length"), false)
	}
	// if height is 0, use the latest height
	height := req.Height
	if height == 0 {
		versions, err := s.stateDB.Versions()
		if err != nil {
			return sdkerrors.QueryResult(errors.New("failed to get version info"), false)
		}
		latest := versions.Last()
		if versions.Exists(latest - 1) {
			height = int64(latest - 1)
		} else {
			height = int64(latest)
		}
	}
	// Guards against a uint64 version that overflowed the int64 conversion.
	if height < 0 {
		return sdkerrors.QueryResult(fmt.Errorf("height overflow: %v", height), false)
	}
	res.Height = height
	switch req.Path {
	case "/key":
		var err error
		res.Key = req.Data // data holds the key bytes
		// Read from a snapshot of the requested version.
		dbr, err := s.stateDB.ReaderAt(uint64(height))
		if err != nil {
			if errors.Is(err, dbm.ErrVersionDoesNotExist) {
				err = sdkerrors.ErrInvalidHeight
			}
			return sdkerrors.QueryResult(err, false)
		}
		defer dbr.Discard()
		contents := prefix.NewPrefixReader(dbr, dataPrefix)
		res.Value, err = contents.Get(res.Key)
		if err != nil {
			return sdkerrors.QueryResult(err, false)
		}
		if !req.Prove {
			break
		}
		// Merkle data may live in a separate DB; open a matching snapshot.
		merkleView := dbr
		if s.opts.MerkleDB != nil {
			merkleView, err = s.opts.MerkleDB.ReaderAt(uint64(height))
			if err != nil {
				return sdkerrors.QueryResult(
					fmt.Errorf("version exists in state DB but not Merkle DB: %v", height), false)
			}
			defer merkleView.Discard()
		}
		root, err := dbr.Get(merkleRootKey)
		if err != nil {
			return sdkerrors.QueryResult(err, false)
		}
		if root == nil {
			return sdkerrors.QueryResult(errors.New("Merkle root hash not found"), false) //nolint: stylecheck // proper name
		}
		merkleStore := loadSMT(dbm.ReaderAsReadWriter(merkleView), root)
		res.ProofOps, err = merkleStore.GetProof(res.Key)
		if err != nil {
			return sdkerrors.QueryResult(fmt.Errorf("Merkle proof creation failed for key: %v", res.Key), false) //nolint: stylecheck // proper name
		}
	case "/subspace":
		pairs := kv.Pairs{
			Pairs: make([]kv.Pair, 0),
		}
		subspace := req.Data
		res.Key = subspace
		// NOTE(review): this iterates the store's *current* data transaction,
		// not the requested height — confirm the asymmetry with "/key" is
		// intentional.
		iterator := s.Iterator(subspace, types.PrefixEndBytes(subspace))
		for ; iterator.Valid(); iterator.Next() {
			pairs.Pairs = append(pairs.Pairs, kv.Pair{Key: iterator.Key(), Value: iterator.Value()})
		}
		iterator.Close()
		bz, err := pairs.Marshal()
		if err != nil {
			panic(fmt.Errorf("failed to marshal KV pairs: %w", err))
		}
		res.Value = bz
	default:
		return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unexpected query path: %v", req.Path), false)
	}
	return res
}
// loadSMT opens the SMT store persisted under the Merkle node and value
// prefixes of the given transaction, rooted at the given hash.
func loadSMT(merkleTxn dbm.DBReadWriter, root []byte) *smt.Store {
	nodes := prefix.NewPrefixReadWriter(merkleTxn, merkleNodePrefix)
	values := prefix.NewPrefixReadWriter(merkleTxn, merkleValuePrefix)
	return smt.LoadStore(nodes, values, root)
}
// CacheWrap branches the store into a cachekv layer.
func (s *Store) CacheWrap() types.CacheWrap {
	return cachekv.NewStore(s)
}

// CacheWrapWithTrace branches the store with operation tracing written to w.
func (s *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
	return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}

// CacheWrapWithListeners branches the store with write listening enabled.
func (s *Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
	return cachekv.NewStore(listenkv.NewStore(s, storeKey, listeners))
}

View File

@ -1,586 +0,0 @@
package flat
import (
"errors"
"math"
"testing"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/memdb"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/types/kv"
)
var (
	// NOTE(review): cacheSize appears unused in this file — confirm before removing.
	cacheSize = 100
	// alohaData seeds stores created via newAlohaStore.
	alohaData = map[string]string{
		"hello": "goodbye",
		"aloha": "shalom",
	}
)
// newStoreWithData constructs a Store over db with the default config and
// seeds it with the given key/value data.
func newStoreWithData(t *testing.T, db dbm.DBConnection, storeData map[string]string) *Store {
	store, err := NewStore(db, DefaultStoreConfig)
	require.NoError(t, err)
	for key, value := range storeData {
		store.Set([]byte(key), []byte(value))
	}
	return store
}
// newAlohaStore returns a store over db pre-populated with alohaData.
func newAlohaStore(t *testing.T, db dbm.DBConnection) *Store {
	return newStoreWithData(t, db, alohaData)
}
// TestGetSetHasDelete covers basic CRUD behavior plus the panics required for
// nil/empty keys, nil values, and a failing reverse-index write.
func TestGetSetHasDelete(t *testing.T) {
	store := newAlohaStore(t, memdb.NewDB())
	key := "hello"
	exists := store.Has([]byte(key))
	require.True(t, exists)
	require.EqualValues(t, []byte(alohaData[key]), store.Get([]byte(key)))
	value2 := "notgoodbye"
	store.Set([]byte(key), []byte(value2))
	require.EqualValues(t, value2, store.Get([]byte(key)))
	store.Delete([]byte(key))
	exists = store.Has([]byte(key))
	require.False(t, exists)
	require.Panics(t, func() { store.Get(nil) }, "Get(nil key) should panic")
	require.Panics(t, func() { store.Get([]byte{}) }, "Get(empty key) should panic")
	require.Panics(t, func() { store.Has(nil) }, "Has(nil key) should panic")
	require.Panics(t, func() { store.Has([]byte{}) }, "Has(empty key) should panic")
	require.Panics(t, func() { store.Set(nil, []byte("value")) }, "Set(nil key) should panic")
	require.Panics(t, func() { store.Set([]byte{}, []byte("value")) }, "Set(empty key) should panic")
	require.Panics(t, func() { store.Set([]byte("key"), nil) }, "Set(nil value) should panic")
	// A failing index write must surface as a panic from Set.
	store.indexTxn = rwCrudFails{store.indexTxn}
	require.Panics(t, func() { store.Set([]byte("key"), []byte("value")) },
		"Set() when index fails should panic")
}
// TestConstructors covers NewStore load paths: reloading persisted data,
// initial-version validation, failing or busy backing DBs, mismatched version
// histories, and a missing or unreadable Merkle root.
func TestConstructors(t *testing.T) {
	db := memdb.NewDB()
	store := newAlohaStore(t, db)
	store.Commit()
	require.NoError(t, store.Close())
	store, err := NewStore(db, DefaultStoreConfig)
	require.NoError(t, err)
	value := store.Get([]byte("hello"))
	require.Equal(t, []byte("goodbye"), value)
	require.NoError(t, store.Close())
	// Loading with an initial version beyond the lowest should error
	opts := StoreConfig{InitialVersion: 5, Pruning: types.PruneNothing}
	store, err = NewStore(db, opts)
	require.Error(t, err)
	db.Close()
	store, err = NewStore(dbVersionsFails{memdb.NewDB()}, DefaultStoreConfig)
	require.Error(t, err)
	store, err = NewStore(db, StoreConfig{MerkleDB: dbVersionsFails{memdb.NewDB()}})
	require.Error(t, err)
	// can't use a DB with open writers
	db = memdb.NewDB()
	merkledb := memdb.NewDB()
	w := db.Writer()
	store, err = NewStore(db, DefaultStoreConfig)
	require.Error(t, err)
	w.Discard()
	w = merkledb.Writer()
	store, err = NewStore(db, StoreConfig{MerkleDB: merkledb})
	require.Error(t, err)
	w.Discard()
	// can't use DBs with different version history
	merkledb.SaveNextVersion()
	store, err = NewStore(db, StoreConfig{MerkleDB: merkledb})
	require.Error(t, err)
	merkledb.Close()
	// can't load existing store when we can't access the latest Merkle root hash
	store, err = NewStore(db, DefaultStoreConfig)
	require.NoError(t, err)
	store.Commit()
	require.NoError(t, store.Close())
	// because root is missing
	w = db.Writer()
	w.Delete(merkleRootKey)
	w.Commit()
	db.SaveNextVersion()
	store, err = NewStore(db, DefaultStoreConfig)
	require.Error(t, err)
	// or, because of an error
	store, err = NewStore(dbRWCrudFails{db}, DefaultStoreConfig)
	require.Error(t, err)
}
// TestIterators checks forward and reverse iteration over every combination of
// nil/bounded ranges, prefix iteration helpers, and empty-key panics.
func TestIterators(t *testing.T) {
	store := newStoreWithData(t, memdb.NewDB(), map[string]string{
		string([]byte{0x00}):       "0",
		string([]byte{0x00, 0x00}): "0 0",
		string([]byte{0x00, 0x01}): "0 1",
		string([]byte{0x00, 0x02}): "0 2",
		string([]byte{0x01}):       "1",
	})
	// testCase asserts that iter yields exactly the expected values in order.
	var testCase = func(t *testing.T, iter types.Iterator, expected []string) {
		var i int
		for i = 0; iter.Valid(); iter.Next() {
			expectedValue := expected[i]
			value := iter.Value()
			require.EqualValues(t, string(value), expectedValue)
			i++
		}
		require.Equal(t, len(expected), i)
	}
	testCase(t, store.Iterator(nil, nil),
		[]string{"0", "0 0", "0 1", "0 2", "1"})
	testCase(t, store.Iterator([]byte{0}, nil),
		[]string{"0", "0 0", "0 1", "0 2", "1"})
	testCase(t, store.Iterator([]byte{0}, []byte{0, 1}),
		[]string{"0", "0 0"})
	testCase(t, store.Iterator([]byte{0}, []byte{1}),
		[]string{"0", "0 0", "0 1", "0 2"})
	testCase(t, store.Iterator([]byte{0, 1}, []byte{1}),
		[]string{"0 1", "0 2"})
	testCase(t, store.Iterator(nil, []byte{1}),
		[]string{"0", "0 0", "0 1", "0 2"})
	testCase(t, store.Iterator([]byte{0}, []byte{0}), []string{}) // start = end
	testCase(t, store.Iterator([]byte{1}, []byte{0}), []string{}) // start > end
	testCase(t, store.ReverseIterator(nil, nil),
		[]string{"1", "0 2", "0 1", "0 0", "0"})
	testCase(t, store.ReverseIterator([]byte{0}, nil),
		[]string{"1", "0 2", "0 1", "0 0", "0"})
	testCase(t, store.ReverseIterator([]byte{0}, []byte{0, 1}),
		[]string{"0 0", "0"})
	testCase(t, store.ReverseIterator([]byte{0}, []byte{1}),
		[]string{"0 2", "0 1", "0 0", "0"})
	testCase(t, store.ReverseIterator([]byte{0, 1}, []byte{1}),
		[]string{"0 2", "0 1"})
	testCase(t, store.ReverseIterator(nil, []byte{1}),
		[]string{"0 2", "0 1", "0 0", "0"})
	testCase(t, store.ReverseIterator([]byte{0}, []byte{0}), []string{}) // start = end
	testCase(t, store.ReverseIterator([]byte{1}, []byte{0}), []string{}) // start > end
	testCase(t, types.KVStorePrefixIterator(store, []byte{0}),
		[]string{"0", "0 0", "0 1", "0 2"})
	testCase(t, types.KVStoreReversePrefixIterator(store, []byte{0}),
		[]string{"0 2", "0 1", "0 0", "0"})
	require.Panics(t, func() { store.Iterator([]byte{}, nil) }, "Iterator(empty key) should panic")
	require.Panics(t, func() { store.Iterator(nil, []byte{}) }, "Iterator(empty key) should panic")
	require.Panics(t, func() { store.ReverseIterator([]byte{}, nil) }, "Iterator(empty key) should panic")
	require.Panics(t, func() { store.ReverseIterator(nil, []byte{}) }, "Iterator(empty key) should panic")
}
// TestCommit covers Merkle-hash sanity across commits, rollback after every
// injected failure point, initial-version handling, and height overflow.
func TestCommit(t *testing.T) {
	// testBasic verifies version/hash progression for a given configuration.
	testBasic := func(opts StoreConfig) {
		// Sanity test for Merkle hashing
		store, err := NewStore(memdb.NewDB(), opts)
		require.NoError(t, err)
		require.Zero(t, store.LastCommitID())
		idNew := store.Commit()
		store.Set([]byte{0}, []byte{0})
		idOne := store.Commit()
		require.Equal(t, idNew.Version+1, idOne.Version)
		require.NotEqual(t, idNew.Hash, idOne.Hash)
		// Hash of emptied store is same as new store
		store.Delete([]byte{0})
		idEmptied := store.Commit()
		require.Equal(t, idNew.Hash, idEmptied.Hash)
		previd := idEmptied
		for i := byte(1); i < 5; i++ {
			store.Set([]byte{i}, []byte{i})
			id := store.Commit()
			lastid := store.LastCommitID()
			require.Equal(t, id.Hash, lastid.Hash)
			require.Equal(t, id.Version, lastid.Version)
			require.NotEqual(t, previd.Hash, id.Hash)
			require.NotEqual(t, previd.Version, id.Version)
		}
	}
	testBasic(StoreConfig{Pruning: types.PruneNothing})
	testBasic(StoreConfig{Pruning: types.PruneNothing, MerkleDB: memdb.NewDB()})
	// testFailedCommit asserts a panicking Commit leaves no versions behind and
	// the store reopens cleanly without the uncommitted write.
	testFailedCommit := func(t *testing.T, store *Store, db dbm.DBConnection) {
		opts := store.opts
		if db == nil {
			db = store.stateDB
		}
		store.Set([]byte{0}, []byte{0})
		require.Panics(t, func() { store.Commit() })
		require.NoError(t, store.Close())
		versions, _ := db.Versions()
		require.Equal(t, 0, versions.Count())
		if opts.MerkleDB != nil {
			versions, _ = opts.MerkleDB.Versions()
			require.Equal(t, 0, versions.Count())
		}
		store, err := NewStore(db, opts)
		require.NoError(t, err)
		require.Nil(t, store.Get([]byte{0}))
		require.NoError(t, store.Close())
	}
	// Ensure storage commit is rolled back in each failure case
	t.Run("recover after failed Commit", func(t *testing.T) {
		store, err := NewStore(
			dbRWCommitFails{memdb.NewDB()},
			StoreConfig{Pruning: types.PruneNothing})
		require.NoError(t, err)
		testFailedCommit(t, store, nil)
	})
	t.Run("recover after failed SaveVersion", func(t *testing.T) {
		store, err := NewStore(
			dbSaveVersionFails{memdb.NewDB()},
			StoreConfig{Pruning: types.PruneNothing})
		require.NoError(t, err)
		testFailedCommit(t, store, nil)
	})
	t.Run("recover after failed MerkleDB Commit", func(t *testing.T) {
		store, err := NewStore(memdb.NewDB(),
			StoreConfig{MerkleDB: dbRWCommitFails{memdb.NewDB()}, Pruning: types.PruneNothing})
		require.NoError(t, err)
		testFailedCommit(t, store, nil)
	})
	t.Run("recover after failed MerkleDB SaveVersion", func(t *testing.T) {
		store, err := NewStore(memdb.NewDB(),
			StoreConfig{MerkleDB: dbSaveVersionFails{memdb.NewDB()}, Pruning: types.PruneNothing})
		require.NoError(t, err)
		testFailedCommit(t, store, nil)
	})
	t.Run("recover after stateDB.Versions error triggers failure", func(t *testing.T) {
		db := memdb.NewDB()
		store, err := NewStore(db, DefaultStoreConfig)
		require.NoError(t, err)
		store.stateDB = dbVersionsFails{store.stateDB}
		testFailedCommit(t, store, db)
	})
	t.Run("recover after stateTxn.Set error triggers failure", func(t *testing.T) {
		store, err := NewStore(memdb.NewDB(), DefaultStoreConfig)
		require.NoError(t, err)
		store.stateTxn = rwCrudFails{store.stateTxn}
		testFailedCommit(t, store, nil)
	})
	t.Run("stateDB.DeleteVersion error triggers failure", func(t *testing.T) {
		store, err := NewStore(memdb.NewDB(), StoreConfig{MerkleDB: memdb.NewDB()})
		require.NoError(t, err)
		store.merkleTxn = rwCommitFails{store.merkleTxn}
		store.stateDB = dbDeleteVersionFails{store.stateDB}
		require.Panics(t, func() { store.Commit() })
	})
	t.Run("height overflow triggers failure", func(t *testing.T) {
		store, err := NewStore(memdb.NewDB(),
			StoreConfig{InitialVersion: math.MaxInt64, Pruning: types.PruneNothing})
		require.NoError(t, err)
		require.Equal(t, int64(math.MaxInt64), store.Commit().Version)
		require.Panics(t, func() { store.Commit() })
		require.Equal(t, int64(math.MaxInt64), store.LastCommitID().Version) // version history not modified
	})
	// setting initial version
	store, err := NewStore(memdb.NewDB(),
		StoreConfig{InitialVersion: 5, Pruning: types.PruneNothing, MerkleDB: memdb.NewDB()})
	require.NoError(t, err)
	require.Equal(t, int64(5), store.Commit().Version)
	store, err = NewStore(memdb.NewDB(), StoreConfig{MerkleDB: memdb.NewDB()})
	require.NoError(t, err)
	store.Commit()
	store.stateDB = dbVersionsFails{store.stateDB}
	require.Panics(t, func() { store.LastCommitID() })
	store, err = NewStore(memdb.NewDB(), StoreConfig{MerkleDB: memdb.NewDB()})
	require.NoError(t, err)
	store.Commit()
	store.stateTxn = rwCrudFails{store.stateTxn}
	require.Panics(t, func() { store.LastCommitID() })
}
// sliceToSet converts a slice of versions into a set for membership checks.
func sliceToSet(slice []uint64) map[uint64]struct{} {
	set := make(map[uint64]struct{}, len(slice))
	for _, v := range slice {
		set[v] = struct{}{}
	}
	return set
}
// TestPruning commits versions under several pruning policies and verifies
// exactly the expected versions survive, in both backing DBs, and that the
// pruning Interval is honored at version checkpoints.
func TestPruning(t *testing.T) {
	// Save versions up to 10 and verify pruning at final commit
	testCases := []struct {
		types.PruningOptions
		kept []uint64
	}{
		{types.PruningOptions{2, 4, 10}, []uint64{4, 8, 9, 10}},
		{types.PruningOptions{0, 4, 10}, []uint64{4, 8, 10}},
		{types.PruneEverything, []uint64{10}},
		{types.PruneNothing, []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
	}
	for tci, tc := range testCases {
		dbs := []dbm.DBConnection{memdb.NewDB(), memdb.NewDB()}
		store, err := NewStore(dbs[0], StoreConfig{Pruning: tc.PruningOptions, MerkleDB: dbs[1]})
		require.NoError(t, err)
		for i := byte(1); i <= 10; i++ {
			store.Set([]byte{i}, []byte{i})
			cid := store.Commit()
			latest := uint64(i)
			require.Equal(t, latest, uint64(cid.Version))
		}
		// Both the state DB and Merkle DB must retain the same versions.
		for _, db := range dbs {
			versions, err := db.Versions()
			require.NoError(t, err)
			kept := sliceToSet(tc.kept)
			for v := uint64(1); v <= 10; v++ {
				_, has := kept[v]
				require.Equal(t, has, versions.Exists(v), "Version = %v; tc #%d", v, tci)
			}
		}
	}
	// Test pruning interval
	// Save up to 20th version while checking history at specific version checkpoints
	opts := types.PruningOptions{0, 5, 10}
	testCheckPoints := map[uint64][]uint64{
		5:  []uint64{1, 2, 3, 4, 5},
		10: []uint64{5, 10},
		15: []uint64{5, 10, 11, 12, 13, 14, 15},
		20: []uint64{5, 10, 15, 20},
	}
	db := memdb.NewDB()
	store, err := NewStore(db, StoreConfig{Pruning: opts})
	require.NoError(t, err)
	for i := byte(1); i <= 20; i++ {
		store.Set([]byte{i}, []byte{i})
		cid := store.Commit()
		latest := uint64(i)
		require.Equal(t, latest, uint64(cid.Version))
		kept, has := testCheckPoints[latest]
		if !has {
			continue
		}
		versions, err := db.Versions()
		require.NoError(t, err)
		keptMap := sliceToSet(kept)
		for v := uint64(1); v <= latest; v++ {
			_, has := keptMap[v]
			require.Equal(t, has, versions.Exists(v), "Version = %v; tc #%d", v, i)
		}
	}
}
// TestQuery exercises the ABCI Query interface: "/key" and "/subspace" paths,
// height resolution (explicit, zero/default, invalid), proof generation with
// single and separate Merkle DBs, and the artificial error paths.
//
// Fix: the dbVersionsFails coverage case previously wrapped store.stateDB
// (the first store) instead of store2.stateDB, which it was exercising.
func TestQuery(t *testing.T) {
	store := newStoreWithData(t, memdb.NewDB(), nil)
	k1, v1 := []byte("key1"), []byte("val1")
	k2, v2 := []byte("key2"), []byte("val2")
	v3 := []byte("val3")
	ksub := []byte("key")
	KVs0 := kv.Pairs{}
	KVs1 := kv.Pairs{
		Pairs: []kv.Pair{
			{Key: k1, Value: v1},
			{Key: k2, Value: v2},
		},
	}
	KVs2 := kv.Pairs{
		Pairs: []kv.Pair{
			{Key: k1, Value: v3},
			{Key: k2, Value: v2},
		},
	}
	valExpSubEmpty, err := KVs0.Marshal()
	require.NoError(t, err)
	valExpSub1, err := KVs1.Marshal()
	require.NoError(t, err)
	valExpSub2, err := KVs2.Marshal()
	require.NoError(t, err)
	cid := store.Commit()
	ver := cid.Version
	query := abci.RequestQuery{Path: "/key", Data: k1, Height: ver}
	querySub := abci.RequestQuery{Path: "/subspace", Data: ksub, Height: ver}
	// query subspace before anything set
	qres := store.Query(querySub)
	require.True(t, qres.IsOK())
	require.Equal(t, valExpSubEmpty, qres.Value)
	// set data
	store.Set(k1, v1)
	store.Set(k2, v2)
	// set data without commit, doesn't show up
	qres = store.Query(query)
	require.True(t, qres.IsOK())
	require.Nil(t, qres.Value)
	// commit it, but still don't see on old version
	cid = store.Commit()
	qres = store.Query(query)
	require.True(t, qres.IsOK())
	require.Nil(t, qres.Value)
	// but yes on the new version
	query.Height = cid.Version
	qres = store.Query(query)
	require.True(t, qres.IsOK())
	require.Equal(t, v1, qres.Value)
	// and for the subspace
	qres = store.Query(querySub)
	require.True(t, qres.IsOK())
	require.Equal(t, valExpSub1, qres.Value)
	// modify
	store.Set(k1, v3)
	cid = store.Commit()
	// query will return old values, as height is fixed
	qres = store.Query(query)
	require.True(t, qres.IsOK())
	require.Equal(t, v1, qres.Value)
	// update to latest in the query and we are happy
	query.Height = cid.Version
	qres = store.Query(query)
	require.True(t, qres.IsOK())
	require.Equal(t, v3, qres.Value)
	query2 := abci.RequestQuery{Path: "/key", Data: k2, Height: cid.Version}
	qres = store.Query(query2)
	require.True(t, qres.IsOK())
	require.Equal(t, v2, qres.Value)
	// and for the subspace
	qres = store.Query(querySub)
	require.True(t, qres.IsOK())
	require.Equal(t, valExpSub2, qres.Value)
	// default (height 0) will show latest -1
	query0 := abci.RequestQuery{Path: "/key", Data: k1}
	qres = store.Query(query0)
	require.True(t, qres.IsOK())
	require.Equal(t, v1, qres.Value)
	// querying an empty store will fail
	store2, err := NewStore(memdb.NewDB(), DefaultStoreConfig)
	require.NoError(t, err)
	qres = store2.Query(query0)
	require.True(t, qres.IsErr())
	// default shows latest, if latest-1 does not exist
	store2.Set(k1, v1)
	store2.Commit()
	qres = store2.Query(query0)
	require.True(t, qres.IsOK())
	require.Equal(t, v1, qres.Value)
	store2.Close()
	// artificial error cases for coverage (should never happen with defined usage)
	// ensure that height overflow triggers an error
	require.NoError(t, err)
	store2.stateDB = dbVersionsIs{store2.stateDB, dbm.NewVersionManager([]uint64{uint64(math.MaxInt64) + 1})}
	qres = store2.Query(query0)
	require.True(t, qres.IsErr())
	// failure to access versions triggers an error
	// (fixed: wrap store2's own state DB, not the first store's)
	store2.stateDB = dbVersionsFails{store2.stateDB}
	qres = store2.Query(query0)
	require.True(t, qres.IsErr())
	store2.Close()
	// query with a nil or empty key fails
	badquery := abci.RequestQuery{Path: "/key", Data: []byte{}}
	qres = store.Query(badquery)
	require.True(t, qres.IsErr())
	badquery.Data = nil
	qres = store.Query(badquery)
	require.True(t, qres.IsErr())
	// querying an invalid height will fail
	badquery = abci.RequestQuery{Path: "/key", Data: k1, Height: store.LastCommitID().Version + 1}
	qres = store.Query(badquery)
	require.True(t, qres.IsErr())
	// or an invalid path
	badquery = abci.RequestQuery{Path: "/badpath", Data: k1}
	qres = store.Query(badquery)
	require.True(t, qres.IsErr())
	// test that proofs are generated with single and separate DBs
	testProve := func() {
		queryProve0 := abci.RequestQuery{Path: "/key", Data: k1, Prove: true}
		store.Query(queryProve0)
		qres = store.Query(queryProve0)
		require.True(t, qres.IsOK())
		require.Equal(t, v1, qres.Value)
		require.NotNil(t, qres.ProofOps)
	}
	testProve()
	store.Close()
	store, err = NewStore(memdb.NewDB(), StoreConfig{MerkleDB: memdb.NewDB()})
	require.NoError(t, err)
	store.Set(k1, v1)
	store.Commit()
	testProve()
	store.Close()
}
// Test doubles: thin wrappers around real DB/transaction types that force a
// failure in exactly one operation, used to drive the error paths above.
type dbDeleteVersionFails struct{ dbm.DBConnection } // DeleteVersion always errors
type dbRWCommitFails struct{ *memdb.MemDB }          // its ReadWriter's Commit always errors
type dbRWCrudFails struct{ dbm.DBConnection }        // its ReadWriter's CRUD ops always error
type dbSaveVersionFails struct{ *memdb.MemDB }       // SaveVersion always errors
// dbVersionsIs reports a fixed version set instead of the real one.
type dbVersionsIs struct {
	dbm.DBConnection
	vset dbm.VersionSet
}
type dbVersionsFails struct{ dbm.DBConnection } // Versions always errors
type rwCommitFails struct{ dbm.DBReadWriter }   // Commit discards the txn and errors
type rwCrudFails struct{ dbm.DBReadWriter }     // Get/Has/Set/Delete always error

func (dbVersionsFails) Versions() (dbm.VersionSet, error) { return nil, errors.New("dbVersionsFails") }
func (db dbVersionsIs) Versions() (dbm.VersionSet, error) { return db.vset, nil }
func (db dbRWCrudFails) ReadWriter() dbm.DBReadWriter {
	return rwCrudFails{db.DBConnection.ReadWriter()}
}
func (dbSaveVersionFails) SaveVersion(uint64) error     { return errors.New("dbSaveVersionFails") }
func (dbDeleteVersionFails) DeleteVersion(uint64) error { return errors.New("dbDeleteVersionFails") }
func (tx rwCommitFails) Commit() error {
	tx.Discard()
	return errors.New("rwCommitFails")
}
func (db dbRWCommitFails) ReadWriter() dbm.DBReadWriter {
	return rwCommitFails{db.MemDB.ReadWriter()}
}
func (rwCrudFails) Get([]byte) ([]byte, error) { return nil, errors.New("rwCrudFails.Get") }
func (rwCrudFails) Has([]byte) (bool, error)   { return false, errors.New("rwCrudFails.Has") }
func (rwCrudFails) Set([]byte, []byte) error   { return errors.New("rwCrudFails.Set") }
func (rwCrudFails) Delete([]byte) error        { return errors.New("rwCrudFails.Delete") }

44
store/v2/mem/store.go Normal file
View File

@ -0,0 +1,44 @@
package mem
import (
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/memdb"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/store/v2/dbadapter"
)
var (
_ types.KVStore = (*Store)(nil)
_ types.Committer = (*Store)(nil)
)
// Store implements an in-memory only KVStore. Entries are persisted between
// commits and thus between blocks. State in a Memory store is not committed as
// part of app state but is maintained privately by each node.
type Store struct {
	dbadapter.Store
	// conn retains the backing memdb connection the adapter reads and writes through.
	conn dbm.DBConnection
}
// NewStore constructs a new in-memory store backed by a fresh memdb.
func NewStore() *Store {
	conn := memdb.NewDB()
	ret := &Store{conn: conn}
	ret.Store = dbadapter.Store{DB: conn.ReadWriter()}
	return ret
}
// GetStoreType returns the Store's type, StoreTypeMemory.
func (s Store) GetStoreType() types.StoreType {
	return types.StoreTypeMemory
}

// Commit implements Committer. It is a no-op for the memory store: the zero
// CommitID is returned and no version is recorded.
func (s *Store) Commit() (id types.CommitID) {
	return
}

// SetPruning is a no-op: the memory store keeps no version history to prune.
func (s *Store) SetPruning(pruning types.PruningOptions) {}

// GetPruning always reports empty pruning options.
func (s *Store) GetPruning() types.PruningOptions { return types.PruningOptions{} }

// LastCommitID always returns the zero CommitID, as nothing is ever committed.
func (s Store) LastCommitID() (id types.CommitID) { return }

View File

@ -0,0 +1,39 @@
package mem_test
import (
"testing"
"github.com/stretchr/testify/require"
types "github.com/cosmos/cosmos-sdk/store/v2"
"github.com/cosmos/cosmos-sdk/store/v2/mem"
)
// TestStore exercises basic KVStore operations on the in-memory store.
func TestStore(t *testing.T) {
	store := mem.NewStore()
	key, value := []byte("key"), []byte("value")

	require.Equal(t, types.StoreTypeMemory, store.GetStoreType())

	// An unset key reads as nil; Set then makes it visible.
	require.Nil(t, store.Get(key))
	store.Set(key, value)
	require.Equal(t, value, store.Get(key))

	// Overwrite and read back.
	updated := []byte("newValue")
	store.Set(key, updated)
	require.Equal(t, updated, store.Get(key))

	// Delete removes the entry.
	store.Delete(key)
	require.Nil(t, store.Get(key))
}
// TestCommit verifies that Commit is a no-op for the memory store: commit IDs
// stay zero while data remains readable.
func TestCommit(t *testing.T) {
	store := mem.NewStore()
	key, value := []byte("key"), []byte("value")
	store.Set(key, value)

	require.True(t, store.Commit().IsZero())
	require.True(t, store.LastCommitID().IsZero())
	require.Equal(t, value, store.Get(key))
}

View File

@ -0,0 +1,36 @@
package root
import (
"github.com/cosmos/cosmos-sdk/store/cachekv"
types "github.com/cosmos/cosmos-sdk/store/v2"
)
// GetKVStore implements BasicMultiStore.
// Returns the cached substore for skey, creating a cachekv layer over the
// source substore on first access.
func (cs *cacheStore) GetKVStore(skey types.StoreKey) types.KVStore {
	name := skey.Name()
	sub, ok := cs.substores[name]
	if !ok {
		sub = cachekv.NewStore(cs.source.GetKVStore(skey))
		cs.substores[name] = sub
	}
	// Trace/listen wrappers are applied fresh on each call and not cached, so
	// users must get a new substore after modifying tracers/listeners.
	return cs.wrapTraceListen(sub, skey)
}
// Write implements CacheMultiStore.
// Flushes each cached substore's pending writes down to its source store.
func (cs *cacheStore) Write() {
	for _, sub := range cs.substores {
		sub.Write()
	}
}
// CacheMultiStore implements BasicMultiStore.
// Stacks another cache layer on top of this one, so writes can be staged and
// flushed level by level.
func (cs *cacheStore) CacheMultiStore() types.CacheMultiStore {
	next := &cacheStore{
		source:           cs,
		substores:        map[string]types.CacheKVStore{},
		traceListenMixin: newTraceListenMixin(),
	}
	return next
}

19
store/v2/multi/doc.go Normal file
View File

@ -0,0 +1,19 @@
// Package root provides concrete implementations of the store/v2 "MultiStore" types, including
// CommitMultiStore, CacheMultiStore, and BasicMultiStore (as read-only stores at past versions).
//
// Substores are declared as part of a schema within StoreOptions.
// The schema cannot be changed once a CommitMultiStore is initialized, and changes to the schema must be done
// by migrating via StoreOptions.Upgrades. If a past version is accessed, it will be loaded with the past schema.
// Stores may be declared as StoreTypePersistent, StoreTypeMemory (not persisted after close), or
// StoreTypeTransient (not persisted across commits). Non-persistent substores cannot be migrated or accessed
// in past versions.
//
// A declared persistent substore is initially empty and stores nothing in the backing DB until a value is set.
// A non-empty store is stored within a prefixed subdomain of the backing DB (using db/prefix).
// If the MultiStore is configured to use a separate DBConnection for StateCommitmentDB, it will store the
// state commitment (SC) store (as an SMT) in subdomains there, and the "flat" state is stored in the main DB.
// Each substore's SC is allocated as an independent SMT, and query proofs contain two components: a proof
// of a key's (non)existence within the substore SMT, and a proof of the substore's existence within the
// MultiStore (using the Merkle map proof spec (TendermintSpec)).
package root

905
store/v2/multi/store.go Normal file
View File

@ -0,0 +1,905 @@
package root
import (
"errors"
"fmt"
"io"
"math"
"strings"
"sync"
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/cosmos/cosmos-sdk/db"
prefixdb "github.com/cosmos/cosmos-sdk/db/prefix"
util "github.com/cosmos/cosmos-sdk/internal"
sdkmaps "github.com/cosmos/cosmos-sdk/store/internal/maps"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/prefix"
"github.com/cosmos/cosmos-sdk/store/tracekv"
types "github.com/cosmos/cosmos-sdk/store/v2"
"github.com/cosmos/cosmos-sdk/store/v2/mem"
"github.com/cosmos/cosmos-sdk/store/v2/smt"
"github.com/cosmos/cosmos-sdk/store/v2/transient"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/types/kv"
)
// Compile-time checks that the store types satisfy their store/v2 interfaces.
var (
	_ types.Queryable        = (*Store)(nil)
	_ types.CommitMultiStore = (*Store)(nil)
	_ types.CacheMultiStore  = (*cacheStore)(nil)
	_ types.BasicMultiStore  = (*viewStore)(nil)
	_ types.KVStore          = (*substore)(nil)
)
// Key-space layout: the root DB holds the namespace tree, schema, and store
// contents under distinct one-byte prefixes; each substore subdivides its own
// space the same way.
var (
	// Root prefixes
	merkleRootKey = []byte{0} // Key for root hash of namespace tree
	schemaPrefix  = []byte{1} // Prefix for store keys (namespaces)
	contentPrefix = []byte{2} // Prefix for store contents
	// Per-substore prefixes
	substoreMerkleRootKey = []byte{0} // Key for root hashes of Merkle trees
	dataPrefix            = []byte{1} // Prefix for state mappings
	indexPrefix           = []byte{2} // Prefix for Store reverse index
	merkleNodePrefix      = []byte{3} // Prefix for Merkle tree nodes
	merkleValuePrefix     = []byte{4} // Prefix for Merkle value mappings
	// ErrVersionDoesNotExist is returned when a requested version is not present.
	ErrVersionDoesNotExist = errors.New("version does not exist")
	// ErrMaximumHeight is returned when the maximum block height is exceeded.
	ErrMaximumHeight = errors.New("maximum block height reached")
)
func ErrStoreNotFound(skey string) error {
return fmt.Errorf("store does not exist for key: %s", skey)
}
// StoreConfig is used to define a schema and other options and pass them to the MultiStore constructor.
type StoreConfig struct {
	// Version pruning options for backing DBs.
	Pruning types.PruningOptions
	// The minimum allowed version number.
	InitialVersion uint64
	// The backing DB to use for the state commitment Merkle tree data.
	// If nil, Merkle data is stored in the state storage DB under a separate prefix.
	StateCommitmentDB dbm.DBConnection
	// Embedded schema builder; RegisterSubstore adds substore entries here.
	prefixRegistry
	// PersistentCache is an optional inter-block cache, copied onto the Store.
	PersistentCache types.MultiStorePersistentCache
	// Upgrades are store migrations applied while loading the store.
	Upgrades []types.StoreUpgrades
	// Embedded tracing/listening configuration, copied onto the Store.
	*traceListenMixin
}
// StoreSchema defines a mapping of substore keys to store types
type StoreSchema map[string]types.StoreType
// Store is the main persistent store type implementing CommitMultiStore.
// Substores consist of an SMT-based state commitment store and state storage.
// Substores must be reserved in the StoreConfig or defined as part of a StoreUpgrade in order to be valid.
// Note:
// The state commitment data and proof are structured in the same basic pattern as the MultiStore, but use an SMT rather than IAVL tree:
// * The state commitment store of each substore consists of a independent SMT.
// * The state commitment of the root store consists of a Merkle map of all registered persistent substore names to the root hash of their corresponding SMTs
type Store struct {
	// stateDB backs "flat" state storage.
	stateDB dbm.DBConnection
	// stateTxn is the open read-write transaction for the working version.
	stateTxn dbm.DBReadWriter
	// StateCommitmentDB optionally holds Merkle (SC) data separately;
	// nil means SC data shares stateDB.
	StateCommitmentDB dbm.DBConnection
	// stateCommitmentTxn is the open SC transaction (== stateTxn when shared).
	stateCommitmentTxn dbm.DBReadWriter

	// schema maps substore names to their store types.
	schema StoreSchema
	mem    *mem.Store       // backing for memory-type substores
	tran   *transient.Store // backing for transient substores (committed on Commit)
	// mtx guards the open transactions: Commit write-locks it while replacing
	// them; substores read-lock it.
	mtx sync.RWMutex

	// Copied from StoreConfig
	Pruning        types.PruningOptions
	InitialVersion uint64 // if nonzero, the first commit fast-forwards to this version
	*traceListenMixin

	PersistentCache types.MultiStorePersistentCache
	// substoreCache holds live persistent substores; refreshed after each commit.
	substoreCache map[string]*substore
}
// substore is a single persistent KVStore within the root Store, pairing flat
// state buckets with an SMT-based state commitment store.
type substore struct {
	root                 *Store           // owning root store
	name                 string           // substore (schema) key
	dataBucket           dbm.DBReadWriter // flat key-to-value state mappings
	indexBucket          dbm.DBReadWriter // reverse index bucket
	stateCommitmentStore *smt.Store       // this substore's SMT
}
// Branched state: a cache layer over a source store, implementing CacheMultiStore.
type cacheStore struct {
	source    types.BasicMultiStore         // the store being branched
	substores map[string]types.CacheKVStore // per-substore cache layers
	*traceListenMixin
}
// Read-only store for querying past versions
type viewStore struct {
	stateView           dbm.DBReader // reader pinned to a past version of state storage
	stateCommitmentView dbm.DBReader // reader pinned to the same version of SC data
	substoreCache       map[string]*viewSubstore
	schema              StoreSchema
}
// viewSubstore is the read-only counterpart of substore for a past version.
type viewSubstore struct {
	dataBucket           dbm.DBReader
	indexBucket          dbm.DBReader
	stateCommitmentStore *smt.Store
}
// Builder type used to create a valid schema with no prefix conflicts
type prefixRegistry struct {
	StoreSchema
	// reserved holds registered keys in sorted order so they can be
	// binary-searched for conflicts.
	reserved []string
}
// Mixin type composing trace & listen state into each root store variant type
type traceListenMixin struct {
	listeners    map[string][]types.WriteListener // per-substore write listeners
	TraceWriter  io.Writer                        // destination for trace output; nil disables tracing
	TraceContext types.TraceContext               // metadata attached to trace output
}
// newTraceListenMixin returns a mixin with an empty listener registry and no
// tracer configured.
func newTraceListenMixin() *traceListenMixin {
	tlm := &traceListenMixin{}
	tlm.listeners = make(map[string][]types.WriteListener)
	return tlm
}
// DefaultStoreConfig returns a MultiStore config with an empty schema, a single backing DB,
// pruning with PruneDefault, no listeners and no tracer.
func DefaultStoreConfig() StoreConfig {
	cfg := StoreConfig{Pruning: types.PruneDefault}
	cfg.prefixRegistry = prefixRegistry{StoreSchema: StoreSchema{}}
	cfg.traceListenMixin = newTraceListenMixin()
	return cfg
}
// Returns true for valid store types for a MultiStore schema
func validSubStoreType(sst types.StoreType) bool {
	switch sst {
	case types.StoreTypePersistent, types.StoreTypeMemory, types.StoreTypeTransient:
		return true
	}
	return false
}
// equal reports whether both schema maps match exactly (including mem/tran
// store entries). Since the lengths are compared first, checking that every
// entry of that appears identically in ss suffices.
func (ss StoreSchema) equal(that StoreSchema) bool {
	if len(ss) != len(that) {
		return false
	}
	for key, val := range that {
		// idiomatic receiver name instead of the original non-Go "this"
		if myval, has := ss[key]; !has || myval != val {
			return false
		}
	}
	return true
}
// Parses a schema from the DB
func readSavedSchema(bucket dbm.DBReader) (*prefixRegistry, error) {
	reg := prefixRegistry{StoreSchema: StoreSchema{}}
	it, err := bucket.Iterator(nil, nil)
	if err != nil {
		return nil, err
	}
	for it.Next() {
		value := it.Value()
		// Each value must be a single byte encoding a valid store type.
		if len(value) != 1 || !validSubStoreType(types.StoreType(value[0])) {
			return nil, fmt.Errorf("invalid mapping for store key: %v => %v", it.Key(), value)
		}
		name := string(it.Key())
		reg.StoreSchema[name] = types.StoreType(value[0])
		reg.reserved = append(reg.reserved, name) // assume iter yields keys sorted
	}
	if err = it.Close(); err != nil {
		return nil, err
	}
	return &reg, nil
}
// NewStore constructs a MultiStore directly from a database.
// Creates a new store if no data exists; otherwise loads existing data.
//
// The configured schema must match any previously saved schema exactly.
// Any configured Upgrades are applied before the (possibly migrated) schema
// is re-written to the working transaction; nothing is persisted until the
// next Store.Commit.
func NewStore(db dbm.DBConnection, opts StoreConfig) (ret *Store, err error) {
	versions, err := db.Versions()
	if err != nil {
		return
	}
	// If the DB is not empty, attempt to load existing data
	if saved := versions.Count(); saved != 0 {
		if opts.InitialVersion != 0 && versions.Last() < opts.InitialVersion {
			return nil, fmt.Errorf("latest saved version is less than initial version: %v < %v",
				versions.Last(), opts.InitialVersion)
		}
	}
	// To abide by atomicity constraints, revert the DB to the last saved version, in case it contains
	// committed data in the "working" version.
	// This should only happen if Store.Commit previously failed.
	err = db.Revert()
	if err != nil {
		return
	}
	stateTxn := db.ReadWriter()
	// From here on, any error must discard the opened transaction(s).
	defer func() {
		if err != nil {
			err = util.CombineErrors(err, stateTxn.Discard(), "stateTxn.Discard also failed")
		}
	}()
	stateCommitmentTxn := stateTxn
	if opts.StateCommitmentDB != nil {
		var scVersions dbm.VersionSet
		scVersions, err = opts.StateCommitmentDB.Versions()
		if err != nil {
			return
		}
		// Version sets of each DB must match
		if !versions.Equal(scVersions) {
			err = fmt.Errorf("Storage and StateCommitment DB have different version history") //nolint:stylecheck
			return
		}
		err = opts.StateCommitmentDB.Revert()
		if err != nil {
			return
		}
		stateCommitmentTxn = opts.StateCommitmentDB.ReadWriter()
	}
	ret = &Store{
		stateDB:            db,
		stateTxn:           stateTxn,
		StateCommitmentDB:  opts.StateCommitmentDB,
		stateCommitmentTxn: stateCommitmentTxn,
		mem:                mem.NewStore(),
		tran:               transient.NewStore(),
		substoreCache:      map[string]*substore{},
		traceListenMixin:   opts.traceListenMixin,
		PersistentCache:    opts.PersistentCache,
		Pruning:            opts.Pruning,
		InitialVersion:     opts.InitialVersion,
	}

	// Now load the substore schema
	schemaView := prefixdb.NewPrefixReader(ret.stateDB.Reader(), schemaPrefix)
	defer func() {
		if err != nil {
			err = util.CombineErrors(err, schemaView.Discard(), "schemaView.Discard also failed")
			err = util.CombineErrors(err, ret.Close(), "base.Close also failed")
		}
	}()
	reg, err := readSavedSchema(schemaView)
	if err != nil {
		return
	}
	// If the loaded schema is empty (for new store), just copy the config schema;
	// Otherwise, verify it is identical to the config schema
	if len(reg.StoreSchema) == 0 {
		for k, v := range opts.StoreSchema {
			reg.StoreSchema[k] = v
		}
		reg.reserved = make([]string, len(opts.reserved))
		copy(reg.reserved, opts.reserved)
	} else {
		if !reg.equal(opts.StoreSchema) {
			err = errors.New("loaded schema does not match configured schema")
			return
		}
	}
	// Apply migrations, then clear old schema and write the new one
	for _, upgrades := range opts.Upgrades {
		err = reg.migrate(ret, upgrades)
		if err != nil {
			return
		}
	}
	schemaWriter := prefixdb.NewPrefixWriter(ret.stateTxn, schemaPrefix)
	// Delete the saved schema entries before writing the (possibly migrated) set
	it, err := schemaView.Iterator(nil, nil)
	if err != nil {
		return
	}
	for it.Next() {
		err = schemaWriter.Delete(it.Key())
		if err != nil {
			return
		}
	}
	err = it.Close()
	if err != nil {
		return
	}
	err = schemaView.Discard()
	if err != nil {
		return
	}
	// NB. the migrated contents and schema are not committed until the next store.Commit
	for skey, typ := range reg.StoreSchema {
		err = schemaWriter.Set([]byte(skey), []byte{byte(typ)})
		if err != nil {
			return
		}
	}
	ret.schema = reg.StoreSchema
	return
}
// Close discards the store's open transaction(s) without committing, combining
// any errors from both the state and state-commitment transactions.
func (s *Store) Close() error {
	err := s.stateTxn.Discard()
	if s.StateCommitmentDB == nil {
		return err
	}
	return util.CombineErrors(err, s.stateCommitmentTxn.Discard(), "stateCommitmentTxn.Discard also failed")
}
// Applies store upgrades to the DB contents.
//
// Deletions unregister a substore and erase its stored content; renames
// re-register it under the new key and copy its content; additions register
// new persistent substores. Returns an error if a deleted or renamed key is
// not a registered persistent substore. Unlike the original version, errors
// from the underlying Delete/Set/Close calls are now propagated instead of
// being silently ignored.
func (pr *prefixRegistry) migrate(store *Store, upgrades types.StoreUpgrades) error {
	// Get a view of current state to allow mutation while iterating
	reader := store.stateDB.Reader()
	scReader := reader
	if store.StateCommitmentDB != nil {
		scReader = store.StateCommitmentDB.Reader()
	}

	for _, key := range upgrades.Deleted {
		sst, ix, err := pr.storeInfo(key)
		if err != nil {
			return err
		}
		if sst != types.StoreTypePersistent {
			return fmt.Errorf("prefix is for non-persistent substore: %v (%v)", key, sst)
		}
		// Unregister the substore from the schema and reserved-prefix list
		pr.reserved = append(pr.reserved[:ix], pr.reserved[ix+1:]...)
		delete(pr.StoreSchema, key)

		pfx := substorePrefix(key)
		// NOTE(review): as in the original, deletion targets the unprefixed
		// transaction using the iterator's key — confirm this agrees with the
		// prefixdb iterator's Key() semantics.
		if err = deleteKeys(prefixdb.NewPrefixReader(reader, pfx), store.stateTxn); err != nil {
			return err
		}
		if store.StateCommitmentDB != nil {
			if err = deleteKeys(prefixdb.NewPrefixReader(scReader, pfx), store.stateCommitmentTxn); err != nil {
				return err
			}
		}
	}

	for _, rename := range upgrades.Renamed {
		sst, ix, err := pr.storeInfo(rename.OldKey)
		if err != nil {
			return err
		}
		if sst != types.StoreTypePersistent {
			return fmt.Errorf("prefix is for non-persistent substore: %v (%v)", rename.OldKey, sst)
		}
		// Unregister the old key, then register the new one
		pr.reserved = append(pr.reserved[:ix], pr.reserved[ix+1:]...)
		delete(pr.StoreSchema, rename.OldKey)
		if err = pr.RegisterSubstore(rename.NewKey, types.StoreTypePersistent); err != nil {
			return err
		}

		// Copy substore content (and SC data, if separate) to the new prefix
		oldPrefix := substorePrefix(rename.OldKey)
		newPrefix := substorePrefix(rename.NewKey)
		err = copyKeys(
			prefixdb.NewPrefixReader(reader, oldPrefix),
			prefixdb.NewPrefixWriter(store.stateTxn, newPrefix),
		)
		if err != nil {
			return err
		}
		if store.StateCommitmentDB != nil {
			err = copyKeys(
				prefixdb.NewPrefixReader(scReader, oldPrefix),
				prefixdb.NewPrefixWriter(store.stateCommitmentTxn, newPrefix),
			)
			if err != nil {
				return err
			}
		}
	}

	for _, key := range upgrades.Added {
		if err := pr.RegisterSubstore(key, types.StoreTypePersistent); err != nil {
			return err
		}
	}
	return nil
}

// deleteKeys deletes every key yielded by src's full iterator from dst.
func deleteKeys(src dbm.DBReader, dst dbm.DBWriter) error {
	it, err := src.Iterator(nil, nil)
	if err != nil {
		return err
	}
	for it.Next() {
		if err := dst.Delete(it.Key()); err != nil {
			it.Close() // best-effort cleanup; the Delete error takes precedence
			return err
		}
	}
	return it.Close()
}

// copyKeys copies every key-value pair yielded by src's full iterator to dst.
func copyKeys(src dbm.DBReader, dst dbm.DBWriter) error {
	it, err := src.Iterator(nil, nil)
	if err != nil {
		return err
	}
	for it.Next() {
		if err := dst.Set(it.Key(), it.Value()); err != nil {
			it.Close() // best-effort cleanup; the Set error takes precedence
			return err
		}
	}
	return it.Close()
}
// substorePrefix returns the DB prefix under which a substore's content lives.
func substorePrefix(key string) []byte {
	// Build a fresh slice rather than appending directly to the shared
	// package-level contentPrefix: if contentPrefix ever had spare capacity,
	// append would write into its backing array and alias across calls.
	pfx := make([]byte, 0, len(contentPrefix)+len(key))
	pfx = append(pfx, contentPrefix...)
	return append(pfx, key...)
}
// GetKVStore implements BasicMultiStore.
// Panics if skey is not registered in the schema, or if a persistent substore
// cannot be loaded.
func (rs *Store) GetKVStore(skey types.StoreKey) types.KVStore {
	key := skey.Name()
	var parent types.KVStore
	typ, has := rs.schema[key]
	if !has {
		panic(ErrStoreNotFound(key))
	}
	// Memory and transient stores are plain prefixed views over shared backings;
	// persistent stores leave parent nil and go through getSubstore below.
	switch typ {
	case types.StoreTypeMemory:
		parent = rs.mem
	case types.StoreTypeTransient:
		parent = rs.tran
	case types.StoreTypePersistent:
	default:
		panic(fmt.Errorf("StoreType not supported: %v", typ)) // should never happen
	}
	var ret types.KVStore
	if parent != nil { // store is non-persistent
		ret = prefix.NewStore(parent, []byte(key))
	} else { // store is persistent
		sub, err := rs.getSubstore(key)
		if err != nil {
			panic(err)
		}
		// NOTE(review): this cache write is not guarded by rs.mtx, while Commit
		// write-locks it — confirm concurrent GetKVStore/Commit use is safe.
		rs.substoreCache[key] = sub
		ret = sub
	}
	// Wrap with trace/listen if needed. Note: we don't cache this, so users must get a new substore after
	// modifying tracers/listeners.
	return rs.wrapTraceListen(ret, skey)
}
// Gets a persistent substore. This reads, but does not update the substore cache.
// Use it in cases where we need to access a store internally (e.g. read/write Merkle keys, queries)
func (rs *Store) getSubstore(key string) (*substore, error) {
	if cached, has := rs.substoreCache[key]; has {
		return cached, nil
	}
	pfx := substorePrefix(key)
	stateRW := prefixdb.NewPrefixReadWriter(rs.stateTxn, pfx)
	stateCommitmentRW := prefixdb.NewPrefixReadWriter(rs.stateCommitmentTxn, pfx)
	var stateCommitmentStore *smt.Store

	// A stored root hash means the substore has committed data: load its SMT;
	// otherwise start a fresh, empty SMT.
	rootHash, err := stateRW.Get(substoreMerkleRootKey)
	if err != nil {
		return nil, err
	}
	if rootHash != nil {
		stateCommitmentStore = loadSMT(stateCommitmentRW, rootHash)
	} else {
		merkleNodes := prefixdb.NewPrefixReadWriter(stateCommitmentRW, merkleNodePrefix)
		merkleValues := prefixdb.NewPrefixReadWriter(stateCommitmentRW, merkleValuePrefix)
		stateCommitmentStore = smt.NewStore(merkleNodes, merkleValues)
	}

	return &substore{
		root:                 rs,
		name:                 key,
		dataBucket:           prefixdb.NewPrefixReadWriter(stateRW, dataPrefix),
		indexBucket:          prefixdb.NewPrefixReadWriter(stateRW, indexPrefix),
		stateCommitmentStore: stateCommitmentStore,
	}, nil
}
// Resets a substore's state after commit (because root stateTxn has been discarded)
func (s *substore) refresh(rootHash []byte) {
	pfx := substorePrefix(s.name)
	stateRW := prefixdb.NewPrefixReadWriter(s.root.stateTxn, pfx)
	s.dataBucket = prefixdb.NewPrefixReadWriter(stateRW, dataPrefix)
	s.indexBucket = prefixdb.NewPrefixReadWriter(stateRW, indexPrefix)
	scRW := prefixdb.NewPrefixReadWriter(s.root.stateCommitmentTxn, pfx)
	s.stateCommitmentStore = loadSMT(scRW, rootHash)
}
// Commit implements Committer.
//
// Commits the working state as the next version (fast-forwarding to
// InitialVersion for the first commit, if configured), applies the pruning
// policy, and commits the transient store. Panics on any failure.
func (s *Store) Commit() types.CommitID {
	// Substores read-lock this mutex; lock to prevent racey invalidation of underlying txns
	s.mtx.Lock()
	defer s.mtx.Unlock()

	// Determine the target version
	versions, err := s.stateDB.Versions()
	if err != nil {
		panic(err)
	}
	target := versions.Last() + 1
	if target > math.MaxInt64 {
		panic(ErrMaximumHeight)
	}
	// Fast forward to initial version if needed
	if s.InitialVersion != 0 && target < s.InitialVersion {
		target = s.InitialVersion
	}
	cid, err := s.commit(target)
	if err != nil {
		panic(err)
	}

	// Prune if necessary
	previous := cid.Version - 1
	if s.Pruning.KeepEvery != 1 && s.Pruning.Interval != 0 && cid.Version%int64(s.Pruning.Interval) == 0 {
		// The range of newly prunable versions
		lastPrunable := previous - int64(s.Pruning.KeepRecent)
		firstPrunable := lastPrunable - int64(s.Pruning.Interval)
		for version := firstPrunable; version <= lastPrunable; version++ {
			if s.Pruning.KeepEvery == 0 || version%int64(s.Pruning.KeepEvery) != 0 {
				// NOTE(review): DeleteVersion errors are ignored — pruning
				// appears to be best-effort; confirm this is intentional.
				s.stateDB.DeleteVersion(uint64(version))
				if s.StateCommitmentDB != nil {
					s.StateCommitmentDB.DeleteVersion(uint64(version))
				}
			}
		}
	}
	s.tran.Commit()
	return *cid
}
// getMerkleRoots returns the SMT root hash of every substore in the schema,
// keyed by substore name. Cached substores are used when present; others are
// loaded (without updating the cache).
func (s *Store) getMerkleRoots() (ret map[string][]byte, err error) {
	ret = map[string][]byte{}
	for key := range s.schema {
		sub, cached := s.substoreCache[key]
		if !cached {
			if sub, err = s.getSubstore(key); err != nil {
				return
			}
		}
		ret[key] = sub.stateCommitmentStore.Root()
	}
	return
}
// Calculates root hashes and commits to DB. Does not verify target version or perform pruning.
//
// The deferred handlers form a rollback chain: any later failure reverts or
// discards everything done so far, so either both DBs save the version or
// neither does (barring a rollback failure, which is surfaced in the error).
func (s *Store) commit(target uint64) (id *types.CommitID, err error) {
	storeHashes, err := s.getMerkleRoots()
	if err != nil {
		return
	}
	// Update substore Merkle roots
	for key, storeHash := range storeHashes {
		pfx := substorePrefix(key)
		stateW := prefixdb.NewPrefixReadWriter(s.stateTxn, pfx)
		if err = stateW.Set(substoreMerkleRootKey, storeHash); err != nil {
			return
		}
	}
	// The root hash commits to the map of substore name => substore root hash
	rootHash := sdkmaps.HashFromMap(storeHashes)
	if err = s.stateTxn.Set(merkleRootKey, rootHash); err != nil {
		return
	}
	if err = s.stateTxn.Commit(); err != nil {
		return
	}
	defer func() {
		if err != nil {
			err = util.CombineErrors(err, s.stateDB.Revert(), "stateDB.Revert also failed")
		}
	}()
	err = s.stateDB.SaveVersion(target)
	if err != nil {
		return
	}

	// Open fresh transactions for the next working version
	stateTxn := s.stateDB.ReadWriter()
	defer func() {
		if err != nil {
			err = util.CombineErrors(err, stateTxn.Discard(), "stateTxn.Discard also failed")
		}
	}()
	stateCommitmentTxn := stateTxn

	// If DBs are not separate, StateCommitment state has been committed & snapshotted
	if s.StateCommitmentDB != nil {
		// if any error is encountered henceforth, we must revert the state and SC dbs
		defer func() {
			if err != nil {
				if delerr := s.stateDB.DeleteVersion(target); delerr != nil {
					err = fmt.Errorf("%w: commit rollback failed: %v", err, delerr)
				}
			}
		}()
		err = s.stateCommitmentTxn.Commit()
		if err != nil {
			return
		}
		defer func() {
			if err != nil {
				err = util.CombineErrors(err, s.StateCommitmentDB.Revert(), "stateCommitmentDB.Revert also failed")
			}
		}()
		err = s.StateCommitmentDB.SaveVersion(target)
		if err != nil {
			return
		}
		stateCommitmentTxn = s.StateCommitmentDB.ReadWriter()
	}

	s.stateTxn = stateTxn
	s.stateCommitmentTxn = stateCommitmentTxn
	// the state of all live substores must be refreshed
	for key, sub := range s.substoreCache {
		sub.refresh(storeHashes[key])
	}

	return &types.CommitID{Version: int64(target), Hash: rootHash}, nil
}
// LastCommitID implements Committer.
// Returns the zero CommitID when nothing has been committed yet; panics if the
// backing DB cannot be read.
func (s *Store) LastCommitID() types.CommitID {
	versions, err := s.stateDB.Versions()
	if err != nil {
		panic(err)
	}
	last := versions.Last()
	if last == 0 {
		return types.CommitID{}
	}
	// Latest Merkle root is the one currently stored
	root, err := s.stateTxn.Get(merkleRootKey)
	if err != nil {
		panic(err)
	}
	return types.CommitID{Version: int64(last), Hash: root}
}
// SetInitialVersion implements CommitMultiStore.
// Sets the minimum version number for the next commit (Commit fast-forwards to
// it when needed). Always returns nil.
func (rs *Store) SetInitialVersion(version uint64) error {
	rs.InitialVersion = version // already uint64; the original's conversion was redundant
	return nil
}
// GetVersion implements CommitMultiStore.
// Returns a read-only view of the store at the given version.
func (rs *Store) GetVersion(version int64) (types.BasicMultiStore, error) {
	return rs.getView(version)
}
// CacheMultiStore implements BasicMultiStore.
// Returns a fresh cache branch over this store with no tracing or listeners.
func (rs *Store) CacheMultiStore() types.CacheMultiStore {
	branch := &cacheStore{
		source:           rs,
		substores:        map[string]types.CacheKVStore{},
		traceListenMixin: newTraceListenMixin(),
	}
	return branch
}
// parsePath expects a format like /<storeName>[/<subpath>]
// Must start with /, subpath may be empty
// Returns error if it doesn't start with /
func parsePath(path string) (storeName string, subpath string, err error) {
	if !strings.HasPrefix(path, "/") {
		err = sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "invalid path: %s", path)
		return
	}
	// Split off the store name; anything after the second "/" is the subpath.
	parts := strings.SplitN(path[1:], "/", 2)
	storeName = parts[0]
	if len(parts) == 2 {
		subpath = "/" + parts[1]
	}
	return
}
// Query implements ABCI interface, allows queries.
//
// by default we will return from (latest height -1),
// as we will have merkle proofs immediately (header height = data height + 1)
// If latest-1 is not present, use latest (which must be present)
// if you care to have the latest data to see a tx results, you must
// explicitly set the height you want to see
func (rs *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
	if len(req.Data) == 0 {
		return sdkerrors.QueryResult(sdkerrors.Wrap(sdkerrors.ErrTxDecode, "query cannot be zero length"), false)
	}

	// if height is 0, use the latest height
	height := req.Height
	if height == 0 {
		versions, err := rs.stateDB.Versions()
		if err != nil {
			return sdkerrors.QueryResult(errors.New("failed to get version info"), false)
		}
		latest := versions.Last()
		if versions.Exists(latest - 1) {
			height = int64(latest - 1)
		} else {
			height = int64(latest)
		}
	}
	if height < 0 {
		return sdkerrors.QueryResult(fmt.Errorf("height overflow: %v", height), false)
	}
	res.Height = height

	storeName, subpath, err := parsePath(req.Path)
	if err != nil {
		return sdkerrors.QueryResult(sdkerrors.Wrapf(err, "failed to parse path"), false)
	}

	view, err := rs.getView(height)
	if err != nil {
		if errors.Is(err, dbm.ErrVersionDoesNotExist) {
			err = sdkerrors.ErrInvalidHeight
		}
		return sdkerrors.QueryResult(sdkerrors.Wrapf(err, "failed to access height"), false)
	}

	if _, has := rs.schema[storeName]; !has {
		return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "no such store: %s", storeName), false)
	}
	substore, err := view.getSubstore(storeName)
	if err != nil {
		return sdkerrors.QueryResult(sdkerrors.Wrapf(err, "failed to access store: %s", storeName), false)
	}

	switch subpath {
	case "/key":
		var err error
		res.Key = req.Data // data holds the key bytes
		res.Value = substore.Get(res.Key)
		if !req.Prove {
			break
		}
		// TODO: actual IBC compatible proof. This is a placeholder so unit tests can pass
		res.ProofOps, err = substore.stateCommitmentStore.GetProof([]byte(storeName + string(res.Key)))
		if err != nil {
			return sdkerrors.QueryResult(fmt.Errorf("Merkle proof creation failed for key: %v", res.Key), false) //nolint: stylecheck // proper name
		}

	case "/subspace":
		// The original assigned res.Key = req.Data twice; once is enough.
		res.Key = req.Data // data holds the subspace prefix
		pairs := kv.Pairs{
			Pairs: make([]kv.Pair, 0),
		}
		iterator := substore.Iterator(res.Key, types.PrefixEndBytes(res.Key))
		for ; iterator.Valid(); iterator.Next() {
			pairs.Pairs = append(pairs.Pairs, kv.Pair{Key: iterator.Key(), Value: iterator.Value()})
		}
		iterator.Close()
		bz, err := pairs.Marshal()
		if err != nil {
			panic(fmt.Errorf("failed to marshal KV pairs: %w", err))
		}
		res.Value = bz

	default:
		return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unexpected query path: %v", req.Path), false)
	}

	return res
}
// loadSMT reconstructs a substore's SMT from the node and value buckets stored
// under the given transaction, rooted at root.
func loadSMT(stateCommitmentTxn dbm.DBReadWriter, root []byte) *smt.Store {
	nodes := prefixdb.NewPrefixReadWriter(stateCommitmentTxn, merkleNodePrefix)
	values := prefixdb.NewPrefixReadWriter(stateCommitmentTxn, merkleValuePrefix)
	return smt.LoadStore(nodes, values, root)
}
// binarySearch returns the index of ndl in the sorted slice hay and whether it
// was found; when absent, the index is the insertion point that keeps hay
// sorted. Uses the standard library instead of a hand-rolled search.
func binarySearch(hay []string, ndl string) (int, bool) {
	i := sort.SearchStrings(hay, ndl)
	return i, i < len(hay) && hay[i] == ndl
}
// storeInfo looks up a registered key, returning its store type and its index
// in the reserved list. Fails if the key is unregistered, or registered but
// missing from the schema.
func (pr *prefixRegistry) storeInfo(key string) (sst types.StoreType, ix int, err error) {
	var found bool
	ix, found = binarySearch(pr.reserved, key)
	if !found {
		return sst, ix, fmt.Errorf("prefix does not exist: %v", key)
	}
	sst, found = pr.StoreSchema[key]
	if !found {
		return sst, ix, fmt.Errorf("prefix is registered but not in schema: %v", key)
	}
	return sst, ix, nil
}
// RegisterSubstore reserves key in the schema with the given store type,
// rejecting invalid types, duplicates, and keys that are a prefix of (or
// prefixed by) an existing reservation.
func (pr *prefixRegistry) RegisterSubstore(key string, typ types.StoreType) error {
	if !validSubStoreType(typ) {
		return fmt.Errorf("StoreType not supported: %v", typ)
	}
	// Find the neighboring reserved prefix, and check for duplicates and conflicts
	i, has := binarySearch(pr.reserved, key)
	if has {
		return fmt.Errorf("prefix already exists: %v", key)
	}
	if i > 0 && strings.HasPrefix(key, pr.reserved[i-1]) {
		return fmt.Errorf("prefix conflict: '%v' exists, cannot add '%v'", pr.reserved[i-1], key)
	}
	if i < len(pr.reserved) && strings.HasPrefix(pr.reserved[i], key) {
		return fmt.Errorf("prefix conflict: '%v' exists, cannot add '%v'", pr.reserved[i], key)
	}
	// Insert key at index i. Build a fresh slice: the original appended to
	// pr.reserved[:i] in place, which can overwrite pr.reserved[i] (when the
	// slice has spare capacity) before it is re-appended, duplicating entries.
	reserved := make([]string, 0, len(pr.reserved)+1)
	reserved = append(reserved, pr.reserved[:i]...)
	reserved = append(reserved, key)
	pr.reserved = append(reserved, pr.reserved[i:]...)
	pr.StoreSchema[key] = typ
	return nil
}
// AddListeners registers additional write listeners for the given store key.
func (tlm *traceListenMixin) AddListeners(skey types.StoreKey, listeners []types.WriteListener) {
	name := skey.Name()
	existing := tlm.listeners[name]
	tlm.listeners[name] = append(existing, listeners...)
}
// ListeningEnabled returns if listening is enabled for a specific KVStore
func (tlm *traceListenMixin) ListeningEnabled(key types.StoreKey) bool {
	// A missing key yields a nil slice, so a single len check covers both cases.
	return len(tlm.listeners[key.Name()]) != 0
}
// TracingEnabled reports whether a trace writer has been set.
func (tlm *traceListenMixin) TracingEnabled() bool {
	return tlm.TraceWriter != nil
}

// SetTracer sets the writer to which trace output is written.
func (tlm *traceListenMixin) SetTracer(w io.Writer) {
	tlm.TraceWriter = w
}

// SetTraceContext sets the context metadata attached to trace output.
func (tlm *traceListenMixin) SetTraceContext(tc types.TraceContext) {
	tlm.TraceContext = tc
}
// wrapTraceListen layers tracing and/or listening wrappers onto a store when
// they are enabled for the given key.
func (tlm *traceListenMixin) wrapTraceListen(store types.KVStore, skey types.StoreKey) types.KVStore {
	wrapped := store
	if tlm.TracingEnabled() {
		wrapped = tracekv.NewStore(wrapped, tlm.TraceWriter, tlm.TraceContext)
	}
	if tlm.ListeningEnabled(skey) {
		wrapped = listenkv.NewStore(wrapped, skey, tlm.listeners[skey.Name()])
	}
	return wrapped
}
// GetPruning returns the store's pruning options.
func (s *Store) GetPruning() types.PruningOptions { return s.Pruning }

// SetPruning sets the store's pruning options.
func (s *Store) SetPruning(po types.PruningOptions) { s.Pruning = po }
// Restore is currently a stub: it performs no restoration and always returns nil.
func (rs *Store) Restore(height uint64, format uint32, chunks <-chan io.ReadCloser, ready chan<- struct{}) error {
	return nil
}

// Snapshot is currently a stub: it produces no snapshot stream and returns (nil, nil).
func (rs *Store) Snapshot(height uint64, format uint32) (<-chan io.ReadCloser, error) {
	return nil, nil
}

View File

@ -0,0 +1,980 @@
package root
import (
"bytes"
"math"
"testing"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/cosmos/cosmos-sdk/codec"
codecTypes "github.com/cosmos/cosmos-sdk/codec/types"
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/memdb"
types "github.com/cosmos/cosmos-sdk/store/v2"
"github.com/cosmos/cosmos-sdk/types/kv"
)
var (
	cacheSize = 100
	// alohaData is a small key-value fixture shared by several tests.
	alohaData = map[string]string{
		"hello": "goodbye",
		"aloha": "shalom",
	}
	// Store keys reused throughout the test suite.
	skey_1  = types.NewKVStoreKey("store1")
	skey_2  = types.NewKVStoreKey("store2")
	skey_3  = types.NewKVStoreKey("store3")
	skey_4  = types.NewKVStoreKey("store4")
	skey_1b = types.NewKVStoreKey("store1b")
	skey_2b = types.NewKVStoreKey("store2b")
	skey_3b = types.NewKVStoreKey("store3b")
)
// simpleStoreConfig returns a default config with a single persistent substore
// registered under skey_1.
func simpleStoreConfig(t *testing.T) StoreConfig {
	config := DefaultStoreConfig()
	require.NoError(t, config.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent))
	return config
}
// storeConfig123 returns a no-pruning config with three persistent substores
// (store1, store2, store3).
func storeConfig123(t *testing.T) StoreConfig {
	config := DefaultStoreConfig()
	config.Pruning = types.PruneNothing
	for _, skey := range []types.StoreKey{skey_1, skey_2, skey_3} {
		require.NoError(t, config.RegisterSubstore(skey.Name(), types.StoreTypePersistent))
	}
	return config
}
// newSubStoreWithData builds a single-substore Store over db and pre-populates
// the substore with the given key-value data.
func newSubStoreWithData(t *testing.T, db dbm.DBConnection, storeData map[string]string) (*Store, types.KVStore) {
	root, err := NewStore(db, simpleStoreConfig(t))
	require.NoError(t, err)

	kvstore := root.GetKVStore(skey_1)
	for key, value := range storeData {
		kvstore.Set([]byte(key), []byte(value))
	}
	return root, kvstore
}
// TestGetSetHasDelete covers basic KVStore CRUD semantics on a substore,
// including the panics required for nil/empty keys and nil values, and a
// panic when the substore's index bucket fails during Set.
func TestGetSetHasDelete(t *testing.T) {
	_, store := newSubStoreWithData(t, memdb.NewDB(), alohaData)
	key := "hello"

	exists := store.Has([]byte(key))
	require.True(t, exists)
	require.EqualValues(t, []byte(alohaData[key]), store.Get([]byte(key)))

	value2 := "notgoodbye"
	store.Set([]byte(key), []byte(value2))
	require.EqualValues(t, value2, store.Get([]byte(key)))

	store.Delete([]byte(key))
	exists = store.Has([]byte(key))
	require.False(t, exists)

	// Invalid accesses must panic per the KVStore contract
	require.Panics(t, func() { store.Get(nil) }, "Get(nil key) should panic")
	require.Panics(t, func() { store.Get([]byte{}) }, "Get(empty key) should panic")
	require.Panics(t, func() { store.Has(nil) }, "Has(nil key) should panic")
	require.Panics(t, func() { store.Has([]byte{}) }, "Has(empty key) should panic")
	require.Panics(t, func() { store.Set(nil, []byte("value")) }, "Set(nil key) should panic")
	require.Panics(t, func() { store.Set([]byte{}, []byte("value")) }, "Set(empty key) should panic")
	require.Panics(t, func() { store.Set([]byte("key"), nil) }, "Set(nil value) should panic")

	// Inject a failing index bucket to exercise the error path in Set
	sub := store.(*substore)
	sub.indexBucket = rwCrudFails{sub.indexBucket, nil}
	require.Panics(t, func() {
		store.Set([]byte("key"), []byte("value"))
	}, "Set() when index fails should panic")
}
// TestConstructors exercises NewStore's failure modes: InitialVersion ahead of
// saved versions, failing Versions() calls, open writers on the DB, mismatched
// version histories between the state and SC DBs, and missing/unreadable root
// hashes in previously committed data.
func TestConstructors(t *testing.T) {
	db := memdb.NewDB()

	store, err := NewStore(db, simpleStoreConfig(t))
	require.NoError(t, err)
	_ = store.GetKVStore(skey_1)
	store.Commit()
	require.NoError(t, store.Close())

	t.Run("fail to load if InitialVersion > lowest existing version", func(t *testing.T) {
		opts := StoreConfig{InitialVersion: 5, Pruning: types.PruneNothing}
		store, err = NewStore(db, opts)
		require.Error(t, err)
		db.Close()
	})

	t.Run("can't load store when db.Versions fails", func(t *testing.T) {
		store, err = NewStore(dbVersionsFails{memdb.NewDB()}, DefaultStoreConfig())
		require.Error(t, err)
		store, err = NewStore(db, StoreConfig{StateCommitmentDB: dbVersionsFails{memdb.NewDB()}})
		require.Error(t, err)
	})

	db = memdb.NewDB()
	merkledb := memdb.NewDB()
	w := db.Writer()
	t.Run("can't use a DB with open writers", func(t *testing.T) {
		store, err = NewStore(db, DefaultStoreConfig())
		require.Error(t, err)
		w.Discard()
		w = merkledb.Writer()
		store, err = NewStore(db, StoreConfig{StateCommitmentDB: merkledb})
		require.Error(t, err)
		w.Discard()
	})

	t.Run("can't use DBs with different version history", func(t *testing.T) {
		merkledb.SaveNextVersion()
		store, err = NewStore(db, StoreConfig{StateCommitmentDB: merkledb})
		require.Error(t, err)
	})
	merkledb.Close()

	t.Run("can't load existing store if we can't access root hash", func(t *testing.T) {
		store, err = NewStore(db, simpleStoreConfig(t))
		require.NoError(t, err)
		store.Commit()
		require.NoError(t, store.Close())
		// ...whether because root is missing
		w = db.Writer()
		s1RootKey := append(contentPrefix, substorePrefix(skey_1.Name())...)
		s1RootKey = append(s1RootKey, merkleRootKey...)
		w.Delete(s1RootKey)
		w.Commit()
		db.SaveNextVersion()
		store, err = NewStore(db, DefaultStoreConfig())
		require.Error(t, err)
		// ...or, because of an error
		store, err = NewStore(dbRWCrudFails{db}, DefaultStoreConfig())
		require.Error(t, err)
	})
}
// TestIterators checks forward, reverse, and prefix iteration over a substore,
// including boundary conditions (start == end, start > end) and the required
// panics for empty-key bounds.
func TestIterators(t *testing.T) {
	_, store := newSubStoreWithData(t, memdb.NewDB(), map[string]string{
		string([]byte{0x00}):       "0",
		string([]byte{0x00, 0x00}): "0 0",
		string([]byte{0x00, 0x01}): "0 1",
		string([]byte{0x00, 0x02}): "0 2",
		string([]byte{0x01}):       "1",
	})

	// testCase asserts the iterator yields exactly the expected values in order.
	var testCase = func(t *testing.T, iter types.Iterator, expected []string) {
		var i int
		for i = 0; iter.Valid(); iter.Next() {
			expectedValue := expected[i]
			value := iter.Value()
			require.EqualValues(t, string(value), expectedValue)
			i++
		}
		require.Equal(t, len(expected), i)
	}

	testCase(t, store.Iterator(nil, nil),
		[]string{"0", "0 0", "0 1", "0 2", "1"})
	testCase(t, store.Iterator([]byte{0}, nil),
		[]string{"0", "0 0", "0 1", "0 2", "1"})
	testCase(t, store.Iterator([]byte{0}, []byte{0, 1}),
		[]string{"0", "0 0"})
	testCase(t, store.Iterator([]byte{0}, []byte{1}),
		[]string{"0", "0 0", "0 1", "0 2"})
	testCase(t, store.Iterator([]byte{0, 1}, []byte{1}),
		[]string{"0 1", "0 2"})
	testCase(t, store.Iterator(nil, []byte{1}),
		[]string{"0", "0 0", "0 1", "0 2"})
	testCase(t, store.Iterator([]byte{0}, []byte{0}), []string{}) // start = end
	testCase(t, store.Iterator([]byte{1}, []byte{0}), []string{}) // start > end

	testCase(t, store.ReverseIterator(nil, nil),
		[]string{"1", "0 2", "0 1", "0 0", "0"})
	testCase(t, store.ReverseIterator([]byte{0}, nil),
		[]string{"1", "0 2", "0 1", "0 0", "0"})
	testCase(t, store.ReverseIterator([]byte{0}, []byte{0, 1}),
		[]string{"0 0", "0"})
	testCase(t, store.ReverseIterator([]byte{0}, []byte{1}),
		[]string{"0 2", "0 1", "0 0", "0"})
	testCase(t, store.ReverseIterator([]byte{0, 1}, []byte{1}),
		[]string{"0 2", "0 1"})
	testCase(t, store.ReverseIterator(nil, []byte{1}),
		[]string{"0 2", "0 1", "0 0", "0"})
	testCase(t, store.ReverseIterator([]byte{0}, []byte{0}), []string{}) // start = end
	testCase(t, store.ReverseIterator([]byte{1}, []byte{0}), []string{}) // start > end

	testCase(t, types.KVStorePrefixIterator(store, []byte{0}),
		[]string{"0", "0 0", "0 1", "0 2"})
	testCase(t, types.KVStoreReversePrefixIterator(store, []byte{0}),
		[]string{"0 2", "0 1", "0 0", "0"})

	require.Panics(t, func() { store.Iterator([]byte{}, nil) }, "Iterator(empty key) should panic")
	require.Panics(t, func() { store.Iterator(nil, []byte{}) }, "Iterator(empty key) should panic")
	require.Panics(t, func() { store.ReverseIterator([]byte{}, nil) }, "Iterator(empty key) should panic")
	require.Panics(t, func() { store.ReverseIterator(nil, []byte{}) }, "Iterator(empty key) should panic")
}
// TestCommit exercises Store.Commit: commit IDs and Merkle hashes evolve with
// writes, LastCommitID tracks the latest commit, and the store recovers to a
// consistent state after injected failures at each stage of the commit flow.
func TestCommit(t *testing.T) {
	testBasic := func(opts StoreConfig) {
		db := memdb.NewDB()
		store, err := NewStore(db, opts)
		require.NoError(t, err)
		require.Zero(t, store.LastCommitID())
		idNew := store.Commit()

		// Adding one record changes the hash
		s1 := store.GetKVStore(skey_1)
		s1.Set([]byte{0}, []byte{0})
		idOne := store.Commit()
		require.Equal(t, idNew.Version+1, idOne.Version)
		require.NotEqual(t, idNew.Hash, idOne.Hash)

		// Hash of emptied store is same as new store
		s1.Delete([]byte{0})
		idEmptied := store.Commit()
		require.Equal(t, idNew.Hash, idEmptied.Hash)

		previd := idOne
		for i := byte(1); i < 5; i++ {
			s1.Set([]byte{i}, []byte{i})
			id := store.Commit()
			lastid := store.LastCommitID()
			require.Equal(t, id.Hash, lastid.Hash)
			require.Equal(t, id.Version, lastid.Version)
			require.NotEqual(t, previd.Hash, id.Hash)
			require.NotEqual(t, previd.Version, id.Version)
			// Fix: advance previd so each commit is compared with its
			// immediate predecessor; previously every iteration compared
			// against idOne, weakening the assertions.
			previd = id
		}
	}

	basicOpts := simpleStoreConfig(t)
	basicOpts.Pruning = types.PruneNothing
	t.Run("sanity tests for Merkle hashing", func(t *testing.T) {
		testBasic(basicOpts)
	})
	t.Run("sanity tests for Merkle hashing with separate DBs", func(t *testing.T) {
		basicOpts.StateCommitmentDB = memdb.NewDB()
		testBasic(basicOpts)
	})

	// test that we can recover from a failed commit
	testFailedCommit := func(t *testing.T,
		store *Store,
		db dbm.DBConnection,
		opts StoreConfig) {
		if db == nil {
			db = store.stateDB
		}
		s1 := store.GetKVStore(skey_1)
		s1.Set([]byte{0}, []byte{0})
		require.Panics(t, func() { store.Commit() })
		require.NoError(t, store.Close())

		// No version should be saved in the backing DB(s)
		versions, _ := db.Versions()
		require.Equal(t, 0, versions.Count())
		if store.StateCommitmentDB != nil {
			versions, _ = store.StateCommitmentDB.Versions()
			require.Equal(t, 0, versions.Count())
		}

		// The store should now be reloaded successfully
		store, err := NewStore(db, opts)
		require.NoError(t, err)
		s1 = store.GetKVStore(skey_1)
		require.Nil(t, s1.Get([]byte{0}))
		require.NoError(t, store.Close())
	}

	opts := simpleStoreConfig(t)
	opts.Pruning = types.PruneNothing

	// Ensure Store's commit is rolled back in each failure case...
	t.Run("recover after failed Commit", func(t *testing.T) {
		store, err := NewStore(dbRWCommitFails{memdb.NewDB()}, opts)
		require.NoError(t, err)
		testFailedCommit(t, store, nil, opts)
	})
	// If SaveVersion and Revert both fail during Store.Commit, the DB will contain
	// committed data that belongs to no version: non-atomic behavior from the Store user's perspective.
	// So, that data must be reverted when the store is reloaded.
	t.Run("recover after failed SaveVersion and Revert", func(t *testing.T) {
		var db dbm.DBConnection
		db = dbSaveVersionFails{memdb.NewDB()}
		// Revert should succeed in initial NewStore call, but fail during Commit
		db = dbRevertFails{db, []bool{false, true}}
		store, err := NewStore(db, opts)
		require.NoError(t, err)
		testFailedCommit(t, store, nil, opts)
	})
	// Repeat the above for StateCommitmentDB
	t.Run("recover after failed StateCommitmentDB Commit", func(t *testing.T) {
		opts.StateCommitmentDB = dbRWCommitFails{memdb.NewDB()}
		store, err := NewStore(memdb.NewDB(), opts)
		require.NoError(t, err)
		testFailedCommit(t, store, nil, opts)
	})
	t.Run("recover after failed StateCommitmentDB SaveVersion and Revert", func(t *testing.T) {
		var db dbm.DBConnection
		db = dbSaveVersionFails{memdb.NewDB()}
		db = dbRevertFails{db, []bool{false, true}}
		opts.StateCommitmentDB = db
		store, err := NewStore(memdb.NewDB(), opts)
		require.NoError(t, err)
		testFailedCommit(t, store, nil, opts)
	})

	opts = simpleStoreConfig(t)
	t.Run("recover after stateDB.Versions error triggers failure", func(t *testing.T) {
		db := memdb.NewDB()
		store, err := NewStore(db, opts)
		require.NoError(t, err)
		store.stateDB = dbVersionsFails{store.stateDB}
		testFailedCommit(t, store, db, opts)
	})
	t.Run("recover after stateTxn.Set error triggers failure", func(t *testing.T) {
		store, err := NewStore(memdb.NewDB(), opts)
		require.NoError(t, err)
		store.stateTxn = rwCrudFails{store.stateTxn, merkleRootKey}
		testFailedCommit(t, store, nil, opts)
	})

	t.Run("stateDB.DeleteVersion error triggers failure", func(t *testing.T) {
		opts.StateCommitmentDB = memdb.NewDB()
		store, err := NewStore(memdb.NewDB(), opts)
		require.NoError(t, err)
		store.stateCommitmentTxn = rwCommitFails{store.stateCommitmentTxn}
		store.stateDB = dbDeleteVersionFails{store.stateDB}
		require.Panics(t, func() { store.Commit() })
	})
	t.Run("height overflow triggers failure", func(t *testing.T) {
		opts.StateCommitmentDB = nil
		opts.InitialVersion = math.MaxInt64
		opts.Pruning = types.PruneNothing
		store, err := NewStore(memdb.NewDB(), opts)
		require.NoError(t, err)
		require.Equal(t, int64(math.MaxInt64), store.Commit().Version)
		require.Panics(t, func() { store.Commit() })
		require.Equal(t, int64(math.MaxInt64), store.LastCommitID().Version) // version history not modified
	})
	t.Run("first commit version matches InitialVersion", func(t *testing.T) {
		opts = simpleStoreConfig(t)
		opts.InitialVersion = 5
		opts.Pruning = types.PruneNothing
		opts.StateCommitmentDB = memdb.NewDB()
		store, err := NewStore(memdb.NewDB(), opts)
		require.NoError(t, err)
		require.Equal(t, int64(5), store.Commit().Version)
	})

	// test improbable failures to fill out test coverage
	opts = simpleStoreConfig(t)
	store, err := NewStore(memdb.NewDB(), opts)
	require.NoError(t, err)
	store.Commit()
	store.stateDB = dbVersionsFails{store.stateDB}
	require.Panics(t, func() { store.LastCommitID() })

	opts = simpleStoreConfig(t)
	opts.StateCommitmentDB = memdb.NewDB()
	store, err = NewStore(memdb.NewDB(), opts)
	require.NoError(t, err)
	store.Commit()
	store.stateTxn = rwCrudFails{store.stateTxn, nil}
	require.Panics(t, func() { store.LastCommitID() })
}
// sliceToSet converts a list of version numbers into a set, for O(1)
// membership checks in the pruning assertions below.
func sliceToSet(slice []uint64) map[uint64]struct{} {
	set := make(map[uint64]struct{}, len(slice))
	for _, version := range slice {
		set[version] = struct{}{}
	}
	return set
}
// TestPruning verifies that committed versions are pruned from both the state
// DB and the separate state-commitment DB according to PruningOptions, and
// that the pruning interval only drops history when commits pass checkpoints.
func TestPruning(t *testing.T) {
	// Save versions up to 10 and verify pruning at final commit
	testCases := []struct {
		types.PruningOptions
		kept []uint64
	}{
		{types.PruningOptions{2, 4, 10}, []uint64{4, 8, 9, 10}},
		{types.PruningOptions{0, 4, 10}, []uint64{4, 8, 10}},
		{types.PruneEverything, []uint64{10}},
		{types.PruneNothing, []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
	}
	for tci, tc := range testCases {
		// Separate state and state-commitment DBs: both must be pruned.
		dbs := []dbm.DBConnection{memdb.NewDB(), memdb.NewDB()}
		opts := simpleStoreConfig(t)
		opts.Pruning = tc.PruningOptions
		opts.StateCommitmentDB = dbs[1]
		store, err := NewStore(dbs[0], opts)
		require.NoError(t, err)

		// Commit versions 1..10, writing one key per version.
		s1 := store.GetKVStore(skey_1)
		for i := byte(1); i <= 10; i++ {
			s1.Set([]byte{i}, []byte{i})
			cid := store.Commit()
			latest := uint64(i)
			require.Equal(t, latest, uint64(cid.Version))
		}

		// Exactly the expected versions must survive in each backing DB.
		for _, db := range dbs {
			versions, err := db.Versions()
			require.NoError(t, err)
			kept := sliceToSet(tc.kept)
			for v := uint64(1); v <= 10; v++ {
				_, has := kept[v]
				require.Equal(t, has, versions.Exists(v), "Version = %v; tc #%d", v, tci)
			}
		}
	}

	// Test pruning interval
	// Save up to 20th version while checking history at specific version checkpoints
	testCheckPoints := map[uint64][]uint64{
		5:  []uint64{1, 2, 3, 4, 5},
		10: []uint64{5, 10},
		15: []uint64{5, 10, 11, 12, 13, 14, 15},
		20: []uint64{5, 10, 15, 20},
	}
	db := memdb.NewDB()
	opts := simpleStoreConfig(t)
	opts.Pruning = types.PruningOptions{0, 5, 10}
	store, err := NewStore(db, opts)
	require.NoError(t, err)
	for i := byte(1); i <= 20; i++ {
		store.GetKVStore(skey_1).Set([]byte{i}, []byte{i})
		cid := store.Commit()
		latest := uint64(i)
		require.Equal(t, latest, uint64(cid.Version))
		// Only check history at the declared checkpoint heights.
		kept, has := testCheckPoints[latest]
		if !has {
			continue
		}
		versions, err := db.Versions()
		require.NoError(t, err)
		keptMap := sliceToSet(kept)
		for v := uint64(1); v <= latest; v++ {
			_, has := keptMap[v]
			require.Equal(t, has, versions.Exists(v), "Version = %v; tc #%d", v, i)
		}
	}
}
// queryPath builds an ABCI query path for the given substore key and
// endpoint suffix (e.g. "/key" or "/subspace").
func queryPath(skey types.StoreKey, endp string) string {
	return "/" + skey.Name() + endp
}
// TestQuery exercises the ABCI Query interface over the /key and /subspace
// endpoints: height resolution (explicit, default, invalid), proof
// generation, and error cases.
func TestQuery(t *testing.T) {
	k1, v1 := []byte("k1"), []byte("v1")
	k2, v2 := []byte("k2"), []byte("v2")
	v3 := []byte("v3")
	ksub := []byte("k")
	// Expected /subspace responses are marshaled kv.Pairs.
	KVs0 := kv.Pairs{}
	KVs1 := kv.Pairs{
		Pairs: []kv.Pair{
			{Key: k1, Value: v1},
			{Key: k2, Value: v2},
		},
	}
	KVs2 := kv.Pairs{
		Pairs: []kv.Pair{
			{Key: k1, Value: v3},
			{Key: k2, Value: v2},
		},
	}
	valExpSubEmpty, err := KVs0.Marshal()
	require.NoError(t, err)
	valExpSub1, err := KVs1.Marshal()
	require.NoError(t, err)
	valExpSub2, err := KVs2.Marshal()
	require.NoError(t, err)

	store, err := NewStore(memdb.NewDB(), simpleStoreConfig(t))
	require.NoError(t, err)
	cid := store.Commit()
	ver := cid.Version
	query := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k1, Height: ver}
	querySub := abci.RequestQuery{Path: queryPath(skey_1, "/subspace"), Data: ksub, Height: ver}
	queryHeight0 := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k1}

	// query subspace before anything set
	qres := store.Query(querySub)
	require.True(t, qres.IsOK(), qres.Log)
	require.Equal(t, valExpSubEmpty, qres.Value)

	sub := store.GetKVStore(skey_1)
	require.NotNil(t, sub)
	// set data
	sub.Set(k1, v1)
	sub.Set(k2, v2)

	t.Run("basic queries", func(t *testing.T) {
		// set data without commit, doesn't show up
		qres = store.Query(query)
		require.True(t, qres.IsOK(), qres.Log)
		require.Nil(t, qres.Value)

		// commit it, but still don't see on old version
		cid = store.Commit()
		qres = store.Query(query)
		require.True(t, qres.IsOK(), qres.Log)
		require.Nil(t, qres.Value)

		// but yes on the new version
		query.Height = cid.Version
		qres = store.Query(query)
		require.True(t, qres.IsOK(), qres.Log)
		require.Equal(t, v1, qres.Value)
		// and for the subspace
		querySub.Height = cid.Version
		qres = store.Query(querySub)
		require.True(t, qres.IsOK(), qres.Log)
		require.Equal(t, valExpSub1, qres.Value)

		// modify
		sub.Set(k1, v3)
		cid = store.Commit()

		// query will return old values, as height is fixed
		qres = store.Query(query)
		require.True(t, qres.IsOK(), qres.Log)
		require.Equal(t, v1, qres.Value)

		// update to latest height in the query and we are happy
		query.Height = cid.Version
		qres = store.Query(query)
		require.True(t, qres.IsOK(), qres.Log)
		require.Equal(t, v3, qres.Value)

		// try other key
		query2 := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k2, Height: cid.Version}
		qres = store.Query(query2)
		require.True(t, qres.IsOK(), qres.Log)
		require.Equal(t, v2, qres.Value)
		// and for the subspace
		querySub.Height = cid.Version
		qres = store.Query(querySub)
		require.True(t, qres.IsOK(), qres.Log)
		require.Equal(t, valExpSub2, qres.Value)

		// default (height 0) will show latest-1
		qres = store.Query(queryHeight0)
		require.True(t, qres.IsOK(), qres.Log)
		require.Equal(t, v1, qres.Value)
	})

	// querying an empty store will fail
	store2, err := NewStore(memdb.NewDB(), simpleStoreConfig(t))
	require.NoError(t, err)
	qres = store2.Query(queryHeight0)
	require.True(t, qres.IsErr())

	// default shows latest, if latest-1 does not exist
	store2.GetKVStore(skey_1).Set(k1, v1)
	store2.Commit()
	qres = store2.Query(queryHeight0)
	require.True(t, qres.IsOK(), qres.Log)
	require.Equal(t, v1, qres.Value)
	store2.Close()

	t.Run("failed queries", func(t *testing.T) {
		// artificial error cases for coverage (should never happen with prescribed usage)
		// ensure that height overflow triggers an error
		require.NoError(t, err)
		store2.stateDB = dbVersionsIs{store2.stateDB, dbm.NewVersionManager([]uint64{uint64(math.MaxInt64) + 1})}
		qres = store2.Query(queryHeight0)
		require.True(t, qres.IsErr())

		// failure to access versions triggers an error
		// NOTE(review): this wraps store.stateDB rather than store2.stateDB;
		// Versions fails regardless of the wrapped DB, but confirm intent.
		store2.stateDB = dbVersionsFails{store.stateDB}
		qres = store2.Query(queryHeight0)
		require.True(t, qres.IsErr())
		store2.Close()

		// query with a nil or empty key fails
		badquery := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: []byte{}}
		qres = store.Query(badquery)
		require.True(t, qres.IsErr())
		badquery.Data = nil
		qres = store.Query(badquery)
		require.True(t, qres.IsErr())
		// querying an invalid height will fail
		badquery = abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k1, Height: store.LastCommitID().Version + 1}
		qres = store.Query(badquery)
		require.True(t, qres.IsErr())
		// or an invalid path
		badquery = abci.RequestQuery{Path: queryPath(skey_1, "/badpath"), Data: k1}
		qres = store.Query(badquery)
		require.True(t, qres.IsErr())
	})

	t.Run("queries with proof", func(t *testing.T) {
		// test that proofs are generated with single and separate DBs
		testProve := func() {
			queryProve0 := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k1, Prove: true}
			qres = store.Query(queryProve0)
			require.True(t, qres.IsOK(), qres.Log)
			require.Equal(t, v1, qres.Value)
			require.NotNil(t, qres.ProofOps)
		}
		testProve()
		store.Close()

		// Repeat with a separate state-commitment DB.
		opts := simpleStoreConfig(t)
		opts.StateCommitmentDB = memdb.NewDB()
		store, err = NewStore(memdb.NewDB(), opts)
		require.NoError(t, err)
		store.GetKVStore(skey_1).Set(k1, v1)
		store.Commit()
		testProve()
		store.Close()
	})
}
// TestStoreConfig verifies substore registration rules: user-registrable
// types only, and no prefix conflicts between registered keys.
func TestStoreConfig(t *testing.T) {
	opts := DefaultStoreConfig()
	// Registration must reject non-user-facing substore types.
	for _, badType := range []types.StoreType{types.StoreTypeDB, types.StoreTypeSMT} {
		require.Error(t, opts.RegisterSubstore(skey_1.Name(), badType))
	}
	// Valid registrations of distinct keys succeed...
	require.NoError(t, opts.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent))
	require.NoError(t, opts.RegisterSubstore(skey_2.Name(), types.StoreTypeMemory))
	require.NoError(t, opts.RegisterSubstore(skey_3b.Name(), types.StoreTypeTransient))
	// ...but keys that prefix-conflict with already-registered ones fail.
	for _, conflicting := range []types.StoreKey{skey_1b, skey_2b, skey_3} {
		require.Error(t, opts.RegisterSubstore(conflicting.Name(), types.StoreTypePersistent))
	}
}
// TestMultiStoreBasic smoke-tests Set/Get/Delete on a single registered
// persistent substore.
func TestMultiStoreBasic(t *testing.T) {
	opts := DefaultStoreConfig()
	err := opts.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent)
	require.NoError(t, err)
	db := memdb.NewDB()
	store, err := NewStore(db, opts)
	require.NoError(t, err)

	// Renamed store_1 -> s1: Go naming avoids underscores in identifiers.
	s1 := store.GetKVStore(skey_1)
	require.NotNil(t, s1)
	s1.Set([]byte{0}, []byte{0})
	val := s1.Get([]byte{0})
	require.Equal(t, []byte{0}, val)
	s1.Delete([]byte{0})
	// A deleted key reads back as nil.
	val = s1.Get([]byte{0})
	require.Equal(t, []byte(nil), val)
}
// TestGetVersion checks that GetVersion returns a read-only view of a
// committed version that stays isolated from writes and deletes made to the
// live store afterward.
func TestGetVersion(t *testing.T) {
	db := memdb.NewDB()
	opts := storeConfig123(t)
	store, err := NewStore(db, opts)
	require.NoError(t, err)
	cid := store.Commit()
	view, err := store.GetVersion(cid.Version)
	require.NoError(t, err)
	subview := view.GetKVStore(skey_1)
	require.NotNil(t, subview)
	// version view should be read-only
	require.Panics(t, func() { subview.Set([]byte{1}, []byte{1}) })
	require.Panics(t, func() { subview.Delete([]byte{0}) })
	// nonexistent version shouldn't be accessible
	view, err = store.GetVersion(cid.Version + 1)
	require.Equal(t, ErrVersionDoesNotExist, err)

	substore := store.GetKVStore(skey_1)
	require.NotNil(t, substore)
	substore.Set([]byte{0}, []byte{0})
	// setting a value shouldn't affect old version
	require.False(t, subview.Has([]byte{0}))

	cid = store.Commit()
	view, err = store.GetVersion(cid.Version)
	require.NoError(t, err)
	subview = view.GetKVStore(skey_1)
	require.NotNil(t, subview)
	// deleting a value shouldn't affect old version
	substore.Delete([]byte{0})
	require.Equal(t, []byte{0}, subview.Get([]byte{0}))
}
// TestMultiStoreMigration exercises schema migrations via StoreUpgrades:
// adding, renaming, and deleting substores on reload; reloading with the
// post-migration schema; and viewing pre-migration versions with the old
// schema.
func TestMultiStoreMigration(t *testing.T) {
	db := memdb.NewDB()
	opts := storeConfig123(t)
	store, err := NewStore(db, opts)
	require.NoError(t, err)

	// write some data in all stores
	k1, v1 := []byte("first"), []byte("store")
	s1 := store.GetKVStore(skey_1)
	require.NotNil(t, s1)
	s1.Set(k1, v1)

	k2, v2 := []byte("second"), []byte("restore")
	s2 := store.GetKVStore(skey_2)
	require.NotNil(t, s2)
	s2.Set(k2, v2)

	k3, v3 := []byte("third"), []byte("dropped")
	s3 := store.GetKVStore(skey_3)
	require.NotNil(t, s3)
	s3.Set(k3, v3)

	// skey_4 is not in the schema yet; accessing it must panic.
	k4, v4 := []byte("fourth"), []byte("created")
	require.Panics(t, func() { store.GetKVStore(skey_4) })

	cid := store.Commit()
	require.NoError(t, store.Close())

	var migratedID types.CommitID

	// Load without changes and make sure it is sensible
	store, err = NewStore(db, opts)
	require.NoError(t, err)

	// let's query data to see it was saved properly
	s2 = store.GetKVStore(skey_2)
	require.NotNil(t, s2)
	require.Equal(t, v2, s2.Get(k2))
	require.NoError(t, store.Close())

	t.Run("basic migration", func(t *testing.T) {
		// now, let's load with upgrades...
		opts.Upgrades = []types.StoreUpgrades{
			types.StoreUpgrades{
				Added: []string{skey_4.Name()},
				Renamed: []types.StoreRename{{
					OldKey: skey_2.Name(),
					NewKey: skey_2b.Name(),
				}},
				Deleted: []string{skey_3.Name()},
			},
		}
		store, err = NewStore(db, opts)
		require.Nil(t, err)

		// s1 was not changed
		s1 = store.GetKVStore(skey_1)
		require.NotNil(t, s1)
		require.Equal(t, v1, s1.Get(k1))

		// store2 is no longer valid
		require.Panics(t, func() { store.GetKVStore(skey_2) })
		// store2b has the old data
		rs2 := store.GetKVStore(skey_2b)
		require.NotNil(t, rs2)
		require.Equal(t, v2, rs2.Get(k2))

		// store3 is gone
		require.Panics(t, func() { s3 = store.GetKVStore(skey_3) })

		// store4 is valid
		s4 := store.GetKVStore(skey_4)
		require.NotNil(t, s4)
		// The newly-added substore starts out empty.
		values := 0
		it := s4.Iterator(nil, nil)
		for ; it.Valid(); it.Next() {
			values += 1
		}
		require.Zero(t, values)
		require.NoError(t, it.Close())

		// write something inside store4
		s4.Set(k4, v4)

		// store this migrated data, and load it again without migrations
		migratedID = store.Commit()
		require.Equal(t, migratedID.Version, int64(2))
		require.NoError(t, store.Close())
	})

	t.Run("reload after migrations", func(t *testing.T) {
		// fail to load the migrated store with the old schema
		store, err = NewStore(db, storeConfig123(t))
		require.Error(t, err)

		// pass in a schema reflecting the migrations
		migratedOpts := DefaultStoreConfig()
		err = migratedOpts.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent)
		require.NoError(t, err)
		err = migratedOpts.RegisterSubstore(skey_2b.Name(), types.StoreTypePersistent)
		require.NoError(t, err)
		err = migratedOpts.RegisterSubstore(skey_4.Name(), types.StoreTypePersistent)
		require.NoError(t, err)
		store, err = NewStore(db, migratedOpts)
		require.Nil(t, err)
		require.Equal(t, migratedID, store.LastCommitID())

		// query this new store
		rl1 := store.GetKVStore(skey_1)
		require.NotNil(t, rl1)
		require.Equal(t, v1, rl1.Get(k1))

		rl2 := store.GetKVStore(skey_2b)
		require.NotNil(t, rl2)
		require.Equal(t, v2, rl2.Get(k2))

		rl4 := store.GetKVStore(skey_4)
		require.NotNil(t, rl4)
		require.Equal(t, v4, rl4.Get(k4))
	})

	t.Run("load view from before migrations", func(t *testing.T) {
		// load and check a view of the store at first commit
		view, err := store.GetVersion(cid.Version)
		require.NoError(t, err)

		// The old schema applies: skey_2 and skey_3 are visible, skey_4 is not.
		s1 = view.GetKVStore(skey_1)
		require.NotNil(t, s1)
		require.Equal(t, v1, s1.Get(k1))

		s2 = view.GetKVStore(skey_2)
		require.NotNil(t, s2)
		require.Equal(t, v2, s2.Get(k2))

		s3 = view.GetKVStore(skey_3)
		require.NotNil(t, s3)
		require.Equal(t, v3, s3.Get(k3))

		require.Panics(t, func() {
			view.GetKVStore(skey_4)
		})
	})
}
// TestTrace verifies that KVStore operations on persistent, memory, and
// transient substores emit the expected JSON trace records when tracing is
// enabled, and that tracing can be toggled off.
func TestTrace(t *testing.T) {
	key, value := []byte("test-key"), []byte("test-value")
	tctx := types.TraceContext(map[string]interface{}{"blockHeight": 64})

	// Expected trace records; keys/values appear base64-encoded.
	// (Renamed from expected_Set etc.: Go naming avoids underscores.)
	expectedSet := "{\"operation\":\"write\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"dGVzdC12YWx1ZQ==\",\"metadata\":{\"blockHeight\":64}}\n"
	expectedGet := "{\"operation\":\"read\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"dGVzdC12YWx1ZQ==\",\"metadata\":{\"blockHeight\":64}}\n"
	expectedGetMissing := "{\"operation\":\"read\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n"
	expectedDelete := "{\"operation\":\"delete\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n"
	expectedIterKey := "{\"operation\":\"iterKey\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n"
	expectedIterValue := "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dGVzdC12YWx1ZQ==\",\"metadata\":{\"blockHeight\":64}}\n"

	db := memdb.NewDB()
	opts := simpleStoreConfig(t)
	require.NoError(t, opts.RegisterSubstore(skey_2.Name(), types.StoreTypeMemory))
	require.NoError(t, opts.RegisterSubstore(skey_3.Name(), types.StoreTypeTransient))
	store, err := NewStore(db, opts)
	require.NoError(t, err)

	// A trace context alone does not enable tracing; a tracer writer does.
	store.SetTraceContext(tctx)
	require.False(t, store.TracingEnabled())
	var buf bytes.Buffer
	store.SetTracer(&buf)
	require.True(t, store.TracingEnabled())

	// Each substore type must emit identical trace records.
	for _, skey := range []types.StoreKey{skey_1, skey_2, skey_3} {
		buf.Reset()
		store.GetKVStore(skey).Get(key)
		require.Equal(t, expectedGetMissing, buf.String())
		buf.Reset()
		store.GetKVStore(skey).Set(key, value)
		require.Equal(t, expectedSet, buf.String())
		buf.Reset()
		require.Equal(t, value, store.GetKVStore(skey).Get(key))
		require.Equal(t, expectedGet, buf.String())
		iter := store.GetKVStore(skey).Iterator(nil, nil)
		buf.Reset()
		require.Equal(t, key, iter.Key())
		require.Equal(t, expectedIterKey, buf.String())
		buf.Reset()
		require.Equal(t, value, iter.Value())
		require.Equal(t, expectedIterValue, buf.String())
		require.NoError(t, iter.Close())
		buf.Reset()
		store.GetKVStore(skey).Delete(key)
		require.Equal(t, expectedDelete, buf.String())
	}
	store.SetTracer(nil)
	require.False(t, store.TracingEnabled())
	require.NoError(t, store.Close())
}
// TestListeners verifies that WriteListeners registered per store key receive
// a StoreKVPair for every Set and Delete on persistent, memory, and transient
// substores.
func TestListeners(t *testing.T) {
	kvPairs := []types.KVPair{
		{Key: []byte{1}, Value: []byte("v1")},
		{Key: []byte{2}, Value: []byte("v2")},
		{Key: []byte{3}, Value: []byte("v3")},
	}

	testCases := []struct {
		key   []byte
		value []byte
		skey  types.StoreKey
	}{
		{
			key:   kvPairs[0].Key,
			value: kvPairs[0].Value,
			skey:  skey_1,
		},
		{
			key:   kvPairs[1].Key,
			value: kvPairs[1].Value,
			skey:  skey_2,
		},
		{
			key:   kvPairs[2].Key,
			value: kvPairs[2].Value,
			skey:  skey_3,
		},
	}

	var interfaceRegistry = codecTypes.NewInterfaceRegistry()
	var marshaller = codec.NewProtoCodec(interfaceRegistry)

	db := memdb.NewDB()
	opts := simpleStoreConfig(t)
	require.NoError(t, opts.RegisterSubstore(skey_2.Name(), types.StoreTypeMemory))
	require.NoError(t, opts.RegisterSubstore(skey_3.Name(), types.StoreTypeTransient))

	store, err := NewStore(db, opts)
	require.NoError(t, err)

	for i, tc := range testCases {
		// Each case registers a listener that writes length-prefixed
		// StoreKVPair records into buf.
		var buf bytes.Buffer
		listener := types.NewStoreKVPairWriteListener(&buf, marshaller)
		store.AddListeners(tc.skey, []types.WriteListener{listener})
		require.True(t, store.ListeningEnabled(tc.skey))

		// Set case
		expected := types.StoreKVPair{
			Key:      tc.key,
			Value:    tc.value,
			StoreKey: tc.skey.Name(),
			Delete:   false,
		}
		var kvpair types.StoreKVPair
		buf.Reset()
		store.GetKVStore(tc.skey).Set(tc.key, tc.value)
		require.NoError(t, marshaller.UnmarshalLengthPrefixed(buf.Bytes(), &kvpair))
		require.Equal(t, expected, kvpair, i)

		// Delete case
		expected = types.StoreKVPair{
			Key:      tc.key,
			Value:    nil,
			StoreKey: tc.skey.Name(),
			Delete:   true,
		}
		kvpair = types.StoreKVPair{}
		buf.Reset()
		store.GetKVStore(tc.skey).Delete(tc.key)
		require.NoError(t, marshaller.UnmarshalLengthPrefixed(buf.Bytes(), &kvpair))
		require.Equal(t, expected, kvpair, i)
	}
	require.NoError(t, store.Close())
}

117
store/v2/multi/sub_store.go Normal file
View File

@ -0,0 +1,117 @@
package root
import (
"crypto/sha256"
"io"
"sync"
dbm "github.com/cosmos/cosmos-sdk/db"
dbutil "github.com/cosmos/cosmos-sdk/internal/db"
"github.com/cosmos/cosmos-sdk/store/cachekv"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/tracekv"
"github.com/cosmos/cosmos-sdk/store/types"
)
// Get implements KVStore.
// Reads the value stored under key in this substore's data bucket, holding
// the root store's read lock for the duration; storage errors panic.
func (s *substore) Get(key []byte) []byte {
	s.root.mtx.RLock()
	defer s.root.mtx.RUnlock()
	value, err := s.dataBucket.Get(key)
	if err == nil {
		return value
	}
	panic(err)
}
// Has implements KVStore.
// Reports whether key exists in the data bucket, under the root store's read
// lock; storage errors panic.
func (s *substore) Has(key []byte) bool {
	s.root.mtx.RLock()
	defer s.root.mtx.RUnlock()
	ok, err := s.dataBucket.Has(key)
	if err == nil {
		return ok
	}
	panic(err)
}
// Set implements KVStore.
// Under the root store's write lock: writes key/value to the data bucket,
// updates the state commitment (SMT) store, and records sha256(key) -> key
// in the index bucket (presumably to map hashed keys back to their
// preimages — confirm against readers of indexBucket). Storage errors panic.
func (s *substore) Set(key, value []byte) {
	s.root.mtx.Lock()
	defer s.root.mtx.Unlock()
	err := s.dataBucket.Set(key, value)
	if err != nil {
		panic(err)
	}
	s.stateCommitmentStore.Set(key, value)
	khash := sha256.Sum256(key)
	err = s.indexBucket.Set(khash[:], key)
	if err != nil {
		panic(err)
	}
}
// Delete implements KVStore.
// Under the root store's write lock, removes key from the commitment store,
// the hash index, and the data bucket.
// NOTE(review): errors from the index and data deletes are discarded —
// confirm best-effort deletion is intended (Set's symmetric writes panic).
func (s *substore) Delete(key []byte) {
	khash := sha256.Sum256(key)
	s.root.mtx.Lock()
	defer s.root.mtx.Unlock()
	s.stateCommitmentStore.Delete(key)
	_ = s.indexBucket.Delete(khash[:])
	_ = s.dataBucket.Delete(key)
}
// contentsIterator wraps a substore iterator together with a lock on the
// root store, held for the iterator's lifetime and released by Close.
type contentsIterator struct {
	types.Iterator
	locker sync.Locker
}

// newSubstoreIterator acquires the root store's read lock and wraps source
// so that the lock is held until the returned iterator is closed.
func (s *substore) newSubstoreIterator(source dbm.Iterator) *contentsIterator {
	locker := s.root.mtx.RLocker()
	locker.Lock()
	return &contentsIterator{dbutil.DBToStoreIterator(source), locker}
}

// Close closes the wrapped iterator, then releases the read lock (the defer
// runs even if the inner Close panics).
func (it *contentsIterator) Close() error {
	defer it.locker.Unlock()
	return it.Iterator.Close()
}
// Iterator implements KVStore.
// Returns an iterator over the substore's data in [start, end); the returned
// iterator holds the root store's read lock until closed.
// NOTE(review): the underlying DB iterator is created before the read lock
// is acquired (inside newSubstoreIterator) — confirm the backing transaction
// makes this safe against a concurrent writer.
func (s *substore) Iterator(start, end []byte) types.Iterator {
	iter, err := s.dataBucket.Iterator(start, end)
	if err != nil {
		panic(err)
	}
	return s.newSubstoreIterator(iter)
}
// ReverseIterator implements KVStore.
// Like Iterator, but traverses [start, end) in descending key order; the
// returned iterator holds the root store's read lock until closed.
// NOTE(review): as with Iterator, the DB iterator is created before the read
// lock is taken — confirm this is safe against concurrent writers.
func (s *substore) ReverseIterator(start, end []byte) types.Iterator {
	iter, err := s.dataBucket.ReverseIterator(start, end)
	if err != nil {
		panic(err)
	}
	return s.newSubstoreIterator(iter)
}
// GetStoreType implements Store.
func (s *substore) GetStoreType() types.StoreType {
	return types.StoreTypePersistent
}

// CacheWrap implements CacheWrapper: wraps this substore in a cachekv store.
func (s *substore) CacheWrap() types.CacheWrap {
	return cachekv.NewStore(s)
}

// CacheWrapWithTrace implements CacheWrapper, additionally tracing
// operations through a tracekv layer writing to w.
func (s *substore) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
	return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}

// CacheWrapWithListeners implements CacheWrapper, additionally notifying the
// given listeners of writes through a listenkv layer.
func (s *substore) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
	return cachekv.NewStore(listenkv.NewStore(s, storeKey, listeners))
}

View File

@ -0,0 +1,78 @@
package root
import (
"bytes"
"errors"
dbm "github.com/cosmos/cosmos-sdk/db"
)
// Test doubles wrapping dbm.DBConnection / dbm.DBReadWriter to inject
// failures at specific points of the Store commit path.

// dbDeleteVersionFails fails on DeleteVersion.
type dbDeleteVersionFails struct{ dbm.DBConnection }

// dbRWCommitFails hands out read-writers whose Commit fails.
type dbRWCommitFails struct{ dbm.DBConnection }

// dbRWCrudFails hands out read-writers whose Get/Has/Set/Delete all fail.
type dbRWCrudFails struct{ dbm.DBConnection }

// dbSaveVersionFails fails on SaveVersion.
type dbSaveVersionFails struct{ dbm.DBConnection }

// dbRevertFails fails on Revert according to failOn.
type dbRevertFails struct {
	dbm.DBConnection
	// order of calls to fail on (eg. [1, 0] => first call fails; second succeeds)
	failOn []bool
}

// dbVersionsIs reports a fixed version set from Versions.
type dbVersionsIs struct {
	dbm.DBConnection
	vset dbm.VersionSet
}

// dbVersionsFails fails on Versions.
type dbVersionsFails struct{ dbm.DBConnection }

// rwCommitFails is a DBReadWriter whose Commit discards and then fails.
type rwCommitFails struct{ dbm.DBReadWriter }

// rwCrudFails is a DBReadWriter whose CRUD operations fail: on every key
// when onKey is nil, otherwise only on the key equal to onKey.
type rwCrudFails struct {
	dbm.DBReadWriter
	onKey []byte
}
// Versions always fails.
func (dbVersionsFails) Versions() (dbm.VersionSet, error) { return nil, errors.New("dbVersionsFails") }

// Versions reports the fixed version set vset.
func (db dbVersionsIs) Versions() (dbm.VersionSet, error) { return db.vset, nil }

// ReadWriter wraps the underlying read-writer so that all CRUD calls fail
// (rwCrudFails with a nil onKey fails on every key).
func (db dbRWCrudFails) ReadWriter() dbm.DBReadWriter {
	return rwCrudFails{db.DBConnection.ReadWriter(), nil}
}

// SaveVersion always fails.
func (dbSaveVersionFails) SaveVersion(uint64) error { return errors.New("dbSaveVersionFails") }
// Revert consults failOn to decide whether this call should fail, otherwise
// delegates to the wrapped connection.
// NOTE(review): the value receiver means the reslicing of db.failOn below is
// lost when Revert returns — every call observes the original failOn[0].
// Confirm whether a pointer receiver (or an index cursor) was intended so the
// configured fail sequence actually advances across calls.
func (db dbRevertFails) Revert() error {
	fail := false
	if len(db.failOn) > 0 {
		fail, db.failOn = db.failOn[0], db.failOn[1:]
	}
	if fail {
		return errors.New("dbRevertFails")
	}
	return db.DBConnection.Revert()
}
// DeleteVersion always fails.
func (dbDeleteVersionFails) DeleteVersion(uint64) error { return errors.New("dbDeleteVersionFails") }

// Commit discards the transaction, then fails.
func (tx rwCommitFails) Commit() error {
	tx.Discard()
	return errors.New("rwCommitFails")
}

// ReadWriter wraps read-writers so that their Commit fails.
func (db dbRWCommitFails) ReadWriter() dbm.DBReadWriter {
	return rwCommitFails{db.DBConnection.ReadWriter()}
}
// failsOn reports whether the stub should fail for key k: it fails on every
// key when onKey is nil, otherwise only on the configured key.
// (Extracted to remove the predicate duplicated across all four CRUD methods.)
func (rw rwCrudFails) failsOn(k []byte) bool {
	return rw.onKey == nil || bytes.Equal(rw.onKey, k)
}

// Get fails per failsOn, otherwise delegates to the wrapped DBReadWriter.
func (rw rwCrudFails) Get(k []byte) ([]byte, error) {
	if rw.failsOn(k) {
		return nil, errors.New("rwCrudFails.Get")
	}
	return rw.DBReadWriter.Get(k)
}

// Has fails per failsOn, otherwise delegates to the wrapped DBReadWriter.
func (rw rwCrudFails) Has(k []byte) (bool, error) {
	if rw.failsOn(k) {
		return false, errors.New("rwCrudFails.Has")
	}
	return rw.DBReadWriter.Has(k)
}

// Set fails per failsOn, otherwise delegates to the wrapped DBReadWriter.
func (rw rwCrudFails) Set(k []byte, v []byte) error {
	if rw.failsOn(k) {
		return errors.New("rwCrudFails.Set")
	}
	return rw.DBReadWriter.Set(k, v)
}

// Delete fails per failsOn, otherwise delegates to the wrapped DBReadWriter.
func (rw rwCrudFails) Delete(k []byte) error {
	if rw.failsOn(k) {
		return errors.New("rwCrudFails.Delete")
	}
	return rw.DBReadWriter.Delete(k)
}

View File

@ -0,0 +1,161 @@
package root
import (
"errors"
"io"
dbm "github.com/cosmos/cosmos-sdk/db"
prefixdb "github.com/cosmos/cosmos-sdk/db/prefix"
util "github.com/cosmos/cosmos-sdk/internal"
dbutil "github.com/cosmos/cosmos-sdk/internal/db"
"github.com/cosmos/cosmos-sdk/store/cachekv"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/tracekv"
types "github.com/cosmos/cosmos-sdk/store/v2"
"github.com/cosmos/cosmos-sdk/store/v2/smt"
)
// ErrReadOnly is the panic value raised on attempts to modify a read-only
// store view.
var ErrReadOnly = errors.New("cannot modify read-only store")
// GetStateCommitmentStore returns the SMT-backed state commitment store for
// this view substore.
func (s *viewSubstore) GetStateCommitmentStore() *smt.Store {
	return s.stateCommitmentStore
}
// Get implements KVStore.
// Reads the value for key from the view's data bucket; storage errors panic.
func (s *viewSubstore) Get(key []byte) []byte {
	value, err := s.dataBucket.Get(key)
	if err == nil {
		return value
	}
	panic(err)
}
// Has implements KVStore.
// Reports whether key exists in the view's data bucket; storage errors panic.
func (s *viewSubstore) Has(key []byte) bool {
	ok, err := s.dataBucket.Has(key)
	if err == nil {
		return ok
	}
	panic(err)
}
// Set implements KVStore.
// Version views are immutable, so this always panics with ErrReadOnly.
func (s *viewSubstore) Set(key []byte, value []byte) {
	panic(ErrReadOnly)
}

// Delete implements KVStore.
// Version views are immutable, so this always panics with ErrReadOnly.
func (s *viewSubstore) Delete(key []byte) {
	panic(ErrReadOnly)
}
// Iterator implements KVStore.
// Returns an ascending iterator over [start, end) of the view's data bucket;
// storage errors panic.
func (s *viewSubstore) Iterator(start, end []byte) types.Iterator {
	source, err := s.dataBucket.Iterator(start, end)
	if err != nil {
		panic(err)
	}
	return dbutil.DBToStoreIterator(source)
}
// ReverseIterator implements KVStore.
// Returns a descending iterator over [start, end) of the view's data bucket;
// storage errors panic.
func (s *viewSubstore) ReverseIterator(start, end []byte) types.Iterator {
	source, err := s.dataBucket.ReverseIterator(start, end)
	if err != nil {
		panic(err)
	}
	return dbutil.DBToStoreIterator(source)
}
// GetStoreType implements Store.
func (s *viewSubstore) GetStoreType() types.StoreType {
	return types.StoreTypePersistent
}

// CacheWrap implements CacheWrapper: wraps this view in a cachekv store.
// (Receiver renamed st -> s for consistency with viewSubstore's other methods.)
func (s *viewSubstore) CacheWrap() types.CacheWrap {
	return cachekv.NewStore(s)
}

// CacheWrapWithTrace implements CacheWrapper, additionally tracing
// operations through a tracekv layer writing to w.
func (s *viewSubstore) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
	return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}

// CacheWrapWithListeners implements CacheWrapper, additionally notifying the
// given listeners of writes through a listenkv layer.
func (s *viewSubstore) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
	return cachekv.NewStore(listenkv.NewStore(s, storeKey, listeners))
}
// getView opens read-only views of the state DB (and, if configured, the
// separate state commitment DB) at the given version, reads the schema saved
// at that version, and assembles a viewStore. On any error, every view opened
// so far is discarded via the deferred cleanups before returning.
func (store *Store) getView(version int64) (ret *viewStore, err error) {
	stateView, err := store.stateDB.ReaderAt(uint64(version))
	if err != nil {
		return
	}
	// Discard the state view if any later step fails.
	defer func() {
		if err != nil {
			err = util.CombineErrors(err, stateView.Discard(), "stateView.Discard also failed")
		}
	}()

	// Without a separate StateCommitmentDB, commitment data is read from the
	// same state view.
	stateCommitmentView := stateView
	if store.StateCommitmentDB != nil {
		stateCommitmentView, err = store.StateCommitmentDB.ReaderAt(uint64(version))
		if err != nil {
			return
		}
		defer func() {
			if err != nil {
				err = util.CombineErrors(err, stateCommitmentView.Discard(), "stateCommitmentView.Discard also failed")
			}
		}()
	}
	// Now read this version's schema
	// NOTE(review): schemaView is only discarded on the error path — confirm
	// whether the success path leaks it or ownership passes elsewhere.
	schemaView := prefixdb.NewPrefixReader(stateView, schemaPrefix)
	defer func() {
		if err != nil {
			err = util.CombineErrors(err, schemaView.Discard(), "schemaView.Discard also failed")
		}
	}()
	pr, err := readSavedSchema(schemaView)
	if err != nil {
		return
	}
	// The migrated contents and schema are not committed until the next store.Commit
	ret = &viewStore{
		stateView:           stateView,
		stateCommitmentView: stateCommitmentView,
		substoreCache:       map[string]*viewSubstore{},
		schema:              pr.StoreSchema,
	}
	return
}
// GetKVStore returns the read-only substore for skey at this view's version.
// Panics if the key is not present in the version's schema or the substore
// cannot be loaded.
func (vs *viewStore) GetKVStore(skey types.StoreKey) types.KVStore {
	key := skey.Name()
	if _, has := vs.schema[key]; !has {
		panic(ErrStoreNotFound(key))
	}
	ret, err := vs.getSubstore(key)
	if err != nil {
		panic(err)
	}
	// Cache for reuse; getSubstore itself reads but never updates the cache.
	vs.substoreCache[key] = ret
	return ret
}
// getSubstore returns the viewSubstore for key, building it from the
// per-substore prefixed regions of the state and commitment views when it is
// not already cached.
// Reads but does not update substore cache
func (vs *viewStore) getSubstore(key string) (*viewSubstore, error) {
	if cached, has := vs.substoreCache[key]; has {
		return cached, nil
	}
	pfx := substorePrefix(key)
	stateR := prefixdb.NewPrefixReader(vs.stateView, pfx)
	stateCommitmentR := prefixdb.NewPrefixReader(vs.stateCommitmentView, pfx)
	// The Merkle root saved for this substore anchors the loaded SMT.
	rootHash, err := stateR.Get(merkleRootKey)
	if err != nil {
		return nil, err
	}
	return &viewSubstore{
		dataBucket:           prefixdb.NewPrefixReader(stateR, dataPrefix),
		indexBucket:          prefixdb.NewPrefixReader(stateR, indexPrefix),
		stateCommitmentStore: loadSMT(dbm.ReaderAsReadWriter(stateCommitmentR), rootHash),
	}, nil
}

View File

@ -4,14 +4,16 @@ import (
"crypto/sha256"
"errors"
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/store/types"
tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
"github.com/lazyledger/smt"
tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto"
)
var (
_ types.BasicKVStore = (*Store)(nil)
_ smt.MapStore = (dbMapStore{})
)
var (
@ -24,15 +26,19 @@ type Store struct {
tree *smt.SparseMerkleTree
}
func NewStore(nodes, values smt.MapStore) *Store {
// An smt.MapStore that wraps Get to raise smt.InvalidKeyError;
// smt.SparseMerkleTree expects this error to be returned when a key is not found
type dbMapStore struct{ dbm.DBReadWriter }
func NewStore(nodes, values dbm.DBReadWriter) *Store {
return &Store{
tree: smt.NewSparseMerkleTree(nodes, values, sha256.New()),
tree: smt.NewSparseMerkleTree(dbMapStore{nodes}, dbMapStore{values}, sha256.New()),
}
}
func LoadStore(nodes, values smt.MapStore, root []byte) *Store {
func LoadStore(nodes, values dbm.DBReadWriter, root []byte) *Store {
return &Store{
tree: smt.ImportSparseMerkleTree(nodes, values, sha256.New(), root),
tree: smt.ImportSparseMerkleTree(dbMapStore{nodes}, dbMapStore{values}, sha256.New(), root),
}
}
@ -97,3 +103,14 @@ func (s *Store) Delete(key []byte) {
panic(err)
}
}
// Get wraps DBReadWriter.Get to satisfy smt.MapStore: the sparse Merkle tree
// expects an *smt.InvalidKeyError — not a nil value — when a key is absent.
func (ms dbMapStore) Get(key []byte) ([]byte, error) {
	val, err := ms.DBReadWriter.Get(key)
	if err != nil {
		return nil, err
	}
	if val == nil {
		// DBReadWriter.Get reports a missing key as (nil, nil); translate
		// that into the sentinel error type the SMT library checks for.
		// Keyed field avoids the `go vet` unkeyed-composite-literal warning
		// for a struct from another package.
		return nil, &smt.InvalidKeyError{Key: key}
	}
	return val, nil
}

View File

@ -5,12 +5,13 @@ import (
"github.com/stretchr/testify/assert"
"github.com/cosmos/cosmos-sdk/db/memdb"
store "github.com/cosmos/cosmos-sdk/store/v2/smt"
"github.com/lazyledger/smt"
)
func TestGetSetHasDelete(t *testing.T) {
s := store.NewStore(smt.NewSimpleMap(), smt.NewSimpleMap())
nodes, values := memdb.NewDB(), memdb.NewDB()
s := store.NewStore(nodes.ReadWriter(), values.ReadWriter())
s.Set([]byte("foo"), []byte("bar"))
assert.Equal(t, []byte("bar"), s.Get([]byte("foo")))
@ -28,15 +29,16 @@ func TestGetSetHasDelete(t *testing.T) {
}
func TestLoadStore(t *testing.T) {
nodes, values := smt.NewSimpleMap(), smt.NewSimpleMap()
s := store.NewStore(nodes, values)
nodes, values := memdb.NewDB(), memdb.NewDB()
nmap, vmap := nodes.ReadWriter(), values.ReadWriter()
s := store.NewStore(nmap, vmap)
s.Set([]byte{0}, []byte{0})
s.Set([]byte{1}, []byte{1})
s.Delete([]byte{1})
root := s.Root()
s = store.LoadStore(nodes, values, root)
s = store.LoadStore(nmap, vmap, root)
assert.Equal(t, []byte{0}, s.Get([]byte{0}))
assert.False(t, s.Has([]byte{1}))
}

View File

@ -0,0 +1,46 @@
package transient
import (
dbm "github.com/cosmos/cosmos-sdk/db"
"github.com/cosmos/cosmos-sdk/db/memdb"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/store/v2/dbadapter"
)
// Compile-time checks that *Store satisfies the store interfaces.
var (
	_ types.KVStore   = (*Store)(nil)
	_ types.Committer = (*Store)(nil)
)
// Store is a wrapper for a memory store which does not persist data.
// It embeds a dbadapter.Store over a read-writer transaction of conn; the
// transaction is replaced with a fresh one on every Commit, discarding all
// writes.
type Store struct {
	dbadapter.Store
	// conn is the backing in-memory DB connection, retained so Commit can
	// open a fresh read-writer after discarding the previous transaction.
	conn dbm.DBConnection
}
// NewStore constructs a new transient store backed by a freshly created
// in-memory database.
func NewStore() *Store {
	conn := memdb.NewDB()
	store := &Store{conn: conn}
	store.Store = dbadapter.Store{DB: conn.ReadWriter()}
	return store
}
// GetStoreType implements Store.
// A transient store always reports StoreTypeTransient.
func (ts *Store) GetStoreType() types.StoreType {
	return types.StoreTypeTransient
}
// Commit implements Committer. A transient store never persists data, so
// Commit discards all buffered writes and opens a fresh transaction; the
// returned CommitID is always the zero value.
func (ts *Store) Commit() (id types.CommitID) {
	// The error from Discard is intentionally ignored: the transaction is
	// being thrown away and is replaced with a fresh one immediately below.
	// The explicit blank assignment keeps errcheck/staticcheck clean.
	_ = ts.DB.Discard()
	ts.Store = dbadapter.Store{DB: ts.conn.ReadWriter()}
	return
}
// SetPruning is a no-op: a transient store persists nothing, so there is
// nothing to prune.
func (ts *Store) SetPruning(types.PruningOptions) {}

// GetPruning returns zero-value pruning options, matching SetPruning's no-op.
func (ts *Store) GetPruning() types.PruningOptions { return types.PruningOptions{} }

// LastCommitID always returns the zero CommitID, since Commit never records one.
func (ts *Store) LastCommitID() (id types.CommitID) { return }

View File

@ -0,0 +1,27 @@
package transient_test
import (
"bytes"
"testing"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/store/v2/transient"
)
// k and v are the fixture key/value pair used throughout the test.
var k, v = []byte("hello"), []byte("world")

// TestTransientStore checks that a transient store supports basic reads and
// writes within a block, that Commit discards all written state, and that
// commit metadata stays at its zero value.
func TestTransientStore(t *testing.T) {
	tstore := transient.NewStore()
	// A fresh store holds nothing.
	require.Nil(t, tstore.Get(k))
	tstore.Set(k, v)
	require.Equal(t, v, tstore.Get(k))
	// Commit wipes all written state rather than persisting it.
	tstore.Commit()
	require.Nil(t, tstore.Get(k))
	// LastCommitID never advances for a transient store.
	emptyCommitID := tstore.LastCommitID()
	require.Equal(t, emptyCommitID.Version, int64(0))
	require.True(t, bytes.Equal(emptyCommitID.Hash, nil))
	require.Equal(t, types.StoreTypeTransient, tstore.GetStoreType())
}

109
store/v2/types.go Normal file
View File

@ -0,0 +1,109 @@
package types
import (
"io"
snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types"
v1 "github.com/cosmos/cosmos-sdk/store/types"
)
// Re-export relevant original store types so store/v2 consumers do not need
// to import the v1 types package directly.
type (
	StoreKey          = v1.StoreKey
	StoreType         = v1.StoreType
	CommitID          = v1.CommitID
	StoreUpgrades     = v1.StoreUpgrades
	StoreRename       = v1.StoreRename
	Iterator          = v1.Iterator
	PruningOptions    = v1.PruningOptions
	TraceContext      = v1.TraceContext
	WriteListener     = v1.WriteListener
	BasicKVStore      = v1.BasicKVStore
	KVStore           = v1.KVStore
	Committer         = v1.Committer
	CommitKVStore     = v1.CommitKVStore
	CacheKVStore      = v1.CacheKVStore
	Queryable         = v1.Queryable
	CacheWrap         = v1.CacheWrap
	KVStoreKey        = v1.KVStoreKey
	MemoryStoreKey    = v1.MemoryStoreKey
	TransientStoreKey = v1.TransientStoreKey
	KVPair            = v1.KVPair
	StoreKVPair       = v1.StoreKVPair
)
// Re-export relevant constants, values and utility functions from the v1
// store types package.
const (
	StoreTypeMemory     = v1.StoreTypeMemory
	StoreTypeTransient  = v1.StoreTypeTransient
	StoreTypeDB         = v1.StoreTypeDB
	StoreTypeSMT        = v1.StoreTypeSMT
	StoreTypePersistent = v1.StoreTypePersistent
)

var (
	PruneDefault                 = v1.PruneDefault
	PruneEverything              = v1.PruneEverything
	PruneNothing                 = v1.PruneNothing
	NewKVStoreKey                = v1.NewKVStoreKey
	PrefixEndBytes               = v1.PrefixEndBytes
	KVStorePrefixIterator        = v1.KVStorePrefixIterator
	KVStoreReversePrefixIterator = v1.KVStoreReversePrefixIterator
	NewStoreKVPairWriteListener  = v1.NewStoreKVPairWriteListener
)
// BasicMultiStore defines a minimal interface for accessing root state.
type BasicMultiStore interface {
	// GetKVStore returns a KVStore which has access only to the namespace
	// of the StoreKey. Panics if the key is not found in the schema.
	GetKVStore(StoreKey) KVStore
}
// rootStoreTraceListen is a mixin interface for the trace and listen methods
// shared by CommitMultiStore and CacheMultiStore.
type rootStoreTraceListen interface {
	// TracingEnabled reports whether operation tracing is enabled.
	TracingEnabled() bool
	// SetTracer sets the writer that receives trace output.
	SetTracer(w io.Writer)
	// SetTraceContext sets context metadata for traced operations.
	SetTraceContext(TraceContext)
	// ListeningEnabled reports whether listeners are registered for key.
	ListeningEnabled(key StoreKey) bool
	// AddListeners registers write listeners for the store identified by key.
	AddListeners(key StoreKey, listeners []WriteListener)
}
// CommitMultiStore defines a complete interface for persistent root state, including
// (read-only) access to past versions, pruning, trace/listen, and state snapshots.
type CommitMultiStore interface {
	BasicMultiStore
	rootStoreTraceListen
	Committer
	snapshottypes.Snapshotter

	// GetVersion gets a read-only view of the store at a specific version.
	// Returns an error if the version is not found.
	GetVersion(int64) (BasicMultiStore, error)
	// Close closes the store and all backing transactions.
	Close() error
	// CacheMultiStore returns a branched store whose modifications are later
	// merged back in.
	CacheMultiStore() CacheMultiStore
	// SetInitialVersion defines the minimum version number that can be saved
	// by this store.
	SetInitialVersion(uint64) error
}
// CacheMultiStore defines a branch of the root state which can be written back to the source store.
type CacheMultiStore interface {
	BasicMultiStore
	rootStoreTraceListen

	// CacheMultiStore returns a branched store whose modifications are later
	// merged back in.
	CacheMultiStore() CacheMultiStore
	// Write flushes all cached changes back to the source store.
	// Note: this overwrites any intervening changes.
	Write()
}
// MultiStorePersistentCache provides inter-block (persistent) caching capabilities for a CommitMultiStore.
// Currently a straight alias of the v1 interface.
// TODO: placeholder. Implement and redefine this
type MultiStorePersistentCache = v1.MultiStorePersistentCache

745
tests/mocks/db/types.go Normal file
View File

@ -0,0 +1,745 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: db/types.go
// Package mocks is a generated GoMock package.
package mocks
import (
reflect "reflect"
db "github.com/cosmos/cosmos-sdk/db"
gomock "github.com/golang/mock/gomock"
)
// MockDBConnection is a mock of DBConnection interface.
type MockDBConnection struct {
ctrl *gomock.Controller
recorder *MockDBConnectionMockRecorder
}
// MockDBConnectionMockRecorder is the mock recorder for MockDBConnection.
type MockDBConnectionMockRecorder struct {
mock *MockDBConnection
}
// NewMockDBConnection creates a new mock instance.
func NewMockDBConnection(ctrl *gomock.Controller) *MockDBConnection {
mock := &MockDBConnection{ctrl: ctrl}
mock.recorder = &MockDBConnectionMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDBConnection) EXPECT() *MockDBConnectionMockRecorder {
return m.recorder
}
// Close mocks base method.
func (m *MockDBConnection) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
return ret0
}
// Close indicates an expected call of Close.
func (mr *MockDBConnectionMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDBConnection)(nil).Close))
}
// DeleteVersion mocks base method.
func (m *MockDBConnection) DeleteVersion(arg0 uint64) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteVersion", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteVersion indicates an expected call of DeleteVersion.
func (mr *MockDBConnectionMockRecorder) DeleteVersion(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVersion", reflect.TypeOf((*MockDBConnection)(nil).DeleteVersion), arg0)
}
// ReadWriter mocks base method.
func (m *MockDBConnection) ReadWriter() db.DBReadWriter {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReadWriter")
ret0, _ := ret[0].(db.DBReadWriter)
return ret0
}
// ReadWriter indicates an expected call of ReadWriter.
func (mr *MockDBConnectionMockRecorder) ReadWriter() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadWriter", reflect.TypeOf((*MockDBConnection)(nil).ReadWriter))
}
// Reader mocks base method.
func (m *MockDBConnection) Reader() db.DBReader {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reader")
ret0, _ := ret[0].(db.DBReader)
return ret0
}
// Reader indicates an expected call of Reader.
func (mr *MockDBConnectionMockRecorder) Reader() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockDBConnection)(nil).Reader))
}
// ReaderAt mocks base method.
func (m *MockDBConnection) ReaderAt(arg0 uint64) (db.DBReader, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReaderAt", arg0)
ret0, _ := ret[0].(db.DBReader)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReaderAt indicates an expected call of ReaderAt.
func (mr *MockDBConnectionMockRecorder) ReaderAt(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReaderAt", reflect.TypeOf((*MockDBConnection)(nil).ReaderAt), arg0)
}
// Revert mocks base method.
func (m *MockDBConnection) Revert() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Revert")
ret0, _ := ret[0].(error)
return ret0
}
// Revert indicates an expected call of Revert.
func (mr *MockDBConnectionMockRecorder) Revert() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Revert", reflect.TypeOf((*MockDBConnection)(nil).Revert))
}
// SaveNextVersion mocks base method.
func (m *MockDBConnection) SaveNextVersion() (uint64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SaveNextVersion")
ret0, _ := ret[0].(uint64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SaveNextVersion indicates an expected call of SaveNextVersion.
func (mr *MockDBConnectionMockRecorder) SaveNextVersion() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveNextVersion", reflect.TypeOf((*MockDBConnection)(nil).SaveNextVersion))
}
// SaveVersion mocks base method.
func (m *MockDBConnection) SaveVersion(arg0 uint64) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SaveVersion", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// SaveVersion indicates an expected call of SaveVersion.
func (mr *MockDBConnectionMockRecorder) SaveVersion(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveVersion", reflect.TypeOf((*MockDBConnection)(nil).SaveVersion), arg0)
}
// Versions mocks base method.
func (m *MockDBConnection) Versions() (db.VersionSet, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Versions")
ret0, _ := ret[0].(db.VersionSet)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Versions indicates an expected call of Versions.
func (mr *MockDBConnectionMockRecorder) Versions() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Versions", reflect.TypeOf((*MockDBConnection)(nil).Versions))
}
// Writer mocks base method.
func (m *MockDBConnection) Writer() db.DBWriter {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Writer")
ret0, _ := ret[0].(db.DBWriter)
return ret0
}
// Writer indicates an expected call of Writer.
func (mr *MockDBConnectionMockRecorder) Writer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Writer", reflect.TypeOf((*MockDBConnection)(nil).Writer))
}
// MockDBReader is a mock of DBReader interface.
type MockDBReader struct {
ctrl *gomock.Controller
recorder *MockDBReaderMockRecorder
}
// MockDBReaderMockRecorder is the mock recorder for MockDBReader.
type MockDBReaderMockRecorder struct {
mock *MockDBReader
}
// NewMockDBReader creates a new mock instance.
func NewMockDBReader(ctrl *gomock.Controller) *MockDBReader {
mock := &MockDBReader{ctrl: ctrl}
mock.recorder = &MockDBReaderMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDBReader) EXPECT() *MockDBReaderMockRecorder {
return m.recorder
}
// Discard mocks base method.
func (m *MockDBReader) Discard() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Discard")
ret0, _ := ret[0].(error)
return ret0
}
// Discard indicates an expected call of Discard.
func (mr *MockDBReaderMockRecorder) Discard() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discard", reflect.TypeOf((*MockDBReader)(nil).Discard))
}
// Get mocks base method.
func (m *MockDBReader) Get(arg0 []byte) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", arg0)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Get indicates an expected call of Get.
func (mr *MockDBReaderMockRecorder) Get(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDBReader)(nil).Get), arg0)
}
// Has mocks base method.
func (m *MockDBReader) Has(key []byte) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Has", key)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Has indicates an expected call of Has.
func (mr *MockDBReaderMockRecorder) Has(key interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockDBReader)(nil).Has), key)
}
// Iterator mocks base method.
func (m *MockDBReader) Iterator(start, end []byte) (db.Iterator, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Iterator", start, end)
ret0, _ := ret[0].(db.Iterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Iterator indicates an expected call of Iterator.
func (mr *MockDBReaderMockRecorder) Iterator(start, end interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockDBReader)(nil).Iterator), start, end)
}
// ReverseIterator mocks base method.
func (m *MockDBReader) ReverseIterator(start, end []byte) (db.Iterator, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReverseIterator", start, end)
ret0, _ := ret[0].(db.Iterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReverseIterator indicates an expected call of ReverseIterator.
func (mr *MockDBReaderMockRecorder) ReverseIterator(start, end interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockDBReader)(nil).ReverseIterator), start, end)
}
// MockDBWriter is a mock of DBWriter interface.
type MockDBWriter struct {
ctrl *gomock.Controller
recorder *MockDBWriterMockRecorder
}
// MockDBWriterMockRecorder is the mock recorder for MockDBWriter.
type MockDBWriterMockRecorder struct {
mock *MockDBWriter
}
// NewMockDBWriter creates a new mock instance.
func NewMockDBWriter(ctrl *gomock.Controller) *MockDBWriter {
mock := &MockDBWriter{ctrl: ctrl}
mock.recorder = &MockDBWriterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDBWriter) EXPECT() *MockDBWriterMockRecorder {
return m.recorder
}
// Commit mocks base method.
func (m *MockDBWriter) Commit() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Commit")
ret0, _ := ret[0].(error)
return ret0
}
// Commit indicates an expected call of Commit.
func (mr *MockDBWriterMockRecorder) Commit() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockDBWriter)(nil).Commit))
}
// Delete mocks base method.
func (m *MockDBWriter) Delete(arg0 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockDBWriterMockRecorder) Delete(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDBWriter)(nil).Delete), arg0)
}
// Discard mocks base method.
func (m *MockDBWriter) Discard() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Discard")
ret0, _ := ret[0].(error)
return ret0
}
// Discard indicates an expected call of Discard.
func (mr *MockDBWriterMockRecorder) Discard() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discard", reflect.TypeOf((*MockDBWriter)(nil).Discard))
}
// Set mocks base method.
func (m *MockDBWriter) Set(arg0, arg1 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Set", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Set indicates an expected call of Set.
func (mr *MockDBWriterMockRecorder) Set(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockDBWriter)(nil).Set), arg0, arg1)
}
// MockDBReadWriter is a mock of DBReadWriter interface.
type MockDBReadWriter struct {
ctrl *gomock.Controller
recorder *MockDBReadWriterMockRecorder
}
// MockDBReadWriterMockRecorder is the mock recorder for MockDBReadWriter.
type MockDBReadWriterMockRecorder struct {
mock *MockDBReadWriter
}
// NewMockDBReadWriter creates a new mock instance.
func NewMockDBReadWriter(ctrl *gomock.Controller) *MockDBReadWriter {
mock := &MockDBReadWriter{ctrl: ctrl}
mock.recorder = &MockDBReadWriterMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDBReadWriter) EXPECT() *MockDBReadWriterMockRecorder {
return m.recorder
}
// Commit mocks base method.
func (m *MockDBReadWriter) Commit() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Commit")
ret0, _ := ret[0].(error)
return ret0
}
// Commit indicates an expected call of Commit.
func (mr *MockDBReadWriterMockRecorder) Commit() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockDBReadWriter)(nil).Commit))
}
// Delete mocks base method.
func (m *MockDBReadWriter) Delete(arg0 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockDBReadWriterMockRecorder) Delete(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDBReadWriter)(nil).Delete), arg0)
}
// Discard mocks base method.
func (m *MockDBReadWriter) Discard() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Discard")
ret0, _ := ret[0].(error)
return ret0
}
// Discard indicates an expected call of Discard.
func (mr *MockDBReadWriterMockRecorder) Discard() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discard", reflect.TypeOf((*MockDBReadWriter)(nil).Discard))
}
// Get mocks base method.
func (m *MockDBReadWriter) Get(arg0 []byte) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", arg0)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Get indicates an expected call of Get.
func (mr *MockDBReadWriterMockRecorder) Get(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDBReadWriter)(nil).Get), arg0)
}
// Has mocks base method.
func (m *MockDBReadWriter) Has(key []byte) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Has", key)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Has indicates an expected call of Has.
func (mr *MockDBReadWriterMockRecorder) Has(key interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockDBReadWriter)(nil).Has), key)
}
// Iterator mocks base method.
func (m *MockDBReadWriter) Iterator(start, end []byte) (db.Iterator, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Iterator", start, end)
ret0, _ := ret[0].(db.Iterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Iterator indicates an expected call of Iterator.
func (mr *MockDBReadWriterMockRecorder) Iterator(start, end interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockDBReadWriter)(nil).Iterator), start, end)
}
// ReverseIterator mocks base method.
func (m *MockDBReadWriter) ReverseIterator(start, end []byte) (db.Iterator, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReverseIterator", start, end)
ret0, _ := ret[0].(db.Iterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReverseIterator indicates an expected call of ReverseIterator.
func (mr *MockDBReadWriterMockRecorder) ReverseIterator(start, end interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockDBReadWriter)(nil).ReverseIterator), start, end)
}
// Set mocks base method.
func (m *MockDBReadWriter) Set(arg0, arg1 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Set", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Set indicates an expected call of Set.
func (mr *MockDBReadWriterMockRecorder) Set(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockDBReadWriter)(nil).Set), arg0, arg1)
}
// MockIterator is a mock of Iterator interface.
type MockIterator struct {
ctrl *gomock.Controller
recorder *MockIteratorMockRecorder
}
// MockIteratorMockRecorder is the mock recorder for MockIterator.
type MockIteratorMockRecorder struct {
mock *MockIterator
}
// NewMockIterator creates a new mock instance.
func NewMockIterator(ctrl *gomock.Controller) *MockIterator {
mock := &MockIterator{ctrl: ctrl}
mock.recorder = &MockIteratorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockIterator) EXPECT() *MockIteratorMockRecorder {
return m.recorder
}
// Close mocks base method.
func (m *MockIterator) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
return ret0
}
// Close indicates an expected call of Close.
func (mr *MockIteratorMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIterator)(nil).Close))
}
// Domain mocks base method.
func (m *MockIterator) Domain() ([]byte, []byte) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Domain")
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].([]byte)
return ret0, ret1
}
// Domain indicates an expected call of Domain.
func (mr *MockIteratorMockRecorder) Domain() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Domain", reflect.TypeOf((*MockIterator)(nil).Domain))
}
// Error mocks base method.
func (m *MockIterator) Error() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Error")
ret0, _ := ret[0].(error)
return ret0
}
// Error indicates an expected call of Error.
func (mr *MockIteratorMockRecorder) Error() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockIterator)(nil).Error))
}
// Key mocks base method.
func (m *MockIterator) Key() []byte {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Key")
ret0, _ := ret[0].([]byte)
return ret0
}
// Key indicates an expected call of Key.
func (mr *MockIteratorMockRecorder) Key() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Key", reflect.TypeOf((*MockIterator)(nil).Key))
}
// Next mocks base method.
func (m *MockIterator) Next() bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Next")
ret0, _ := ret[0].(bool)
return ret0
}
// Next indicates an expected call of Next.
func (mr *MockIteratorMockRecorder) Next() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockIterator)(nil).Next))
}
// Value mocks base method.
func (m *MockIterator) Value() []byte {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Value")
ret0, _ := ret[0].([]byte)
return ret0
}
// Value indicates an expected call of Value.
func (mr *MockIteratorMockRecorder) Value() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Value", reflect.TypeOf((*MockIterator)(nil).Value))
}
// MockVersionSet is a mock of VersionSet interface.
type MockVersionSet struct {
ctrl *gomock.Controller
recorder *MockVersionSetMockRecorder
}
// MockVersionSetMockRecorder is the mock recorder for MockVersionSet.
type MockVersionSetMockRecorder struct {
mock *MockVersionSet
}
// NewMockVersionSet creates a new mock instance.
func NewMockVersionSet(ctrl *gomock.Controller) *MockVersionSet {
mock := &MockVersionSet{ctrl: ctrl}
mock.recorder = &MockVersionSetMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockVersionSet) EXPECT() *MockVersionSetMockRecorder {
return m.recorder
}
// Count mocks base method.
func (m *MockVersionSet) Count() int {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Count")
ret0, _ := ret[0].(int)
return ret0
}
// Count indicates an expected call of Count.
func (mr *MockVersionSetMockRecorder) Count() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Count", reflect.TypeOf((*MockVersionSet)(nil).Count))
}
// Equal mocks base method.
func (m *MockVersionSet) Equal(arg0 db.VersionSet) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Equal", arg0)
ret0, _ := ret[0].(bool)
return ret0
}
// Equal indicates an expected call of Equal.
func (mr *MockVersionSetMockRecorder) Equal(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Equal", reflect.TypeOf((*MockVersionSet)(nil).Equal), arg0)
}
// Exists mocks base method.
func (m *MockVersionSet) Exists(arg0 uint64) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Exists", arg0)
ret0, _ := ret[0].(bool)
return ret0
}
// Exists indicates an expected call of Exists.
func (mr *MockVersionSetMockRecorder) Exists(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exists", reflect.TypeOf((*MockVersionSet)(nil).Exists), arg0)
}
// Iterator mocks base method.
func (m *MockVersionSet) Iterator() db.VersionIterator {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Iterator")
ret0, _ := ret[0].(db.VersionIterator)
return ret0
}
// Iterator indicates an expected call of Iterator.
func (mr *MockVersionSetMockRecorder) Iterator() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockVersionSet)(nil).Iterator))
}
// Last mocks base method.
func (m *MockVersionSet) Last() uint64 {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Last")
ret0, _ := ret[0].(uint64)
return ret0
}
// Last indicates an expected call of Last.
func (mr *MockVersionSetMockRecorder) Last() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Last", reflect.TypeOf((*MockVersionSet)(nil).Last))
}
// MockVersionIterator is a mock of VersionIterator interface.
type MockVersionIterator struct {
ctrl *gomock.Controller
recorder *MockVersionIteratorMockRecorder
}
// MockVersionIteratorMockRecorder is the mock recorder for MockVersionIterator.
type MockVersionIteratorMockRecorder struct {
mock *MockVersionIterator
}
// NewMockVersionIterator creates a new mock instance.
func NewMockVersionIterator(ctrl *gomock.Controller) *MockVersionIterator {
mock := &MockVersionIterator{ctrl: ctrl}
mock.recorder = &MockVersionIteratorMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockVersionIterator) EXPECT() *MockVersionIteratorMockRecorder {
return m.recorder
}
// Next mocks base method.
func (m *MockVersionIterator) Next() bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Next")
ret0, _ := ret[0].(bool)
return ret0
}
// Next indicates an expected call of Next.
func (mr *MockVersionIteratorMockRecorder) Next() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockVersionIterator)(nil).Next))
}
// Value mocks base method.
func (m *MockVersionIterator) Value() uint64 {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Value")
ret0, _ := ret[0].(uint64)
return ret0
}
// Value indicates an expected call of Value.
func (mr *MockVersionIteratorMockRecorder) Value() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Value", reflect.TypeOf((*MockVersionIterator)(nil).Value))
}