fix: state listener observe writes at wrong time (backport #13516) (#14137)

* fix: state listener observe writes at wrong time (#13516)

* fix: state listener observe writes at wrong time

Closes: #13457

Currently the state listener is notified when the cache store writes, which happens only at commit time; this breaks the intended design.
The solution (as discussed in the issue) is to listen for state writes on the rootmulti store only.

It also changes the file streamer to output a single data file for all the writes in a block, since we can't distinguish writes from the different stages of ABCI events.

It adds new config items for the file streamer:
- streamers.file.output-metadata
- streamers.file.stop-node-on-error
- streamers.file.fsync
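
For reference, these keys land in `app.toml` roughly as follows (a sketch using the defaults introduced in this change):

```toml
[streamers.file]
output-metadata = "true"
stop-node-on-error = "true"
fsync = "false"
```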

* synchronous ABCI call, and format doc

* fix comment

* update file streamer readme and fix typos

* typo

* improve UX of file streamer, making it immediately usable once enabled

- set a default value for write_dir.
- resolve write_dir relative to the home directory by default.
- auto-create the directory if it does not exist.

* get homePath from opts

Co-authored-by: Marko <marbar3778@yahoo.com>
(cherry picked from commit 1f91ee2ee941fd9a1dd4bc3acecd753e3cb7e237)

# Conflicts:
#	CHANGELOG.md
#	baseapp/streaming.go
#	store/streaming/constructor.go
#	store/streaming/file/service.go

* fix changelog

* fix conflicts

Co-authored-by: yihuang <huang@crypto.com>
Co-authored-by: Julien Robert <julien@rbrt.fr>
mergify[bot] 2022-12-05 22:23:03 +00:00 committed by GitHub
parent fe84b9f4f9
commit e7afcca232
36 changed files with 3003 additions and 690 deletions


@ -45,6 +45,13 @@ Ref: https://keepachangelog.com/en/1.0.0/
### API Breaking Changes
* (x/auth) [#13780](https://github.com/cosmos/cosmos-sdk/pull/13780) Querying with `id` (type `int64`) in the `AccountAddressByID` gRPC query now throws an error; use `account-id` (type `uint64`) instead.
* (store) [#13516](https://github.com/cosmos/cosmos-sdk/pull/13516) Update State Streaming APIs:
* Add method `ListenCommit` to `ABCIListener`
* Move `ListeningEnabled` and `AddListener` methods to `CommitMultiStore`
* Remove `CacheWrapWithListeners` from `CacheWrap` and `CacheWrapper` interfaces
* Remove listening APIs from the caching layer (it should only listen to the `rootmulti.Store`)
* Add three new options to the file streaming service constructor.
* Modify `ABCIListener` such that any error from any method will always halt the app via `panic`
### State Machine Breaking
@ -59,6 +66,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
* (x/upgrade) [#13936](https://github.com/cosmos/cosmos-sdk/pull/13936) Make downgrade verification work again.
* (x/group) [#13742](https://github.com/cosmos/cosmos-sdk/pull/13742) Fix `validate-genesis` when group policy accounts exist.
* (baseapp) [#14049](https://github.com/cosmos/cosmos-sdk/pull/14049) Fix state sync when interval is zero.
* (store) [#13516](https://github.com/cosmos/cosmos-sdk/pull/13516) Fix state listener that was observing writes at the wrong time.
## [v0.47.0-alpha1](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.47.0-alpha1) - 2022-11-21

File diff suppressed because it is too large


@ -202,7 +202,7 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg
// call the hooks with the BeginBlock messages
for _, streamingListener := range app.abciListeners {
if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil {
app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err)
panic(fmt.Errorf("BeginBlock listening hook failed, height: %d, err: %w", req.Header.Height, err))
}
}
@ -227,7 +227,7 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc
// call the streaming service hooks with the EndBlock messages
for _, streamingListener := range app.abciListeners {
if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err)
panic(fmt.Errorf("EndBlock listening hook failed, height: %d, err: %w", req.Height, err))
}
}
@ -330,7 +330,7 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) (res abci.ResponseDeliv
defer func() {
for _, streamingListener := range app.abciListeners {
if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, res); err != nil {
app.logger.Error("DeliverTx listening hook failed", "err", err)
panic(fmt.Errorf("DeliverTx listening hook failed: %w", err))
}
}
}()
@ -364,7 +364,7 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) (res abci.ResponseDeliv
// defined in config, Commit will execute a deferred function call to check
// against that height and gracefully halt if it matches the latest committed
// height.
func (app *BaseApp) Commit() (res abci.ResponseCommit) {
func (app *BaseApp) Commit() abci.ResponseCommit {
header := app.deliverState.ctx.BlockHeader()
retainHeight := app.GetBlockRetentionHeight(header.Height)
@ -373,6 +373,19 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) {
// MultiStore (app.cms) so when Commit() is called it persists those values.
app.deliverState.ms.Write()
commitID := app.cms.Commit()
res := abci.ResponseCommit{
Data: commitID.Hash,
RetainHeight: retainHeight,
}
// call the hooks with the Commit message
for _, streamingListener := range app.abciListeners {
if err := streamingListener.ListenCommit(app.deliverState.ctx, res); err != nil {
panic(fmt.Errorf("Commit listening hook failed, height: %d, err: %w", header.Height, err))
}
}
app.logger.Info("commit synced", "commit", fmt.Sprintf("%X", commitID))
// Reset the Check state to the latest committed.
@ -406,10 +419,7 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) {
go app.snapshotManager.SnapshotIfApplicable(header.Height)
return abci.ResponseCommit{
Data: commitID.Hash,
RetainHeight: retainHeight,
}
return res
}
// halt attempts to gracefully shutdown the node via SIGINT and SIGTERM falling


@ -1,23 +1,27 @@
package baseapp
import (
"context"
"io"
"sync"
abci "github.com/tendermint/tendermint/abci/types"
store "github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/types"
)
// ABCIListener interface used to hook into the ABCI message processing of the BaseApp
// ABCIListener interface used to hook into the ABCI message processing of the BaseApp.
// The error results are propagated to the consensus state machine;
// if you don't want to affect consensus, handle the errors internally and always return `nil` from these APIs.
type ABCIListener interface {
// ListenBeginBlock updates the streaming service with the latest BeginBlock messages
ListenBeginBlock(ctx types.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error
ListenBeginBlock(ctx context.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error
// ListenEndBlock updates the streaming service with the latest EndBlock messages
ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error
ListenEndBlock(ctx context.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error
// ListenDeliverTx updates the streaming service with the latest DeliverTx messages
ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error
ListenDeliverTx(ctx context.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error
// ListenCommit updates the streaming service with the latest Commit event
ListenCommit(ctx context.Context, res abci.ResponseCommit) error
}
// StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks


@ -3,6 +3,11 @@
## Changelog
* 11/23/2020: Initial draft
* 10/14/2022:
* Add `ListenCommit`, flattening the state writes in a block into a single batch.
* Remove listeners from cache stores; we should only listen to `rootmulti.Store`.
* Remove `HaltAppOnDeliveryError()`; the errors are propagated by default, and implementations should return nil if they don't want to propagate errors.
## Status
@ -20,7 +25,7 @@ In addition to these request/response queries, it would be beneficial to have a
## Decision
We will modify the `MultiStore` interface and its concrete (`rootmulti` and `cachemulti`) implementations and introduce a new `listenkv.Store` to allow listening to state changes in underlying KVStores.
We will modify the `CommitMultiStore` interface and its concrete (`rootmulti`) implementations and introduce a new `listenkv.Store` to allow listening to state changes in underlying KVStores. We don't need to listen to cache stores, because we can't be sure their writes will eventually be committed, and those writes are eventually duplicated in `rootmulti.Store`, so we should only listen to `rootmulti.Store`.
We will introduce a plugin system for configuring and running streaming services that write these state changes and their surrounding ABCI message context to different destinations.
### Listening interface
@ -39,8 +44,8 @@ type WriteListener interface {
### Listener type
We will create a concrete implementation of the `WriteListener` interface in `store/types/listening.go`, that writes out protobuf
encoded KV pairs to an underlying `io.Writer`.
We will create two concrete implementations of the `WriteListener` interface in `store/types/listening.go`: one that writes out protobuf
encoded KV pairs to an underlying `io.Writer`, and one that simply accumulates them in memory.
This will include defining a simple protobuf type for the KV pairs. In addition to the key and value fields this message
will include the StoreKey for the originating KVStore so that we can write out from separate KVStores to the same stream/file
@ -89,6 +94,42 @@ func (wl *StoreKVPairWriteListener) OnWrite(storeKey types.StoreKey, key []byte,
}
```
```golang
// MemoryListener listens to the state writes and accumulates the records in memory.
type MemoryListener struct {
key StoreKey
stateCache []StoreKVPair
}
// NewMemoryListener creates a listener that accumulates the state writes in memory.
func NewMemoryListener(key StoreKey) *MemoryListener {
return &MemoryListener{key: key}
}
// OnWrite implements WriteListener interface
func (fl *MemoryListener) OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) error {
fl.stateCache = append(fl.stateCache, StoreKVPair{
StoreKey: storeKey.Name(),
Delete: delete,
Key: key,
Value: value,
})
return nil
}
// PopStateCache returns the current state cache and resets it to nil
func (fl *MemoryListener) PopStateCache() []StoreKVPair {
res := fl.stateCache
fl.stateCache = nil
return res
}
// StoreKey returns the storeKey it listens to
func (fl *MemoryListener) StoreKey() StoreKey {
return fl.key
}
```
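
For illustration, a consumer can drain the accumulated writes once per block, e.g. when handling the Commit event (a sketch; `collectBlockWrites` is a hypothetical helper, not part of this ADR):

```golang
// collectBlockWrites drains every MemoryListener, yielding the whole block's
// writes as a single batch; after this call the listeners are empty again.
func collectBlockWrites(listeners []*MemoryListener) []StoreKVPair {
	var pairs []StoreKVPair
	for _, l := range listeners {
		pairs = append(pairs, l.PopStateCache()...)
	}
	return pairs
}
```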
### ListenKVStore
We will create a new `Store` type `listenkv.Store` that the `MultiStore` wraps around a `KVStore` to enable state listening.
@ -137,11 +178,10 @@ func (s *Store) onWrite(delete bool, key, value []byte) {
### MultiStore interface updates
We will update the `MultiStore` interface to allow us to wrap a set of listeners around a specific `KVStore`.
Additionally, we will update the `CacheWrap` and `CacheWrapper` interfaces to enable listening in the caching layer.
We will update the `CommitMultiStore` interface to allow us to wrap a set of listeners around a specific `KVStore`.
```go
type MultiStore interface {
type CommitMultiStore interface {
...
// ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey
@ -153,25 +193,9 @@ type MultiStore interface {
}
```
```go
type CacheWrap interface {
...
// CacheWrapWithListeners recursively wraps again with listening enabled
CacheWrapWithListeners(storeKey types.StoreKey, listeners []WriteListener) CacheWrap
}
type CacheWrapper interface {
...
// CacheWrapWithListeners recursively wraps again with listening enabled
CacheWrapWithListeners(storeKey types.StoreKey, listeners []WriteListener) CacheWrap
}
```
### MultiStore implementation updates
We will modify all of the `Store` and `MultiStore` implementations to satisfy these new interfaces, and adjust the `rootmulti` `GetKVStore` method
We will modify all of the `CommitMultiStore` implementations to satisfy these new interfaces, and adjust the `rootmulti` `GetKVStore` method
to wrap the returned `KVStore` with a `listenkv.Store` if listening is turned on for that `Store`.
```go
@ -189,16 +213,21 @@ func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore {
}
```
We will also adjust the `cachemulti` constructor methods and the `rootmulti` `CacheMultiStore` method to forward the listeners
to and enable listening in the cache layer.
We will also adjust the `rootmulti` `CacheMultiStore` method to wrap the stores with `listenkv.Store` to enable listening when the cache layer writes.
```go
func (rs *Store) CacheMultiStore() types.CacheMultiStore {
stores := make(map[types.StoreKey]types.CacheWrapper)
for k, v := range rs.stores {
stores[k] = v
store := v.(types.KVStore)
// Wire the listenkv.Store to allow listeners to observe the writes from the cache store;
// setting the same listeners on the cache store would observe duplicated writes.
if rs.ListeningEnabled(k) {
store = listenkv.NewStore(store, k, rs.listeners[k])
}
return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.traceContext, rs.listeners)
stores[k] = store
}
return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext())
}
```
@ -208,10 +237,9 @@ func (rs *Store) CacheMultiStore() types.CacheMultiStore {
We will introduce a new `StreamingService` interface for exposing `WriteListener` data streams to external consumers.
In addition to streaming state changes as `StoreKVPair`s, the interface satisfies an `ABCIListener` interface that plugs
into the BaseApp and relays ABCI requests and responses so that the service can group the state changes with the ABCI
requests that affected them and the ABCI responses they affected. The `ABCIListener` interface also exposes a
`ListenSuccess` method which is (optionally) used by the `BaseApp` to await positive acknowledgement of message
receipt from the `StreamingService`.
into the BaseApp and relays ABCI requests and responses so that the service can observe that block metadata as well.
The `WriteListener`s of the `StreamingService` listen to the `rootmulti.Store`, which is only written to at commit time by the cache store of `deliverState`.
```go
// ABCIListener interface used to hook into the ABCI message processing of the BaseApp
@ -222,13 +250,9 @@ type ABCIListener interface {
ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error
// ListenDeliverTx updates the streaming service with the latest DeliverTx messages
ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error
// HaltAppOnDeliveryError whether or not to halt the application when delivery of messages fails
// in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. When `false`, the app will operate in fire-and-forget mode.
// When `true`, the app will gracefully halt and stop the running node. Uncommitted blocks will
// be replayed to all listeners when the node restarts and all successful listeners that received data
// prior to the halt will receive duplicate data. Whether or not a listener operates in a fire-and-forget mode
// is determined by the listener's configuration property `halt_app_on_delivery_error = true|false`.
HaltAppOnDeliveryError() bool
// ListenCommit updates the streaming service with the latest Commit message;
// all the state writes of the current block should have been notified before this message.
ListenCommit(ctx types.Context, res abci.ResponseCommit) error
}
// StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks
@ -269,32 +293,14 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg
...
defer func() {
// call the hooks with the BeginBlock messages
wg := new(sync.WaitGroup)
for _, streamingListener := range app.abciListeners {
streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines
if streamingListener.HaltAppOnDeliveryError() {
// increment the wait group counter
wg.Add(1)
go func() {
// decrement the counter when the go routine completes
defer wg.Done()
if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil {
app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err)
app.halt()
panic(sdkerrors.Wrapf(err, "BeginBlock listening hook failed, height: %d", req.Header.Height))
}
}
}()
} else {
// fire and forget semantics
go func() {
if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil {
app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err)
}
}()
}
}
// wait for all the listener calls to finish
wg.Wait()
return res
}
@ -305,68 +311,29 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc
...
defer func() {
// Call the streaming service hooks with the EndBlock messages
wg := new(sync.WaitGroup)
for _, streamingListener := range app.abciListeners {
streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines
if streamingListener.HaltAppOnDeliveryError() {
// increment the wait group counter
wg.Add(1)
go func() {
// decrement the counter when the go routine completes
defer wg.Done()
if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err)
app.halt()
panic(sdkerrors.Wrapf(err, "EndBlock listening hook failed, height: %d", req.Height))
}
}
}()
} else {
// fire and forget semantics
go func() {
if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err)
}
}()
}
}
// wait for all the listener calls to finish
wg.Wait()
return res
}
```
```go
func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) (res abci.ResponseDeliverTx) {
var abciRes abci.ResponseDeliverTx
defer func() {
// call the hooks with the BeginBlock messages
wg := new(sync.WaitGroup)
// call the hooks with the DeliverTx messages
for _, streamingListener := range app.abciListeners {
streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines
if streamingListener.HaltAppOnDeliveryError() {
// increment the wait group counter
wg.Add(1)
go func() {
// decrement the counter when the go routine completes
defer wg.Done()
if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil {
app.logger.Error("DeliverTx listening hook failed", "err", err)
app.halt()
}
}()
} else {
// fire and forget semantics
go func() {
if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil {
app.logger.Error("DeliverTx listening hook failed", "err", err)
}
}()
if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, res); err != nil {
panic(sdkerrors.Wrap(err, "DeliverTx listening hook failed"))
}
}
// wait for all the listener calls to finish
wg.Wait()
}()
...
@ -375,6 +342,53 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx
}
```
```golang
func (app *BaseApp) Commit() abci.ResponseCommit {
header := app.deliverState.ctx.BlockHeader()
retainHeight := app.GetBlockRetentionHeight(header.Height)
// Write the DeliverTx state into branched storage and commit the MultiStore.
// The write to the DeliverTx state writes all state transitions to the root
// MultiStore (app.cms) so when Commit() is called it persists those values.
app.deliverState.ms.Write()
commitID := app.cms.Commit()
res := abci.ResponseCommit{
Data: commitID.Hash,
RetainHeight: retainHeight,
}
// call the hooks with the Commit message
for _, streamingListener := range app.abciListeners {
if err := streamingListener.ListenCommit(app.deliverState.ctx, res); err != nil {
panic(sdkerrors.Wrapf(err, "Commit listening hook failed, height: %d", header.Height))
}
}
app.logger.Info("commit synced", "commit", fmt.Sprintf("%X", commitID))
...
}
```
#### Error Handling And Async Consumers
`ABCIListener`s are called synchronously inside the consensus state machine; a returned error causes a panic, which in turn halts the consensus state machine. The implementer should be careful not to break consensus unexpectedly or slow it down too much.
For async use cases, one can spawn a goroutine internally to avoid slowing down the consensus state machine, handle the errors internally, and always return `nil` to avoid halting the consensus state machine on error.
Furthermore, in most cases we only need the built-in file streamer to listen to state changes directly inside the Cosmos SDK; other consumers should subscribe to the file streamer output externally.
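
As a sketch of that pattern (assuming the `context.Context`-based interface from `baseapp/streaming.go`; `asyncDeliverTxListener` and its event channel are hypothetical names, not part of this change), a listener can hand events off to a background goroutine and always return `nil`:

```golang
import (
	"context"

	abci "github.com/tendermint/tendermint/abci/types"
)

// deliverTxEvent pairs a DeliverTx request with its response.
type deliverTxEvent struct {
	req abci.RequestDeliverTx
	res abci.ResponseDeliverTx
}

// asyncDeliverTxListener forwards events to a buffered channel that a
// background goroutine consumes, so a slow or failing consumer never
// blocks or halts the consensus state machine.
type asyncDeliverTxListener struct {
	events chan deliverTxEvent
}

func (l *asyncDeliverTxListener) ListenDeliverTx(ctx context.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error {
	select {
	case l.events <- deliverTxEvent{req, res}:
	default:
		// Queue full: drop or log internally, but never return an error,
		// since any returned error panics and halts the node.
	}
	return nil
}
```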
#### File Streamer
We provide a minimal filesystem-based implementation inside the Cosmos SDK, with options to write the output files reliably. The output files can be further consumed by external consumers, so most state listeners don't actually need to live inside the SDK and the node, which improves node robustness and simplifies SDK internals.
The file streamer can be wired in app like this:
```golang
exposeStoreKeys := ... // decide the list of store keys to listen to
service, err := file.NewStreamingService(streamingDir, "", exposeStoreKeys, appCodec, logger)
bApp.SetStreamingService(service)
```
#### Plugin system
We propose a plugin architecture to load and run `StreamingService` implementations. We will introduce a plugin
@ -539,7 +553,7 @@ These changes will provide a means of subscribing to KVStore state changes in re
### Backwards Compatibility
* This ADR changes the `MultiStore`, `CacheWrap`, and `CacheWrapper` interfaces, implementations supporting the previous version of these interfaces will not support the new ones
* This ADR changes the `CommitMultiStore` interface, implementations supporting the previous version of these interfaces will not support the new ones
### Positive
@ -547,7 +561,7 @@ These changes will provide a means of subscribing to KVStore state changes in re
### Negative
* Changes `MultiStore`, `CacheWrap`, and `CacheWrapper` interfaces
* Changes the `CommitMultiStore` interface
### Neutral


@ -1,6 +1,8 @@
syntax = "proto3";
package cosmos.base.store.v1beta1;
import "tendermint/abci/types.proto";
option go_package = "github.com/cosmos/cosmos-sdk/store/types";
// StoreKVPair is a KVStore KVPair used for listening to state changes (Sets and Deletes)
@ -14,3 +16,19 @@ message StoreKVPair {
bytes key = 3;
bytes value = 4;
}
// BlockMetadata contains all the ABCI event data of a block;
// the file streamer dumps it into files together with the state changes.
message BlockMetadata {
// DeliverTx encapsulates the DeliverTx request and response.
message DeliverTx {
tendermint.abci.RequestDeliverTx request = 1;
tendermint.abci.ResponseDeliverTx response = 2;
}
tendermint.abci.RequestBeginBlock request_begin_block = 1;
tendermint.abci.ResponseBeginBlock response_begin_block = 2;
repeated DeliverTx deliver_txs = 3;
tendermint.abci.RequestEndBlock request_end_block = 4;
tendermint.abci.ResponseEndBlock response_end_block = 5;
tendermint.abci.ResponseCommit response_commit = 6;
}


@ -227,6 +227,15 @@ type (
Keys []string `mapstructure:"keys"`
WriteDir string `mapstructure:"write_dir"`
Prefix string `mapstructure:"prefix"`
// OutputMetadata specifies whether to output the block metadata file, which
// includes the ABCI requests/responses; otherwise only the data file is written.
OutputMetadata bool `mapstructure:"output-metadata"`
// StopNodeOnError specifies whether to propagate the streamer errors to the
// consensus state machine; it's necessary for the data integrity of the output.
StopNodeOnError bool `mapstructure:"stop-node-on-error"`
// Fsync specifies whether to call fsync after writing the files; it slows down
// the commit, but prevents data loss in the event of a system crash.
Fsync bool `mapstructure:"fsync"`
}
)
@ -331,6 +340,12 @@ func DefaultConfig() *Config {
Streamers: StreamersConfig{
File: FileStreamerConfig{
Keys: []string{"*"},
WriteDir: "data/file_streamer",
OutputMetadata: true,
StopNodeOnError: true,
// NOTICE: the default config doesn't protect the streamer data integrity
// in the event of a system crash.
Fsync: false,
},
},
Mempool: MempoolConfig{


@ -248,6 +248,13 @@ streamers = [{{ range .Store.Streamers }}{{ printf "%q, " . }}{{end}}]
keys = [{{ range .Streamers.File.Keys }}{{ printf "%q, " . }}{{end}}]
write_dir = "{{ .Streamers.File.WriteDir }}"
prefix = "{{ .Streamers.File.Prefix }}"
# output-metadata specifies whether to output the metadata file, which includes the ABCI
# requests/responses during the processing of the block.
output-metadata = "{{ .Streamers.File.OutputMetadata }}"
# stop-node-on-error specifies whether to propagate the file streamer errors to the consensus state machine.
stop-node-on-error = "{{ .Streamers.File.StopNodeOnError }}"
# fsync specifies whether to call fsync after writing the files.
fsync = "{{ .Streamers.File.Fsync }}"
###############################################################################
### Mempool ###


@ -248,7 +248,7 @@ func NewSimApp(
memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey, "testingkey")
// load state streaming if enabled
if _, _, err := streaming.LoadStreamingServices(bApp, appOpts, appCodec, keys); err != nil {
if _, _, err := streaming.LoadStreamingServices(bApp, appOpts, appCodec, logger, keys); err != nil {
fmt.Printf("failed to load state streaming: %s", err)
os.Exit(1)
}


@ -268,7 +268,7 @@ func NewSimApp(
app.App = appBuilder.Build(logger, db, traceStore, baseAppOptions...)
// load state streaming if enabled
if _, _, err := streaming.LoadStreamingServices(app.App.BaseApp, appOpts, app.appCodec, app.keys); err != nil {
if _, _, err := streaming.LoadStreamingServices(app.App.BaseApp, appOpts, app.appCodec, logger, app.keys); err != nil {
fmt.Printf("failed to load state streaming: %s", err)
os.Exit(1)
}


@ -10,7 +10,6 @@ import (
dbm "github.com/tendermint/tm-db"
"github.com/cosmos/cosmos-sdk/internal/conv"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/tracekv"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/types/kv"
@ -163,11 +162,6 @@ func (store *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types
return NewStore(tracekv.NewStore(store, w, tc))
}
// CacheWrapWithListeners implements the CacheWrapper interface.
func (store *Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
return NewStore(listenkv.NewStore(store, storeKey, listeners))
}
//----------------------------------------
// Iteration


@ -8,7 +8,6 @@ import (
"github.com/cosmos/cosmos-sdk/store/cachekv"
"github.com/cosmos/cosmos-sdk/store/dbadapter"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/tracekv"
"github.com/cosmos/cosmos-sdk/store/types"
)
@ -31,8 +30,6 @@ type Store struct {
traceWriter io.Writer
traceContext types.TraceContext
listeners map[types.StoreKey][]types.WriteListener
}
var _ types.CacheMultiStore = Store{}
@ -43,18 +40,13 @@ var _ types.CacheMultiStore = Store{}
func NewFromKVStore(
store types.KVStore, stores map[types.StoreKey]types.CacheWrapper,
keys map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext,
listeners map[types.StoreKey][]types.WriteListener,
) Store {
if listeners == nil {
listeners = make(map[types.StoreKey][]types.WriteListener)
}
cms := Store{
db: cachekv.NewStore(store),
stores: make(map[types.StoreKey]types.CacheWrap, len(stores)),
keys: keys,
traceWriter: traceWriter,
traceContext: traceContext,
listeners: listeners,
}
for key, store := range stores {
@ -65,9 +57,6 @@ func NewFromKVStore(
store = tracekv.NewStore(store.(types.KVStore), cms.traceWriter, tctx)
}
if cms.ListeningEnabled(key) {
store = listenkv.NewStore(store.(types.KVStore), key, listeners[key])
}
cms.stores[key] = cachekv.NewStore(store.(types.KVStore))
}
@ -78,9 +67,9 @@ func NewFromKVStore(
// CacheWrapper objects. Each CacheWrapper store is a branched store.
func NewStore(
db dbm.DB, stores map[types.StoreKey]types.CacheWrapper, keys map[string]types.StoreKey,
traceWriter io.Writer, traceContext types.TraceContext, listeners map[types.StoreKey][]types.WriteListener,
traceWriter io.Writer, traceContext types.TraceContext,
) Store {
return NewFromKVStore(dbadapter.Store{DB: db}, stores, keys, traceWriter, traceContext, listeners)
return NewFromKVStore(dbadapter.Store{DB: db}, stores, keys, traceWriter, traceContext)
}
func newCacheMultiStoreFromCMS(cms Store) Store {
@ -89,8 +78,7 @@ func newCacheMultiStoreFromCMS(cms Store) Store {
stores[k] = v
}
// don't pass listeners to nested cache store.
return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext, nil)
return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext)
}
// SetTracer sets the tracer for the MultiStore that the underlying
@ -121,23 +109,6 @@ func (cms Store) TracingEnabled() bool {
return cms.traceWriter != nil
}
// AddListeners adds listeners for a specific KVStore
func (cms Store) AddListeners(key types.StoreKey, listeners []types.WriteListener) {
if ls, ok := cms.listeners[key]; ok {
cms.listeners[key] = append(ls, listeners...)
} else {
cms.listeners[key] = listeners
}
}
// ListeningEnabled returns if listening is enabled for a specific KVStore
func (cms Store) ListeningEnabled(key types.StoreKey) bool {
if ls, ok := cms.listeners[key]; ok {
return len(ls) != 0
}
return false
}
// LatestVersion returns the branch version of the store
func (cms Store) LatestVersion() int64 {
panic("cannot get latest version from branch cached multi-store")
@ -166,11 +137,6 @@ func (cms Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cac
return cms.CacheWrap()
}
// CacheWrapWithListeners implements the CacheWrapper interface.
func (cms Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap {
return cms.CacheWrap()
}
// Implements MultiStore.
func (cms Store) CacheMultiStore() types.CacheMultiStore {
return newCacheMultiStoreFromCMS(cms)


@ -6,7 +6,6 @@ import (
dbm "github.com/tendermint/tm-db"
"github.com/cosmos/cosmos-sdk/store/cachekv"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/tracekv"
"github.com/cosmos/cosmos-sdk/store/types"
)
@ -86,10 +85,5 @@ func (dsa Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.Ca
return cachekv.NewStore(tracekv.NewStore(dsa, w, tc))
}
// CacheWrapWithListeners implements the CacheWrapper interface.
func (dsa Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
return cachekv.NewStore(listenkv.NewStore(dsa, storeKey, listeners))
}
// dbm.DB implements KVStore so we can CacheKVStore it.
var _ types.KVStore = Store{}


@ -84,7 +84,4 @@ func TestCacheWraps(t *testing.T) {
cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
cacheWrappedWithListeners := store.CacheWrapWithListeners(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners)
}


@ -92,11 +92,6 @@ func (gs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cac
panic("cannot CacheWrapWithTrace a GasKVStore")
}
// CacheWrapWithListeners implements the CacheWrapper interface.
func (gs *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap {
panic("cannot CacheWrapWithListeners a GasKVStore")
}
func (gs *Store) iterator(start, end []byte, ascending bool) types.Iterator {
var parent types.Iterator
if ascending {


@ -25,7 +25,6 @@ func TestGasKVStoreBasic(t *testing.T) {
require.Equal(t, types.StoreTypeDB, st.GetStoreType())
require.Panics(t, func() { st.CacheWrap() })
require.Panics(t, func() { st.CacheWrapWithTrace(nil, nil) })
require.Panics(t, func() { st.CacheWrapWithListeners(nil, nil) })
require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic")
require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")


@ -14,7 +14,6 @@ import (
dbm "github.com/tendermint/tm-db"
"github.com/cosmos/cosmos-sdk/store/cachekv"
"github.com/cosmos/cosmos-sdk/store/listenkv"
pruningtypes "github.com/cosmos/cosmos-sdk/store/pruning/types"
"github.com/cosmos/cosmos-sdk/store/tracekv"
"github.com/cosmos/cosmos-sdk/store/types"
@ -191,11 +190,6 @@ func (st *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.Ca
return cachekv.NewStore(tracekv.NewStore(st, w, tc))
}
// CacheWrapWithListeners implements the CacheWrapper interface.
func (st *Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
return cachekv.NewStore(listenkv.NewStore(st, storeKey, listeners))
}
// Implements types.KVStore.
func (st *Store) Set(key, value []byte) {
types.AssertValidKey(key)


@ -656,7 +656,4 @@ func TestCacheWraps(t *testing.T) {
cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
cacheWrappedWithListeners := store.CacheWrapWithListeners(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners)
}


@ -141,12 +141,6 @@ func (s *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cach
panic("cannot CacheWrapWithTrace a ListenKVStore")
}
// CacheWrapWithListeners implements the KVStore interface. It panics as a
// Store cannot be cache wrapped.
func (s *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap {
panic("cannot CacheWrapWithListeners a ListenKVStore")
}
// onWrite writes a KVStore operation to all of the WriteListeners
func (s *Store) onWrite(delete bool, key, value []byte) {
for _, l := range s.listeners {


@ -292,8 +292,3 @@ func TestListenKVStoreCacheWrapWithTrace(t *testing.T) {
store := newEmptyListenKVStore(nil)
require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) })
}
func TestListenKVStoreCacheWrapWithListeners(t *testing.T) {
store := newEmptyListenKVStore(nil)
require.Panics(t, func() { store.CacheWrapWithListeners(nil, nil) })
}


@ -34,9 +34,6 @@ func TestStore(t *testing.T) {
cacheWrappedWithTrace := db.CacheWrapWithTrace(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
cacheWrappedWithListeners := db.CacheWrapWithListeners(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners)
}
func TestCommit(t *testing.T) {


@ -7,7 +7,6 @@ import (
"github.com/cosmos/cosmos-sdk/store/cachekv"
"github.com/cosmos/cosmos-sdk/store/dbadapter"
"github.com/cosmos/cosmos-sdk/store/listenkv"
pruningtypes "github.com/cosmos/cosmos-sdk/store/pruning/types"
"github.com/cosmos/cosmos-sdk/store/tracekv"
"github.com/cosmos/cosmos-sdk/store/types"
@ -47,11 +46,6 @@ func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.Cach
return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}
// CacheWrapWithListeners implements the CacheWrapper interface.
func (s Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
return cachekv.NewStore(listenkv.NewStore(s, storeKey, listeners))
}
// Commit performs a no-op as entries are persistent between commitments.
func (s *Store) Commit() (id types.CommitID) { return }


@ -6,7 +6,6 @@ import (
"io"
"github.com/cosmos/cosmos-sdk/store/cachekv"
"github.com/cosmos/cosmos-sdk/store/listenkv"
"github.com/cosmos/cosmos-sdk/store/tracekv"
"github.com/cosmos/cosmos-sdk/store/types"
)
@ -58,11 +57,6 @@ func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.Cach
return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}
// CacheWrapWithListeners implements the CacheWrapper interface.
func (s Store) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
return cachekv.NewStore(listenkv.NewStore(s, storeKey, listeners))
}
// Implements KVStore
func (s Store) Get(key []byte) []byte {
res := s.parent.Get(s.key(key))


@ -438,7 +438,4 @@ func TestCacheWraps(t *testing.T) {
cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
cacheWrappedWithListeners := store.CacheWrapWithListeners(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithListeners)
}


@ -257,7 +257,7 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error {
// If it was deleted, remove all data
if upgrades.IsDeleted(key.Name()) {
if err := deleteKVStore(store.(types.KVStore)); err != nil {
if err := deleteKVStore(types.KVStore(store)); err != nil {
return errors.Wrapf(err, "failed to delete store %s", key.Name())
}
rs.removalMap[key] = true
@ -275,7 +275,7 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error {
}
// move all data
if err := moveKVStoreData(oldStore.(types.KVStore), store.(types.KVStore)); err != nil {
if err := moveKVStoreData(types.KVStore(oldStore), types.KVStore(store)); err != nil {
return errors.Wrapf(err, "failed to move store %s -> %s", oldName, key.Name())
}
@ -475,19 +475,20 @@ func (rs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Cac
return rs.CacheWrap()
}
// CacheWrapWithListeners implements the CacheWrapper interface.
func (rs *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap {
return rs.CacheWrap()
}
// CacheMultiStore creates ephemeral branch of the multi-store and returns a CacheMultiStore.
// It implements the MultiStore interface.
func (rs *Store) CacheMultiStore() types.CacheMultiStore {
stores := make(map[types.StoreKey]types.CacheWrapper)
for k, v := range rs.stores {
stores[k] = v
store := types.KVStore(v)
// Wire the listenkv.Store to allow listeners to observe the writes from the cache store;
// setting the same listeners on the cache store would observe duplicated writes.
if rs.ListeningEnabled(k) {
store = listenkv.NewStore(store, k, rs.listeners[k])
}
return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext(), rs.listeners)
stores[k] = store
}
return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.getTracingContext())
}
// CacheMultiStoreWithVersion is analogous to CacheMultiStore except that it
@ -497,6 +498,7 @@ func (rs *Store) CacheMultiStore() types.CacheMultiStore {
func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStore, error) {
cachedStores := make(map[types.StoreKey]types.CacheWrapper)
for key, store := range rs.stores {
var cacheStore types.KVStore
switch store.GetStoreType() {
case types.StoreTypeIAVL:
// If the store is wrapped with an inter-block cache, we must first unwrap
@ -505,19 +507,25 @@ func (rs *Store) CacheMultiStoreWithVersion(version int64) (types.CacheMultiStor
// Attempt to lazy-load an already saved IAVL store version. If the
// version does not exist or is pruned, an error should be returned.
iavlStore, err := store.(*iavl.Store).GetImmutable(version)
var err error
cacheStore, err = store.(*iavl.Store).GetImmutable(version)
if err != nil {
return nil, err
}
cachedStores[key] = iavlStore
default:
cachedStores[key] = store
}
cacheStore = store
}
return cachemulti.NewStore(rs.db, cachedStores, rs.keysByName, rs.traceWriter, rs.getTracingContext(), rs.listeners), nil
// Wire the listenkv.Store to allow listeners to observe the writes from the cache store;
// setting the same listeners on the cache store would observe duplicated writes.
if rs.ListeningEnabled(key) {
cacheStore = listenkv.NewStore(cacheStore, key, rs.listeners[key])
}
cachedStores[key] = cacheStore
}
return cachemulti.NewStore(rs.db, cachedStores, rs.keysByName, rs.traceWriter, rs.getTracingContext()), nil
}
// GetStore returns a mounted Store for a given StoreKey. If the StoreKey does
@ -546,7 +554,7 @@ func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore {
if s == nil {
panic(fmt.Sprintf("store does not exist for key: %s", key.Name()))
}
store := s.(types.KVStore)
store := types.KVStore(s)
if rs.TracingEnabled() {
store = tracekv.NewStore(store, rs.traceWriter, rs.getTracingContext())


@ -741,9 +741,6 @@ func TestCacheWraps(t *testing.T) {
cacheWrappedWithTrace := multi.CacheWrapWithTrace(nil, nil)
require.IsType(t, cachemulti.Store{}, cacheWrappedWithTrace)
cacheWrappedWithListeners := multi.CacheWrapWithListeners(nil, nil)
require.IsType(t, cachemulti.Store{}, cacheWrappedWithListeners)
}
func TestTraceConcurrency(t *testing.T) {


@ -2,20 +2,24 @@ package streaming
import (
"fmt"
"os"
"path"
"strings"
"sync"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/codec"
serverTypes "github.com/cosmos/cosmos-sdk/server/types"
"github.com/cosmos/cosmos-sdk/store/streaming/file"
"github.com/cosmos/cosmos-sdk/store/types"
"github.com/tendermint/tendermint/libs/log"
"github.com/spf13/cast"
)
// ServiceConstructor is used to construct a streaming service
type ServiceConstructor func(opts serverTypes.AppOptions, keys []types.StoreKey, marshaller codec.BinaryCodec) (baseapp.StreamingService, error)
type ServiceConstructor func(serverTypes.AppOptions, []types.StoreKey, codec.BinaryCodec, log.Logger) (baseapp.StreamingService, error)
// ServiceType enum for specifying the type of StreamingService
type ServiceType int
@ -26,7 +30,19 @@ const (
// add more in the future
)
// ServiceTypeFromString returns the streaming.ServiceType corresponding to the provided name
// Streaming option keys
const (
OptStreamersFilePrefix = "streamers.file.prefix"
OptStreamersFileWriteDir = "streamers.file.write_dir"
OptStreamersFileOutputMetadata = "streamers.file.output-metadata"
OptStreamersFileStopNodeOnError = "streamers.file.stop-node-on-error"
OptStreamersFileFsync = "streamers.file.fsync"
OptStoreStreamers = "store.streamers"
)
// ServiceTypeFromString returns the streaming.ServiceType corresponding to the
// provided name.
func ServiceTypeFromString(name string) ServiceType {
switch strings.ToLower(name) {
case "file", "f":
@ -63,16 +79,47 @@ func NewServiceConstructor(name string) (ServiceConstructor, error) {
return nil, fmt.Errorf("streaming service constructor of type %s not found", ssType.String())
}
// NewFileStreamingService is the streaming.ServiceConstructor function for creating a FileStreamingService
func NewFileStreamingService(opts serverTypes.AppOptions, keys []types.StoreKey, marshaller codec.BinaryCodec) (baseapp.StreamingService, error) {
filePrefix := cast.ToString(opts.Get("streamers.file.prefix"))
fileDir := cast.ToString(opts.Get("streamers.file.write_dir"))
return file.NewStreamingService(fileDir, filePrefix, keys, marshaller)
// NewFileStreamingService is the streaming.ServiceConstructor function for
// creating a FileStreamingService.
func NewFileStreamingService(
opts serverTypes.AppOptions,
keys []types.StoreKey,
marshaller codec.BinaryCodec,
logger log.Logger,
) (baseapp.StreamingService, error) {
homePath := cast.ToString(opts.Get(flags.FlagHome))
filePrefix := cast.ToString(opts.Get(OptStreamersFilePrefix))
fileDir := cast.ToString(opts.Get(OptStreamersFileWriteDir))
outputMetadata := cast.ToBool(opts.Get(OptStreamersFileOutputMetadata))
stopNodeOnErr := cast.ToBool(opts.Get(OptStreamersFileStopNodeOnError))
fsync := cast.ToBool(opts.Get(OptStreamersFileFsync))
// a relative path is resolved against the node home directory.
if !path.IsAbs(fileDir) {
fileDir = path.Join(homePath, fileDir)
}
// try to create the output directory if it does not exist.
if _, err := os.Stat(fileDir); os.IsNotExist(err) {
if err = os.MkdirAll(fileDir, os.ModePerm); err != nil {
return nil, err
}
}
return file.NewStreamingService(fileDir, filePrefix, keys, marshaller, logger, outputMetadata, stopNodeOnErr, fsync)
}
// LoadStreamingServices is a function for loading StreamingServices onto the BaseApp using the provided AppOptions, codec, and keys
// It returns the WaitGroup and quit channel used to synchronize with the streaming services and any error that occurs during the setup
func LoadStreamingServices(bApp *baseapp.BaseApp, appOpts serverTypes.AppOptions, appCodec codec.BinaryCodec, keys map[string]*types.KVStoreKey) ([]baseapp.StreamingService, *sync.WaitGroup, error) {
// LoadStreamingServices is a function for loading StreamingServices onto the
// BaseApp using the provided AppOptions, codec, and keys. It returns the
// WaitGroup and quit channel used to synchronize with the streaming services
// and any error that occurs during the setup.
func LoadStreamingServices(
bApp *baseapp.BaseApp,
appOpts serverTypes.AppOptions,
appCodec codec.BinaryCodec,
logger log.Logger,
keys map[string]*types.KVStoreKey,
) ([]baseapp.StreamingService, *sync.WaitGroup, error) {
// waitgroup and quit channel for optional shutdown coordination of the streaming service(s)
wg := new(sync.WaitGroup)
// configure state listening capabilities using AppOptions
@ -107,8 +154,10 @@ func LoadStreamingServices(bApp *baseapp.BaseApp, appOpts serverTypes.AppOptions
}
return nil, nil, err
}
// generate the streaming service using the constructor, appOptions, and the StoreKeys we want to expose
streamingService, err := constructor(appOpts, exposeStoreKeys, appCodec)
// Generate the streaming service using the constructor, appOptions, and the
// StoreKeys we want to expose.
streamingService, err := constructor(appOpts, exposeStoreKeys, appCodec, logger)
if err != nil {
// close any services we may have already spun up before hitting the error on this one
for _, activeStreamer := range activeStreamers {


@ -22,7 +22,13 @@ import (
type fakeOptions struct{}
func (f *fakeOptions) Get(string) interface{} { return nil }
func (f *fakeOptions) Get(key string) interface{} {
if key == "streamers.file.write_dir" {
return "data/file_streamer"
}
return nil
}
var (
mockOptions = new(fakeOptions)
@ -40,7 +46,7 @@ func TestStreamingServiceConstructor(t *testing.T) {
var expectedType streaming.ServiceConstructor
require.IsType(t, expectedType, constructor)
serv, err := constructor(mockOptions, mockKeys, testMarshaller)
serv, err := constructor(mockOptions, mockKeys, testMarshaller, log.NewNopLogger())
require.Nil(t, err)
require.IsType(t, &file.StreamingService{}, serv)
listeners := serv.Listeners()
@ -78,7 +84,7 @@ func TestLoadStreamingServices(t *testing.T) {
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
activeStreamers, _, err := streaming.LoadStreamingServices(bApp, tc.appOpts, encCdc.Codec, keys)
activeStreamers, _, err := streaming.LoadStreamingServices(bApp, tc.appOpts, encCdc.Codec, log.NewNopLogger(), keys)
require.NoError(t, err)
require.Equal(t, tc.activeStreamersLen, len(activeStreamers))
})
@ -95,6 +101,8 @@ func (ao streamingAppOptions) Get(o string) interface{} {
return []string{"file"}
case "streamers.file.keys":
return ao.keys
case "streamers.file.write_dir":
return "data/file_streamer"
default:
return nil
}


@ -25,42 +25,67 @@ We turn the service on by adding its name, "file", to `store.streamers`- the lis
In `streamers.file` we include six configuration parameters for the file streaming service:
1. `streamers.x.keys` contains the list of `StoreKey` names for the KVStores to expose using this service.
In order to expose *all* KVStores, we can include `*` in this list. An empty list is equivalent to turning the service off.
1. `streamers.file.keys` contains the list of `StoreKey` names for the KVStores to expose using this service.
In order to expose *all* KVStores, we can include `*` in this list. An empty list is equivalent to turning the service off.
2. `streamers.file.write_dir` contains the path to the directory to write the files to.
3. `streamers.file.prefix` contains an optional prefix to prepend to the output files to prevent potential collisions
with other App `StreamingService` output files.
with other App `StreamingService` output files.
4. `streamers.file.output-metadata` specifies whether to output the metadata file; otherwise only the data file is written.
5. `streamers.file.stop-node-on-error` specifies whether to propagate the error to the consensus state machine; this is necessary for data integrity when the node restarts.
6. `streamers.file.fsync` specifies whether to call fsync after writing the files; this is necessary for data integrity in the event of a system crash, but slows down the commit time.
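
Put together, the resulting `app.toml` section might look like this (a sketch using the defaults introduced in this change):

```toml
[store]
streamers = ["file"]

[streamers.file]
keys = ["*"]
write_dir = "data/file_streamer"
prefix = ""
output-metadata = "true"
stop-node-on-error = "true"
fsync = "false"
```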
### Encoding
For each pair of `BeginBlock` requests and responses, a file is created and named `block-{N}-begin`, where N is the block number.
At the head of this file the length-prefixed protobuf encoded `BeginBlock` request is written.
At the tail of this file the length-prefixed protobuf encoded `BeginBlock` response is written.
In between these two encoded messages, the state changes that occurred due to the `BeginBlock` request are written chronologically as
a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service
is configured to listen to.
For each block, two files are created and named `block-{N}-meta` and `block-{N}-data`, where `N` is the block number.
For each pair of `DeliverTx` requests and responses, a file is created and named `block-{N}-tx-{M}` where N is the block number and M
is the tx number in the block (i.e. 0, 1, 2...).
At the head of this file the length-prefixed protobuf encoded `DeliverTx` request is written.
At the tail of this file the length-prefixed protobuf encoded `DeliverTx` response is written.
In between these two encoded messages, the state changes that occurred due to the `DeliverTx` request are written chronologically as
a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service
is configured to listen to.
The meta file contains the protobuf-encoded message `BlockMetadata`, which contains the ABCI event requests and responses of the block:
For each pair of `EndBlock` requests and responses, a file is created and named `block-{N}-end`, where N is the block number.
At the head of this file the length-prefixed protobuf encoded `EndBlock` request is written.
At the tail of this file the length-prefixed protobuf encoded `EndBlock` response is written.
In between these two encoded messages, the state changes that occurred due to the `EndBlock` request are written chronologically as
a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores the service
is configured to listen to.
```protobuf
message BlockMetadata {
message DeliverTx {
tendermint.abci.RequestDeliverTx request = 1;
tendermint.abci.ResponseDeliverTx response = 2;
}
tendermint.abci.RequestBeginBlock request_begin_block = 1;
tendermint.abci.ResponseBeginBlock response_begin_block = 2;
repeated DeliverTx deliver_txs = 3;
tendermint.abci.RequestEndBlock request_end_block = 4;
tendermint.abci.ResponseEndBlock response_end_block = 5;
tendermint.abci.ResponseCommit response_commit = 6;
}
```
The data file contains a series of length-prefixed protobuf encoded `StoreKVPair`s representing `Set` and `Delete` operations within the KVStores during the execution of the block.
Both meta and data files are prefixed with the length of the data content so that the consumer can detect incomplete files; the length is encoded as 8 big-endian bytes.
The files are written at the ABCI commit event. By default, any error is propagated and interrupts the consensus state machine, but fsync is not called; this gives good performance, with the risk of losing data in the rare event of a system crash.
### Decoding
To decode the files written in the above format we read all the bytes from a given file into memory and segment them into proto
messages based on the length-prefixing of each message. Once segmented, it is known that the first message is the ABCI request,
the last message is the ABCI response, and that every message in between is a `StoreKVPair`. This enables us to decode each segment into
the appropriate message type.
The pseudo-code for decoding is like this:
The type of ABCI req/res, the block height, and the transaction index (where relevant) is known
from the file name, and the KVStore each `StoreKVPair` originates from is known since the `StoreKey` is included as a field in the proto message.
```python
def decode_meta_file(file):
    bz = file.read(8)
    if len(bz) < 8:
        raise "incomplete file exception"
    size = int.from_bytes(bz, 'big')
    if file.size != size + 8:
        raise "incomplete file exception"
    return decode_protobuf_message(BlockMetadata, file)

def decode_data_file(file):
    bz = file.read(8)
    if len(bz) < 8:
        raise "incomplete file exception"
    size = int.from_bytes(bz, 'big')
    if file.size != size + 8:
        raise "incomplete file exception"
    while not file.eof():
        yield decode_length_prefixed_protobuf_message(StoreKVPair, file)
```
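
The same decoding can be written in Go; a minimal sketch (assuming the per-message prefix is the standard protobuf uvarint written by `MarshalLengthPrefixed`; `decodeDataFile` is a hypothetical helper, not part of the SDK):

```go
import (
	"encoding/binary"
	"errors"

	storetypes "github.com/cosmos/cosmos-sdk/store/types"
)

// decodeDataFile parses the contents of a block-{N}-data file: an 8-byte
// big-endian length header followed by uvarint-length-prefixed StoreKVPairs.
func decodeDataFile(bz []byte) ([]storetypes.StoreKVPair, error) {
	if len(bz) < 8 || binary.BigEndian.Uint64(bz[:8]) != uint64(len(bz)-8) {
		return nil, errors.New("incomplete file")
	}
	body := bz[8:]
	var pairs []storetypes.StoreKVPair
	for len(body) > 0 {
		size, n := binary.Uvarint(body)
		if n <= 0 || int(size) > len(body)-n {
			return nil, errors.New("corrupted data file")
		}
		end := n + int(size)
		var pair storetypes.StoreKVPair
		if err := pair.Unmarshal(body[n:end]); err != nil {
			return nil, err
		}
		pairs = append(pairs, pair)
		body = body[end:]
	}
	return pairs, nil
}
```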


@ -1,65 +1,57 @@
package file
import (
"errors"
"bytes"
"context"
"fmt"
"io"
"os"
"path"
"path/filepath"
"sort"
"sync"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
"github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
)
var _ baseapp.StreamingService = &StreamingService{}
// StreamingService is a concrete implementation of StreamingService that writes state changes out to files
type StreamingService struct {
listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp
srcChan <-chan []byte // the channel that all the WriteListeners write their data out to
storeListeners []*types.MemoryListener // a series of KVStore listeners for each KVStore
filePrefix string // optional prefix for each of the generated files
writeDir string // directory to write files into
codec codec.BinaryCodec // marshaller used for re-marshalling the ABCI messages to write them out to the destination files
stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received
stateCacheLock *sync.Mutex // mutex for the state cache
currentBlockNumber int64 // the current block number
currentTxIndex int64 // the index of the current tx
quitChan chan struct{} // channel to synchronize closure
}
logger log.Logger
// IntermediateWriter is used so that we do not need to update the underlying io.Writer
// inside the StoreKVPairWriteListener everytime we begin writing to a new file
type IntermediateWriter struct {
outChan chan<- []byte
}
// NewIntermediateWriter create an instance of an IntermediateWriter that sends to the provided channel
func NewIntermediateWriter(outChan chan<- []byte) *IntermediateWriter {
return &IntermediateWriter{
outChan: outChan,
}
}
// Write satisfies io.Writer
func (iw *IntermediateWriter) Write(b []byte) (int, error) {
iw.outChan <- b
return len(b), nil
currentBlockNumber int64
blockMetadata types.BlockMetadata
// if true, write the metadata file; otherwise only the data file is written.
outputMetadata bool
// if true, a failed commit will panic and stop the consensus state machine to ensure the
// eventual consistency of the output; otherwise the error is ignored, with the risk of losing data.
stopNodeOnErr bool
// if true, file.Sync() is called to make sure the data is persisted to disk; otherwise data may be lost when the system crashes.
fsync bool
}
// NewStreamingService creates a new StreamingService for the provided writeDir, (optional) filePrefix, and storeKeys
func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey, c codec.BinaryCodec) (*StreamingService, error) {
listenChan := make(chan []byte)
iw := NewIntermediateWriter(listenChan)
listener := types.NewStoreKVPairWriteListener(iw, c)
listeners := make(map[types.StoreKey][]types.WriteListener, len(storeKeys))
func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey, c codec.BinaryCodec, logger log.Logger, outputMetadata bool, stopNodeOnErr bool, fsync bool) (*StreamingService, error) {
// sort storeKeys for deterministic output
sort.SliceStable(storeKeys, func(i, j int) bool {
return storeKeys[i].Name() < storeKeys[j].Name()
})
listeners := make([]*types.MemoryListener, len(storeKeys))
// in this case, we are using the same listener for each Store
for _, key := range storeKeys {
listeners[key] = append(listeners[key], listener)
for i, key := range storeKeys {
listeners[i] = types.NewMemoryListener(key)
}
// check that the writeDir exists and is writable so that we can catch the error here at initialization if it is not
// we don't open a dstFile until we receive our first ABCI message
@ -67,13 +59,14 @@ func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey
return nil, err
}
return &StreamingService{
listeners: listeners,
srcChan: listenChan,
storeListeners: listeners,
filePrefix: filePrefix,
writeDir: writeDir,
codec: c,
stateCache: make([][]byte, 0),
stateCacheLock: new(sync.Mutex),
logger: logger,
outputMetadata: outputMetadata,
stopNodeOnErr: stopNodeOnErr,
fsync: fsync,
}, nil
}
@ -81,201 +74,106 @@ func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey
// It returns the StreamingService's underlying WriteListeners
// Used for registering the underlying WriteListeners with the BaseApp
func (fss *StreamingService) Listeners() map[types.StoreKey][]types.WriteListener {
return fss.listeners
listeners := make(map[types.StoreKey][]types.WriteListener, len(fss.storeListeners))
for _, listener := range fss.storeListeners {
listeners[listener.StoreKey()] = []types.WriteListener{listener}
}
return listeners
}
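As a sketch of the wiring, an application can feed these listeners to its `CommitMultiStore` before starting the streaming service; the `cms` and `svc` names here are illustrative:

```go
// Attach each store's listeners to the rootmulti store; with this
// change, listening happens only at the CommitMultiStore level.
for key, listeners := range svc.Listeners() {
	cms.AddListeners(key, listeners)
}
```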
// ListenBeginBlock satisfies the baseapp.ABCIListener interface
// It writes the received BeginBlock request and response and the resulting state changes
// out to a file as described in the naming schema above
func (fss *StreamingService) ListenBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) (rerr error) {
// generate the new file
dstFile, err := fss.openBeginBlockFile(req)
if err != nil {
return err
}
defer func() {
cerr := dstFile.Close()
if rerr == nil {
rerr = cerr
}
}()
// write req to file
lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req)
if err != nil {
return err
}
if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil {
return err
}
// write all state changes cached for this stage to file
fss.stateCacheLock.Lock()
for _, stateChange := range fss.stateCache {
if _, err = dstFile.Write(stateChange); err != nil {
fss.stateCache = nil
fss.stateCacheLock.Unlock()
return err
}
}
// reset cache
fss.stateCache = nil
fss.stateCacheLock.Unlock()
// write res to file
lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res)
if err != nil {
return err
}
_, err = dstFile.Write(lengthPrefixedResBytes)
return err
}
func (fss *StreamingService) openBeginBlockFile(req abci.RequestBeginBlock) (*os.File, error) {
fss.currentBlockNumber = req.GetHeader().Height
fss.currentTxIndex = 0
fileName := fmt.Sprintf("block-%d-begin", fss.currentBlockNumber)
if fss.filePrefix != "" {
fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName)
}
return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0o600)
func (fss *StreamingService) ListenBeginBlock(ctx context.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) (rerr error) {
fss.blockMetadata.RequestBeginBlock = &req
fss.blockMetadata.ResponseBeginBlock = &res
fss.currentBlockNumber = req.Header.Height
return nil
}
// ListenDeliverTx satisfies the baseapp.ABCIListener interface
// It writes the received DeliverTx request and response and the resulting state changes
// out to a file as described in the naming schema above
func (fss *StreamingService) ListenDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) (rerr error) {
// generate the new file
dstFile, err := fss.openDeliverTxFile()
if err != nil {
return err
}
defer func() {
cerr := dstFile.Close()
if rerr == nil {
rerr = cerr
}
}()
// write req to file
lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req)
if err != nil {
return err
}
if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil {
return err
}
// write all state changes cached for this stage to file
fss.stateCacheLock.Lock()
for _, stateChange := range fss.stateCache {
if _, err = dstFile.Write(stateChange); err != nil {
fss.stateCache = nil
fss.stateCacheLock.Unlock()
return err
}
}
// reset cache
fss.stateCache = nil
fss.stateCacheLock.Unlock()
// write res to file
lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res)
if err != nil {
return err
}
_, err = dstFile.Write(lengthPrefixedResBytes)
return err
}
func (fss *StreamingService) openDeliverTxFile() (*os.File, error) {
fileName := fmt.Sprintf("block-%d-tx-%d", fss.currentBlockNumber, fss.currentTxIndex)
if fss.filePrefix != "" {
fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName)
}
fss.currentTxIndex++
return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0o600)
func (fss *StreamingService) ListenDeliverTx(ctx context.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) (rerr error) {
fss.blockMetadata.DeliverTxs = append(fss.blockMetadata.DeliverTxs, &types.BlockMetadata_DeliverTx{
Request: &req,
Response: &res,
})
return nil
}
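// NOTE: the Listen{BeginBlock,DeliverTx,EndBlock} hooks only buffer the
// ABCI requests/responses in memory; all file output is deferred to
// ListenCommit, since writes from different ABCI stages can no longer be
// distinguished once listening happens on the rootmulti store only.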
// ListenEndBlock satisfies the baseapp.ABCIListener interface
// It writes the received EndBlock request and response and the resulting state changes
// out to a file as described in the naming schema above
func (fss *StreamingService) ListenEndBlock(ctx sdk.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) (rerr error) {
// generate the new file
dstFile, err := fss.openEndBlockFile()
if err != nil {
return err
}
defer func() {
cerr := dstFile.Close()
if rerr == nil {
rerr = cerr
}
}()
// write req to file
lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req)
if err != nil {
return err
}
if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil {
return err
}
// write all state changes cached for this stage to file
fss.stateCacheLock.Lock()
for _, stateChange := range fss.stateCache {
if _, err = dstFile.Write(stateChange); err != nil {
fss.stateCache = nil
fss.stateCacheLock.Unlock()
return err
}
}
// reset cache
fss.stateCache = nil
fss.stateCacheLock.Unlock()
// write res to file
lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res)
if err != nil {
return err
}
_, err = dstFile.Write(lengthPrefixedResBytes)
return err
func (fss *StreamingService) ListenEndBlock(ctx context.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) (rerr error) {
fss.blockMetadata.RequestEndBlock = &req
fss.blockMetadata.ResponseEndBlock = &res
return nil
}
func (fss *StreamingService) openEndBlockFile() (*os.File, error) {
fileName := fmt.Sprintf("block-%d-end", fss.currentBlockNumber)
if fss.filePrefix != "" {
fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName)
// ListenCommit satisfies the baseapp.ABCIListener interface
func (fss *StreamingService) ListenCommit(ctx context.Context, res abci.ResponseCommit) error {
err := fss.doListenCommit(ctx, res)
if err != nil {
fss.logger.Error("Commit listening hook failed", "height", fss.currentBlockNumber, "err", err)
if fss.stopNodeOnErr {
return err
}
return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0o600)
}
return nil
}
func (fss *StreamingService) doListenCommit(ctx context.Context, res abci.ResponseCommit) (err error) {
fss.blockMetadata.ResponseCommit = &res
// write to the target files; the file size is written at the beginning, which can be used to detect completeness.
metaFileName := fmt.Sprintf("block-%d-meta", fss.currentBlockNumber)
dataFileName := fmt.Sprintf("block-%d-data", fss.currentBlockNumber)
if fss.filePrefix != "" {
metaFileName = fmt.Sprintf("%s-%s", fss.filePrefix, metaFileName)
dataFileName = fmt.Sprintf("%s-%s", fss.filePrefix, dataFileName)
}
if fss.outputMetadata {
bz, err := fss.codec.Marshal(&fss.blockMetadata)
if err != nil {
return err
}
if err := writeLengthPrefixedFile(path.Join(fss.writeDir, metaFileName), bz, fss.fsync); err != nil {
return err
}
}
var buf bytes.Buffer
if err := fss.writeBlockData(&buf); err != nil {
return err
}
return writeLengthPrefixedFile(path.Join(fss.writeDir, dataFileName), buf.Bytes(), fss.fsync)
}
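// NOTE: storeListeners were sorted by store key name at construction, so
// writeBlockData concatenates each store's writes in a deterministic order.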
func (fss *StreamingService) writeBlockData(writer io.Writer) error {
for _, listener := range fss.storeListeners {
cache := listener.PopStateCache()
for i := range cache {
bz, err := fss.codec.MarshalLengthPrefixed(&cache[i])
if err != nil {
return err
}
if _, err = writer.Write(bz); err != nil {
return err
}
}
}
return nil
}
// Stream satisfies the baseapp.StreamingService interface
// It spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs
// and caches them in the order they were received
// returns an error if it is called twice
func (fss *StreamingService) Stream(wg *sync.WaitGroup) error {
if fss.quitChan != nil {
return errors.New("`Stream` has already been called. The stream needs to be closed before it can be started again")
}
fss.quitChan = make(chan struct{})
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-fss.quitChan:
fss.quitChan = nil
return
case by := <-fss.srcChan:
fss.stateCacheLock.Lock()
fss.stateCache = append(fss.stateCache, by)
fss.stateCacheLock.Unlock()
}
}
}()
return nil
}
// Close satisfies the io.Closer interface, which satisfies the baseapp.StreamingService interface
func (fss *StreamingService) Close() error {
close(fss.quitChan)
return nil
}
@ -288,3 +186,32 @@ func isDirWriteable(dir string) error {
}
return os.Remove(f)
}
func writeLengthPrefixedFile(path string, data []byte, fsync bool) (err error) {
var f *os.File
f, err = os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
return sdkerrors.Wrapf(err, "open file failed: %s", path)
}
defer func() {
// avoid overriding the real error with file close error
if err1 := f.Close(); err1 != nil && err == nil {
err = sdkerrors.Wrapf(err1, "close file failed: %s", path)
}
}()
_, err = f.Write(sdk.Uint64ToBigEndian(uint64(len(data))))
if err != nil {
return sdkerrors.Wrapf(err, "write length prefix failed: %s", path)
}
_, err = f.Write(data)
if err != nil {
return sdkerrors.Wrapf(err, "write block data failed: %s", path)
}
if fsync {
err = f.Sync()
if err != nil {
return sdkerrors.Wrapf(err, "fsync failed: %s", path)
}
}
return
}

View File

@ -2,6 +2,7 @@ package file
import (
"encoding/binary"
"errors"
"fmt"
"os"
"path/filepath"
@ -10,6 +11,7 @@ import (
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/cosmos/cosmos-sdk/codec"
@ -56,6 +58,10 @@ var (
ConsensusParamUpdates: &tmproto.ConsensusParams{},
ValidatorUpdates: []abci.ValidatorUpdate{},
}
testCommitRes = abci.ResponseCommit{
Data: []byte{1},
RetainHeight: 0,
}
mockTxBytes1 = []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}
testDeliverTxReq1 = abci.RequestDeliverTx{
Tx: mockTxBytes1,
@ -104,25 +110,6 @@ var (
mockValue3 = []byte{5, 4, 3}
)
func TestIntermediateWriter(t *testing.T) {
outChan := make(chan []byte, 0)
iw := NewIntermediateWriter(outChan)
require.IsType(t, &IntermediateWriter{}, iw)
testBytes := []byte{1, 2, 3, 4, 5}
var length int
var err error
waitChan := make(chan struct{}, 0)
go func() {
length, err = iw.Write(testBytes)
waitChan <- struct{}{}
}()
receivedBytes := <-outChan
<-waitChan
require.Equal(t, len(testBytes), length)
require.Equal(t, testBytes, receivedBytes)
require.Nil(t, err)
}
func TestFileStreamingService(t *testing.T) {
if os.Getenv("CI") != "" {
t.Skip("Skipping TestFileStreamingService in CI environment")
@ -132,29 +119,23 @@ func TestFileStreamingService(t *testing.T) {
defer os.RemoveAll(testDir)
testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2}
testStreamingService, err = NewStreamingService(testDir, testPrefix, testKeys, testMarshaller)
testStreamingService, err = NewStreamingService(testDir, testPrefix, testKeys, testMarshaller, log.NewNopLogger(), true, false, false)
require.Nil(t, err)
require.IsType(t, &StreamingService{}, testStreamingService)
require.Equal(t, testPrefix, testStreamingService.filePrefix)
require.Equal(t, testDir, testStreamingService.writeDir)
require.Equal(t, testMarshaller, testStreamingService.codec)
testListener1 = testStreamingService.listeners[mockStoreKey1][0]
testListener2 = testStreamingService.listeners[mockStoreKey2][0]
testListener1 = testStreamingService.storeListeners[0]
testListener2 = testStreamingService.storeListeners[1]
wg := new(sync.WaitGroup)
testStreamingService.Stream(wg)
testListenBeginBlock(t)
testListenDeliverTx1(t)
testListenDeliverTx2(t)
testListenEndBlock(t)
testListenBlock(t)
testStreamingService.Close()
wg.Wait()
}
func testListenBeginBlock(t *testing.T) {
expectedBeginBlockReqBytes, err := testMarshaller.Marshal(&testBeginBlockReq)
require.Nil(t, err)
expectedBeginBlockResBytes, err := testMarshaller.Marshal(&testBeginBlockRes)
require.Nil(t, err)
func testListenBlock(t *testing.T) {
var expectKVPairsStore1, expectKVPairsStore2 [][]byte
// write state changes
testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false)
@ -183,162 +164,102 @@ func testListenBeginBlock(t *testing.T) {
Delete: false,
})
require.Nil(t, err)
expectKVPairsStore1 = append(expectKVPairsStore1, expectedKVPair1, expectedKVPair3)
expectKVPairsStore2 = append(expectKVPairsStore2, expectedKVPair2)
// send the ABCI messages
err = testStreamingService.ListenBeginBlock(emptyContext, testBeginBlockReq, testBeginBlockRes)
require.Nil(t, err)
// load the file, checking that it was created with the expected name
fileName := fmt.Sprintf("%s-block-%d-begin", testPrefix, testBeginBlockReq.GetHeader().Height)
fileBytes, err := readInFile(fileName)
require.Nil(t, err)
// segment the file into the separate gRPC messages and check the correctness of each
segments, err := segmentBytes(fileBytes)
require.Nil(t, err)
require.Equal(t, 5, len(segments))
require.Equal(t, expectedBeginBlockReqBytes, segments[0])
require.Equal(t, expectedKVPair1, segments[1])
require.Equal(t, expectedKVPair2, segments[2])
require.Equal(t, expectedKVPair3, segments[3])
require.Equal(t, expectedBeginBlockResBytes, segments[4])
}
func testListenDeliverTx1(t *testing.T) {
expectedDeliverTxReq1Bytes, err := testMarshaller.Marshal(&testDeliverTxReq1)
require.Nil(t, err)
expectedDeliverTxRes1Bytes, err := testMarshaller.Marshal(&testDeliverTxRes1)
require.Nil(t, err)
// write state changes
testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false)
testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false)
testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false)
testListener2.OnWrite(mockStoreKey2, mockKey3, mockValue3, false)
// expected KV pairs
expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{
expectedKVPair1, err = testMarshaller.Marshal(&types.StoreKVPair{
StoreKey: mockStoreKey1.Name(),
Key: mockKey1,
Value: mockValue1,
Delete: false,
})
require.Nil(t, err)
expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{
expectedKVPair2, err = testMarshaller.Marshal(&types.StoreKVPair{
StoreKey: mockStoreKey2.Name(),
Key: mockKey2,
Value: mockValue2,
Delete: false,
})
require.Nil(t, err)
expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{
expectedKVPair3, err = testMarshaller.Marshal(&types.StoreKVPair{
StoreKey: mockStoreKey2.Name(),
Key: mockKey3,
Value: mockValue3,
Delete: false,
})
require.Nil(t, err)
expectKVPairsStore1 = append(expectKVPairsStore1, expectedKVPair1)
expectKVPairsStore2 = append(expectKVPairsStore2, expectedKVPair2, expectedKVPair3)
// send the ABCI messages
err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq1, testDeliverTxRes1)
require.Nil(t, err)
// load the file, checking that it was created with the expected name
fileName := fmt.Sprintf("%s-block-%d-tx-%d", testPrefix, testBeginBlockReq.GetHeader().Height, 0)
fileBytes, err := readInFile(fileName)
require.Nil(t, err)
// segment the file into the separate gRPC messages and check the correctness of each
segments, err := segmentBytes(fileBytes)
require.Nil(t, err)
require.Equal(t, 5, len(segments))
require.Equal(t, expectedDeliverTxReq1Bytes, segments[0])
require.Equal(t, expectedKVPair1, segments[1])
require.Equal(t, expectedKVPair2, segments[2])
require.Equal(t, expectedKVPair3, segments[3])
require.Equal(t, expectedDeliverTxRes1Bytes, segments[4])
}
func testListenDeliverTx2(t *testing.T) {
expectedDeliverTxReq2Bytes, err := testMarshaller.Marshal(&testDeliverTxReq2)
require.Nil(t, err)
expectedDeliverTxRes2Bytes, err := testMarshaller.Marshal(&testDeliverTxRes2)
require.Nil(t, err)
// write state changes
testListener1.OnWrite(mockStoreKey2, mockKey1, mockValue1, false)
testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false)
testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false)
testListener2.OnWrite(mockStoreKey2, mockKey1, mockValue1, false)
testListener1.OnWrite(mockStoreKey1, mockKey2, mockValue2, false)
testListener2.OnWrite(mockStoreKey2, mockKey3, mockValue3, false)
// expected KV pairs
expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{
expectedKVPair1, err = testMarshaller.Marshal(&types.StoreKVPair{
StoreKey: mockStoreKey2.Name(),
Key: mockKey1,
Value: mockValue1,
Delete: false,
})
require.Nil(t, err)
expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{
expectedKVPair2, err = testMarshaller.Marshal(&types.StoreKVPair{
StoreKey: mockStoreKey1.Name(),
Key: mockKey2,
Value: mockValue2,
Delete: false,
})
require.Nil(t, err)
expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{
expectedKVPair3, err = testMarshaller.Marshal(&types.StoreKVPair{
StoreKey: mockStoreKey2.Name(),
Key: mockKey3,
Value: mockValue3,
Delete: false,
})
require.Nil(t, err)
expectKVPairsStore1 = append(expectKVPairsStore1, expectedKVPair2)
expectKVPairsStore2 = append(expectKVPairsStore2, expectedKVPair1, expectedKVPair3)
// send the ABCI messages
err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq2, testDeliverTxRes2)
require.Nil(t, err)
// load the file, checking that it was created with the expected name
fileName := fmt.Sprintf("%s-block-%d-tx-%d", testPrefix, testBeginBlockReq.GetHeader().Height, 1)
fileBytes, err := readInFile(fileName)
require.Nil(t, err)
// segment the file into the separate gRPC messages and check the correctness of each
segments, err := segmentBytes(fileBytes)
require.Nil(t, err)
require.Equal(t, 5, len(segments))
require.Equal(t, expectedDeliverTxReq2Bytes, segments[0])
require.Equal(t, expectedKVPair1, segments[1])
require.Equal(t, expectedKVPair2, segments[2])
require.Equal(t, expectedKVPair3, segments[3])
require.Equal(t, expectedDeliverTxRes2Bytes, segments[4])
}
func testListenEndBlock(t *testing.T) {
expectedEndBlockReqBytes, err := testMarshaller.Marshal(&testEndBlockReq)
require.Nil(t, err)
expectedEndBlockResBytes, err := testMarshaller.Marshal(&testEndBlockRes)
require.Nil(t, err)
// write state changes
testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false)
testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false)
testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false)
testListener1.OnWrite(mockStoreKey1, mockKey2, mockValue2, false)
testListener2.OnWrite(mockStoreKey2, mockKey3, mockValue3, false)
// expected KV pairs
expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{
expectedKVPair1, err = testMarshaller.Marshal(&types.StoreKVPair{
StoreKey: mockStoreKey1.Name(),
Key: mockKey1,
Value: mockValue1,
Delete: false,
})
require.Nil(t, err)
expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{
expectedKVPair2, err = testMarshaller.Marshal(&types.StoreKVPair{
StoreKey: mockStoreKey1.Name(),
Key: mockKey2,
Value: mockValue2,
Delete: false,
})
require.Nil(t, err)
expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{
expectedKVPair3, err = testMarshaller.Marshal(&types.StoreKVPair{
StoreKey: mockStoreKey2.Name(),
Key: mockKey3,
Value: mockValue3,
@ -346,29 +267,58 @@ func testListenEndBlock(t *testing.T) {
})
require.Nil(t, err)
expectKVPairsStore1 = append(expectKVPairsStore1, expectedKVPair1, expectedKVPair2)
expectKVPairsStore2 = append(expectKVPairsStore2, expectedKVPair3)
// send the ABCI messages
err = testStreamingService.ListenEndBlock(emptyContext, testEndBlockReq, testEndBlockRes)
require.Nil(t, err)
// load the file, checking that it was created with the expected name
fileName := fmt.Sprintf("%s-block-%d-end", testPrefix, testEndBlockReq.Height)
fileBytes, err := readInFile(fileName)
err = testStreamingService.ListenCommit(emptyContext, testCommitRes)
require.Nil(t, err)
// segment the file into the separate gRPC messages and check the correctness of each
segments, err := segmentBytes(fileBytes)
// load the file, checking that it was created with the expected name
metaFileName := fmt.Sprintf("%s-block-%d-meta", testPrefix, testBeginBlockReq.GetHeader().Height)
dataFileName := fmt.Sprintf("%s-block-%d-data", testPrefix, testBeginBlockReq.GetHeader().Height)
metaFileBytes, err := readInFile(metaFileName)
require.Nil(t, err)
require.Equal(t, 5, len(segments))
require.Equal(t, expectedEndBlockReqBytes, segments[0])
require.Equal(t, expectedKVPair1, segments[1])
require.Equal(t, expectedKVPair2, segments[2])
require.Equal(t, expectedKVPair3, segments[3])
require.Equal(t, expectedEndBlockResBytes, segments[4])
dataFileBytes, err := readInFile(dataFileName)
require.Nil(t, err)
metadata := types.BlockMetadata{
RequestBeginBlock: &testBeginBlockReq,
ResponseBeginBlock: &testBeginBlockRes,
RequestEndBlock: &testEndBlockReq,
ResponseEndBlock: &testEndBlockRes,
ResponseCommit: &testCommitRes,
DeliverTxs: []*types.BlockMetadata_DeliverTx{
{Request: &testDeliverTxReq1, Response: &testDeliverTxRes1},
{Request: &testDeliverTxReq2, Response: &testDeliverTxRes2},
},
}
expectedMetadataBytes, err := testMarshaller.Marshal(&metadata)
require.Nil(t, err)
require.Equal(t, expectedMetadataBytes, metaFileBytes)
// segment the file into the separate gRPC messages and check the correctness of each
segments, err := segmentBytes(dataFileBytes)
require.Nil(t, err)
require.Equal(t, len(expectKVPairsStore1)+len(expectKVPairsStore2), len(segments))
require.Equal(t, expectKVPairsStore1, segments[:len(expectKVPairsStore1)])
require.Equal(t, expectKVPairsStore2, segments[len(expectKVPairsStore1):])
}
func readInFile(name string) ([]byte, error) {
path := filepath.Join(testDir, name)
return os.ReadFile(path)
bz, err := os.ReadFile(path)
if err != nil {
return nil, err
}
size := sdk.BigEndianToUint64(bz[:8])
if len(bz) != int(size)+8 {
return nil, errors.New("incomplete file")
}
return bz[8:], nil
}
// segmentBytes returns all of the protobuf messages contained in the byte array as an array of byte arrays

View File

@ -173,11 +173,6 @@ func (tkv *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.Ca
panic("cannot CacheWrapWithTrace a TraceKVStore")
}
// CacheWrapWithListeners implements the CacheWrapper interface.
func (tkv *Store) CacheWrapWithListeners(_ types.StoreKey, _ []types.WriteListener) types.CacheWrap {
panic("cannot CacheWrapWithListeners a TraceKVStore")
}
// writeOperation writes a KVStore operation to the underlying io.Writer as
// JSON-encoded data where the key/value pair is base64 encoded.
func writeOperation(w io.Writer, op operation, tc types.TraceContext, key, value []byte) {

View File

@ -290,8 +290,3 @@ func TestTraceKVStoreCacheWrapWithTrace(t *testing.T) {
store := newEmptyTraceKVStore(nil)
require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) })
}
func TestTraceKVStoreCacheWrapWithListeners(t *testing.T) {
store := newEmptyTraceKVStore(nil)
require.Panics(t, func() { store.CacheWrapWithListeners(nil, nil) })
}

View File

@ -45,3 +45,37 @@ func (wl *StoreKVPairWriteListener) OnWrite(storeKey StoreKey, key []byte, value
}
return nil
}
// MemoryListener listens to state writes and accumulates the records in memory.
type MemoryListener struct {
key StoreKey
stateCache []StoreKVPair
}
// NewMemoryListener creates a listener that accumulates the state writes in memory.
func NewMemoryListener(key StoreKey) *MemoryListener {
return &MemoryListener{key: key}
}
// OnWrite implements WriteListener interface
func (fl *MemoryListener) OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) error {
fl.stateCache = append(fl.stateCache, StoreKVPair{
StoreKey: storeKey.Name(),
Delete: delete,
Key: key,
Value: value,
})
return nil
}
// PopStateCache returns the current state cache and resets it to nil
func (fl *MemoryListener) PopStateCache() []StoreKVPair {
res := fl.stateCache
fl.stateCache = nil
return res
}
// StoreKey returns the StoreKey it listens to
func (fl *MemoryListener) StoreKey() StoreKey {
return fl.key
}
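A minimal sketch of the listener's lifecycle (the store name and values are illustrative):

```go
key := types.NewKVStoreKey("store1")
listener := types.NewMemoryListener(key)

// Writes are buffered in memory...
_ = listener.OnWrite(key, []byte("k1"), []byte("v1"), false)
_ = listener.OnWrite(key, []byte("k2"), nil, true) // a deletion

// ...until PopStateCache drains them and resets the cache to nil.
pairs := listener.PopStateCache()
fmt.Println(len(pairs))               // 2
fmt.Println(listener.PopStateCache()) // []
```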

View File

@ -6,6 +6,7 @@ package types
import (
fmt "fmt"
proto "github.com/cosmos/gogoproto/proto"
types "github.com/tendermint/tendermint/abci/types"
io "io"
math "math"
math_bits "math/bits"
@ -95,8 +96,149 @@ func (m *StoreKVPair) GetValue() []byte {
return nil
}
// BlockMetadata contains all the ABCI event data of a block;
// the file streamer dumps it into files together with the state changes.
type BlockMetadata struct {
RequestBeginBlock *types.RequestBeginBlock `protobuf:"bytes,1,opt,name=request_begin_block,json=requestBeginBlock,proto3" json:"request_begin_block,omitempty"`
ResponseBeginBlock *types.ResponseBeginBlock `protobuf:"bytes,2,opt,name=response_begin_block,json=responseBeginBlock,proto3" json:"response_begin_block,omitempty"`
DeliverTxs []*BlockMetadata_DeliverTx `protobuf:"bytes,3,rep,name=deliver_txs,json=deliverTxs,proto3" json:"deliver_txs,omitempty"`
RequestEndBlock *types.RequestEndBlock `protobuf:"bytes,4,opt,name=request_end_block,json=requestEndBlock,proto3" json:"request_end_block,omitempty"`
ResponseEndBlock *types.ResponseEndBlock `protobuf:"bytes,5,opt,name=response_end_block,json=responseEndBlock,proto3" json:"response_end_block,omitempty"`
ResponseCommit *types.ResponseCommit `protobuf:"bytes,6,opt,name=response_commit,json=responseCommit,proto3" json:"response_commit,omitempty"`
}
func (m *BlockMetadata) Reset() { *m = BlockMetadata{} }
func (m *BlockMetadata) String() string { return proto.CompactTextString(m) }
func (*BlockMetadata) ProtoMessage() {}
func (*BlockMetadata) Descriptor() ([]byte, []int) {
return fileDescriptor_a5d350879fe4fecd, []int{1}
}
func (m *BlockMetadata) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BlockMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_BlockMetadata.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *BlockMetadata) XXX_Merge(src proto.Message) {
xxx_messageInfo_BlockMetadata.Merge(m, src)
}
func (m *BlockMetadata) XXX_Size() int {
return m.Size()
}
func (m *BlockMetadata) XXX_DiscardUnknown() {
xxx_messageInfo_BlockMetadata.DiscardUnknown(m)
}
var xxx_messageInfo_BlockMetadata proto.InternalMessageInfo
func (m *BlockMetadata) GetRequestBeginBlock() *types.RequestBeginBlock {
if m != nil {
return m.RequestBeginBlock
}
return nil
}
func (m *BlockMetadata) GetResponseBeginBlock() *types.ResponseBeginBlock {
if m != nil {
return m.ResponseBeginBlock
}
return nil
}
func (m *BlockMetadata) GetDeliverTxs() []*BlockMetadata_DeliverTx {
if m != nil {
return m.DeliverTxs
}
return nil
}
func (m *BlockMetadata) GetRequestEndBlock() *types.RequestEndBlock {
if m != nil {
return m.RequestEndBlock
}
return nil
}
func (m *BlockMetadata) GetResponseEndBlock() *types.ResponseEndBlock {
if m != nil {
return m.ResponseEndBlock
}
return nil
}
func (m *BlockMetadata) GetResponseCommit() *types.ResponseCommit {
if m != nil {
return m.ResponseCommit
}
return nil
}
// DeliverTx encapsulates a DeliverTx request and response.
type BlockMetadata_DeliverTx struct {
Request *types.RequestDeliverTx `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"`
Response *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=response,proto3" json:"response,omitempty"`
}
func (m *BlockMetadata_DeliverTx) Reset() { *m = BlockMetadata_DeliverTx{} }
func (m *BlockMetadata_DeliverTx) String() string { return proto.CompactTextString(m) }
func (*BlockMetadata_DeliverTx) ProtoMessage() {}
func (*BlockMetadata_DeliverTx) Descriptor() ([]byte, []int) {
return fileDescriptor_a5d350879fe4fecd, []int{1, 0}
}
func (m *BlockMetadata_DeliverTx) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BlockMetadata_DeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_BlockMetadata_DeliverTx.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *BlockMetadata_DeliverTx) XXX_Merge(src proto.Message) {
xxx_messageInfo_BlockMetadata_DeliverTx.Merge(m, src)
}
func (m *BlockMetadata_DeliverTx) XXX_Size() int {
return m.Size()
}
func (m *BlockMetadata_DeliverTx) XXX_DiscardUnknown() {
xxx_messageInfo_BlockMetadata_DeliverTx.DiscardUnknown(m)
}
var xxx_messageInfo_BlockMetadata_DeliverTx proto.InternalMessageInfo
func (m *BlockMetadata_DeliverTx) GetRequest() *types.RequestDeliverTx {
if m != nil {
return m.Request
}
return nil
}
func (m *BlockMetadata_DeliverTx) GetResponse() *types.ResponseDeliverTx {
if m != nil {
return m.Response
}
return nil
}
func init() {
proto.RegisterType((*StoreKVPair)(nil), "cosmos.base.store.v1beta1.StoreKVPair")
proto.RegisterType((*BlockMetadata)(nil), "cosmos.base.store.v1beta1.BlockMetadata")
proto.RegisterType((*BlockMetadata_DeliverTx)(nil), "cosmos.base.store.v1beta1.BlockMetadata.DeliverTx")
}
func init() {
@ -104,21 +246,37 @@ func init() {
}
var fileDescriptor_a5d350879fe4fecd = []byte{
// 224 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x2f, 0xce,
0xcd, 0x2f, 0xd6, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0x2f, 0x33,
0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0xcf, 0xc9, 0x2c, 0x2e, 0x49, 0xcd, 0xcb, 0xcc, 0x4b, 0xd7,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x84, 0x28, 0xd5, 0x03, 0x29, 0xd5, 0x03, 0x2b, 0xd5,
0x83, 0x2a, 0x55, 0xca, 0xe2, 0xe2, 0x0e, 0x06, 0x09, 0x78, 0x87, 0x05, 0x24, 0x66, 0x16, 0x09,
0x49, 0x73, 0x71, 0x82, 0xe5, 0xe3, 0xb3, 0x53, 0x2b, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83,
0x38, 0xc0, 0x02, 0xde, 0xa9, 0x95, 0x42, 0x62, 0x5c, 0x6c, 0x29, 0xa9, 0x39, 0xa9, 0x25, 0xa9,
0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x1c, 0x41, 0x50, 0x9e, 0x90, 0x00, 0x17, 0x33, 0x48, 0x39, 0xb3,
0x02, 0xa3, 0x06, 0x4f, 0x10, 0x88, 0x29, 0x24, 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a,
0xc1, 0x02, 0x16, 0x83, 0x70, 0x9c, 0x9c, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1,
0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e,
0x21, 0x4a, 0x23, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0x1f, 0xea, 0x2d,
0x08, 0xa5, 0x5b, 0x9c, 0x92, 0x0d, 0xf5, 0x5c, 0x49, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8,
0x47, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0xe0, 0xb3, 0x51, 0xfe, 0x00, 0x00, 0x00,
// 473 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0x4f, 0x6f, 0xd3, 0x40,
0x10, 0xc5, 0xe3, 0xa6, 0x09, 0xc9, 0x04, 0x68, 0x59, 0x2a, 0x64, 0x5a, 0xc9, 0xb8, 0xe1, 0x62,
0x0e, 0xac, 0xd5, 0x70, 0x44, 0xe2, 0x10, 0x40, 0x42, 0x2a, 0x08, 0xe4, 0x02, 0x07, 0x2e, 0x96,
0xff, 0x8c, 0xc2, 0x12, 0xdb, 0x1b, 0x76, 0x37, 0x51, 0x73, 0xe6, 0xc2, 0x91, 0x8f, 0xc5, 0xb1,
0x47, 0x8e, 0x28, 0xf9, 0x22, 0xc8, 0x6b, 0xc7, 0xa9, 0x43, 0x7d, 0xca, 0xee, 0xe4, 0xbd, 0x9f,
0xdf, 0xcc, 0x6a, 0xe0, 0x49, 0xc4, 0x65, 0xca, 0xa5, 0x1b, 0x06, 0x12, 0x5d, 0xa9, 0xb8, 0x40,
0x77, 0x71, 0x16, 0xa2, 0x0a, 0xce, 0xdc, 0x84, 0x49, 0x85, 0x19, 0xcb, 0x26, 0x74, 0x26, 0xb8,
0xe2, 0xe4, 0x61, 0x21, 0xa5, 0xb9, 0x94, 0x6a, 0x29, 0x2d, 0xa5, 0xc7, 0x27, 0x0a, 0xb3, 0x18,
0x45, 0xca, 0x32, 0xe5, 0x06, 0x61, 0xc4, 0x5c, 0xb5, 0x9c, 0xa1, 0x2c, 0x7c, 0xc3, 0x6f, 0x30,
0xb8, 0xc8, 0xd5, 0xe7, 0x9f, 0x3f, 0x04, 0x4c, 0x90, 0x13, 0xe8, 0x6b, 0xb3, 0x3f, 0xc5, 0xa5,
0x69, 0xd8, 0x86, 0xd3, 0xf7, 0x7a, 0xba, 0x70, 0x8e, 0x4b, 0xf2, 0x00, 0xba, 0x31, 0x26, 0xa8,
0xd0, 0xdc, 0xb3, 0x0d, 0xa7, 0xe7, 0x95, 0x37, 0x72, 0x08, 0xed, 0x5c, 0xde, 0xb6, 0x0d, 0xe7,
0xb6, 0x97, 0x1f, 0xc9, 0x11, 0x74, 0x16, 0x41, 0x32, 0x47, 0x73, 0x5f, 0xd7, 0x8a, 0xcb, 0xf0,
0x47, 0x07, 0xee, 0x8c, 0x13, 0x1e, 0x4d, 0xdf, 0xa1, 0x0a, 0xe2, 0x40, 0x05, 0xc4, 0x83, 0xfb,
0x02, 0xbf, 0xcf, 0x51, 0x2a, 0x3f, 0xc4, 0x09, 0xcb, 0xfc, 0x30, 0xff, 0x5b, 0x7f, 0x78, 0x30,
0x1a, 0xd2, 0x6d, 0x70, 0x9a, 0x07, 0xa7, 0x5e, 0xa1, 0x1d, 0xe7, 0x52, 0x0d, 0xf2, 0xee, 0x89,
0xdd, 0x12, 0xf9, 0x04, 0x47, 0x02, 0xe5, 0x8c, 0x67, 0x12, 0x6b, 0xd0, 0x3d, 0x0d, 0x7d, 0x7c,
0x03, 0xb4, 0x10, 0x5f, 0xa3, 0x12, 0xf1, 0x5f, 0x8d, 0x5c, 0xc0, 0x20, 0xc6, 0x84, 0x2d, 0x50,
0xf8, 0xea, 0x52, 0x9a, 0x6d, 0xbb, 0xed, 0x0c, 0x46, 0x23, 0xda, 0x38, 0x76, 0x5a, 0xeb, 0x94,
0xbe, 0x2a, 0xbc, 0x1f, 0x2f, 0x3d, 0x88, 0x37, 0x47, 0x49, 0xde, 0xc2, 0xa6, 0x01, 0x1f, 0xb3,
0xb8, 0x0c, 0xba, 0xaf, 0x83, 0xda, 0x4d, 0xdd, 0xbf, 0xce, 0xe2, 0x22, 0xe5, 0x81, 0xa8, 0x17,
0xc8, 0x7b, 0xa8, 0x82, 0x5f, 0xc3, 0x75, 0x34, 0xee, 0xb4, 0xb1, 0xef, 0x8a, 0x77, 0x28, 0x76,
0x2a, 0xe4, 0x0d, 0x1c, 0x54, 0xc0, 0x88, 0xa7, 0x29, 0x53, 0x66, 0x57, 0xd3, 0x1e, 0x35, 0xd2,
0x5e, 0x6a, 0x99, 0x77, 0x57, 0xd4, 0xee, 0xc7, 0x3f, 0x0d, 0xe8, 0x57, 0x23, 0x20, 0xcf, 0xe1,
0x56, 0x99, 0xbd, 0x7c, 0xea, 0xd3, 0xa6, 0x66, 0xb7, 0x63, 0xdb, 0x38, 0xc8, 0x0b, 0xe8, 0x6d,
0xe0, 0xe5, 0x9b, 0x0e, 0x1b, 0xd3, 0x6c, 0xed, 0x95, 0x67, 0x3c, 0xfe, 0xbd, 0xb2, 0x8c, 0xab,
0x95, 0x65, 0xfc, 0x5d, 0x59, 0xc6, 0xaf, 0xb5, 0xd5, 0xba, 0x5a, 0x5b, 0xad, 0x3f, 0x6b, 0xab,
0xf5, 0xc5, 0x99, 0x30, 0xf5, 0x75, 0x1e, 0xd2, 0x88, 0xa7, 0x6e, 0xb9, 0x79, 0xc5, 0xcf, 0x53,
0x19, 0x4f, 0xcb, 0xfd, 0xd3, 0xbb, 0x13, 0x76, 0xf5, 0xf2, 0x3c, 0xfb, 0x17, 0x00, 0x00, 0xff,
0xff, 0x69, 0x5c, 0x8f, 0x23, 0xa1, 0x03, 0x00, 0x00,
}
func (m *StoreKVPair) Marshal() (dAtA []byte, err error) {
@ -175,6 +333,150 @@ func (m *StoreKVPair) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
func (m *BlockMetadata) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *BlockMetadata) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *BlockMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.ResponseCommit != nil {
{
size, err := m.ResponseCommit.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintListening(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x32
}
if m.ResponseEndBlock != nil {
{
size, err := m.ResponseEndBlock.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintListening(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x2a
}
if m.RequestEndBlock != nil {
{
size, err := m.RequestEndBlock.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintListening(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
}
if len(m.DeliverTxs) > 0 {
for iNdEx := len(m.DeliverTxs) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.DeliverTxs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintListening(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
}
if m.ResponseBeginBlock != nil {
{
size, err := m.ResponseBeginBlock.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintListening(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
if m.RequestBeginBlock != nil {
{
size, err := m.RequestBeginBlock.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintListening(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *BlockMetadata_DeliverTx) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *BlockMetadata_DeliverTx) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *BlockMetadata_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Response != nil {
{
size, err := m.Response.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintListening(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
if m.Request != nil {
{
size, err := m.Request.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintListening(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func encodeVarintListening(dAtA []byte, offset int, v uint64) int {
offset -= sovListening(v)
base := offset
@ -210,6 +512,58 @@ func (m *StoreKVPair) Size() (n int) {
return n
}
func (m *BlockMetadata) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.RequestBeginBlock != nil {
l = m.RequestBeginBlock.Size()
n += 1 + l + sovListening(uint64(l))
}
if m.ResponseBeginBlock != nil {
l = m.ResponseBeginBlock.Size()
n += 1 + l + sovListening(uint64(l))
}
if len(m.DeliverTxs) > 0 {
for _, e := range m.DeliverTxs {
l = e.Size()
n += 1 + l + sovListening(uint64(l))
}
}
if m.RequestEndBlock != nil {
l = m.RequestEndBlock.Size()
n += 1 + l + sovListening(uint64(l))
}
if m.ResponseEndBlock != nil {
l = m.ResponseEndBlock.Size()
n += 1 + l + sovListening(uint64(l))
}
if m.ResponseCommit != nil {
l = m.ResponseCommit.Size()
n += 1 + l + sovListening(uint64(l))
}
return n
}
func (m *BlockMetadata_DeliverTx) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Request != nil {
l = m.Request.Size()
n += 1 + l + sovListening(uint64(l))
}
if m.Response != nil {
l = m.Response.Size()
n += 1 + l + sovListening(uint64(l))
}
return n
}
func sovListening(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@ -386,6 +740,392 @@ func (m *StoreKVPair) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *BlockMetadata) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowListening
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: BlockMetadata: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: BlockMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RequestBeginBlock", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowListening
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthListening
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthListening
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.RequestBeginBlock == nil {
m.RequestBeginBlock = &types.RequestBeginBlock{}
}
if err := m.RequestBeginBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResponseBeginBlock", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowListening
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthListening
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthListening
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.ResponseBeginBlock == nil {
m.ResponseBeginBlock = &types.ResponseBeginBlock{}
}
if err := m.ResponseBeginBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DeliverTxs", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowListening
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthListening
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthListening
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DeliverTxs = append(m.DeliverTxs, &BlockMetadata_DeliverTx{})
if err := m.DeliverTxs[len(m.DeliverTxs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RequestEndBlock", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowListening
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthListening
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthListening
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.RequestEndBlock == nil {
m.RequestEndBlock = &types.RequestEndBlock{}
}
if err := m.RequestEndBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResponseEndBlock", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowListening
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthListening
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthListening
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.ResponseEndBlock == nil {
m.ResponseEndBlock = &types.ResponseEndBlock{}
}
if err := m.ResponseEndBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ResponseCommit", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowListening
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthListening
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthListening
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.ResponseCommit == nil {
m.ResponseCommit = &types.ResponseCommit{}
}
if err := m.ResponseCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipListening(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthListening
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *BlockMetadata_DeliverTx) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowListening
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: DeliverTx: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: DeliverTx: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowListening
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthListening
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthListening
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Request == nil {
m.Request = &types.RequestDeliverTx{}
}
if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowListening
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthListening
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthListening
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Response == nil {
m.Response = &types.ResponseDeliverTx{}
}
if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipListening(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthListening
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipListening(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0

View File

@ -128,13 +128,6 @@ type MultiStore interface {
// tracing operations. The modified MultiStore is returned.
SetTracingContext(TraceContext) MultiStore
// ListeningEnabled returns whether listening is enabled for the KVStore belonging to the provided StoreKey
ListeningEnabled(key StoreKey) bool
// AddListeners adds WriteListeners for the KVStore belonging to the provided StoreKey
// It appends the listeners to a current set, if one already exists
AddListeners(key StoreKey, listeners []WriteListener)
// LatestVersion returns the latest version in the store
LatestVersion() int64
}
@ -197,6 +190,13 @@ type CommitMultiStore interface {
// RollbackToVersion rolls back the db to a specific version (height).
RollbackToVersion(version int64) error
// ListeningEnabled returns whether listening is enabled for the KVStore belonging to the provided StoreKey
ListeningEnabled(key StoreKey) bool
// AddListeners adds WriteListeners for the KVStore belonging to the provided StoreKey
// It appends the listeners to a current set, if one already exists
AddListeners(key StoreKey, listeners []WriteListener)
}
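Since the listening methods moved off `MultiStore`, a rough sketch of the migration this implies for callers (`app` and `key` are illustrative, and the example listener stands in for whatever listeners the app uses):

```go
// Code like ctx.MultiStore().ListeningEnabled(key) no longer compiles;
// register and query listeners on the app-level CommitMultiStore instead.
cms := app.CommitMultiStore()
if !cms.ListeningEnabled(key) {
	cms.AddListeners(key, []types.WriteListener{types.NewMemoryListener(key)})
}
```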
//---------subsp-------------------------------
@ -273,9 +273,6 @@ type CacheWrap interface {
// CacheWrapWithTrace recursively wraps again with tracing enabled.
CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap
// CacheWrapWithListeners recursively wraps again with listening enabled
CacheWrapWithListeners(storeKey StoreKey, listeners []WriteListener) CacheWrap
}
type CacheWrapper interface {
@ -284,9 +281,6 @@ type CacheWrapper interface {
// CacheWrapWithTrace branches a store with tracing enabled.
CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap
// CacheWrapWithListeners recursively wraps again with listening enabled
CacheWrapWithListeners(storeKey StoreKey, listeners []WriteListener) CacheWrap
}
func (cid CommitID) IsZero() bool {