Merge branch 'master' into jordan/cosmos-docs

This commit is contained in:
Jordan Bibla 2018-07-12 20:01:27 -04:00 committed by GitHub
commit 28da104e1a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
29 changed files with 1009 additions and 127 deletions

View File

@ -14,8 +14,10 @@ FEATURES
IMPROVEMENTS
* Updated docs folder to accommodate cosmos.network docs project
* [store] Added support for tracing multi-store operations via `--trace-store`
BUG FIXES
* \#1630 - redelegation no longer removes tokens from the delegator's liquid account
* [keys] \#1629 - updating password no longer asks for a new password when the first entered password was incorrect
* [lcd] importing an account would create a random account
@ -111,6 +113,7 @@ FEATURES
- Auth has its invariants checked within the framework
* [tests] Add WaitForNextNBlocksTM helper method
* [keys] New keys now have 24 word recovery keys, for heightened security
- [keys] Add a temporary method for exporting the private key
IMPROVEMENTS
* [x/bank] Now uses go-wire codec instead of 'encoding/json'

View File

@ -2,12 +2,14 @@ package baseapp
import (
"fmt"
"io"
"runtime/debug"
"strings"
"github.com/pkg/errors"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/tmhash"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
@ -37,7 +39,7 @@ const (
runTxModeDeliver runTxMode = iota
)
// The ABCI application
// BaseApp reflects the ABCI application implementation.
type BaseApp struct {
// initialized on creation
Logger log.Logger
@ -71,7 +73,12 @@ type BaseApp struct {
var _ abci.Application = (*BaseApp)(nil)
// Create and name new BaseApp
// NewBaseApp returns a reference to an initialized BaseApp.
//
// TODO: Determine how to use a flexible and robust configuration paradigm that
// allows for sensible defaults while being highly configurable
// (e.g. functional options).
//
// NOTE: The db is used to store the version number for now.
// Accepts variable number of option functions, which act on the BaseApp to set configuration choices
func NewBaseApp(name string, cdc *wire.Codec, logger log.Logger, db dbm.DB, options ...func(*BaseApp)) *BaseApp {
@ -85,7 +92,9 @@ func NewBaseApp(name string, cdc *wire.Codec, logger log.Logger, db dbm.DB, opti
codespacer: sdk.NewCodespacer(),
txDecoder: defaultTxDecoder(cdc),
}
// Register the undefined & root codespaces, which should not be used by any modules
// Register the undefined & root codespaces, which should not be used by
// any modules.
app.codespacer.RegisterOrPanic(sdk.CodespaceRoot)
for _, option := range options {
option(app)
@ -98,6 +107,12 @@ func (app *BaseApp) Name() string {
return app.name
}
// SetCommitMultiStoreTracer sets the store tracer on the BaseApp's underlying
// CommitMultiStore.
func (app *BaseApp) SetCommitMultiStoreTracer(w io.Writer) {
app.cms.WithTracer(w)
}
// Register the next available codespace through the baseapp's codespacer, starting from a default
func (app *BaseApp) RegisterCodespace(codespace sdk.CodespaceType) sdk.CodespaceType {
return app.codespacer.RegisterNext(codespace)
@ -392,13 +407,18 @@ func handleQueryP2P(app *BaseApp, path []string, req abci.RequestQuery) (res abc
return sdk.ErrUnknownRequest(msg).QueryResult()
}
// Implements ABCI
// BeginBlock implements the ABCI application interface.
func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeginBlock) {
// Initialize the DeliverTx state.
// If this is the first block, it should already
// be initialized in InitChain.
// Otherwise app.deliverState will be nil, since it
// is reset on Commit.
if app.cms.TracingEnabled() {
app.cms.ResetTraceContext()
app.cms.WithTracingContext(sdk.TraceContext(
map[string]interface{}{"blockHeight": req.Header.Height},
))
}
// Initialize the DeliverTx state. If this is the first block, it should
// already be initialized in InitChain. Otherwise app.deliverState will be
// nil, since it is reset on Commit.
if app.deliverState == nil {
app.setDeliverState(req.Header)
} else {
@ -406,9 +426,11 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg
// by InitChain. Context is now updated with Header information.
app.deliverState.ctx = app.deliverState.ctx.WithBlockHeader(req.Header)
}
if app.beginBlocker != nil {
res = app.beginBlocker(app.deliverState.ctx, req)
}
// set the signed validators for addition to context in deliverTx
app.signedValidators = req.Validators
return
@ -548,25 +570,26 @@ func (app *BaseApp) runMsgs(ctx sdk.Context, msgs []sdk.Msg) (result sdk.Result)
return result
}
// Returns deliverState if app is in runTxModeDeliver, otherwhise returns checkstate
// Returns the application's deliverState if app is in runTxModeDeliver,
// otherwise it returns the application's checkState.
func getState(app *BaseApp, mode runTxMode) *state {
if mode == runTxModeCheck || mode == runTxModeSimulate {
return app.checkState
}
return app.deliverState
}
// txBytes may be nil in some cases, eg. in tests.
// Also, in the future we may support "internal" transactions.
// runTx processes a transaction. The transaction is processed via an
// anteHandler. txBytes may be nil in some cases, e.g. in tests. Also, in the
// future we may support "internal" transactions.
func (app *BaseApp) runTx(mode runTxMode, txBytes []byte, tx sdk.Tx) (result sdk.Result) {
//NOTE: GasWanted should be returned by the AnteHandler.
// GasUsed is determined by the GasMeter.
// We need access to the context to get the gas meter so
// we initialize upfront
// NOTE: GasWanted should be returned by the AnteHandler. GasUsed is
// determined by the GasMeter. We need access to the context to get the gas
// meter so we initialize upfront.
var gasWanted int64
ctx := app.getContextForAnte(mode, txBytes)
// Handle any panics.
defer func() {
if r := recover(); r != nil {
switch rType := r.(type) {
@ -578,11 +601,11 @@ func (app *BaseApp) runTx(mode runTxMode, txBytes []byte, tx sdk.Tx) (result sdk
result = sdk.ErrInternal(log).Result()
}
}
result.GasWanted = gasWanted
result.GasUsed = ctx.GasMeter().GasConsumed()
}()
// Get the Msg.
var msgs = tx.GetMsgs()
err := validateBasicTxMsgs(msgs)
@ -590,7 +613,7 @@ func (app *BaseApp) runTx(mode runTxMode, txBytes []byte, tx sdk.Tx) (result sdk
return err.Result()
}
// Run the ante handler.
// run the ante handler
if app.anteHandler != nil {
newCtx, anteResult, abort := app.anteHandler(ctx, tx)
if abort {
@ -599,17 +622,24 @@ func (app *BaseApp) runTx(mode runTxMode, txBytes []byte, tx sdk.Tx) (result sdk
if !newCtx.IsZero() {
ctx = newCtx
}
gasWanted = result.GasWanted
}
// CacheWrap the state in case it fails.
// Keep the state in a transient CacheWrap in case processing the messages
// fails.
msCache := getState(app, mode).CacheMultiStore()
ctx = ctx.WithMultiStore(msCache)
if msCache.TracingEnabled() {
msCache = msCache.WithTracingContext(sdk.TraceContext(
map[string]interface{}{"txHash": cmn.HexBytes(tmhash.Sum(txBytes)).String()},
)).(sdk.CacheMultiStore)
}
ctx = ctx.WithMultiStore(msCache)
result = app.runMsgs(ctx, msgs)
result.GasWanted = gasWanted
// Only update state if all messages pass and we're not in a simulation.
// only update state if all messages pass and we're not in a simulation
if result.IsOK() && mode != runTxModeSimulate {
msCache.Write()
}
@ -617,11 +647,16 @@ func (app *BaseApp) runTx(mode runTxMode, txBytes []byte, tx sdk.Tx) (result sdk
return
}
// Implements ABCI
// EndBlock implements the ABCI application interface.
func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBlock) {
if app.deliverState.ms.TracingEnabled() {
app.deliverState.ms = app.deliverState.ms.ResetTraceContext().(sdk.CacheMultiStore)
}
if app.endBlocker != nil {
res = app.endBlocker(app.deliverState.ctx, req)
}
return
}
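Taken together, `NewBaseApp` and `SetCommitMultiStoreTracer` above are what let a daemon opt into store tracing. A minimal sketch of the intended wiring (the output file name and the surrounding `appName`, `cdc`, `logger`, and `db` values are assumptions, not part of this diff):

```go
// Sketch only: appName, cdc, logger, and db are assumed to exist in the
// daemon's setup code; bam refers to the baseapp package.
traceWriter, err := os.OpenFile("trace.out", os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
if err != nil {
	panic(err)
}

app := bam.NewBaseApp(appName, cdc, logger, db)
app.SetCommitMultiStoreTracer(traceWriter) // multi-store operations are now traced to trace.out
```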

View File

@ -105,7 +105,7 @@ func InitializeTestLCD(t *testing.T, nValidators int, initAddrs []sdk.AccAddress
privVal := pvm.LoadOrGenFilePV(privValidatorFile)
privVal.Reset()
db := dbm.NewMemDB()
app := gapp.NewGaiaApp(logger, db)
app := gapp.NewGaiaApp(logger, db, nil)
cdc = gapp.MakeCodec()
genesisFile := config.GenesisFile()

View File

@ -2,6 +2,7 @@ package app
import (
"encoding/json"
"io"
"os"
abci "github.com/tendermint/tendermint/abci/types"
@ -55,12 +56,19 @@ type GaiaApp struct {
govKeeper gov.Keeper
}
func NewGaiaApp(logger log.Logger, db dbm.DB) *GaiaApp {
// NewGaiaApp returns a reference to an initialized GaiaApp.
//
// TODO: Determine how to use a flexible and robust configuration paradigm that
// allows for sensible defaults while being highly configurable
// (e.g. functional options).
func NewGaiaApp(logger log.Logger, db dbm.DB, traceStore io.Writer) *GaiaApp {
cdc := MakeCodec()
// create your application object
bApp := bam.NewBaseApp(appName, cdc, logger, db)
bApp.SetCommitMultiStoreTracer(traceStore)
var app = &GaiaApp{
BaseApp: bam.NewBaseApp(appName, cdc, logger, db),
BaseApp: bApp,
cdc: cdc,
keyMain: sdk.NewKVStoreKey("main"),
keyAccount: sdk.NewKVStoreKey("acc"),

View File

@ -2,6 +2,7 @@ package main
import (
"encoding/json"
"io"
"github.com/spf13/cobra"
@ -38,11 +39,13 @@ func main() {
}
}
func newApp(logger log.Logger, db dbm.DB) abci.Application {
return app.NewGaiaApp(logger, db)
func newApp(logger log.Logger, db dbm.DB, traceStore io.Writer) abci.Application {
return app.NewGaiaApp(logger, db, traceStore)
}
func exportAppStateAndTMValidators(logger log.Logger, db dbm.DB) (json.RawMessage, []tmtypes.GenesisValidator, error) {
gapp := app.NewGaiaApp(logger, db)
return gapp.ExportAppStateAndValidators()
func exportAppStateAndTMValidators(
logger log.Logger, db dbm.DB, traceStore io.Writer,
) (json.RawMessage, []tmtypes.GenesisValidator, error) {
gApp := app.NewGaiaApp(logger, db, traceStore)
return gApp.ExportAppStateAndValidators()
}

View File

@ -143,9 +143,12 @@ type GaiaApp struct {
func NewGaiaApp(logger log.Logger, db dbm.DB) *GaiaApp {
cdc := MakeCodec()
bApp := bam.NewBaseApp(appName, cdc, logger, db)
bApp.SetCommitMultiStoreTracer(os.Stdout)
// create your application object
var app = &GaiaApp{
BaseApp: bam.NewBaseApp(appName, cdc, logger, db),
BaseApp: bApp,
cdc: cdc,
keyMain: sdk.NewKVStoreKey("main"),
keyAccount: sdk.NewKVStoreKey("acc"),

View File

@ -240,6 +240,31 @@ func (kb dbKeybase) Sign(name, passphrase string, msg []byte) (sig tcrypto.Signa
return sig, pub, nil
}
func (kb dbKeybase) ExportPrivateKeyObject(name string, passphrase string) (tcrypto.PrivKey, error) {
info, err := kb.Get(name)
if err != nil {
return nil, err
}
var priv tcrypto.PrivKey
switch info.(type) {
case localInfo:
linfo := info.(localInfo)
if linfo.PrivKeyArmor == "" {
err = fmt.Errorf("private key not available")
return nil, err
}
priv, err = unarmorDecryptPrivKey(linfo.PrivKeyArmor, passphrase)
if err != nil {
return nil, err
}
case ledgerInfo:
return nil, errors.New("Only works on local private keys")
case offlineInfo:
return nil, errors.New("Only works on local private keys")
}
return priv, nil
}
func (kb dbKeybase) Export(name string) (armor string, err error) {
bz := kb.db.Get(infoKey(name))
if bz == nil {

View File

@ -39,6 +39,9 @@ type Keybase interface {
ImportPubKey(name string, armor string) (err error)
Export(name string) (armor string, err error)
ExportPubKey(name string) (armor string, err error)
// *only* works on locally-stored keys. Temporary method until we redo the exporting API
ExportPrivateKeyObject(name string, passphrase string) (crypto.PrivKey, error)
}
// Info is the publicly exposed information about a keypair
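A short usage sketch of the temporary `ExportPrivateKeyObject` method declared above (the keybase, key name, and passphrase are placeholders):

```go
// Sketch only: kb is an existing Keybase; the name and passphrase are placeholders.
// Only locally-stored keys can be exported; ledger and offline keys return an error.
priv, err := kb.ExportPrivateKeyObject("alice", "correct horse battery staple")
if err != nil {
	panic(err)
}
fmt.Println(priv.PubKey().Address())
```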

View File

@ -1,10 +1,45 @@
# Running a Node
TODO: document `gaiad`
> TODO: Improve documentation of `gaiad`
## Basics
To start a node:
```shell
$ gaiad start <flags>
```
Options for running the `gaiad` binary are effectively the same as for `tendermint`.
See `gaiad --help` and the
[guide to using Tendermint](https://github.com/tendermint/tendermint/blob/master/docs/using-tendermint.md)
for more details.
## Debugging
Optionally, you can run `gaiad` with `--trace-store` to trace all store operations
to a specified file.
```shell
$ gaiad start <flags> --trace-store=/path/to/trace.out
```
Keys and values are base64 encoded. Additionally, the block height and any
correlated transaction hash are included as metadata. For example:
```json
...
{"operation":"write","key":"ATW6Bu997eeuUeRBwv1EPGvXRfPR","value":"BggEEBYgFg==","metadata":{"blockHeight":12,"txHash":"5AAC197EC45E6C5DE0798C4A4E2F54BBB695CA9E"}}
{"operation":"write","key":"AjW6Bu997eeuUeRBwv1EPGvXRfPRCgAAAAAAAAA=","value":"AQE=","metadata":{"blockHeight":12,"txHash":"5AAC197EC45E6C5DE0798C4A4E2F54BBB695CA9E"}}
{"operation":"read","key":"ATW6Bu997eeuUeRBwv1EPGvXRfPR","value":"BggEEBYgFg==","metadata":{"blockHeight":13}}
{"operation":"read","key":"AjW6Bu997eeuUeRBwv1EPGvXRfPRCwAAAAAAAAA=","value":"","metadata":{"blockHeight":13}}
...
```
You can then query for the various traced operations using a tool like [jq](https://github.com/stedolan/jq).
```shell
$ jq -s '.[] | select((.key=="ATW6Bu997eeuUeRBwv1EPGvXRfPR") and .metadata.blockHeight==14)' /path/to/trace.out
```
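If `jq` is not enough, trace lines can also be decoded programmatically. Below is a minimal sketch (the `traceOp` struct and file path are illustrative, not SDK types) that reads a trace file and prints each operation with its decoded key:

```go
package main

import (
	"bufio"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"os"
)

// traceOp mirrors the JSON layout shown above; it is not an SDK type.
type traceOp struct {
	Operation string                 `json:"operation"`
	Key       string                 `json:"key"`
	Value     string                 `json:"value"`
	Metadata  map[string]interface{} `json:"metadata"`
}

func main() {
	f, err := os.Open("/path/to/trace.out")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		var op traceOp
		if err := json.Unmarshal(scanner.Bytes(), &op); err != nil {
			continue // skip malformed lines
		}
		key, _ := base64.StdEncoding.DecodeString(op.Key)
		fmt.Printf("%s %X at height %v\n", op.Operation, key, op.Metadata["blockHeight"])
	}
}
```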

View File

@ -2,6 +2,7 @@ package main
import (
"encoding/json"
"io"
"os"
"github.com/cosmos/cosmos-sdk/examples/basecoin/app"
@ -39,11 +40,11 @@ func main() {
}
}
func newApp(logger log.Logger, db dbm.DB) abci.Application {
func newApp(logger log.Logger, db dbm.DB, storeTracer io.Writer) abci.Application {
return app.NewBasecoinApp(logger, db)
}
func exportAppStateAndTMValidators(logger log.Logger, db dbm.DB) (json.RawMessage, []tmtypes.GenesisValidator, error) {
func exportAppStateAndTMValidators(logger log.Logger, db dbm.DB, storeTracer io.Writer) (json.RawMessage, []tmtypes.GenesisValidator, error) {
bapp := app.NewBasecoinApp(logger, db)
return bapp.ExportAppStateAndValidators()
}

View File

@ -2,6 +2,7 @@ package main
import (
"encoding/json"
"io"
"os"
"github.com/spf13/cobra"
@ -50,11 +51,11 @@ func CoolAppGenState(cdc *wire.Codec, appGenTxs []json.RawMessage) (appState jso
return
}
func newApp(logger log.Logger, db dbm.DB) abci.Application {
func newApp(logger log.Logger, db dbm.DB, _ io.Writer) abci.Application {
return app.NewDemocoinApp(logger, db)
}
func exportAppStateAndTMValidators(logger log.Logger, db dbm.DB) (json.RawMessage, []tmtypes.GenesisValidator, error) {
func exportAppStateAndTMValidators(logger log.Logger, db dbm.DB, _ io.Writer) (json.RawMessage, []tmtypes.GenesisValidator, error) {
dapp := app.NewDemocoinApp(logger, db)
return dapp.ExportAppStateAndValidators()
}

View File

@ -2,6 +2,8 @@ package server
import (
"encoding/json"
"io"
"os"
"path/filepath"
abci "github.com/tendermint/tendermint/abci/types"
@ -10,34 +12,73 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
)
// AppCreator lets us lazily initialize app, using home dir
// and other flags (?) to start
type AppCreator func(string, log.Logger) (abci.Application, error)
type (
// AppCreator reflects a function that allows us to lazily initialize an
// application using various configurations.
AppCreator func(home string, logger log.Logger, traceStore string) (abci.Application, error)
// AppExporter dumps all app state to JSON-serializable structure and returns the current validator set
type AppExporter func(home string, log log.Logger) (json.RawMessage, []tmtypes.GenesisValidator, error)
// AppExporter reflects a function that dumps all app state to
// JSON-serializable structure and returns the current validator set.
AppExporter func(home string, logger log.Logger, traceStore string) (json.RawMessage, []tmtypes.GenesisValidator, error)
// ConstructAppCreator returns an application generation function
func ConstructAppCreator(appFn func(log.Logger, dbm.DB) abci.Application, name string) AppCreator {
return func(rootDir string, logger log.Logger) (abci.Application, error) {
// AppCreatorInit reflects a function that performs initialization of an
// AppCreator.
AppCreatorInit func(log.Logger, dbm.DB, io.Writer) abci.Application
// AppExporterInit reflects a function that performs initialization of an
// AppExporter.
AppExporterInit func(log.Logger, dbm.DB, io.Writer) (json.RawMessage, []tmtypes.GenesisValidator, error)
)
// ConstructAppCreator returns an application generation function.
func ConstructAppCreator(appFn AppCreatorInit, name string) AppCreator {
return func(rootDir string, logger log.Logger, traceStore string) (abci.Application, error) {
dataDir := filepath.Join(rootDir, "data")
db, err := dbm.NewGoLevelDB(name, dataDir)
if err != nil {
return nil, err
}
app := appFn(logger, db)
var traceStoreWriter io.Writer
if traceStore != "" {
traceStoreWriter, err = os.OpenFile(
traceStore,
os.O_WRONLY|os.O_APPEND|os.O_CREATE,
0666,
)
if err != nil {
return nil, err
}
}
app := appFn(logger, db, traceStoreWriter)
return app, nil
}
}
// ConstructAppExporter returns an application export function
func ConstructAppExporter(appFn func(log.Logger, dbm.DB) (json.RawMessage, []tmtypes.GenesisValidator, error), name string) AppExporter {
return func(rootDir string, logger log.Logger) (json.RawMessage, []tmtypes.GenesisValidator, error) {
// ConstructAppExporter returns an application export function.
func ConstructAppExporter(appFn AppExporterInit, name string) AppExporter {
return func(rootDir string, logger log.Logger, traceStore string) (json.RawMessage, []tmtypes.GenesisValidator, error) {
dataDir := filepath.Join(rootDir, "data")
db, err := dbm.NewGoLevelDB(name, dataDir)
if err != nil {
return nil, nil, err
}
return appFn(logger, db)
var traceStoreWriter io.Writer
if traceStore != "" {
traceStoreWriter, err = os.OpenFile(
traceStore,
os.O_WRONLY|os.O_APPEND|os.O_CREATE,
0666,
)
if err != nil {
return nil, nil, err
}
}
return appFn(logger, db, traceStoreWriter)
}
}
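For reference, a sketch of how a daemon might plug its constructors into these helpers, mirroring the `gaiad` changes earlier in this diff (`rootCmd`, `ctx`, and `cdc` are assumed to exist in the daemon's main):

```go
// Sketch only: newApp and exportAppStateAndTMValidators have the
// (log.Logger, dbm.DB, io.Writer) signatures shown in the gaiad diff above.
appCreator := server.ConstructAppCreator(newApp, "gaia")
appExporter := server.ConstructAppExporter(exportAppStateAndTMValidators, "gaia")

rootCmd.AddCommand(
	server.StartCmd(ctx, appCreator),
	server.ExportCmd(ctx, cdc, appExporter),
)
```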

View File

@ -11,27 +11,33 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
)
// ExportCmd dumps app state to JSON
// ExportCmd dumps app state to JSON.
func ExportCmd(ctx *Context, cdc *wire.Codec, appExporter AppExporter) *cobra.Command {
return &cobra.Command{
Use: "export",
Short: "Export state to JSON",
RunE: func(cmd *cobra.Command, args []string) error {
home := viper.GetString("home")
appState, validators, err := appExporter(home, ctx.Logger)
traceStore := viper.GetString(flagTraceStore)
appState, validators, err := appExporter(home, ctx.Logger, traceStore)
if err != nil {
return errors.Errorf("error exporting state: %v\n", err)
}
doc, err := tmtypes.GenesisDocFromFile(ctx.Config.GenesisFile())
if err != nil {
return err
}
doc.AppStateJSON = appState
doc.Validators = validators
encoded, err := wire.MarshalJSONIndent(cdc, doc)
if err != nil {
return err
}
fmt.Println(string(encoded))
return nil
},

View File

@ -1,6 +1,8 @@
package mock
import (
"io"
dbm "github.com/tendermint/tendermint/libs/db"
sdk "github.com/cosmos/cosmos-sdk/types"
@ -18,6 +20,26 @@ func (ms multiStore) CacheWrap() sdk.CacheWrap {
panic("not implemented")
}
func (ms multiStore) CacheWrapWithTrace(_ io.Writer, _ sdk.TraceContext) sdk.CacheWrap {
panic("not implemented")
}
func (ms multiStore) ResetTraceContext() sdk.MultiStore {
panic("not implemented")
}
func (ms multiStore) TracingEnabled() bool {
panic("not implemented")
}
func (ms multiStore) WithTracingContext(tc sdk.TraceContext) sdk.MultiStore {
panic("not implemented")
}
func (ms multiStore) WithTracer(w io.Writer) sdk.MultiStore {
panic("not implemented")
}
func (ms multiStore) Commit() sdk.CommitID {
panic("not implemented")
}
@ -70,6 +92,10 @@ func (kv kvStore) CacheWrap() sdk.CacheWrap {
panic("not implemented")
}
func (kv kvStore) CacheWrapWithTrace(w io.Writer, tc sdk.TraceContext) sdk.CacheWrap {
panic("not implemented")
}
func (kv kvStore) GetStoreType() sdk.StoreType {
panic("not implemented")
}

View File

@ -17,10 +17,11 @@ import (
const (
flagWithTendermint = "with-tendermint"
flagAddress = "address"
flagTraceStore = "trace-store"
)
// StartCmd runs the service passed in, either
// stand-alone, or in-process with tendermint
// StartCmd runs the service passed in, either stand-alone or in-process with
// Tendermint.
func StartCmd(ctx *Context, appCreator AppCreator) *cobra.Command {
cmd := &cobra.Command{
Use: "start",
@ -30,26 +31,30 @@ func StartCmd(ctx *Context, appCreator AppCreator) *cobra.Command {
ctx.Logger.Info("Starting ABCI without Tendermint")
return startStandAlone(ctx, appCreator)
}
ctx.Logger.Info("Starting ABCI with Tendermint")
_, err := startInProcess(ctx, appCreator)
return err
},
}
// basic flags for abci app
cmd.Flags().Bool(flagWithTendermint, true, "run abci app embedded in-process with tendermint")
// core flags for the ABCI application
cmd.Flags().Bool(flagWithTendermint, true, "Run abci app embedded in-process with tendermint")
cmd.Flags().String(flagAddress, "tcp://0.0.0.0:26658", "Listen address")
cmd.Flags().String(flagTraceStore, "", "Enable KVStore tracing to an output file")
// AddNodeFlags adds support for all tendermint-specific command line options
// add support for all Tendermint-specific command line options
tcmd.AddNodeFlags(cmd)
return cmd
}
func startStandAlone(ctx *Context, appCreator AppCreator) error {
// Generate the app in the proper dir
addr := viper.GetString(flagAddress)
home := viper.GetString("home")
app, err := appCreator(home, ctx.Logger)
traceStore := viper.GetString(flagTraceStore)
app, err := appCreator(home, ctx.Logger, traceStore)
if err != nil {
return err
}
@ -58,15 +63,17 @@ func startStandAlone(ctx *Context, appCreator AppCreator) error {
if err != nil {
return errors.Errorf("error creating listener: %v\n", err)
}
svr.SetLogger(ctx.Logger.With("module", "abci-server"))
err = svr.Start()
if err != nil {
cmn.Exit(err.Error())
}
// Wait forever
// wait forever
cmn.TrapSignal(func() {
// Cleanup
// cleanup
err = svr.Stop()
if err != nil {
cmn.Exit(err.Error())
@ -78,29 +85,33 @@ func startStandAlone(ctx *Context, appCreator AppCreator) error {
func startInProcess(ctx *Context, appCreator AppCreator) (*node.Node, error) {
cfg := ctx.Config
home := cfg.RootDir
app, err := appCreator(home, ctx.Logger)
traceStore := viper.GetString(flagTraceStore)
app, err := appCreator(home, ctx.Logger, traceStore)
if err != nil {
return nil, err
}
// Create & start tendermint node
n, err := node.NewNode(cfg,
// create & start tendermint node
tmNode, err := node.NewNode(
cfg,
pvm.LoadOrGenFilePV(cfg.PrivValidatorFile()),
proxy.NewLocalClientCreator(app),
node.DefaultGenesisDocProviderFunc(cfg),
node.DefaultDBProvider,
node.DefaultMetricsProvider,
ctx.Logger.With("module", "node"))
ctx.Logger.With("module", "node"),
)
if err != nil {
return nil, err
}
err = n.Start()
err = tmNode.Start()
if err != nil {
return nil, err
}
// Trap signal, run forever.
n.RunForever()
return n, nil
// trap signal (run forever)
tmNode.RunForever()
return tmNode, nil
}

View File

@ -2,6 +2,7 @@ package store
import (
"bytes"
"io"
"sort"
"sync"
@ -27,11 +28,10 @@ var _ CacheKVStore = (*cacheKVStore)(nil)
// nolint
func NewCacheKVStore(parent KVStore) *cacheKVStore {
ci := &cacheKVStore{
return &cacheKVStore{
cache: make(map[string]cValue),
parent: parent,
}
return ci
}
// Implements Store.
@ -98,6 +98,7 @@ func (ci *cacheKVStore) Write() {
keys = append(keys, key)
}
}
sort.Strings(keys)
// TODO: Consider allowing usage of Batch, which would allow the write to
@ -125,6 +126,11 @@ func (ci *cacheKVStore) CacheWrap() CacheWrap {
return NewCacheKVStore(ci)
}
// CacheWrapWithTrace implements the CacheWrapper interface.
func (ci *cacheKVStore) CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap {
return NewCacheKVStore(NewTraceKVStore(ci, w, tc))
}
//----------------------------------------
// Iteration
@ -140,32 +146,39 @@ func (ci *cacheKVStore) ReverseIterator(start, end []byte) Iterator {
func (ci *cacheKVStore) iterator(start, end []byte, ascending bool) Iterator {
var parent, cache Iterator
if ascending {
parent = ci.parent.Iterator(start, end)
} else {
parent = ci.parent.ReverseIterator(start, end)
}
items := ci.dirtyItems(ascending)
cache = newMemIterator(start, end, items)
return newCacheMergeIterator(parent, cache, ascending)
}
// Constructs a slice of dirty items, to use w/ memIterator.
func (ci *cacheKVStore) dirtyItems(ascending bool) []cmn.KVPair {
items := make([]cmn.KVPair, 0, len(ci.cache))
for key, cacheValue := range ci.cache {
if !cacheValue.dirty {
continue
}
items = append(items,
cmn.KVPair{[]byte(key), cacheValue.value})
items = append(items, cmn.KVPair{Key: []byte(key), Value: cacheValue.value})
}
sort.Slice(items, func(i, j int) bool {
if ascending {
return bytes.Compare(items[i].Key, items[j].Key) < 0
}
return bytes.Compare(items[i].Key, items[j].Key) > 0
})
return items
}
@ -180,10 +193,9 @@ func (ci *cacheKVStore) assertValidKey(key []byte) {
// Only entrypoint to mutate ci.cache.
func (ci *cacheKVStore) setCacheValue(key, value []byte, deleted bool, dirty bool) {
cacheValue := cValue{
ci.cache[string(key)] = cValue{
value: value,
deleted: deleted,
dirty: dirty,
}
ci.cache[string(key)] = cacheValue
}

View File

@ -1,6 +1,8 @@
package store
import (
"io"
sdk "github.com/cosmos/cosmos-sdk/types"
)
@ -13,33 +15,86 @@ type cacheMultiStore struct {
db CacheKVStore
stores map[StoreKey]CacheWrap
keysByName map[string]StoreKey
traceWriter io.Writer
traceContext TraceContext
}
var _ CacheMultiStore = cacheMultiStore{}
func newCacheMultiStoreFromRMS(rms *rootMultiStore) cacheMultiStore {
cms := cacheMultiStore{
db: NewCacheKVStore(dbStoreAdapter{rms.db}),
stores: make(map[StoreKey]CacheWrap, len(rms.stores)),
keysByName: rms.keysByName,
db: NewCacheKVStore(dbStoreAdapter{rms.db}),
stores: make(map[StoreKey]CacheWrap, len(rms.stores)),
keysByName: rms.keysByName,
traceWriter: rms.traceWriter,
traceContext: rms.traceContext,
}
for key, store := range rms.stores {
cms.stores[key] = store.CacheWrap()
if cms.TracingEnabled() {
cms.stores[key] = store.CacheWrapWithTrace(cms.traceWriter, cms.traceContext)
} else {
cms.stores[key] = store.CacheWrap()
}
}
return cms
}
func newCacheMultiStoreFromCMS(cms cacheMultiStore) cacheMultiStore {
cms2 := cacheMultiStore{
db: NewCacheKVStore(cms.db),
stores: make(map[StoreKey]CacheWrap, len(cms.stores)),
db: NewCacheKVStore(cms.db),
stores: make(map[StoreKey]CacheWrap, len(cms.stores)),
traceWriter: cms.traceWriter,
traceContext: cms.traceContext,
}
for key, store := range cms.stores {
cms2.stores[key] = store.CacheWrap()
if cms2.TracingEnabled() {
cms2.stores[key] = store.CacheWrapWithTrace(cms2.traceWriter, cms2.traceContext)
} else {
cms2.stores[key] = store.CacheWrap()
}
}
return cms2
}
// WithTracer sets the tracer for the MultiStore that the underlying
// stores will utilize to trace operations. A MultiStore is returned.
func (cms cacheMultiStore) WithTracer(w io.Writer) MultiStore {
cms.traceWriter = w
return cms
}
// WithTracingContext updates the tracing context for the MultiStore by merging
// the given context with the existing context by key. Any existing keys will
// be overwritten. It is implied that the caller should update the context when
// necessary between tracing operations. It returns a modified MultiStore.
func (cms cacheMultiStore) WithTracingContext(tc TraceContext) MultiStore {
if cms.traceContext != nil {
for k, v := range tc {
cms.traceContext[k] = v
}
} else {
cms.traceContext = tc
}
return cms
}
// TracingEnabled returns if tracing is enabled for the MultiStore.
func (cms cacheMultiStore) TracingEnabled() bool {
return cms.traceWriter != nil
}
// ResetTraceContext resets the current tracing context.
func (cms cacheMultiStore) ResetTraceContext() MultiStore {
cms.traceContext = nil
return cms
}
// Implements Store.
func (cms cacheMultiStore) GetStoreType() StoreType {
return sdk.StoreTypeMulti
@ -58,6 +113,11 @@ func (cms cacheMultiStore) CacheWrap() CacheWrap {
return cms.CacheMultiStore().(CacheWrap)
}
// CacheWrapWithTrace implements the CacheWrapper interface.
func (cms cacheMultiStore) CacheWrapWithTrace(_ io.Writer, _ TraceContext) CacheWrap {
return cms.CacheWrap()
}
// Implements MultiStore.
func (cms cacheMultiStore) CacheMultiStore() CacheMultiStore {
return newCacheMultiStoreFromCMS(cms)

View File

@ -1,6 +1,8 @@
package store
import (
"io"
sdk "github.com/cosmos/cosmos-sdk/types"
dbm "github.com/tendermint/tendermint/libs/db"
)
@ -19,6 +21,11 @@ func (dsa dbStoreAdapter) CacheWrap() CacheWrap {
return NewCacheKVStore(dsa)
}
// CacheWrapWithTrace implements the KVStore interface.
func (dsa dbStoreAdapter) CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap {
return NewCacheKVStore(NewTraceKVStore(dsa, w, tc))
}
// Implements KVStore
func (dsa dbStoreAdapter) Prefix(prefix []byte) KVStore {
return prefixStore{dsa, prefix}

View File

@ -1,6 +1,8 @@
package store
import (
"io"
sdk "github.com/cosmos/cosmos-sdk/types"
)
@ -82,7 +84,12 @@ func (gi *gasKVStore) ReverseIterator(start, end []byte) sdk.Iterator {
// Implements KVStore.
func (gi *gasKVStore) CacheWrap() sdk.CacheWrap {
panic("you cannot CacheWrap a GasKVStore")
panic("cannot CacheWrap a GasKVStore")
}
// CacheWrapWithTrace implements the KVStore interface.
func (gi *gasKVStore) CacheWrapWithTrace(_ io.Writer, _ TraceContext) CacheWrap {
panic("cannot CacheWrapWithTrace a GasKVStore")
}
func (gi *gasKVStore) iterator(start, end []byte, ascending bool) sdk.Iterator {

View File

@ -2,6 +2,7 @@ package store
import (
"fmt"
"io"
"sync"
"github.com/tendermint/go-amino"
@ -117,6 +118,11 @@ func (st *iavlStore) CacheWrap() CacheWrap {
return NewCacheKVStore(st)
}
// CacheWrapWithTrace implements the Store interface.
func (st *iavlStore) CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap {
return NewCacheKVStore(NewTraceKVStore(st, w, tc))
}
// Implements KVStore.
func (st *iavlStore) Set(key, value []byte) {
st.tree.Set(key, value)

View File

@ -1,6 +1,8 @@
package store
import (
"io"
sdk "github.com/cosmos/cosmos-sdk/types"
)
@ -19,6 +21,11 @@ func (s prefixStore) CacheWrap() CacheWrap {
return NewCacheKVStore(s)
}
// CacheWrapWithTrace implements the KVStore interface.
func (s prefixStore) CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap {
return NewCacheKVStore(NewTraceKVStore(s, w, tc))
}
// Implements KVStore
func (s prefixStore) Get(key []byte) []byte {
return s.store.Get(append(s.prefix, key...))

View File

@ -2,6 +2,7 @@ package store
import (
"fmt"
"io"
"strings"
"golang.org/x/crypto/ripemd160"
@ -18,16 +19,18 @@ const (
commitInfoKeyFmt = "s/%d" // s/<version>
)
// rootMultiStore is composed of many CommitStores.
// Name contrasts with cacheMultiStore which is for cache-wrapping
// other MultiStores.
// Implements MultiStore.
// rootMultiStore is composed of many CommitStores. Name contrasts with
// cacheMultiStore which is for cache-wrapping other MultiStores. It implements
// the CommitMultiStore interface.
type rootMultiStore struct {
db dbm.DB
lastCommitID CommitID
storesParams map[StoreKey]storeParams
stores map[StoreKey]CommitStore
keysByName map[string]StoreKey
traceWriter io.Writer
traceContext TraceContext
}
var _ CommitMultiStore = (*rootMultiStore)(nil)
@ -130,6 +133,40 @@ func (rs *rootMultiStore) LoadVersion(ver int64) error {
return nil
}
// WithTracer sets the tracer for the MultiStore that the underlying
// stores will utilize to trace operations. A MultiStore is returned.
func (rs *rootMultiStore) WithTracer(w io.Writer) MultiStore {
rs.traceWriter = w
return rs
}
// WithTracingContext updates the tracing context for the MultiStore by merging
// the given context with the existing context by key. Any existing keys will
// be overwritten. It is implied that the caller should update the context when
// necessary between tracing operations. It returns a modified MultiStore.
func (rs *rootMultiStore) WithTracingContext(tc TraceContext) MultiStore {
if rs.traceContext != nil {
for k, v := range tc {
rs.traceContext[k] = v
}
} else {
rs.traceContext = tc
}
return rs
}
// TracingEnabled returns if tracing is enabled for the MultiStore.
func (rs *rootMultiStore) TracingEnabled() bool {
return rs.traceWriter != nil
}
// ResetTraceContext resets the current tracing context.
func (rs *rootMultiStore) ResetTraceContext() MultiStore {
rs.traceContext = nil
return rs
}
//----------------------------------------
// +CommitStore
@ -165,6 +202,11 @@ func (rs *rootMultiStore) CacheWrap() CacheWrap {
return rs.CacheMultiStore().(CacheWrap)
}
// CacheWrapWithTrace implements the CacheWrapper interface.
func (rs *rootMultiStore) CacheWrapWithTrace(_ io.Writer, _ TraceContext) CacheWrap {
return rs.CacheWrap()
}
//----------------------------------------
// +MultiStore
@ -178,9 +220,17 @@ func (rs *rootMultiStore) GetStore(key StoreKey) Store {
return rs.stores[key]
}
// Implements MultiStore.
// GetKVStore implements the MultiStore interface. If tracing is enabled on the
// rootMultiStore, a wrapped TraceKVStore will be returned with the given
// tracer, otherwise, the original KVStore will be returned.
func (rs *rootMultiStore) GetKVStore(key StoreKey) KVStore {
return rs.stores[key].(KVStore)
store := rs.stores[key].(KVStore)
if rs.TracingEnabled() {
store = NewTraceKVStore(store, rs.traceWriter, rs.traceContext)
}
return store
}
// Implements MultiStore.

store/tracekvstore.go (new file, 198 lines)
View File

@ -0,0 +1,198 @@
package store
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
sdk "github.com/cosmos/cosmos-sdk/types"
)
const (
writeOp operation = "write"
readOp operation = "read"
deleteOp operation = "delete"
iterKeyOp operation = "iterKey"
iterValueOp operation = "iterValue"
)
type (
// TraceKVStore implements the KVStore interface with tracing enabled.
// Operations are traced on each core KVStore call and written to the
// underlying io.Writer.
//
// TODO: Should we use a buffered writer and implement Commit on
// TraceKVStore?
TraceKVStore struct {
parent sdk.KVStore
writer io.Writer
context TraceContext
}
// operation represents an IO operation
operation string
// traceOperation implements a traced KVStore operation
traceOperation struct {
Operation operation `json:"operation"`
Key string `json:"key"`
Value string `json:"value"`
Metadata map[string]interface{} `json:"metadata"`
}
)
// NewTraceKVStore returns a reference to a new TraceKVStore given a parent
// KVStore implementation and a buffered writer.
func NewTraceKVStore(parent sdk.KVStore, writer io.Writer, tc TraceContext) *TraceKVStore {
return &TraceKVStore{parent: parent, writer: writer, context: tc}
}
// Get implements the KVStore interface. It traces a read operation and
// delegates a Get call to the parent KVStore.
func (tkv *TraceKVStore) Get(key []byte) []byte {
value := tkv.parent.Get(key)
writeOperation(tkv.writer, readOp, tkv.context, key, value)
return value
}
// Set implements the KVStore interface. It traces a write operation and
// delegates the Set call to the parent KVStore.
func (tkv *TraceKVStore) Set(key []byte, value []byte) {
writeOperation(tkv.writer, writeOp, tkv.context, key, value)
tkv.parent.Set(key, value)
}
// Delete implements the KVStore interface. It traces a write operation and
// delegates the Delete call to the parent KVStore.
func (tkv *TraceKVStore) Delete(key []byte) {
writeOperation(tkv.writer, deleteOp, tkv.context, key, nil)
tkv.parent.Delete(key)
}
// Has implements the KVStore interface. It delegates the Has call to the
// parent KVStore.
func (tkv *TraceKVStore) Has(key []byte) bool {
return tkv.parent.Has(key)
}
// Prefix implements the KVStore interface.
func (tkv *TraceKVStore) Prefix(prefix []byte) KVStore {
return prefixStore{tkv, prefix}
}
// Iterator implements the KVStore interface. It delegates the Iterator call
// to the parent KVStore.
func (tkv *TraceKVStore) Iterator(start, end []byte) sdk.Iterator {
return tkv.iterator(start, end, true)
}
// ReverseIterator implements the KVStore interface. It delegates the
// ReverseIterator call to the parent KVStore.
func (tkv *TraceKVStore) ReverseIterator(start, end []byte) sdk.Iterator {
return tkv.iterator(start, end, false)
}
// iterator facilitates iteration over a KVStore. It delegates the necessary
// calls to its parent KVStore.
func (tkv *TraceKVStore) iterator(start, end []byte, ascending bool) sdk.Iterator {
var parent sdk.Iterator
if ascending {
parent = tkv.parent.Iterator(start, end)
} else {
parent = tkv.parent.ReverseIterator(start, end)
}
return newTraceIterator(tkv.writer, parent, tkv.context)
}
type traceIterator struct {
parent sdk.Iterator
writer io.Writer
context TraceContext
}
func newTraceIterator(w io.Writer, parent sdk.Iterator, tc TraceContext) sdk.Iterator {
return &traceIterator{writer: w, parent: parent, context: tc}
}
// Domain implements the Iterator interface.
func (ti *traceIterator) Domain() (start []byte, end []byte) {
return ti.parent.Domain()
}
// Valid implements the Iterator interface.
func (ti *traceIterator) Valid() bool {
return ti.parent.Valid()
}
// Next implements the Iterator interface.
func (ti *traceIterator) Next() {
ti.parent.Next()
}
// Key implements the Iterator interface.
func (ti *traceIterator) Key() []byte {
key := ti.parent.Key()
writeOperation(ti.writer, iterKeyOp, ti.context, key, nil)
return key
}
// Value implements the Iterator interface.
func (ti *traceIterator) Value() []byte {
value := ti.parent.Value()
writeOperation(ti.writer, iterValueOp, ti.context, nil, value)
return value
}
// Close implements the Iterator interface.
func (ti *traceIterator) Close() {
ti.parent.Close()
}
// GetStoreType implements the KVStore interface. It returns the underlying
// KVStore type.
func (tkv *TraceKVStore) GetStoreType() sdk.StoreType {
return tkv.parent.GetStoreType()
}
// CacheWrap implements the KVStore interface. It panics as a TraceKVStore
// cannot be cache wrapped.
func (tkv *TraceKVStore) CacheWrap() sdk.CacheWrap {
panic("cannot CacheWrap a TraceKVStore")
}
// CacheWrapWithTrace implements the KVStore interface. It panics as a
// TraceKVStore cannot be cache wrapped.
func (tkv *TraceKVStore) CacheWrapWithTrace(_ io.Writer, _ TraceContext) CacheWrap {
panic("cannot CacheWrapWithTrace a TraceKVStore")
}
// writeOperation writes a KVStore operation to the underlying io.Writer as
// JSON-encoded data where the key/value pair is base64 encoded.
func writeOperation(w io.Writer, op operation, tc TraceContext, key, value []byte) {
traceOp := traceOperation{
Operation: op,
Key: base64.StdEncoding.EncodeToString(key),
Value: base64.StdEncoding.EncodeToString(value),
}
if tc != nil {
traceOp.Metadata = tc
}
raw, err := json.Marshal(traceOp)
if err != nil {
panic(fmt.Sprintf("failed to serialize trace operation: %v", err))
}
if _, err := w.Write(raw); err != nil {
panic(fmt.Sprintf("failed to write trace operation: %v", err))
}
io.WriteString(w, "\n")
}

store/tracekvstore_test.go (new file, 283 lines)
View File

@ -0,0 +1,283 @@
package store
import (
"bytes"
"io"
"testing"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tendermint/libs/db"
)
var kvPairs = []KVPair{
KVPair{Key: keyFmt(1), Value: valFmt(1)},
KVPair{Key: keyFmt(2), Value: valFmt(2)},
KVPair{Key: keyFmt(3), Value: valFmt(3)},
}
func newTraceKVStore(w io.Writer) *TraceKVStore {
store := newEmptyTraceKVStore(w)
for _, kvPair := range kvPairs {
store.Set(kvPair.Key, kvPair.Value)
}
return store
}
func newEmptyTraceKVStore(w io.Writer) *TraceKVStore {
memDB := dbStoreAdapter{dbm.NewMemDB()}
tc := TraceContext(map[string]interface{}{"blockHeight": 64})
return NewTraceKVStore(memDB, w, tc)
}
func TestTraceKVStoreGet(t *testing.T) {
testCases := []struct {
key []byte
expectedValue []byte
expectedOut string
}{
{
key: []byte{},
expectedValue: nil,
expectedOut: "{\"operation\":\"read\",\"key\":\"\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
key: kvPairs[0].Key,
expectedValue: kvPairs[0].Value,
expectedOut: "{\"operation\":\"read\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
key: []byte("does-not-exist"),
expectedValue: nil,
expectedOut: "{\"operation\":\"read\",\"key\":\"ZG9lcy1ub3QtZXhpc3Q=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
},
}
for _, tc := range testCases {
var buf bytes.Buffer
store := newTraceKVStore(&buf)
buf.Reset()
value := store.Get(tc.key)
require.Equal(t, tc.expectedValue, value)
require.Equal(t, tc.expectedOut, buf.String())
}
}
func TestTraceKVStoreSet(t *testing.T) {
testCases := []struct {
key []byte
value []byte
expectedOut string
}{
{
key: []byte{},
value: nil,
expectedOut: "{\"operation\":\"write\",\"key\":\"\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
key: kvPairs[0].Key,
value: kvPairs[0].Value,
expectedOut: "{\"operation\":\"write\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n",
},
}
for _, tc := range testCases {
var buf bytes.Buffer
store := newEmptyTraceKVStore(&buf)
buf.Reset()
store.Set(tc.key, tc.value)
require.Equal(t, tc.expectedOut, buf.String())
}
}
func TestTraceKVStoreDelete(t *testing.T) {
testCases := []struct {
key []byte
expectedOut string
}{
{
key: []byte{},
expectedOut: "{\"operation\":\"delete\",\"key\":\"\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
key: kvPairs[0].Key,
expectedOut: "{\"operation\":\"delete\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
},
}
for _, tc := range testCases {
var buf bytes.Buffer
store := newTraceKVStore(&buf)
buf.Reset()
store.Delete(tc.key)
require.Equal(t, tc.expectedOut, buf.String())
}
}
func TestTraceKVStoreHas(t *testing.T) {
testCases := []struct {
key []byte
expected bool
}{
{
key: []byte{},
expected: false,
},
{
key: kvPairs[0].Key,
expected: true,
},
}
for _, tc := range testCases {
var buf bytes.Buffer
store := newTraceKVStore(&buf)
buf.Reset()
ok := store.Has(tc.key)
require.Equal(t, tc.expected, ok)
}
}
func TestTraceKVStoreIterator(t *testing.T) {
var buf bytes.Buffer
store := newTraceKVStore(&buf)
iterator := store.Iterator(nil, nil)
s, e := iterator.Domain()
require.Equal(t, []uint8([]byte(nil)), s)
require.Equal(t, []uint8([]byte(nil)), e)
testCases := []struct {
expectedKey []byte
expectedValue []byte
expectedKeyOut string
expectedvalueOut string
}{
{
expectedKey: kvPairs[0].Key,
expectedValue: kvPairs[0].Value,
expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
expectedKey: kvPairs[1].Key,
expectedValue: kvPairs[1].Value,
expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDI=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMg==\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
expectedKey: kvPairs[2].Key,
expectedValue: kvPairs[2].Value,
expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDM=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMw==\",\"metadata\":{\"blockHeight\":64}}\n",
},
}
for _, tc := range testCases {
buf.Reset()
ka := iterator.Key()
require.Equal(t, tc.expectedKeyOut, buf.String())
buf.Reset()
va := iterator.Value()
require.Equal(t, tc.expectedvalueOut, buf.String())
require.Equal(t, tc.expectedKey, ka)
require.Equal(t, tc.expectedValue, va)
iterator.Next()
}
require.False(t, iterator.Valid())
require.Panics(t, iterator.Next)
require.NotPanics(t, iterator.Close)
}
func TestTraceKVStoreReverseIterator(t *testing.T) {
var buf bytes.Buffer
store := newTraceKVStore(&buf)
iterator := store.ReverseIterator(nil, nil)
s, e := iterator.Domain()
require.Equal(t, []uint8([]byte(nil)), s)
require.Equal(t, []uint8([]byte(nil)), e)
testCases := []struct {
expectedKey []byte
expectedValue []byte
expectedKeyOut string
expectedvalueOut string
}{
{
expectedKey: kvPairs[2].Key,
expectedValue: kvPairs[2].Value,
expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDM=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMw==\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
expectedKey: kvPairs[1].Key,
expectedValue: kvPairs[1].Value,
expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDI=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMg==\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
expectedKey: kvPairs[0].Key,
expectedValue: kvPairs[0].Value,
expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n",
},
}
for _, tc := range testCases {
buf.Reset()
ka := iterator.Key()
require.Equal(t, tc.expectedKeyOut, buf.String())
buf.Reset()
va := iterator.Value()
require.Equal(t, tc.expectedvalueOut, buf.String())
require.Equal(t, tc.expectedKey, ka)
require.Equal(t, tc.expectedValue, va)
iterator.Next()
}
require.False(t, iterator.Valid())
require.Panics(t, iterator.Next)
require.NotPanics(t, iterator.Close)
}
func TestTraceKVStorePrefix(t *testing.T) {
store := newEmptyTraceKVStore(nil)
pStore := store.Prefix([]byte("trace_prefix"))
require.IsType(t, prefixStore{}, pStore)
}
func TestTraceKVStoreGetStoreType(t *testing.T) {
memDB := dbStoreAdapter{dbm.NewMemDB()}
store := newEmptyTraceKVStore(nil)
require.Equal(t, memDB.GetStoreType(), store.GetStoreType())
}
func TestTraceKVStoreCacheWrap(t *testing.T) {
store := newEmptyTraceKVStore(nil)
require.Panics(t, func() { store.CacheWrap() })
}
func TestTraceKVStoreCacheWrapWithTrace(t *testing.T) {
store := newEmptyTraceKVStore(nil)
require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) })
}

View File

@ -6,20 +6,23 @@ import (
// Import cosmos-sdk/types/store.go for convenience.
// nolint
type Store = types.Store
type Committer = types.Committer
type CommitStore = types.CommitStore
type MultiStore = types.MultiStore
type CacheMultiStore = types.CacheMultiStore
type CommitMultiStore = types.CommitMultiStore
type KVStore = types.KVStore
type KVPair = types.KVPair
type Iterator = types.Iterator
type CacheKVStore = types.CacheKVStore
type CommitKVStore = types.CommitKVStore
type CacheWrapper = types.CacheWrapper
type CacheWrap = types.CacheWrap
type CommitID = types.CommitID
type StoreKey = types.StoreKey
type StoreType = types.StoreType
type Queryable = types.Queryable
type (
Store = types.Store
Committer = types.Committer
CommitStore = types.CommitStore
MultiStore = types.MultiStore
CacheMultiStore = types.CacheMultiStore
CommitMultiStore = types.CommitMultiStore
KVStore = types.KVStore
KVPair = types.KVPair
Iterator = types.Iterator
CacheKVStore = types.CacheKVStore
CommitKVStore = types.CommitKVStore
CacheWrapper = types.CacheWrapper
CacheWrap = types.CacheWrap
CommitID = types.CommitID
StoreKey = types.StoreKey
StoreType = types.StoreType
Queryable = types.Queryable
TraceContext = types.TraceContext
)

View File

@ -2,6 +2,7 @@ package types
import (
"fmt"
"io"
abci "github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
@ -50,6 +51,21 @@ type MultiStore interface { //nolint
GetStore(StoreKey) Store
GetKVStore(StoreKey) KVStore
GetKVStoreWithGas(GasMeter, StoreKey) KVStore
// TracingEnabled returns if tracing is enabled for the MultiStore.
TracingEnabled() bool
// WithTracer sets the tracer for the MultiStore that the underlying
// stores will utilize to trace operations. A MultiStore is returned.
WithTracer(w io.Writer) MultiStore
// WithTracingContext sets the tracing context for a MultiStore. It is
// implied that the caller should update the context when necessary between
// tracing operations. A MultiStore is returned.
WithTracingContext(TraceContext) MultiStore
// ResetTraceContext resets the current tracing context.
ResetTraceContext() MultiStore
}
// From MultiStore.CacheMultiStore()....
@ -163,25 +179,27 @@ type KVStoreGetter interface {
//----------------------------------------
// CacheWrap
/*
CacheWrap() makes the most appropriate cache-wrap. For example,
IAVLStore.CacheWrap() returns a CacheKVStore.
CacheWrap() should not return a Committer, since Commit() on
cache-wraps make no sense. It can return KVStore, HeapStore,
SpaceStore, etc.
*/
// CacheWrap makes the most appropriate cache-wrap. For example,
// IAVLStore.CacheWrap() returns a CacheKVStore. CacheWrap should not return
// a Committer, since Commit cache-wraps make no sense. It can return KVStore,
// HeapStore, SpaceStore, etc.
type CacheWrap interface {
// Write syncs with the underlying store.
Write()
// CacheWrap recursively wraps again.
CacheWrap() CacheWrap
// CacheWrapWithTrace recursively wraps again with tracing enabled.
CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap
}
type CacheWrapper interface { //nolint
// CacheWrap cache wraps.
CacheWrap() CacheWrap
// CacheWrapWithTrace cache wraps with tracing enabled.
CacheWrapWithTrace(w io.Writer, tc TraceContext) CacheWrap
}
//----------------------------------------
@ -298,3 +316,7 @@ func (getter PrefixStoreGetter) KVStore(ctx Context) KVStore {
type KVPair cmn.KVPair
//----------------------------------------
// TraceContext contains TraceKVStore context data. It will be written with
// every trace operation.
type TraceContext map[string]interface{}
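A small sketch of the intended call pattern for these tracing methods, mirroring what `BaseApp.BeginBlock` does above (`ms`, `w`, and `height` are assumed inputs):

```go
// Sketch only: ms is any MultiStore, w the trace destination, height the block height.
func traceBlock(ms MultiStore, w io.Writer, height int64) MultiStore {
	ms = ms.WithTracer(w)
	if ms.TracingEnabled() {
		// every subsequent traced operation will carry this metadata
		ms = ms.WithTracingContext(TraceContext{"blockHeight": height})
	}
	return ms
}
```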

View File

@ -81,7 +81,7 @@ func handleMsgCreateValidator(ctx sdk.Context, msg types.MsgCreateValidator, k k
// move coins from the msg.Address account to a (self-delegation) delegator account
// the validator account and global shares are updated within here
_, err := k.Delegate(ctx, msg.DelegatorAddr, msg.Delegation, validator)
_, err := k.Delegate(ctx, msg.DelegatorAddr, msg.Delegation, validator, true)
if err != nil {
return err.Result()
}
@ -136,7 +136,7 @@ func handleMsgDelegate(ctx sdk.Context, msg types.MsgDelegate, k keeper.Keeper)
if validator.Revoked == true {
return ErrValidatorRevoked(k.Codespace()).Result()
}
_, err := k.Delegate(ctx, msg.DelegatorAddr, msg.Delegation, validator)
_, err := k.Delegate(ctx, msg.DelegatorAddr, msg.Delegation, validator, true)
if err != nil {
return err.Result()
}

View File

@ -268,6 +268,7 @@ func TestIncrementsMsgUnbond(t *testing.T) {
initBond := int64(1000)
ctx, accMapper, keeper := keep.CreateTestInput(t, false, initBond)
params := setInstantUnbondPeriod(keeper, ctx)
denom := params.BondDenom
// create validator, delegate
validatorAddr, delegatorAddr := keep.Addrs[0], keep.Addrs[1]
@ -276,10 +277,17 @@ func TestIncrementsMsgUnbond(t *testing.T) {
got := handleMsgCreateValidator(ctx, msgCreateValidator, keeper)
require.True(t, got.IsOK(), "expected create-validator to be ok, got %v", got)
// initial balance
amt1 := accMapper.GetAccount(ctx, delegatorAddr).GetCoins().AmountOf(denom)
msgDelegate := newTestMsgDelegate(delegatorAddr, validatorAddr, initBond)
got = handleMsgDelegate(ctx, msgDelegate, keeper)
require.True(t, got.IsOK(), "expected delegation to be ok, got %v", got)
// balance should have been subtracted after delegation
amt2 := accMapper.GetAccount(ctx, delegatorAddr).GetCoins().AmountOf(denom)
require.Equal(t, amt1.Sub(sdk.NewInt(initBond)).Int64(), amt2.Int64(), "expected coins to be subtracted")
validator, found := keeper.GetValidator(ctx, validatorAddr)
require.True(t, found)
require.Equal(t, initBond*2, validator.DelegatorShares.RoundInt64())
@ -528,8 +536,9 @@ func TestUnbondingPeriod(t *testing.T) {
}
func TestRedelegationPeriod(t *testing.T) {
ctx, _, keeper := keep.CreateTestInput(t, false, 1000)
ctx, AccMapper, keeper := keep.CreateTestInput(t, false, 1000)
validatorAddr, validatorAddr2 := keep.Addrs[0], keep.Addrs[1]
denom := keeper.GetParams(ctx).BondDenom
// set the unbonding time
params := keeper.GetParams(ctx)
@ -538,18 +547,32 @@ func TestRedelegationPeriod(t *testing.T) {
// create the validators
msgCreateValidator := newTestMsgCreateValidator(validatorAddr, keep.PKs[0], 10)
// initial balance
amt1 := AccMapper.GetAccount(ctx, validatorAddr).GetCoins().AmountOf(denom)
got := handleMsgCreateValidator(ctx, msgCreateValidator, keeper)
require.True(t, got.IsOK(), "expected no error on runMsgCreateValidator")
// balance should have been subtracted after creation
amt2 := AccMapper.GetAccount(ctx, validatorAddr).GetCoins().AmountOf(denom)
require.Equal(t, amt1.Sub(sdk.NewInt(10)).Int64(), amt2.Int64(), "expected coins to be subtracted")
msgCreateValidator = newTestMsgCreateValidator(validatorAddr2, keep.PKs[1], 10)
got = handleMsgCreateValidator(ctx, msgCreateValidator, keeper)
require.True(t, got.IsOK(), "expected no error on runMsgCreateValidator")
bal1 := AccMapper.GetAccount(ctx, validatorAddr).GetCoins()
// begin redelegate
msgBeginRedelegate := NewMsgBeginRedelegate(validatorAddr, validatorAddr, validatorAddr2, sdk.NewRat(10))
got = handleMsgBeginRedelegate(ctx, msgBeginRedelegate, keeper)
require.True(t, got.IsOK(), "expected no error, %v", got)
// origin account should not lose tokens as with a regular delegation
bal2 := AccMapper.GetAccount(ctx, validatorAddr).GetCoins()
require.Equal(t, bal1, bal2)
// cannot complete redelegation at same time
msgCompleteRedelegate := NewMsgCompleteRedelegate(validatorAddr, validatorAddr, validatorAddr2)
got = handleMsgCompleteRedelegate(ctx, msgCompleteRedelegate, keeper)

View File

@ -200,9 +200,9 @@ func (k Keeper) RemoveRedelegation(ctx sdk.Context, red types.Redelegation) {
//_____________________________________________________________________________________
// Perform a delegation, set/update everything necessary within the store
// Perform a delegation, set/update everything necessary within the store.
func (k Keeper) Delegate(ctx sdk.Context, delegatorAddr sdk.AccAddress, bondAmt sdk.Coin,
validator types.Validator) (newShares sdk.Rat, err sdk.Error) {
validator types.Validator, subtractAccount bool) (newShares sdk.Rat, err sdk.Error) {
// Get or create the delegator delegation
delegation, found := k.GetDelegation(ctx, delegatorAddr, validator.Owner)
@ -214,12 +214,15 @@ func (k Keeper) Delegate(ctx sdk.Context, delegatorAddr sdk.AccAddress, bondAmt
}
}
// Account new shares, save
pool := k.GetPool(ctx)
_, _, err = k.coinKeeper.SubtractCoins(ctx, delegation.DelegatorAddr, sdk.Coins{bondAmt})
if err != nil {
return
if subtractAccount {
// Account new shares, save
_, _, err = k.coinKeeper.SubtractCoins(ctx, delegation.DelegatorAddr, sdk.Coins{bondAmt})
if err != nil {
return
}
}
pool := k.GetPool(ctx)
validator, pool, newShares = validator.AddTokensFromDel(pool, bondAmt.Amount.Int64())
delegation.Shares = delegation.Shares.Add(newShares)
@ -358,7 +361,7 @@ func (k Keeper) BeginRedelegation(ctx sdk.Context, delegatorAddr, validatorSrcAd
if !found {
return types.ErrBadRedelegationDst(k.Codespace())
}
sharesCreated, err := k.Delegate(ctx, delegatorAddr, returnCoin, dstValidator)
sharesCreated, err := k.Delegate(ctx, delegatorAddr, returnCoin, dstValidator, false)
if err != nil {
return err
}