Refactor Logging using Zerolog (#8072)

* init commit

* server: use flags

* server: godoc++

* updates

* baseapp: update logging

* logging updates

* x/bank: update logging

* logging updates

* lint++

* logging updates

* logging updates

* logging updates

* logging updates

* cl++
Aleksandr Bezobchuk 2020-12-03 18:17:21 -05:00 committed by GitHub
parent 183593ff6d
commit 5291a8ff31
32 changed files with 231 additions and 114 deletions


@ -37,6 +37,13 @@ Ref: https://keepachangelog.com/en/1.0.0/
## [Unreleased]
### Improvements
* (logging) [\#8072](https://github.com/cosmos/cosmos-sdk/pull/8072) Refactor logging:
* Use [zerolog](https://github.com/rs/zerolog) over Tendermint's go-kit logging wrapper.
* Introduce Tendermint's `--log_format=plain|json` flag. Using format `json` allows for emitting structured JSON
logs which can be consumed by an external logging facility (e.g. Loggly). Both formats log to STDERR.
* The existing `--log_level` flag and its default value now solely relate to the global logging
level (e.g. `info`, `debug`, etc.) instead of `<module>:<level>`.
* (crypto) [\#7987](https://github.com/cosmos/cosmos-sdk/pull/7987) Fix the inconsistency of CryptoCdc, only use `codec/legacy.Cdc`.
* (SDK) [\#7925](https://github.com/cosmos/cosmos-sdk/pull/7925) Updated dependencies to use gRPC v1.33.2
* Updated gRPC dependency to v1.33.2
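
To illustrate the two `--log_format` options described in the logging entry above: a structured call such as `logger.Info("minted coins from module account", "amount", "1000stake", "from", "mint")` on a logger scoped to `module=x/bank` would emit roughly the following (illustrative output only; exact timestamps, colors, and field ordering depend on zerolog defaults):

    json:  {"level":"info","module":"x/bank","amount":"1000stake","from":"mint","time":"2020-12-03T18:17:21-05:00","message":"minted coins from module account"}
    plain: 6:17PM INF minted coins from module account amount=1000stake from=mint module=x/bank
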
@ -44,6 +51,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
* (version) [\#7848](https://github.com/cosmos/cosmos-sdk/pull/7848) [\#7941](https://github.com/cosmos/cosmos-sdk/pull/7941) `version --long` output now shows the list of build dependencies and replaced build dependencies.
### State Machine Breaking Changes
* (x/upgrade) [\#7979](https://github.com/cosmos/cosmos-sdk/pull/7979) keeper pubkey storage serialization migration from bech32 to protobuf.
### Bug Fixes
@ -57,6 +65,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
* (x/staking) [\#7419](https://github.com/cosmos/cosmos-sdk/pull/7419) The `TmConsPubKey` method on ValidatorI has been removed and replaced instead by `ConsPubKey` (which returns a SDK `cryptotypes.PubKey`) and `TmConsPublicKey` (which returns a Tendermint proto PublicKey).
### Improvements
* (tendermint) [\#7828](https://github.com/cosmos/cosmos-sdk/pull/7828) Update tendermint dependency to v0.34.0-rc6
## [v0.40.0-rc2](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.40.0-rc2) - 2020-11-02
@ -89,18 +98,16 @@ Ref: https://keepachangelog.com/en/1.0.0/
* __Modules__
* `x/crisis` has a new function: `AddModuleInitFlags`, which will register optional crisis module flags for the start command.
### Bug Fixes
* (client) [\#7699](https://github.com/cosmos/cosmos-sdk/pull/7699) Fix panic in context when setting invalid nodeURI. `WithNodeURI` does not set the `Client` in the context.
* (x/gov) [#7641](https://github.com/cosmos/cosmos-sdk/pull/7641) Fix tally calculation precision error.
### Improvements
### Improvements
* (rest) [#7649](https://github.com/cosmos/cosmos-sdk/pull/7649) Return an unsigned tx in legacy GET /tx endpoint when signature conversion fails
* (cli) [#7764](https://github.com/cosmos/cosmos-sdk/pull/7764) Update x/banking and x/crisis InitChain to improve node startup time
## [v0.40.0-rc1](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.40.0-rc1) - 2020-10-19
### Client Breaking Changes


@ -291,7 +291,7 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) {
// MultiStore (app.cms) so when Commit() is called it persists those values.
app.deliverState.ms.Write()
commitID := app.cms.Commit()
app.logger.Debug("Commit synced", "commit", fmt.Sprintf("%X", commitID))
app.logger.Info("commit synced", "commit", fmt.Sprintf("%X", commitID))
// Reset the Check state to the latest committed.
//
@ -358,22 +358,27 @@ func (app *BaseApp) snapshot(height int64) {
app.logger.Info("snapshot manager not configured")
return
}
app.logger.Info("Creating state snapshot", "height", height)
app.logger.Info("creating state snapshot", "height", height)
snapshot, err := app.snapshotManager.Create(uint64(height))
if err != nil {
app.logger.Error("Failed to create state snapshot", "height", height, "err", err)
app.logger.Error("failed to create state snapshot", "height", height, "err", err)
return
}
app.logger.Info("Completed state snapshot", "height", height, "format", snapshot.Format)
app.logger.Info("completed state snapshot", "height", height, "format", snapshot.Format)
if app.snapshotKeepRecent > 0 {
app.logger.Debug("Pruning state snapshots")
app.logger.Debug("pruning state snapshots")
pruned, err := app.snapshotManager.Prune(app.snapshotKeepRecent)
if err != nil {
app.logger.Error("Failed to prune state snapshots", "err", err)
return
}
app.logger.Debug("Pruned state snapshots", "pruned", pruned)
app.logger.Debug("pruned state snapshots", "pruned", pruned)
}
}
@ -433,13 +438,14 @@ func (app *BaseApp) ListSnapshots(req abci.RequestListSnapshots) abci.ResponseLi
snapshots, err := app.snapshotManager.List()
if err != nil {
app.logger.Error("Failed to list snapshots", "err", err)
app.logger.Error("failed to list snapshots", "err", err)
return resp
}
for _, snapshot := range snapshots {
abciSnapshot, err := snapshot.ToABCI()
if err != nil {
app.logger.Error("Failed to list snapshots", "err", err)
app.logger.Error("failed to list snapshots", "err", err)
return resp
}
resp.Snapshots = append(resp.Snapshots, &abciSnapshot)
@ -455,8 +461,13 @@ func (app *BaseApp) LoadSnapshotChunk(req abci.RequestLoadSnapshotChunk) abci.Re
}
chunk, err := app.snapshotManager.LoadChunk(req.Height, req.Format, req.Chunk)
if err != nil {
app.logger.Error("Failed to load snapshot chunk", "height", req.Height, "format", req.Format,
"chunk", req.Chunk, "err")
app.logger.Error(
"failed to load snapshot chunk",
"height", req.Height,
"format", req.Format,
"chunk", req.Chunk,
"err", err,
)
return abci.ResponseLoadSnapshotChunk{}
}
return abci.ResponseLoadSnapshotChunk{Chunk: chunk}
@ -470,15 +481,16 @@ func (app *BaseApp) OfferSnapshot(req abci.RequestOfferSnapshot) abci.ResponseOf
}
if req.Snapshot == nil {
app.logger.Error("Received nil snapshot")
app.logger.Error("received nil snapshot")
return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}
}
snapshot, err := snapshottypes.SnapshotFromABCI(req.Snapshot)
if err != nil {
app.logger.Error("Failed to decode snapshot metadata", "err", err)
app.logger.Error("failed to decode snapshot metadata", "err", err)
return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}
}
err = app.snapshotManager.Restore(snapshot)
switch {
case err == nil:
@ -488,13 +500,22 @@ func (app *BaseApp) OfferSnapshot(req abci.RequestOfferSnapshot) abci.ResponseOf
return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}
case errors.Is(err, snapshottypes.ErrInvalidMetadata):
app.logger.Error("Rejecting invalid snapshot", "height", req.Snapshot.Height,
"format", req.Snapshot.Format, "err", err)
app.logger.Error(
"rejecting invalid snapshot",
"height", req.Snapshot.Height,
"format", req.Snapshot.Format,
"err", err,
)
return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}
default:
app.logger.Error("Failed to restore snapshot", "height", req.Snapshot.Height,
"format", req.Snapshot.Format, "err", err)
app.logger.Error(
"failed to restore snapshot",
"height", req.Snapshot.Height,
"format", req.Snapshot.Format,
"err", err,
)
// We currently don't support resetting the IAVL stores and retrying a different snapshot,
// so we ask Tendermint to abort all snapshot restoration.
return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}
@ -514,8 +535,12 @@ func (app *BaseApp) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) abci.
return abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}
case errors.Is(err, snapshottypes.ErrChunkHashMismatch):
app.logger.Error("Chunk checksum mismatch, rejecting sender and requesting refetch",
"chunk", req.Index, "sender", req.Sender, "err", err)
app.logger.Error(
"chunk checksum mismatch; rejecting sender and requesting refetch",
"chunk", req.Index,
"sender", req.Sender,
"err", err,
)
return abci.ResponseApplySnapshotChunk{
Result: abci.ResponseApplySnapshotChunk_RETRY,
RefetchChunks: []uint32{req.Index},
@ -523,7 +548,7 @@ func (app *BaseApp) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) abci.
}
default:
app.logger.Error("Failed to restore snapshot", "err", err)
app.logger.Error("failed to restore snapshot", "err", err)
return abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ABORT}
}
}


@ -65,6 +65,10 @@ const (
FlagCountTotal = "count-total"
FlagTimeoutHeight = "timeout-height"
FlagKeyAlgorithm = "algo"
// Tendermint logging flags
FlagLogLevel = "log_level"
FlagLogFormat = "log_format"
)
// LineBreak can be included in a command list to provide a blank line


@ -12,7 +12,7 @@ VOLUME [ /simd ]
WORKDIR /simd
EXPOSE 26656 26657
ENTRYPOINT ["/usr/bin/wrapper.sh"]
CMD ["start"]
CMD ["start", "--log_format", "plain"]
STOPSIGNAL SIGTERM
COPY wrapper.sh /usr/bin/wrapper.sh

go.mod

@ -35,6 +35,7 @@ require (
github.com/prometheus/common v0.15.0
github.com/rakyll/statik v0.1.7
github.com/regen-network/cosmos-proto v0.3.0
github.com/rs/zerolog v1.20.0
github.com/spf13/afero v1.2.2 // indirect
github.com/spf13/cast v1.3.1
github.com/spf13/cobra v1.1.1

go.sum

@ -481,6 +481,9 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs=
github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@ -726,6 +729,7 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=

server/logger.go (new file)

@ -0,0 +1,55 @@
package server
import (
"github.com/rs/zerolog"
tmlog "github.com/tendermint/tendermint/libs/log"
)
var _ tmlog.Logger = (*ZeroLogWrapper)(nil)
// ZeroLogWrapper provides a wrapper around a zerolog.Logger instance. It implements
// Tendermint's Logger interface.
type ZeroLogWrapper struct {
zerolog.Logger
}
// Info implements Tendermint's Logger interface and logs with level INFO. A set
// of key/value tuples may be provided to add context to the log. The number of
// tuples must be even and the key of the tuple must be a string.
func (z ZeroLogWrapper) Info(msg string, keyVals ...interface{}) {
z.Logger.Info().Fields(getLogFields(keyVals...)).Msg(msg)
}
// Error implements Tendermint's Logger interface and logs with level ERR. A set
// of key/value tuples may be provided to add context to the log. The number of
// tuples must be even and the key of the tuple must be a string.
func (z ZeroLogWrapper) Error(msg string, keyVals ...interface{}) {
z.Logger.Error().Fields(getLogFields(keyVals...)).Msg(msg)
}
// Debug implements Tendermint's Logger interface and logs with level DEBUG. A set
// of key/value tuples may be provided to add context to the log. The number of
// tuples must be even and the key of the tuple must be a string.
func (z ZeroLogWrapper) Debug(msg string, keyVals ...interface{}) {
z.Logger.Debug().Fields(getLogFields(keyVals...)).Msg(msg)
}
// With returns a new wrapped logger with additional context provided by a set
// of key/value tuples. The number of tuples must be even and the key of the
// tuple must be a string.
func (z ZeroLogWrapper) With(keyVals ...interface{}) tmlog.Logger {
return ZeroLogWrapper{z.Logger.With().Fields(getLogFields(keyVals...)).Logger()}
}
func getLogFields(keyVals ...interface{}) map[string]interface{} {
if len(keyVals)%2 != 0 {
return nil
}
fields := make(map[string]interface{})
for i := 0; i < len(keyVals); i += 2 {
fields[keyVals[i].(string)] = keyVals[i+1]
}
return fields
}
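
A minimal usage sketch of this wrapper from outside the package (the writer, level, module name, and field values here are illustrative assumptions, not taken from the commit):

package main

import (
	"os"

	"github.com/rs/zerolog"
	tmlog "github.com/tendermint/tendermint/libs/log"

	"github.com/cosmos/cosmos-sdk/server"
)

func main() {
	// Human-readable console output to STDERR at info level, with timestamps.
	zl := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).
		Level(zerolog.InfoLevel).
		With().Timestamp().Logger()

	// The wrapper satisfies Tendermint's log.Logger interface.
	var logger tmlog.Logger = server.ZeroLogWrapper{Logger: zl}

	// Key/value pairs become structured zerolog fields; With scopes later calls.
	logger = logger.With("module", "x/bank")
	logger.Info("minted coins from module account", "amount", "1000stake", "from", "mint")
}
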


@ -240,17 +240,17 @@ func startInProcess(ctx *Context, clientCtx client.Context, appCreator types.App
genDocProvider,
node.DefaultDBProvider,
node.DefaultMetricsProvider(cfg.Instrumentation),
ctx.Logger.With("module", "node"),
ctx.Logger,
)
if err != nil {
return err
}
ctx.Logger.Debug("Initialization: tmNode created")
ctx.Logger.Debug("initialization: tmNode created")
if err := tmNode.Start(); err != nil {
return err
}
ctx.Logger.Debug("Initialization: tmNode started")
ctx.Logger.Debug("initialization: tmNode started")
config := config.GetConfig(ctx.Viper)


@ -14,12 +14,12 @@ import (
"syscall"
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
tmcfg "github.com/tendermint/tendermint/config"
tmcli "github.com/tendermint/tendermint/libs/cli"
tmflags "github.com/tendermint/tendermint/libs/cli/flags"
"github.com/tendermint/tendermint/libs/log"
tmlog "github.com/tendermint/tendermint/libs/log"
dbm "github.com/tendermint/tm-db"
"github.com/cosmos/cosmos-sdk/client/flags"
@ -39,7 +39,7 @@ const ServerContextKey = sdk.ContextKey("server.context")
type Context struct {
Viper *viper.Viper
Config *tmcfg.Config
Logger log.Logger
Logger tmlog.Logger
}
// ErrorCode contains the exit code for server exit.
@ -52,10 +52,14 @@ func (e ErrorCode) Error() string {
}
func NewDefaultContext() *Context {
return NewContext(viper.New(), tmcfg.DefaultConfig(), log.NewTMLogger(log.NewSyncWriter(os.Stdout)))
return NewContext(
viper.New(),
tmcfg.DefaultConfig(),
ZeroLogWrapper{log.Logger},
)
}
func NewContext(v *viper.Viper, config *tmcfg.Config, logger log.Logger) *Context {
func NewContext(v *viper.Viper, config *tmcfg.Config, logger tmlog.Logger) *Context {
return &Context{v, config, logger}
}
@ -86,27 +90,29 @@ func InterceptConfigsPreRunHandler(cmd *cobra.Command) error {
serverCtx.Viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_"))
serverCtx.Viper.AutomaticEnv()
// Intercept configuration files, using both Viper instances separately
// intercept configuration files, using both Viper instances separately
config, err := interceptConfigs(serverCtx.Viper)
if err != nil {
return err
}
// Return value is a tendermint configuration object
// return value is a tendermint configuration object
serverCtx.Config = config
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, tmcfg.DefaultLogLevel())
var logWriter io.Writer
if strings.ToLower(serverCtx.Viper.GetString(flags.FlagLogFormat)) == tmcfg.LogFormatPlain {
logWriter = zerolog.ConsoleWriter{Out: os.Stderr}
} else {
logWriter = os.Stderr
}
logLvlStr := serverCtx.Viper.GetString(flags.FlagLogLevel)
logLvl, err := zerolog.ParseLevel(logLvlStr)
if err != nil {
return err
return fmt.Errorf("failed to parse log level (%s): %w", logLvlStr, err)
}
// Check if the tendermint flag for trace logging is set
// if it is then setup a tracing logger in this app as well
if serverCtx.Viper.GetBool(tmcli.TraceFlag) {
logger = log.NewTracingLogger(logger)
}
serverCtx.Logger = logger.With("module", "main")
serverCtx.Logger = ZeroLogWrapper{zerolog.New(logWriter).Level(logLvl).With().Timestamp().Logger()}
return SetCmdServerContext(cmd, serverCtx)
}
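
Because the added and removed lines in this hunk are interleaved, the new construction path may be easier to read in one piece. A condensed sketch of that logic, written as if it lived in package server (the helper name and signature are illustrative, not part of the commit):

package server

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/rs/zerolog"
	"github.com/spf13/viper"
	tmcfg "github.com/tendermint/tendermint/config"
	tmlog "github.com/tendermint/tendermint/libs/log"

	"github.com/cosmos/cosmos-sdk/client/flags"
)

// newTMLogger picks a writer based on --log_format, parses --log_level, and
// wraps zerolog so the result satisfies Tendermint's Logger interface.
func newTMLogger(v *viper.Viper) (tmlog.Logger, error) {
	var logWriter io.Writer
	if strings.ToLower(v.GetString(flags.FlagLogFormat)) == tmcfg.LogFormatPlain {
		logWriter = zerolog.ConsoleWriter{Out: os.Stderr} // human-readable console output
	} else {
		logWriter = os.Stderr // raw JSON, the default format
	}

	logLvlStr := v.GetString(flags.FlagLogLevel)
	logLvl, err := zerolog.ParseLevel(logLvlStr)
	if err != nil {
		return nil, fmt.Errorf("failed to parse log level (%s): %w", logLvlStr, err)
	}

	return ZeroLogWrapper{zerolog.New(logWriter).Level(logLvl).With().Timestamp().Logger()}, nil
}
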


@ -6,8 +6,10 @@ import (
"os"
"path/filepath"
"github.com/rs/zerolog"
"github.com/spf13/cast"
"github.com/spf13/cobra"
tmcfg "github.com/tendermint/tendermint/config"
tmcli "github.com/tendermint/tendermint/libs/cli"
"github.com/tendermint/tendermint/libs/log"
dbm "github.com/tendermint/tm-db"
@ -79,7 +81,8 @@ func Execute(rootCmd *cobra.Command) error {
ctx = context.WithValue(ctx, client.ClientContextKey, &client.Context{})
ctx = context.WithValue(ctx, server.ServerContextKey, srvCtx)
rootCmd.PersistentFlags().String("log_level", srvCtx.Config.LogLevel, "The logging level in the format of <module>:<level>,...")
rootCmd.PersistentFlags().String(flags.FlagLogLevel, zerolog.InfoLevel.String(), "The logging level (trace|debug|info|warn|error|fatal|panic)")
rootCmd.PersistentFlags().String(flags.FlagLogFormat, tmcfg.LogFormatJSON, "The logging format (json|plain)")
executor := tmcli.PrepareBaseCmd(rootCmd, "", simapp.DefaultNodeHome)
return executor.ExecuteContext(ctx)
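
With both flags registered on the root command, an operator of the simapp binary could, as a hypothetical example, start a node with human-readable logs at debug verbosity (the defaults remain `info` and `json`):

	simd start --log_level debug --log_format plain
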


@ -86,7 +86,7 @@ func NewAccountKeeper(
// Logger returns a module-specific logger.
func (ak AccountKeeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName))
return ctx.Logger().With("module", "x/"+types.ModuleName)
}
// GetPubKey Returns the PubKey of the account at address


@ -1,7 +1,6 @@
package keeper
import (
"fmt"
"time"
"github.com/cosmos/cosmos-sdk/codec"
@ -341,7 +340,7 @@ func (k BaseKeeper) MintCoins(ctx sdk.Context, moduleName string, amt sdk.Coins)
k.SetSupply(ctx, supply)
logger := k.Logger(ctx)
logger.Info(fmt.Sprintf("minted %s from %s module account", amt.String(), moduleName))
logger.Info("minted coins from module account", "amount", amt.String(), "from", moduleName)
return nil
}
@ -369,7 +368,7 @@ func (k BaseKeeper) BurnCoins(ctx sdk.Context, moduleName string, amt sdk.Coins)
k.SetSupply(ctx, supply)
logger := k.Logger(ctx)
logger.Info(fmt.Sprintf("burned %s from %s module account", amt.String(), moduleName))
logger.Info("burned tokens from module account", "amount", amt.String(), "from", moduleName)
return nil
}


@ -49,7 +49,7 @@ func NewBaseViewKeeper(cdc codec.BinaryMarshaler, storeKey sdk.StoreKey, ak type
// Logger returns a module-specific logger.
func (k BaseViewKeeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName))
return ctx.Logger().With("module", "x/"+types.ModuleName)
}
// HasBalance returns whether or not an account has at least amt balance.


@ -44,7 +44,7 @@ func NewKeeper(
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName))
return ctx.Logger().With("module", "x/"+types.ModuleName)
}
// RegisterRoute register the routes for each of the invariants
@ -76,7 +76,7 @@ func (k Keeper) AssertInvariants(ctx sdk.Context) {
invarRoutes := k.Routes()
n := len(invarRoutes)
for i, ir := range invarRoutes {
logger.Debug("Asserting cirisis invariants", "inv", fmt.Sprint(i, "/", n))
logger.Info("asserting cirisis invariants", "inv", fmt.Sprint(i, "/", n))
if res, stop := ir.Invar(ctx); stop {
// TODO: Include app name as part of context to allow for this to be
// variable.


@ -152,9 +152,13 @@ func (k Keeper) withdrawDelegationRewards(ctx sdk.Context, val stakingtypes.Vali
rewards := rewardsRaw.Intersect(outstanding)
if !rewards.IsEqual(rewardsRaw) {
logger := k.Logger(ctx)
logger.Info(fmt.Sprintf("missing rewards rounding error, delegator %v"+
"withdrawing rewards from validator %v, should have received %v, got %v",
val.GetOperator(), del.GetDelegatorAddr(), rewardsRaw, rewards))
logger.Info(
"rounding error withdrawing rewards from validator",
"delegator", del.GetDelegatorAddr().String(),
"validator", val.GetOperator().String(),
"got", rewards.String(),
"expected", rewardsRaw.String(),
)
}
// truncate coins, return remainder to community pool


@ -57,7 +57,7 @@ func NewKeeper(
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName))
return ctx.Logger().With("module", "x/"+types.ModuleName)
}
// SetWithdrawAddr sets a new address that will receive the rewards upon withdrawal


@ -1,8 +1,6 @@
package keeper
import (
"fmt"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/x/distribution/types"
@ -13,16 +11,19 @@ func HandleCommunityPoolSpendProposal(ctx sdk.Context, k Keeper, p *types.Commun
if k.blockedAddrs[p.Recipient] {
return sdkerrors.Wrapf(sdkerrors.ErrUnauthorized, "%s is not allowed to receive external funds", p.Recipient)
}
recipient, addrErr := sdk.AccAddressFromBech32(p.Recipient)
if addrErr != nil {
return addrErr
}
err := k.DistributeFromFeePool(ctx, p.Amount, recipient)
if err != nil {
return err
}
logger := k.Logger(ctx)
logger.Info(fmt.Sprintf("transferred %s from the community pool to recipient %s", p.Amount, p.Recipient))
logger.Info("transferred from the community pool to recipient", "amount", p.Amount.String(), "recipient", p.Recipient)
return nil
}


@ -40,7 +40,7 @@ func NewKeeper(
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName))
return ctx.Logger().With("module", "x/"+types.ModuleName)
}
// SetRouter sets the Evidence Handler router for the x/evidence module. Note,


@ -30,13 +30,13 @@ func EndBlocker(ctx sdk.Context, keeper keeper.Keeper) {
)
logger.Info(
fmt.Sprintf("proposal %d (%s) didn't meet minimum deposit of %s (had only %s); deleted",
proposal.ProposalId,
proposal.GetTitle(),
keeper.GetDepositParams(ctx).MinDeposit,
proposal.TotalDeposit,
),
"proposal did not meet minimum deposit; deleted",
"proposal", proposal.ProposalId,
"title", proposal.GetTitle(),
"min_deposit", keeper.GetDepositParams(ctx).MinDeposit.String(),
"total_deposit", proposal.TotalDeposit.String(),
)
return false
})
@ -90,10 +90,10 @@ func EndBlocker(ctx sdk.Context, keeper keeper.Keeper) {
keeper.RemoveFromActiveProposalQueue(ctx, proposal.ProposalId, proposal.VotingEndTime)
logger.Info(
fmt.Sprintf(
"proposal %d (%s) tallied; result: %s",
proposal.ProposalId, proposal.GetTitle(), logMsg,
),
"proposal tallied",
"proposal", proposal.ProposalId,
"title", proposal.GetTitle(),
"result", logMsg,
)
ctx.EventManager().EmitEvent(

View File

@ -68,7 +68,7 @@ func NewKeeper(
// Logger returns a module-specific logger.
func (keeper Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName))
return ctx.Logger().With("module", "x/"+types.ModuleName)
}
// Router returns the gov Keeper's Router


@ -1,8 +1,6 @@
package keeper
import (
"fmt"
tmbytes "github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/libs/log"
@ -63,7 +61,7 @@ func NewKeeper(
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s-%s", host.ModuleName, types.ModuleName))
return ctx.Logger().With("module", "x/"+host.ModuleName+"-"+types.ModuleName)
}
// GetTransferAccount returns the ICS20 - transfers ModuleAccount


@ -47,7 +47,7 @@ func NewKeeper(cdc codec.BinaryMarshaler, key sdk.StoreKey, paramSpace paramtype
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s/%s", host.ModuleName, types.SubModuleName))
return ctx.Logger().With("module", "x/"+host.ModuleName+"/"+types.SubModuleName)
}
// GenerateClientIdentifier returns the next client identifier.


@ -1,8 +1,6 @@
package keeper
import (
"fmt"
"github.com/tendermint/tendermint/libs/log"
"github.com/cosmos/cosmos-sdk/codec"
@ -36,7 +34,7 @@ func NewKeeper(cdc codec.BinaryMarshaler, key sdk.StoreKey, ck types.ClientKeepe
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s/%s", host.ModuleName, types.SubModuleName))
return ctx.Logger().With("module", "x/"+host.ModuleName+"/"+types.SubModuleName)
}
// GetCommitmentPrefix returns the IBC connection store prefix as a commitment


@ -1,7 +1,6 @@
package keeper
import (
"fmt"
"strconv"
"strings"
@ -52,7 +51,7 @@ func NewKeeper(
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s/%s", host.ModuleName, types.SubModuleName))
return ctx.Logger().With("module", "x/"+host.ModuleName+"/"+types.SubModuleName)
}
// GenerateChannelIdentifier returns the next channel identifier.


@ -26,7 +26,7 @@ func NewKeeper(sck capabilitykeeper.ScopedKeeper) Keeper {
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s/%s", host.ModuleName, types.SubModuleName))
return ctx.Logger().With("module", "x/"+host.ModuleName+"/"+types.SubModuleName)
}
// isBounded checks a given port ID is already bounded.


@ -1,8 +1,6 @@
package keeper
import (
"fmt"
"github.com/tendermint/tendermint/libs/log"
"github.com/cosmos/cosmos-sdk/codec"
@ -51,7 +49,7 @@ func NewKeeper(
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName))
return ctx.Logger().With("module", "x/"+types.ModuleName)
}
// get the minter


@ -1,8 +1,6 @@
package keeper
import (
"fmt"
"github.com/tendermint/tendermint/libs/log"
"github.com/cosmos/cosmos-sdk/codec"
@ -33,7 +31,7 @@ func NewKeeper(cdc codec.BinaryMarshaler, legacyAmino *codec.LegacyAmino, key, t
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s", proposal.ModuleName))
return ctx.Logger().With("module", "x/"+proposal.ModuleName)
}
// Allocate subspace used for keepers


@ -48,6 +48,8 @@ func (k Keeper) HandleValidatorSignature(ctx sdk.Context, addr cryptotypes.Addre
// Array value at this index has not changed, no need to update counter
}
minSignedPerWindow := k.MinSignedPerWindow(ctx)
if missed {
ctx.EventManager().EmitEvent(
sdk.NewEvent(
@ -59,21 +61,22 @@ func (k Keeper) HandleValidatorSignature(ctx sdk.Context, addr cryptotypes.Addre
)
logger.Info(
fmt.Sprintf("Absent validator %s at height %d, %d missed, threshold %d", consAddr, height, signInfo.MissedBlocksCounter, k.MinSignedPerWindow(ctx)))
"absent validator",
"height", height,
"validator", consAddr.String(),
"missed", signInfo.MissedBlocksCounter,
"threshold", minSignedPerWindow,
)
}
minHeight := signInfo.StartHeight + k.SignedBlocksWindow(ctx)
maxMissed := k.SignedBlocksWindow(ctx) - k.MinSignedPerWindow(ctx)
maxMissed := k.SignedBlocksWindow(ctx) - minSignedPerWindow
// if we are past the minimum height and the validator has missed too many blocks, punish them
if height > minHeight && signInfo.MissedBlocksCounter > maxMissed {
validator := k.sk.ValidatorByConsAddr(ctx, consAddr)
if validator != nil && !validator.IsJailed() {
// Downtime confirmed: slash and jail the validator
logger.Info(fmt.Sprintf("Validator %s past min height of %d and below signed blocks threshold of %d",
consAddr, minHeight, k.MinSignedPerWindow(ctx)))
// We need to retrieve the stake distribution which signed the block, so we subtract ValidatorUpdateDelay from the evidence height,
// and subtract an additional 1 since this is the LastCommit.
// Note that this *can* result in a negative "distributionHeight" up to -ValidatorUpdateDelay-1,
@ -99,10 +102,21 @@ func (k Keeper) HandleValidatorSignature(ctx sdk.Context, addr cryptotypes.Addre
signInfo.MissedBlocksCounter = 0
signInfo.IndexOffset = 0
k.clearValidatorMissedBlockBitArray(ctx, consAddr)
} else {
// Validator was (a) not found or (b) already jailed, don't slash
logger.Info(
fmt.Sprintf("Validator %s would have been slashed for downtime, but was either not found in store or already jailed", consAddr),
"slashing and jailing validator due to liveness fault",
"height", height,
"validator", consAddr.String(),
"min_height", minHeight,
"threshold", minSignedPerWindow,
"slashed", k.SlashFractionDowntime(ctx).String(),
"jailed_until", signInfo.JailedUntil,
)
} else {
// validator was (a) not found or (b) already jailed so we do not slash
logger.Info(
"validator would have been slashed for downtime, but was either not found in store or already jailed",
"validator", consAddr.String(),
)
}
}


@ -36,7 +36,7 @@ func NewKeeper(cdc codec.BinaryMarshaler, key sdk.StoreKey, sk types.StakingKeep
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName))
return ctx.Logger().With("module", "x/"+types.ModuleName)
}
// AddPubkey sets a address-pubkey relation


@ -65,7 +65,7 @@ func NewKeeper(
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName))
return ctx.Logger().With("module", "x/"+types.ModuleName)
}
// Set the validator hooks


@ -41,10 +41,10 @@ func (k Keeper) Slash(ctx sdk.Context, consAddr sdk.ConsAddress, infractionHeigh
// NOTE: Correctness dependent on invariant that unbonding delegations / redelegations must also have been completely
// slashed in this case - which we don't explicitly check, but should be true.
// Log the slash attempt for future reference (maybe we should tag it too)
logger.Error(fmt.Sprintf(
"WARNING: Ignored attempt to slash a nonexistent validator with address %s, we recommend you investigate immediately",
consAddr))
logger.Error(
"WARNING: ignored attempt to slash a nonexistent validator; we recommend you investigate immediately",
"validator", consAddr.String(),
)
return
}
@ -71,10 +71,12 @@ func (k Keeper) Slash(ctx sdk.Context, consAddr sdk.ConsAddress, infractionHeigh
infractionHeight, ctx.BlockHeight()))
case infractionHeight == ctx.BlockHeight():
// Special-case slash at current height for efficiency - we don't need to look through unbonding delegations or redelegations
logger.Info(fmt.Sprintf(
"slashing at current height %d, not scanning unbonding delegations & redelegations",
infractionHeight))
// Special-case slash at current height for efficiency - we don't need to
// look through unbonding delegations or redelegations.
logger.Info(
"slashing at current height; not scanning unbonding delegations & redelegations",
"height", infractionHeight,
)
case infractionHeight < ctx.BlockHeight():
// Iterate through unbonding delegations from slashed validator
@ -132,10 +134,12 @@ func (k Keeper) Slash(ctx sdk.Context, consAddr sdk.ConsAddress, infractionHeigh
panic("invalid validator status")
}
// Log that a slash occurred!
logger.Info(fmt.Sprintf(
"validator %s slashed by slash factor of %s; burned %v tokens",
validator.GetOperator(), slashFactor.String(), tokensToBurn))
logger.Info(
"validator slashed by slash factor",
"validator", validator.GetOperator().String(),
"slash_factor", slashFactor.String(),
"burned", tokensToBurn,
)
}
// jail a validator
@ -143,7 +147,7 @@ func (k Keeper) Jail(ctx sdk.Context, consAddr sdk.ConsAddress) {
validator := k.mustGetValidatorByConsAddr(ctx, consAddr)
k.jailValidator(ctx, validator)
logger := k.Logger(ctx)
logger.Info(fmt.Sprintf("validator %s jailed", consAddr))
logger.Info("validator jailed", "validator", consAddr)
}
// unjail a validator
@ -151,7 +155,7 @@ func (k Keeper) Unjail(ctx sdk.Context, consAddr sdk.ConsAddress) {
validator := k.mustGetValidatorByConsAddr(ctx, consAddr)
k.unjailValidator(ctx, validator)
logger := k.Logger(ctx)
logger.Info(fmt.Sprintf("validator %s unjailed", consAddr))
logger.Info("validator un-jailed", "validator", consAddr)
}
// slash an unbonding delegation and update the pool


@ -3,7 +3,6 @@ package keeper
import (
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
@ -183,7 +182,7 @@ func (k Keeper) ClearUpgradePlan(ctx sdk.Context) {
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName))
return ctx.Logger().With("module", "x/"+types.ModuleName)
}
// GetUpgradePlan returns the currently scheduled Plan if any, setting havePlan to true if there is a scheduled