Merge PR #6475: Pruning Refactor

This commit is contained in:
Alexander Bezobchuk 2020-06-22 16:31:33 -04:00 committed by GitHub
parent 6700d776b8
commit 4716260a6e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
24 changed files with 416 additions and 577 deletions

View File

@ -189,7 +189,7 @@ func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) {
func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) {
rs := rootmulti.NewStore(db)
rs.SetPruning(store.PruneSyncable)
rs.SetPruning(store.PruneDefault)
key := sdk.NewKVStoreKey(storeKey)
rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil)
err := rs.LoadLatestVersion()
@ -255,7 +255,7 @@ func TestSetLoader(t *testing.T) {
func TestAppVersionSetterGetter(t *testing.T) {
logger := defaultLogger()
pruningOpt := SetPruning(store.PruneSyncable)
pruningOpt := SetPruning(store.PruneDefault)
db := dbm.NewMemDB()
name := t.Name()
app := NewBaseApp(name, logger, db, nil, pruningOpt)
@ -308,8 +308,9 @@ func TestLoadVersionInvalid(t *testing.T) {
func TestLoadVersionPruning(t *testing.T) {
logger := log.NewNopLogger()
pruningOptions := store.PruningOptions{
KeepEvery: 2,
SnapshotEvery: 6,
KeepRecent: 2,
KeepEvery: 3,
Interval: 1,
}
pruningOpt := SetPruning(pruningOptions)
db := dbm.NewMemDB()
@ -331,62 +332,33 @@ func TestLoadVersionPruning(t *testing.T) {
require.Equal(t, int64(0), lastHeight)
require.Equal(t, emptyCommitID, lastID)
// execute a block
header := abci.Header{Height: 1}
app.BeginBlock(abci.RequestBeginBlock{Header: header})
res := app.Commit()
var lastCommitID sdk.CommitID
// execute a block, collect commit ID
header = abci.Header{Height: 2}
app.BeginBlock(abci.RequestBeginBlock{Header: header})
res = app.Commit()
commitID2 := sdk.CommitID{Version: 2, Hash: res.Data}
// Commit seven blocks, of which 7 (latest) is kept in addition to 6, 5
// (keep recent) and 3 (keep every).
for i := int64(1); i <= 7; i++ {
app.BeginBlock(abci.RequestBeginBlock{Header: abci.Header{Height: i}})
res := app.Commit()
lastCommitID = sdk.CommitID{Version: i, Hash: res.Data}
}
// execute a block
header = abci.Header{Height: 3}
app.BeginBlock(abci.RequestBeginBlock{Header: header})
res = app.Commit()
commitID3 := sdk.CommitID{Version: 3, Hash: res.Data}
for _, v := range []int64{1, 2, 4} {
_, err = app.cms.CacheMultiStoreWithVersion(v)
require.Error(t, err)
}
// reload with LoadLatestVersion, check it loads last flushed version
for _, v := range []int64{3, 5, 6, 7} {
_, err = app.cms.CacheMultiStoreWithVersion(v)
require.NoError(t, err)
}
// reload with LoadLatestVersion, check it loads last version
app = NewBaseApp(name, logger, db, nil, pruningOpt)
app.MountStores(capKey)
err = app.LoadLatestVersion()
require.Nil(t, err)
testLoadVersionHelper(t, app, int64(2), commitID2)
// re-execute block 3 and check it is same CommitID
header = abci.Header{Height: 3}
app.BeginBlock(abci.RequestBeginBlock{Header: header})
res = app.Commit()
recommitID3 := sdk.CommitID{Version: 3, Hash: res.Data}
require.Equal(t, commitID3, recommitID3, "Commits of identical blocks not equal after reload")
// execute a block, collect commit ID
header = abci.Header{Height: 4}
app.BeginBlock(abci.RequestBeginBlock{Header: header})
res = app.Commit()
commitID4 := sdk.CommitID{Version: 4, Hash: res.Data}
// execute a block
header = abci.Header{Height: 5}
app.BeginBlock(abci.RequestBeginBlock{Header: header})
res = app.Commit()
// reload with LoadLatestVersion, check it loads last flushed version
app = NewBaseApp(name, logger, db, nil, pruningOpt)
app.MountStores(capKey)
err = app.LoadLatestVersion()
require.Nil(t, err)
testLoadVersionHelper(t, app, int64(4), commitID4)
// reload with LoadVersion of previous flushed version
// and check it fails since previous flush should be pruned
app = NewBaseApp(name, logger, db, nil, pruningOpt)
app.MountStores(capKey)
err = app.LoadVersion(2)
require.NotNil(t, err)
testLoadVersionHelper(t, app, int64(7), lastCommitID)
}
func testLoadVersionHelper(t *testing.T, app *BaseApp, expectedHeight int64, expectedID sdk.CommitID) {

8
go.mod
View File

@ -1,3 +1,5 @@
go 1.14
module github.com/cosmos/cosmos-sdk
require (
@ -35,15 +37,11 @@ require (
github.com/tendermint/btcd v0.1.1
github.com/tendermint/crypto v0.0.0-20191022145703-50d29ede1e15
github.com/tendermint/go-amino v0.15.1
github.com/tendermint/iavl v0.13.3
github.com/tendermint/iavl v0.13.4-0.20200621145059-83c3470ad61d
github.com/tendermint/tendermint v0.33.5
github.com/tendermint/tm-db v0.5.1
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9 // indirect
google.golang.org/grpc v1.29.1
google.golang.org/protobuf v1.24.0 // indirect
gopkg.in/yaml.v2 v2.3.0
)
replace github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.2-alpha.regen.4
go 1.14

14
go.sum
View File

@ -201,6 +201,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@ -344,6 +346,8 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
@ -506,8 +510,8 @@ github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYM
github.com/tendermint/go-amino v0.15.1 h1:D2uk35eT4iTsvJd9jWIetzthE5C0/k2QmMFkCN+4JgQ=
github.com/tendermint/go-amino v0.15.1/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME=
github.com/tendermint/iavl v0.13.2/go.mod h1:vE1u0XAGXYjHykd4BLp8p/yivrw2PF1TuoljBcsQoGA=
github.com/tendermint/iavl v0.13.3 h1:expgBDY1MX+6/3sqrIxGChbTNf9N9aTJ67SH4bPchCs=
github.com/tendermint/iavl v0.13.3/go.mod h1:2lE7GiWdSvc7kvT78ncIKmkOjCnp6JEnSb2O7B9htLw=
github.com/tendermint/iavl v0.13.4-0.20200621145059-83c3470ad61d h1:CeGkAbISdVP2LtmxWUv69KAzvrFjndehTtKTCd/kn9E=
github.com/tendermint/iavl v0.13.4-0.20200621145059-83c3470ad61d/go.mod h1:ZftLrZ/r+rapraBmqypoFAiw8YbvTglawkmZu2grdkg=
github.com/tendermint/tendermint v0.33.2 h1:NzvRMTuXJxqSsFed2J7uHmMU5N1CVzSpfi3nCc882KY=
github.com/tendermint/tendermint v0.33.2/go.mod h1:25DqB7YvV1tN3tHsjWoc2vFtlwICfrub9XO6UBO+4xk=
github.com/tendermint/tendermint v0.33.5 h1:jYgRd9ImkzA9iOyhpmgreYsqSB6tpDa6/rXYPb8HKE8=
@ -599,6 +603,8 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -642,6 +648,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -736,6 +744,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=

View File

@ -6,7 +6,7 @@ import (
"github.com/spf13/viper"
"github.com/cosmos/cosmos-sdk/store"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
"github.com/cosmos/cosmos-sdk/telemetry"
sdk "github.com/cosmos/cosmos-sdk/types"
)
@ -22,9 +22,10 @@ type BaseConfig struct {
// specified in this config (e.g. 0.25token1;0.0001token2).
MinGasPrices string `mapstructure:"minimum-gas-prices"`
Pruning string `mapstructure:"pruning"`
PruningKeepEvery string `mapstructure:"pruning-keep-every"`
PruningSnapshotEvery string `mapstructure:"pruning-snapshot-every"`
Pruning string `mapstructure:"pruning"`
PruningKeepRecent string `mapstructure:"pruning-keep-recent"`
PruningKeepEvery string `mapstructure:"pruning-keep-every"`
PruningInterval string `mapstructure:"pruning-interval"`
// HaltHeight contains a non-zero block height at which a node will gracefully
// halt and shutdown that can be used to assist upgrades and testing.
@ -114,11 +115,12 @@ func (c *Config) GetMinGasPrices() sdk.DecCoins {
func DefaultConfig() *Config {
return &Config{
BaseConfig: BaseConfig{
MinGasPrices: defaultMinGasPrices,
InterBlockCache: true,
Pruning: store.PruningStrategySyncable,
PruningKeepEvery: "0",
PruningSnapshotEvery: "0",
MinGasPrices: defaultMinGasPrices,
InterBlockCache: true,
Pruning: storetypes.PruningOptionDefault,
PruningKeepRecent: "0",
PruningKeepEvery: "0",
PruningInterval: "0",
},
Telemetry: telemetry.Config{
Enabled: false,
@ -148,13 +150,14 @@ func GetConfig() Config {
return Config{
BaseConfig: BaseConfig{
MinGasPrices: viper.GetString("minimum-gas-prices"),
InterBlockCache: viper.GetBool("inter-block-cache"),
Pruning: viper.GetString("pruning"),
PruningKeepEvery: viper.GetString("pruning-keep-every"),
PruningSnapshotEvery: viper.GetString("pruning-snapshot-every"),
HaltHeight: viper.GetUint64("halt-height"),
HaltTime: viper.GetUint64("halt-time"),
MinGasPrices: viper.GetString("minimum-gas-prices"),
InterBlockCache: viper.GetBool("inter-block-cache"),
Pruning: viper.GetString("pruning"),
PruningKeepRecent: viper.GetString("pruning-keep-recent"),
PruningKeepEvery: viper.GetString("pruning-keep-every"),
PruningInterval: viper.GetString("pruning-interval"),
HaltHeight: viper.GetUint64("halt-height"),
HaltTime: viper.GetUint64("halt-time"),
},
Telemetry: telemetry.Config{
ServiceName: viper.GetString("telemetry.service-name"),

View File

@ -20,16 +20,16 @@ const defaultConfigTemplate = `# This is a TOML config file.
# specified in this config (e.g. 0.25token1;0.0001token2).
minimum-gas-prices = "{{ .BaseConfig.MinGasPrices }}"
# Pruning sets the pruning strategy: syncable, nothing, everything, custom
# syncable: only those states not needed for state syncing will be deleted (keeps last 100 + every 10000th)
# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals
# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node)
# everything: all saved states will be deleted, storing only the current state
# custom: allows fine-grained control through the pruning-keep-every and pruning-snapshot-every options.
# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals
# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval'
pruning = "{{ .BaseConfig.Pruning }}"
# These are applied if and only if the pruning strategy is custom.
pruning-keep-recent = "{{ .BaseConfig.PruningKeepRecent }}"
pruning-keep-every = "{{ .BaseConfig.PruningKeepEvery }}"
pruning-snapshot-every = "{{ .BaseConfig.PruningSnapshotEvery }}"
pruning-interval = "{{ .BaseConfig.PruningInterval }}"
# HaltHeight contains a non-zero block height at which a node will gracefully
# halt and shutdown that can be used to assist upgrades and testing.

View File

@ -2,29 +2,34 @@ package server
import (
"fmt"
"strings"
"github.com/spf13/viper"
"github.com/cosmos/cosmos-sdk/store"
"github.com/cosmos/cosmos-sdk/store/types"
)
// GetPruningOptionsFromFlags parses start command flags and returns the correct PruningOptions.
// flagPruning prevails over flagPruningKeepEvery and flagPruningSnapshotEvery.
// Default option is PruneSyncable.
func GetPruningOptionsFromFlags() (store.PruningOptions, error) {
strategy := viper.GetString(flagPruning)
switch strategy {
case "syncable", "nothing", "everything":
return store.NewPruningOptionsFromString(viper.GetString(flagPruning)), nil
// GetPruningOptionsFromFlags parses command flags and returns the correct
// PruningOptions. If a pruning strategy is provided, that will be parsed and
// returned, otherwise, it is assumed custom pruning options are provided.
func GetPruningOptionsFromFlags() (types.PruningOptions, error) {
strategy := strings.ToLower(viper.GetString(FlagPruning))
case "custom":
opts := store.PruningOptions{
KeepEvery: viper.GetInt64(flagPruningKeepEvery),
SnapshotEvery: viper.GetInt64(flagPruningSnapshotEvery),
}
if !opts.IsValid() {
return opts, fmt.Errorf("invalid granular options")
switch strategy {
case types.PruningOptionDefault, types.PruningOptionNothing, types.PruningOptionEverything:
return types.NewPruningOptionsFromString(strategy), nil
case types.PruningOptionCustom:
opts := types.NewPruningOptions(
viper.GetUint64(FlagPruningKeepRecent),
viper.GetUint64(FlagPruningKeepEvery), viper.GetUint64(FlagPruningInterval),
)
if err := opts.Validate(); err != nil {
return opts, fmt.Errorf("invalid custom pruning options: %w", err)
}
return opts, nil
default:

View File

@ -6,53 +6,58 @@ import (
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/store"
"github.com/cosmos/cosmos-sdk/store/types"
)
func TestGetPruningOptionsFromFlags(t *testing.T) {
tests := []struct {
name string
initParams func()
expectedOptions store.PruningOptions
expectedOptions types.PruningOptions
wantErr bool
}{
{
name: "pruning",
name: FlagPruning,
initParams: func() {
viper.Set(flagPruning, store.PruningStrategyNothing)
viper.Set(FlagPruning, types.PruningOptionNothing)
},
expectedOptions: store.PruneNothing,
expectedOptions: types.PruneNothing,
},
{
name: "granular pruning",
name: "custom pruning options",
initParams: func() {
viper.Set(flagPruning, "custom")
viper.Set(flagPruningSnapshotEvery, 1234)
viper.Set(flagPruningKeepEvery, 4321)
viper.Set(FlagPruning, types.PruningOptionCustom)
viper.Set(FlagPruningKeepRecent, 1234)
viper.Set(FlagPruningKeepEvery, 4321)
viper.Set(FlagPruningInterval, 10)
},
expectedOptions: store.PruningOptions{
SnapshotEvery: 1234,
KeepEvery: 4321,
expectedOptions: types.PruningOptions{
KeepRecent: 1234,
KeepEvery: 4321,
Interval: 10,
},
},
{
name: "default",
name: types.PruningOptionDefault,
initParams: func() {},
expectedOptions: store.PruneSyncable,
expectedOptions: types.PruneDefault,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(j *testing.T) {
viper.Reset()
viper.SetDefault(flagPruning, "syncable")
viper.SetDefault(FlagPruning, types.PruningOptionDefault)
tt.initParams()
opts, err := GetPruningOptionsFromFlags()
if tt.wantErr {
require.Error(t, err)
return
}
require.Equal(t, tt.expectedOptions, opts)
})
}

View File

@ -22,22 +22,25 @@ import (
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/server/api"
"github.com/cosmos/cosmos-sdk/server/config"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
)
// Tendermint full-node start flags
const (
flagWithTendermint = "with-tendermint"
flagAddress = "address"
flagTraceStore = "trace-store"
flagPruning = "pruning"
flagPruningKeepEvery = "pruning-keep-every"
flagPruningSnapshotEvery = "pruning-snapshot-every"
flagCPUProfile = "cpu-profile"
FlagMinGasPrices = "minimum-gas-prices"
FlagHaltHeight = "halt-height"
FlagHaltTime = "halt-time"
FlagInterBlockCache = "inter-block-cache"
FlagUnsafeSkipUpgrades = "unsafe-skip-upgrades"
flagWithTendermint = "with-tendermint"
flagAddress = "address"
flagTraceStore = "trace-store"
flagCPUProfile = "cpu-profile"
FlagMinGasPrices = "minimum-gas-prices"
FlagHaltHeight = "halt-height"
FlagHaltTime = "halt-time"
FlagInterBlockCache = "inter-block-cache"
FlagUnsafeSkipUpgrades = "unsafe-skip-upgrades"
FlagPruning = "pruning"
FlagPruningKeepRecent = "pruning-keep-recent"
FlagPruningKeepEvery = "pruning-keep-every"
FlagPruningInterval = "pruning-interval"
)
// StartCmd runs the service passed in, either stand-alone or in-process with
@ -49,13 +52,15 @@ func StartCmd(ctx *Context, cdc codec.JSONMarshaler, appCreator AppCreator) *cob
Long: `Run the full node application with Tendermint in or out of process. By
default, the application will run with Tendermint in process.
Pruning options can be provided via the '--pruning' flag or alternatively with '--pruning-snapshot-every' and 'pruning-keep-every' together.
Pruning options can be provided via the '--pruning' flag or alternatively with '--pruning-keep-recent',
'pruning-keep-every', and 'pruning-interval' together.
For '--pruning' the options are as follows:
syncable: only those states not needed for state syncing will be deleted (flushes every 100th to disk and keeps every 10000th)
default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals
nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node)
everything: all saved states will be deleted, storing only the current state
everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals
custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval'
Node halting configurations exist in the form of two flags: '--halt-height' and '--halt-time'. During
the ABCI Commit phase, the node will check if the current block height is greater than or equal to
@ -87,9 +92,6 @@ which accepts a path for the resulting pprof file.
cmd.Flags().Bool(flagWithTendermint, true, "Run abci app embedded in-process with tendermint")
cmd.Flags().String(flagAddress, "tcp://0.0.0.0:26658", "Listen address")
cmd.Flags().String(flagTraceStore, "", "Enable KVStore tracing to an output file")
cmd.Flags().String(flagPruning, "syncable", "Pruning strategy: syncable, nothing, everything, custom")
cmd.Flags().Int64(flagPruningKeepEvery, 0, "Define the state number that will be kept. Ignored if pruning is not custom.")
cmd.Flags().Int64(flagPruningSnapshotEvery, 0, "Defines the state that will be snapshot for pruning. Ignored if pruning is not custom.")
cmd.Flags().String(
FlagMinGasPrices, "",
"Minimum gas prices to accept for transactions; Any fee in a tx must meet this minimum (e.g. 0.01photino;0.0001stake)",
@ -100,9 +102,14 @@ which accepts a path for the resulting pprof file.
cmd.Flags().Bool(FlagInterBlockCache, true, "Enable inter-block caching")
cmd.Flags().String(flagCPUProfile, "", "Enable CPU profiling and write to the provided file")
viper.BindPFlag(flagPruning, cmd.Flags().Lookup(flagPruning))
viper.BindPFlag(flagPruningKeepEvery, cmd.Flags().Lookup(flagPruningKeepEvery))
viper.BindPFlag(flagPruningSnapshotEvery, cmd.Flags().Lookup(flagPruningSnapshotEvery))
cmd.Flags().String(FlagPruning, storetypes.PruningOptionDefault, "Pruning strategy (default|nothing|everything|custom)")
cmd.Flags().Uint64(FlagPruningKeepRecent, 0, "Number of recent heights to keep on disk (ignored if pruning is not 'custom')")
cmd.Flags().Uint64(FlagPruningKeepEvery, 0, "Offset heights to keep on disk after 'keep-every' (ignored if pruning is not 'custom')")
cmd.Flags().Uint64(FlagPruningInterval, 0, "Height interval at which pruned heights are removed from disk (ignored if pruning is not 'custom')")
viper.BindPFlag(FlagPruning, cmd.Flags().Lookup(FlagPruning))
viper.BindPFlag(FlagPruningKeepRecent, cmd.Flags().Lookup(FlagPruningKeepRecent))
viper.BindPFlag(FlagPruningKeepEvery, cmd.Flags().Lookup(FlagPruningKeepEvery))
viper.BindPFlag(FlagPruningInterval, cmd.Flags().Lookup(FlagPruningInterval))
// add support for all Tendermint-specific command line options
tcmd.AddNodeFlags(cmd)

View File

@ -1,85 +0,0 @@
package server
import (
"fmt"
"testing"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
)
// TestPruningOptions is a table-driven test of StartCmd's PreRunE flag
// validation: it seeds viper with combinations of the '--pruning' strategy
// flag and the granular 'pruning-keep-every' / 'pruning-snapshot-every'
// flags, then checks whether PreRunE accepts or rejects them.
func TestPruningOptions(t *testing.T) {
tests := []struct {
name string // sub-test name
paramInit func() // seeds viper with the flag values under test
returnsErr bool // whether PreRunE is expected to return an error
expectedErr error // exact error expected when returnsErr is true
}{
{
// No flags set: the "syncable" default (set below via
// viper.SetDefault) is a known strategy and must be accepted.
name: "default",
paramInit: func() {},
returnsErr: false,
expectedErr: nil,
},
{
// An unrecognized strategy string must be rejected with a
// descriptive error.
name: "unknown strategy",
paramInit: func() { viper.Set(flagPruning, "unknown") },
returnsErr: true,
expectedErr: fmt.Errorf("unknown pruning strategy unknown"),
},
{
// Custom pruning with only keep-every set is expected to be a
// valid combination.
name: "only keep-every provided",
paramInit: func() {
viper.Set(flagPruning, "custom")
viper.Set(flagPruningKeepEvery, 12345)
},
returnsErr: false,
expectedErr: nil,
},
{
// Custom pruning with only snapshot-every set is expected to be
// rejected as invalid granular options.
name: "only snapshot-every provided",
paramInit: func() {
viper.Set(flagPruning, "custom")
viper.Set(flagPruningSnapshotEvery, 12345)
},
returnsErr: true,
expectedErr: fmt.Errorf("invalid granular options"),
},
{
// Both granular options together form a valid custom configuration.
name: "pruning flag with other granular options 3",
paramInit: func() {
viper.Set(flagPruning, "custom")
viper.Set(flagPruningKeepEvery, 1234)
viper.Set(flagPruningSnapshotEvery, 1234)
},
returnsErr: false,
expectedErr: nil,
},
{
// The built-in "nothing" strategy (archive node) is accepted as-is.
name: "nothing strategy",
paramInit: func() {
viper.Set(flagPruning, "nothing")
},
returnsErr: false,
expectedErr: nil,
},
}
for _, tt := range tests {
// Rebind the range variable so the closure below captures a per-iteration
// copy (required for correctness on Go versions before 1.22).
tt := tt
t.Run(tt.name, func(t *testing.T) {
// Reset viper between sub-tests so earlier Set calls do not leak,
// then restore the default strategy before applying this case's flags.
viper.Reset()
viper.SetDefault(flagPruning, "syncable")
startCommand := StartCmd(nil, nil, nil)
tt.paramInit()
// PreRunE performs the pruning-flag validation under test.
err := startCommand.PreRunE(startCommand, nil)
if tt.returnsErr {
require.EqualError(t, err, tt.expectedErr.Error())
} else {
require.NoError(t, err)
}
})
}
}

View File

@ -18,6 +18,7 @@ import (
"github.com/cosmos/cosmos-sdk/server"
"github.com/cosmos/cosmos-sdk/simapp"
"github.com/cosmos/cosmos-sdk/store"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli"
@ -83,10 +84,11 @@ func newApp(logger log.Logger, db dbm.DB, traceStore io.Writer) server.Applicati
skipUpgradeHeights[int64(h)] = true
}
// TODO: Make sure custom pruning works.
return simapp.NewSimApp(
logger, db, traceStore, true, skipUpgradeHeights,
viper.GetString(flags.FlagHome), invCheckPeriod,
baseapp.SetPruning(store.NewPruningOptionsFromString(viper.GetString("pruning"))),
baseapp.SetPruning(storetypes.NewPruningOptionsFromString(viper.GetString(server.FlagPruning))),
baseapp.SetMinGasPrices(viper.GetString(server.FlagMinGasPrices)),
baseapp.SetHaltHeight(viper.GetUint64(server.FlagHaltHeight)),
baseapp.SetHaltTime(viper.GetUint64(server.FlagHaltTime)),

View File

@ -20,7 +20,7 @@ func TestGetOrSetStoreCache(t *testing.T) {
sKey := types.NewKVStoreKey("test")
tree, err := iavl.NewMutableTree(db, 100)
require.NoError(t, err)
store := iavlstore.UnsafeNewStore(tree, types.PruneNothing)
store := iavlstore.UnsafeNewStore(tree)
store2 := mngr.GetStoreCache(sKey, store)
require.NotNil(t, store2)
@ -34,7 +34,7 @@ func TestUnwrap(t *testing.T) {
sKey := types.NewKVStoreKey("test")
tree, err := iavl.NewMutableTree(db, 100)
require.NoError(t, err)
store := iavlstore.UnsafeNewStore(tree, types.PruneNothing)
store := iavlstore.UnsafeNewStore(tree)
_ = mngr.GetStoreCache(sKey, store)
require.Equal(t, store, mngr.Unwrap(sKey))
@ -48,7 +48,7 @@ func TestStoreCache(t *testing.T) {
sKey := types.NewKVStoreKey("test")
tree, err := iavl.NewMutableTree(db, 100)
require.NoError(t, err)
store := iavlstore.UnsafeNewStore(tree, types.PruneNothing)
store := iavlstore.UnsafeNewStore(tree)
kvStore := mngr.GetStoreCache(sKey, store)
for i := uint(0); i < cache.DefaultCommitKVStoreCacheSize*2; i++ {

View File

@ -7,7 +7,6 @@ import (
ics23iavl "github.com/confio/ics23-iavl"
ics23 "github.com/confio/ics23/go"
"github.com/pkg/errors"
"github.com/tendermint/iavl"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/merkle"
@ -35,39 +34,14 @@ var (
// Store Implements types.KVStore and CommitKVStore.
type Store struct {
tree Tree
pruning types.PruningOptions
tree Tree
}
// LoadStore returns an IAVL Store as a CommitKVStore. Internally, it will load the
// store's version (id) from the provided DB. An error is returned if the version
// fails to load.
func LoadStore(db dbm.DB, id types.CommitID, pruning types.PruningOptions, lazyLoading bool) (types.CommitKVStore, error) {
if !pruning.IsValid() {
return nil, fmt.Errorf("pruning options are invalid: %v", pruning)
}
var keepRecent int64
// Determine the value of keepRecent based on the following:
//
// If KeepEvery = 1, keepRecent should be 0 since there is no need to keep
// latest version in a in-memory cache.
//
// If KeepEvery > 1, keepRecent should be 1 so that state changes in between
// flushed states can be saved in the in-memory latest tree.
if pruning.KeepEvery == 1 {
keepRecent = 0
} else {
keepRecent = 1
}
tree, err := iavl.NewMutableTreeWithOpts(
db,
dbm.NewMemDB(),
defaultIAVLCacheSize,
iavl.PruningOptions(pruning.KeepEvery, keepRecent),
)
func LoadStore(db dbm.DB, id types.CommitID, lazyLoading bool) (types.CommitKVStore, error) {
tree, err := iavl.NewMutableTree(db, defaultIAVLCacheSize)
if err != nil {
return nil, err
}
@ -83,8 +57,7 @@ func LoadStore(db dbm.DB, id types.CommitID, pruning types.PruningOptions, lazyL
}
return &Store{
tree: tree,
pruning: pruning,
tree: tree,
}, nil
}
@ -94,10 +67,9 @@ func LoadStore(db dbm.DB, id types.CommitID, pruning types.PruningOptions, lazyL
// CONTRACT: The IAVL tree should be fully loaded.
// CONTRACT: PruningOptions passed in as argument must be the same as pruning options
// passed into iavl.MutableTree
func UnsafeNewStore(tree *iavl.MutableTree, po types.PruningOptions) *Store {
func UnsafeNewStore(tree *iavl.MutableTree) *Store {
return &Store{
tree: tree,
pruning: po,
tree: tree,
}
}
@ -117,8 +89,7 @@ func (st *Store) GetImmutable(version int64) (*Store, error) {
}
return &Store{
tree: &immutableTree{iTree},
pruning: st.pruning,
tree: &immutableTree{iTree},
}, nil
}
@ -129,25 +100,9 @@ func (st *Store) Commit() types.CommitID {
hash, version, err := st.tree.SaveVersion()
if err != nil {
// TODO: Do we want to extend Commit to allow returning errors?
panic(err)
}
// If the version we saved got flushed to disk, check if previous flushed
// version should be deleted.
if st.pruning.FlushVersion(version) {
previous := version - st.pruning.KeepEvery
// Previous flushed version should only be pruned if the previous version is
// not a snapshot version OR if snapshotting is disabled (SnapshotEvery == 0).
if previous != 0 && !st.pruning.SnapshotVersion(previous) {
err := st.tree.DeleteVersion(previous)
if errCause := errors.Cause(err); errCause != nil && errCause != iavl.ErrVersionDoesNotExist {
panic(err)
}
}
}
return types.CommitID{
Version: version,
Hash: hash,
@ -214,6 +169,13 @@ func (st *Store) Delete(key []byte) {
st.tree.Remove(key)
}
// DeleteVersions deletes a series of versions from the MutableTree. An error
// is returned if any single version is invalid or the delete fails. All writes
// happen in a single batch with a single commit.
func (st *Store) DeleteVersions(versions ...int64) error {
return st.tree.DeleteVersions(versions...)
}
// Implements types.KVStore.
func (st *Store) Iterator(start, end []byte) types.Iterator {
var iTree *iavl.ImmutableTree
@ -350,6 +312,7 @@ func getProofFromTree(tree *iavl.MutableTree, key []byte, exists bool) *merkle.P
panic(fmt.Sprintf("unexpected error for nonexistence proof: %s", err.Error()))
}
}
op := types.NewIavlCommitmentOp(key, commitmentProof)
return &merkle.Proof{Ops: []merkle.ProofOp{op.ProofOp()}}
}

View File

@ -52,7 +52,7 @@ func newAlohaTree(t *testing.T, db dbm.DB) (*iavl.MutableTree, types.CommitID) {
func TestGetImmutable(t *testing.T) {
db := dbm.NewMemDB()
tree, cID := newAlohaTree(t, db)
store := UnsafeNewStore(tree, types.PruneNothing)
store := UnsafeNewStore(tree)
require.True(t, tree.Set([]byte("hello"), []byte("adios")))
hash, ver, err := tree.SaveVersion()
@ -82,7 +82,7 @@ func TestGetImmutable(t *testing.T) {
func TestTestGetImmutableIterator(t *testing.T) {
db := dbm.NewMemDB()
tree, cID := newAlohaTree(t, db)
store := UnsafeNewStore(tree, types.PruneNothing)
store := UnsafeNewStore(tree)
newStore, err := store.GetImmutable(cID.Version)
require.NoError(t, err)
@ -105,7 +105,7 @@ func TestTestGetImmutableIterator(t *testing.T) {
func TestIAVLStoreGetSetHasDelete(t *testing.T) {
db := dbm.NewMemDB()
tree, _ := newAlohaTree(t, db)
iavlStore := UnsafeNewStore(tree, types.PruneNothing)
iavlStore := UnsafeNewStore(tree)
key := "hello"
@ -130,14 +130,14 @@ func TestIAVLStoreGetSetHasDelete(t *testing.T) {
func TestIAVLStoreNoNilSet(t *testing.T) {
db := dbm.NewMemDB()
tree, _ := newAlohaTree(t, db)
iavlStore := UnsafeNewStore(tree, types.PruneNothing)
iavlStore := UnsafeNewStore(tree)
require.Panics(t, func() { iavlStore.Set([]byte("key"), nil) }, "setting a nil value should panic")
}
func TestIAVLIterator(t *testing.T) {
db := dbm.NewMemDB()
tree, _ := newAlohaTree(t, db)
iavlStore := UnsafeNewStore(tree, types.PruneNothing)
iavlStore := UnsafeNewStore(tree)
iter := iavlStore.Iterator([]byte("aloha"), []byte("hellz"))
expected := []string{"aloha", "hello"}
var i int
@ -213,7 +213,7 @@ func TestIAVLReverseIterator(t *testing.T) {
tree, err := iavl.NewMutableTree(db, cacheSize)
require.NoError(t, err)
iavlStore := UnsafeNewStore(tree, types.PruneNothing)
iavlStore := UnsafeNewStore(tree)
iavlStore.Set([]byte{0x00}, []byte("0"))
iavlStore.Set([]byte{0x00, 0x00}, []byte("0 0"))
@ -246,7 +246,7 @@ func TestIAVLPrefixIterator(t *testing.T) {
tree, err := iavl.NewMutableTree(db, cacheSize)
require.NoError(t, err)
iavlStore := UnsafeNewStore(tree, types.PruneNothing)
iavlStore := UnsafeNewStore(tree)
iavlStore.Set([]byte("test1"), []byte("test1"))
iavlStore.Set([]byte("test2"), []byte("test2"))
@ -310,7 +310,7 @@ func TestIAVLReversePrefixIterator(t *testing.T) {
tree, err := iavl.NewMutableTree(db, cacheSize)
require.NoError(t, err)
iavlStore := UnsafeNewStore(tree, types.PruneNothing)
iavlStore := UnsafeNewStore(tree)
iavlStore.Set([]byte("test1"), []byte("test1"))
iavlStore.Set([]byte("test2"), []byte("test2"))
@ -373,95 +373,12 @@ func nextVersion(iavl *Store) {
iavl.Commit()
}
// TestIAVLDefaultPruning walks a table of expected stored/deleted version
// sets, one entry per committed version, against the "default" knob set used
// by this suite.
func TestIAVLDefaultPruning(t *testing.T) {
	// Expected stored / deleted version numbers for:
	// numRecent = 5, storeEvery = 3, snapshotEvery = 6
	// (values match the testPruning call below; entry i is the state after i
	// commits).
	var states = []pruneState{
		{[]int64{}, []int64{}},
		{[]int64{1}, []int64{}},
		{[]int64{1, 2}, []int64{}},
		{[]int64{1, 2, 3}, []int64{}},
		{[]int64{1, 2, 3, 4}, []int64{}},
		{[]int64{1, 2, 3, 4, 5}, []int64{}},
		{[]int64{2, 4, 5, 6}, []int64{1, 3}},
		{[]int64{4, 5, 6, 7}, []int64{1, 2, 3}},
		{[]int64{4, 5, 6, 7, 8}, []int64{1, 2, 3}},
		{[]int64{5, 6, 7, 8, 9}, []int64{1, 2, 3, 4}},
		{[]int64{6, 7, 8, 9, 10}, []int64{1, 2, 3, 4, 5}},
		{[]int64{6, 7, 8, 9, 10, 11}, []int64{1, 2, 3, 4, 5}},
		{[]int64{6, 8, 10, 11, 12}, []int64{1, 2, 3, 4, 5, 7, 9}},
		{[]int64{6, 10, 11, 12, 13}, []int64{1, 2, 3, 4, 5, 7, 8, 9}},
		{[]int64{6, 10, 11, 12, 13, 14}, []int64{1, 2, 3, 4, 5, 7, 8, 9}},
		{[]int64{6, 11, 12, 13, 14, 15}, []int64{1, 2, 3, 4, 5, 7, 8, 9, 10}},
	}
	testPruning(t, int64(5), int64(3), int64(6), states)
}
// TestIAVLAlternativePruning is the same walk as TestIAVLDefaultPruning with
// a different knob set, to ensure the pruning schedule is not hard-coded.
func TestIAVLAlternativePruning(t *testing.T) {
	// Expected stored / deleted version numbers for:
	// numRecent = 3, storeEvery = 5, snapshotEvery = 10
	// (entry i is the state after i commits).
	var states = []pruneState{
		{[]int64{}, []int64{}},
		{[]int64{1}, []int64{}},
		{[]int64{1, 2}, []int64{}},
		{[]int64{1, 2, 3}, []int64{}},
		{[]int64{2, 3, 4}, []int64{1}},
		{[]int64{3, 4, 5}, []int64{1, 2}},
		{[]int64{4, 5, 6}, []int64{1, 2, 3}},
		{[]int64{5, 6, 7}, []int64{1, 2, 3, 4}},
		{[]int64{5, 6, 7, 8}, []int64{1, 2, 3, 4}},
		{[]int64{5, 7, 8, 9}, []int64{1, 2, 3, 4, 6}},
		{[]int64{8, 9, 10}, []int64{1, 2, 3, 4, 6, 7}},
		{[]int64{9, 10, 11}, []int64{1, 2, 3, 4, 6, 7, 8}},
		{[]int64{10, 11, 12}, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9}},
		{[]int64{10, 11, 12, 13}, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9}},
		{[]int64{10, 12, 13, 14}, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 11}},
		{[]int64{10, 13, 14, 15}, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12}},
	}
	testPruning(t, int64(3), int64(5), int64(10), states)
}
// pruneState captures the expected outcome after one commit step of a pruning
// test: which versions must still exist and which must have been removed.
type pruneState struct {
	stored  []int64 // versions that must still be present in the store
	deleted []int64 // versions that must have been pruned
}
// testPruning builds an IAVL store with the given pruning knobs, then walks
// the expected states: at each step it asserts which versions survive and
// which were pruned, and commits one more version.
func testPruning(t *testing.T, numRecent int64, storeEvery int64, snapshotEvery int64, states []pruneState) {
	opts := types.PruningOptions{
		KeepEvery:     storeEvery,
		SnapshotEvery: snapshotEvery,
	}

	tree, err := iavl.NewMutableTreeWithOpts(
		dbm.NewMemDB(), dbm.NewMemDB(), cacheSize, iavl.PruningOptions(storeEvery, numRecent),
	)
	require.NoError(t, err)

	store := UnsafeNewStore(tree, opts)

	for step, state := range states {
		// Every version listed as stored must be retrievable...
		for _, ver := range state.stored {
			require.True(t, store.VersionExists(ver),
				"missing version %d with latest version %d; should save last %d, store every %d, and snapshot every %d",
				ver, step, numRecent, storeEvery, snapshotEvery)
		}

		// ...and every version listed as deleted must be gone.
		for _, ver := range state.deleted {
			require.False(t, store.VersionExists(ver),
				"not pruned version %d with latest version %d; should prune all but last %d and every %d with intermediate flush interval %d",
				ver, step, numRecent, snapshotEvery, storeEvery)
		}

		nextVersion(store)
	}
}
func TestIAVLNoPrune(t *testing.T) {
db := dbm.NewMemDB()
tree, err := iavl.NewMutableTree(db, cacheSize)
require.NoError(t, err)
iavlStore := UnsafeNewStore(tree, types.PruneNothing)
iavlStore := UnsafeNewStore(tree)
nextVersion(iavlStore)
for i := 1; i < 100; i++ {
@ -482,7 +399,7 @@ func TestIAVLPruneEverything(t *testing.T) {
tree, err := iavl.NewMutableTreeWithOpts(db, dbm.NewMemDB(), cacheSize, iavlOpts)
require.NoError(t, err)
iavlStore := UnsafeNewStore(tree, types.PruneEverything)
iavlStore := UnsafeNewStore(tree)
nextVersion(iavlStore)
for i := 1; i < 100; i++ {
@ -505,7 +422,7 @@ func TestIAVLStoreQuery(t *testing.T) {
tree, err := iavl.NewMutableTree(db, cacheSize)
require.NoError(t, err)
iavlStore := UnsafeNewStore(tree, types.PruneNothing)
iavlStore := UnsafeNewStore(tree)
k1, v1 := []byte("key1"), []byte("val1")
k2, v2 := []byte("key2"), []byte("val2")
@ -604,7 +521,7 @@ func BenchmarkIAVLIteratorNext(b *testing.B) {
tree.Set(key, value)
}
iavlStore := UnsafeNewStore(tree, types.PruneNothing)
iavlStore := UnsafeNewStore(tree)
iterators := make([]types.Iterator, b.N/treeSize)
for i := 0; i < len(iterators); i++ {

View File

@ -23,6 +23,7 @@ type (
Remove(key []byte) ([]byte, bool)
SaveVersion() ([]byte, int64, error)
DeleteVersion(version int64) error
DeleteVersions(versions ...int64) error
Version() int64
Hash() []byte
VersionExists(version int64) bool
@ -55,6 +56,10 @@ func (it *immutableTree) DeleteVersion(_ int64) error {
panic("cannot call 'DeleteVersion' on an immutable IAVL tree")
}
// DeleteVersions always panics: an immutable tree exposes a single read-only
// version, so deleting versions is never a valid operation on it.
func (it *immutableTree) DeleteVersions(_ ...int64) error {
	panic("cannot call 'DeleteVersions' on an immutable IAVL tree")
}
// VersionExists reports whether the requested version is the single version
// this immutable tree was loaded at.
func (it *immutableTree) VersionExists(version int64) bool {
	loaded := it.Version()
	return loaded == version
}

View File

@ -90,7 +90,7 @@ func TestIAVLStorePrefix(t *testing.T) {
db := dbm.NewMemDB()
tree, err := tiavl.NewMutableTree(db, cacheSize)
require.NoError(t, err)
iavlStore := iavl.UnsafeNewStore(tree, types.PruneNothing)
iavlStore := iavl.UnsafeNewStore(tree)
testPrefixStore(t, iavlStore, []byte("test"))
}

View File

@ -29,9 +29,3 @@ type (
GasMeter = types.GasMeter
GasConfig = types.GasConfig
)
var (
PruneNothing = types.PruneNothing
PruneEverything = types.PruneEverything
PruneSyncable = types.PruneSyncable
)

View File

@ -14,7 +14,7 @@ import (
func TestVerifyIAVLStoreQueryProof(t *testing.T) {
// Create main tree for testing.
db := dbm.NewMemDB()
iStore, err := iavl.LoadStore(db, types.CommitID{}, types.PruneNothing, false)
iStore, err := iavl.LoadStore(db, types.CommitID{}, false)
store := iStore.(*iavl.Store)
require.Nil(t, err)
store.Set([]byte("MYKEY"), []byte("MYVALUE"))

View File

@ -5,7 +5,10 @@ import (
"io"
"strings"
ics23tendermint "github.com/confio/ics23-tendermint"
ics23 "github.com/confio/ics23/go"
"github.com/pkg/errors"
iavltree "github.com/tendermint/iavl"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/merkle"
dbm "github.com/tendermint/tm-db"
@ -19,13 +22,11 @@ import (
"github.com/cosmos/cosmos-sdk/store/transient"
"github.com/cosmos/cosmos-sdk/store/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
ics23tendermint "github.com/confio/ics23-tendermint"
ics23 "github.com/confio/ics23/go"
)
const (
latestVersionKey = "s/latest"
pruneHeightsKey = "s/pruneheights"
commitInfoKeyFmt = "s/%d" // s/<version>
)
@ -42,6 +43,7 @@ type Store struct {
stores map[types.StoreKey]types.CommitKVStore
keysByName map[string]types.StoreKey
lazyLoading bool
pruneHeights []int64
traceWriter io.Writer
traceContext types.TraceContext
@ -49,8 +51,10 @@ type Store struct {
interBlockCache types.MultiStorePersistentCache
}
var _ types.CommitMultiStore = (*Store)(nil)
var _ types.Queryable = (*Store)(nil)
var (
_ types.CommitMultiStore = (*Store)(nil)
_ types.Queryable = (*Store)(nil)
)
// NewStore returns a reference to a new Store object with the provided DB. The
// store will be created with a PruneNothing pruning strategy by default. After
@ -63,20 +67,15 @@ func NewStore(db dbm.DB) *Store {
storesParams: make(map[types.StoreKey]storeParams),
stores: make(map[types.StoreKey]types.CommitKVStore),
keysByName: make(map[string]types.StoreKey),
pruneHeights: make([]int64, 0),
}
}
// SetPruning sets the pruning strategy on the root store and all the sub-stores.
// Note, calling SetPruning on the root store prior to LoadVersion or
// LoadLatestVersion performs a no-op as the stores aren't mounted yet.
//
// TODO: Consider removing this API altogether on sub-stores as a pruning
// strategy should only be provided on initialization.
func (rs *Store) SetPruning(pruningOpts types.PruningOptions) {
	rs.pruningOpts = pruningOpts
	// NOTE(review): with pruning now driven centrally by the root store's
	// Commit/pruneStores, propagating the options to sub-stores may be
	// redundant — confirm before removing.
	for _, substore := range rs.stores {
		substore.SetPruning(pruningOpts)
	}
}
// SetLazyLoading sets if the iavl store should be loaded lazily or not
@ -208,6 +207,12 @@ func (rs *Store) loadVersion(ver int64, upgrades *types.StoreUpgrades) error {
rs.lastCommitInfo = cInfo
rs.stores = newStores
// load any pruned heights we missed from disk to be pruned on the next run
ph, err := getPruningHeights(rs.db)
if err == nil && len(ph) > 0 {
rs.pruneHeights = ph
}
return nil
}
@ -294,22 +299,59 @@ func (rs *Store) LastCommitID() types.CommitID {
// Commit implements Committer/CommitStore.
func (rs *Store) Commit() types.CommitID {
// Commit stores.
version := rs.lastCommitInfo.Version + 1
previousHeight := rs.lastCommitInfo.Version
version := previousHeight + 1
rs.lastCommitInfo = commitStores(version, rs.stores)
// write CommitInfo to disk only if this version was flushed to disk
if rs.pruningOpts.FlushVersion(version) {
flushCommitInfo(rs.db, version, rs.lastCommitInfo)
// Determine if pruneHeight height needs to be added to the list of heights to
// be pruned, where pruneHeight = (commitHeight - 1) - KeepRecent.
if int64(rs.pruningOpts.KeepRecent) < previousHeight {
pruneHeight := previousHeight - int64(rs.pruningOpts.KeepRecent)
// We consider this height to be pruned iff:
//
// - KeepEvery is zero as that means that all heights should be pruned.
// - KeepEvery % (height - KeepRecent) != 0 as that means the height is not
// a 'snapshot' height.
if rs.pruningOpts.KeepEvery == 0 || pruneHeight%int64(rs.pruningOpts.KeepEvery) != 0 {
rs.pruneHeights = append(rs.pruneHeights, pruneHeight)
}
}
// Prepare for next version.
commitID := types.CommitID{
// batch prune if the current height is a pruning interval height
if rs.pruningOpts.Interval > 0 && version%int64(rs.pruningOpts.Interval) == 0 {
rs.pruneStores()
}
flushMetadata(rs.db, version, rs.lastCommitInfo, rs.pruneHeights)
return types.CommitID{
Version: version,
Hash: rs.lastCommitInfo.Hash(),
}
return commitID
}
// pruneStores will batch delete a list of heights from each mounted sub-store.
// Afterwards, pruneHeights is reset.
//
// Only IAVL-backed stores are pruned; other store types (transient, DB-backed)
// do not retain historical versions.
func (rs *Store) pruneStores() {
	// Nothing accumulated since the last pruning interval.
	if len(rs.pruneHeights) == 0 {
		return
	}

	for key, store := range rs.stores {
		if store.GetStoreType() == types.StoreTypeIAVL {
			// If the store is wrapped with an inter-block cache, we must first unwrap
			// it to get the underlying IAVL store.
			store = rs.GetCommitKVStore(key)

			if err := store.(*iavl.Store).DeleteVersions(rs.pruneHeights...); err != nil {
				// A height may already be absent (e.g. never flushed for this
				// store); tolerate ErrVersionDoesNotExist and panic on anything
				// else, since a failed prune leaves the store inconsistent.
				if errCause := errors.Cause(err); errCause != nil && errCause != iavltree.ErrVersionDoesNotExist {
					panic(err)
				}
			}
		}
	}

	// Reset the accumulator; the pruned heights are also flushed (emptied) to
	// disk by the caller via flushMetadata.
	rs.pruneHeights = make([]int64, 0)
}
// CacheWrap implements CacheWrapper/Store/CommitStore.
@ -483,8 +525,6 @@ func parsePath(path string) (storeName string, subpath string, err error) {
return storeName, subpath, nil
}
//----------------------------------------
// Note: why do we use key and params.key in different places. Seems like there should be only one key used.
func (rs *Store) loadCommitStoreFromParams(key types.StoreKey, id types.CommitID, params storeParams) (types.CommitKVStore, error) {
var db dbm.DB
@ -500,7 +540,7 @@ func (rs *Store) loadCommitStoreFromParams(key types.StoreKey, id types.CommitID
panic("recursive MultiStores not yet supported")
case types.StoreTypeIAVL:
store, err := iavl.LoadStore(db, id, rs.pruningOpts, rs.lazyLoading)
store, err := iavl.LoadStore(db, id, rs.lazyLoading)
if err != nil {
return nil, err
}
@ -653,12 +693,6 @@ func getLatestVersion(db dbm.DB) int64 {
return latest
}
// Set the latest version.
// NOTE(review): the MarshalBinaryBare error is silently discarded here; for an
// int64 this presumably cannot fail, but confirm — the newer variant uses
// MustMarshalBinaryBare instead.
func setLatestVersion(batch dbm.Batch, version int64) {
	latestBytes, _ := cdc.MarshalBinaryBare(version)
	batch.Set([]byte(latestVersionKey), latestBytes)
}
// Commits each store and returns a new commitInfo.
func commitStores(version int64, storeMap map[types.StoreKey]types.CommitKVStore) commitInfo {
storeInfos := make([]storeInfo, 0, len(storeMap))
@ -684,9 +718,8 @@ func commitStores(version int64, storeMap map[types.StoreKey]types.CommitKVStore
// Gets commitInfo from disk.
func getCommitInfo(db dbm.DB, ver int64) (commitInfo, error) {
// Get from DB.
cInfoKey := fmt.Sprintf(commitInfoKeyFmt, ver)
cInfoBytes, err := db.Get([]byte(cInfoKey))
if err != nil {
return commitInfo{}, errors.Wrap(err, "failed to get commit info")
@ -704,23 +737,48 @@ func getCommitInfo(db dbm.DB, ver int64) (commitInfo, error) {
return cInfo, nil
}
// Set a commitInfo for given version.
// The record is keyed by commitInfoKeyFmt ("s/<version>") and written into the
// caller's batch so it can be committed atomically with other metadata.
func setCommitInfo(batch dbm.Batch, version int64, cInfo commitInfo) {
	cInfoBytes := cdc.MustMarshalBinaryBare(cInfo)
	cInfoKey := fmt.Sprintf(commitInfoKeyFmt, version)
	batch.Set([]byte(cInfoKey), cInfoBytes)
}
// flushCommitInfo flushes a commitInfo for given version to the DB. Note, this
// needs to happen atomically.
func flushCommitInfo(db dbm.DB, version int64, cInfo commitInfo) {
// setLatestVersion stages the latest committed version under latestVersionKey
// in the caller's batch; panics only if amino marshalling of an int64 fails.
func setLatestVersion(batch dbm.Batch, version int64) {
	latestBytes := cdc.MustMarshalBinaryBare(version)
	batch.Set([]byte(latestVersionKey), latestBytes)
}
// setPruningHeights stages the list of heights pending pruning under
// pruneHeightsKey, so an unpruned backlog survives a node restart.
func setPruningHeights(batch dbm.Batch, pruneHeights []int64) {
	bz := cdc.MustMarshalBinaryBare(pruneHeights)
	batch.Set([]byte(pruneHeightsKey), bz)
}
// getPruningHeights reads the persisted list of heights pending pruning from
// the given DB. It returns an error when the key is missing/empty or when the
// stored bytes cannot be decoded.
func getPruningHeights(db dbm.DB) ([]int64, error) {
	raw, err := db.Get([]byte(pruneHeightsKey))
	if err != nil {
		return nil, fmt.Errorf("failed to get pruned heights: %w", err)
	}

	if len(raw) == 0 {
		return nil, errors.New("no pruned heights found")
	}

	var heights []int64
	if err := cdc.UnmarshalBinaryBare(raw, &heights); err != nil {
		return nil, fmt.Errorf("failed to unmarshal pruned heights: %w", err)
	}

	return heights, nil
}
func flushMetadata(db dbm.DB, version int64, cInfo commitInfo, pruneHeights []int64) {
batch := db.NewBatch()
defer batch.Close()
setCommitInfo(batch, version, cInfo)
setLatestVersion(batch, version)
err := batch.Write()
if err != nil {
setPruningHeights(batch, pruneHeights)
if err := batch.Write(); err != nil {
panic(fmt.Errorf("error on batch write %w", err))
}
}

View File

@ -21,7 +21,7 @@ func TestStoreType(t *testing.T) {
func TestGetCommitKVStore(t *testing.T) {
var db dbm.DB = dbm.NewMemDB()
ms := newMultiStoreWithMounts(db, types.PruneSyncable)
ms := newMultiStoreWithMounts(db, types.PruneDefault)
err := ms.LoadLatestVersion()
require.Nil(t, err)
@ -166,7 +166,7 @@ func TestMultistoreCommitLoad(t *testing.T) {
// XXX: confirm old commit is overwritten and we have rolled back
// LatestVersion
store = newMultiStoreWithMounts(db, types.PruneSyncable)
store = newMultiStoreWithMounts(db, types.PruneDefault)
err = store.LoadLatestVersion()
require.Nil(t, err)
commitID = getExpectedCommitID(store, ver+1)
@ -293,8 +293,9 @@ func TestParsePath(t *testing.T) {
func TestMultiStoreRestart(t *testing.T) {
db := dbm.NewMemDB()
pruning := types.PruningOptions{
KeepEvery: 3,
SnapshotEvery: 6,
KeepRecent: 2,
KeepEvery: 3,
Interval: 1,
}
multi := newMultiStoreWithMounts(db, pruning)
err := multi.LoadLatestVersion()
@ -322,8 +323,8 @@ func TestMultiStoreRestart(t *testing.T) {
multi.Commit()
cinfo, err := getCommitInfo(multi.db, int64(i))
require.NotNil(t, err)
require.Equal(t, commitInfo{}, cinfo)
require.NoError(t, err)
require.Equal(t, int64(i), cinfo.Version)
}
// Set and commit data in one store.
@ -347,15 +348,15 @@ func TestMultiStoreRestart(t *testing.T) {
multi.Commit()
postFlushCinfo, err := getCommitInfo(multi.db, 4)
require.NotNil(t, err)
require.Equal(t, commitInfo{}, postFlushCinfo, "Commit changed after in-memory commit")
require.NoError(t, err)
require.Equal(t, int64(4), postFlushCinfo.Version, "Commit changed after in-memory commit")
multi = newMultiStoreWithMounts(db, pruning)
err = multi.LoadLatestVersion()
require.Nil(t, err)
reloadedCid := multi.LastCommitID()
require.Equal(t, flushedCinfo.CommitID(), reloadedCid, "Reloaded CID is not the same as last flushed CID")
require.Equal(t, int64(4), reloadedCid.Version, "Reloaded CID is not the same as last flushed CID")
// Check that store1 and store2 retained date from 3rd commit
store1 = multi.getStoreByName("store1").(types.KVStore)
@ -369,7 +370,7 @@ func TestMultiStoreRestart(t *testing.T) {
// Check that store3 still has data from last commit even though update happened on 2nd commit
store3 = multi.getStoreByName("store3").(types.KVStore)
val3 := store3.Get([]byte(k3))
require.Equal(t, []byte(fmt.Sprintf("%s:%d", v3, 2)), val3, "Reloaded value not the same as last flushed value")
require.Equal(t, []byte(fmt.Sprintf("%s:%d", v3, 3)), val3, "Reloaded value not the same as last flushed value")
}
func TestMultiStoreQuery(t *testing.T) {
@ -442,6 +443,80 @@ func TestMultiStoreQuery(t *testing.T) {
require.Equal(t, v2, qres.Value)
}
// TestMultiStore_Pruning exercises Commit-time pruning across a table of
// pruning strategies and verifies exactly which heights remain loadable.
//
// Fix: the require.NoError assertion for saved heights carried a copy-pasted
// "expected error" message, which would mislead anyone debugging a failure;
// it now reads "expected no error".
func TestMultiStore_Pruning(t *testing.T) {
	testCases := []struct {
		name        string
		numVersions int64
		po          types.PruningOptions
		deleted     []int64
		saved       []int64
	}{
		{"prune nothing", 10, types.PruneNothing, nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
		{"prune everything", 10, types.PruneEverything, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9}, []int64{10}},
		{"prune some; no batch", 10, types.NewPruningOptions(2, 3, 1), []int64{1, 2, 4, 5, 7}, []int64{3, 6, 8, 9, 10}},
		{"prune some; small batch", 10, types.NewPruningOptions(2, 3, 3), []int64{1, 2, 4, 5}, []int64{3, 6, 7, 8, 9, 10}},
		{"prune some; large batch", 10, types.NewPruningOptions(2, 3, 11), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
	}

	for _, tc := range testCases {
		tc := tc // capture range variable for the parallel-safe closure

		t.Run(tc.name, func(t *testing.T) {
			db := dbm.NewMemDB()
			ms := newMultiStoreWithMounts(db, tc.po)
			require.NoError(t, ms.LoadLatestVersion())

			for i := int64(0); i < tc.numVersions; i++ {
				ms.Commit()
			}

			// Surviving heights must still be loadable...
			for _, v := range tc.saved {
				_, err := ms.CacheMultiStoreWithVersion(v)
				require.NoError(t, err, "expected no error when loading height: %d", v)
			}

			// ...and pruned heights must not be.
			for _, v := range tc.deleted {
				_, err := ms.CacheMultiStoreWithVersion(v)
				require.Error(t, err, "expected error when loading height: %d", v)
			}
		})
	}
}
// TestMultiStore_PruningRestart verifies that heights queued for pruning are
// persisted to disk, survive a store restart, and are actually deleted on the
// first commit after the restart.
func TestMultiStore_PruningRestart(t *testing.T) {
	db := dbm.NewMemDB()
	ms := newMultiStoreWithMounts(db, types.NewPruningOptions(2, 3, 11))
	require.NoError(t, ms.LoadLatestVersion())

	// Commit enough to build up heights to prune, where on the next block we should
	// batch delete.
	for i := int64(0); i < 10; i++ {
		ms.Commit()
	}

	// With KeepRecent=2 and KeepEvery=3, the backlog after 10 commits is every
	// height below 10-2 that is not a multiple of 3.
	pruneHeights := []int64{1, 2, 4, 5, 7}

	// ensure we've persisted the current batch of heights to prune to the store's DB
	ph, err := getPruningHeights(ms.db)
	require.NoError(t, err)
	require.Equal(t, pruneHeights, ph)

	// "restart" by rebuilding the multi-store over the same DB
	ms = newMultiStoreWithMounts(db, types.NewPruningOptions(2, 3, 11))
	err = ms.LoadLatestVersion()
	require.NoError(t, err)
	require.Equal(t, pruneHeights, ms.pruneHeights)

	// commit one more block and ensure the heights have been pruned
	ms.Commit()
	require.Empty(t, ms.pruneHeights)

	for _, v := range pruneHeights {
		_, err := ms.CacheMultiStoreWithVersion(v)
		require.Error(t, err, "expected error when loading height: %d", v)
	}
}
//-----------------------------------------------------------------------
// utils

View File

@ -8,13 +8,6 @@ import (
"github.com/cosmos/cosmos-sdk/store/types"
)
// Pruning strategies that may be provided to a KVStore to enable pruning.
const (
PruningStrategyNothing = "nothing"
PruningStrategyEverything = "everything"
PruningStrategySyncable = "syncable"
)
func NewCommitMultiStore(db dbm.DB) types.CommitMultiStore {
return rootmulti.NewStore(db)
}
@ -22,17 +15,3 @@ func NewCommitMultiStore(db dbm.DB) types.CommitMultiStore {
func NewCommitKVStoreCacheManager() types.MultiStorePersistentCache {
return cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)
}
// NewPruningOptionsFromString maps a pruning strategy name to its predefined
// PruningOptions. Unknown names fall back to the syncable strategy.
func NewPruningOptionsFromString(strategy string) (opt PruningOptions) {
	switch strategy {
	case PruningStrategyNothing:
		return PruneNothing
	case PruningStrategyEverything:
		return PruneEverything
	default:
		// PruningStrategySyncable and any unrecognized value share the same
		// result.
		return PruneSyncable
	}
}

View File

@ -12,7 +12,7 @@ import (
func newMemTestKVStore(t *testing.T) types.KVStore {
db := dbm.NewMemDB()
store, err := iavl.LoadStore(db, types.CommitID{}, types.PruneNothing, false)
store, err := iavl.LoadStore(db, types.CommitID{}, false)
require.NoError(t, err)
return store
}

View File

@ -1,66 +1,74 @@
package types
var (
// PruneEverything defines a pruning strategy where all committed states will
// be deleted, persisting only the current state.
PruneEverything = PruningOptions{
KeepEvery: 1,
SnapshotEvery: 0,
}
import "fmt"
// PruneNothing defines a pruning strategy where all committed states will be
// kept on disk, i.e. no states will be pruned.
PruneNothing = PruningOptions{
KeepEvery: 1,
SnapshotEvery: 1,
}
// PruneSyncable defines a pruning strategy where only those states not needed
// for state syncing will be pruned. It flushes every 100th state to disk and
// keeps every 10000th.
PruneSyncable = PruningOptions{
KeepEvery: 100,
SnapshotEvery: 10000,
}
// Pruning option string constants
const (
PruningOptionDefault = "default"
PruningOptionEverything = "everything"
PruningOptionNothing = "nothing"
PruningOptionCustom = "custom"
)
// PruningOptions defines the specific pruning strategy every store in a multi-store
// will use when committing state, where keepEvery determines which committed
// heights are flushed to disk and snapshotEvery determines which of these heights
// are kept after pruning.
var (
// PruneDefault defines a pruning strategy where the last 100 heights are kept
// in addition to every 500th and where to-be pruned heights are pruned at
// every 10th height.
PruneDefault = NewPruningOptions(100, 500, 10)
// PruneEverything defines a pruning strategy where all committed heights are
// deleted, storing only the current height and where to-be pruned heights are
// pruned at every 10th height.
PruneEverything = NewPruningOptions(0, 0, 10)
// PruneNothing defines a pruning strategy where all heights are kept on disk.
PruneNothing = NewPruningOptions(0, 1, 0)
)
// PruningOptions defines the pruning strategy used when determining which
// heights are removed from disk when committing state.
type PruningOptions struct {
KeepEvery int64
SnapshotEvery int64
// KeepRecent defines how many recent heights to keep on disk.
KeepRecent uint64
// KeepEvery defines how many offset heights are kept on disk past KeepRecent.
KeepEvery uint64
// Interval defines when the pruned heights are removed from disk.
Interval uint64
}
// IsValid verifies if the pruning options are valid. It returns false if invalid
// and true otherwise. Pruning options are considered valid iff:
//
// - KeepEvery > 0
// - SnapshotEvery >= 0
// - SnapshotEvery % KeepEvery = 0
func (po PruningOptions) IsValid() bool {
// must flush at positive block interval
if po.KeepEvery <= 0 {
return false
// NewPruningOptions returns a PruningOptions built from the number of recent
// heights to keep, the interval of offset heights kept past KeepRecent, and
// the height interval at which accumulated heights are actually pruned.
func NewPruningOptions(keepRecent, keepEvery, interval uint64) PruningOptions {
	return PruningOptions{
		KeepRecent: keepRecent,
		KeepEvery:  keepEvery,
		Interval:   interval,
	}
}
func (po PruningOptions) Validate() error {
if po.KeepRecent == 0 && po.KeepEvery == 0 && po.Interval == 0 { // prune everything
return fmt.Errorf("invalid 'Interval' when pruning everything: %d", po.Interval)
}
if po.KeepRecent == 0 && po.KeepEvery == 1 && po.Interval != 0 { // prune nothing
return fmt.Errorf("invalid 'Interval' when pruning nothing: %d", po.Interval)
}
// cannot snapshot negative intervals
if po.SnapshotEvery < 0 {
return false
return nil
}
// NewPruningOptionsFromString maps a pruning option string (see the
// PruningOption* constants) to its predefined PruningOptions, falling back to
// PruneDefault for unrecognized values.
//
// Fix: removed diff-merge residue after the switch
// (`return po.SnapshotEvery%po.KeepEvery == 0`), a leftover from the deleted
// IsValid method that referenced removed fields and an undefined receiver.
func NewPruningOptionsFromString(strategy string) PruningOptions {
	switch strategy {
	case PruningOptionEverything:
		return PruneEverything

	case PruningOptionNothing:
		return PruneNothing

	case PruningOptionDefault:
		return PruneDefault

	default:
		return PruneDefault
	}
}
// FlushVersion returns a boolean signaling if the provided version/height should
// be flushed to disk.
// A KeepEvery of zero means no version is ever flushed; note that for any
// non-zero KeepEvery, version 0 satisfies the modulo test and reports true.
func (po PruningOptions) FlushVersion(ver int64) bool {
	return po.KeepEvery != 0 && ver%po.KeepEvery == 0
}
// SnapshotVersion returns a boolean signaling if the provided version/height
// should be snapshotted (kept on disk).
// A SnapshotEvery of zero means no version is ever snapshotted; for non-zero
// SnapshotEvery, version 0 satisfies the modulo test and reports true.
func (po PruningOptions) SnapshotVersion(ver int64) bool {
	return po.SnapshotEvery != 0 && ver%po.SnapshotEvery == 0
}

View File

@ -1,79 +0,0 @@
package types_test
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/store/types"
)
// TestPruningOptions_FlushVersion pins FlushVersion's behavior for the three
// predefined strategies, including the edge cases of zero and negative heights.
func TestPruningOptions_FlushVersion(t *testing.T) {
	t.Parallel()

	// PruneEverything (KeepEvery=1): every height flushes.
	require.True(t, types.PruneEverything.FlushVersion(-1))
	require.True(t, types.PruneEverything.FlushVersion(0))
	require.True(t, types.PruneEverything.FlushVersion(1))
	require.True(t, types.PruneEverything.FlushVersion(2))

	// PruneNothing (KeepEvery=1): every height flushes.
	require.True(t, types.PruneNothing.FlushVersion(-1))
	require.True(t, types.PruneNothing.FlushVersion(0))
	require.True(t, types.PruneNothing.FlushVersion(1))
	require.True(t, types.PruneNothing.FlushVersion(2))

	// PruneSyncable (KeepEvery=100): only multiples of 100 flush.
	require.False(t, types.PruneSyncable.FlushVersion(-1))
	require.True(t, types.PruneSyncable.FlushVersion(0))
	require.False(t, types.PruneSyncable.FlushVersion(1))
	require.True(t, types.PruneSyncable.FlushVersion(100))
	require.False(t, types.PruneSyncable.FlushVersion(101))
}
// TestPruningOptions_SnapshotVersion pins SnapshotVersion's behavior for the
// three predefined strategies, including zero and negative heights.
func TestPruningOptions_SnapshotVersion(t *testing.T) {
	t.Parallel()

	// PruneEverything (SnapshotEvery=0): nothing is ever snapshotted.
	require.False(t, types.PruneEverything.SnapshotVersion(-1))
	require.False(t, types.PruneEverything.SnapshotVersion(0))
	require.False(t, types.PruneEverything.SnapshotVersion(1))
	require.False(t, types.PruneEverything.SnapshotVersion(2))

	// PruneNothing (SnapshotEvery=1): every height is snapshotted.
	require.True(t, types.PruneNothing.SnapshotVersion(-1))
	require.True(t, types.PruneNothing.SnapshotVersion(0))
	require.True(t, types.PruneNothing.SnapshotVersion(1))
	require.True(t, types.PruneNothing.SnapshotVersion(2))

	// PruneSyncable (SnapshotEvery=10000): only multiples of 10000.
	require.False(t, types.PruneSyncable.SnapshotVersion(-1))
	require.True(t, types.PruneSyncable.SnapshotVersion(0))
	require.False(t, types.PruneSyncable.SnapshotVersion(1))
	require.True(t, types.PruneSyncable.SnapshotVersion(10000))
	require.False(t, types.PruneSyncable.SnapshotVersion(10001))
}
// TestPruningOptions_IsValid is a table-driven check of the IsValid predicate
// over representative KeepEvery/SnapshotEvery combinations, including the
// three predefined strategies.
func TestPruningOptions_IsValid(t *testing.T) {
	t.Parallel()

	type fields struct {
		KeepEvery     int64
		SnapshotEvery int64
	}
	tests := []struct {
		name   string
		fields fields
		want   bool
	}{
		{"PruneEverything", fields{types.PruneEverything.KeepEvery, types.PruneEverything.SnapshotEvery}, true},
		{"PruneNothing", fields{types.PruneNothing.KeepEvery, types.PruneNothing.SnapshotEvery}, true},
		{"PruneSyncable", fields{types.PruneSyncable.KeepEvery, types.PruneSyncable.SnapshotEvery}, true},
		{"KeepEvery=0", fields{0, 0}, false},
		{"KeepEvery<0", fields{-1, 0}, false},
		{"SnapshotEvery<0", fields{1, -1}, false},
		// 30 is a multiple of 15, so this combination is valid despite the name.
		{"SnapshotEvery%KeepEvery!=0", fields{15, 30}, true},
		{"SnapshotEvery%KeepEvery!=0", fields{15, 20}, false},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			po := types.PruningOptions{
				KeepEvery:     tt.fields.KeepEvery,
				SnapshotEvery: tt.fields.SnapshotEvery,
			}
			require.Equal(t, tt.want, po.IsValid(), "IsValid() = %v, want %v", po.IsValid(), tt.want)
		})
	}
}

View File

@ -18,6 +18,8 @@ type Store interface {
type Committer interface {
Commit() CommitID
LastCommitID() CommitID
// TODO: Deprecate after 0.38.5
SetPruning(PruningOptions)
}