Merge PR #5500: Regen network/multistore upgrades
This commit is contained in:
parent
de0f1a9e40
commit
0b7449129a
|
@ -1,11 +1,8 @@
|
||||||
package baseapp
|
package baseapp
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -17,7 +14,6 @@ import (
|
||||||
dbm "github.com/tendermint/tm-db"
|
dbm "github.com/tendermint/tm-db"
|
||||||
|
|
||||||
"github.com/cosmos/cosmos-sdk/store"
|
"github.com/cosmos/cosmos-sdk/store"
|
||||||
storetypes "github.com/cosmos/cosmos-sdk/store/types"
|
|
||||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||||
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
|
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
|
||||||
)
|
)
|
||||||
|
@ -226,62 +222,6 @@ func DefaultStoreLoader(ms sdk.CommitMultiStore) error {
|
||||||
return ms.LoadLatestVersion()
|
return ms.LoadLatestVersion()
|
||||||
}
|
}
|
||||||
|
|
||||||
// StoreLoaderWithUpgrade is used to prepare baseapp with a fixed StoreLoader
|
|
||||||
// pattern. This is useful in test cases, or with custom upgrade loading logic.
|
|
||||||
func StoreLoaderWithUpgrade(upgrades *storetypes.StoreUpgrades) StoreLoader {
|
|
||||||
return func(ms sdk.CommitMultiStore) error {
|
|
||||||
return ms.LoadLatestVersionAndUpgrade(upgrades)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpgradeableStoreLoader can be configured by SetStoreLoader() to check for the
|
|
||||||
// existence of a given upgrade file - json encoded StoreUpgrades data.
|
|
||||||
//
|
|
||||||
// If not file is present, it will peform the default load (no upgrades to store).
|
|
||||||
//
|
|
||||||
// If the file is present, it will parse the file and execute those upgrades
|
|
||||||
// (rename or delete stores), while loading the data. It will also delete the
|
|
||||||
// upgrade file upon successful load, so that the upgrade is only applied once,
|
|
||||||
// and not re-applied on next restart
|
|
||||||
//
|
|
||||||
// This is useful for in place migrations when a store key is renamed between
|
|
||||||
// two versions of the software. (TODO: this code will move to x/upgrades
|
|
||||||
// when PR #4233 is merged, here mainly to help test the design)
|
|
||||||
func UpgradeableStoreLoader(upgradeInfoPath string) StoreLoader {
|
|
||||||
return func(ms sdk.CommitMultiStore) error {
|
|
||||||
_, err := os.Stat(upgradeInfoPath)
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return DefaultStoreLoader(ms)
|
|
||||||
} else if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// there is a migration file, let's execute
|
|
||||||
data, err := ioutil.ReadFile(upgradeInfoPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("cannot read upgrade file %s: %v", upgradeInfoPath, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var upgrades storetypes.StoreUpgrades
|
|
||||||
err = json.Unmarshal(data, &upgrades)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("cannot parse upgrade file: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = ms.LoadLatestVersionAndUpgrade(&upgrades)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("load and upgrade database: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// if we have a successful load, we delete the file
|
|
||||||
err = os.Remove(upgradeInfoPath)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("deleting upgrade file %s: %v", upgradeInfoPath, err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// LoadVersion loads the BaseApp application version. It will panic if called
|
// LoadVersion loads the BaseApp application version. It will panic if called
|
||||||
// more than once on a running baseapp.
|
// more than once on a running baseapp.
|
||||||
func (app *BaseApp) LoadVersion(version int64, baseKey *sdk.KVStoreKey) error {
|
func (app *BaseApp) LoadVersion(version int64, baseKey *sdk.KVStoreKey) error {
|
||||||
|
|
|
@ -4,7 +4,6 @@ import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
|
@ -137,18 +136,6 @@ func useDefaultLoader(app *BaseApp) {
|
||||||
app.SetStoreLoader(DefaultStoreLoader)
|
app.SetStoreLoader(DefaultStoreLoader)
|
||||||
}
|
}
|
||||||
|
|
||||||
func useUpgradeLoader(upgrades *store.StoreUpgrades) func(*BaseApp) {
|
|
||||||
return func(app *BaseApp) {
|
|
||||||
app.SetStoreLoader(StoreLoaderWithUpgrade(upgrades))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func useFileUpgradeLoader(upgradeInfoPath string) func(*BaseApp) {
|
|
||||||
return func(app *BaseApp) {
|
|
||||||
app.SetStoreLoader(UpgradeableStoreLoader(upgradeInfoPath))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) {
|
func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) {
|
||||||
rs := rootmulti.NewStore(db)
|
rs := rootmulti.NewStore(db)
|
||||||
rs.SetPruning(store.PruneNothing)
|
rs.SetPruning(store.PruneNothing)
|
||||||
|
@ -184,19 +171,6 @@ func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte
|
||||||
// Test that we can make commits and then reload old versions.
|
// Test that we can make commits and then reload old versions.
|
||||||
// Test that LoadLatestVersion actually does.
|
// Test that LoadLatestVersion actually does.
|
||||||
func TestSetLoader(t *testing.T) {
|
func TestSetLoader(t *testing.T) {
|
||||||
// write a renamer to a file
|
|
||||||
f, err := ioutil.TempFile("", "upgrade-*.json")
|
|
||||||
require.NoError(t, err)
|
|
||||||
data := []byte(`{"renamed":[{"old_key": "bnk", "new_key": "banker"}]}`)
|
|
||||||
_, err = f.Write(data)
|
|
||||||
require.NoError(t, err)
|
|
||||||
configName := f.Name()
|
|
||||||
require.NoError(t, f.Close())
|
|
||||||
|
|
||||||
// make sure it exists before running everything
|
|
||||||
_, err = os.Stat(configName)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
cases := map[string]struct {
|
cases := map[string]struct {
|
||||||
setLoader func(*BaseApp)
|
setLoader func(*BaseApp)
|
||||||
origStoreKey string
|
origStoreKey string
|
||||||
|
@ -211,26 +185,6 @@ func TestSetLoader(t *testing.T) {
|
||||||
origStoreKey: "foo",
|
origStoreKey: "foo",
|
||||||
loadStoreKey: "foo",
|
loadStoreKey: "foo",
|
||||||
},
|
},
|
||||||
"rename with inline opts": {
|
|
||||||
setLoader: useUpgradeLoader(&store.StoreUpgrades{
|
|
||||||
Renamed: []store.StoreRename{{
|
|
||||||
OldKey: "foo",
|
|
||||||
NewKey: "bar",
|
|
||||||
}},
|
|
||||||
}),
|
|
||||||
origStoreKey: "foo",
|
|
||||||
loadStoreKey: "bar",
|
|
||||||
},
|
|
||||||
"file loader with missing file": {
|
|
||||||
setLoader: useFileUpgradeLoader(configName + "randomchars"),
|
|
||||||
origStoreKey: "bnk",
|
|
||||||
loadStoreKey: "bnk",
|
|
||||||
},
|
|
||||||
"file loader with existing file": {
|
|
||||||
setLoader: useFileUpgradeLoader(configName),
|
|
||||||
origStoreKey: "bnk",
|
|
||||||
loadStoreKey: "banker",
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
k := []byte("key")
|
k := []byte("key")
|
||||||
|
@ -265,10 +219,6 @@ func TestSetLoader(t *testing.T) {
|
||||||
checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil)
|
checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// ensure config file was deleted
|
|
||||||
_, err = os.Stat(configName)
|
|
||||||
require.True(t, os.IsNotExist(err))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAppVersionSetterGetter(t *testing.T) {
|
func TestAppVersionSetterGetter(t *testing.T) {
|
||||||
|
|
|
@ -122,7 +122,7 @@ type SimApp struct {
|
||||||
// NewSimApp returns a reference to an initialized SimApp.
|
// NewSimApp returns a reference to an initialized SimApp.
|
||||||
func NewSimApp(
|
func NewSimApp(
|
||||||
logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, skipUpgradeHeights map[int64]bool,
|
logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, skipUpgradeHeights map[int64]bool,
|
||||||
invCheckPeriod uint, baseAppOptions ...func(*bam.BaseApp),
|
homePath string, invCheckPeriod uint, baseAppOptions ...func(*bam.BaseApp),
|
||||||
) *SimApp {
|
) *SimApp {
|
||||||
|
|
||||||
// TODO: Remove cdc in favor of appCodec once all modules are migrated.
|
// TODO: Remove cdc in favor of appCodec once all modules are migrated.
|
||||||
|
@ -189,7 +189,7 @@ func NewSimApp(
|
||||||
app.CrisisKeeper = crisis.NewKeeper(
|
app.CrisisKeeper = crisis.NewKeeper(
|
||||||
app.subspaces[crisis.ModuleName], invCheckPeriod, app.SupplyKeeper, auth.FeeCollectorName,
|
app.subspaces[crisis.ModuleName], invCheckPeriod, app.SupplyKeeper, auth.FeeCollectorName,
|
||||||
)
|
)
|
||||||
app.UpgradeKeeper = upgrade.NewKeeper(skipUpgradeHeights, keys[upgrade.StoreKey], appCodec)
|
app.UpgradeKeeper = upgrade.NewKeeper(skipUpgradeHeights, keys[upgrade.StoreKey], appCodec, homePath)
|
||||||
|
|
||||||
// create evidence keeper with router
|
// create evidence keeper with router
|
||||||
evidenceKeeper := evidence.NewKeeper(
|
evidenceKeeper := evidence.NewKeeper(
|
||||||
|
|
|
@ -15,7 +15,7 @@ import (
|
||||||
|
|
||||||
func TestSimAppExport(t *testing.T) {
|
func TestSimAppExport(t *testing.T) {
|
||||||
db := dbm.NewMemDB()
|
db := dbm.NewMemDB()
|
||||||
app := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, 0)
|
app := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0)
|
||||||
|
|
||||||
genesisState := NewDefaultGenesisState()
|
genesisState := NewDefaultGenesisState()
|
||||||
stateBytes, err := codec.MarshalJSONIndent(app.Codec(), genesisState)
|
stateBytes, err := codec.MarshalJSONIndent(app.Codec(), genesisState)
|
||||||
|
@ -31,7 +31,7 @@ func TestSimAppExport(t *testing.T) {
|
||||||
app.Commit()
|
app.Commit()
|
||||||
|
|
||||||
// Making a new app object with the db, so that initchain hasn't been called
|
// Making a new app object with the db, so that initchain hasn't been called
|
||||||
app2 := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, 0)
|
app2 := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0)
|
||||||
_, _, err = app2.ExportAppStateAndValidators(false, []string{})
|
_, _, err = app2.ExportAppStateAndValidators(false, []string{})
|
||||||
require.NoError(t, err, "ExportAppStateAndValidators should not have an error")
|
require.NoError(t, err, "ExportAppStateAndValidators should not have an error")
|
||||||
}
|
}
|
||||||
|
@ -39,7 +39,7 @@ func TestSimAppExport(t *testing.T) {
|
||||||
// ensure that black listed addresses are properly set in bank keeper
|
// ensure that black listed addresses are properly set in bank keeper
|
||||||
func TestBlackListedAddrs(t *testing.T) {
|
func TestBlackListedAddrs(t *testing.T) {
|
||||||
db := dbm.NewMemDB()
|
db := dbm.NewMemDB()
|
||||||
app := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, 0)
|
app := NewSimApp(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0)
|
||||||
|
|
||||||
for acc := range maccPerms {
|
for acc := range maccPerms {
|
||||||
require.Equal(t, !allowedReceivingModAcc[acc], app.BankKeeper.BlacklistedAddr(app.SupplyKeeper.GetModuleAddress(acc)))
|
require.Equal(t, !allowedReceivingModAcc[acc], app.BankKeeper.BlacklistedAddr(app.SupplyKeeper.GetModuleAddress(acc)))
|
||||||
|
|
|
@ -26,7 +26,7 @@ func BenchmarkFullAppSimulation(b *testing.B) {
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
app := NewSimApp(logger, db, nil, true, map[int64]bool{}, FlagPeriodValue, interBlockCacheOpt())
|
app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, interBlockCacheOpt())
|
||||||
|
|
||||||
// run randomized simulation
|
// run randomized simulation
|
||||||
_, simParams, simErr := simulation.SimulateFromSeed(
|
_, simParams, simErr := simulation.SimulateFromSeed(
|
||||||
|
@ -65,7 +65,7 @@ func BenchmarkInvariants(b *testing.B) {
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
app := NewSimApp(logger, db, nil, true, map[int64]bool{}, FlagPeriodValue, interBlockCacheOpt())
|
app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, interBlockCacheOpt())
|
||||||
|
|
||||||
// run randomized simulation
|
// run randomized simulation
|
||||||
_, simParams, simErr := simulation.SimulateFromSeed(
|
_, simParams, simErr := simulation.SimulateFromSeed(
|
||||||
|
|
|
@ -63,7 +63,7 @@ func TestFullAppSimulation(t *testing.T) {
|
||||||
require.NoError(t, os.RemoveAll(dir))
|
require.NoError(t, os.RemoveAll(dir))
|
||||||
}()
|
}()
|
||||||
|
|
||||||
app := NewSimApp(logger, db, nil, true, map[int64]bool{}, FlagPeriodValue, fauxMerkleModeOpt)
|
app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, fauxMerkleModeOpt)
|
||||||
require.Equal(t, "SimApp", app.Name())
|
require.Equal(t, "SimApp", app.Name())
|
||||||
|
|
||||||
// run randomized simulation
|
// run randomized simulation
|
||||||
|
@ -95,7 +95,7 @@ func TestAppImportExport(t *testing.T) {
|
||||||
require.NoError(t, os.RemoveAll(dir))
|
require.NoError(t, os.RemoveAll(dir))
|
||||||
}()
|
}()
|
||||||
|
|
||||||
app := NewSimApp(logger, db, nil, true, map[int64]bool{}, FlagPeriodValue, fauxMerkleModeOpt)
|
app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, fauxMerkleModeOpt)
|
||||||
require.Equal(t, "SimApp", app.Name())
|
require.Equal(t, "SimApp", app.Name())
|
||||||
|
|
||||||
// Run randomized simulation
|
// Run randomized simulation
|
||||||
|
@ -129,7 +129,7 @@ func TestAppImportExport(t *testing.T) {
|
||||||
require.NoError(t, os.RemoveAll(newDir))
|
require.NoError(t, os.RemoveAll(newDir))
|
||||||
}()
|
}()
|
||||||
|
|
||||||
newApp := NewSimApp(log.NewNopLogger(), newDB, nil, true, map[int64]bool{}, FlagPeriodValue, fauxMerkleModeOpt)
|
newApp := NewSimApp(log.NewNopLogger(), newDB, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, fauxMerkleModeOpt)
|
||||||
require.Equal(t, "SimApp", newApp.Name())
|
require.Equal(t, "SimApp", newApp.Name())
|
||||||
|
|
||||||
var genesisState GenesisState
|
var genesisState GenesisState
|
||||||
|
@ -181,7 +181,7 @@ func TestAppSimulationAfterImport(t *testing.T) {
|
||||||
require.NoError(t, os.RemoveAll(dir))
|
require.NoError(t, os.RemoveAll(dir))
|
||||||
}()
|
}()
|
||||||
|
|
||||||
app := NewSimApp(logger, db, nil, true, map[int64]bool{}, FlagPeriodValue, fauxMerkleModeOpt)
|
app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, fauxMerkleModeOpt)
|
||||||
require.Equal(t, "SimApp", app.Name())
|
require.Equal(t, "SimApp", app.Name())
|
||||||
|
|
||||||
// Run randomized simulation
|
// Run randomized simulation
|
||||||
|
@ -220,7 +220,7 @@ func TestAppSimulationAfterImport(t *testing.T) {
|
||||||
require.NoError(t, os.RemoveAll(newDir))
|
require.NoError(t, os.RemoveAll(newDir))
|
||||||
}()
|
}()
|
||||||
|
|
||||||
newApp := NewSimApp(log.NewNopLogger(), newDB, nil, true, map[int64]bool{}, FlagPeriodValue, fauxMerkleModeOpt)
|
newApp := NewSimApp(log.NewNopLogger(), newDB, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, fauxMerkleModeOpt)
|
||||||
require.Equal(t, "SimApp", newApp.Name())
|
require.Equal(t, "SimApp", newApp.Name())
|
||||||
|
|
||||||
newApp.InitChain(abci.RequestInitChain{
|
newApp.InitChain(abci.RequestInitChain{
|
||||||
|
@ -266,7 +266,7 @@ func TestAppStateDeterminism(t *testing.T) {
|
||||||
|
|
||||||
db := dbm.NewMemDB()
|
db := dbm.NewMemDB()
|
||||||
|
|
||||||
app := NewSimApp(logger, db, nil, true, map[int64]bool{}, FlagPeriodValue, interBlockCacheOpt())
|
app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, interBlockCacheOpt())
|
||||||
|
|
||||||
fmt.Printf(
|
fmt.Printf(
|
||||||
"running non-determinism simulation; seed %d: %d/%d, attempt: %d/%d\n",
|
"running non-determinism simulation; seed %d: %d/%d, attempt: %d/%d\n",
|
||||||
|
|
|
@ -23,7 +23,7 @@ import (
|
||||||
// Setup initializes a new SimApp. A Nop logger is set in SimApp.
|
// Setup initializes a new SimApp. A Nop logger is set in SimApp.
|
||||||
func Setup(isCheckTx bool) *SimApp {
|
func Setup(isCheckTx bool) *SimApp {
|
||||||
db := dbm.NewMemDB()
|
db := dbm.NewMemDB()
|
||||||
app := NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, 0)
|
app := NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0)
|
||||||
if !isCheckTx {
|
if !isCheckTx {
|
||||||
// init chain must be called to stop deliverState from being nil
|
// init chain must be called to stop deliverState from being nil
|
||||||
genesisState := NewDefaultGenesisState()
|
genesisState := NewDefaultGenesisState()
|
||||||
|
@ -48,7 +48,7 @@ func Setup(isCheckTx bool) *SimApp {
|
||||||
// accounts and possible balances.
|
// accounts and possible balances.
|
||||||
func SetupWithGenesisAccounts(genAccs []authexported.GenesisAccount, balances ...bank.Balance) *SimApp {
|
func SetupWithGenesisAccounts(genAccs []authexported.GenesisAccount, balances ...bank.Balance) *SimApp {
|
||||||
db := dbm.NewMemDB()
|
db := dbm.NewMemDB()
|
||||||
app := NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, 0)
|
app := NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0)
|
||||||
|
|
||||||
// initialize the chain with the passed in genesis accounts
|
// initialize the chain with the passed in genesis accounts
|
||||||
genesisState := NewDefaultGenesisState()
|
genesisState := NewDefaultGenesisState()
|
||||||
|
|
|
@ -44,6 +44,13 @@ type StoreUpgrades struct {
|
||||||
Deleted []string `json:"deleted"`
|
Deleted []string `json:"deleted"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UpgradeInfo defines height and name of the upgrade
|
||||||
|
// to ensure multistore upgrades happen only at matching height.
|
||||||
|
type UpgradeInfo struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Height int64 `json:"height"`
|
||||||
|
}
|
||||||
|
|
||||||
// StoreRename defines a name change of a sub-store.
|
// StoreRename defines a name change of a sub-store.
|
||||||
// All data previously under a PrefixStore with OldKey will be copied
|
// All data previously under a PrefixStore with OldKey will be copied
|
||||||
// to a PrefixStore with NewKey, then deleted from OldKey store.
|
// to a PrefixStore with NewKey, then deleted from OldKey store.
|
||||||
|
|
|
@ -25,7 +25,7 @@ var (
|
||||||
|
|
||||||
func createTestApp() (*simapp.SimApp, sdk.Context, []sdk.AccAddress) {
|
func createTestApp() (*simapp.SimApp, sdk.Context, []sdk.AccAddress) {
|
||||||
db := dbm.NewMemDB()
|
db := dbm.NewMemDB()
|
||||||
app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, 1)
|
app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, simapp.DefaultNodeHome, 1)
|
||||||
ctx := app.NewContext(true, abci.Header{})
|
ctx := app.NewContext(true, abci.Header{})
|
||||||
|
|
||||||
constantFee := sdk.NewInt64Coin(sdk.DefaultBondDenom, 10)
|
constantFee := sdk.NewInt64Coin(sdk.DefaultBondDenom, 10)
|
||||||
|
|
|
@ -11,7 +11,7 @@ import (
|
||||||
|
|
||||||
func createTestApp() *simapp.SimApp {
|
func createTestApp() *simapp.SimApp {
|
||||||
db := dbm.NewMemDB()
|
db := dbm.NewMemDB()
|
||||||
app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, 5)
|
app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, simapp.DefaultNodeHome, 5)
|
||||||
// init chain must be called to stop deliverState from being nil
|
// init chain must be called to stop deliverState from being nil
|
||||||
genesisState := simapp.NewDefaultGenesisState()
|
genesisState := simapp.NewDefaultGenesisState()
|
||||||
stateBytes, err := codec.MarshalJSONIndent(app.Codec(), genesisState)
|
stateBytes, err := codec.MarshalJSONIndent(app.Codec(), genesisState)
|
||||||
|
|
|
@ -63,7 +63,7 @@ func TestImportExportQueues(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
db := dbm.NewMemDB()
|
db := dbm.NewMemDB()
|
||||||
app2 := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, 0)
|
app2 := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, simapp.DefaultNodeHome, 0)
|
||||||
|
|
||||||
app2.InitChain(
|
app2.InitChain(
|
||||||
abci.RequestInitChain{
|
abci.RequestInitChain{
|
||||||
|
|
|
@ -38,6 +38,14 @@ func BeginBlocker(k Keeper, ctx sdk.Context, _ abci.RequestBeginBlock) {
|
||||||
upgradeMsg := fmt.Sprintf("UPGRADE \"%s\" NEEDED at %s: %s", plan.Name, plan.DueAt(), plan.Info)
|
upgradeMsg := fmt.Sprintf("UPGRADE \"%s\" NEEDED at %s: %s", plan.Name, plan.DueAt(), plan.Info)
|
||||||
// We don't have an upgrade handler for this upgrade name, meaning this software is out of date so shutdown
|
// We don't have an upgrade handler for this upgrade name, meaning this software is out of date so shutdown
|
||||||
ctx.Logger().Error(upgradeMsg)
|
ctx.Logger().Error(upgradeMsg)
|
||||||
|
|
||||||
|
// Write the upgrade info to disk. The UpgradeStoreLoader uses this info to perform or skip
|
||||||
|
// store migrations.
|
||||||
|
err := k.DumpUpgradeInfoToDisk(ctx.BlockHeight(), plan.Name)
|
||||||
|
if err != nil {
|
||||||
|
panic(fmt.Errorf("unable to write upgrade info to filesystem: %s", err.Error()))
|
||||||
|
}
|
||||||
|
|
||||||
panic(upgradeMsg)
|
panic(upgradeMsg)
|
||||||
}
|
}
|
||||||
// We have an upgrade handler for this upgrade name, so apply the upgrade
|
// We have an upgrade handler for this upgrade name, so apply the upgrade
|
||||||
|
|
|
@ -1,10 +1,15 @@
|
||||||
package upgrade_test
|
package upgrade_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
storetypes "github.com/cosmos/cosmos-sdk/store/types"
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
abci "github.com/tendermint/tendermint/abci/types"
|
abci "github.com/tendermint/tendermint/abci/types"
|
||||||
"github.com/tendermint/tendermint/libs/log"
|
"github.com/tendermint/tendermint/libs/log"
|
||||||
|
@ -31,7 +36,7 @@ var s TestSuite
|
||||||
|
|
||||||
func setupTest(height int64, skip map[int64]bool) TestSuite {
|
func setupTest(height int64, skip map[int64]bool) TestSuite {
|
||||||
db := dbm.NewMemDB()
|
db := dbm.NewMemDB()
|
||||||
app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, skip, 0)
|
app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, skip, simapp.DefaultNodeHome, 0)
|
||||||
genesisState := simapp.NewDefaultGenesisState()
|
genesisState := simapp.NewDefaultGenesisState()
|
||||||
stateBytes, err := codec.MarshalJSONIndent(app.Codec(), genesisState)
|
stateBytes, err := codec.MarshalJSONIndent(app.Codec(), genesisState)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -393,3 +398,30 @@ func TestUpgradeWithoutSkip(t *testing.T) {
|
||||||
VerifyDoUpgrade(t)
|
VerifyDoUpgrade(t)
|
||||||
VerifyDone(t, s.ctx, "test")
|
VerifyDone(t, s.ctx, "test")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDumpUpgradeInfoToFile(t *testing.T) {
|
||||||
|
s := setupTest(10, map[int64]bool{})
|
||||||
|
|
||||||
|
planHeight := s.ctx.BlockHeight() + 1
|
||||||
|
name := "test"
|
||||||
|
t.Log("verify if upgrade height is dumped to file")
|
||||||
|
err := s.keeper.DumpUpgradeInfoToDisk(planHeight, name)
|
||||||
|
require.Nil(t, err)
|
||||||
|
|
||||||
|
upgradeInfoFilePath, err := s.keeper.GetUpgradeInfoPath()
|
||||||
|
require.Nil(t, err)
|
||||||
|
|
||||||
|
data, err := ioutil.ReadFile(upgradeInfoFilePath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
var upgradeInfo storetypes.UpgradeInfo
|
||||||
|
err = json.Unmarshal(data, &upgradeInfo)
|
||||||
|
require.Nil(t, err)
|
||||||
|
|
||||||
|
t.Log("Verify upgrade height from file matches ")
|
||||||
|
require.Equal(t, upgradeInfo.Height, planHeight)
|
||||||
|
|
||||||
|
// clear the test file
|
||||||
|
err = os.Remove(upgradeInfoFilePath)
|
||||||
|
require.Nil(t, err)
|
||||||
|
}
|
||||||
|
|
|
@ -27,6 +27,7 @@ var (
|
||||||
NewSoftwareUpgradeProposal = types.NewSoftwareUpgradeProposal
|
NewSoftwareUpgradeProposal = types.NewSoftwareUpgradeProposal
|
||||||
NewCancelSoftwareUpgradeProposal = types.NewCancelSoftwareUpgradeProposal
|
NewCancelSoftwareUpgradeProposal = types.NewCancelSoftwareUpgradeProposal
|
||||||
NewQueryAppliedParams = types.NewQueryAppliedParams
|
NewQueryAppliedParams = types.NewQueryAppliedParams
|
||||||
|
UpgradeStoreLoader = types.UpgradeStoreLoader
|
||||||
NewKeeper = keeper.NewKeeper
|
NewKeeper = keeper.NewKeeper
|
||||||
NewQuerier = keeper.NewQuerier
|
NewQuerier = keeper.NewQuerier
|
||||||
)
|
)
|
||||||
|
|
|
@ -68,6 +68,27 @@ as well as providing the opportunity for the upgraded software to perform any ne
|
||||||
(with the old binary) and applying the migration (with the new binary) are enforced in the state machine. Actually
|
(with the old binary) and applying the migration (with the new binary) are enforced in the state machine. Actually
|
||||||
switching the binaries is an ops task and not handled inside the sdk / abci app.
|
switching the binaries is an ops task and not handled inside the sdk / abci app.
|
||||||
|
|
||||||
|
Here is a sample code to set store migrations with an upgrade:
|
||||||
|
|
||||||
|
// this configures a no-op upgrade handler for the "my-fancy-upgrade" upgrade
|
||||||
|
app.UpgradeKeeper.SetUpgradeHandler("my-fancy-upgrade", func(ctx sdk.Context, plan upgrade.Plan) {
|
||||||
|
// upgrade changes here
|
||||||
|
})
|
||||||
|
|
||||||
|
upgradeInfo := app.UpgradeKeeper.ReadUpgradeInfoFromDisk()
|
||||||
|
if upgradeInfo.Name == "my-fancy-upgrade" && !app.UpgradeKeeper.IsSkipHeight(upgradeInfo.Height) {
|
||||||
|
storeUpgrades := store.StoreUpgrades{
|
||||||
|
Renamed: []store.StoreRename{{
|
||||||
|
OldKey: "foo",
|
||||||
|
NewKey: "bar",
|
||||||
|
}},
|
||||||
|
Deleted: []string{},
|
||||||
|
}
|
||||||
|
|
||||||
|
// configure store loader that checks if version == upgradeHeight and applies store upgrades
|
||||||
|
app.SetStoreLoader(upgrade.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades))
|
||||||
|
}
|
||||||
|
|
||||||
Halt Behavior
|
Halt Behavior
|
||||||
|
|
||||||
Before halting the ABCI state machine in the BeginBlocker method, the upgrade module will log an error
|
Before halting the ABCI state machine in the BeginBlocker method, the upgrade module will log an error
|
||||||
|
|
|
@ -2,19 +2,30 @@ package keeper
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
"github.com/cosmos/cosmos-sdk/x/upgrade/types"
|
"github.com/cosmos/cosmos-sdk/x/upgrade/types"
|
||||||
|
|
||||||
"github.com/tendermint/tendermint/libs/log"
|
"github.com/tendermint/tendermint/libs/log"
|
||||||
|
tmos "github.com/tendermint/tendermint/libs/os"
|
||||||
|
|
||||||
"github.com/cosmos/cosmos-sdk/codec"
|
"github.com/cosmos/cosmos-sdk/codec"
|
||||||
"github.com/cosmos/cosmos-sdk/store/prefix"
|
"github.com/cosmos/cosmos-sdk/store/prefix"
|
||||||
|
store "github.com/cosmos/cosmos-sdk/store/types"
|
||||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||||
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
|
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// UpgradeInfoFileName file to store upgrade information
|
||||||
|
const UpgradeInfoFileName string = "upgrade-info.json"
|
||||||
|
|
||||||
type Keeper struct {
|
type Keeper struct {
|
||||||
|
homePath string
|
||||||
skipUpgradeHeights map[int64]bool
|
skipUpgradeHeights map[int64]bool
|
||||||
storeKey sdk.StoreKey
|
storeKey sdk.StoreKey
|
||||||
cdc codec.Marshaler
|
cdc codec.Marshaler
|
||||||
|
@ -22,8 +33,9 @@ type Keeper struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewKeeper constructs an upgrade Keeper
|
// NewKeeper constructs an upgrade Keeper
|
||||||
func NewKeeper(skipUpgradeHeights map[int64]bool, storeKey sdk.StoreKey, cdc codec.Marshaler) Keeper {
|
func NewKeeper(skipUpgradeHeights map[int64]bool, storeKey sdk.StoreKey, cdc codec.Marshaler, homePath string) Keeper {
|
||||||
return Keeper{
|
return Keeper{
|
||||||
|
homePath: homePath,
|
||||||
skipUpgradeHeights: skipUpgradeHeights,
|
skipUpgradeHeights: skipUpgradeHeights,
|
||||||
storeKey: storeKey,
|
storeKey: storeKey,
|
||||||
cdc: cdc,
|
cdc: cdc,
|
||||||
|
@ -131,3 +143,59 @@ func (k Keeper) ApplyUpgrade(ctx sdk.Context, plan types.Plan) {
|
||||||
func (k Keeper) IsSkipHeight(height int64) bool {
|
func (k Keeper) IsSkipHeight(height int64) bool {
|
||||||
return k.skipUpgradeHeights[height]
|
return k.skipUpgradeHeights[height]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DumpUpgradeInfoToDisk writes upgrade information to UpgradeInfoFileName.
|
||||||
|
func (k Keeper) DumpUpgradeInfoToDisk(height int64, name string) error {
|
||||||
|
upgradeInfoFilePath, err := k.GetUpgradeInfoPath()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
upgradeInfo := store.UpgradeInfo{
|
||||||
|
Name: name,
|
||||||
|
Height: height,
|
||||||
|
}
|
||||||
|
info, err := json.Marshal(upgradeInfo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ioutil.WriteFile(upgradeInfoFilePath, info, 0644)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUpgradeInfoPath returns the upgrade info file path
|
||||||
|
func (k Keeper) GetUpgradeInfoPath() (string, error) {
|
||||||
|
upgradeInfoFileDir := path.Join(k.getHomeDir(), "data")
|
||||||
|
err := tmos.EnsureDir(upgradeInfoFileDir, os.ModePerm)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return filepath.Join(upgradeInfoFileDir, UpgradeInfoFileName), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getHomeDir returns the height at which the given upgrade was executed
|
||||||
|
func (k Keeper) getHomeDir() string {
|
||||||
|
return k.homePath
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadUpgradeInfoFromDisk returns the name and height of the upgrade
|
||||||
|
// which is written to disk by the old binary when panic'ing
|
||||||
|
// if there's an error in reading the info,
|
||||||
|
// it assumes that the upgrade info is not available
|
||||||
|
func (k Keeper) ReadUpgradeInfoFromDisk() (upgradeInfo store.UpgradeInfo) {
|
||||||
|
upgradeInfoPath, err := k.GetUpgradeInfoPath()
|
||||||
|
// if error in reading the path, assume there are no upgrades
|
||||||
|
if err != nil {
|
||||||
|
return upgradeInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := ioutil.ReadFile(upgradeInfoPath)
|
||||||
|
// if error in reading the file, assume there are no upgrades
|
||||||
|
if err != nil {
|
||||||
|
return upgradeInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
json.Unmarshal(data, &upgradeInfo)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
|
@ -56,6 +56,33 @@ During each `EndBlock` execution, the `x/upgrade` module checks if there exists
|
||||||
`Handler` is executed. If the `Plan` is expected to execute but no `Handler` is registered
|
`Handler` is executed. If the `Plan` is expected to execute but no `Handler` is registered
|
||||||
or if the binary was upgraded too early, the node will gracefully panic and exit.
|
or if the binary was upgraded too early, the node will gracefully panic and exit.
|
||||||
|
|
||||||
|
## StoreLoader
|
||||||
|
|
||||||
|
|
||||||
|
The `x/upgrade` module also facilitates store migrations as part of the upgrade. The
|
||||||
|
`StoreLoader` sets the migrations that need to occur before the new binary can
|
||||||
|
successfully run the chain. This `StoreLoader` is also application specific and
|
||||||
|
not defined on a per-module basis. Registering this `StoreLoader` is done via
|
||||||
|
`app#SetStoreLoader` in the application.
|
||||||
|
|
||||||
|
```go
|
||||||
|
func UpgradeStoreLoader (upgradeHeight int64, storeUpgrades *store.StoreUpgrades) baseapp.StoreLoader
|
||||||
|
```
|
||||||
|
|
||||||
|
If there's a planned upgrade and the upgrade height is reached, the old binary writes `UpgradeInfo` to the disk before panic'ing.
|
||||||
|
|
||||||
|
```go
|
||||||
|
type UpgradeInfo struct {
|
||||||
|
Name string
|
||||||
|
Height int64
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This information is critical to ensure that `StoreUpgrades` happen smoothly at the correct height and
|
||||||
|
expected upgrade. It eliminates the chance for the new binary to execute `StoreUpgrades` multiple
|
||||||
|
times every time it restarts. Also, if there are multiple upgrades planned at the same height, the `Name`
|
||||||
|
will ensure these `StoreUpgrades` take place only in the planned upgrade handler.
|
||||||
|
|
||||||
## Proposal
|
## Proposal
|
||||||
|
|
||||||
Typically, a `Plan` is proposed and submitted through governance via a `SoftwareUpgradeProposal`.
|
Typically, a `Plan` is proposed and submitted through governance via a `SoftwareUpgradeProposal`.
|
||||||
|
|
|
@ -0,0 +1,23 @@
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/cosmos/cosmos-sdk/baseapp"
|
||||||
|
store "github.com/cosmos/cosmos-sdk/store/types"
|
||||||
|
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UpgradeStoreLoader is used to prepare baseapp with a fixed StoreLoader
|
||||||
|
// pattern. This is useful for custom upgrade loading logic.
|
||||||
|
func UpgradeStoreLoader(upgradeHeight int64, storeUpgrades *store.StoreUpgrades) baseapp.StoreLoader {
|
||||||
|
return func(ms sdk.CommitMultiStore) error {
|
||||||
|
if upgradeHeight == ms.LastCommitID().Version {
|
||||||
|
// Check if the current commit version and upgrade height matches
|
||||||
|
if len(storeUpgrades.Renamed) > 0 || len(storeUpgrades.Deleted) > 0 {
|
||||||
|
return ms.LoadLatestVersionAndUpgrade(storeUpgrades)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Otherwise load default store loader
|
||||||
|
return baseapp.DefaultStoreLoader(ms)
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,144 @@
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/spf13/viper"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
abci "github.com/tendermint/tendermint/abci/types"
|
||||||
|
"github.com/tendermint/tendermint/libs/log"
|
||||||
|
dbm "github.com/tendermint/tm-db"
|
||||||
|
|
||||||
|
"github.com/cosmos/cosmos-sdk/baseapp"
|
||||||
|
"github.com/cosmos/cosmos-sdk/client/flags"
|
||||||
|
"github.com/cosmos/cosmos-sdk/store/rootmulti"
|
||||||
|
store "github.com/cosmos/cosmos-sdk/store/types"
|
||||||
|
"github.com/cosmos/cosmos-sdk/tests"
|
||||||
|
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func useUpgradeLoader(height int64, upgrades *store.StoreUpgrades) func(*baseapp.BaseApp) {
|
||||||
|
return func(app *baseapp.BaseApp) {
|
||||||
|
app.SetStoreLoader(UpgradeStoreLoader(height, upgrades))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func defaultLogger() log.Logger {
|
||||||
|
return log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "sdk/app")
|
||||||
|
}
|
||||||
|
|
||||||
|
func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) {
|
||||||
|
rs := rootmulti.NewStore(db)
|
||||||
|
rs.SetPruning(store.PruneNothing)
|
||||||
|
key := sdk.NewKVStoreKey(storeKey)
|
||||||
|
rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil)
|
||||||
|
err := rs.LoadLatestVersion()
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, int64(0), rs.LastCommitID().Version)
|
||||||
|
|
||||||
|
// write some data in substore
|
||||||
|
kv, _ := rs.GetStore(key).(store.KVStore)
|
||||||
|
require.NotNil(t, kv)
|
||||||
|
kv.Set(k, v)
|
||||||
|
commitID := rs.Commit()
|
||||||
|
require.Equal(t, int64(1), commitID.Version)
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) {
|
||||||
|
rs := rootmulti.NewStore(db)
|
||||||
|
rs.SetPruning(store.PruneNothing)
|
||||||
|
key := sdk.NewKVStoreKey(storeKey)
|
||||||
|
rs.MountStoreWithDB(key, store.StoreTypeIAVL, nil)
|
||||||
|
err := rs.LoadLatestVersion()
|
||||||
|
require.Nil(t, err)
|
||||||
|
require.Equal(t, ver, rs.LastCommitID().Version)
|
||||||
|
|
||||||
|
// query data in substore
|
||||||
|
kv, _ := rs.GetStore(key).(store.KVStore)
|
||||||
|
|
||||||
|
require.NotNil(t, kv)
|
||||||
|
require.Equal(t, v, kv.Get(k))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that we can make commits and then reload old versions.
// Test that LoadLatestVersion actually does.
func TestSetLoader(t *testing.T) {
	// set a temporary home dir
	homeDir, cleanUp := tests.NewTestCaseDir(t)
	defer cleanUp()
	// TODO cleanup viper
	viper.Set(flags.FlagHome, homeDir)

	// write an upgrade-info.json file so the loader machinery has something
	// to read; Height 0 matches the upgrade height used in the table below
	upgradeInfoFilePath := filepath.Join(homeDir, "upgrade-info.json")
	upgradeInfo := &store.UpgradeInfo{
		Name: "test", Height: 0,
	}
	data, err := json.Marshal(upgradeInfo)
	require.NoError(t, err)
	err = ioutil.WriteFile(upgradeInfoFilePath, data, 0644)
	require.NoError(t, err)

	// make sure it exists before running everything
	_, err = os.Stat(upgradeInfoFilePath)
	require.NoError(t, err)

	// table of loader configurations: origStoreKey is where the data starts,
	// loadStoreKey is where it is expected to be found after loading
	cases := map[string]struct {
		setLoader    func(*baseapp.BaseApp)
		origStoreKey string
		loadStoreKey string
	}{
		"don't set loader": {
			origStoreKey: "foo",
			loadStoreKey: "foo",
		},
		"rename with inline opts": {
			setLoader: useUpgradeLoader(0, &store.StoreUpgrades{
				Renamed: []store.StoreRename{{
					OldKey: "foo",
					NewKey: "bar",
				}},
			}),
			origStoreKey: "foo",
			loadStoreKey: "bar",
		},
	}

	k := []byte("key")
	v := []byte("value")

	for name, tc := range cases {
		tc := tc // capture range variable for the subtest closure
		t.Run(name, func(t *testing.T) {
			// prepare a db with some data
			db := dbm.NewMemDB()

			initStore(t, db, tc.origStoreKey, k, v)

			// load the app with the existing db
			opts := []func(*baseapp.BaseApp){baseapp.SetPruning(store.PruneNothing)}
			if tc.setLoader != nil {
				opts = append(opts, tc.setLoader)
			}

			app := baseapp.NewBaseApp(t.Name(), defaultLogger(), db, nil, opts...)
			capKey := sdk.NewKVStoreKey(baseapp.MainStoreKey)
			app.MountStores(capKey)
			app.MountStores(sdk.NewKVStoreKey(tc.loadStoreKey))
			err := app.LoadLatestVersion(capKey)
			require.Nil(t, err)

			// "execute" one block
			app.BeginBlock(abci.RequestBeginBlock{Header: abci.Header{Height: 2}})
			res := app.Commit()
			require.NotNil(t, res.Data)

			// check db is properly updated: the value is visible under the
			// (possibly renamed) load key, and nothing is stored under "foo"
			checkStore(t, db, 2, tc.loadStoreKey, k, v)
			checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil)
		})
	}
}
|
Loading…
Reference in New Issue