started fixing merge conflicts
This commit is contained in:
parent
1f99aa3fb2
commit
be27ec1d29
|
@ -5901,6 +5901,7 @@ Params defines the parameters for the staking module.
|
|||
| `max_entries` | [uint32](#uint32) | | max_entries is the max entries for either unbonding delegation or redelegation (per pair/trio). |
|
||||
| `historical_entries` | [uint32](#uint32) | | historical_entries is the number of historical entries to persist. |
|
||||
| `bond_denom` | [string](#string) | | bond_denom defines the bondable coin denomination. |
|
||||
| `power_reduction` | [string](#string) | | power_reduction is the amount of staking tokens required for 1 unit of consensus-engine power |
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -1,389 +0,0 @@
|
|||
package keeper_test
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/suite"
|
||||
tmbytes "github.com/tendermint/tendermint/libs/bytes"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
tmtypes "github.com/tendermint/tendermint/types"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/baseapp"
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
|
||||
"github.com/cosmos/cosmos-sdk/simapp"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/keeper"
|
||||
"github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
|
||||
commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
|
||||
ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
|
||||
localhosttypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/09-localhost/types"
|
||||
ibctesting "github.com/cosmos/cosmos-sdk/x/ibc/testing"
|
||||
ibctestingmock "github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
|
||||
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
|
||||
)
|
||||
|
||||
// Test identifiers and tendermint client parameters shared across the
// 02-client keeper tests.
const (
	testChainID          = "gaiahub-0"
	testChainIDRevision1 = "gaiahub-1"

	testClientID  = "tendermint-0"
	testClientID2 = "tendermint-1"
	testClientID3 = "tendermint-2"

	// height is the block height used for the suite's test context header.
	height = 5

	// Default durations used when constructing tendermint client states.
	trustingPeriod time.Duration = time.Hour * 24 * 7 * 2
	ubdPeriod      time.Duration = time.Hour * 24 * 7 * 3
	maxClockDrift  time.Duration = time.Second * 10
)
|
||||
|
||||
// Heights used throughout the tests; the revision number is the first
// argument to types.NewHeight.
var (
	testClientHeight          = types.NewHeight(0, 5)
	testClientHeightRevision1 = types.NewHeight(1, 5)
	newClientHeight           = types.NewHeight(1, 1)
)
|
||||
|
||||
// KeeperTestSuite is a testify suite exercising the IBC 02-client keeper.
// State is (re)initialized by SetupTest before every test method.
type KeeperTestSuite struct {
	suite.Suite

	// coordinator drives the two test chains in lockstep.
	coordinator *ibctesting.Coordinator

	chainA *ibctesting.TestChain
	chainB *ibctesting.TestChain

	cdc codec.Marshaler
	ctx sdk.Context
	// keeper is the 02-client keeper under test (taken from a simapp).
	keeper         *keeper.Keeper
	consensusState *ibctmtypes.ConsensusState
	header         *ibctmtypes.Header
	valSet         *tmtypes.ValidatorSet
	valSetHash     tmbytes.HexBytes
	privVal        tmtypes.PrivValidator
	now            time.Time
	past           time.Time

	// queryClient is a gRPC query client wired to the client keeper.
	queryClient types.QueryClient
}
|
||||
|
||||
// SetupTest initializes two coordinated test chains plus a standalone simapp
// whose 02-client keeper is exercised directly. It also seeds ten historical
// staking entries (needed for self-consensus-state lookups) and registers a
// localhost client and a gRPC query client.
func (suite *KeeperTestSuite) SetupTest() {
	suite.coordinator = ibctesting.NewCoordinator(suite.T(), 2)

	suite.chainA = suite.coordinator.GetChain(ibctesting.GetChainID(0))
	suite.chainB = suite.coordinator.GetChain(ibctesting.GetChainID(1))

	isCheckTx := false
	suite.now = time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC)
	suite.past = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC)
	now2 := suite.now.Add(time.Hour)
	app := simapp.Setup(isCheckTx)

	suite.cdc = app.AppCodec()
	// NOTE: the context's ChainID is testClientID, not testChainID — the
	// self-client tests rely on this context, so keep it as is.
	suite.ctx = app.BaseApp.NewContext(isCheckTx, tmproto.Header{Height: height, ChainID: testClientID, Time: now2})
	suite.keeper = &app.IBCKeeper.ClientKeeper
	suite.privVal = ibctestingmock.NewPV()

	pubKey, err := suite.privVal.GetPubKey()
	suite.Require().NoError(err)

	testClientHeightMinus1 := types.NewHeight(0, height-1)

	// single-validator set used for all headers/consensus states below
	validator := tmtypes.NewValidator(pubKey, 1)
	suite.valSet = tmtypes.NewValidatorSet([]*tmtypes.Validator{validator})
	suite.valSetHash = suite.valSet.Hash()
	suite.header = suite.chainA.CreateTMClientHeader(testChainID, int64(testClientHeight.RevisionHeight), testClientHeightMinus1, now2, suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal})
	suite.consensusState = ibctmtypes.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot([]byte("hash")), suite.valSetHash)

	// Store historical info for heights 1..10; the validator set grows by one
	// bonded validator with random tokens at each height.
	var validators stakingtypes.Validators
	for i := 1; i < 11; i++ {
		privVal := ibctestingmock.NewPV()
		tmPk, err := privVal.GetPubKey()
		suite.Require().NoError(err)
		pk, err := cryptocodec.FromTmPubKeyInterface(tmPk)
		suite.Require().NoError(err)
		val, err := stakingtypes.NewValidator(sdk.ValAddress(pk.Address()), pk, stakingtypes.Description{})
		suite.Require().NoError(err)

		val.Status = stakingtypes.Bonded
		val.Tokens = sdk.NewInt(rand.Int63())
		validators = append(validators, val)

		hi := stakingtypes.NewHistoricalInfo(suite.ctx.BlockHeader(), validators, app.StakingKeeper.PowerReduction(suite.ctx))
		app.StakingKeeper.SetHistoricalInfo(suite.ctx, int64(i), &hi)
	}

	// add localhost client
	revision := types.ParseChainID(suite.chainA.ChainID)
	localHostClient := localhosttypes.NewClientState(
		suite.chainA.ChainID, types.NewHeight(revision, uint64(suite.chainA.GetContext().BlockHeight())),
	)
	suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), exported.Localhost, localHostClient)

	// wire up a gRPC query client against the client keeper's query server
	queryHelper := baseapp.NewQueryServerTestHelper(suite.ctx, app.InterfaceRegistry())
	types.RegisterQueryServer(queryHelper, app.IBCKeeper.ClientKeeper)
	suite.queryClient = types.NewQueryClient(queryHelper)
}
|
||||
|
||||
// TestKeeperTestSuite runs all test methods registered on KeeperTestSuite.
func TestKeeperTestSuite(t *testing.T) {
	suite.Run(t, new(KeeperTestSuite))
}
|
||||
|
||||
func (suite *KeeperTestSuite) TestSetClientState() {
|
||||
clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
|
||||
suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
|
||||
|
||||
retrievedState, found := suite.keeper.GetClientState(suite.ctx, testClientID)
|
||||
suite.Require().True(found, "GetClientState failed")
|
||||
suite.Require().Equal(clientState, retrievedState, "Client states are not equal")
|
||||
}
|
||||
|
||||
func (suite *KeeperTestSuite) TestSetClientConsensusState() {
|
||||
suite.keeper.SetClientConsensusState(suite.ctx, testClientID, testClientHeight, suite.consensusState)
|
||||
|
||||
retrievedConsState, found := suite.keeper.GetClientConsensusState(suite.ctx, testClientID, testClientHeight)
|
||||
suite.Require().True(found, "GetConsensusState failed")
|
||||
|
||||
tmConsState, ok := retrievedConsState.(*ibctmtypes.ConsensusState)
|
||||
suite.Require().True(ok)
|
||||
suite.Require().Equal(suite.consensusState, tmConsState, "ConsensusState not stored correctly")
|
||||
}
|
||||
|
||||
func (suite *KeeperTestSuite) TestValidateSelfClient() {
|
||||
testClientHeight := types.NewHeight(0, uint64(suite.chainA.GetContext().BlockHeight()-1))
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
clientState exported.ClientState
|
||||
expPass bool
|
||||
}{
|
||||
{
|
||||
"success",
|
||||
ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
|
||||
true,
|
||||
},
|
||||
{
|
||||
"success with nil UpgradePath",
|
||||
ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), nil, false, false),
|
||||
true,
|
||||
},
|
||||
{
|
||||
"invalid client type",
|
||||
localhosttypes.NewClientState(suite.chainA.ChainID, testClientHeight),
|
||||
false,
|
||||
},
|
||||
{
|
||||
"frozen client",
|
||||
&ibctmtypes.ClientState{suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"incorrect chainID",
|
||||
ibctmtypes.NewClientState("gaiatestnet", ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
|
||||
false,
|
||||
},
|
||||
{
|
||||
"invalid client height",
|
||||
ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.NewHeight(0, uint64(suite.chainA.GetContext().BlockHeight())), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
|
||||
false,
|
||||
},
|
||||
{
|
||||
"invalid client revision",
|
||||
ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeightRevision1, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
|
||||
false,
|
||||
},
|
||||
{
|
||||
"invalid proof specs",
|
||||
ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, nil, ibctesting.UpgradePath, false, false),
|
||||
false,
|
||||
},
|
||||
{
|
||||
"invalid trust level",
|
||||
ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.Fraction{0, 1}, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
|
||||
false,
|
||||
},
|
||||
{
|
||||
"invalid unbonding period",
|
||||
ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod+10, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
|
||||
false,
|
||||
},
|
||||
{
|
||||
"invalid trusting period",
|
||||
ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, ubdPeriod+10, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
|
||||
false,
|
||||
},
|
||||
{
|
||||
"invalid upgrade path",
|
||||
ibctmtypes.NewClientState(suite.chainA.ChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), []string{"bad", "upgrade", "path"}, false, false),
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
err := suite.chainA.App.IBCKeeper.ClientKeeper.ValidateSelfClient(suite.chainA.GetContext(), tc.clientState)
|
||||
if tc.expPass {
|
||||
suite.Require().NoError(err, "expected valid client for case: %s", tc.name)
|
||||
} else {
|
||||
suite.Require().Error(err, "expected invalid client for case: %s", tc.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (suite KeeperTestSuite) TestGetAllGenesisClients() {
|
||||
clientIDs := []string{
|
||||
testClientID2, testClientID3, testClientID,
|
||||
}
|
||||
expClients := []exported.ClientState{
|
||||
ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
|
||||
ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
|
||||
ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, types.ZeroHeight(), commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false),
|
||||
}
|
||||
|
||||
expGenClients := make(types.IdentifiedClientStates, len(expClients))
|
||||
|
||||
for i := range expClients {
|
||||
suite.chainA.App.IBCKeeper.ClientKeeper.SetClientState(suite.chainA.GetContext(), clientIDs[i], expClients[i])
|
||||
expGenClients[i] = types.NewIdentifiedClientState(clientIDs[i], expClients[i])
|
||||
}
|
||||
|
||||
// add localhost client
|
||||
localHostClient, found := suite.chainA.App.IBCKeeper.ClientKeeper.GetClientState(suite.chainA.GetContext(), exported.Localhost)
|
||||
suite.Require().True(found)
|
||||
expGenClients = append(expGenClients, types.NewIdentifiedClientState(exported.Localhost, localHostClient))
|
||||
|
||||
genClients := suite.chainA.App.IBCKeeper.ClientKeeper.GetAllGenesisClients(suite.chainA.GetContext())
|
||||
|
||||
suite.Require().Equal(expGenClients.Sort(), genClients)
|
||||
}
|
||||
|
||||
func (suite KeeperTestSuite) TestGetAllGenesisMetadata() {
|
||||
expectedGenMetadata := []types.IdentifiedGenesisMetadata{
|
||||
types.NewIdentifiedGenesisMetadata(
|
||||
"clientA",
|
||||
[]types.GenesisMetadata{
|
||||
types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(0, 1)), []byte("foo")),
|
||||
types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(0, 2)), []byte("bar")),
|
||||
types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(0, 3)), []byte("baz")),
|
||||
},
|
||||
),
|
||||
types.NewIdentifiedGenesisMetadata(
|
||||
"clientB",
|
||||
[]types.GenesisMetadata{
|
||||
types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(1, 100)), []byte("val1")),
|
||||
types.NewGenesisMetadata(ibctmtypes.ProcessedTimeKey(types.NewHeight(2, 300)), []byte("val2")),
|
||||
},
|
||||
),
|
||||
}
|
||||
|
||||
genClients := []types.IdentifiedClientState{
|
||||
types.NewIdentifiedClientState("clientA", &ibctmtypes.ClientState{}), types.NewIdentifiedClientState("clientB", &ibctmtypes.ClientState{}),
|
||||
types.NewIdentifiedClientState("clientC", &ibctmtypes.ClientState{}), types.NewIdentifiedClientState("clientD", &localhosttypes.ClientState{}),
|
||||
}
|
||||
|
||||
suite.chainA.App.IBCKeeper.ClientKeeper.SetAllClientMetadata(suite.chainA.GetContext(), expectedGenMetadata)
|
||||
|
||||
actualGenMetadata, err := suite.chainA.App.IBCKeeper.ClientKeeper.GetAllClientMetadata(suite.chainA.GetContext(), genClients)
|
||||
suite.Require().NoError(err, "get client metadata returned error unexpectedly")
|
||||
suite.Require().Equal(expectedGenMetadata, actualGenMetadata, "retrieved metadata is unexpected")
|
||||
}
|
||||
|
||||
func (suite KeeperTestSuite) TestGetConsensusState() {
|
||||
suite.ctx = suite.ctx.WithBlockHeight(10)
|
||||
cases := []struct {
|
||||
name string
|
||||
height types.Height
|
||||
expPass bool
|
||||
}{
|
||||
{"zero height", types.ZeroHeight(), false},
|
||||
{"height > latest height", types.NewHeight(0, uint64(suite.ctx.BlockHeight())+1), false},
|
||||
{"latest height - 1", types.NewHeight(0, uint64(suite.ctx.BlockHeight())-1), true},
|
||||
{"latest height", types.GetSelfHeight(suite.ctx), true},
|
||||
}
|
||||
|
||||
for i, tc := range cases {
|
||||
tc := tc
|
||||
cs, found := suite.keeper.GetSelfConsensusState(suite.ctx, tc.height)
|
||||
if tc.expPass {
|
||||
suite.Require().True(found, "Case %d should have passed: %s", i, tc.name)
|
||||
suite.Require().NotNil(cs, "Case %d should have passed: %s", i, tc.name)
|
||||
} else {
|
||||
suite.Require().False(found, "Case %d should have failed: %s", i, tc.name)
|
||||
suite.Require().Nil(cs, "Case %d should have failed: %s", i, tc.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (suite KeeperTestSuite) TestConsensusStateHelpers() {
|
||||
// initial setup
|
||||
clientState := ibctmtypes.NewClientState(testChainID, ibctmtypes.DefaultTrustLevel, trustingPeriod, ubdPeriod, maxClockDrift, testClientHeight, commitmenttypes.GetSDKSpecs(), ibctesting.UpgradePath, false, false)
|
||||
|
||||
suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
|
||||
suite.keeper.SetClientConsensusState(suite.ctx, testClientID, testClientHeight, suite.consensusState)
|
||||
|
||||
nextState := ibctmtypes.NewConsensusState(suite.now, commitmenttypes.NewMerkleRoot([]byte("next")), suite.valSetHash)
|
||||
|
||||
testClientHeightPlus5 := types.NewHeight(0, height+5)
|
||||
|
||||
header := suite.chainA.CreateTMClientHeader(testClientID, int64(testClientHeightPlus5.RevisionHeight), testClientHeight, suite.header.Header.Time.Add(time.Minute),
|
||||
suite.valSet, suite.valSet, []tmtypes.PrivValidator{suite.privVal})
|
||||
|
||||
// mock update functionality
|
||||
clientState.LatestHeight = header.GetHeight().(types.Height)
|
||||
suite.keeper.SetClientConsensusState(suite.ctx, testClientID, header.GetHeight(), nextState)
|
||||
suite.keeper.SetClientState(suite.ctx, testClientID, clientState)
|
||||
|
||||
latest, ok := suite.keeper.GetLatestClientConsensusState(suite.ctx, testClientID)
|
||||
suite.Require().True(ok)
|
||||
suite.Require().Equal(nextState, latest, "Latest client not returned correctly")
|
||||
}
|
||||
|
||||
// 2 clients in total are created on chainA. The first client is updated so it contains an initial consensus state
|
||||
// and a consensus state at the update height.
|
||||
func (suite KeeperTestSuite) TestGetAllConsensusStates() {
|
||||
clientA, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
|
||||
|
||||
clientState := suite.chainA.GetClientState(clientA)
|
||||
expConsensusHeight0 := clientState.GetLatestHeight()
|
||||
consensusState0, ok := suite.chainA.GetConsensusState(clientA, expConsensusHeight0)
|
||||
suite.Require().True(ok)
|
||||
|
||||
// update client to create a second consensus state
|
||||
err := suite.coordinator.UpdateClient(suite.chainA, suite.chainB, clientA, exported.Tendermint)
|
||||
suite.Require().NoError(err)
|
||||
|
||||
clientState = suite.chainA.GetClientState(clientA)
|
||||
expConsensusHeight1 := clientState.GetLatestHeight()
|
||||
suite.Require().True(expConsensusHeight1.GT(expConsensusHeight0))
|
||||
consensusState1, ok := suite.chainA.GetConsensusState(clientA, expConsensusHeight1)
|
||||
suite.Require().True(ok)
|
||||
|
||||
expConsensus := []exported.ConsensusState{
|
||||
consensusState0,
|
||||
consensusState1,
|
||||
}
|
||||
|
||||
// create second client on chainA
|
||||
clientA2, _ := suite.coordinator.SetupClients(suite.chainA, suite.chainB, exported.Tendermint)
|
||||
clientState = suite.chainA.GetClientState(clientA2)
|
||||
|
||||
expConsensusHeight2 := clientState.GetLatestHeight()
|
||||
consensusState2, ok := suite.chainA.GetConsensusState(clientA2, expConsensusHeight2)
|
||||
suite.Require().True(ok)
|
||||
|
||||
expConsensus2 := []exported.ConsensusState{consensusState2}
|
||||
|
||||
expConsensusStates := types.ClientsConsensusStates{
|
||||
types.NewClientConsensusStates(clientA, []types.ConsensusStateWithHeight{
|
||||
types.NewConsensusStateWithHeight(expConsensusHeight0.(types.Height), expConsensus[0]),
|
||||
types.NewConsensusStateWithHeight(expConsensusHeight1.(types.Height), expConsensus[1]),
|
||||
}),
|
||||
types.NewClientConsensusStates(clientA2, []types.ConsensusStateWithHeight{
|
||||
types.NewConsensusStateWithHeight(expConsensusHeight2.(types.Height), expConsensus2[0]),
|
||||
}),
|
||||
}.Sort()
|
||||
|
||||
consStates := suite.chainA.App.IBCKeeper.ClientKeeper.GetAllConsensusStates(suite.chainA.GetContext())
|
||||
suite.Require().Equal(expConsensusStates, consStates, "%s \n\n%s", expConsensusStates, consStates)
|
||||
}
|
|
@ -1,910 +0,0 @@
|
|||
package ibctesting
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
tmprotoversion "github.com/tendermint/tendermint/proto/tendermint/version"
|
||||
tmtypes "github.com/tendermint/tendermint/types"
|
||||
tmversion "github.com/tendermint/tendermint/version"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
"github.com/cosmos/cosmos-sdk/codec"
|
||||
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
|
||||
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
|
||||
"github.com/cosmos/cosmos-sdk/simapp"
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
|
||||
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
|
||||
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
|
||||
capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
|
||||
ibctransfertypes "github.com/cosmos/cosmos-sdk/x/ibc/applications/transfer/types"
|
||||
clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types"
|
||||
connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types"
|
||||
channeltypes "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types"
|
||||
commitmenttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/23-commitment/types"
|
||||
host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host"
|
||||
"github.com/cosmos/cosmos-sdk/x/ibc/core/exported"
|
||||
"github.com/cosmos/cosmos-sdk/x/ibc/core/types"
|
||||
ibctmtypes "github.com/cosmos/cosmos-sdk/x/ibc/light-clients/07-tendermint/types"
|
||||
"github.com/cosmos/cosmos-sdk/x/ibc/testing/mock"
|
||||
"github.com/cosmos/cosmos-sdk/x/staking/teststaking"
|
||||
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
|
||||
)
|
||||
|
||||
const (
	// Default params constants used to create a TM client
	TrustingPeriod     time.Duration = time.Hour * 24 * 7 * 2
	UnbondingPeriod    time.Duration = time.Hour * 24 * 7 * 3
	MaxClockDrift      time.Duration = time.Second * 10
	DefaultDelayPeriod uint64        = 0

	// DefaultChannelVersion mirrors the ICS-20 transfer module version.
	DefaultChannelVersion = ibctransfertypes.Version
	// InvalidID is a deliberately malformed identifier for negative tests.
	InvalidID = "IDisInvalid"

	// Prefixes used when generating connection and channel identifiers.
	ConnectionIDPrefix = "conn"
	ChannelIDPrefix    = "chan"

	TransferPort = ibctransfertypes.ModuleName
	MockPort     = mock.ModuleName

	// used for testing UpdateClientProposal
	Title       = "title"
	Description = "description"
)
|
||||
|
||||
var (
	// DefaultOpenInitVersion is nil so the handshake selects default versions.
	DefaultOpenInitVersion *connectiontypes.Version

	// Default params variables used to create a TM client
	DefaultTrustLevel ibctmtypes.Fraction = ibctmtypes.DefaultTrustLevel
	TestHash                              = tmhash.Sum([]byte("TESTING HASH"))
	TestCoin                              = sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100))

	// UpgradePath is the default key path used by upgrade tests.
	UpgradePath = []string{"upgrade", "upgradedIBCState"}

	// ConnectionVersion is the first compatible connection version in proto form.
	ConnectionVersion = connectiontypes.ExportedVersionsToProto(connectiontypes.GetCompatibleVersions())[0]

	MockAcknowledgement = mock.MockAcknowledgement
	MockCommitment      = mock.MockCommitment
)
|
||||
|
||||
// TestChain is a testing struct that wraps a simapp with the last TM Header, the current ABCI
// header and the validators of the TestChain. It also contains a field called ChainID. This
// is the clientID that *other* chains use to refer to this TestChain. The SenderAccount
// is used for delivering transactions through the application state.
// NOTE: the actual application uses an empty chain-id for ease of testing.
type TestChain struct {
	t *testing.T

	App           *simapp.SimApp
	ChainID       string
	LastHeader    *ibctmtypes.Header // header for last block height committed
	CurrentHeader tmproto.Header     // header for current block height
	QueryServer   types.QueryServer
	TxConfig      client.TxConfig
	Codec         codec.BinaryMarshaler

	// Vals and Signers together form the chain's (single-validator) consensus set.
	Vals    *tmtypes.ValidatorSet
	Signers []tmtypes.PrivValidator

	// senderPrivKey signs transactions sent via SendMsgs on behalf of SenderAccount.
	senderPrivKey cryptotypes.PrivKey
	SenderAccount authtypes.AccountI

	// IBC specific helpers
	ClientIDs   []string          // ClientID's used on this chain
	Connections []*TestConnection // track connectionID's created for this chain
}
|
||||
|
||||
// NewTestChain initializes a new TestChain instance with a single validator set using a
// generated private key. It also creates a sender account to be used for delivering transactions.
//
// The first block height is committed to state in order to allow for client creations on
// counterparty chains. The TestChain will return with a block height starting at 2.
//
// Time management is handled by the Coordinator in order to ensure synchrony between chains.
// Each update of any chain increments the block header time for all chains by 5 seconds.
func NewTestChain(t *testing.T, chainID string) *TestChain {
	// generate validator private/public key
	privVal := mock.NewPV()
	pubKey, err := privVal.GetPubKey()
	require.NoError(t, err)

	// create validator set with single validator
	validator := tmtypes.NewValidator(pubKey, 1)
	valSet := tmtypes.NewValidatorSet([]*tmtypes.Validator{validator})
	signers := []tmtypes.PrivValidator{privVal}

	// generate genesis account funded with the default bond denom
	senderPrivKey := secp256k1.GenPrivKey()
	acc := authtypes.NewBaseAccount(senderPrivKey.PubKey().Address().Bytes(), senderPrivKey.PubKey(), 0, 0)
	balance := banktypes.Balance{
		Address: acc.GetAddress().String(),
		Coins:   sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100000000000000))),
	}

	app := simapp.SetupWithGenesisValSet(t, valSet, []authtypes.GenesisAccount{acc}, balance)

	// create current header and call begin block
	header := tmproto.Header{
		ChainID: chainID,
		Height:  1,
		Time:    globalStartTime,
	}

	txConfig := simapp.MakeTestEncodingConfig().TxConfig

	// create an account to send transactions from
	chain := &TestChain{
		t:             t,
		ChainID:       chainID,
		App:           app,
		CurrentHeader: header,
		QueryServer:   app.IBCKeeper,
		TxConfig:      txConfig,
		Codec:         app.AppCodec(),
		Vals:          valSet,
		Signers:       signers,
		senderPrivKey: senderPrivKey,
		SenderAccount: acc,
		ClientIDs:     make([]string, 0),
		Connections:   make([]*TestConnection, 0),
	}

	// bind the mock module's port and hand its capability to the scoped mock keeper
	cap := chain.App.IBCKeeper.PortKeeper.BindPort(chain.GetContext(), MockPort)
	err = chain.App.ScopedIBCMockKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(MockPort))
	require.NoError(t, err)

	// commit height 1 so counterparty clients can be created against this chain
	chain.NextBlock()

	return chain
}
|
||||
|
||||
// GetContext returns the current context for the application.
|
||||
func (chain *TestChain) GetContext() sdk.Context {
|
||||
return chain.App.BaseApp.NewContext(false, chain.CurrentHeader)
|
||||
}
|
||||
|
||||
// QueryProof performs an abci query with the given key and returns the proto encoded merkle proof
|
||||
// for the query and the height at which the proof will succeed on a tendermint verifier.
|
||||
func (chain *TestChain) QueryProof(key []byte) ([]byte, clienttypes.Height) {
|
||||
res := chain.App.Query(abci.RequestQuery{
|
||||
Path: fmt.Sprintf("store/%s/key", host.StoreKey),
|
||||
Height: chain.App.LastBlockHeight() - 1,
|
||||
Data: key,
|
||||
Prove: true,
|
||||
})
|
||||
|
||||
merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
|
||||
require.NoError(chain.t, err)
|
||||
|
||||
proof, err := chain.App.AppCodec().MarshalBinaryBare(&merkleProof)
|
||||
require.NoError(chain.t, err)
|
||||
|
||||
revision := clienttypes.ParseChainID(chain.ChainID)
|
||||
|
||||
// proof height + 1 is returned as the proof created corresponds to the height the proof
|
||||
// was created in the IAVL tree. Tendermint and subsequently the clients that rely on it
|
||||
// have heights 1 above the IAVL tree. Thus we return proof height + 1
|
||||
return proof, clienttypes.NewHeight(revision, uint64(res.Height)+1)
|
||||
}
|
||||
|
||||
// QueryUpgradeProof performs an abci query with the given key and returns the proto encoded merkle proof
|
||||
// for the query and the height at which the proof will succeed on a tendermint verifier.
|
||||
func (chain *TestChain) QueryUpgradeProof(key []byte, height uint64) ([]byte, clienttypes.Height) {
|
||||
res := chain.App.Query(abci.RequestQuery{
|
||||
Path: "store/upgrade/key",
|
||||
Height: int64(height - 1),
|
||||
Data: key,
|
||||
Prove: true,
|
||||
})
|
||||
|
||||
merkleProof, err := commitmenttypes.ConvertProofs(res.ProofOps)
|
||||
require.NoError(chain.t, err)
|
||||
|
||||
proof, err := chain.App.AppCodec().MarshalBinaryBare(&merkleProof)
|
||||
require.NoError(chain.t, err)
|
||||
|
||||
revision := clienttypes.ParseChainID(chain.ChainID)
|
||||
|
||||
// proof height + 1 is returned as the proof created corresponds to the height the proof
|
||||
// was created in the IAVL tree. Tendermint and subsequently the clients that rely on it
|
||||
// have heights 1 above the IAVL tree. Thus we return proof height + 1
|
||||
return proof, clienttypes.NewHeight(revision, uint64(res.Height+1))
|
||||
}
|
||||
|
||||
// QueryClientStateProof performs and abci query for a client state
|
||||
// stored with a given clientID and returns the ClientState along with the proof
|
||||
func (chain *TestChain) QueryClientStateProof(clientID string) (exported.ClientState, []byte) {
|
||||
// retrieve client state to provide proof for
|
||||
clientState, found := chain.App.IBCKeeper.ClientKeeper.GetClientState(chain.GetContext(), clientID)
|
||||
require.True(chain.t, found)
|
||||
|
||||
clientKey := host.FullClientStateKey(clientID)
|
||||
proofClient, _ := chain.QueryProof(clientKey)
|
||||
|
||||
return clientState, proofClient
|
||||
}
|
||||
|
||||
// QueryConsensusStateProof performs an abci query for a consensus state
|
||||
// stored on the given clientID. The proof and consensusHeight are returned.
|
||||
func (chain *TestChain) QueryConsensusStateProof(clientID string) ([]byte, clienttypes.Height) {
|
||||
clientState := chain.GetClientState(clientID)
|
||||
|
||||
consensusHeight := clientState.GetLatestHeight().(clienttypes.Height)
|
||||
consensusKey := host.FullConsensusStateKey(clientID, consensusHeight)
|
||||
proofConsensus, _ := chain.QueryProof(consensusKey)
|
||||
|
||||
return proofConsensus, consensusHeight
|
||||
}
|
||||
|
||||
// NextBlock sets the last header to the current header and increments the current header to be
|
||||
// at the next block height. It does not update the time as that is handled by the Coordinator.
|
||||
//
|
||||
// CONTRACT: this function must only be called after app.Commit() occurs
|
||||
func (chain *TestChain) NextBlock() {
|
||||
// set the last header to the current header
|
||||
// use nil trusted fields
|
||||
chain.LastHeader = chain.CurrentTMClientHeader()
|
||||
|
||||
// increment the current header
|
||||
chain.CurrentHeader = tmproto.Header{
|
||||
ChainID: chain.ChainID,
|
||||
Height: chain.App.LastBlockHeight() + 1,
|
||||
AppHash: chain.App.LastCommitID().Hash,
|
||||
// NOTE: the time is increased by the coordinator to maintain time synchrony amongst
|
||||
// chains.
|
||||
Time: chain.CurrentHeader.Time,
|
||||
ValidatorsHash: chain.Vals.Hash(),
|
||||
NextValidatorsHash: chain.Vals.Hash(),
|
||||
}
|
||||
|
||||
chain.App.BeginBlock(abci.RequestBeginBlock{Header: chain.CurrentHeader})
|
||||
|
||||
}
|
||||
|
||||
// sendMsgs delivers a transaction through the application without returning the result.
|
||||
func (chain *TestChain) sendMsgs(msgs ...sdk.Msg) error {
|
||||
_, err := chain.SendMsgs(msgs...)
|
||||
return err
|
||||
}
|
||||
|
||||
// SendMsgs delivers a transaction through the application. It updates the senders sequence
|
||||
// number and updates the TestChain's headers. It returns the result and error if one
|
||||
// occurred.
|
||||
func (chain *TestChain) SendMsgs(msgs ...sdk.Msg) (*sdk.Result, error) {
|
||||
_, r, err := simapp.SignCheckDeliver(
|
||||
chain.t,
|
||||
chain.TxConfig,
|
||||
chain.App.BaseApp,
|
||||
chain.GetContext().BlockHeader(),
|
||||
msgs,
|
||||
chain.ChainID,
|
||||
[]uint64{chain.SenderAccount.GetAccountNumber()},
|
||||
[]uint64{chain.SenderAccount.GetSequence()},
|
||||
true, true, chain.senderPrivKey,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// SignCheckDeliver calls app.Commit()
|
||||
chain.NextBlock()
|
||||
|
||||
// increment sequence for successful transaction execution
|
||||
chain.SenderAccount.SetSequence(chain.SenderAccount.GetSequence() + 1)
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// GetClientState retrieves the client state for the provided clientID. The client is
|
||||
// expected to exist otherwise testing will fail.
|
||||
func (chain *TestChain) GetClientState(clientID string) exported.ClientState {
|
||||
clientState, found := chain.App.IBCKeeper.ClientKeeper.GetClientState(chain.GetContext(), clientID)
|
||||
require.True(chain.t, found)
|
||||
|
||||
return clientState
|
||||
}
|
||||
|
||||
// GetConsensusState retrieves the consensus state for the provided clientID and height.
|
||||
// It will return a success boolean depending on if consensus state exists or not.
|
||||
func (chain *TestChain) GetConsensusState(clientID string, height exported.Height) (exported.ConsensusState, bool) {
|
||||
return chain.App.IBCKeeper.ClientKeeper.GetClientConsensusState(chain.GetContext(), clientID, height)
|
||||
}
|
||||
|
||||
// GetValsAtHeight will return the validator set of the chain at a given height. It will return
|
||||
// a success boolean depending on if the validator set exists or not at that height.
|
||||
func (chain *TestChain) GetValsAtHeight(height int64) (*tmtypes.ValidatorSet, bool) {
|
||||
histInfo, ok := chain.App.StakingKeeper.GetHistoricalInfo(chain.GetContext(), height)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
valSet := stakingtypes.Validators(histInfo.Valset)
|
||||
|
||||
tmValidators, err := teststaking.ToTmValidators(valSet, sdk.DefaultPowerReduction)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return tmtypes.NewValidatorSet(tmValidators), true
|
||||
}
|
||||
|
||||
// GetConnection retrieves an IBC Connection for the provided TestConnection. The
|
||||
// connection is expected to exist otherwise testing will fail.
|
||||
func (chain *TestChain) GetConnection(testConnection *TestConnection) connectiontypes.ConnectionEnd {
|
||||
connection, found := chain.App.IBCKeeper.ConnectionKeeper.GetConnection(chain.GetContext(), testConnection.ID)
|
||||
require.True(chain.t, found)
|
||||
|
||||
return connection
|
||||
}
|
||||
|
||||
// GetChannel retrieves an IBC Channel for the provided TestChannel. The channel
|
||||
// is expected to exist otherwise testing will fail.
|
||||
func (chain *TestChain) GetChannel(testChannel TestChannel) channeltypes.Channel {
|
||||
channel, found := chain.App.IBCKeeper.ChannelKeeper.GetChannel(chain.GetContext(), testChannel.PortID, testChannel.ID)
|
||||
require.True(chain.t, found)
|
||||
|
||||
return channel
|
||||
}
|
||||
|
||||
// GetAcknowledgement retrieves an acknowledgement for the provided packet. If the
|
||||
// acknowledgement does not exist then testing will fail.
|
||||
func (chain *TestChain) GetAcknowledgement(packet exported.PacketI) []byte {
|
||||
ack, found := chain.App.IBCKeeper.ChannelKeeper.GetPacketAcknowledgement(chain.GetContext(), packet.GetDestPort(), packet.GetDestChannel(), packet.GetSequence())
|
||||
require.True(chain.t, found)
|
||||
|
||||
return ack
|
||||
}
|
||||
|
||||
// GetPrefix returns the prefix for used by a chain in connection creation
|
||||
func (chain *TestChain) GetPrefix() commitmenttypes.MerklePrefix {
|
||||
return commitmenttypes.NewMerklePrefix(chain.App.IBCKeeper.ConnectionKeeper.GetCommitmentPrefix().Bytes())
|
||||
}
|
||||
|
||||
// NewClientID appends a new clientID string in the format:
|
||||
// ClientFor<counterparty-chain-id><index>
|
||||
func (chain *TestChain) NewClientID(clientType string) string {
|
||||
clientID := fmt.Sprintf("%s-%s", clientType, strconv.Itoa(len(chain.ClientIDs)))
|
||||
chain.ClientIDs = append(chain.ClientIDs, clientID)
|
||||
return clientID
|
||||
}
|
||||
|
||||
// AddTestConnection appends a new TestConnection which contains references
|
||||
// to the connection id, client id and counterparty client id.
|
||||
func (chain *TestChain) AddTestConnection(clientID, counterpartyClientID string) *TestConnection {
|
||||
conn := chain.ConstructNextTestConnection(clientID, counterpartyClientID)
|
||||
|
||||
chain.Connections = append(chain.Connections, conn)
|
||||
return conn
|
||||
}
|
||||
|
||||
// ConstructNextTestConnection constructs the next test connection to be
|
||||
// created given a clientID and counterparty clientID. The connection id
|
||||
// format: <chainID>-conn<index>
|
||||
func (chain *TestChain) ConstructNextTestConnection(clientID, counterpartyClientID string) *TestConnection {
|
||||
connectionID := connectiontypes.FormatConnectionIdentifier(uint64(len(chain.Connections)))
|
||||
return &TestConnection{
|
||||
ID: connectionID,
|
||||
ClientID: clientID,
|
||||
NextChannelVersion: DefaultChannelVersion,
|
||||
CounterpartyClientID: counterpartyClientID,
|
||||
}
|
||||
}
|
||||
|
||||
// GetFirstTestConnection returns the first test connection for a given clientID.
|
||||
// The connection may or may not exist in the chain state.
|
||||
func (chain *TestChain) GetFirstTestConnection(clientID, counterpartyClientID string) *TestConnection {
|
||||
if len(chain.Connections) > 0 {
|
||||
return chain.Connections[0]
|
||||
}
|
||||
|
||||
return chain.ConstructNextTestConnection(clientID, counterpartyClientID)
|
||||
}
|
||||
|
||||
// AddTestChannel appends a new TestChannel which contains references to the port and channel ID
|
||||
// used for channel creation and interaction. See 'NextTestChannel' for channel ID naming format.
|
||||
func (chain *TestChain) AddTestChannel(conn *TestConnection, portID string) TestChannel {
|
||||
channel := chain.NextTestChannel(conn, portID)
|
||||
conn.Channels = append(conn.Channels, channel)
|
||||
return channel
|
||||
}
|
||||
|
||||
// NextTestChannel returns the next test channel to be created on this connection, but does not
|
||||
// add it to the list of created channels. This function is expected to be used when the caller
|
||||
// has not created the associated channel in app state, but would still like to refer to the
|
||||
// non-existent channel usually to test for its non-existence.
|
||||
//
|
||||
// channel ID format: <connectionid>-chan<channel-index>
|
||||
//
|
||||
// The port is passed in by the caller.
|
||||
func (chain *TestChain) NextTestChannel(conn *TestConnection, portID string) TestChannel {
|
||||
nextChanSeq := chain.App.IBCKeeper.ChannelKeeper.GetNextChannelSequence(chain.GetContext())
|
||||
channelID := channeltypes.FormatChannelIdentifier(nextChanSeq)
|
||||
return TestChannel{
|
||||
PortID: portID,
|
||||
ID: channelID,
|
||||
ClientID: conn.ClientID,
|
||||
CounterpartyClientID: conn.CounterpartyClientID,
|
||||
Version: conn.NextChannelVersion,
|
||||
}
|
||||
}
|
||||
|
||||
// ConstructMsgCreateClient constructs a message to create a new client state (tendermint or solomachine).
|
||||
// NOTE: a solo machine client will be created with an empty diversifier.
|
||||
func (chain *TestChain) ConstructMsgCreateClient(counterparty *TestChain, clientID string, clientType string) *clienttypes.MsgCreateClient {
|
||||
var (
|
||||
clientState exported.ClientState
|
||||
consensusState exported.ConsensusState
|
||||
)
|
||||
|
||||
switch clientType {
|
||||
case exported.Tendermint:
|
||||
height := counterparty.LastHeader.GetHeight().(clienttypes.Height)
|
||||
clientState = ibctmtypes.NewClientState(
|
||||
counterparty.ChainID, DefaultTrustLevel, TrustingPeriod, UnbondingPeriod, MaxClockDrift,
|
||||
height, commitmenttypes.GetSDKSpecs(), UpgradePath, false, false,
|
||||
)
|
||||
consensusState = counterparty.LastHeader.ConsensusState()
|
||||
case exported.Solomachine:
|
||||
solo := NewSolomachine(chain.t, chain.Codec, clientID, "", 1)
|
||||
clientState = solo.ClientState()
|
||||
consensusState = solo.ConsensusState()
|
||||
default:
|
||||
chain.t.Fatalf("unsupported client state type %s", clientType)
|
||||
}
|
||||
|
||||
msg, err := clienttypes.NewMsgCreateClient(
|
||||
clientState, consensusState, chain.SenderAccount.GetAddress(),
|
||||
)
|
||||
require.NoError(chain.t, err)
|
||||
return msg
|
||||
}
|
||||
|
||||
// CreateTMClient will construct and execute a 07-tendermint MsgCreateClient. A counterparty
|
||||
// client will be created on the (target) chain.
|
||||
func (chain *TestChain) CreateTMClient(counterparty *TestChain, clientID string) error {
|
||||
// construct MsgCreateClient using counterparty
|
||||
msg := chain.ConstructMsgCreateClient(counterparty, clientID, exported.Tendermint)
|
||||
return chain.sendMsgs(msg)
|
||||
}
|
||||
|
||||
// UpdateTMClient will construct and execute a 07-tendermint MsgUpdateClient. The counterparty
|
||||
// client will be updated on the (target) chain. UpdateTMClient mocks the relayer flow
|
||||
// necessary for updating a Tendermint client.
|
||||
func (chain *TestChain) UpdateTMClient(counterparty *TestChain, clientID string) error {
|
||||
header, err := chain.ConstructUpdateTMClientHeader(counterparty, clientID)
|
||||
require.NoError(chain.t, err)
|
||||
|
||||
msg, err := clienttypes.NewMsgUpdateClient(
|
||||
clientID, header,
|
||||
chain.SenderAccount.GetAddress(),
|
||||
)
|
||||
require.NoError(chain.t, err)
|
||||
|
||||
return chain.sendMsgs(msg)
|
||||
}
|
||||
|
||||
// ConstructUpdateTMClientHeader will construct a valid 07-tendermint Header to update the
|
||||
// light client on the source chain.
|
||||
func (chain *TestChain) ConstructUpdateTMClientHeader(counterparty *TestChain, clientID string) (*ibctmtypes.Header, error) {
|
||||
header := counterparty.LastHeader
|
||||
// Relayer must query for LatestHeight on client to get TrustedHeight
|
||||
trustedHeight := chain.GetClientState(clientID).GetLatestHeight().(clienttypes.Height)
|
||||
var (
|
||||
tmTrustedVals *tmtypes.ValidatorSet
|
||||
ok bool
|
||||
)
|
||||
// Once we get TrustedHeight from client, we must query the validators from the counterparty chain
|
||||
// If the LatestHeight == LastHeader.Height, then TrustedValidators are current validators
|
||||
// If LatestHeight < LastHeader.Height, we can query the historical validator set from HistoricalInfo
|
||||
if trustedHeight == counterparty.LastHeader.GetHeight() {
|
||||
tmTrustedVals = counterparty.Vals
|
||||
} else {
|
||||
// NOTE: We need to get validators from counterparty at height: trustedHeight+1
|
||||
// since the last trusted validators for a header at height h
|
||||
// is the NextValidators at h+1 committed to in header h by
|
||||
// NextValidatorsHash
|
||||
tmTrustedVals, ok = counterparty.GetValsAtHeight(int64(trustedHeight.RevisionHeight + 1))
|
||||
if !ok {
|
||||
return nil, sdkerrors.Wrapf(ibctmtypes.ErrInvalidHeaderHeight, "could not retrieve trusted validators at trustedHeight: %d", trustedHeight)
|
||||
}
|
||||
}
|
||||
// inject trusted fields into last header
|
||||
// for now assume revision number is 0
|
||||
header.TrustedHeight = trustedHeight
|
||||
|
||||
trustedVals, err := tmTrustedVals.ToProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
header.TrustedValidators = trustedVals
|
||||
|
||||
return header, nil
|
||||
|
||||
}
|
||||
|
||||
// ExpireClient fast forwards the chain's block time by the provided amount of time which will
|
||||
// expire any clients with a trusting period less than or equal to this amount of time.
|
||||
func (chain *TestChain) ExpireClient(amount time.Duration) {
|
||||
chain.CurrentHeader.Time = chain.CurrentHeader.Time.Add(amount)
|
||||
}
|
||||
|
||||
// CurrentTMClientHeader creates a TM header using the current header parameters
|
||||
// on the chain. The trusted fields in the header are set to nil.
|
||||
func (chain *TestChain) CurrentTMClientHeader() *ibctmtypes.Header {
|
||||
return chain.CreateTMClientHeader(chain.ChainID, chain.CurrentHeader.Height, clienttypes.Height{}, chain.CurrentHeader.Time, chain.Vals, nil, chain.Signers)
|
||||
}
|
||||
|
||||
// CreateTMClientHeader creates a TM header to update the TM client. Args are passed in to allow
|
||||
// caller flexibility to use params that differ from the chain.
|
||||
func (chain *TestChain) CreateTMClientHeader(chainID string, blockHeight int64, trustedHeight clienttypes.Height, timestamp time.Time, tmValSet, tmTrustedVals *tmtypes.ValidatorSet, signers []tmtypes.PrivValidator) *ibctmtypes.Header {
|
||||
var (
|
||||
valSet *tmproto.ValidatorSet
|
||||
trustedVals *tmproto.ValidatorSet
|
||||
)
|
||||
require.NotNil(chain.t, tmValSet)
|
||||
|
||||
vsetHash := tmValSet.Hash()
|
||||
|
||||
tmHeader := tmtypes.Header{
|
||||
Version: tmprotoversion.Consensus{Block: tmversion.BlockProtocol, App: 2},
|
||||
ChainID: chainID,
|
||||
Height: blockHeight,
|
||||
Time: timestamp,
|
||||
LastBlockID: MakeBlockID(make([]byte, tmhash.Size), 10_000, make([]byte, tmhash.Size)),
|
||||
LastCommitHash: chain.App.LastCommitID().Hash,
|
||||
DataHash: tmhash.Sum([]byte("data_hash")),
|
||||
ValidatorsHash: vsetHash,
|
||||
NextValidatorsHash: vsetHash,
|
||||
ConsensusHash: tmhash.Sum([]byte("consensus_hash")),
|
||||
AppHash: chain.CurrentHeader.AppHash,
|
||||
LastResultsHash: tmhash.Sum([]byte("last_results_hash")),
|
||||
EvidenceHash: tmhash.Sum([]byte("evidence_hash")),
|
||||
ProposerAddress: tmValSet.Proposer.Address, //nolint:staticcheck
|
||||
}
|
||||
hhash := tmHeader.Hash()
|
||||
blockID := MakeBlockID(hhash, 3, tmhash.Sum([]byte("part_set")))
|
||||
voteSet := tmtypes.NewVoteSet(chainID, blockHeight, 1, tmproto.PrecommitType, tmValSet)
|
||||
|
||||
commit, err := tmtypes.MakeCommit(blockID, blockHeight, 1, voteSet, signers, timestamp)
|
||||
require.NoError(chain.t, err)
|
||||
|
||||
signedHeader := &tmproto.SignedHeader{
|
||||
Header: tmHeader.ToProto(),
|
||||
Commit: commit.ToProto(),
|
||||
}
|
||||
|
||||
if tmValSet != nil {
|
||||
valSet, err = tmValSet.ToProto()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
if tmTrustedVals != nil {
|
||||
trustedVals, err = tmTrustedVals.ToProto()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// The trusted fields may be nil. They may be filled before relaying messages to a client.
|
||||
// The relayer is responsible for querying client and injecting appropriate trusted fields.
|
||||
return &ibctmtypes.Header{
|
||||
SignedHeader: signedHeader,
|
||||
ValidatorSet: valSet,
|
||||
TrustedHeight: trustedHeight,
|
||||
TrustedValidators: trustedVals,
|
||||
}
|
||||
}
|
||||
|
||||
// MakeBlockID copied unimported test functions from tmtypes to use them here
|
||||
func MakeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) tmtypes.BlockID {
|
||||
return tmtypes.BlockID{
|
||||
Hash: hash,
|
||||
PartSetHeader: tmtypes.PartSetHeader{
|
||||
Total: partSetSize,
|
||||
Hash: partSetHash,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// CreateSortedSignerArray takes two PrivValidators, and the corresponding Validator structs
|
||||
// (including voting power). It returns a signer array of PrivValidators that matches the
|
||||
// sorting of ValidatorSet.
|
||||
// The sorting is first by .VotingPower (descending), with secondary index of .Address (ascending).
|
||||
func CreateSortedSignerArray(altPrivVal, suitePrivVal tmtypes.PrivValidator,
|
||||
altVal, suiteVal *tmtypes.Validator) []tmtypes.PrivValidator {
|
||||
|
||||
switch {
|
||||
case altVal.VotingPower > suiteVal.VotingPower:
|
||||
return []tmtypes.PrivValidator{altPrivVal, suitePrivVal}
|
||||
case altVal.VotingPower < suiteVal.VotingPower:
|
||||
return []tmtypes.PrivValidator{suitePrivVal, altPrivVal}
|
||||
default:
|
||||
if bytes.Compare(altVal.Address, suiteVal.Address) == -1 {
|
||||
return []tmtypes.PrivValidator{altPrivVal, suitePrivVal}
|
||||
}
|
||||
return []tmtypes.PrivValidator{suitePrivVal, altPrivVal}
|
||||
}
|
||||
}
|
||||
|
||||
// ConnectionOpenInit will construct and execute a MsgConnectionOpenInit.
|
||||
func (chain *TestChain) ConnectionOpenInit(
|
||||
counterparty *TestChain,
|
||||
connection, counterpartyConnection *TestConnection,
|
||||
) error {
|
||||
msg := connectiontypes.NewMsgConnectionOpenInit(
|
||||
connection.ClientID,
|
||||
connection.CounterpartyClientID,
|
||||
counterparty.GetPrefix(), DefaultOpenInitVersion, DefaultDelayPeriod,
|
||||
chain.SenderAccount.GetAddress(),
|
||||
)
|
||||
return chain.sendMsgs(msg)
|
||||
}
|
||||
|
||||
// ConnectionOpenTry will construct and execute a MsgConnectionOpenTry.
|
||||
func (chain *TestChain) ConnectionOpenTry(
|
||||
counterparty *TestChain,
|
||||
connection, counterpartyConnection *TestConnection,
|
||||
) error {
|
||||
counterpartyClient, proofClient := counterparty.QueryClientStateProof(counterpartyConnection.ClientID)
|
||||
|
||||
connectionKey := host.ConnectionKey(counterpartyConnection.ID)
|
||||
proofInit, proofHeight := counterparty.QueryProof(connectionKey)
|
||||
|
||||
proofConsensus, consensusHeight := counterparty.QueryConsensusStateProof(counterpartyConnection.ClientID)
|
||||
|
||||
msg := connectiontypes.NewMsgConnectionOpenTry(
|
||||
"", connection.ClientID, // does not support handshake continuation
|
||||
counterpartyConnection.ID, counterpartyConnection.ClientID,
|
||||
counterpartyClient, counterparty.GetPrefix(), []*connectiontypes.Version{ConnectionVersion}, DefaultDelayPeriod,
|
||||
proofInit, proofClient, proofConsensus,
|
||||
proofHeight, consensusHeight,
|
||||
chain.SenderAccount.GetAddress(),
|
||||
)
|
||||
return chain.sendMsgs(msg)
|
||||
}
|
||||
|
||||
// ConnectionOpenAck will construct and execute a MsgConnectionOpenAck.
|
||||
func (chain *TestChain) ConnectionOpenAck(
|
||||
counterparty *TestChain,
|
||||
connection, counterpartyConnection *TestConnection,
|
||||
) error {
|
||||
counterpartyClient, proofClient := counterparty.QueryClientStateProof(counterpartyConnection.ClientID)
|
||||
|
||||
connectionKey := host.ConnectionKey(counterpartyConnection.ID)
|
||||
proofTry, proofHeight := counterparty.QueryProof(connectionKey)
|
||||
|
||||
proofConsensus, consensusHeight := counterparty.QueryConsensusStateProof(counterpartyConnection.ClientID)
|
||||
|
||||
msg := connectiontypes.NewMsgConnectionOpenAck(
|
||||
connection.ID, counterpartyConnection.ID, counterpartyClient, // testing doesn't use flexible selection
|
||||
proofTry, proofClient, proofConsensus,
|
||||
proofHeight, consensusHeight,
|
||||
ConnectionVersion,
|
||||
chain.SenderAccount.GetAddress(),
|
||||
)
|
||||
return chain.sendMsgs(msg)
|
||||
}
|
||||
|
||||
// ConnectionOpenConfirm will construct and execute a MsgConnectionOpenConfirm.
|
||||
func (chain *TestChain) ConnectionOpenConfirm(
|
||||
counterparty *TestChain,
|
||||
connection, counterpartyConnection *TestConnection,
|
||||
) error {
|
||||
connectionKey := host.ConnectionKey(counterpartyConnection.ID)
|
||||
proof, height := counterparty.QueryProof(connectionKey)
|
||||
|
||||
msg := connectiontypes.NewMsgConnectionOpenConfirm(
|
||||
connection.ID,
|
||||
proof, height,
|
||||
chain.SenderAccount.GetAddress(),
|
||||
)
|
||||
return chain.sendMsgs(msg)
|
||||
}
|
||||
|
||||
// CreatePortCapability binds and claims a capability for the given portID if it does not
|
||||
// already exist. This function will fail testing on any resulting error.
|
||||
// NOTE: only creation of a capbility for a transfer or mock port is supported
|
||||
// Other applications must bind to the port in InitGenesis or modify this code.
|
||||
func (chain *TestChain) CreatePortCapability(portID string) {
|
||||
// check if the portId is already binded, if not bind it
|
||||
_, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.PortPath(portID))
|
||||
if !ok {
|
||||
// create capability using the IBC capability keeper
|
||||
cap, err := chain.App.ScopedIBCKeeper.NewCapability(chain.GetContext(), host.PortPath(portID))
|
||||
require.NoError(chain.t, err)
|
||||
|
||||
switch portID {
|
||||
case MockPort:
|
||||
// claim capability using the mock capability keeper
|
||||
err = chain.App.ScopedIBCMockKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(portID))
|
||||
require.NoError(chain.t, err)
|
||||
case TransferPort:
|
||||
// claim capability using the transfer capability keeper
|
||||
err = chain.App.ScopedTransferKeeper.ClaimCapability(chain.GetContext(), cap, host.PortPath(portID))
|
||||
require.NoError(chain.t, err)
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported ibc testing package port ID %s", portID))
|
||||
}
|
||||
}
|
||||
|
||||
chain.App.Commit()
|
||||
|
||||
chain.NextBlock()
|
||||
}
|
||||
|
||||
// GetPortCapability returns the port capability for the given portID. The capability must
|
||||
// exist, otherwise testing will fail.
|
||||
func (chain *TestChain) GetPortCapability(portID string) *capabilitytypes.Capability {
|
||||
cap, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.PortPath(portID))
|
||||
require.True(chain.t, ok)
|
||||
|
||||
return cap
|
||||
}
|
||||
|
||||
// CreateChannelCapability binds and claims a capability for the given portID and channelID
|
||||
// if it does not already exist. This function will fail testing on any resulting error.
|
||||
func (chain *TestChain) CreateChannelCapability(portID, channelID string) {
|
||||
capName := host.ChannelCapabilityPath(portID, channelID)
|
||||
// check if the portId is already binded, if not bind it
|
||||
_, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), capName)
|
||||
if !ok {
|
||||
cap, err := chain.App.ScopedIBCKeeper.NewCapability(chain.GetContext(), capName)
|
||||
require.NoError(chain.t, err)
|
||||
err = chain.App.ScopedTransferKeeper.ClaimCapability(chain.GetContext(), cap, capName)
|
||||
require.NoError(chain.t, err)
|
||||
}
|
||||
|
||||
chain.App.Commit()
|
||||
|
||||
chain.NextBlock()
|
||||
}
|
||||
|
||||
// GetChannelCapability returns the channel capability for the given portID and channelID.
|
||||
// The capability must exist, otherwise testing will fail.
|
||||
func (chain *TestChain) GetChannelCapability(portID, channelID string) *capabilitytypes.Capability {
|
||||
cap, ok := chain.App.ScopedIBCKeeper.GetCapability(chain.GetContext(), host.ChannelCapabilityPath(portID, channelID))
|
||||
require.True(chain.t, ok)
|
||||
|
||||
return cap
|
||||
}
|
||||
|
||||
// ChanOpenInit will construct and execute a MsgChannelOpenInit.
|
||||
func (chain *TestChain) ChanOpenInit(
|
||||
ch, counterparty TestChannel,
|
||||
order channeltypes.Order,
|
||||
connectionID string,
|
||||
) error {
|
||||
msg := channeltypes.NewMsgChannelOpenInit(
|
||||
ch.PortID,
|
||||
ch.Version, order, []string{connectionID},
|
||||
counterparty.PortID,
|
||||
chain.SenderAccount.GetAddress(),
|
||||
)
|
||||
return chain.sendMsgs(msg)
|
||||
}
|
||||
|
||||
// ChanOpenTry will construct and execute a MsgChannelOpenTry.
|
||||
func (chain *TestChain) ChanOpenTry(
|
||||
counterparty *TestChain,
|
||||
ch, counterpartyCh TestChannel,
|
||||
order channeltypes.Order,
|
||||
connectionID string,
|
||||
) error {
|
||||
proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID))
|
||||
|
||||
msg := channeltypes.NewMsgChannelOpenTry(
|
||||
ch.PortID, "", // does not support handshake continuation
|
||||
ch.Version, order, []string{connectionID},
|
||||
counterpartyCh.PortID, counterpartyCh.ID, counterpartyCh.Version,
|
||||
proof, height,
|
||||
chain.SenderAccount.GetAddress(),
|
||||
)
|
||||
return chain.sendMsgs(msg)
|
||||
}
|
||||
|
||||
// ChanOpenAck will construct and execute a MsgChannelOpenAck.
|
||||
func (chain *TestChain) ChanOpenAck(
|
||||
counterparty *TestChain,
|
||||
ch, counterpartyCh TestChannel,
|
||||
) error {
|
||||
proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID))
|
||||
|
||||
msg := channeltypes.NewMsgChannelOpenAck(
|
||||
ch.PortID, ch.ID,
|
||||
counterpartyCh.ID, counterpartyCh.Version, // testing doesn't use flexible selection
|
||||
proof, height,
|
||||
chain.SenderAccount.GetAddress(),
|
||||
)
|
||||
return chain.sendMsgs(msg)
|
||||
}
|
||||
|
||||
// ChanOpenConfirm will construct and execute a MsgChannelOpenConfirm.
|
||||
func (chain *TestChain) ChanOpenConfirm(
|
||||
counterparty *TestChain,
|
||||
ch, counterpartyCh TestChannel,
|
||||
) error {
|
||||
proof, height := counterparty.QueryProof(host.ChannelKey(counterpartyCh.PortID, counterpartyCh.ID))
|
||||
|
||||
msg := channeltypes.NewMsgChannelOpenConfirm(
|
||||
ch.PortID, ch.ID,
|
||||
proof, height,
|
||||
chain.SenderAccount.GetAddress(),
|
||||
)
|
||||
return chain.sendMsgs(msg)
|
||||
}
|
||||
|
||||
// ChanCloseInit will construct and execute a MsgChannelCloseInit.
|
||||
//
|
||||
// NOTE: does not work with ibc-transfer module
|
||||
func (chain *TestChain) ChanCloseInit(
|
||||
counterparty *TestChain,
|
||||
channel TestChannel,
|
||||
) error {
|
||||
msg := channeltypes.NewMsgChannelCloseInit(
|
||||
channel.PortID, channel.ID,
|
||||
chain.SenderAccount.GetAddress(),
|
||||
)
|
||||
return chain.sendMsgs(msg)
|
||||
}
|
||||
|
||||
// GetPacketData returns a ibc-transfer marshalled packet to be used for
|
||||
// callback testing.
|
||||
func (chain *TestChain) GetPacketData(counterparty *TestChain) []byte {
|
||||
packet := ibctransfertypes.FungibleTokenPacketData{
|
||||
Denom: TestCoin.Denom,
|
||||
Amount: TestCoin.Amount.Uint64(),
|
||||
Sender: chain.SenderAccount.GetAddress().String(),
|
||||
Receiver: counterparty.SenderAccount.GetAddress().String(),
|
||||
}
|
||||
|
||||
return packet.GetBytes()
|
||||
}
|
||||
|
||||
// SendPacket simulates sending a packet through the channel keeper. No message needs to be
|
||||
// passed since this call is made from a module.
|
||||
func (chain *TestChain) SendPacket(
|
||||
packet exported.PacketI,
|
||||
) error {
|
||||
channelCap := chain.GetChannelCapability(packet.GetSourcePort(), packet.GetSourceChannel())
|
||||
|
||||
// no need to send message, acting as a module
|
||||
err := chain.App.IBCKeeper.ChannelKeeper.SendPacket(chain.GetContext(), channelCap, packet)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// commit changes
|
||||
chain.App.Commit()
|
||||
chain.NextBlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteAcknowledgement simulates writing an acknowledgement to the chain.
|
||||
func (chain *TestChain) WriteAcknowledgement(
|
||||
packet exported.PacketI,
|
||||
) error {
|
||||
channelCap := chain.GetChannelCapability(packet.GetDestPort(), packet.GetDestChannel())
|
||||
|
||||
// no need to send message, acting as a handler
|
||||
err := chain.App.IBCKeeper.ChannelKeeper.WriteAcknowledgement(chain.GetContext(), channelCap, packet, TestHash)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// commit changes
|
||||
chain.App.Commit()
|
||||
chain.NextBlock()
|
||||
|
||||
return nil
|
||||
}
|
|
@ -345,11 +345,7 @@ func TestUndelegateSelfDelegationBelowMinSelfDelegation(t *testing.T) {
|
|||
app.StakingKeeper.SetDelegation(ctx, delegation)
|
||||
|
||||
val0AccAddr := sdk.AccAddress(addrVals[0].Bytes())
// Merge conflict resolved in favor of HEAD: use the context-aware
// app.StakingKeeper.TokensFromConsensusPower(ctx, 6), which reads the new
// per-chain power_reduction staking param, instead of upstream's global
// sdk.TokensFromConsensusPower(6). The stray <<<<<<</=======/>>>>>>> markers
// and the duplicated statement are removed.
_, err := app.StakingKeeper.Undelegate(ctx, val0AccAddr, addrVals[0], app.StakingKeeper.TokensFromConsensusPower(ctx, 6).ToDec())
require.NoError(t, err)
|
||||
|
||||
// end block
|
||||
|
|
|
@ -1,17 +0,0 @@
|
|||
package v040
|
||||
|
||||
import (
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// String implements the Stringer interface for a Commission object.
|
||||
func (c Commission) String() string {
|
||||
out, _ := yaml.Marshal(c)
|
||||
return string(out)
|
||||
}
|
||||
|
||||
// String implements the Stringer interface for a CommissionRates object.
|
||||
func (cr CommissionRates) String() string {
|
||||
out, _ := yaml.Marshal(cr)
|
||||
return string(out)
|
||||
}
|
|
@ -1,100 +0,0 @@
|
|||
package v040

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

// String returns a human readable string representation of a Delegation.
func (d Delegation) String() string {
	out, _ := yaml.Marshal(d)
	return string(out)
}

// Delegations is a collection of delegations
type Delegations []Delegation

func (d Delegations) String() (out string) {
	for _, del := range d {
		out += del.String() + "\n"
	}

	return strings.TrimSpace(out)
}

// String implements the stringer interface for a UnbondingDelegationEntry.
func (e UnbondingDelegationEntry) String() string {
	out, _ := yaml.Marshal(e)
	return string(out)
}

// String returns a human readable string representation of an UnbondingDelegation.
func (ubd UnbondingDelegation) String() string {
	out := fmt.Sprintf(`Unbonding Delegations between:
  Delegator:                 %s
    Validator:               %s
	Entries:`, ubd.DelegatorAddress, ubd.ValidatorAddress)
	for i, entry := range ubd.Entries {
		out += fmt.Sprintf(`    Unbonding Delegation %d:
      Creation Height:           %v
      Min time to unbond (unix): %v
      Expected balance:          %s`, i, entry.CreationHeight,
			entry.CompletionTime, entry.Balance)
	}

	return out
}

// UnbondingDelegations is a collection of UnbondingDelegation
type UnbondingDelegations []UnbondingDelegation

func (ubds UnbondingDelegations) String() (out string) {
	for _, u := range ubds {
		out += u.String() + "\n"
	}

	return strings.TrimSpace(out)
}

// String implements the Stringer interface for a RedelegationEntry object.
func (e RedelegationEntry) String() string {
	out, _ := yaml.Marshal(e)
	return string(out)
}

// String returns a human readable string representation of a Redelegation.
func (red Redelegation) String() string {
	out := fmt.Sprintf(`Redelegations between:
  Delegator:                 %s
  Source Validator:          %s
  Destination Validator:     %s
  Entries:
`,
		red.DelegatorAddress, red.ValidatorSrcAddress, red.ValidatorDstAddress,
	)

	for i, entry := range red.Entries {
		out += fmt.Sprintf(`    Redelegation Entry #%d:
      Creation height:           %v
      Min time to unbond (unix): %v
      Dest Shares:               %s
`,
			i, entry.CreationHeight, entry.CompletionTime, entry.SharesDst,
		)
	}

	return strings.TrimRight(out, "\n")
}

// Redelegations are a collection of Redelegation
type Redelegations []Redelegation

func (d Redelegations) String() (out string) {
	for _, red := range d {
		out += red.String() + "\n"
	}

	return strings.TrimSpace(out)
}
|
|
@ -1,774 +0,0 @@
|
|||
package v040
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
io "io"
|
||||
math_bits "math/bits"
|
||||
|
||||
github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
)
|
||||
|
||||
// GenesisState defines the staking module's genesis state.
|
||||
type GenesisState struct {
|
||||
// params defines all the parameters of related to deposit.
|
||||
Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"`
|
||||
// last_total_power tracks the total amounts of bonded tokens recorded during
|
||||
// the previous end block.
|
||||
LastTotalPower github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,2,opt,name=last_total_power,json=lastTotalPower,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"last_total_power" yaml:"last_total_power"`
|
||||
// last_validator_powers is a special index that provides a historical list
|
||||
// of the last-block's bonded validators.
|
||||
LastValidatorPowers []LastValidatorPower `protobuf:"bytes,3,rep,name=last_validator_powers,json=lastValidatorPowers,proto3" json:"last_validator_powers" yaml:"last_validator_powers"`
|
||||
// delegations defines the validator set at genesis.
|
||||
Validators []Validator `protobuf:"bytes,4,rep,name=validators,proto3" json:"validators"`
|
||||
// delegations defines the delegations active at genesis.
|
||||
Delegations []Delegation `protobuf:"bytes,5,rep,name=delegations,proto3" json:"delegations"`
|
||||
// unbonding_delegations defines the unbonding delegations active at genesis.
|
||||
UnbondingDelegations []UnbondingDelegation `protobuf:"bytes,6,rep,name=unbonding_delegations,json=unbondingDelegations,proto3" json:"unbonding_delegations" yaml:"unbonding_delegations"`
|
||||
// redelegations defines the redelegations active at genesis.
|
||||
Redelegations []Redelegation `protobuf:"bytes,7,rep,name=redelegations,proto3" json:"redelegations"`
|
||||
Exported bool `protobuf:"varint,8,opt,name=exported,proto3" json:"exported,omitempty"`
|
||||
}
|
||||
|
||||
func (m *GenesisState) Reset() { *m = GenesisState{} }
|
||||
func (m *GenesisState) String() string { return proto.CompactTextString(m) }
|
||||
func (*GenesisState) ProtoMessage() {}
|
||||
|
||||
// LastValidatorPower required for validator set update logic.
|
||||
type LastValidatorPower struct {
|
||||
// address is the address of the validator.
|
||||
Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
|
||||
// power defines the power of the validator.
|
||||
Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"`
|
||||
}
|
||||
|
||||
func (m *LastValidatorPower) Reset() { *m = LastValidatorPower{} }
|
||||
func (m *LastValidatorPower) String() string { return proto.CompactTextString(m) }
|
||||
func (*LastValidatorPower) ProtoMessage() {}
|
||||
|
||||
func (m *GenesisState) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Exported {
|
||||
i--
|
||||
if m.Exported {
|
||||
dAtA[i] = 1
|
||||
} else {
|
||||
dAtA[i] = 0
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x40
|
||||
}
|
||||
if len(m.Redelegations) > 0 {
|
||||
for iNdEx := len(m.Redelegations) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Redelegations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenesis(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x3a
|
||||
}
|
||||
}
|
||||
if len(m.UnbondingDelegations) > 0 {
|
||||
for iNdEx := len(m.UnbondingDelegations) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.UnbondingDelegations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenesis(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x32
|
||||
}
|
||||
}
|
||||
if len(m.Delegations) > 0 {
|
||||
for iNdEx := len(m.Delegations) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Delegations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenesis(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x2a
|
||||
}
|
||||
}
|
||||
if len(m.Validators) > 0 {
|
||||
for iNdEx := len(m.Validators) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Validators[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenesis(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x22
|
||||
}
|
||||
}
|
||||
if len(m.LastValidatorPowers) > 0 {
|
||||
for iNdEx := len(m.LastValidatorPowers) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.LastValidatorPowers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenesis(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x1a
|
||||
}
|
||||
}
|
||||
{
|
||||
size := m.LastTotalPower.Size()
|
||||
i -= size
|
||||
if _, err := m.LastTotalPower.MarshalTo(dAtA[i:]); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i = encodeVarintGenesis(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
{
|
||||
size, err := m.Params.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenesis(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *LastValidatorPower) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *LastValidatorPower) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *LastValidatorPower) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.Power != 0 {
|
||||
i = encodeVarintGenesis(dAtA, i, uint64(m.Power))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if len(m.Address) > 0 {
|
||||
i -= len(m.Address)
|
||||
copy(dAtA[i:], m.Address)
|
||||
i = encodeVarintGenesis(dAtA, i, uint64(len(m.Address)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovGenesis(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *GenesisState) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = m.Params.Size()
|
||||
n += 1 + l + sovGenesis(uint64(l))
|
||||
l = m.LastTotalPower.Size()
|
||||
n += 1 + l + sovGenesis(uint64(l))
|
||||
if len(m.LastValidatorPowers) > 0 {
|
||||
for _, e := range m.LastValidatorPowers {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovGenesis(uint64(l))
|
||||
}
|
||||
}
|
||||
if len(m.Validators) > 0 {
|
||||
for _, e := range m.Validators {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovGenesis(uint64(l))
|
||||
}
|
||||
}
|
||||
if len(m.Delegations) > 0 {
|
||||
for _, e := range m.Delegations {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovGenesis(uint64(l))
|
||||
}
|
||||
}
|
||||
if len(m.UnbondingDelegations) > 0 {
|
||||
for _, e := range m.UnbondingDelegations {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovGenesis(uint64(l))
|
||||
}
|
||||
}
|
||||
if len(m.Redelegations) > 0 {
|
||||
for _, e := range m.Redelegations {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovGenesis(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.Exported {
|
||||
n += 2
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *LastValidatorPower) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Address)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovGenesis(uint64(l))
|
||||
}
|
||||
if m.Power != 0 {
|
||||
n += 1 + sovGenesis(uint64(m.Power))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovGenesis(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
|
||||
func (m *GenesisState) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: GenesisState: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field LastTotalPower", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := m.LastTotalPower.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field LastValidatorPowers", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.LastValidatorPowers = append(m.LastValidatorPowers, LastValidatorPower{})
|
||||
if err := m.LastValidatorPowers[len(m.LastValidatorPowers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 4:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Validators = append(m.Validators, Validator{})
|
||||
if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 5:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Delegations", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Delegations = append(m.Delegations, Delegation{})
|
||||
if err := m.Delegations[len(m.Delegations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 6:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field UnbondingDelegations", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.UnbondingDelegations = append(m.UnbondingDelegations, UnbondingDelegation{})
|
||||
if err := m.UnbondingDelegations[len(m.UnbondingDelegations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 7:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Redelegations", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Redelegations = append(m.Redelegations, Redelegation{})
|
||||
if err := m.Redelegations[len(m.Redelegations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 8:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Exported", wireType)
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
m.Exported = bool(v != 0)
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenesis(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *LastValidatorPower) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: LastValidatorPower: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: LastValidatorPower: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Address = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType)
|
||||
}
|
||||
m.Power = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Power |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenesis(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGenesis
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipGenesis(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowGenesis
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthGenesis
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupGenesis
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthGenesis
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
|
@ -1,5 +1,330 @@
|
|||
// Package v040 is copy-pasted from:
|
||||
// https://github.com/cosmos/cosmos-sdk/blob/v0.41.0/x/staking/types/keys.go
|
||||
package v040
|
||||
|
||||
const (
|
||||
ModuleName = "staking"
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
v040auth "github.com/cosmos/cosmos-sdk/x/auth/legacy/v040"
|
||||
"github.com/cosmos/cosmos-sdk/x/staking/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// ModuleName is the name of the staking module
|
||||
ModuleName = "staking"
|
||||
|
||||
// StoreKey is the string store representation
|
||||
StoreKey = ModuleName
|
||||
|
||||
// QuerierRoute is the querier route for the staking module
|
||||
QuerierRoute = ModuleName
|
||||
|
||||
// RouterKey is the msg router key for the staking module
|
||||
RouterKey = ModuleName
|
||||
)
|
||||
|
||||
var (
|
||||
// Keys for store prefixes
|
||||
// Last* values are constant during a block.
|
||||
LastValidatorPowerKey = []byte{0x11} // prefix for each key to a validator index, for bonded validators
|
||||
LastTotalPowerKey = []byte{0x12} // prefix for the total power
|
||||
|
||||
ValidatorsKey = []byte{0x21} // prefix for each key to a validator
|
||||
ValidatorsByConsAddrKey = []byte{0x22} // prefix for each key to a validator index, by pubkey
|
||||
ValidatorsByPowerIndexKey = []byte{0x23} // prefix for each key to a validator index, sorted by power
|
||||
|
||||
DelegationKey = []byte{0x31} // key for a delegation
|
||||
UnbondingDelegationKey = []byte{0x32} // key for an unbonding-delegation
|
||||
UnbondingDelegationByValIndexKey = []byte{0x33} // prefix for each key for an unbonding-delegation, by validator operator
|
||||
RedelegationKey = []byte{0x34} // key for a redelegation
|
||||
RedelegationByValSrcIndexKey = []byte{0x35} // prefix for each key for an redelegation, by source validator operator
|
||||
RedelegationByValDstIndexKey = []byte{0x36} // prefix for each key for an redelegation, by destination validator operator
|
||||
|
||||
UnbondingQueueKey = []byte{0x41} // prefix for the timestamps in unbonding queue
|
||||
RedelegationQueueKey = []byte{0x42} // prefix for the timestamps in redelegations queue
|
||||
ValidatorQueueKey = []byte{0x43} // prefix for the timestamps in validator queue
|
||||
|
||||
HistoricalInfoKey = []byte{0x50} // prefix for the historical info
|
||||
)
|
||||
|
||||
// gets the key for the validator with address
|
||||
// VALUE: staking/Validator
|
||||
func GetValidatorKey(operatorAddr sdk.ValAddress) []byte {
|
||||
return append(ValidatorsKey, operatorAddr.Bytes()...)
|
||||
}
|
||||
|
||||
// gets the key for the validator with pubkey
|
||||
// VALUE: validator operator address ([]byte)
|
||||
func GetValidatorByConsAddrKey(addr sdk.ConsAddress) []byte {
|
||||
return append(ValidatorsByConsAddrKey, addr.Bytes()...)
|
||||
}
|
||||
|
||||
// Get the validator operator address from LastValidatorPowerKey
|
||||
func AddressFromLastValidatorPowerKey(key []byte) []byte {
|
||||
return key[1:] // remove prefix bytes
|
||||
}
|
||||
|
||||
// get the validator by power index.
|
||||
// Power index is the key used in the power-store, and represents the relative
|
||||
// power ranking of the validator.
|
||||
// VALUE: validator operator address ([]byte)
|
||||
func GetValidatorsByPowerIndexKey(validator types.Validator) []byte {
|
||||
// NOTE the address doesn't need to be stored because counter bytes must always be different
|
||||
// NOTE the larger values are of higher value
|
||||
|
||||
consensusPower := sdk.TokensToConsensusPower(validator.Tokens, sdk.DefaultPowerReduction)
|
||||
consensusPowerBytes := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(consensusPowerBytes, uint64(consensusPower))
|
||||
|
||||
powerBytes := consensusPowerBytes
|
||||
powerBytesLen := len(powerBytes) // 8
|
||||
|
||||
// key is of format prefix || powerbytes || addrBytes
|
||||
key := make([]byte, 1+powerBytesLen+v040auth.AddrLen)
|
||||
|
||||
key[0] = ValidatorsByPowerIndexKey[0]
|
||||
copy(key[1:powerBytesLen+1], powerBytes)
|
||||
addr, err := sdk.ValAddressFromBech32(validator.OperatorAddress)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
operAddrInvr := sdk.CopyBytes(addr)
|
||||
|
||||
for i, b := range operAddrInvr {
|
||||
operAddrInvr[i] = ^b
|
||||
}
|
||||
|
||||
copy(key[powerBytesLen+1:], operAddrInvr)
|
||||
|
||||
return key
|
||||
}
|
||||
|
||||
// get the bonded validator index key for an operator address
|
||||
func GetLastValidatorPowerKey(operator sdk.ValAddress) []byte {
|
||||
return append(LastValidatorPowerKey, operator...)
|
||||
}
|
||||
|
||||
// parse the validators operator address from power rank key
|
||||
func ParseValidatorPowerRankKey(key []byte) (operAddr []byte) {
|
||||
powerBytesLen := 8
|
||||
if len(key) != 1+powerBytesLen+v040auth.AddrLen {
|
||||
panic("Invalid validator power rank key length")
|
||||
}
|
||||
|
||||
operAddr = sdk.CopyBytes(key[powerBytesLen+1:])
|
||||
|
||||
for i, b := range operAddr {
|
||||
operAddr[i] = ^b
|
||||
}
|
||||
|
||||
return operAddr
|
||||
}
|
||||
|
||||
// GetValidatorQueueKey returns the prefix key used for getting a set of unbonding
|
||||
// validators whose unbonding completion occurs at the given time and height.
|
||||
func GetValidatorQueueKey(timestamp time.Time, height int64) []byte {
|
||||
heightBz := sdk.Uint64ToBigEndian(uint64(height))
|
||||
timeBz := sdk.FormatTimeBytes(timestamp)
|
||||
timeBzL := len(timeBz)
|
||||
prefixL := len(ValidatorQueueKey)
|
||||
|
||||
bz := make([]byte, prefixL+8+timeBzL+8)
|
||||
|
||||
// copy the prefix
|
||||
copy(bz[:prefixL], ValidatorQueueKey)
|
||||
|
||||
// copy the encoded time bytes length
|
||||
copy(bz[prefixL:prefixL+8], sdk.Uint64ToBigEndian(uint64(timeBzL)))
|
||||
|
||||
// copy the encoded time bytes
|
||||
copy(bz[prefixL+8:prefixL+8+timeBzL], timeBz)
|
||||
|
||||
// copy the encoded height
|
||||
copy(bz[prefixL+8+timeBzL:], heightBz)
|
||||
|
||||
return bz
|
||||
}
|
||||
|
||||
// ParseValidatorQueueKey returns the encoded time and height from a key created
|
||||
// from GetValidatorQueueKey.
|
||||
func ParseValidatorQueueKey(bz []byte) (time.Time, int64, error) {
|
||||
prefixL := len(ValidatorQueueKey)
|
||||
if prefix := bz[:prefixL]; !bytes.Equal(prefix, ValidatorQueueKey) {
|
||||
return time.Time{}, 0, fmt.Errorf("invalid prefix; expected: %X, got: %X", ValidatorQueueKey, prefix)
|
||||
}
|
||||
|
||||
timeBzL := sdk.BigEndianToUint64(bz[prefixL : prefixL+8])
|
||||
ts, err := sdk.ParseTimeBytes(bz[prefixL+8 : prefixL+8+int(timeBzL)])
|
||||
if err != nil {
|
||||
return time.Time{}, 0, err
|
||||
}
|
||||
|
||||
height := sdk.BigEndianToUint64(bz[prefixL+8+int(timeBzL):])
|
||||
|
||||
return ts, int64(height), nil
|
||||
}
|
||||
|
||||
// gets the key for delegator bond with validator
|
||||
// VALUE: staking/Delegation
|
||||
func GetDelegationKey(delAddr sdk.AccAddress, valAddr sdk.ValAddress) []byte {
|
||||
return append(GetDelegationsKey(delAddr), valAddr.Bytes()...)
|
||||
}
|
||||
|
||||
// gets the prefix for a delegator for all validators
|
||||
func GetDelegationsKey(delAddr sdk.AccAddress) []byte {
|
||||
return append(DelegationKey, delAddr.Bytes()...)
|
||||
}
|
||||
|
||||
// gets the key for an unbonding delegation by delegator and validator addr
|
||||
// VALUE: staking/UnbondingDelegation
|
||||
func GetUBDKey(delAddr sdk.AccAddress, valAddr sdk.ValAddress) []byte {
|
||||
return append(
|
||||
GetUBDsKey(delAddr.Bytes()),
|
||||
valAddr.Bytes()...)
|
||||
}
|
||||
|
||||
// gets the index-key for an unbonding delegation, stored by validator-index
|
||||
// VALUE: none (key rearrangement used)
|
||||
func GetUBDByValIndexKey(delAddr sdk.AccAddress, valAddr sdk.ValAddress) []byte {
|
||||
return append(GetUBDsByValIndexKey(valAddr), delAddr.Bytes()...)
|
||||
}
|
||||
|
||||
// rearranges the ValIndexKey to get the UBDKey
|
||||
func GetUBDKeyFromValIndexKey(indexKey []byte) []byte {
|
||||
addrs := indexKey[1:] // remove prefix bytes
|
||||
if len(addrs) != 2*v040auth.AddrLen {
|
||||
panic("unexpected key length")
|
||||
}
|
||||
|
||||
valAddr := addrs[:v040auth.AddrLen]
|
||||
delAddr := addrs[v040auth.AddrLen:]
|
||||
|
||||
return GetUBDKey(delAddr, valAddr)
|
||||
}
|
||||
|
||||
// gets the prefix for all unbonding delegations from a delegator
|
||||
func GetUBDsKey(delAddr sdk.AccAddress) []byte {
|
||||
return append(UnbondingDelegationKey, delAddr.Bytes()...)
|
||||
}
|
||||
|
||||
// gets the prefix keyspace for the indexes of unbonding delegations for a validator
|
||||
func GetUBDsByValIndexKey(valAddr sdk.ValAddress) []byte {
|
||||
return append(UnbondingDelegationByValIndexKey, valAddr.Bytes()...)
|
||||
}
|
||||
|
||||
// gets the prefix for all unbonding delegations from a delegator
|
||||
func GetUnbondingDelegationTimeKey(timestamp time.Time) []byte {
|
||||
bz := sdk.FormatTimeBytes(timestamp)
|
||||
return append(UnbondingQueueKey, bz...)
|
||||
}
|
||||
|
||||
// GetREDKey returns a key prefix for indexing a redelegation from a delegator
|
||||
// and source validator to a destination validator.
|
||||
func GetREDKey(delAddr sdk.AccAddress, valSrcAddr, valDstAddr sdk.ValAddress) []byte {
|
||||
key := make([]byte, 1+v040auth.AddrLen*3)
|
||||
|
||||
copy(key[0:v040auth.AddrLen+1], GetREDsKey(delAddr.Bytes()))
|
||||
copy(key[v040auth.AddrLen+1:2*v040auth.AddrLen+1], valSrcAddr.Bytes())
|
||||
copy(key[2*v040auth.AddrLen+1:3*v040auth.AddrLen+1], valDstAddr.Bytes())
|
||||
|
||||
return key
|
||||
}
|
||||
|
||||
// gets the index-key for a redelegation, stored by source-validator-index
|
||||
// VALUE: none (key rearrangement used)
|
||||
func GetREDByValSrcIndexKey(delAddr sdk.AccAddress, valSrcAddr, valDstAddr sdk.ValAddress) []byte {
|
||||
REDSFromValsSrcKey := GetREDsFromValSrcIndexKey(valSrcAddr)
|
||||
offset := len(REDSFromValsSrcKey)
|
||||
|
||||
// key is of the form REDSFromValsSrcKey || delAddr || valDstAddr
|
||||
key := make([]byte, len(REDSFromValsSrcKey)+2*v040auth.AddrLen)
|
||||
copy(key[0:offset], REDSFromValsSrcKey)
|
||||
copy(key[offset:offset+v040auth.AddrLen], delAddr.Bytes())
|
||||
copy(key[offset+v040auth.AddrLen:offset+2*v040auth.AddrLen], valDstAddr.Bytes())
|
||||
|
||||
return key
|
||||
}
|
||||
|
||||
// gets the index-key for a redelegation, stored by destination-validator-index
|
||||
// VALUE: none (key rearrangement used)
|
||||
func GetREDByValDstIndexKey(delAddr sdk.AccAddress, valSrcAddr, valDstAddr sdk.ValAddress) []byte {
|
||||
REDSToValsDstKey := GetREDsToValDstIndexKey(valDstAddr)
|
||||
offset := len(REDSToValsDstKey)
|
||||
|
||||
// key is of the form REDSToValsDstKey || delAddr || valSrcAddr
|
||||
key := make([]byte, len(REDSToValsDstKey)+2*v040auth.AddrLen)
|
||||
copy(key[0:offset], REDSToValsDstKey)
|
||||
copy(key[offset:offset+v040auth.AddrLen], delAddr.Bytes())
|
||||
copy(key[offset+v040auth.AddrLen:offset+2*v040auth.AddrLen], valSrcAddr.Bytes())
|
||||
|
||||
return key
|
||||
}
|
||||
|
||||
// GetREDKeyFromValSrcIndexKey rearranges the ValSrcIndexKey to get the REDKey
|
||||
func GetREDKeyFromValSrcIndexKey(indexKey []byte) []byte {
|
||||
// note that first byte is prefix byte
|
||||
if len(indexKey) != 3*v040auth.AddrLen+1 {
|
||||
panic("unexpected key length")
|
||||
}
|
||||
|
||||
valSrcAddr := indexKey[1 : v040auth.AddrLen+1]
|
||||
delAddr := indexKey[v040auth.AddrLen+1 : 2*v040auth.AddrLen+1]
|
||||
valDstAddr := indexKey[2*v040auth.AddrLen+1 : 3*v040auth.AddrLen+1]
|
||||
|
||||
return GetREDKey(delAddr, valSrcAddr, valDstAddr)
|
||||
}
|
||||
|
||||
// GetREDKeyFromValDstIndexKey rearranges the ValDstIndexKey to get the REDKey
|
||||
func GetREDKeyFromValDstIndexKey(indexKey []byte) []byte {
|
||||
// note that first byte is prefix byte
|
||||
if len(indexKey) != 3*v040auth.AddrLen+1 {
|
||||
panic("unexpected key length")
|
||||
}
|
||||
|
||||
valDstAddr := indexKey[1 : v040auth.AddrLen+1]
|
||||
delAddr := indexKey[v040auth.AddrLen+1 : 2*v040auth.AddrLen+1]
|
||||
valSrcAddr := indexKey[2*v040auth.AddrLen+1 : 3*v040auth.AddrLen+1]
|
||||
|
||||
return GetREDKey(delAddr, valSrcAddr, valDstAddr)
|
||||
}
|
||||
|
||||
// GetRedelegationTimeKey returns a key prefix for indexing an unbonding
|
||||
// redelegation based on a completion time.
|
||||
func GetRedelegationTimeKey(timestamp time.Time) []byte {
|
||||
bz := sdk.FormatTimeBytes(timestamp)
|
||||
return append(RedelegationQueueKey, bz...)
|
||||
}
|
||||
|
||||
// GetREDsKey returns a key prefix for indexing a redelegation from a delegator
|
||||
// address.
|
||||
func GetREDsKey(delAddr sdk.AccAddress) []byte {
|
||||
return append(RedelegationKey, delAddr.Bytes()...)
|
||||
}
|
||||
|
||||
// GetREDsFromValSrcIndexKey returns a key prefix for indexing a redelegation to
|
||||
// a source validator.
|
||||
func GetREDsFromValSrcIndexKey(valSrcAddr sdk.ValAddress) []byte {
|
||||
return append(RedelegationByValSrcIndexKey, valSrcAddr.Bytes()...)
|
||||
}
|
||||
|
||||
// GetREDsToValDstIndexKey returns a key prefix for indexing a redelegation to a
|
||||
// destination (target) validator.
|
||||
func GetREDsToValDstIndexKey(valDstAddr sdk.ValAddress) []byte {
|
||||
return append(RedelegationByValDstIndexKey, valDstAddr.Bytes()...)
|
||||
}
|
||||
|
||||
// GetREDsByDelToValDstIndexKey returns a key prefix for indexing a redelegation
|
||||
// from an address to a source validator.
|
||||
func GetREDsByDelToValDstIndexKey(delAddr sdk.AccAddress, valDstAddr sdk.ValAddress) []byte {
|
||||
return append(GetREDsToValDstIndexKey(valDstAddr), delAddr.Bytes()...)
|
||||
}
|
||||
|
||||
// GetHistoricalInfoKey returns a key prefix for indexing HistoricalInfo objects.
|
||||
func GetHistoricalInfoKey(height int64) []byte {
|
||||
return append(HistoricalInfoKey, []byte(strconv.FormatInt(height, 10))...)
|
||||
}
|
||||
|
|
|
@ -6,18 +6,19 @@ import (
|
|||
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
|
||||
v034staking "github.com/cosmos/cosmos-sdk/x/staking/legacy/v034"
|
||||
v038staking "github.com/cosmos/cosmos-sdk/x/staking/legacy/v038"
|
||||
v040staking "github.com/cosmos/cosmos-sdk/x/staking/types"
|
||||
)
|
||||
|
||||
func migrateBondStatus(oldStatus v034staking.BondStatus) BondStatus {
|
||||
func migrateBondStatus(oldStatus v034staking.BondStatus) v040staking.BondStatus {
|
||||
switch oldStatus {
|
||||
case v034staking.Unbonded:
|
||||
return Unbonded
|
||||
return v040staking.Unbonded
|
||||
|
||||
case v034staking.Unbonding:
|
||||
return Unbonding
|
||||
return v040staking.Unbonding
|
||||
|
||||
case v034staking.Bonded:
|
||||
return Bonded
|
||||
return v040staking.Bonded
|
||||
|
||||
default:
|
||||
panic(fmt.Errorf("invalid bond status %d", oldStatus))
|
||||
|
@ -30,29 +31,29 @@ func migrateBondStatus(oldStatus v034staking.BondStatus) BondStatus {
|
|||
// - Convert addresses from bytes to bech32 strings.
|
||||
// - Update BondStatus staking constants.
|
||||
// - Re-encode in v0.40 GenesisState.
|
||||
func Migrate(stakingState v038staking.GenesisState) *GenesisState {
|
||||
newLastValidatorPowers := make([]LastValidatorPower, len(stakingState.LastValidatorPowers))
|
||||
func Migrate(stakingState v038staking.GenesisState) *v040staking.GenesisState {
|
||||
newLastValidatorPowers := make([]v040staking.LastValidatorPower, len(stakingState.LastValidatorPowers))
|
||||
for i, oldLastValidatorPower := range stakingState.LastValidatorPowers {
|
||||
newLastValidatorPowers[i] = LastValidatorPower{
|
||||
newLastValidatorPowers[i] = v040staking.LastValidatorPower{
|
||||
Address: oldLastValidatorPower.Address.String(),
|
||||
Power: oldLastValidatorPower.Power,
|
||||
}
|
||||
}
|
||||
|
||||
newValidators := make([]Validator, len(stakingState.Validators))
|
||||
newValidators := make([]v040staking.Validator, len(stakingState.Validators))
|
||||
for i, oldValidator := range stakingState.Validators {
|
||||
pkAny, err := codectypes.NewAnyWithValue(oldValidator.ConsPubKey)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Can't pack validator consensus PK as Any: %s", err))
|
||||
}
|
||||
newValidators[i] = Validator{
|
||||
newValidators[i] = v040staking.Validator{
|
||||
OperatorAddress: oldValidator.OperatorAddress.String(),
|
||||
ConsensusPubkey: pkAny,
|
||||
Jailed: oldValidator.Jailed,
|
||||
Status: migrateBondStatus(oldValidator.Status),
|
||||
Tokens: oldValidator.Tokens,
|
||||
DelegatorShares: oldValidator.DelegatorShares,
|
||||
Description: Description{
|
||||
Description: v040staking.Description{
|
||||
Moniker: oldValidator.Description.Moniker,
|
||||
Identity: oldValidator.Description.Identity,
|
||||
Website: oldValidator.Description.Website,
|
||||
|
@ -61,8 +62,8 @@ func Migrate(stakingState v038staking.GenesisState) *GenesisState {
|
|||
},
|
||||
UnbondingHeight: oldValidator.UnbondingHeight,
|
||||
UnbondingTime: oldValidator.UnbondingCompletionTime,
|
||||
Commission: Commission{
|
||||
CommissionRates: CommissionRates{
|
||||
Commission: v040staking.Commission{
|
||||
CommissionRates: v040staking.CommissionRates{
|
||||
Rate: oldValidator.Commission.Rate,
|
||||
MaxRate: oldValidator.Commission.MaxRate,
|
||||
MaxChangeRate: oldValidator.Commission.MaxChangeRate,
|
||||
|
@ -73,20 +74,20 @@ func Migrate(stakingState v038staking.GenesisState) *GenesisState {
|
|||
}
|
||||
}
|
||||
|
||||
newDelegations := make([]Delegation, len(stakingState.Delegations))
|
||||
newDelegations := make([]v040staking.Delegation, len(stakingState.Delegations))
|
||||
for i, oldDelegation := range stakingState.Delegations {
|
||||
newDelegations[i] = Delegation{
|
||||
newDelegations[i] = v040staking.Delegation{
|
||||
DelegatorAddress: oldDelegation.DelegatorAddress.String(),
|
||||
ValidatorAddress: oldDelegation.ValidatorAddress.String(),
|
||||
Shares: oldDelegation.Shares,
|
||||
}
|
||||
}
|
||||
|
||||
newUnbondingDelegations := make([]UnbondingDelegation, len(stakingState.UnbondingDelegations))
|
||||
newUnbondingDelegations := make([]v040staking.UnbondingDelegation, len(stakingState.UnbondingDelegations))
|
||||
for i, oldUnbondingDelegation := range stakingState.UnbondingDelegations {
|
||||
newEntries := make([]UnbondingDelegationEntry, len(oldUnbondingDelegation.Entries))
|
||||
newEntries := make([]v040staking.UnbondingDelegationEntry, len(oldUnbondingDelegation.Entries))
|
||||
for j, oldEntry := range oldUnbondingDelegation.Entries {
|
||||
newEntries[j] = UnbondingDelegationEntry{
|
||||
newEntries[j] = v040staking.UnbondingDelegationEntry{
|
||||
CreationHeight: oldEntry.CreationHeight,
|
||||
CompletionTime: oldEntry.CompletionTime,
|
||||
InitialBalance: oldEntry.InitialBalance,
|
||||
|
@ -94,18 +95,18 @@ func Migrate(stakingState v038staking.GenesisState) *GenesisState {
|
|||
}
|
||||
}
|
||||
|
||||
newUnbondingDelegations[i] = UnbondingDelegation{
|
||||
newUnbondingDelegations[i] = v040staking.UnbondingDelegation{
|
||||
DelegatorAddress: oldUnbondingDelegation.DelegatorAddress.String(),
|
||||
ValidatorAddress: oldUnbondingDelegation.ValidatorAddress.String(),
|
||||
Entries: newEntries,
|
||||
}
|
||||
}
|
||||
|
||||
newRedelegations := make([]Redelegation, len(stakingState.Redelegations))
|
||||
newRedelegations := make([]v040staking.Redelegation, len(stakingState.Redelegations))
|
||||
for i, oldRedelegation := range stakingState.Redelegations {
|
||||
newEntries := make([]RedelegationEntry, len(oldRedelegation.Entries))
|
||||
newEntries := make([]v040staking.RedelegationEntry, len(oldRedelegation.Entries))
|
||||
for j, oldEntry := range oldRedelegation.Entries {
|
||||
newEntries[j] = RedelegationEntry{
|
||||
newEntries[j] = v040staking.RedelegationEntry{
|
||||
CreationHeight: oldEntry.CreationHeight,
|
||||
CompletionTime: oldEntry.CompletionTime,
|
||||
InitialBalance: oldEntry.InitialBalance,
|
||||
|
@ -113,7 +114,7 @@ func Migrate(stakingState v038staking.GenesisState) *GenesisState {
|
|||
}
|
||||
}
|
||||
|
||||
newRedelegations[i] = Redelegation{
|
||||
newRedelegations[i] = v040staking.Redelegation{
|
||||
DelegatorAddress: oldRedelegation.DelegatorAddress.String(),
|
||||
ValidatorSrcAddress: oldRedelegation.ValidatorSrcAddress.String(),
|
||||
ValidatorDstAddress: oldRedelegation.ValidatorDstAddress.String(),
|
||||
|
@ -121,8 +122,8 @@ func Migrate(stakingState v038staking.GenesisState) *GenesisState {
|
|||
}
|
||||
}
|
||||
|
||||
return &GenesisState{
|
||||
Params: Params{
|
||||
return &v040staking.GenesisState{
|
||||
Params: v040staking.Params{
|
||||
UnbondingTime: stakingState.Params.UnbondingTime,
|
||||
MaxValidators: uint32(stakingState.Params.MaxValidators),
|
||||
MaxEntries: uint32(stakingState.Params.MaxEntries),
|
||||
|
|
|
@ -1,11 +0,0 @@
|
|||
package v040
|
||||
|
||||
import (
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// String returns a human readable string representation of the parameters.
|
||||
func (p Params) String() string {
|
||||
out, _ := yaml.Marshal(p)
|
||||
return string(out)
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -1,43 +0,0 @@
|
|||
package v040
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
var (
|
||||
BondStatusUnspecified = BondStatusName[int32(Unspecified)]
|
||||
BondStatusUnbonded = BondStatusName[int32(Unbonded)]
|
||||
BondStatusUnbonding = BondStatusName[int32(Unbonding)]
|
||||
BondStatusBonded = BondStatusName[int32(Bonded)]
|
||||
)
|
||||
|
||||
// String implements the Stringer interface for a Validator object.
|
||||
func (v Validator) String() string {
|
||||
out, _ := yaml.Marshal(v)
|
||||
return string(out)
|
||||
}
|
||||
|
||||
// Validators is a collection of Validator
|
||||
type Validators []Validator
|
||||
|
||||
func (v Validators) String() (out string) {
|
||||
for _, val := range v {
|
||||
out += val.String() + "\n"
|
||||
}
|
||||
|
||||
return strings.TrimSpace(out)
|
||||
}
|
||||
|
||||
// ValidatorsByVotingPower implements sort.Interface for []Validator based on
|
||||
// the VotingPower and Address fields.
|
||||
// The validators are sorted first by their voting power (descending). Secondary index - Address (ascending).
|
||||
// Copied from tendermint/types/validator_set.go
|
||||
type ValidatorsByVotingPower []Validator
|
||||
|
||||
// String implements the Stringer interface for a Description object.
|
||||
func (d Description) String() string {
|
||||
out, _ := yaml.Marshal(d)
|
||||
return string(out)
|
||||
}
|
|
@ -1,117 +0,0 @@
|
|||
package v042
|
||||
|
||||
import (
|
||||
sdk "github.com/cosmos/cosmos-sdk/types"
|
||||
v040staking "github.com/cosmos/cosmos-sdk/x/staking/legacy/v040"
|
||||
v042staking "github.com/cosmos/cosmos-sdk/x/staking/types"
|
||||
)
|
||||
|
||||
// Migrate accepts exported v0.40 x/staking genesis state and migrates it to
|
||||
// v0.42 x/staking genesis state. The migration includes:
|
||||
//
|
||||
// - Adding power reduction on-chain param
|
||||
func Migrate(stakingState v040staking.GenesisState) *v042staking.GenesisState {
|
||||
newLastValidatorPowers := make([]v042staking.LastValidatorPower, len(stakingState.LastValidatorPowers))
|
||||
for i, oldLastValidatorPower := range stakingState.LastValidatorPowers {
|
||||
newLastValidatorPowers[i] = v042staking.LastValidatorPower{
|
||||
Address: oldLastValidatorPower.Address,
|
||||
Power: oldLastValidatorPower.Power,
|
||||
}
|
||||
}
|
||||
|
||||
newValidators := make([]v042staking.Validator, len(stakingState.Validators))
|
||||
for i, oldValidator := range stakingState.Validators {
|
||||
newValidators[i] = v042staking.Validator{
|
||||
OperatorAddress: oldValidator.OperatorAddress,
|
||||
ConsensusPubkey: oldValidator.ConsensusPubkey,
|
||||
Jailed: oldValidator.Jailed,
|
||||
Status: v042staking.BondStatus(oldValidator.Status),
|
||||
Tokens: oldValidator.Tokens,
|
||||
DelegatorShares: oldValidator.DelegatorShares,
|
||||
Description: v042staking.Description{
|
||||
Moniker: oldValidator.Description.Moniker,
|
||||
Identity: oldValidator.Description.Identity,
|
||||
Website: oldValidator.Description.Website,
|
||||
SecurityContact: oldValidator.Description.SecurityContact,
|
||||
Details: oldValidator.Description.Details,
|
||||
},
|
||||
UnbondingHeight: oldValidator.UnbondingHeight,
|
||||
UnbondingTime: oldValidator.UnbondingTime,
|
||||
Commission: v042staking.Commission{
|
||||
CommissionRates: v042staking.CommissionRates{
|
||||
Rate: oldValidator.Commission.Rate,
|
||||
MaxRate: oldValidator.Commission.MaxRate,
|
||||
MaxChangeRate: oldValidator.Commission.MaxChangeRate,
|
||||
},
|
||||
UpdateTime: oldValidator.Commission.UpdateTime,
|
||||
},
|
||||
MinSelfDelegation: oldValidator.MinSelfDelegation,
|
||||
}
|
||||
}
|
||||
|
||||
newDelegations := make([]v042staking.Delegation, len(stakingState.Delegations))
|
||||
for i, oldDelegation := range stakingState.Delegations {
|
||||
newDelegations[i] = v042staking.Delegation{
|
||||
DelegatorAddress: oldDelegation.DelegatorAddress,
|
||||
ValidatorAddress: oldDelegation.ValidatorAddress,
|
||||
Shares: oldDelegation.Shares,
|
||||
}
|
||||
}
|
||||
|
||||
newUnbondingDelegations := make([]v042staking.UnbondingDelegation, len(stakingState.UnbondingDelegations))
|
||||
for i, oldUnbondingDelegation := range stakingState.UnbondingDelegations {
|
||||
newEntries := make([]v042staking.UnbondingDelegationEntry, len(oldUnbondingDelegation.Entries))
|
||||
for j, oldEntry := range oldUnbondingDelegation.Entries {
|
||||
newEntries[j] = v042staking.UnbondingDelegationEntry{
|
||||
CreationHeight: oldEntry.CreationHeight,
|
||||
CompletionTime: oldEntry.CompletionTime,
|
||||
InitialBalance: oldEntry.InitialBalance,
|
||||
Balance: oldEntry.Balance,
|
||||
}
|
||||
}
|
||||
|
||||
newUnbondingDelegations[i] = v042staking.UnbondingDelegation{
|
||||
DelegatorAddress: oldUnbondingDelegation.DelegatorAddress,
|
||||
ValidatorAddress: oldUnbondingDelegation.ValidatorAddress,
|
||||
Entries: newEntries,
|
||||
}
|
||||
}
|
||||
|
||||
newRedelegations := make([]v042staking.Redelegation, len(stakingState.Redelegations))
|
||||
for i, oldRedelegation := range stakingState.Redelegations {
|
||||
newEntries := make([]v042staking.RedelegationEntry, len(oldRedelegation.Entries))
|
||||
for j, oldEntry := range oldRedelegation.Entries {
|
||||
newEntries[j] = v042staking.RedelegationEntry{
|
||||
CreationHeight: oldEntry.CreationHeight,
|
||||
CompletionTime: oldEntry.CompletionTime,
|
||||
InitialBalance: oldEntry.InitialBalance,
|
||||
SharesDst: oldEntry.SharesDst,
|
||||
}
|
||||
}
|
||||
|
||||
newRedelegations[i] = v042staking.Redelegation{
|
||||
DelegatorAddress: oldRedelegation.DelegatorAddress,
|
||||
ValidatorSrcAddress: oldRedelegation.ValidatorSrcAddress,
|
||||
ValidatorDstAddress: oldRedelegation.ValidatorDstAddress,
|
||||
Entries: newEntries,
|
||||
}
|
||||
}
|
||||
|
||||
return &v042staking.GenesisState{
|
||||
Params: v042staking.Params{
|
||||
UnbondingTime: stakingState.Params.UnbondingTime,
|
||||
MaxValidators: stakingState.Params.MaxValidators,
|
||||
MaxEntries: stakingState.Params.MaxEntries,
|
||||
HistoricalEntries: stakingState.Params.HistoricalEntries,
|
||||
BondDenom: stakingState.Params.BondDenom,
|
||||
PowerReduction: sdk.DefaultPowerReduction,
|
||||
},
|
||||
LastTotalPower: stakingState.LastTotalPower,
|
||||
LastValidatorPowers: newLastValidatorPowers,
|
||||
Validators: newValidators,
|
||||
Delegations: newDelegations,
|
||||
UnbondingDelegations: newUnbondingDelegations,
|
||||
Redelegations: newRedelegations,
|
||||
Exported: stakingState.Exported,
|
||||
}
|
||||
}
|
|
@ -1,101 +0,0 @@
|
|||
package v042_test
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/cosmos/cosmos-sdk/client"
|
||||
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
|
||||
"github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
|
||||
"github.com/cosmos/cosmos-sdk/simapp"
|
||||
v040staking "github.com/cosmos/cosmos-sdk/x/staking/legacy/v040"
|
||||
v042staking "github.com/cosmos/cosmos-sdk/x/staking/legacy/v042"
|
||||
)
|
||||
|
||||
func TestMigrate(t *testing.T) {
|
||||
encodingConfig := simapp.MakeTestEncodingConfig()
|
||||
clientCtx := client.Context{}.
|
||||
WithInterfaceRegistry(encodingConfig.InterfaceRegistry).
|
||||
WithTxConfig(encodingConfig.TxConfig).
|
||||
WithLegacyAmino(encodingConfig.Amino).
|
||||
WithJSONMarshaler(encodingConfig.Marshaler)
|
||||
|
||||
consPubKey := ed25519.GenPrivKeyFromSecret([]byte("val0")).PubKey()
|
||||
pkAny, err := codectypes.NewAnyWithValue(consPubKey)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Can't pack validator consensus PK as Any: %s", err))
|
||||
}
|
||||
stakingGenState := v040staking.GenesisState{
|
||||
Validators: v040staking.Validators{v040staking.Validator{
|
||||
ConsensusPubkey: pkAny,
|
||||
Status: v040staking.Unbonded,
|
||||
}},
|
||||
}
|
||||
|
||||
migrated := v042staking.Migrate(stakingGenState)
|
||||
bz, err := clientCtx.JSONMarshaler.MarshalJSON(migrated)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Indent the JSON bz correctly.
|
||||
var jsonObj map[string]interface{}
|
||||
err = json.Unmarshal(bz, &jsonObj)
|
||||
require.NoError(t, err)
|
||||
indentedBz, err := json.MarshalIndent(jsonObj, "", " ")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make sure about:
|
||||
// - consensus_pubkey: should be an any
|
||||
// - validator's status should be 1 (new unbonded)
|
||||
expected := `{
|
||||
"delegations": [],
|
||||
"exported": false,
|
||||
"last_total_power": "0",
|
||||
"last_validator_powers": [],
|
||||
"params": {
|
||||
"bond_denom": "",
|
||||
"historical_entries": 0,
|
||||
"max_entries": 0,
|
||||
"max_validators": 0,
|
||||
"power_reduction": "1000000",
|
||||
"unbonding_time": "0s"
|
||||
},
|
||||
"redelegations": [],
|
||||
"unbonding_delegations": [],
|
||||
"validators": [
|
||||
{
|
||||
"commission": {
|
||||
"commission_rates": {
|
||||
"max_change_rate": "0",
|
||||
"max_rate": "0",
|
||||
"rate": "0"
|
||||
},
|
||||
"update_time": "0001-01-01T00:00:00Z"
|
||||
},
|
||||
"consensus_pubkey": {
|
||||
"@type": "/cosmos.crypto.ed25519.PubKey",
|
||||
"key": "KTeVrjP7NJIufvgMJsQRxZjfFyD+Exda6O7x+oxIvmA="
|
||||
},
|
||||
"delegator_shares": "0",
|
||||
"description": {
|
||||
"details": "",
|
||||
"identity": "",
|
||||
"moniker": "",
|
||||
"security_contact": "",
|
||||
"website": ""
|
||||
},
|
||||
"jailed": false,
|
||||
"min_self_delegation": "0",
|
||||
"operator_address": "",
|
||||
"status": "BOND_STATUS_UNBONDED",
|
||||
"tokens": "0",
|
||||
"unbonding_height": "0",
|
||||
"unbonding_time": "0001-01-01T00:00:00Z"
|
||||
}
|
||||
]
|
||||
}`
|
||||
|
||||
require.Equal(t, expected, string(indentedBz))
|
||||
}
|
|
@ -1,5 +0,0 @@
|
|||
package v042
|
||||
|
||||
const (
|
||||
ModuleName = "staking"
|
||||
)
|
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue