From a5b7ea93c42d77a812b0ccd213de0197178f8e41 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 29 May 2018 01:03:03 -0700 Subject: [PATCH 01/27] Delay validator set changes by 1 block. --- Makefile | 4 +- consensus/reactor_test.go | 6 +- consensus/replay.go | 12 ++- consensus/state.go | 1 - consensus/state_test.go | 8 +- evidence/pool_test.go | 1 + rpc/core/status.go | 7 +- state/execution.go | 44 +++++------ state/state.go | 17 ++--- state/state_test.go | 157 +++++++++++++++++++------------------- state/store.go | 9 ++- state/validation.go | 12 ++- types/block.go | 38 ++++----- types/validator_set.go | 11 ++- 14 files changed, 172 insertions(+), 155 deletions(-) diff --git a/Makefile b/Makefile index 079c58f9..0f7578c0 100644 --- a/Makefile +++ b/Makefile @@ -132,11 +132,11 @@ vagrant_test: ### go tests test: @echo "--> Running go test" - @go test $(PACKAGES) + @GOCACHE=off go test -p 1 $(PACKAGES) test_race: @echo "--> Running go test --race" - @go test -v -race $(PACKAGES) + @GOCACHE=off go test -p 1 -v -race $(PACKAGES) ######################################## diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 498a857b..70af588a 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -174,14 +174,14 @@ func TestReactorRecordsBlockParts(t *testing.T) { require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same") } -// Test we record votes from other peers +// Test we record votes from other peers. func TestReactorRecordsVotes(t *testing.T) { - // create dummy peer + // Create dummy peer. peer := p2pdummy.NewPeer() ps := NewPeerState(peer).SetLogger(log.TestingLogger()) peer.Set(types.PeerStateKey, ps) - // create reactor + // Create reactor. css := randConsensusNet(1, "consensus_reactor_records_votes_test", newMockTickerFunc(true), newPersistentKVStore) reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states reactor.SetEventBus(css[0].eventBus) diff --git a/consensus/replay.go b/consensus/replay.go index f681828c..75173061 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -264,15 +264,15 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight stateBlockHeight := state.LastBlockHeight h.logger.Info("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight) - // If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain + // If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain. if appBlockHeight == 0 { - validators := types.TM2PB.Validators(state.Validators) + nvals := types.TM2PB.Validators(state.Validators) // state.Validators would work too. csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams) req := abci.RequestInitChain{ Time: h.genDoc.GenesisTime.Unix(), // TODO ChainId: h.genDoc.ChainID, ConsensusParams: csParams, - Validators: validators, + Validators: nvals, AppStateBytes: h.genDoc.AppStateJSON, } res, err := proxyApp.Consensus().InitChainSync(req) @@ -280,9 +280,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight return nil, err } - // if the app returned validators - // or consensus params, update the state - // with the them + // If the app returned validators or consensus params, update the state. 
if len(res.Validators) > 0 { vals, err := types.PB2TM.Validators(res.Validators) if err != nil { @@ -296,7 +294,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight sm.SaveState(h.stateDB, state) } - // First handle edge cases and constraints on the storeBlockHeight + // First handle edge cases and constraints on the storeBlockHeight. if storeBlockHeight == 0 { return appHash, checkAppHash(state, appHash) diff --git a/consensus/state.go b/consensus/state.go index 5d6842a8..a12345d7 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -74,7 +74,6 @@ type ConsensusState struct { privValidator types.PrivValidator // for signing votes // services for creating and executing blocks - // TODO: encapsulate all of this in one "BlockManager" blockExec *sm.BlockExecutor blockStore sm.BlockStore mempool sm.Mempool diff --git a/consensus/state_test.go b/consensus/state_test.go index d0def630..ece70dd5 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -64,22 +64,22 @@ func TestStateProposerSelection0(t *testing.T) { startTestRound(cs1, height, round) - // wait for new round so proposer is set + // Wait for new round so proposer is set. <-newRoundCh - // lets commit a block and ensure proposer for the next height is correct + // Commit a block and ensure proposer for the next height is correct. prop := cs1.GetRoundState().Validators.GetProposer() if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) { t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address) } - // wait for complete proposal + // Wait for complete proposal. <-proposalCh rs := cs1.GetRoundState() signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...) - // wait for new round so next validator is set + // Wait for new round so next validator is set. <-newRoundCh prop = cs1.GetRoundState().Validators.GetProposer() diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 01907623..4b3e3581 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -27,6 +27,7 @@ func initializeValidatorState(valAddr []byte, height int64) dbm.DB { LastBlockHeight: 0, LastBlockTime: time.Now(), Validators: valSet, + NextValidators: valSet.CopyIncrementAccum(1), LastHeightValidatorsChanged: 1, ConsensusParams: types.ConsensusParams{ EvidenceParams: types.EvidenceParams{ diff --git a/rpc/core/status.go b/rpc/core/status.go index 2c54d0a9..5738685b 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -109,7 +109,7 @@ func validatorAtHeight(h int64) *types.Validator { privValAddress := pubKey.Address() - // if we're still at height h, search in the current validator set + // If we're still at height h, search in the current validator set. if lastBlockHeight == h { for _, val := range vals { if bytes.Equal(val.Address, privValAddress) { @@ -118,12 +118,11 @@ func validatorAtHeight(h int64) *types.Validator { } } - // if we've moved to the next height, retrieve the validator set from DB + // If we've moved to the next height, retrieve the validator set from DB. 
if lastBlockHeight > h { vals, err := sm.LoadValidators(stateDB, h) if err != nil { - // should not happen - return nil + return nil // should not happen } _, val := vals.GetByAddress(privValAddress) return val diff --git a/state/execution.go b/state/execution.go index 0d6ee81b..3dbd3bf2 100644 --- a/state/execution.go +++ b/state/execution.go @@ -80,18 +80,18 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b fail.Fail() // XXX - // save the results before we commit + // Save the results before we commit. saveABCIResponses(blockExec.db, block.Height, abciResponses) fail.Fail() // XXX - // update the state with the block and responses + // Update the state with the block and responses. state, err = updateState(state, blockID, block.Header, abciResponses) if err != nil { return state, fmt.Errorf("Commit failed for application: %v", err) } - // lock mempool, commit app state, update mempoool + // Lock mempool, commit app state, update mempoool. appHash, err := blockExec.Commit(block) if err != nil { return state, fmt.Errorf("Commit failed for application: %v", err) @@ -102,13 +102,13 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b fail.Fail() // XXX - // update the app hash and save the state + // Update the app hash and save the state. state.AppHash = appHash SaveState(blockExec.db, state) fail.Fail() // XXX - // events are fired after everything else + // Events are fired after everything else. // NOTE: if we crash between Commit and Save, events wont be fired during replay fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses) @@ -164,7 +164,7 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, txIndex := 0 abciResponses := NewABCIResponses(block) - // Execute transactions and get hash + // Execute transactions and get hash. proxyCb := func(req *abci.Request, res *abci.Response) { switch r := res.Value.(type) { case *abci.Response_DeliverTx: @@ -186,7 +186,7 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, signVals, byzVals := getBeginBlockValidatorInfo(block, lastValSet, stateDB) - // Begin block + // Begin block. _, err := proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{ Hash: block.Hash(), Header: types.TM2PB.Header(block.Header), @@ -198,7 +198,7 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, return nil, err } - // Run txs of block + // Run txs of block. for _, tx := range block.Txs { proxyAppConn.DeliverTxAsync(tx) if err := proxyAppConn.Error(); err != nil { @@ -206,7 +206,7 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, } } - // End block + // End block. abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(abci.RequestEndBlock{block.Height}) if err != nil { logger.Error("Error in proxyAppConn.EndBlock", "err", err) @@ -307,26 +307,25 @@ func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.Validat func updateState(state State, blockID types.BlockID, header *types.Header, abciResponses *ABCIResponses) (State, error) { - // copy the valset so we can apply changes from EndBlock - // and update s.LastValidators and s.Validators - prevValSet := state.Validators.Copy() - nextValSet := prevValSet.Copy() + // Copy the valset so we can apply changes from EndBlock + // and update s.LastValidators and s.Validators. 
+ nValSet := state.NextValidators.Copy() - // update the validator set with the latest abciResponses + // Update the validator set with the latest abciResponses. lastHeightValsChanged := state.LastHeightValidatorsChanged if len(abciResponses.EndBlock.ValidatorUpdates) > 0 { - err := updateValidators(nextValSet, abciResponses.EndBlock.ValidatorUpdates) + err := updateValidators(nValSet, abciResponses.EndBlock.ValidatorUpdates) if err != nil { return state, fmt.Errorf("Error changing validator set: %v", err) } - // change results from this height but only applies to the next height - lastHeightValsChanged = header.Height + 1 + // Change results from this height but only applies to the next next height. + lastHeightValsChanged = header.Height + 1 + 1 } - // Update validator accums and set state variables - nextValSet.IncrementAccum(1) + // Update validator accums and set state variables. + nValSet.IncrementAccum(1) - // update the params with the latest abciResponses + // Update the params with the latest abciResponses. nextParams := state.ConsensusParams lastHeightParamsChanged := state.LastHeightConsensusParamsChanged if abciResponses.EndBlock.ConsensusParamUpdates != nil { @@ -336,7 +335,7 @@ func updateState(state State, blockID types.BlockID, header *types.Header, if err != nil { return state, fmt.Errorf("Error updating consensus params: %v", err) } - // change results from this height but only applies to the next height + // Change results from this height but only applies to the next height. lastHeightParamsChanged = header.Height + 1 } @@ -348,7 +347,8 @@ func updateState(state State, blockID types.BlockID, header *types.Header, LastBlockTotalTx: state.LastBlockTotalTx + header.NumTxs, LastBlockID: blockID, LastBlockTime: header.Time, - Validators: nextValSet, + NextValidators: nValSet, + Validators: state.NextValidators.Copy(), LastValidators: state.Validators.Copy(), LastHeightValidatorsChanged: lastHeightValsChanged, ConsensusParams: nextParams, diff --git a/state/state.go b/state/state.go index 3bc08dae..0891f837 100644 --- a/state/state.go +++ b/state/state.go @@ -24,7 +24,7 @@ var ( // Instead, use state.Copy() or state.NextState(...). // NOTE: not goroutine-safe. type State struct { - // Immutable + // immutable ChainID string // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) @@ -38,6 +38,7 @@ type State struct { // so we can query for historical validator sets. // Note that if s.LastBlockHeight causes a valset change, // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + NextValidators *types.ValidatorSet Validators *types.ValidatorSet LastValidators *types.ValidatorSet LastHeightValidatorsChanged int64 @@ -50,7 +51,7 @@ type State struct { // Merkle root of the results from executing prev block LastResultsHash []byte - // The latest AppHash we've received from calling abci.Commit() + // the latest AppHash we've received from calling abci.Commit() AppHash []byte } @@ -64,6 +65,7 @@ func (state State) Copy() State { LastBlockID: state.LastBlockID, LastBlockTime: state.LastBlockTime, + NextValidators: state.NextValidators.Copy(), Validators: state.Validators.Copy(), LastValidators: state.LastValidators.Copy(), LastHeightValidatorsChanged: state.LastHeightValidatorsChanged, @@ -93,24 +95,20 @@ func (state State) IsEmpty() bool { return state.Validators == nil // XXX can't compare to Empty } -// GetValidators returns the last and current validator sets. 
-func (state State) GetValidators() (last *types.ValidatorSet, current *types.ValidatorSet) { - return state.LastValidators, state.Validators -} - //------------------------------------------------------------------------ // Create a block from the latest state // MakeBlock builds a block with the given txs and commit from the current state. func (state State) MakeBlock(height int64, txs []types.Tx, commit *types.Commit) (*types.Block, *types.PartSet) { - // build base block + // Build base block. block := types.MakeBlock(height, txs, commit) - // fill header with state data + // Fill header with state data. block.ChainID = state.ChainID block.TotalTxs = state.LastBlockTotalTx + block.NumTxs block.LastBlockID = state.LastBlockID block.ValidatorsHash = state.Validators.Hash() + block.NextValidatorsHash = state.NextValidators.Hash() block.AppHash = state.AppHash block.ConsensusHash = state.ConsensusParams.Hash() block.LastResultsHash = state.LastResultsHash @@ -175,6 +173,7 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { LastBlockID: types.BlockID{}, LastBlockTime: genDoc.GenesisTime, + NextValidators: types.NewValidatorSet(validators).CopyIncrementAccum(1), Validators: types.NewValidatorSet(validators), LastValidators: types.NewValidatorSet(nil), LastHeightValidatorsChanged: 1, diff --git a/state/state_test.go b/state/state_test.go index 30a87fb0..ae70cc10 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -16,7 +16,7 @@ import ( "github.com/tendermint/tendermint/types" ) -// setupTestCase does setup common to all test cases +// setupTestCase does setup common to all test cases. func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, State) { config := cfg.ResetTestRoot("state_") dbType := dbm.DBBackendType(config.DBBackend) @@ -72,7 +72,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { state.LastBlockHeight++ - // build mock responses + // Build mock responses. block := makeBlock(state, 2) abciResponses := NewABCIResponses(block) abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Tags: nil} @@ -89,7 +89,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { loadedABCIResponses, abciResponses)) } -// TestResultsSaveLoad tests saving and loading abci results. +// TestResultsSaveLoad tests saving and loading ABCI results. func TestABCIResponsesSaveLoad2(t *testing.T) { tearDown, stateDB, _ := setupTestCase(t) defer tearDown(t) @@ -97,8 +97,8 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { assert := assert.New(t) cases := [...]struct { - // height is implied index+2 - // as block 1 is created from genesis + // Height is implied to equal index+2, + // as block 1 is created from genesis. added []*abci.ResponseDeliverTx expected types.ABCIResults }{ @@ -132,14 +132,14 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { }, } - // query all before, should return error + // Query all before, this should return error. for i := range cases { h := int64(i + 1) res, err := LoadABCIResponses(stateDB, h) assert.Error(err, "%d: %#v", i, res) } - // add all cases + // Add all cases. for i, tc := range cases { h := int64(i + 1) // last block height, one below what we save responses := &ABCIResponses{ @@ -149,7 +149,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { saveABCIResponses(stateDB, h, responses) } - // query all before, should return expected value + // Query all before, should return expected value. 
for i, tc := range cases { h := int64(i + 1) res, err := LoadABCIResponses(stateDB, h) @@ -165,34 +165,30 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { // nolint: vetshadow assert := assert.New(t) - // can't load anything for height 0 + // Can't load anything for height 0. v, err := LoadValidators(stateDB, 0) assert.IsType(ErrNoValSetForHeight{}, err, "expected err at height 0") - // should be able to load for height 1 + // Should be able to load for height 1. v, err = LoadValidators(stateDB, 1) assert.Nil(err, "expected no err at height 1") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") - // increment height, save; should be able to load for next height + // Should be able to load for height 2. + v, err = LoadValidators(stateDB, 2) + assert.Nil(err, "expected no err at height 2") + assert.Equal(v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match") + + // Increment height, save; should be able to load for next & next next height. state.LastBlockHeight++ nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - v, err = LoadValidators(stateDB, nextHeight) + saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + vp0, err := LoadValidators(stateDB, nextHeight+0) assert.Nil(err, "expected no err") - assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") - - // increment height, save; should be able to load for next height - state.LastBlockHeight += 10 - nextHeight = state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - v, err = LoadValidators(stateDB, nextHeight) + vp1, err := LoadValidators(stateDB, nextHeight+1) assert.Nil(err, "expected no err") - assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") - - // should be able to load for next next height - _, err = LoadValidators(stateDB, state.LastBlockHeight+2) - assert.IsType(ErrNoValSetForHeight{}, err, "expected err at unknown height") + assert.Equal(vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match") + assert.Equal(vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match") } // TestValidatorChangesSaveLoad tests saving and loading a validator set with changes. @@ -200,38 +196,37 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - // change vals at these heights + // Change vals at these heights. changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} N := len(changeHeights) - // build the validator history by running updateState - // with the right validator set for each height + // Build the validator history by running updateState + // with the right validator set for each height. highestHeight := changeHeights[N-1] + 5 changeIndex := 0 _, val := state.Validators.GetByIndex(0) power := val.VotingPower var err error for i := int64(1); i < highestHeight; i++ { - // when we get to a change height, - // use the next pubkey + // When we get to a change height, use the next pubkey. 
if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { changeIndex++ power++ } - header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, i, power) + header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, power) state, err = updateState(state, blockID, header, responses) assert.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) } - // on each change height, increment the power by one. + // On each height change, increment the power by one. testCases := make([]int64, highestHeight) changeIndex = 0 power = val.VotingPower for i := int64(1); i < highestHeight+1; i++ { - // we we get to the height after a change height - // use the next pubkey (note our counter starts at 0 this time) + // We get to the height after a change height use the next pubkey (note + // our counter starts at 0 this time). if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 { changeIndex++ power++ @@ -240,7 +235,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } for i, power := range testCases { - v, err := LoadValidators(stateDB, int64(i+1)) + v, err := LoadValidators(stateDB, int64(i+1+1)) // +1 because vset changes delayed by 1 block. assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i)) assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) _, val := v.GetByIndex(0) @@ -255,25 +250,41 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { func TestManyValidatorChangesSaveLoad(t *testing.T) { const valSetSize = 7 tearDown, stateDB, state := setupTestCase(t) + require.Equal(t, int64(0), state.LastBlockHeight) state.Validators = genValSet(valSetSize) + state.NextValidators = state.Validators.CopyIncrementAccum(1) SaveState(stateDB, state) defer tearDown(t) - const height = 1 - pubkey := crypto.GenPrivKeyEd25519().PubKey() - // swap the first validator with a new one ^^^ (validator set size stays the same) - header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, height, pubkey) + _, valOld := state.Validators.GetByIndex(0) + var pubkeyOld = valOld.PubKey + var pubkey = crypto.GenPrivKeyEd25519().PubKey() + + // Swap the first validator with a new one (validator set size stays the same). + header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, pubkey) + + // Save state etc. var err error state, err = updateState(state, blockID, header, responses) require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) - v, err := LoadValidators(stateDB, height+1) + // Load nextheight, it should be the oldpubkey. + v0, err := LoadValidators(stateDB, nextHeight) assert.Nil(t, err) - assert.Equal(t, valSetSize, v.Size()) + assert.Equal(t, valSetSize, v0.Size()) + index, val := v0.GetByAddress(pubkeyOld.Address()) + assert.NotNil(t, val) + if index < 0 { + t.Fatal("expected to find old validator") + } - index, val := v.GetByAddress(pubkey.Address()) + // Load nextheight+1, it should be the new pubkey. 
+ v1, err := LoadValidators(stateDB, nextHeight+1) + assert.Nil(t, err) + assert.Equal(t, valSetSize, v1.Size()) + index, val = v1.GetByAddress(pubkey.Address()) assert.NotNil(t, val) if index < 0 { t.Fatal("expected to find newly added validator") @@ -294,12 +305,12 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - // change vals at these heights + // Change vals at these heights. changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} N := len(changeHeights) - // each valset is just one validator - // create list of them + // Each valset is just one validator. + // create list of them. params := make([]types.ConsensusParams, N+1) params[0] = state.ConsensusParams for i := 1; i < N+1; i++ { @@ -307,20 +318,19 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { params[i].BlockSize.MaxBytes += i } - // build the params history by running updateState - // with the right params set for each height + // Build the params history by running updateState + // with the right params set for each height. highestHeight := changeHeights[N-1] + 5 changeIndex := 0 cp := params[changeIndex] var err error for i := int64(1); i < highestHeight; i++ { - // when we get to a change height, - // use the next params + // When we get to a change height, use the next params. if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { changeIndex++ cp = params[changeIndex] } - header, blockID, responses := makeHeaderPartsResponsesParams(state, i, cp) + header, blockID, responses := makeHeaderPartsResponsesParams(state, cp) state, err = updateState(state, blockID, header, responses) require.Nil(t, err) @@ -328,13 +338,13 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { saveConsensusParamsInfo(stateDB, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) } - // make all the test cases by using the same params until after the change + // Make all the test cases by using the same params until after the change. testCases := make([]paramsChangeTestCase, highestHeight) changeIndex = 0 cp = params[changeIndex] for i := int64(1); i < highestHeight+1; i++ { - // we we get to the height after a change height - // use the next pubkey (note our counter starts at 0 this time) + // We get to the height after a change height use the next pubkey (note + // our counter starts at 0 this time). if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 { changeIndex++ cp = params[changeIndex] @@ -419,16 +429,16 @@ func TestApplyUpdates(t *testing.T) { } } -func makeHeaderPartsResponsesValPubKeyChange(state State, height int64, - pubkey crypto.PubKey) (*types.Header, types.BlockID, *ABCIResponses) { +func makeHeaderPartsResponsesValPubKeyChange(state State, pubkey crypto.PubKey) ( + *types.Header, types.BlockID, *ABCIResponses) { - block := makeBlock(state, height) + block := makeBlock(state, state.LastBlockHeight+1) abciResponses := &ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - // if the pubkey is new, remove the old and add the new - _, val := state.Validators.GetByIndex(0) + // If the pubkey is new, remove the old and add the new. 
+ _, val := state.NextValidators.GetByIndex(0) if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { abciResponses.EndBlock = &abci.ResponseEndBlock{ ValidatorUpdates: []abci.Validator{ @@ -441,16 +451,16 @@ func makeHeaderPartsResponsesValPubKeyChange(state State, height int64, return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses } -func makeHeaderPartsResponsesValPowerChange(state State, height int64, - power int64) (*types.Header, types.BlockID, *ABCIResponses) { +func makeHeaderPartsResponsesValPowerChange(state State, power int64) ( + *types.Header, types.BlockID, *ABCIResponses) { - block := makeBlock(state, height) + block := makeBlock(state, state.LastBlockHeight+1) abciResponses := &ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - // if the pubkey is new, remove the old and add the new - _, val := state.Validators.GetByIndex(0) + // If the pubkey is new, remove the old and add the new. + _, val := state.NextValidators.GetByIndex(0) if val.VotingPower != power { abciResponses.EndBlock = &abci.ResponseEndBlock{ ValidatorUpdates: []abci.Validator{ @@ -462,10 +472,10 @@ func makeHeaderPartsResponsesValPowerChange(state State, height int64, return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses } -func makeHeaderPartsResponsesParams(state State, height int64, - params types.ConsensusParams) (*types.Header, types.BlockID, *ABCIResponses) { +func makeHeaderPartsResponsesParams(state State, params types.ConsensusParams) ( + *types.Header, types.BlockID, *ABCIResponses) { - block := makeBlock(state, height) + block := makeBlock(state, state.LastBlockHeight+1) abciResponses := &ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: types.TM2PB.ConsensusParams(¶ms)}, } @@ -476,14 +486,3 @@ type paramsChangeTestCase struct { height int64 params types.ConsensusParams } - -func makeHeaderPartsResults(state State, height int64, - results []*abci.ResponseDeliverTx) (*types.Header, types.BlockID, *ABCIResponses) { - - block := makeBlock(state, height) - abciResponses := &ABCIResponses{ - DeliverTx: results, - EndBlock: &abci.ResponseEndBlock{}, - } - return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses -} diff --git a/state/store.go b/state/store.go index 79893254..040bc9fd 100644 --- a/state/store.go +++ b/state/store.go @@ -86,7 +86,14 @@ func SaveState(db dbm.DB, state State) { func saveState(db dbm.DB, state State, key []byte) { nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(db, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + // If first block, save validators for block 1. + if nextHeight == 1 { + lastHeightVoteChanged := int64(1) // Due to Tendermint validator set changes being delayed 1 block. + saveValidatorsInfo(db, nextHeight, lastHeightVoteChanged, state.Validators) + } + // Save next validators. + saveValidatorsInfo(db, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + // Save next consensus params. saveConsensusParamsInfo(db, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) db.SetSync(stateKey, state.Bytes()) } diff --git a/state/validation.go b/state/validation.go index 84a4cc82..76c1a1ec 100644 --- a/state/validation.go +++ b/state/validation.go @@ -13,12 +13,12 @@ import ( // Validate block func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { - // validate internal consistency + // Validate internal consistency. 
if err := block.ValidateBasic(); err != nil { return err } - // validate basic info + // Validate basic info. if block.ChainID != state.ChainID { return fmt.Errorf("Wrong Block.Header.ChainID. Expected %v, got %v", state.ChainID, block.ChainID) } @@ -33,7 +33,7 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { } */ - // validate prev block info + // Validate prev block info. if !block.LastBlockID.Equals(state.LastBlockID) { return fmt.Errorf("Wrong Block.Header.LastBlockID. Expected %v, got %v", state.LastBlockID, block.LastBlockID) } @@ -42,7 +42,7 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { return fmt.Errorf("Wrong Block.Header.TotalTxs. Expected %v, got %v", state.LastBlockTotalTx+newTxs, block.TotalTxs) } - // validate app info + // Validate app info if !bytes.Equal(block.AppHash, state.AppHash) { return fmt.Errorf("Wrong Block.Header.AppHash. Expected %X, got %v", state.AppHash, block.AppHash) } @@ -55,6 +55,9 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { if !bytes.Equal(block.ValidatorsHash, state.Validators.Hash()) { return fmt.Errorf("Wrong Block.Header.ValidatorsHash. Expected %X, got %v", state.Validators.Hash(), block.ValidatorsHash) } + if !bytes.Equal(block.NextValidatorsHash, state.NextValidators.Hash()) { + return fmt.Errorf("Wrong Block.Header.NextValidatorsHash. Expected %X, got %v", state.NextValidators.Hash(), block.NextValidatorsHash) + } // Validate block LastCommit. if block.Height == 1 { @@ -73,6 +76,7 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { } } + // Validate all evidence. // TODO: Each check requires loading an old validator set. // We should cap the amount of evidence per block // to prevent potential proposer DoS. 
diff --git a/types/block.go b/types/block.go index 6adc0c4c..e72b5fc7 100644 --- a/types/block.go +++ b/types/block.go @@ -196,10 +196,11 @@ type Header struct { DataHash cmn.HexBytes `json:"data_hash"` // transactions // hashes from the app output from the prev block - ValidatorsHash cmn.HexBytes `json:"validators_hash"` // validators for the current block - ConsensusHash cmn.HexBytes `json:"consensus_hash"` // consensus params for current block - AppHash cmn.HexBytes `json:"app_hash"` // state after txs from the previous block - LastResultsHash cmn.HexBytes `json:"last_results_hash"` // root hash of all results from the txs from the previous block + ValidatorsHash cmn.HexBytes `json:"validators_hash"` // validators for the current block + NextValidatorsHash cmn.HexBytes `json:"next_validators_hash"` // validators for the next block + ConsensusHash cmn.HexBytes `json:"consensus_hash"` // consensus params for current block + AppHash cmn.HexBytes `json:"app_hash"` // state after txs from the previous block + LastResultsHash cmn.HexBytes `json:"last_results_hash"` // root hash of all results from the txs from the previous block // consensus info EvidenceHash cmn.HexBytes `json:"evidence_hash"` // evidence included in the block @@ -214,19 +215,20 @@ func (h *Header) Hash() cmn.HexBytes { return nil } return merkle.SimpleHashFromMap(map[string]merkle.Hasher{ - "ChainID": aminoHasher(h.ChainID), - "Height": aminoHasher(h.Height), - "Time": aminoHasher(h.Time), - "NumTxs": aminoHasher(h.NumTxs), - "TotalTxs": aminoHasher(h.TotalTxs), - "LastBlockID": aminoHasher(h.LastBlockID), - "LastCommit": aminoHasher(h.LastCommitHash), - "Data": aminoHasher(h.DataHash), - "Validators": aminoHasher(h.ValidatorsHash), - "App": aminoHasher(h.AppHash), - "Consensus": aminoHasher(h.ConsensusHash), - "Results": aminoHasher(h.LastResultsHash), - "Evidence": aminoHasher(h.EvidenceHash), + "ChainID": aminoHasher(h.ChainID), + "Height": aminoHasher(h.Height), + "Time": aminoHasher(h.Time), + "NumTxs": aminoHasher(h.NumTxs), + "TotalTxs": aminoHasher(h.TotalTxs), + "LastBlockID": aminoHasher(h.LastBlockID), + "LastCommit": aminoHasher(h.LastCommitHash), + "Data": aminoHasher(h.DataHash), + "Validators": aminoHasher(h.ValidatorsHash), + "NextValidators": aminoHasher(h.NextValidatorsHash), + "App": aminoHasher(h.AppHash), + "Consensus": aminoHasher(h.ConsensusHash), + "Results": aminoHasher(h.LastResultsHash), + "Evidence": aminoHasher(h.EvidenceHash), }) } @@ -245,6 +247,7 @@ func (h *Header) StringIndented(indent string) string { %s LastCommit: %v %s Data: %v %s Validators: %v +%s NextValidators: %v %s App: %v %s Consensus: %v %s Results: %v @@ -259,6 +262,7 @@ func (h *Header) StringIndented(indent string) string { indent, h.LastCommitHash, indent, h.DataHash, indent, h.ValidatorsHash, + indent, h.NextValidatorsHash, indent, h.AppHash, indent, h.ConsensusHash, indent, h.LastResultsHash, diff --git a/types/validator_set.go b/types/validator_set.go index f2fac292..8f085090 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -46,7 +46,14 @@ func NewValidatorSet(vals []*Validator) *ValidatorSet { return vs } -// incrementAccum and update the proposer +// Increment Accum and update the proposer on a copy, and return it. +func (valSet *ValidatorSet) CopyIncrementAccum(times int) *ValidatorSet { + copy := valSet.Copy() + copy.IncrementAccum(times) + return copy +} + +// Increment Accum and update the proposer. 
func (valSet *ValidatorSet) IncrementAccum(times int) { // Add VotingPower * times to each validator and order into heap. validatorsHeap := cmn.NewHeap() @@ -387,7 +394,7 @@ func (valSet *ValidatorSet) StringIndented(indent string) string { %s}`, indent, valSet.GetProposer().String(), indent, - indent, strings.Join(valStrings, "\n"+indent+" "), + indent, strings.Join(valStrings, "\n"+indent+" "), indent) } From bf0ff212b94c6c9b8799d0adaa421a7f85c68208 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 9 Jun 2018 04:25:48 -0700 Subject: [PATCH 02/27] Refactor "lite" to handle delayed validator set changes. Also, fix consensus liveness issue. --- Gopkg.lock | 21 +- Gopkg.toml | 6 +- blockchain/store_test.go | 8 +- consensus/common_test.go | 25 ++ consensus/state.go | 4 +- consensus/state_test.go | 18 +- libs/pubsub/pubsub.go | 3 +- lite/base_certifier.go | 72 ++++ ...rtifier_test.go => base_certifier_test.go} | 35 +- lite/client/main_test.go | 25 -- lite/client/provider.go | 164 ++++---- lite/client/provider_test.go | 62 +-- lite/commit.go | 148 ++++--- lite/dbprovider.go | 168 ++++++++ lite/doc.go | 210 +++++----- lite/dynamic_certifier.go | 96 ----- lite/dynamic_certifier_test.go | 130 ------- lite/errors/errors.go | 148 ++++--- lite/errors/errors_test.go | 18 - lite/files/commit.go | 93 ----- lite/files/commit_test.go | 66 ---- lite/files/provider.go | 139 ------- lite/files/provider_test.go | 96 ----- lite/files/wire.go | 12 - lite/helpers.go | 119 +++--- lite/inquiring_certifier.go | 268 +++++++------ lite/inquiring_certifier_test.go | 189 ++++----- lite/memprovider.go | 152 -------- lite/multiprovider.go | 72 ++++ lite/performance_test.go | 365 ------------------ lite/provider.go | 117 +----- lite/provider_test.go | 137 +++---- lite/proxy/block.go | 29 +- lite/proxy/certifier.go | 26 +- lite/proxy/errors.go | 24 +- lite/proxy/errors_test.go | 17 - lite/proxy/query.go | 40 +- lite/proxy/query_test.go | 24 +- lite/proxy/validate_test.go | 71 ++-- lite/proxy/wrapper.go | 27 +- lite/static_certifier.go | 73 ---- lite/types.go | 13 + privval/priv_validator_test.go | 11 +- privval/socket_test.go | 8 +- rpc/core/blocks.go | 6 +- rpc/core/consensus.go | 6 +- rpc/core/types/responses.go | 6 +- rpc/lib/server/http_server.go | 2 +- scripts/install_abci_apps.sh | 4 +- test/app/grpc_client.go | 4 +- types/block.go | 77 +++- types/canonical_json.go | 4 +- types/proposal.go | 2 +- types/validator_set.go | 325 ++++++++-------- types/vote_set.go | 6 +- 55 files changed, 1542 insertions(+), 2449 deletions(-) create mode 100644 lite/base_certifier.go rename lite/{static_certifier_test.go => base_certifier_test.go} (54%) delete mode 100644 lite/client/main_test.go create mode 100644 lite/dbprovider.go delete mode 100644 lite/dynamic_certifier.go delete mode 100644 lite/dynamic_certifier_test.go delete mode 100644 lite/errors/errors_test.go delete mode 100644 lite/files/commit.go delete mode 100644 lite/files/commit_test.go delete mode 100644 lite/files/provider.go delete mode 100644 lite/files/provider_test.go delete mode 100644 lite/files/wire.go delete mode 100644 lite/memprovider.go create mode 100644 lite/multiprovider.go delete mode 100644 lite/performance_test.go delete mode 100644 lite/proxy/errors_test.go delete mode 100644 lite/static_certifier.go create mode 100644 lite/types.go diff --git a/Gopkg.lock b/Gopkg.lock index f9729ffa..9dfc2a5f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -7,6 +7,12 @@ packages = ["quantile"] revision = "3a771d992973f24aa725d07868b467d1ddfceafb" +[[projects]] + branch 
= "master" + name = "github.com/brejski/hid" + packages = ["."] + revision = "06112dcfcc50a7e0e4fd06e17f9791e788fdaafc" + [[projects]] branch = "master" name = "github.com/btcsuite/btcd" @@ -289,11 +295,8 @@ [[projects]] name = "github.com/tendermint/abci" packages = [ - "client", "example/code", - "example/counter", "example/kvstore", - "server", "types" ] revision = "198dccf0ddfd1bb176f87657e3286a05a6ed9540" @@ -327,10 +330,16 @@ "flowrate", "log", "merkle", + "merkle/tmhash", "test" ] - revision = "692f1d86a6e2c0efa698fd1e4541b68c74ffaf38" - version = "v0.8.4" + revision = "fb7ec62b2925f48de159aeea73b254ae8c58a738" + version = "v0.9.0-rc1" + +[[projects]] + name = "github.com/zondax/ledger-goclient" + packages = ["."] + revision = "3e2146609cdb97894c064d59e9d00accd8c2b1dd" [[projects]] branch = "master" @@ -435,6 +444,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "d17038089dd6383ff5028229d4026bb92f5c7adc7e9c1cd52584237e2e5fd431" + inputs-digest = "fcc5b0344f1e328b6abefa1a937d1161e14bbaef603e6f2065e6690531bc5de1" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 4c32f3d8..d892405b 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -69,13 +69,17 @@ name = "github.com/stretchr/testify" version = "~1.2.1" +[[constraint]] + name = "github.com/tendermint/abci" + version = "~0.12.0" + [[constraint]] name = "github.com/tendermint/go-amino" version = "~0.10.1" [[override]] name = "github.com/tendermint/tmlibs" - version = "~0.8.4" + version = "0.9.0-rc1" [[constraint]] name = "google.golang.org/grpc" diff --git a/blockchain/store_test.go b/blockchain/store_test.go index 1e0c223a..5cb18cdc 100644 --- a/blockchain/store_test.go +++ b/blockchain/store_test.go @@ -49,7 +49,7 @@ func TestNewBlockStore(t *testing.T) { return nil, nil }) require.NotNil(t, panicErr, "#%d panicCauser: %q expected a panic", i, tt.data) - assert.Contains(t, panicErr.Error(), tt.wantErr, "#%d data: %q", i, tt.data) + assert.Contains(t, fmt.Sprintf("%#v", panicErr), tt.wantErr, "#%d data: %q", i, tt.data) } db.Set(blockStoreKey, nil) @@ -238,7 +238,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { if subStr := tuple.wantPanic; subStr != "" { if panicErr == nil { t.Errorf("#%d: want a non-nil panic", i) - } else if got := panicErr.Error(); !strings.Contains(got, subStr) { + } else if got := fmt.Sprintf("%#v", panicErr); !strings.Contains(got, subStr) { t.Errorf("#%d:\n\tgotErr: %q\nwant substring: %q", i, got, subStr) } continue @@ -287,7 +287,7 @@ func TestLoadBlockPart(t *testing.T) { db.Set(calcBlockPartKey(height, index), []byte("Tendermint")) res, _, panicErr = doFn(loadPart) require.NotNil(t, panicErr, "expecting a non-nil panic") - require.Contains(t, panicErr.Error(), "Error reading block part") + require.Contains(t, fmt.Sprintf("%#v", panicErr), "Error reading block part") // 3. A good block serialized and saved to the DB should be retrievable db.Set(calcBlockPartKey(height, index), cdc.MustMarshalBinaryBare(part1)) @@ -316,7 +316,7 @@ func TestLoadBlockMeta(t *testing.T) { db.Set(calcBlockMetaKey(height), []byte("Tendermint-Meta")) res, _, panicErr = doFn(loadMeta) require.NotNil(t, panicErr, "expecting a non-nil panic") - require.Contains(t, panicErr.Error(), "Error reading block meta") + require.Contains(t, fmt.Sprintf("%#v", panicErr), "Error reading block meta") // 3. 
A good blockMeta serialized and saved to the DB should be retrievable meta := &types.BlockMeta{} diff --git a/consensus/common_test.go b/consensus/common_test.go index b990f525..8c5aa6c9 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "os" "path" + "reflect" "sort" "sync" "testing" @@ -325,6 +326,30 @@ func ensureNewStep(stepCh <-chan interface{}) { } } +func ensureVote(voteCh chan interface{}, height int64, round int, voteType byte) { + timer := time.NewTimer(ensureTimeout) + select { + case <-timer.C: + break + case v := <-voteCh: + edv, ok := v.(types.EventDataVote) + if !ok { + panic(fmt.Sprintf("expected a *types.Vote, got %v. wrong subscription channel?", + reflect.TypeOf(v))) + } + vote := edv.Vote + if vote.Height != height { + panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height)) + } + if vote.Round != round { + panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round)) + } + if vote.Type != voteType { + panic(fmt.Sprintf("expected type %v, got %v", voteType, vote.Type)) + } + } +} + //------------------------------------------------------------------------------- // consensus nets diff --git a/consensus/state.go b/consensus/state.go index a12345d7..93e1f6b4 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1594,7 +1594,9 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, blockID, ok := precommits.TwoThirdsMajority() if ok { if len(blockID.Hash) == 0 { - cs.enterNewRound(height, vote.Round+1) + cs.enterNewRound(height, vote.Round) + cs.enterPrecommit(height, vote.Round) + cs.enterPrecommitWait(height, vote.Round) } else { cs.enterNewRound(height, vote.Round) cs.enterPrecommit(height, vote.Round) diff --git a/consensus/state_test.go b/consensus/state_test.go index ece70dd5..307a3993 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -718,6 +718,8 @@ func TestStateLockPOLUnlock(t *testing.T) { func TestStateLockPOLSafety1(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] + h := cs1.GetRoundState().Height + r := cs1.GetRoundState().Round partSize := cs1.state.ConsensusParams.BlockPartSizeBytes @@ -734,7 +736,7 @@ func TestStateLockPOLSafety1(t *testing.T) { rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) propBlock := rs.ProposalBlock - <-voteCh // prevote + ensureVote(voteCh, h, r, types.VoteTypePrevote) validatePrevote(t, cs1, 0, vss[0], propBlock.Hash()) @@ -755,6 +757,11 @@ func TestStateLockPOLSafety1(t *testing.T) { // we do see them precommit nil signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) + ensureVote(voteCh, h, r, types.VoteTypePrecommit) + + <-newRoundCh + t.Log("### ONTO ROUND 1") + prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) propBlockHash := propBlock.Hash() propBlockParts := propBlock.MakePartSet(partSize) @@ -765,9 +772,6 @@ func TestStateLockPOLSafety1(t *testing.T) { if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - - <-newRoundCh - t.Log("### ONTO ROUND 1") /*Round2 // we timeout and prevote our lock // a polka happened but we didn't see it! 
@@ -788,13 +792,13 @@ func TestStateLockPOLSafety1(t *testing.T) { } t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash)) // go to prevote, prevote for proposal block - <-voteCh + ensureVote(voteCh, h, r+1, types.VoteTypePrevote) validatePrevote(t, cs1, 1, vss[0], propBlockHash) // now we see the others prevote for it, so we should lock on it signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - <-voteCh // precommit + ensureVote(voteCh, h, r+1, types.VoteTypePrecommit) // we should have precommitted validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash) @@ -816,7 +820,7 @@ func TestStateLockPOLSafety1(t *testing.T) { <-timeoutProposeCh // finish prevote - <-voteCh + ensureVote(voteCh, h, r+2, types.VoteTypePrevote) // we should prevote what we're locked on validatePrevote(t, cs1, 2, vss[0], propBlockHash) diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 776e0653..df7bc7a4 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -283,7 +283,8 @@ loop: } func (state *state) add(clientID string, q Query, ch chan<- interface{}) { - // add query if needed + + // initialize clientToChannelMap per query if needed if _, ok := state.queries[q]; !ok { state.queries[q] = make(map[string]chan<- interface{}) } diff --git a/lite/base_certifier.go b/lite/base_certifier.go new file mode 100644 index 00000000..6f2b3da9 --- /dev/null +++ b/lite/base_certifier.go @@ -0,0 +1,72 @@ +package lite + +import ( + "bytes" + + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tmlibs/common" +) + +var _ Certifier = (*BaseCertifier)(nil) + +// BaseCertifier lets us check the validity of SignedHeaders at height or +// later, requiring sufficient votes (> 2/3) from the given valset. +// To certify blocks produced by a blockchain with mutable validator sets, +// use the InquiringCertifier. +// TODO: Handle unbonding time. +type BaseCertifier struct { + chainID string + height int64 + valset *types.ValidatorSet +} + +// NewBaseCertifier returns a new certifier initialized with a validator set at +// some height. +func NewBaseCertifier(chainID string, height int64, valset *types.ValidatorSet) *BaseCertifier { + if valset == nil || len(valset.Hash()) == 0 { + panic("NewBaseCertifier requires a valid valset") + } + return &BaseCertifier{ + chainID: chainID, + height: height, + valset: valset, + } +} + +// Implements Certifier. +func (bc *BaseCertifier) ChainID() string { + return bc.chainID +} + +// Implements Certifier. +func (bc *BaseCertifier) Certify(signedHeader types.SignedHeader) error { + + // We can't certify commits older than bc.height. + if signedHeader.Height < bc.height { + return cmn.NewError("BaseCertifier height is %v, cannot certify height %v", + bc.height, signedHeader.Height) + } + + // We can't certify with the wrong validator set. + if !bytes.Equal(signedHeader.ValidatorsHash, + bc.valset.Hash()) { + return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bc.valset.Hash()) + } + + // Do basic sanity checks. + err := signedHeader.ValidateBasic(bc.chainID) + if err != nil { + return cmn.ErrorWrap(err, "in certify") + } + + // Check commit signatures. 
+ err = bc.valset.VerifyCommit( + bc.chainID, signedHeader.Commit.BlockID, + signedHeader.Height, signedHeader.Commit) + if err != nil { + return cmn.ErrorWrap(err, "in certify") + } + + return nil +} diff --git a/lite/static_certifier_test.go b/lite/base_certifier_test.go similarity index 54% rename from lite/static_certifier_test.go rename to lite/base_certifier_test.go index 03567daa..20342c90 100644 --- a/lite/static_certifier_test.go +++ b/lite/base_certifier_test.go @@ -1,59 +1,58 @@ -package lite_test +package lite import ( "testing" "github.com/stretchr/testify/assert" + lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" ) -func TestStaticCert(t *testing.T) { +func TestBaseCert(t *testing.T) { // assert, require := assert.New(t), require.New(t) assert := assert.New(t) // require := require.New(t) - keys := lite.GenValKeys(4) + keys := genPrivKeys(4) // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! vals := keys.ToValidators(20, 10) // and a certifier based on our known set chainID := "test-static" - cert := lite.NewStaticCertifier(chainID, vals) + cert := NewBaseCertifier(chainID, 2, vals) cases := []struct { - keys lite.ValKeys + keys privKeys vals *types.ValidatorSet height int64 first, last int // who actually signs proper bool // true -> expect no error changed bool // true -> expect validator change error }{ + // height regression + {keys, vals, 1, 0, len(keys), false, false}, // perfect, signed by everyone - {keys, vals, 1, 0, len(keys), true, false}, + {keys, vals, 2, 0, len(keys), true, false}, // skip little guy is okay - {keys, vals, 2, 1, len(keys), true, false}, + {keys, vals, 3, 1, len(keys), true, false}, // but not the big guy - {keys, vals, 3, 0, len(keys) - 1, false, false}, - // even changing the power a little bit breaks the static validator - // the sigs are enough, but the validator hash is unknown - {keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true}, + {keys, vals, 4, 0, len(keys) - 1, false, false}, + // Changing the power a little bit breaks the static validator. + // The sigs are enough, but the validator hash is unknown. 
+ {keys, keys.ToValidators(20, 11), 5, 0, len(keys), false, true}, } for _, tc := range cases { - check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals, + sh := tc.keys.GenSignedHeader(chainID, tc.height, nil, tc.vals, tc.vals, []byte("foo"), []byte("params"), []byte("results"), tc.first, tc.last) - err := cert.Certify(check) + err := cert.Certify(sh) if tc.proper { assert.Nil(err, "%+v", err) } else { assert.NotNil(err) if tc.changed { - assert.True(liteErr.IsValidatorsChangedErr(err), "%+v", err) + assert.True(lerr.IsErrUnexpectedValidators(err), "%+v", err) } } } - } diff --git a/lite/client/main_test.go b/lite/client/main_test.go deleted file mode 100644 index 49b19436..00000000 --- a/lite/client/main_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package client_test - -import ( - "os" - "testing" - - "github.com/tendermint/tendermint/abci/example/kvstore" - - nm "github.com/tendermint/tendermint/node" - rpctest "github.com/tendermint/tendermint/rpc/test" -) - -var node *nm.Node - -func TestMain(m *testing.M) { - // start a tendermint node (and merkleeyes) in the background to test against - app := kvstore.NewKVStoreApplication() - node = rpctest.StartTendermint(app) - code := m.Run() - - // and shut down proper at the end - node.Stop() - node.Wait() - os.Exit(code) -} diff --git a/lite/client/provider.go b/lite/client/provider.go index 5f3d7245..188ce7d0 100644 --- a/lite/client/provider.go +++ b/lite/client/provider.go @@ -1,19 +1,19 @@ /* Package client defines a provider that uses a rpcclient to get information, which is used to get new headers -and validators directly from a node. +and validators directly from a Tendermint client. */ package client import ( - "bytes" + "fmt" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" + lerr "github.com/tendermint/tendermint/lite/errors" ) // SignStatusClient combines a SignClient and StatusClient. @@ -23,119 +23,111 @@ type SignStatusClient interface { } type provider struct { - node SignStatusClient - lastHeight int64 + chainID string + client SignStatusClient } -// NewProvider can wrap any rpcclient to expose it as -// a read-only provider. -func NewProvider(node SignStatusClient) lite.Provider { - return &provider{node: node} +// NewProvider implements Provider (but not PersistentProvider). +func NewProvider(chainID string, client SignStatusClient) lite.Provider { + return &provider{chainID: chainID, client: client} } // NewHTTPProvider can connect to a tendermint json-rpc endpoint // at the given url, and uses that as a read-only provider. -func NewHTTPProvider(remote string) lite.Provider { +func NewHTTPProvider(chainID, remote string) lite.Provider { return &provider{ - node: rpcclient.NewHTTP(remote, "/websocket"), + chainID: chainID, + client: rpcclient.NewHTTP(remote, "/websocket"), } } -// StatusClient returns the internal node as a StatusClient +// StatusClient returns the internal client as a StatusClient func (p *provider) StatusClient() rpcclient.StatusClient { - return p.node + return p.client } -// StoreCommit is a noop, as clients can only read from the chain... 
-func (p *provider) StoreCommit(_ lite.FullCommit) error { return nil } - -// GetHash gets the most recent validator and sees if it matches -// -// TODO: improve when the rpc interface supports more functionality -func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { - var fc lite.FullCommit - vals, err := p.node.Validators(nil) - // if we get no validators, or a different height, return an error +// LatestFullCommit implements Provider. +func (p *provider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc lite.FullCommit, err error) { + if chainID != p.chainID { + err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID) + return + } + if maxHeight != 0 && maxHeight < minHeight { + err = fmt.Errorf("need maxHeight == 0 or minHeight <= maxHeight, got %v and %v", + minHeight, maxHeight) + return + } + commit, err := p.fetchLatestCommit(minHeight, maxHeight) if err != nil { - return fc, err + return } - p.updateHeight(vals.BlockHeight) - vhash := types.NewValidatorSet(vals.Validators).Hash() - if !bytes.Equal(hash, vhash) { - return fc, liteErr.ErrCommitNotFound() - } - return p.seedFromVals(vals) + fc, err = p.fillFullCommit(commit.SignedHeader) + return } -// GetByHeight gets the validator set by height -func (p *provider) GetByHeight(h int64) (fc lite.FullCommit, err error) { - commit, err := p.node.Commit(&h) - if err != nil { - return fc, err - } - return p.seedFromCommit(commit) -} - -// LatestCommit returns the newest commit stored. -func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { - commit, err := p.GetLatestCommit() - if err != nil { - return fc, err - } - return p.seedFromCommit(commit) -} - -// GetLatestCommit should return the most recent commit there is, -// which handles queries for future heights as per the semantics -// of GetByHeight. -func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) { - status, err := p.node.Status() +// fetchLatestCommit fetches the latest commit from the client. +func (p *provider) fetchLatestCommit(minHeight int64, maxHeight int64) (*ctypes.ResultCommit, error) { + status, err := p.client.Status() if err != nil { return nil, err } - return p.node.Commit(&status.SyncInfo.LatestBlockHeight) + if status.SyncInfo.LatestBlockHeight < minHeight { + err = fmt.Errorf("provider is at %v but require minHeight=%v", + status.SyncInfo.LatestBlockHeight, minHeight) + return nil, err + } + if maxHeight == 0 { + maxHeight = status.SyncInfo.LatestBlockHeight + } else if status.SyncInfo.LatestBlockHeight < maxHeight { + maxHeight = status.SyncInfo.LatestBlockHeight + } + return p.client.Commit(&maxHeight) } -// CommitFromResult ... -func CommitFromResult(result *ctypes.ResultCommit) lite.Commit { - return (lite.Commit)(result.SignedHeader) +// Implements Provider. 
+func (p *provider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + return p.getValidatorSet(chainID, height) } -func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (lite.FullCommit, error) { - // now get the commits and build a full commit - commit, err := p.node.Commit(&vals.BlockHeight) +func (p *provider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + if chainID != p.chainID { + err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID) + return + } + if height < 1 { + err = fmt.Errorf("expected height >= 1, got %v", height) + return + } + heightPtr := new(int64) + *heightPtr = height + res, err := p.client.Validators(heightPtr) + if err != nil { + // TODO pass through other types of errors. + return nil, lerr.ErrMissingValidators(chainID, height) + } + valset = types.NewValidatorSet(res.Validators) + valset.TotalVotingPower() // to test deep equality. + return +} + +// This does no validation. +func (p *provider) fillFullCommit(signedHeader types.SignedHeader) (fc lite.FullCommit, err error) { + fc.SignedHeader = signedHeader + + // Get the validators. + valset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height) if err != nil { return lite.FullCommit{}, err } - fc := lite.NewFullCommit( - CommitFromResult(commit), - types.NewValidatorSet(vals.Validators), - ) - return fc, nil -} + fc.Validators = valset -func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc lite.FullCommit, err error) { - fc.Commit = CommitFromResult(commit) - - // now get the proper validators - vals, err := p.node.Validators(&commit.Header.Height) + // Get the next validators. + nvalset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height+1) if err != nil { - return fc, err + return lite.FullCommit{}, err + } else { + fc.NextValidators = nvalset } - // make sure they match the commit (as we cannot enforce height) - vset := types.NewValidatorSet(vals.Validators) - if !bytes.Equal(vset.Hash(), commit.Header.ValidatorsHash) { - return fc, liteErr.ErrValidatorsChanged() - } - - p.updateHeight(commit.Header.Height) - fc.Validators = vset return fc, nil } - -func (p *provider) updateHeight(h int64) { - if h > p.lastHeight { - p.lastHeight = h - } -} diff --git a/lite/client/provider_test.go b/lite/client/provider_test.go index 94d47da3..2385bbbe 100644 --- a/lite/client/provider_test.go +++ b/lite/client/provider_test.go @@ -1,63 +1,73 @@ package client import ( + "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/abci/example/kvstore" rpcclient "github.com/tendermint/tendermint/rpc/client" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) +// TODO fix tests!! 
+func TestMain(m *testing.M) { + app := kvstore.NewKVStoreApplication() + node := rpctest.StartTendermint(app) + + code := m.Run() + + node.Stop() + node.Wait() + os.Exit(code) +} + func TestProvider(t *testing.T) { assert, require := assert.New(t), require.New(t) cfg := rpctest.GetConfig() rpcAddr := cfg.RPC.ListenAddress - genDoc, _ := types.GenesisDocFromFile(cfg.GenesisFile()) + genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) + if err != nil { + panic(err) + } chainID := genDoc.ChainID - p := NewHTTPProvider(rpcAddr) + t.Log("chainID:", chainID) + p := NewHTTPProvider(chainID, rpcAddr) require.NotNil(t, p) // let it produce some blocks - err := rpcclient.WaitForHeight(p.(*provider).node, 6, nil) + err = rpcclient.WaitForHeight(p.(*provider).client, 6, nil) require.Nil(err) // let's get the highest block - seed, err := p.LatestCommit() + fc, err := p.LatestFullCommit(chainID, 1, 1<<63-1) require.Nil(err, "%+v", err) - sh := seed.Height() - vhash := seed.Header.ValidatorsHash + sh := fc.Height() assert.True(sh < 5000) // let's check this is valid somehow - assert.Nil(seed.ValidateBasic(chainID)) - cert := lite.NewStaticCertifier(chainID, seed.Validators) + assert.Nil(fc.ValidateBasic(chainID)) // historical queries now work :) lower := sh - 5 - seed, err = p.GetByHeight(lower) + fc, err = p.LatestFullCommit(chainID, lower, lower) assert.Nil(err, "%+v", err) - assert.Equal(lower, seed.Height()) + assert.Equal(lower, fc.Height()) - // also get by hash (given the match) - seed, err = p.GetByHash(vhash) - require.Nil(err, "%+v", err) - require.Equal(vhash, seed.Header.ValidatorsHash) - err = cert.Certify(seed.Commit) - assert.Nil(err, "%+v", err) + /* + // also get by hash (given the match) + fc, err = p.GetByHash(vhash) + require.Nil(err, "%+v", err) + require.Equal(vhash, fc.Header.ValidatorsHash) - // get by hash fails without match - seed, err = p.GetByHash([]byte("foobar")) - assert.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - // storing the seed silently ignored - err = p.StoreCommit(seed) - assert.Nil(err, "%+v", err) + // get by hash fails without match + fc, err = p.GetByHash([]byte("foobar")) + assert.NotNil(err) + assert.True(liteErr.IsCommitNotFoundErr(err)) + */ } diff --git a/lite/commit.go b/lite/commit.go index 11ae6d7f..8449bf69 100644 --- a/lite/commit.go +++ b/lite/commit.go @@ -2,98 +2,92 @@ package lite import ( "bytes" - - "github.com/pkg/errors" + "errors" + "fmt" "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" ) -// Certifier checks the votes to make sure the block really is signed properly. -// Certifier must know the current set of validitors by some other means. -type Certifier interface { - Certify(check Commit) error - ChainID() string -} - -// Commit is basically the rpc /commit response, but extended -// -// This is the basepoint for proving anything on the blockchain. It contains -// a signed header. If the signatures are valid and > 2/3 of the known set, -// we can store this checkpoint and use it to prove any number of aspects of -// the system: such as txs, abci state, validator sets, etc... -type Commit types.SignedHeader - -// FullCommit is a commit and the actual validator set, -// the base info you need to update to a given point, -// assuming knowledge of some previous validator set +// FullCommit is a signed header (the block header and a commit that signs it), +// the validator set which signed the commit, and the next validator set. 
The +// next validator set (which is proven from the block header) allows us to +// revert to block-by-block updating of lite certifier's latest validator set, +// even in the face of arbitrarily power changes. type FullCommit struct { - Commit `json:"commit"` - Validators *types.ValidatorSet `json:"validator_set"` + SignedHeader types.SignedHeader `json:"signed_header"` + Validators *types.ValidatorSet `json:"validator_set"` + NextValidators *types.ValidatorSet `json:"next_validator_set"` } // NewFullCommit returns a new FullCommit. -func NewFullCommit(commit Commit, vals *types.ValidatorSet) FullCommit { +func NewFullCommit(signedHeader types.SignedHeader, valset, nvalset *types.ValidatorSet) FullCommit { return FullCommit{ - Commit: commit, - Validators: vals, + SignedHeader: signedHeader, + Validators: valset, + NextValidators: nvalset, } } +// Validate the components and check for consistency. +// This also checks to make sure that Validators actually +// signed the SignedHeader.Commit. +// If > 2/3 did not sign the Commit from fc.Validators, it +// is not a valid commit! +func (fc FullCommit) ValidateBasic(chainID string) error { + // Ensure that Validators exists and matches the header. + if fc.Validators.Size() == 0 { + return errors.New("need FullCommit.Validators") + } + if !bytes.Equal( + fc.SignedHeader.ValidatorsHash, + fc.Validators.Hash()) { + return fmt.Errorf("header has vhash %X but valset hash is %X", + fc.SignedHeader.ValidatorsHash, + fc.Validators.Hash(), + ) + } + // Ensure that NextValidators exists and matches the header. + if fc.NextValidators.Size() == 0 { + return errors.New("need FullCommit.NextValidators") + } + if !bytes.Equal( + fc.SignedHeader.NextValidatorsHash, + fc.NextValidators.Hash()) { + return fmt.Errorf("header has next vhash %X but next valset hash is %X", + fc.SignedHeader.NextValidatorsHash, + fc.NextValidators.Hash(), + ) + } + // Validate the header. + err := fc.SignedHeader.ValidateBasic(chainID) + if err != nil { + return err + } + // Validate the signatures on the commit. + hdr, cmt := fc.SignedHeader.Header, fc.SignedHeader.Commit + err = fc.Validators.VerifyCommit( + hdr.ChainID, cmt.BlockID, + hdr.Height, cmt) + if err != nil { + return err + } + + // All good! + return nil +} + // Height returns the height of the header. -func (c Commit) Height() int64 { - if c.Header == nil { - return 0 +func (fc FullCommit) Height() int64 { + if fc.SignedHeader.Header == nil { + panic("should not happen") } - return c.Header.Height + return fc.SignedHeader.Height } -// ValidatorsHash returns the hash of the validator set. -func (c Commit) ValidatorsHash() []byte { - if c.Header == nil { - return nil +// ChainID returns the chainID of the header. +func (fc FullCommit) ChainID() string { + if fc.SignedHeader.Header == nil { + panic("should not happen") } - return c.Header.ValidatorsHash -} - -// ValidateBasic does basic consistency checks and makes sure the headers -// and commits are all consistent and refer to our chain. -// -// Make sure to use a Verifier to validate the signatures actually provide -// a significantly strong proof for this header's validity. 
-func (c Commit) ValidateBasic(chainID string) error { - // make sure the header is reasonable - if c.Header == nil { - return errors.New("Commit missing header") - } - if c.Header.ChainID != chainID { - return errors.Errorf("Header belongs to another chain '%s' not '%s'", - c.Header.ChainID, chainID) - } - - if c.Commit == nil { - return errors.New("Commit missing signatures") - } - - // make sure the header and commit match (height and hash) - if c.Commit.Height() != c.Header.Height { - return liteErr.ErrHeightMismatch(c.Commit.Height(), c.Header.Height) - } - hhash := c.Header.Hash() - chash := c.Commit.BlockID.Hash - if !bytes.Equal(hhash, chash) { - return errors.Errorf("Commits sign block %X header is block %X", - chash, hhash) - } - - // make sure the commit is reasonable - err := c.Commit.ValidateBasic() - if err != nil { - return errors.WithStack(err) - } - - // looks good, we just need to make sure the signatures are really from - // empowered validators - return nil + return fc.SignedHeader.ChainID } diff --git a/lite/dbprovider.go b/lite/dbprovider.go new file mode 100644 index 00000000..834bab66 --- /dev/null +++ b/lite/dbprovider.go @@ -0,0 +1,168 @@ +package lite + +import ( + "fmt" + "regexp" + "strconv" + + amino "github.com/tendermint/go-amino" + crypto "github.com/tendermint/tendermint/crypto" + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tmlibs/db" +) + +func signedHeaderKey(chainID string, height int64) []byte { + return []byte(fmt.Sprintf("%s/%010d/sh", chainID, height)) +} + +var signedHeaderKeyPattern = regexp.MustCompile(`([^/]+)/([0-9]*)/sh`) + +func parseSignedHeaderKey(key []byte) (chainID string, height int64, ok bool) { + submatch := signedHeaderKeyPattern.FindSubmatch(key) + if submatch == nil { + return "", 0, false + } + chainID = string(submatch[1]) + heightStr := string(submatch[2]) + heightInt, err := strconv.Atoi(heightStr) + if err != nil { + return "", 0, false + } + height = int64(heightInt) + ok = true // good! + return +} + +func validatorSetKey(chainID string, height int64) []byte { + return []byte(fmt.Sprintf("%s/%010d/vs", chainID, height)) +} + +type DBProvider struct { + chainID string + db dbm.DB + cdc *amino.Codec +} + +func NewDBProvider(db dbm.DB) *DBProvider { + //db = dbm.NewDebugDB("db provider "+cmn.RandStr(4), db) + cdc := amino.NewCodec() + crypto.RegisterAmino(cdc) + dbp := &DBProvider{db: db, cdc: cdc} + return dbp +} + +// Implements PersistentProvider. +func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { + + batch := dbp.db.NewBatch() + + // Save the fc.validators. + // We might be overwriting what we already have, but + // it makes the logic easier for now. + vsKey := validatorSetKey(fc.ChainID(), fc.Height()) + vsBz, err := dbp.cdc.MarshalBinary(fc.Validators) + if err != nil { + return err + } + batch.Set(vsKey, vsBz) + + // Save the fc.NextValidators. + nvsKey := validatorSetKey(fc.ChainID(), fc.Height()+1) + nvsBz, err := dbp.cdc.MarshalBinary(fc.NextValidators) + if err != nil { + return err + } + batch.Set(nvsKey, nvsBz) + + // Save the fc.SignedHeader + shKey := signedHeaderKey(fc.ChainID(), fc.Height()) + shBz, err := dbp.cdc.MarshalBinary(fc.SignedHeader) + if err != nil { + return err + } + batch.Set(shKey, shBz) + + // And write sync. + batch.WriteSync() + return nil +} + +// Implements Provider. 
+func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) ( + FullCommit, error) { + + if minHeight <= 0 { + minHeight = 1 + } + if maxHeight == 0 { + maxHeight = 1<<63 - 1 + } + + itr := dbp.db.ReverseIterator( + signedHeaderKey(chainID, maxHeight), + signedHeaderKey(chainID, minHeight-1), + ) + defer itr.Close() + + for itr.Valid() { + key := itr.Key() + _, _, ok := parseSignedHeaderKey(key) + if !ok { + // Skip over other keys. + itr.Next() + continue + } else { + // Found the latest full commit signed header. + shBz := itr.Value() + sh := types.SignedHeader{} + err := dbp.cdc.UnmarshalBinary(shBz, &sh) + if err != nil { + return FullCommit{}, err + } else { + return dbp.fillFullCommit(sh) + } + } + } + return FullCommit{}, lerr.ErrCommitNotFound() +} + +func (dbp *DBProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + return dbp.getValidatorSet(chainID, height) +} + +func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + vsBz := dbp.db.Get(validatorSetKey(chainID, height)) + if vsBz == nil { + err = lerr.ErrMissingValidators(chainID, height) + return + } + err = dbp.cdc.UnmarshalBinary(vsBz, &valset) + if err != nil { + return + } + valset.TotalVotingPower() // to test deep equality. + return +} + +func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) { + var chainID = sh.ChainID + var height = sh.Height + var valset, nvalset *types.ValidatorSet + // Load the validator set. + valset, err := dbp.getValidatorSet(chainID, height) + if err != nil { + return FullCommit{}, err + } + // Load the next validator set. + nvalset, err = dbp.getValidatorSet(chainID, height+1) + if err != nil { + return FullCommit{}, err + } + // Return filled FullCommit. + return FullCommit{ + SignedHeader: sh, + Validators: valset, + NextValidators: nvalset, + }, nil +} diff --git a/lite/doc.go b/lite/doc.go index 89dc702f..881880f6 100644 --- a/lite/doc.go +++ b/lite/doc.go @@ -1,133 +1,139 @@ /* -Package lite allows you to securely validate headers -without a full node. +Package lite allows you to securely validate headers without a full node. -This library pulls together all the crypto and algorithms, -so given a relatively recent (< unbonding period) known -validator set, one can get indisputable proof that data is in -the chain (current state) or detect if the node is lying to -the client. +This library pulls together all the crypto and algorithms, so given a +relatively recent (< unbonding period) known validator set, one can get +indisputable proof that data is in the chain (current state) or detect if the +node is lying to the client. -Tendermint RPC exposes a lot of info, but a malicious node -could return any data it wants to queries, or even to block -headers, even making up fake signatures from non-existent -validators to justify it. This is a lot of logic to get -right, to be contained in a small, easy to use library, -that does this for you, so you can just build nice UI. +Tendermint RPC exposes a lot of info, but a malicious node could return any +data it wants to queries, or even to block headers, even making up fake +signatures from non-existent validators to justify it. This is a lot of logic +to get right, to be contained in a small, easy to use library, that does this +for you, so you can just build nice applications. -We design for clients who have no strong trust relationship -with any tendermint node, just the validator set as a whole. 
-Beyond building nice mobile or desktop applications, the -cosmos hub is another important example of a client, -that needs undeniable proof without syncing the full chain, -in order to efficiently implement IBC. +We design for clients who have no strong trust relationship with any Tendermint +node, just the blockchain and validator set as a whole. -Commits +# Data structures -There are two main data structures that we pass around - Commit -and FullCommit. Both of them mirror what information is -exposed in tendermint rpc. +## SignedHeader -Commit is a block header along with enough validator signatures -to prove its validity (> 2/3 of the voting power). A FullCommit -is a Commit along with the full validator set. When the -validator set doesn't change, the Commit is enough, but since -the block header only has a hash, we need the FullCommit to -follow any changes to the validator set. +SignedHeader is a block header along with a commit -- enough validator +precommit-vote signatures to prove its validity (> 2/3 of the voting power) +given the validator set responsible for signing that header. A FullCommit is a +SignedHeader along with the current and next validator sets. -Certifiers +The hash of the next validator set is included and signed in the SignedHeader. +This lets the lite client keep track of arbitrary changes to the validator set, +as every change to the validator set must be approved by inclusion in the +header and signed in the commit. -A Certifier validates a new Commit given the currently known -state. There are three different types of Certifiers exposed, -each one building on the last one, with additional complexity. +In the worst case, with every block changing the validators around completely, +a lite client can sync up with every block header to verify each validator set +change on the chain. In practice, most applications will not have frequent +drastic updates to the validator set, so the logic defined in this package for +lite client syncing is optimized to use intelligent bisection and +block-skipping for efficient sourcing and verification of these data structures +and updates to the validator set (see the InquiringCertifier for more +information). -Static - given the validator set upon initialization. Verifies -all signatures against that set and if the validator set -changes, it will reject all headers. +The FullCommit is also declared in this package as a convenience structure, +which includes the SignedHeader along with the full current and next +ValidatorSets. -Dynamic - This wraps Static and has the same Certify -method. However, it adds an Update method, which can be called -with a FullCommit when the validator set changes. If it can -prove this is a valid transition, it will update the validator -set. +## Certifier -Inquiring - this wraps Dynamic and implements an auto-update -strategy on top of the Dynamic update. If a call to -Certify fails as the validator set has changed, then it -attempts to find a FullCommit and Update to that header. -To get these FullCommits, it makes use of a Provider. +A Certifier validates a new SignedHeader given the currently known state. There +are two different types of Certifiers provided. -Providers +BaseCertifier - given a validator set and a height, this Certifier verifies +that > 2/3 of the voting power of the given validator set had signed the +SignedHeader, and that the SignedHeader was to be signed by the exact given +validator set, and that the height of the commit is at least height (or +greater). 
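For a rough sense of how these pieces fit together, here is a minimal sketch (not part of this patch) of certifying the header at the next height against the NextValidators of a trusted FullCommit. It assumes NewBaseCertifier is exported from package lite with the (chainID, height, valset) signature used elsewhere in this patch; the package name, function name, and remote address below are placeholders.

```go
package example // hypothetical helper package, for illustration only

import (
	"github.com/tendermint/tendermint/lite"
	"github.com/tendermint/tendermint/lite/client"
	"github.com/tendermint/tendermint/types"
)

// CertifyNext fetches the latest FullCommit a node will serve and uses its
// NextValidators (whose hash is committed to in the header) to check a
// SignedHeader claimed for the following height.
func CertifyNext(chainID, remote string, shdr types.SignedHeader) error {
	source := client.NewHTTPProvider(chainID, remote)

	// Latest full commit the node offers. In a real client this would be a
	// commit we already trust, not one taken straight from the source.
	tfc, err := source.LatestFullCommit(chainID, 1, 1<<63-1)
	if err != nil {
		return err
	}
	if err := tfc.ValidateBasic(chainID); err != nil {
		return err
	}

	// BaseCertifier: expects shdr to be signed by exactly tfc.NextValidators,
	// at a height of at least tfc.Height()+1.
	cert := lite.NewBaseCertifier(chainID, tfc.Height()+1, tfc.NextValidators)
	return cert.Certify(shdr)
}
```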
-A Provider allows us to store and retrieve the FullCommits, -to provide memory to the Inquiring Certifier. +SignedHeader.Commit may be signed by a different validator set, it can get +certified with a BaseCertifier as long as sufficient signatures from the +previous validator set are present in the commit. -NewMemStoreProvider - in-memory cache. +InquiringCertifier - this certifier implements an auto-update and persistence +strategy to certify any SignedHeader of the blockchain. -files.NewProvider - disk backed storage. +## Provider and PersistentProvider -client.NewHTTPProvider - query tendermint rpc. +A Provider allows us to store and retrieve the FullCommits. -NewCacheProvider - combine multiple providers. +```go +type Provider interface { + // LatestFullCommit returns the latest commit with + // minHeight <= height <= maxHeight. + // If maxHeight is zero, returns the latest where + // minHeight <= height. + LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) +} +``` -The suggested use for local light clients is -client.NewHTTPProvider for getting new data (Source), -and NewCacheProvider(NewMemStoreProvider(), -files.NewProvider()) to store confirmed headers (Trusted) +* client.NewHTTPProvider - query Tendermint rpc. -How We Track Validators +A PersistentProvider is a Provider that also allows for saving state. This is +used by the InquiringCertifier for persistence. -Unless you want to blindly trust the node you talk with, you -need to trace every response back to a hash in a block header -and validate the commit signatures of that block header match -the proper validator set. If there is a contant validator -set, you store it locally upon initialization of the client, +```go +type PersistentProvider interface { + Provider + + // SaveFullCommit saves a FullCommit (without verification). + SaveFullCommit(fc FullCommit) error +} +``` + +* DBProvider - persistence provider for use with any tmlibs/DB. +* MultiProvider - combine multiple providers. + +The suggested use for local light clients is client.NewHTTPProvider(...) for +getting new data (Source), and NewMultiProvider(NewDBProvider(dbm.NewMemDB()), +NewDBProvider(db.NewFileDB(...))) to store confirmed full commits (Trusted) + + +# How We Track Validators + +Unless you want to blindly trust the node you talk with, you need to trace +every response back to a hash in a block header and validate the commit +signatures of that block header match the proper validator set. If there is a +static validator set, you store it locally upon initialization of the client, and check against that every time. -Once there is a dynamic validator set, the issue of -verifying a block becomes a bit more tricky. There is -background information in a -github issue (https://github.com/tendermint/tendermint/issues/377). +If the validator set for the blockchain is dynamic, verifying block commits is +a bit more involved -- if there is a block at height H with a known (trusted) +validator set V, and another block at height H' (H' > H) with validator set V' +!= V, then we want a way to safely update it. -In short, if there is a block at height H with a known -(trusted) validator set V, and another block at height H' -(H' > H) with validator set V' != V, then we want a way to -safely update it. +First, we get the new (unconfirmed) validator set V' and verify that H' is +internally consistent and properly signed by this V'. 
Assuming it is a valid +block, we check that at least 2/3 of the validators in V also signed it, +meaning it would also be valid under our old assumptions. Then, we accept H' +and V' as valid and trusted and use that to validate for heights X > H' until a +more recent and updated validator set is found. -First, get the new (unconfirmed) validator set V' and -verify H' is internally consistent and properly signed by -this V'. Assuming it is a valid block, we check that at -least 2/3 of the validators in V also signed it, meaning -it would also be valid under our old assumptions. -That should be enough, but we can also check that the -V counts for at least 2/3 of the total votes in H' -for extra safety (we can have a discussion if this is -strictly required). If we can verify all this, -then we can accept H' and V' as valid and use that to -validate all blocks X > H'. +If we cannot update directly from H -> H' because there was too much change to +the validator set, then we can look for some Hm (H < Hm < H') with a validator +set Vm. Then we try to update H -> Hm and then Hm -> H' in two steps. If one +of these steps doesn't work, then we continue bisecting, until we eventually +have to externally validate the valdiator set changes at every block. -If we cannot update directly from H -> H' because there was -too much change to the validator set, then we can look for -some Hm (H < Hm < H') with a validator set Vm. Then we try -to update H -> Hm and Hm -> H' in two separate steps. -If one of these steps doesn't work, then we continue -bisecting, until we eventually have to externally -validate the valdiator set changes at every block. +Since we never trust any server in this protocol, only the signatures +themselves, it doesn't matter if the seed comes from a (possibly malicious) +node or a (possibly malicious) user. We can accept it or reject it based only +on our trusted validator set and cryptographic proofs. This makes it extremely +important to verify that you have the proper validator set when initializing +the client, as that is the root of all trust. -Since we never trust any server in this protocol, only the -signatures themselves, it doesn't matter if the seed comes -from a (possibly malicious) node or a (possibly malicious) user. -We can accept it or reject it based only on our trusted -validator set and cryptographic proofs. This makes it -extremely important to verify that you have the proper -validator set when initializing the client, as that is the -root of all trust. +The software currently assumes that the unbonding period is infinite in +duration. If the InquiringCertifier hasn't been updated in a while, you should +manually verify the block headers using other sources. -Or course, this assumes that the known block is within the -unbonding period to avoid the "nothing at stake" problem. -If you haven't seen the state in a few months, you will need -to manually verify the new validator set hash using off-chain -means (the same as getting the initial hash). +TODO: Update the software to handle cases around the unbonding period. 
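For illustration, a minimal sketch (not part of this patch) of the auto-updating flow described above: seed a trusted DBProvider with a root-of-trust FullCommit obtained and verified out-of-band, then let an InquiringCertifier pull, verify, and persist whatever intermediate commits it needs from an HTTP source. The package and function names and the remote address are placeholders; constructor signatures are assumed to match those used elsewhere in this patch.

```go
package example // hypothetical, for illustration only

import (
	"github.com/tendermint/tendermint/lite"
	"github.com/tendermint/tendermint/lite/client"
	"github.com/tendermint/tendermint/types"
	dbm "github.com/tendermint/tmlibs/db"
)

// SyncAndCertify seeds the trusted store with a FullCommit that is the root
// of trust, then asks the InquiringCertifier to certify shdr, letting it
// source and save any full commits it needs along the way.
func SyncAndCertify(chainID, remote string, rootOfTrust lite.FullCommit, shdr types.SignedHeader) error {
	trusted := lite.NewDBProvider(dbm.NewMemDB()) // use a persistent DB in practice
	if err := trusted.SaveFullCommit(rootOfTrust); err != nil {
		return err
	}
	source := client.NewHTTPProvider(chainID, remote)

	ic, err := lite.NewInquiringCertifier(chainID, trusted, source)
	if err != nil {
		return err
	}
	return ic.Certify(shdr)
}
```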
*/ package lite diff --git a/lite/dynamic_certifier.go b/lite/dynamic_certifier.go deleted file mode 100644 index 0ddace8b..00000000 --- a/lite/dynamic_certifier.go +++ /dev/null @@ -1,96 +0,0 @@ -package lite - -import ( - "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -var _ Certifier = (*DynamicCertifier)(nil) - -// DynamicCertifier uses a StaticCertifier for Certify, but adds an -// Update method to allow for a change of validators. -// -// You can pass in a FullCommit with another validator set, -// and if this is a provably secure transition (< 1/3 change, -// sufficient signatures), then it will update the -// validator set for the next Certify call. -// For security, it will only follow validator set changes -// going forward. -type DynamicCertifier struct { - cert *StaticCertifier - lastHeight int64 -} - -// NewDynamic returns a new dynamic certifier. -func NewDynamicCertifier(chainID string, vals *types.ValidatorSet, height int64) *DynamicCertifier { - return &DynamicCertifier{ - cert: NewStaticCertifier(chainID, vals), - lastHeight: height, - } -} - -// ChainID returns the chain id of this certifier. -// Implements Certifier. -func (dc *DynamicCertifier) ChainID() string { - return dc.cert.ChainID() -} - -// Validators returns the validators of this certifier. -func (dc *DynamicCertifier) Validators() *types.ValidatorSet { - return dc.cert.vSet -} - -// Hash returns the hash of this certifier. -func (dc *DynamicCertifier) Hash() []byte { - return dc.cert.Hash() -} - -// LastHeight returns the last height of this certifier. -func (dc *DynamicCertifier) LastHeight() int64 { - return dc.lastHeight -} - -// Certify will verify whether the commit is valid and will update the height if it is or return an -// error if it is not. -// Implements Certifier. -func (dc *DynamicCertifier) Certify(check Commit) error { - err := dc.cert.Certify(check) - if err == nil { - // update last seen height if input is valid - dc.lastHeight = check.Height() - } - return err -} - -// Update will verify if this is a valid change and update -// the certifying validator set if safe to do so. -// -// Returns an error if update is impossible (invalid proof or IsTooMuchChangeErr) -func (dc *DynamicCertifier) Update(fc FullCommit) error { - // ignore all checkpoints in the past -> only to the future - h := fc.Height() - if h <= dc.lastHeight { - return liteErr.ErrPastTime() - } - - // first, verify if the input is self-consistent.... - err := fc.ValidateBasic(dc.ChainID()) - if err != nil { - return err - } - - // now, make sure not too much change... 
meaning this commit - // would be approved by the currently known validator set - // as well as the new set - commit := fc.Commit.Commit - err = dc.Validators().VerifyCommitAny(fc.Validators, dc.ChainID(), commit.BlockID, h, commit) - if err != nil { - return liteErr.ErrTooMuchChange() - } - - // looks good, we can update - dc.cert = NewStaticCertifier(dc.ChainID(), fc.Validators) - dc.lastHeight = h - return nil -} diff --git a/lite/dynamic_certifier_test.go b/lite/dynamic_certifier_test.go deleted file mode 100644 index 88c145f9..00000000 --- a/lite/dynamic_certifier_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package lite_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/lite" - "github.com/tendermint/tendermint/lite/errors" -) - -// TestDynamicCert just makes sure it still works like StaticCert -func TestDynamicCert(t *testing.T) { - // assert, require := assert.New(t), require.New(t) - assert := assert.New(t) - // require := require.New(t) - - keys := lite.GenValKeys(4) - // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! - vals := keys.ToValidators(20, 10) - // and a certifier based on our known set - chainID := "test-dyno" - cert := lite.NewDynamicCertifier(chainID, vals, 0) - - cases := []struct { - keys lite.ValKeys - vals *types.ValidatorSet - height int64 - first, last int // who actually signs - proper bool // true -> expect no error - changed bool // true -> expect validator change error - }{ - // perfect, signed by everyone - {keys, vals, 1, 0, len(keys), true, false}, - // skip little guy is okay - {keys, vals, 2, 1, len(keys), true, false}, - // but not the big guy - {keys, vals, 3, 0, len(keys) - 1, false, false}, - // even changing the power a little bit breaks the static validator - // the sigs are enough, but the validator hash is unknown - {keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true}, - } - - for _, tc := range cases { - check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals, - []byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last) - err := cert.Certify(check) - if tc.proper { - assert.Nil(err, "%+v", err) - assert.Equal(cert.LastHeight(), tc.height) - } else { - assert.NotNil(err) - if tc.changed { - assert.True(errors.IsValidatorsChangedErr(err), "%+v", err) - } - } - } -} - -// TestDynamicUpdate makes sure we update safely and sanely -func TestDynamicUpdate(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - chainID := "test-dyno-up" - keys := lite.GenValKeys(5) - vals := keys.ToValidators(20, 0) - cert := lite.NewDynamicCertifier(chainID, vals, 40) - - // one valid block to give us a sense of time - h := int64(100) - good := keys.GenCommit(chainID, h, nil, vals, []byte("foo"), []byte("params"), []byte("results"), 0, len(keys)) - err := cert.Certify(good) - require.Nil(err, "%+v", err) - - // some new sets to try later - keys2 := keys.Extend(2) - keys3 := keys2.Extend(4) - - // we try to update with some blocks - cases := []struct { - keys lite.ValKeys - vals *types.ValidatorSet - height int64 - first, last int // who actually signs - proper bool // true -> expect no error - changed bool // true -> expect too much change error - }{ - // same validator set, well signed, of course it is okay - {keys, vals, h + 10, 0, len(keys), true, false}, - // same validator set, poorly signed, fails - {keys, vals, h + 20, 2, len(keys), false, false}, - - // shift the power 
a little, works if properly signed - {keys, keys.ToValidators(10, 0), h + 30, 1, len(keys), true, false}, - // but not on a poor signature - {keys, keys.ToValidators(10, 0), h + 40, 2, len(keys), false, false}, - // and not if it was in the past - {keys, keys.ToValidators(10, 0), h + 25, 0, len(keys), false, false}, - - // let's try to adjust to a whole new validator set (we have 5/7 of the votes) - {keys2, keys2.ToValidators(10, 0), h + 33, 0, len(keys2), true, false}, - - // properly signed but too much change, not allowed (only 7/11 validators known) - {keys3, keys3.ToValidators(10, 0), h + 50, 0, len(keys3), false, true}, - } - - for _, tc := range cases { - fc := tc.keys.GenFullCommit(chainID, tc.height, nil, tc.vals, - []byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last) - err := cert.Update(fc) - if tc.proper { - assert.Nil(err, "%d: %+v", tc.height, err) - // we update last seen height - assert.Equal(cert.LastHeight(), tc.height) - // and we update the proper validators - assert.EqualValues(fc.Header.ValidatorsHash, cert.Hash()) - } else { - assert.NotNil(err, "%d", tc.height) - // we don't update the height - assert.NotEqual(cert.LastHeight(), tc.height) - if tc.changed { - assert.True(errors.IsTooMuchChangeErr(err), - "%d: %+v", tc.height, err) - } - } - } -} diff --git a/lite/errors/errors.go b/lite/errors/errors.go index 99e42a0b..c38ecf88 100644 --- a/lite/errors/errors.go +++ b/lite/errors/errors.go @@ -3,90 +3,110 @@ package errors import ( "fmt" - "github.com/pkg/errors" + cmn "github.com/tendermint/tmlibs/common" ) -var ( - errValidatorsChanged = fmt.Errorf("Validators differ between header and certifier") - errCommitNotFound = fmt.Errorf("Commit not found by provider") - errTooMuchChange = fmt.Errorf("Validators change too much to safely update") - errPastTime = fmt.Errorf("Update older than certifier height") - errNoPathFound = fmt.Errorf("Cannot find a path of validators") -) +//---------------------------------------- +// Error types -// IsCommitNotFoundErr checks whether an error is due to missing data -func IsCommitNotFoundErr(err error) bool { - return err != nil && (errors.Cause(err) == errCommitNotFound) +type errCommitNotFound struct{} + +func (e errCommitNotFound) Error() string { + return "Commit not found by provider" } +type errUnexpectedValidators struct { + got []byte + want []byte +} + +func (e errUnexpectedValidators) Error() string { + return fmt.Sprintf("Validator set is different. Got %X want %X", + e.got, e.want) +} + +type errTooMuchChange struct{} + +func (e errTooMuchChange) Error() string { + return "Insufficient signatures to validate due to valset changes" +} + +type errMissingValidators struct { + chainID string + height int64 +} + +func (e errMissingValidators) Error() string { + return fmt.Sprintf("Validators are unknown or missing for chain %s and height %d", + e.chainID, e.height) +} + +//---------------------------------------- +// Methods for above error types + +//----------------- +// ErrCommitNotFound + // ErrCommitNotFound indicates that a the requested commit was not found. func ErrCommitNotFound() error { - return errors.WithStack(errCommitNotFound) + return cmn.ErrorWrap(errCommitNotFound{}, "") } -// IsValidatorsChangedErr checks whether an error is due -// to a differing validator set. 
-func IsValidatorsChangedErr(err error) bool { - return err != nil && (errors.Cause(err) == errValidatorsChanged) +func IsErrCommitNotFound(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errCommitNotFound) + return ok + } + return false } -// ErrValidatorsChanged indicates that the validator set was changed between two commits. -func ErrValidatorsChanged() error { - return errors.WithStack(errValidatorsChanged) +//----------------- +// ErrUnexpectedValidators + +// ErrUnexpectedValidators indicates a validator set mismatch. +func ErrUnexpectedValidators(got, want []byte) error { + return cmn.ErrorWrap(errUnexpectedValidators{ + got: got, + want: want, + }, "") } -// IsTooMuchChangeErr checks whether an error is due to too much change -// between these validators sets. -func IsTooMuchChangeErr(err error) bool { - return err != nil && (errors.Cause(err) == errTooMuchChange) +func IsErrUnexpectedValidators(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errUnexpectedValidators) + return ok + } + return false } +//----------------- +// ErrTooMuchChange + // ErrTooMuchChange indicates that the underlying validator set was changed by >1/3. func ErrTooMuchChange() error { - return errors.WithStack(errTooMuchChange) + return cmn.ErrorWrap(errTooMuchChange{}, "") } -// IsPastTimeErr ... -func IsPastTimeErr(err error) bool { - return err != nil && (errors.Cause(err) == errPastTime) -} - -// ErrPastTime ... -func ErrPastTime() error { - return errors.WithStack(errPastTime) -} - -// IsNoPathFoundErr checks whether an error is due to no path of -// validators in provider from where we are to where we want to be -func IsNoPathFoundErr(err error) bool { - return err != nil && (errors.Cause(err) == errNoPathFound) -} - -// ErrNoPathFound ... -func ErrNoPathFound() error { - return errors.WithStack(errNoPathFound) -} - -//-------------------------------------------- - -type errHeightMismatch struct { - h1, h2 int64 -} - -func (e errHeightMismatch) Error() string { - return fmt.Sprintf("Blocks don't match - %d vs %d", e.h1, e.h2) -} - -// IsHeightMismatchErr checks whether an error is due to data from different blocks -func IsHeightMismatchErr(err error) bool { - if err == nil { - return false +func IsErrTooMuchChange(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errTooMuchChange) + return ok } - _, ok := errors.Cause(err).(errHeightMismatch) - return ok + return false } -// ErrHeightMismatch returns an mismatch error with stack-trace -func ErrHeightMismatch(h1, h2 int64) error { - return errors.WithStack(errHeightMismatch{h1, h2}) +//----------------- +// ErrMissingValidators + +// ErrMissingValidators indicates that some validator set was missing or unknown. 
+func ErrMissingValidators(chainID string, height int64) error { + return cmn.ErrorWrap(errMissingValidators{chainID, height}, "") +} + +func IsErrMissingValidators(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errMissingValidators) + return ok + } + return false } diff --git a/lite/errors/errors_test.go b/lite/errors/errors_test.go deleted file mode 100644 index 479215e4..00000000 --- a/lite/errors/errors_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package errors - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestErrorHeight(t *testing.T) { - e1 := ErrHeightMismatch(2, 3) - e1.Error() - assert.True(t, IsHeightMismatchErr(e1)) - - e2 := errors.New("foobar") - assert.False(t, IsHeightMismatchErr(e2)) - assert.False(t, IsHeightMismatchErr(nil)) -} diff --git a/lite/files/commit.go b/lite/files/commit.go deleted file mode 100644 index 8a7e4721..00000000 --- a/lite/files/commit.go +++ /dev/null @@ -1,93 +0,0 @@ -package files - -import ( - "io/ioutil" - "os" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -const ( - // MaxFullCommitSize is the maximum number of bytes we will - // read in for a full commit to avoid excessive allocations - // in the deserializer - MaxFullCommitSize = 1024 * 1024 -) - -// SaveFullCommit exports the seed in binary / go-amino style -func SaveFullCommit(fc lite.FullCommit, path string) error { - f, err := os.Create(path) - if err != nil { - return errors.WithStack(err) - } - defer f.Close() - - _, err = cdc.MarshalBinaryWriter(f, fc) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -// SaveFullCommitJSON exports the seed in a json format -func SaveFullCommitJSON(fc lite.FullCommit, path string) error { - f, err := os.Create(path) - if err != nil { - return errors.WithStack(err) - } - defer f.Close() - bz, err := cdc.MarshalJSON(fc) - if err != nil { - return errors.WithStack(err) - } - _, err = f.Write(bz) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -// LoadFullCommit loads the full commit from the file system. -func LoadFullCommit(path string) (lite.FullCommit, error) { - var fc lite.FullCommit - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return fc, liteErr.ErrCommitNotFound() - } - return fc, errors.WithStack(err) - } - defer f.Close() - - _, err = cdc.UnmarshalBinaryReader(f, &fc, 0) - if err != nil { - return fc, errors.WithStack(err) - } - return fc, nil -} - -// LoadFullCommitJSON loads the commit from the file system in JSON format. 
-func LoadFullCommitJSON(path string) (lite.FullCommit, error) { - var fc lite.FullCommit - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return fc, liteErr.ErrCommitNotFound() - } - return fc, errors.WithStack(err) - } - defer f.Close() - - bz, err := ioutil.ReadAll(f) - if err != nil { - return fc, errors.WithStack(err) - } - err = cdc.UnmarshalJSON(bz, &fc) - if err != nil { - return fc, errors.WithStack(err) - } - return fc, nil -} diff --git a/lite/files/commit_test.go b/lite/files/commit_test.go deleted file mode 100644 index e0235ba2..00000000 --- a/lite/files/commit_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package files - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - cmn "github.com/tendermint/tmlibs/common" - - "github.com/tendermint/tendermint/lite" -) - -func tmpFile() string { - suffix := cmn.RandStr(16) - return filepath.Join(os.TempDir(), "fc-test-"+suffix) -} - -func TestSerializeFullCommits(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // some constants - appHash := []byte("some crazy thing") - chainID := "ser-ial" - h := int64(25) - - // build a fc - keys := lite.GenValKeys(5) - vals := keys.ToValidators(10, 0) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - - require.Equal(h, fc.Height()) - require.Equal(vals.Hash(), fc.ValidatorsHash()) - - // try read/write with json - jfile := tmpFile() - defer os.Remove(jfile) - jseed, err := LoadFullCommitJSON(jfile) - assert.NotNil(err) - err = SaveFullCommitJSON(fc, jfile) - require.Nil(err) - jseed, err = LoadFullCommitJSON(jfile) - assert.Nil(err, "%+v", err) - assert.Equal(h, jseed.Height()) - assert.Equal(vals.Hash(), jseed.ValidatorsHash()) - - // try read/write with binary - bfile := tmpFile() - defer os.Remove(bfile) - bseed, err := LoadFullCommit(bfile) - assert.NotNil(err) - err = SaveFullCommit(fc, bfile) - require.Nil(err) - bseed, err = LoadFullCommit(bfile) - assert.Nil(err, "%+v", err) - assert.Equal(h, bseed.Height()) - assert.Equal(vals.Hash(), bseed.ValidatorsHash()) - - // make sure they don't read the other format (different) - _, err = LoadFullCommit(jfile) - assert.NotNil(err) - _, err = LoadFullCommitJSON(bfile) - assert.NotNil(err) -} diff --git a/lite/files/provider.go b/lite/files/provider.go deleted file mode 100644 index 327b0331..00000000 --- a/lite/files/provider.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Package files defines a Provider that stores all data in the filesystem - -We assume the same validator hash may be reused by many different -headers/Commits, and thus store it separately. This leaves us -with three issues: - - 1. Given a validator hash, retrieve the validator set if previously stored - 2. Given a block height, find the Commit with the highest height <= h - 3. 
Given a FullCommit, store it quickly to satisfy 1 and 2 - -Note that we do not worry about caching, as that can be achieved by -pairing this with a MemStoreProvider and CacheProvider from certifiers -*/ -package files - -import ( - "encoding/hex" - "fmt" - "math" - "os" - "path/filepath" - "sort" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -// nolint -const ( - Ext = ".tsd" - ValDir = "validators" - CheckDir = "checkpoints" - dirPerm = os.FileMode(0755) - //filePerm = os.FileMode(0644) -) - -type provider struct { - valDir string - checkDir string -} - -// NewProvider creates the parent dir and subdirs -// for validators and checkpoints as needed -func NewProvider(dir string) lite.Provider { - valDir := filepath.Join(dir, ValDir) - checkDir := filepath.Join(dir, CheckDir) - for _, d := range []string{valDir, checkDir} { - err := os.MkdirAll(d, dirPerm) - if err != nil { - panic(err) - } - } - return &provider{valDir: valDir, checkDir: checkDir} -} - -func (p *provider) encodeHash(hash []byte) string { - return hex.EncodeToString(hash) + Ext -} - -func (p *provider) encodeHeight(h int64) string { - // pad up to 10^12 for height... - return fmt.Sprintf("%012d%s", h, Ext) -} - -// StoreCommit saves a full commit after it has been verified. -func (p *provider) StoreCommit(fc lite.FullCommit) error { - // make sure the fc is self-consistent before saving - err := fc.ValidateBasic(fc.Commit.Header.ChainID) - if err != nil { - return err - } - - paths := []string{ - filepath.Join(p.checkDir, p.encodeHeight(fc.Height())), - filepath.Join(p.valDir, p.encodeHash(fc.Header.ValidatorsHash)), - } - for _, path := range paths { - err := SaveFullCommit(fc, path) - // unknown error in creating or writing immediately breaks - if err != nil { - return err - } - } - return nil -} - -// GetByHeight returns the closest commit with height <= h. -func (p *provider) GetByHeight(h int64) (lite.FullCommit, error) { - // first we look for exact match, then search... - path := filepath.Join(p.checkDir, p.encodeHeight(h)) - fc, err := LoadFullCommit(path) - if liteErr.IsCommitNotFoundErr(err) { - path, err = p.searchForHeight(h) - if err == nil { - fc, err = LoadFullCommit(path) - } - } - return fc, err -} - -// LatestCommit returns the newest commit stored. -func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { - // Note to future: please update by 2077 to avoid rollover - return p.GetByHeight(math.MaxInt32 - 1) -} - -// search for height, looks for a file with highest height < h -// return certifiers.ErrCommitNotFound() if not there... -func (p *provider) searchForHeight(h int64) (string, error) { - d, err := os.Open(p.checkDir) - if err != nil { - return "", errors.WithStack(err) - } - files, err := d.Readdirnames(0) - - d.Close() - if err != nil { - return "", errors.WithStack(err) - } - - desired := p.encodeHeight(h) - sort.Strings(files) - i := sort.SearchStrings(files, desired) - if i == 0 { - return "", liteErr.ErrCommitNotFound() - } - found := files[i-1] - path := filepath.Join(p.checkDir, found) - return path, errors.WithStack(err) -} - -// GetByHash returns a commit exactly matching this validator hash. 
-func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { - path := filepath.Join(p.valDir, p.encodeHash(hash)) - return LoadFullCommit(path) -} diff --git a/lite/files/provider_test.go b/lite/files/provider_test.go deleted file mode 100644 index 5deebb1a..00000000 --- a/lite/files/provider_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package files_test - -import ( - "bytes" - "errors" - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" - "github.com/tendermint/tendermint/lite/files" -) - -func checkEqual(stored, loaded lite.FullCommit, chainID string) error { - err := loaded.ValidateBasic(chainID) - if err != nil { - return err - } - if !bytes.Equal(stored.ValidatorsHash(), loaded.ValidatorsHash()) { - return errors.New("Different block hashes") - } - return nil -} - -func TestFileProvider(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - dir, err := ioutil.TempDir("", "fileprovider-test") - assert.Nil(err) - defer os.RemoveAll(dir) - p := files.NewProvider(dir) - - chainID := "test-files" - appHash := []byte("some-data") - keys := lite.GenValKeys(5) - count := 10 - - // make a bunch of seeds... - seeds := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // two seeds for each validator, to check how we handle dups - // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... - vals := keys.ToValidators(10, int64(count/2)) - h := int64(20 + 10*i) - check := keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - seeds[i] = lite.NewFullCommit(check, vals) - } - - // check provider is empty - seed, err := p.GetByHeight(20) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - seed, err = p.GetByHash(seeds[3].ValidatorsHash()) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - // now add them all to the provider - for _, s := range seeds { - err = p.StoreCommit(s) - require.Nil(err) - // and make sure we can get it back - s2, err := p.GetByHash(s.ValidatorsHash()) - assert.Nil(err) - err = checkEqual(s, s2, chainID) - assert.Nil(err) - // by height as well - s2, err = p.GetByHeight(s.Height()) - err = checkEqual(s, s2, chainID) - assert.Nil(err) - } - - // make sure we get the last hash if we overstep - seed, err = p.GetByHeight(5000) - if assert.Nil(err, "%+v", err) { - assert.Equal(seeds[count-1].Height(), seed.Height()) - err = checkEqual(seeds[count-1], seed, chainID) - assert.Nil(err) - } - - // and middle ones as well - seed, err = p.GetByHeight(47) - if assert.Nil(err, "%+v", err) { - // we only step by 10, so 40 must be the one below this - assert.EqualValues(40, seed.Height()) - } - - // and proper error for too low - _, err = p.GetByHeight(5) - assert.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) -} diff --git a/lite/files/wire.go b/lite/files/wire.go deleted file mode 100644 index 3a207744..00000000 --- a/lite/files/wire.go +++ /dev/null @@ -1,12 +0,0 @@ -package files - -import ( - "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/crypto" -) - -var cdc = amino.NewCodec() - -func init() { - crypto.RegisterAmino(cdc) -} diff --git a/lite/helpers.go b/lite/helpers.go index 695f6fb9..764df507 100644 --- a/lite/helpers.go +++ b/lite/helpers.go @@ -4,24 +4,21 @@ import ( "time" crypto "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/types" ) -// ValKeys 
is a helper for testing. +// privKeys is a helper type for testing. // -// It lets us simulate signing with many keys, either ed25519 or secp256k1. -// The main use case is to create a set, and call GenCommit -// to get properly signed header for testing. +// It lets us simulate signing with many keys. The main use case is to create +// a set, and call GenSignedHeader to get properly signed header for testing. // -// You can set different weights of validators each time you call -// ToValidators, and can optionally extend the validator set later -// with Extend or ExtendSecp -type ValKeys []crypto.PrivKey +// You can set different weights of validators each time you call ToValidators, +// and can optionally extend the validator set later with Extend. +type privKeys []crypto.PrivKey -// GenValKeys produces an array of private keys to generate commits. -func GenValKeys(n int) ValKeys { - res := make(ValKeys, n) +// genPrivKeys produces an array of private keys to generate commits. +func genPrivKeys(n int) privKeys { + res := make(privKeys, n) for i := range res { res[i] = crypto.GenPrivKeyEd25519() } @@ -29,56 +26,41 @@ func GenValKeys(n int) ValKeys { } // Change replaces the key at index i. -func (v ValKeys) Change(i int) ValKeys { - res := make(ValKeys, len(v)) - copy(res, v) +func (pkz privKeys) Change(i int) privKeys { + res := make(privKeys, len(pkz)) + copy(res, pkz) res[i] = crypto.GenPrivKeyEd25519() return res } // Extend adds n more keys (to remove, just take a slice). -func (v ValKeys) Extend(n int) ValKeys { - extra := GenValKeys(n) - return append(v, extra...) +func (pkz privKeys) Extend(n int) privKeys { + extra := genPrivKeys(n) + return append(pkz, extra...) } -// GenSecpValKeys produces an array of secp256k1 private keys to generate commits. -func GenSecpValKeys(n int) ValKeys { - res := make(ValKeys, n) - for i := range res { - res[i] = crypto.GenPrivKeySecp256k1() - } - return res -} - -// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice). -func (v ValKeys) ExtendSecp(n int) ValKeys { - extra := GenSecpValKeys(n) - return append(v, extra...) -} - -// ToValidators produces a list of validators from the set of keys +// ToValidators produces a valset from the set of keys. // The first key has weight `init` and it increases by `inc` every step // so we can have all the same weight, or a simple linear distribution // (should be enough for testing). -func (v ValKeys) ToValidators(init, inc int64) *types.ValidatorSet { - res := make([]*types.Validator, len(v)) - for i, k := range v { +func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet { + res := make([]*types.Validator, len(pkz)) + for i, k := range pkz { res[i] = types.NewValidator(k.PubKey(), init+int64(i)*inc) } return types.NewValidatorSet(res) } // signHeader properly signs the header with all keys from first to last exclusive. -func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit { - votes := make([]*types.Vote, len(v)) +func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Commit { + votes := make([]*types.Vote, len(pkz)) - // we need this list to keep the ordering... - vset := v.ToValidators(1, 0) + // We need this list to keep the ordering. + vset := pkz.ToValidators(1, 0) - // fill in the votes we want - for i := first; i < last && i < len(v); i++ { - vote := makeVote(header, vset, v[i]) + // Fill in the votes we want. 
+ for i := first; i < last && i < len(pkz); i++ { + vote := makeVote(header, vset, pkz[i]) votes[vote.ValidatorIndex] = vote } @@ -89,15 +71,15 @@ func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit return res } -func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey) *types.Vote { +func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivKey) *types.Vote { addr := key.PubKey().Address() - idx, _ := vals.GetByAddress(addr) + idx, _ := valset.GetByAddress(addr) vote := &types.Vote{ ValidatorAddress: addr, ValidatorIndex: idx, Height: header.Height, Round: 1, - Timestamp: time.Now().UTC(), + Timestamp: time.Now().Round(0).UTC(), Type: types.VoteTypePrecommit, BlockID: types.BlockID{Hash: header.Hash()}, } @@ -113,47 +95,46 @@ func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey return vote } -// Silences warning that vals can also be merkle.Hashable -// nolint: interfacer func genHeader(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { + valset, nvalset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { return &types.Header{ ChainID: chainID, Height: height, - Time: time.Now(), + Time: time.Now().Round(0).UTC(), NumTxs: int64(len(txs)), TotalTxs: int64(len(txs)), // LastBlockID // LastCommitHash - ValidatorsHash: vals.Hash(), - DataHash: txs.Hash(), - AppHash: appHash, - ConsensusHash: consHash, - LastResultsHash: resHash, + ValidatorsHash: valset.Hash(), + NextValidatorsHash: nvalset.Hash(), + DataHash: txs.Hash(), + AppHash: appHash, + ConsensusHash: consHash, + LastResultsHash: resHash, } } -// GenCommit calls genHeader and signHeader and combines them into a Commit. -func (v ValKeys) GenCommit(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) Commit { +// GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. +func (pkz privKeys) GenSignedHeader(chainID string, height int64, txs types.Txs, + valset, nvalset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader { - header := genHeader(chainID, height, txs, vals, appHash, consHash, resHash) - check := Commit{ + header := genHeader(chainID, height, txs, valset, nvalset, appHash, consHash, resHash) + check := types.SignedHeader{ Header: header, - Commit: v.signHeader(header, first, last), + Commit: pkz.signHeader(header, first, last), } return check } -// GenFullCommit calls genHeader and signHeader and combines them into a Commit. -func (v ValKeys) GenFullCommit(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit { +// GenFullCommit calls genHeader and signHeader and combines them into a FullCommit. 
+func (pkz privKeys) GenFullCommit(chainID string, height int64, txs types.Txs, + valset, nvalset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit { - header := genHeader(chainID, height, txs, vals, appHash, consHash, resHash) - commit := Commit{ + header := genHeader(chainID, height, txs, valset, nvalset, appHash, consHash, resHash) + commit := types.SignedHeader{ Header: header, - Commit: v.signHeader(header, first, last), + Commit: pkz.signHeader(header, first, last), } - return NewFullCommit(commit, vals) + return NewFullCommit(commit, valset, nvalset) } diff --git a/lite/inquiring_certifier.go b/lite/inquiring_certifier.go index 042bd08e..049cd728 100644 --- a/lite/inquiring_certifier.go +++ b/lite/inquiring_certifier.go @@ -1,163 +1,209 @@ package lite import ( + "bytes" + "github.com/tendermint/tendermint/types" - liteErr "github.com/tendermint/tendermint/lite/errors" + lerr "github.com/tendermint/tendermint/lite/errors" ) var _ Certifier = (*InquiringCertifier)(nil) -// InquiringCertifier wraps a dynamic certifier and implements an auto-update strategy. If a call -// to Certify fails due to a change it validator set, InquiringCertifier will try and find a -// previous FullCommit which it can use to safely update the validator set. It uses a source -// provider to obtain the needed FullCommits. It stores properly validated data on the local system. +// InquiringCertifier implements an auto-updating certifier. It uses a +// "source" provider to obtain the needed FullCommits to securely sync with +// validator set changes. It stores properly validated data on the +// "trusted" local system. type InquiringCertifier struct { - cert *DynamicCertifier - // These are only properly validated data, from local system - trusted Provider - // This is a source of new info, like a node rpc, or other import method - Source Provider + chainID string + // These are only properly validated data, from local system. + trusted PersistentProvider + // This is a source of new info, like a node rpc, or other import method. + source Provider } -// NewInquiringCertifier returns a new Inquiring object. It uses the trusted provider to store -// validated data and the source provider to obtain missing FullCommits. +// NewInquiringCertifier returns a new InquiringCertifier. It uses the +// trusted provider to store validated data and the source provider to +// obtain missing data (e.g. FullCommits). // -// Example: The trusted provider should a CacheProvider, MemProvider or files.Provider. The source -// provider should be a client.HTTPProvider. -func NewInquiringCertifier(chainID string, fc FullCommit, trusted Provider, - source Provider) (*InquiringCertifier, error) { - - // store the data in trusted - err := trusted.StoreCommit(fc) - if err != nil { - return nil, err - } +// The trusted provider should a CacheProvider, MemProvider or +// files.Provider. The source provider should be a client.HTTPProvider. +func NewInquiringCertifier(chainID string, trusted PersistentProvider, source Provider) ( + *InquiringCertifier, error) { return &InquiringCertifier{ - cert: NewDynamicCertifier(chainID, fc.Validators, fc.Height()), + chainID: chainID, trusted: trusted, - Source: source, + source: source, }, nil } -// ChainID returns the chain id. // Implements Certifier. func (ic *InquiringCertifier) ChainID() string { - return ic.cert.ChainID() + return ic.chainID } -// Validators returns the validator set. 
-func (ic *InquiringCertifier) Validators() *types.ValidatorSet { - return ic.cert.cert.vSet -} - -// LastHeight returns the last height. -func (ic *InquiringCertifier) LastHeight() int64 { - return ic.cert.lastHeight -} - -// Certify makes sure this is checkpoint is valid. -// -// If the validators have changed since the last know time, it looks -// for a path to prove the new validators. -// -// On success, it will store the checkpoint in the store for later viewing // Implements Certifier. -func (ic *InquiringCertifier) Certify(commit Commit) error { - err := ic.useClosestTrust(commit.Height()) +// +// If the validators have changed since the last know time, it looks to +// ic.trusted and ic.source to prove the new validators. On success, it will +// try to store the SignedHeader in ic.trusted if the next +// validator can be sourced. +func (ic *InquiringCertifier) Certify(shdr types.SignedHeader) error { + + // Get the latest known full commit <= h-1 from our trusted providers. + // The full commit at h-1 contains the valset to sign for h. + h := shdr.Height - 1 + tfc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h) if err != nil { return err } - err = ic.cert.Certify(commit) - if !liteErr.IsValidatorsChangedErr(err) { - return err + if tfc.Height() == h { + // Return error if valset doesn't match. + if !bytes.Equal( + tfc.NextValidators.Hash(), + shdr.Header.ValidatorsHash) { + return lerr.ErrUnexpectedValidators( + tfc.NextValidators.Hash(), + shdr.Header.ValidatorsHash) + } + } else { + // If valset doesn't match... + if !bytes.Equal(tfc.NextValidators.Hash(), + shdr.Header.ValidatorsHash) { + // ... update. + tfc, err = ic.updateToHeight(h) + if err != nil { + return err + } + // Return error if valset _still_ doesn't match. + if !bytes.Equal(tfc.NextValidators.Hash(), + shdr.Header.ValidatorsHash) { + return lerr.ErrUnexpectedValidators( + tfc.NextValidators.Hash(), + shdr.Header.ValidatorsHash) + } + } } - err = ic.updateToHash(commit.Header.ValidatorsHash) + + // Certify the signed header using the matching valset. + cert := NewBaseCertifier(ic.chainID, tfc.Height()+1, tfc.NextValidators) + err = cert.Certify(shdr) if err != nil { return err } - err = ic.cert.Certify(commit) - if err != nil { + // Get the next validator set. + nvalset, err := ic.source.ValidatorSet(ic.chainID, shdr.Height+1) + if lerr.IsErrMissingValidators(err) { + // Ignore this error. + return nil + } else if err != nil { return err + } else { + // Create filled FullCommit. + nfc := FullCommit{ + SignedHeader: shdr, + Validators: tfc.NextValidators, + NextValidators: nvalset, + } + // Validate the full commit. This checks the cryptographic + // signatures of Commit against Validators. + if err := nfc.ValidateBasic(ic.chainID); err != nil { + return err + } + // Trust it. + return ic.trusted.SaveFullCommit(nfc) } - - // store the new checkpoint - return ic.trusted.StoreCommit(NewFullCommit(commit, ic.Validators())) } -// Update will verify if this is a valid change and update -// the certifying validator set if safe to do so. -func (ic *InquiringCertifier) Update(fc FullCommit) error { - err := ic.useClosestTrust(fc.Height()) +// verifyAndSave will verify if this is a valid source full commit given the +// best match trusted full commit, and if good, persist to ic.trusted. +// Returns ErrTooMuchChange when >2/3 of tfc did not sign sfc. +// Panics if tfc.Height() >= sfc.Height(). 
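+//
+// In other words, a direct jump from tfc to sfc is accepted only while the
+// validators trusted at tfc.Height() still carry enough voting power in the
+// commit at sfc.Height(); once the turnover is too large, the caller
+// (updateToHeight) must bisect through intermediate heights instead.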
+func (ic *InquiringCertifier) verifyAndSave(tfc, sfc FullCommit) error { + if tfc.Height() >= sfc.Height() { + panic("should not happen") + } + err := tfc.NextValidators.VerifyFutureCommit( + sfc.Validators, + ic.chainID, sfc.SignedHeader.Commit.BlockID, + sfc.SignedHeader.Height, sfc.SignedHeader.Commit, + ) if err != nil { return err } - err = ic.cert.Update(fc) - if err == nil { - err = ic.trusted.StoreCommit(fc) - } - return err + return ic.trusted.SaveFullCommit(sfc) } -func (ic *InquiringCertifier) useClosestTrust(h int64) error { - closest, err := ic.trusted.GetByHeight(h) +// updateToHeight will use divide-and-conquer to find a path to h. +// Returns nil iff we successfully verify and persist a full commit +// for height h, using repeated applications of bisection if necessary. +// +// Returns ErrCommitNotFound if source provider doesn't have the commit for h. +func (ic *InquiringCertifier) updateToHeight(h int64) (FullCommit, error) { + + // Fetch latest full commit from source. + sfc, err := ic.source.LatestFullCommit(ic.chainID, h, h) if err != nil { - return err + return FullCommit{}, err } - // if the best seed is not the one we currently use, - // let's just reset the dynamic validator - if closest.Height() != ic.LastHeight() { - ic.cert = NewDynamicCertifier(ic.ChainID(), closest.Validators, closest.Height()) + // Validate the full commit. This checks the cryptographic + // signatures of Commit against Validators. + if err := sfc.ValidateBasic(ic.chainID); err != nil { + return FullCommit{}, err + } + + // If sfc.Height() != h, we can't do it. + if sfc.Height() != h { + return FullCommit{}, lerr.ErrCommitNotFound() + } + +FOR_LOOP: + for { + // Fetch latest full commit from trusted. + tfc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h) + if err != nil { + return FullCommit{}, err + } + // Maybe we have nothing to do. + if tfc.Height() == h { + return FullCommit{}, nil + } + + // Try to update to full commit with checks. + err = ic.verifyAndSave(tfc, sfc) + if err == nil { + // All good! + return sfc, nil + } else { + // Handle special case when err is ErrTooMuchChange. + if lerr.IsErrTooMuchChange(err) { + // Divide and conquer. + start, end := tfc.Height(), sfc.Height() + if !(start < end) { + panic("should not happen") + } + mid := (start + end) / 2 + _, err = ic.updateToHeight(mid) + if err != nil { + return FullCommit{}, err + } + // If we made it to mid, we retry. 
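+			// E.g. for trusted=1 and target=1000 with heavy validator turnover:
+			// the direct jump fails, we first secure height 500 (recursing further
+			// if even that hop changed too much, and assuming the source can serve
+			// those intermediate heights), and the retry then only needs the
+			// 500 -> 1000 jump to verify.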
+ continue FOR_LOOP + } + return FullCommit{}, err + } } - return nil } -// updateToHash gets the validator hash we want to update to -// if IsTooMuchChangeErr, we try to find a path by binary search over height -func (ic *InquiringCertifier) updateToHash(vhash []byte) error { - // try to get the match, and update - fc, err := ic.Source.GetByHash(vhash) +func (ic *InquiringCertifier) LastTrustedHeight() int64 { + fc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, 1<<63-1) if err != nil { - return err + panic("should not happen") } - err = ic.cert.Update(fc) - // handle IsTooMuchChangeErr by using divide and conquer - if liteErr.IsTooMuchChangeErr(err) { - err = ic.updateToHeight(fc.Height()) - } - return err -} - -// updateToHeight will use divide-and-conquer to find a path to h -func (ic *InquiringCertifier) updateToHeight(h int64) error { - // try to update to this height (with checks) - fc, err := ic.Source.GetByHeight(h) - if err != nil { - return err - } - start, end := ic.LastHeight(), fc.Height() - if end <= start { - return liteErr.ErrNoPathFound() - } - err = ic.Update(fc) - - // we can handle IsTooMuchChangeErr specially - if !liteErr.IsTooMuchChangeErr(err) { - return err - } - - // try to update to mid - mid := (start + end) / 2 - err = ic.updateToHeight(mid) - if err != nil { - return err - } - - // if we made it to mid, we recurse - return ic.updateToHeight(h) + return fc.Height() } diff --git a/lite/inquiring_certifier_test.go b/lite/inquiring_certifier_test.go index db8160bd..b3d8edea 100644 --- a/lite/inquiring_certifier_test.go +++ b/lite/inquiring_certifier_test.go @@ -1,5 +1,4 @@ -// nolint: vetshadow -package lite_test +package lite import ( "fmt" @@ -8,166 +7,146 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/lite" + dbm "github.com/tendermint/tmlibs/db" ) func TestInquirerValidPath(t *testing.T) { assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() + trust := NewDBProvider(dbm.NewMemDB()) + source := NewDBProvider(dbm.NewMemDB()) - // set up the validators to generate test blocks + // Set up the validators to generate test blocks. var vote int64 = 10 - keys := lite.GenValKeys(5) + keys := genPrivKeys(5) + nkeys := keys.Extend(1) - // construct a bunch of commits, each with one more height than the last + // Construct a bunch of commits, each with one more height than the last. chainID := "inquiry-test" consHash := []byte("params") resHash := []byte("results") count := 50 - commits := make([]lite.FullCommit, count) + fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - // extend the keys by 1 each time - keys = keys.Extend(1) vals := keys.ToValidators(vote, 0) - h := int64(20 + 10*i) + nvals := nkeys.ToValidators(vote, 0) + h := int64(1 + i) appHash := []byte(fmt.Sprintf("h=%d", h)) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) + fcz[i] = keys.GenFullCommit( + chainID, h, nil, + vals, nvals, + appHash, consHash, resHash, 0, len(keys)) + // Extend the keys by 1 each time. + keys = nkeys + nkeys = nkeys.Extend(1) } - // initialize a certifier with the initial state - cert, err := lite.NewInquiringCertifier(chainID, commits[0], trust, source) + // Initialize a certifier with the initial state. 
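+	// Certify consults ic.trusted via LatestFullCommit, so the trust store
+	// must already hold at least one full commit (here fcz[0], at height 1)
+	// before the certifier can verify anything.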
+ err := trust.SaveFullCommit(fcz[0]) + require.Nil(err) + cert, err := NewInquiringCertifier(chainID, trust, source) require.Nil(err) - // this should fail validation.... - commit := commits[count-1].Commit - err = cert.Certify(commit) + // This should fail validation: + sh := fcz[count-1].SignedHeader + err = cert.Certify(sh) require.NotNil(err) - // adding a few commits in the middle should be insufficient + // Adding a few commits in the middle should be insufficient. for i := 10; i < 13; i++ { - err := source.StoreCommit(commits[i]) + err := source.SaveFullCommit(fcz[i]) require.Nil(err) } - err = cert.Certify(commit) + err = cert.Certify(sh) assert.NotNil(err) - // with more info, we succeed + // With more info, we succeed. for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) + err := source.SaveFullCommit(fcz[i]) require.Nil(err) } - err = cert.Certify(commit) - assert.Nil(err, "%+v", err) -} - -func TestInquirerMinimalPath(t *testing.T) { - assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() - - // set up the validators to generate test blocks - var vote int64 = 10 - keys := lite.GenValKeys(5) - - // construct a bunch of commits, each with one more height than the last - chainID := "minimal-path" - consHash := []byte("other-params") - count := 12 - commits := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // extend the validators, so we are just below 2/3 - keys = keys.Extend(len(keys)/2 - 1) - vals := keys.ToValidators(vote, 0) - h := int64(5 + 10*i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - resHash := []byte(fmt.Sprintf("res=%d", h)) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) - } - - // initialize a certifier with the initial state - cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source) - - // this should fail validation.... - commit := commits[count-1].Commit - err := cert.Certify(commit) - require.NotNil(err) - - // add a few seed in the middle should be insufficient - for i := 5; i < 8; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.NotNil(err) - - // with more info, we succeed - for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) + err = cert.Certify(sh) assert.Nil(err, "%+v", err) } func TestInquirerVerifyHistorical(t *testing.T) { assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() + trust := NewDBProvider(dbm.NewMemDB()) + source := NewDBProvider(dbm.NewMemDB()) - // set up the validators to generate test blocks + // Set up the validators to generate test blocks. var vote int64 = 10 - keys := lite.GenValKeys(5) + keys := genPrivKeys(5) + nkeys := keys.Extend(1) - // construct a bunch of commits, each with one more height than the last + // Construct a bunch of commits, each with one more height than the last. 
chainID := "inquiry-test" count := 10 consHash := []byte("special-params") - commits := make([]lite.FullCommit, count) + fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - // extend the keys by 1 each time - keys = keys.Extend(1) vals := keys.ToValidators(vote, 0) - h := int64(20 + 10*i) + nvals := nkeys.ToValidators(vote, 0) + h := int64(1 + i) appHash := []byte(fmt.Sprintf("h=%d", h)) resHash := []byte(fmt.Sprintf("res=%d", h)) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) + fcz[i] = keys.GenFullCommit( + chainID, h, nil, + vals, nvals, + appHash, consHash, resHash, 0, len(keys)) + // Extend the keys by 1 each time. + keys = nkeys + nkeys = nkeys.Extend(1) } - // initialize a certifier with the initial state - cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source) + // Initialize a certifier with the initial state. + err := trust.SaveFullCommit(fcz[0]) + require.Nil(err) + cert, err := NewInquiringCertifier(chainID, trust, source) + require.Nil(err) - // store a few commits as trust + // Store a few full commits as trust. for _, i := range []int{2, 5} { - trust.StoreCommit(commits[i]) + trust.SaveFullCommit(fcz[i]) } - // let's see if we can jump forward using trusted commits - err := source.StoreCommit(commits[7]) + // See if we can jump forward using trusted full commits. + // Souce doesn't have fcz[9] so cert.LastTrustedHeight wont' change. + err = source.SaveFullCommit(fcz[7]) require.Nil(err, "%+v", err) - check := commits[7].Commit - err = cert.Certify(check) + sh := fcz[8].SignedHeader + err = cert.Certify(sh) require.Nil(err, "%+v", err) - assert.Equal(check.Height(), cert.LastHeight()) + assert.Equal(fcz[7].Height(), cert.LastTrustedHeight()) + fc_, err := trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height()) + require.NotNil(err, "%+v", err) + assert.Equal(fc_, (FullCommit{})) - // add access to all commits via untrusted source + // With fcz[9] Certify will update last trusted height. + err = source.SaveFullCommit(fcz[9]) + require.Nil(err, "%+v", err) + sh = fcz[8].SignedHeader + err = cert.Certify(sh) + require.Nil(err, "%+v", err) + assert.Equal(fcz[8].Height(), cert.LastTrustedHeight()) + fc_, err = trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height()) + require.Nil(err, "%+v", err) + assert.Equal(fc_.Height(), fcz[8].Height()) + + // Add access to all full commits via untrusted source. for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) + err := source.SaveFullCommit(fcz[i]) require.Nil(err) } - // try to check an unknown seed in the past - mid := commits[3].Commit - err = cert.Certify(mid) + // Try to check an unknown seed in the past. + sh = fcz[3].SignedHeader + err = cert.Certify(sh) require.Nil(err, "%+v", err) - assert.Equal(mid.Height(), cert.LastHeight()) + assert.Equal(fcz[8].Height(), cert.LastTrustedHeight()) - // and jump all the way forward again - end := commits[count-1].Commit - err = cert.Certify(end) + // Jump all the way forward again. 
+ sh = fcz[count-1].SignedHeader + err = cert.Certify(sh) require.Nil(err, "%+v", err) - assert.Equal(end.Height(), cert.LastHeight()) + assert.Equal(fcz[9].Height(), cert.LastTrustedHeight()) } diff --git a/lite/memprovider.go b/lite/memprovider.go deleted file mode 100644 index ac0d8321..00000000 --- a/lite/memprovider.go +++ /dev/null @@ -1,152 +0,0 @@ -package lite - -import ( - "encoding/hex" - "sort" - "sync" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -type memStoreProvider struct { - mtx sync.RWMutex - // byHeight is always sorted by Height... need to support range search (nil, h] - // btree would be more efficient for larger sets - byHeight fullCommits - byHash map[string]FullCommit - - sorted bool -} - -// fullCommits just exists to allow easy sorting -type fullCommits []FullCommit - -func (s fullCommits) Len() int { return len(s) } -func (s fullCommits) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s fullCommits) Less(i, j int) bool { - return s[i].Height() < s[j].Height() -} - -// NewMemStoreProvider returns a new in-memory provider. -func NewMemStoreProvider() Provider { - return &memStoreProvider{ - byHeight: fullCommits{}, - byHash: map[string]FullCommit{}, - } -} - -func (m *memStoreProvider) encodeHash(hash []byte) string { - return hex.EncodeToString(hash) -} - -// StoreCommit stores a FullCommit after verifying it. -func (m *memStoreProvider) StoreCommit(fc FullCommit) error { - // make sure the fc is self-consistent before saving - err := fc.ValidateBasic(fc.Commit.Header.ChainID) - if err != nil { - return err - } - - // store the valid fc - key := m.encodeHash(fc.ValidatorsHash()) - - m.mtx.Lock() - defer m.mtx.Unlock() - m.byHash[key] = fc - m.byHeight = append(m.byHeight, fc) - m.sorted = false - return nil -} - -// GetByHeight returns the FullCommit for height h or an error if the commit is not found. -func (m *memStoreProvider) GetByHeight(h int64) (FullCommit, error) { - // By heuristics, GetByHeight with linearsearch is fast enough - // for about 50 keys but after that, it needs binary search. 
- // See https://github.com/tendermint/tendermint/pull/1043#issue-285188242 - m.mtx.RLock() - n := len(m.byHeight) - m.mtx.RUnlock() - - if n <= 50 { - return m.getByHeightLinearSearch(h) - } - return m.getByHeightBinarySearch(h) -} - -func (m *memStoreProvider) sortByHeightIfNecessaryLocked() { - if !m.sorted { - sort.Sort(m.byHeight) - m.sorted = true - } -} - -func (m *memStoreProvider) getByHeightLinearSearch(h int64) (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.sortByHeightIfNecessaryLocked() - // search from highest to lowest - for i := len(m.byHeight) - 1; i >= 0; i-- { - if fc := m.byHeight[i]; fc.Height() <= h { - return fc, nil - } - } - return FullCommit{}, liteErr.ErrCommitNotFound() -} - -func (m *memStoreProvider) getByHeightBinarySearch(h int64) (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.sortByHeightIfNecessaryLocked() - low, high := 0, len(m.byHeight)-1 - var mid int - var hmid int64 - var midFC FullCommit - // Our goal is to either find: - // * item ByHeight with the query - // * greatest height with a height <= query - for low <= high { - mid = int(uint(low+high) >> 1) // Avoid an overflow - midFC = m.byHeight[mid] - hmid = midFC.Height() - switch { - case hmid == h: - return midFC, nil - case hmid < h: - low = mid + 1 - case hmid > h: - high = mid - 1 - } - } - - if high >= 0 { - if highFC := m.byHeight[high]; highFC.Height() < h { - return highFC, nil - } - } - return FullCommit{}, liteErr.ErrCommitNotFound() -} - -// GetByHash returns the FullCommit for the hash or an error if the commit is not found. -func (m *memStoreProvider) GetByHash(hash []byte) (FullCommit, error) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - fc, ok := m.byHash[m.encodeHash(hash)] - if !ok { - return fc, liteErr.ErrCommitNotFound() - } - return fc, nil -} - -// LatestCommit returns the latest FullCommit or an error if no commits exist. -func (m *memStoreProvider) LatestCommit() (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - l := len(m.byHeight) - if l == 0 { - return FullCommit{}, liteErr.ErrCommitNotFound() - } - m.sortByHeightIfNecessaryLocked() - return m.byHeight[l-1], nil -} diff --git a/lite/multiprovider.go b/lite/multiprovider.go new file mode 100644 index 00000000..dcfd1318 --- /dev/null +++ b/lite/multiprovider.go @@ -0,0 +1,72 @@ +package lite + +import ( + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" +) + +// multiProvider allows you to place one or more caches in front of a source +// Provider. It runs through them in order until a match is found. +type multiProvider struct { + Providers []PersistentProvider +} + +// NewMultiProvider returns a new provider which wraps multiple other providers. +func NewMultiProvider(providers ...PersistentProvider) multiProvider { + return multiProvider{ + Providers: providers, + } +} + +// SaveFullCommit saves on all providers, and aborts on the first error. +func (mc multiProvider) SaveFullCommit(fc FullCommit) (err error) { + for _, p := range mc.Providers { + err = p.SaveFullCommit(fc) + if err != nil { + return + } + } + return +} + +// LatestFullCommit loads the latest from all providers and provides +// the latest FullCommit that satisfies the conditions. +// Returns the first error encountered. 
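+//
+// For example, NewMultiProvider(memProvider, dbProvider) consults the
+// in-memory cache first and then the DB-backed provider; whichever of them
+// holds the highest matching commit wins (memProvider and dbProvider here
+// stand for whatever PersistentProviders the caller wired up).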
+func (mc multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc FullCommit, err error) { + for _, p := range mc.Providers { + var fc_ FullCommit + fc_, err = p.LatestFullCommit(chainID, minHeight, maxHeight) + if lerr.IsErrCommitNotFound(err) { + err = nil + continue + } else if err != nil { + return + } + if fc == (FullCommit{}) { + fc = fc_ + } else if fc_.Height() > fc.Height() { + fc = fc_ + } + if fc.Height() == maxHeight { + return + } + } + if fc == (FullCommit{}) { + err = lerr.ErrCommitNotFound() + return + } + return +} + +// ValidatorSet returns validator set at height as provided by the first +// provider which has it, or an error otherwise. +func (mc multiProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + for _, p := range mc.Providers { + valset, err = p.ValidatorSet(chainID, height) + if err == nil { + // TODO Log unexpected types of errors. + return valset, nil + } + } + return nil, lerr.ErrMissingValidators(chainID, height) +} diff --git a/lite/performance_test.go b/lite/performance_test.go deleted file mode 100644 index 8cd522cb..00000000 --- a/lite/performance_test.go +++ /dev/null @@ -1,365 +0,0 @@ -package lite - -import ( - "fmt" - "math/rand" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -func TestMemStoreProvidergetByHeightBinaryAndLinearSameResult(t *testing.T) { - p := NewMemStoreProvider().(*memStoreProvider) - - // Store a bunch of commits at specific heights - // and then ensure that: - // * getByHeightLinearSearch - // * getByHeightBinarySearch - // both return the exact same result - - // 1. Non-existent height commits - nonExistent := []int64{-1000, -1, 0, 1, 10, 11, 17, 31, 67, 1000, 1e9} - ensureNonExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, nonExistent) - ensureNonExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, nonExistent) - - // 2. Save some known height commits - knownHeights := []int64{0, 1, 7, 9, 12, 13, 18, 44, 23, 16, 1024, 100, 199, 1e9} - createAndStoreCommits(t, p, knownHeights) - - // 3. Now check if those heights are retrieved - ensureExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, knownHeights) - ensureExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, knownHeights) - - // 4. And now for the height probing to ensure that any height - // requested returns a fullCommit of height <= requestedHeight. 
- comparegetByHeightAlgorithms(t, p, 0, 0) - comparegetByHeightAlgorithms(t, p, 1, 1) - comparegetByHeightAlgorithms(t, p, 2, 1) - comparegetByHeightAlgorithms(t, p, 5, 1) - comparegetByHeightAlgorithms(t, p, 7, 7) - comparegetByHeightAlgorithms(t, p, 10, 9) - comparegetByHeightAlgorithms(t, p, 12, 12) - comparegetByHeightAlgorithms(t, p, 14, 13) - comparegetByHeightAlgorithms(t, p, 19, 18) - comparegetByHeightAlgorithms(t, p, 43, 23) - comparegetByHeightAlgorithms(t, p, 45, 44) - comparegetByHeightAlgorithms(t, p, 1025, 1024) - comparegetByHeightAlgorithms(t, p, 101, 100) - comparegetByHeightAlgorithms(t, p, 1e3, 199) - comparegetByHeightAlgorithms(t, p, 1e4, 1024) - comparegetByHeightAlgorithms(t, p, 1e9, 1e9) - comparegetByHeightAlgorithms(t, p, 1e9+1, 1e9) -} - -func createAndStoreCommits(t *testing.T, p Provider, heights []int64) { - chainID := "cache-best-height-binary-and-linear" - appHash := []byte("0xdeadbeef") - keys := GenValKeys(len(heights) / 2) - - for _, h := range heights { - vals := keys.ToValidators(10, int64(len(heights)/2)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p.StoreCommit(fc) - require.NoError(t, err, "StoreCommit height=%d", h) - } -} - -func comparegetByHeightAlgorithms(t *testing.T, p *memStoreProvider, ask, expect int64) { - algos := map[string]func(int64) (FullCommit, error){ - "getHeightByLinearSearch": p.getByHeightLinearSearch, - "getHeightByBinarySearch": p.getByHeightBinarySearch, - } - - for algo, fn := range algos { - fc, err := fn(ask) - // t.Logf("%s got=%v want=%d", algo, expect, fc.Height()) - require.Nil(t, err, "%s: %+v", algo, err) - if assert.Equal(t, expect, fc.Height()) { - err = p.StoreCommit(fc) - require.Nil(t, err, "%s: %+v", algo, err) - } - } -} - -var blankFullCommit FullCommit - -func ensureNonExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { - for i, qh := range data { - fc, err := fn(qh) - assert.NotNil(t, err, "#%d: %s: height=%d should return non-nil error", i, prefix, qh) - assert.Equal(t, fc, blankFullCommit, "#%d: %s: height=%d\ngot =%+v\nwant=%+v", i, prefix, qh, fc, blankFullCommit) - } -} - -func ensureExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { - for i, qh := range data { - fc, err := fn(qh) - assert.Nil(t, err, "#%d: %s: height=%d should not return an error: %v", i, prefix, qh, err) - assert.NotEqual(t, fc, blankFullCommit, "#%d: %s: height=%d got a blankCommit", i, prefix, qh) - } -} - -func BenchmarkGenCommit20(b *testing.B) { - keys := GenValKeys(20) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommit100(b *testing.B) { - keys := GenValKeys(100) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommitSec20(b *testing.B) { - keys := GenSecpValKeys(20) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommitSec100(b *testing.B) { - keys := GenSecpValKeys(100) - benchmarkGenCommit(b, keys) -} - -func benchmarkGenCommit(b *testing.B, keys ValKeys) { - chainID := fmt.Sprintf("bench-%d", len(keys)) - vals := keys.ToValidators(20, 10) - for i := 0; i < b.N; i++ { - h := int64(1 + i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - resHash := []byte(fmt.Sprintf("res=%d", h)) - keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), resHash, 0, len(keys)) - } -} - -// this benchmarks generating one key -func BenchmarkGenValKeys(b *testing.B) { - keys := GenValKeys(20) - for i := 0; i < b.N; i++ { - keys = keys.Extend(1) - } -} 
- -// this benchmarks generating one key -func BenchmarkGenSecpValKeys(b *testing.B) { - keys := GenSecpValKeys(20) - for i := 0; i < b.N; i++ { - keys = keys.Extend(1) - } -} - -func BenchmarkToValidators20(b *testing.B) { - benchmarkToValidators(b, 20) -} - -func BenchmarkToValidators100(b *testing.B) { - benchmarkToValidators(b, 100) -} - -// this benchmarks constructing the validator set (.PubKey() * nodes) -func benchmarkToValidators(b *testing.B, nodes int) { - keys := GenValKeys(nodes) - for i := 1; i <= b.N; i++ { - keys.ToValidators(int64(2*i), int64(i)) - } -} - -func BenchmarkToValidatorsSec100(b *testing.B) { - benchmarkToValidatorsSec(b, 100) -} - -// this benchmarks constructing the validator set (.PubKey() * nodes) -func benchmarkToValidatorsSec(b *testing.B, nodes int) { - keys := GenSecpValKeys(nodes) - for i := 1; i <= b.N; i++ { - keys.ToValidators(int64(2*i), int64(i)) - } -} - -func BenchmarkCertifyCommit20(b *testing.B) { - keys := GenValKeys(20) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommit100(b *testing.B) { - keys := GenValKeys(100) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommitSec20(b *testing.B) { - keys := GenSecpValKeys(20) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommitSec100(b *testing.B) { - keys := GenSecpValKeys(100) - benchmarkCertifyCommit(b, keys) -} - -func benchmarkCertifyCommit(b *testing.B, keys ValKeys) { - chainID := "bench-certify" - vals := keys.ToValidators(20, 10) - cert := NewStaticCertifier(chainID, vals) - check := keys.GenCommit(chainID, 123, nil, vals, []byte("foo"), []byte("params"), []byte("res"), 0, len(keys)) - for i := 0; i < b.N; i++ { - err := cert.Certify(check) - if err != nil { - panic(err) - } - } - -} - -type algo bool - -const ( - linearSearch = true - binarySearch = false -) - -// Lazy load the commits -var fcs5, fcs50, fcs100, fcs500, fcs1000 []FullCommit -var h5, h50, h100, h500, h1000 []int64 -var commitsOnce sync.Once - -func lazyGenerateFullCommits(b *testing.B) { - b.Logf("Generating FullCommits") - commitsOnce.Do(func() { - fcs5, h5 = genFullCommits(nil, nil, 5) - b.Logf("Generated 5 FullCommits") - fcs50, h50 = genFullCommits(fcs5, h5, 50) - b.Logf("Generated 50 FullCommits") - fcs100, h100 = genFullCommits(fcs50, h50, 100) - b.Logf("Generated 100 FullCommits") - fcs500, h500 = genFullCommits(fcs100, h100, 500) - b.Logf("Generated 500 FullCommits") - fcs1000, h1000 = genFullCommits(fcs500, h500, 1000) - b.Logf("Generated 1000 FullCommits") - }) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch5(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch50(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch100(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch500(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch1000(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch5(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch50(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, binarySearch) -} 
- -func BenchmarkMemStoreProviderGetByHeightBinarySearch100(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch500(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch1000(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, binarySearch) -} - -var rng = rand.New(rand.NewSource(10)) - -func benchmarkMemStoreProvidergetByHeight(b *testing.B, fcs []FullCommit, fHeights []int64, algo algo) { - lazyGenerateFullCommits(b) - - b.StopTimer() - mp := NewMemStoreProvider() - for i, fc := range fcs { - if err := mp.StoreCommit(fc); err != nil { - b.Fatalf("FullCommit #%d: err: %v", i, err) - } - } - qHeights := make([]int64, len(fHeights)) - copy(qHeights, fHeights) - // Append some non-existent heights to trigger the worst cases. - qHeights = append(qHeights, 19, -100, -10000, 1e7, -17, 31, -1e9) - - memP := mp.(*memStoreProvider) - searchFn := memP.getByHeightLinearSearch - if algo == binarySearch { // nolint - searchFn = memP.getByHeightBinarySearch - } - - hPerm := rng.Perm(len(qHeights)) - b.StartTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, j := range hPerm { - h := qHeights[j] - if _, err := searchFn(h); err != nil { - } - } - } - b.ReportAllocs() -} - -func genFullCommits(prevFC []FullCommit, prevH []int64, want int) ([]FullCommit, []int64) { - fcs := make([]FullCommit, len(prevFC)) - copy(fcs, prevFC) - heights := make([]int64, len(prevH)) - copy(heights, prevH) - - appHash := []byte("benchmarks") - chainID := "benchmarks-gen-full-commits" - n := want - keys := GenValKeys(2 + (n / 3)) - for i := 0; i < n; i++ { - vals := keys.ToValidators(10, int64(n/2)) - h := int64(20 + 10*i) - fcs = append(fcs, keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5)) - heights = append(heights, h) - } - return fcs, heights -} - -func TestMemStoreProviderLatestCommitAlwaysUsesSorted(t *testing.T) { - p := NewMemStoreProvider().(*memStoreProvider) - // 1. With no commits yet stored, it should return ErrCommitNotFound - got, err := p.LatestCommit() - require.Equal(t, err.Error(), liteErr.ErrCommitNotFound().Error(), "should return ErrCommitNotFound()") - require.Equal(t, got, blankFullCommit, "With no fullcommits, it should return a blank FullCommit") - - // 2. Generate some full commits now and we'll add them unsorted. - genAndStoreCommitsOfHeight(t, p, 27, 100, 1, 12, 1000, 17, 91) - fc, err := p.LatestCommit() - require.Nil(t, err, "with commits saved no error expected") - require.NotEqual(t, fc, blankFullCommit, "with commits saved no blank FullCommit") - require.Equal(t, fc.Height(), int64(1000), "the latest commit i.e. the largest expected") -} - -func genAndStoreCommitsOfHeight(t *testing.T, p Provider, heights ...int64) { - n := len(heights) - appHash := []byte("tests") - chainID := "tests-gen-full-commits" - keys := GenValKeys(2 + (n / 3)) - for i := 0; i < n; i++ { - h := heights[i] - vals := keys.ToValidators(10, int64(n/2)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p.StoreCommit(fc) - require.NoError(t, err, "StoreCommit height=%d", h) - } -} diff --git a/lite/provider.go b/lite/provider.go index 22dc964a..34ba40d4 100644 --- a/lite/provider.go +++ b/lite/provider.go @@ -1,103 +1,28 @@ package lite -// Provider is used to get more validators by other means. 
-// -// Examples: MemProvider, files.Provider, client.Provider, CacheProvider.... +import ( + "github.com/tendermint/tendermint/types" +) + +// Provider provides information for the lite client to sync validators. +// Examples: MemProvider, files.Provider, client.Provider, CacheProvider. type Provider interface { - // StoreCommit saves a FullCommit after we have verified it, - // so we can query for it later. Important for updating our - // store of trusted commits. - StoreCommit(fc FullCommit) error - // GetByHeight returns the closest commit with height <= h. - GetByHeight(h int64) (FullCommit, error) - // GetByHash returns a commit exactly matching this validator hash. - GetByHash(hash []byte) (FullCommit, error) - // LatestCommit returns the newest commit stored. - LatestCommit() (FullCommit, error) + + // LatestFullCommit returns the latest commit with minHeight <= height <= + // maxHeight. + // If maxHeight is zero, returns the latest where minHeight <= height. + LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) + + // Get the valset that corresponds to chainID and height and return. + // Height must be >= 1. + ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) } -// cacheProvider allows you to place one or more caches in front of a source -// Provider. It runs through them in order until a match is found. -// So you can keep a local cache, and check with the network if -// no data is there. -type cacheProvider struct { - Providers []Provider -} +// A provider that can also persist new information. +// Examples: MemProvider, files.Provider, CacheProvider. +type PersistentProvider interface { + Provider -// NewCacheProvider returns a new provider which wraps multiple other providers. -func NewCacheProvider(providers ...Provider) Provider { - return cacheProvider{ - Providers: providers, - } -} - -// StoreCommit tries to add the seed to all providers. -// -// Aborts on first error it encounters (closest provider) -func (c cacheProvider) StoreCommit(fc FullCommit) (err error) { - for _, p := range c.Providers { - err = p.StoreCommit(fc) - if err != nil { - break - } - } - return err -} - -// GetByHeight should return the closest possible match from all providers. -// -// The Cache is usually organized in order from cheapest call (memory) -// to most expensive calls (disk/network). However, since GetByHeight returns -// a FullCommit at h' <= h, if the memory has a seed at h-10, but the network would -// give us the exact match, a naive "stop at first non-error" would hide -// the actual desired results. -// -// Thus, we query each provider in order until we find an exact match -// or we finished querying them all. If at least one returned a non-error, -// then this returns the best match (minimum h-h'). -func (c cacheProvider) GetByHeight(h int64) (fc FullCommit, err error) { - for _, p := range c.Providers { - var tfc FullCommit - tfc, err = p.GetByHeight(h) - if err == nil { - if tfc.Height() > fc.Height() { - fc = tfc - } - if tfc.Height() == h { - break - } - } - } - // even if the last one had an error, if any was a match, this is good - if fc.Height() > 0 { - err = nil - } - return fc, err -} - -// GetByHash returns the FullCommit for the hash or an error if the commit is not found. 
-func (c cacheProvider) GetByHash(hash []byte) (fc FullCommit, err error) { - for _, p := range c.Providers { - fc, err = p.GetByHash(hash) - if err == nil { - break - } - } - return fc, err -} - -// LatestCommit returns the latest FullCommit or an error if no commit exists. -func (c cacheProvider) LatestCommit() (fc FullCommit, err error) { - for _, p := range c.Providers { - var tfc FullCommit - tfc, err = p.LatestCommit() - if err == nil && tfc.Height() > fc.Height() { - fc = tfc - } - } - // even if the last one had an error, if any was a match, this is good - if fc.Height() > 0 { - err = nil - } - return fc, err + // SaveFullCommit saves a FullCommit (without verification). + SaveFullCommit(fc FullCommit) error } diff --git a/lite/provider_test.go b/lite/provider_test.go index 77b5b1a8..96523d94 100644 --- a/lite/provider_test.go +++ b/lite/provider_test.go @@ -1,98 +1,88 @@ -// nolint: vetshadow -package lite_test +package lite import ( + "errors" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tmlibs/db" ) -// missingProvider doesn't store anything, always a miss -// Designed as a mock for testing +// missingProvider doesn't store anything, always a miss. +// Designed as a mock for testing. type missingProvider struct{} // NewMissingProvider returns a provider which does not store anything and always misses. -func NewMissingProvider() lite.Provider { +func NewMissingProvider() PersistentProvider { return missingProvider{} } -func (missingProvider) StoreCommit(lite.FullCommit) error { return nil } -func (missingProvider) GetByHeight(int64) (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() +func (missingProvider) SaveFullCommit(FullCommit) error { return nil } +func (missingProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) { + return FullCommit{}, lerr.ErrCommitNotFound() } -func (missingProvider) GetByHash([]byte) (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() -} -func (missingProvider) LatestCommit() (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() +func (missingProvider) ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) { + return nil, errors.New("missing validator set") } func TestMemProvider(t *testing.T) { - p := lite.NewMemStoreProvider() + p := NewDBProvider(dbm.NewMemDB()) checkProvider(t, p, "test-mem", "empty") } -func TestCacheProvider(t *testing.T) { - p := lite.NewCacheProvider( +func TestMultiProvider(t *testing.T) { + p := NewMultiProvider( NewMissingProvider(), - lite.NewMemStoreProvider(), + NewDBProvider(dbm.NewMemDB()), NewMissingProvider(), ) checkProvider(t, p, "test-cache", "kjfhekfhkewhgit") } -func checkProvider(t *testing.T, p lite.Provider, chainID, app string) { +func checkProvider(t *testing.T, p PersistentProvider, chainID, app string) { assert, require := assert.New(t), require.New(t) appHash := []byte(app) - keys := lite.GenValKeys(5) + keys := genPrivKeys(5) count := 10 - // make a bunch of commits... - commits := make([]lite.FullCommit, count) + // Make a bunch of full commits. 
+ fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - // two commits for each validator, to check how we handle dups - // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... vals := keys.ToValidators(10, int64(count/2)) h := int64(20 + 10*i) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) + fcz[i] = keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) } - // check provider is empty - fc, err := p.GetByHeight(20) + // Check that provider is initially empty. + fc, err := p.LatestFullCommit(chainID, 1, 1<<63-1) require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) + assert.True(lerr.IsErrCommitNotFound(err)) - fc, err = p.GetByHash(commits[3].ValidatorsHash()) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - // now add them all to the provider - for _, s := range commits { - err = p.StoreCommit(s) + // Save all full commits to the provider. + for _, fc := range fcz { + err = p.SaveFullCommit(fc) require.Nil(err) - // and make sure we can get it back - s2, err := p.GetByHash(s.ValidatorsHash()) + // Make sure we can get it back. + fc2, err := p.LatestFullCommit(chainID, fc.Height(), fc.Height()) assert.Nil(err) - assert.Equal(s, s2) - // by height as well - s2, err = p.GetByHeight(s.Height()) - assert.Nil(err) - assert.Equal(s, s2) + assert.Equal(fc.SignedHeader, fc2.SignedHeader) + assert.Equal(fc.Validators, fc2.Validators) + assert.Equal(fc.NextValidators, fc2.NextValidators) } - // make sure we get the last hash if we overstep - fc, err = p.GetByHeight(5000) + // Make sure we get the last hash if we overstep. + fc, err = p.LatestFullCommit(chainID, 1, 5000) if assert.Nil(err) { - assert.Equal(commits[count-1].Height(), fc.Height()) - assert.Equal(commits[count-1], fc) + assert.Equal(fcz[count-1].Height(), fc.Height()) + assert.Equal(fcz[count-1], fc) } - // and middle ones as well - fc, err = p.GetByHeight(47) + // ... and middle ones as well. + fc, err = p.LatestFullCommit(chainID, 1, 47) if assert.Nil(err) { // we only step by 10, so 40 must be the one below this assert.EqualValues(40, fc.Height()) @@ -100,50 +90,49 @@ func checkProvider(t *testing.T, p lite.Provider, chainID, app string) { } -// this will make a get height, and if it is good, set the data as well -func checkGetHeight(t *testing.T, p lite.Provider, ask, expect int64) { - fc, err := p.GetByHeight(ask) - require.Nil(t, err, "GetByHeight") +// This will make a get height, and if it is good, set the data as well. +func checkLatestFullCommit(t *testing.T, p PersistentProvider, chainID string, ask, expect int64) { + fc, err := p.LatestFullCommit(chainID, 1, ask) + require.Nil(t, err) if assert.Equal(t, expect, fc.Height()) { - err = p.StoreCommit(fc) - require.Nil(t, err, "StoreCommit") + err = p.SaveFullCommit(fc) + require.Nil(t, err) } } -func TestCacheGetsBestHeight(t *testing.T) { - // assert, require := assert.New(t), require.New(t) +func TestMultiLatestFullCommit(t *testing.T) { require := require.New(t) - // we will write data to the second level of the cache (p2), - // and see what gets cached, stored in - p := lite.NewMemStoreProvider() - p2 := lite.NewMemStoreProvider() - cp := lite.NewCacheProvider(p, p2) + // We will write data to the second level of the cache (p2), and see what + // gets cached/stored in. 
+ p := NewDBProvider(dbm.NewMemDB()) + p2 := NewDBProvider(dbm.NewMemDB()) + cp := NewMultiProvider(p, p2) chainID := "cache-best-height" appHash := []byte("01234567") - keys := lite.GenValKeys(5) + keys := genPrivKeys(5) count := 10 - // set a bunch of commits + // Set a bunch of full commits. for i := 0; i < count; i++ { vals := keys.ToValidators(10, int64(count/2)) h := int64(10 * (i + 1)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p2.StoreCommit(fc) + fc := keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) + err := p2.SaveFullCommit(fc) require.NoError(err) } - // let's get a few heights from the cache and set them proper - checkGetHeight(t, cp, 57, 50) - checkGetHeight(t, cp, 33, 30) + // Get a few heights from the cache and set them proper. + checkLatestFullCommit(t, cp, chainID, 57, 50) + checkLatestFullCommit(t, cp, chainID, 33, 30) // make sure they are set in p as well (but nothing else) - checkGetHeight(t, p, 44, 30) - checkGetHeight(t, p, 50, 50) - checkGetHeight(t, p, 99, 50) + checkLatestFullCommit(t, p, chainID, 44, 30) + checkLatestFullCommit(t, p, chainID, 50, 50) + checkLatestFullCommit(t, p, chainID, 99, 50) // now, query the cache for a higher value - checkGetHeight(t, p2, 99, 90) - checkGetHeight(t, cp, 99, 90) + checkLatestFullCommit(t, p2, chainID, 99, 90) + checkLatestFullCommit(t, cp, chainID, 99, 90) } diff --git a/lite/proxy/block.go b/lite/proxy/block.go index 4cff9ee6..663395fa 100644 --- a/lite/proxy/block.go +++ b/lite/proxy/block.go @@ -2,27 +2,24 @@ package proxy import ( "bytes" + "errors" - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - certerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" ) -func ValidateBlockMeta(meta *types.BlockMeta, check lite.Commit) error { +func ValidateBlockMeta(meta *types.BlockMeta, sh types.SignedHeader) error { if meta == nil { return errors.New("expecting a non-nil BlockMeta") } // TODO: check the BlockID?? - return ValidateHeader(meta.Header, check) + return ValidateHeader(meta.Header, sh) } -func ValidateBlock(meta *types.Block, check lite.Commit) error { +func ValidateBlock(meta *types.Block, sh types.SignedHeader) error { if meta == nil { return errors.New("expecting a non-nil Block") } - err := ValidateHeader(meta.Header, check) + err := ValidateHeader(meta.Header, sh) if err != nil { return err } @@ -32,17 +29,19 @@ func ValidateBlock(meta *types.Block, check lite.Commit) error { return nil } -func ValidateHeader(head *types.Header, check lite.Commit) error { +func ValidateHeader(head *types.Header, sh types.SignedHeader) error { if head == nil { return errors.New("expecting a non-nil Header") } - // make sure they are for the same height (obvious fail) - if head.Height != check.Height() { - return certerr.ErrHeightMismatch(head.Height, check.Height()) + if sh.Header == nil { + return errors.New("unexpected empty SignedHeader") } - // check if they are equal by using hashes - chead := check.Header - if !bytes.Equal(head.Hash(), chead.Hash()) { + // Make sure they are for the same height (obvious fail). + if head.Height != sh.Height { + return errors.New("Header heights mismatched") + } + // Check if they are equal by using hashes. 
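+	// Header.Hash() is a Merkle root over all header fields, so this single
+	// comparison covers height, time, validator hashes, app hash, and so on.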
+ if !bytes.Equal(head.Hash(), sh.Hash()) { return errors.New("Headers don't match") } return nil diff --git a/lite/proxy/certifier.go b/lite/proxy/certifier.go index 6e319dc0..a6765402 100644 --- a/lite/proxy/certifier.go +++ b/lite/proxy/certifier.go @@ -2,31 +2,29 @@ package proxy import ( "github.com/tendermint/tendermint/lite" - certclient "github.com/tendermint/tendermint/lite/client" - "github.com/tendermint/tendermint/lite/files" + lclient "github.com/tendermint/tendermint/lite/client" + dbm "github.com/tendermint/tmlibs/db" ) func GetCertifier(chainID, rootDir, nodeAddr string) (*lite.InquiringCertifier, error) { - trust := lite.NewCacheProvider( - lite.NewMemStoreProvider(), - files.NewProvider(rootDir), + trust := lite.NewMultiProvider( + lite.NewDBProvider(dbm.NewMemDB()), + lite.NewDBProvider(dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)), ) - source := certclient.NewHTTPProvider(nodeAddr) + source := lclient.NewHTTPProvider(chainID, nodeAddr) // XXX: total insecure hack to avoid `init` - fc, err := source.LatestCommit() - /* XXX - // this gets the most recent verified commit - fc, err := trust.LatestCommit() - if certerr.IsCommitNotFoundErr(err) { - return nil, errors.New("Please run init first to establish a root of trust") - }*/ + fc, err := source.LatestFullCommit(chainID, 1, 1) + if err != nil { + return nil, err + } + err = trust.SaveFullCommit(fc) if err != nil { return nil, err } - cert, err := lite.NewInquiringCertifier(chainID, fc, trust, source) + cert, err := lite.NewInquiringCertifier(chainID, trust, source) if err != nil { return nil, err } diff --git a/lite/proxy/errors.go b/lite/proxy/errors.go index 5a2713e3..9af72a54 100644 --- a/lite/proxy/errors.go +++ b/lite/proxy/errors.go @@ -1,22 +1,24 @@ package proxy import ( - "fmt" - - "github.com/pkg/errors" + cmn "github.com/tendermint/tmlibs/common" ) -//-------------------------------------------- +type errNoData struct{} -var errNoData = fmt.Errorf("No data returned for query") +func (e errNoData) Error() string { + return "No data returned for query" +} -// IsNoDataErr checks whether an error is due to a query returning empty data -func IsNoDataErr(err error) bool { - return errors.Cause(err) == errNoData +// IsErrNoData checks whether an error is due to a query returning empty data +func IsErrNoData(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errNoData) + return ok + } + return false } func ErrNoData() error { - return errors.WithStack(errNoData) + return cmn.ErrorWrap(errNoData{}, "") } - -//-------------------------------------------- diff --git a/lite/proxy/errors_test.go b/lite/proxy/errors_test.go deleted file mode 100644 index 7f51be50..00000000 --- a/lite/proxy/errors_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package proxy - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestErrorNoData(t *testing.T) { - e1 := ErrNoData() - assert.True(t, IsNoDataErr(e1)) - - e2 := errors.New("foobar") - assert.False(t, IsNoDataErr(e2)) - assert.False(t, IsNoDataErr(nil)) -} diff --git a/lite/proxy/query.go b/lite/proxy/query.go index 9c9557f8..aa25cdcf 100644 --- a/lite/proxy/query.go +++ b/lite/proxy/query.go @@ -1,15 +1,16 @@ package proxy import ( + "fmt" + "github.com/pkg/errors" cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tendermint/lite" - "github.com/tendermint/tendermint/lite/client" - certerr "github.com/tendermint/tendermint/lite/errors" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes 
"github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/types" ) // KeyProof represents a proof of existence or absence of a single key. @@ -75,12 +76,12 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption } // AppHash for height H is in header H+1 - commit, err := GetCertifiedCommit(resp.Height+1, node, cert) + signedHeader, err := GetCertifiedCommit(resp.Height+1, node, cert) if err != nil { return nil, nil, err } - _ = commit + _ = signedHeader return &ctypes.ResultABCIQuery{Response: resp}, nil, nil /* // TODO refactor so iavl stuff is not in tendermint core @@ -98,7 +99,7 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption } // Validate the proof against the certified header to ensure data integrity. - err = eproof.Verify(resp.Key, resp.Value, commit.Header.AppHash) + err = eproof.Verify(resp.Key, resp.Value, signedHeader.AppHash) if err != nil { return nil, nil, errors.Wrap(err, "Couldn't verify proof") } @@ -117,7 +118,7 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption } // Validate the proof against the certified header to ensure data integrity. - err = aproof.Verify(resp.Key, nil, commit.Header.AppHash) + err = aproof.Verify(resp.Key, nil, signedHeader.AppHash) if err != nil { return nil, nil, errors.Wrap(err, "Couldn't verify proof") } @@ -125,28 +126,29 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption */ } -// GetCertifiedCommit gets the signed header for a given height -// and certifies it. Returns error if unable to get a proven header. -func GetCertifiedCommit(h int64, node rpcclient.Client, cert lite.Certifier) (lite.Commit, error) { +// GetCertifiedCommit gets the signed header for a given height and certifies +// it. Returns error if unable to get a proven header. +func GetCertifiedCommit(h int64, client rpcclient.Client, cert lite.Certifier) (types.SignedHeader, error) { // FIXME: cannot use cert.GetByHeight for now, as it also requires // Validators and will fail on querying tendermint for non-current height. // When this is supported, we should use it instead... - rpcclient.WaitForHeight(node, h, nil) - cresp, err := node.Commit(&h) + rpcclient.WaitForHeight(client, h, nil) + cresp, err := client.Commit(&h) if err != nil { - return lite.Commit{}, err + return types.SignedHeader{}, err } - commit := client.CommitFromResult(cresp) - // validate downloaded checkpoint with our request and trust store. - if commit.Height() != h { - return lite.Commit{}, certerr.ErrHeightMismatch(h, commit.Height()) + // Validate downloaded checkpoint with our request and trust store. + sh := cresp.SignedHeader + if sh.Height != h { + return types.SignedHeader{}, fmt.Errorf("height mismatch: want %v got %v", + h, sh.Height) } - if err = cert.Certify(commit); err != nil { - return lite.Commit{}, err + if err = cert.Certify(sh); err != nil { + return types.SignedHeader{}, err } - return commit, nil + return sh, nil } diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go index 38a43af2..fcc6659a 100644 --- a/lite/proxy/query_test.go +++ b/lite/proxy/query_test.go @@ -19,12 +19,12 @@ import ( ) var node *nm.Node +var chainID = "tendermint_test" // TODO use from config. // TODO fix tests!! 
func TestMain(m *testing.M) { app := kvstore.NewKVStoreApplication() - node = rpctest.StartTendermint(app) code := m.Run() @@ -55,28 +55,28 @@ func _TestAppProofs(t *testing.T) { brh := br.Height // This sets up our trust on the node based on some past point. - source := certclient.NewProvider(cl) - seed, err := source.GetByHeight(brh - 2) + source := certclient.NewProvider(chainID, cl) + seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) require.NoError(err, "%+v", err) - cert := lite.NewStaticCertifier("my-chain", seed.Validators) + cert := lite.NewBaseCertifier("my-chain", seed.Height(), seed.Validators) client.WaitForHeight(cl, 3, nil) - latest, err := source.LatestCommit() + latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1) require.NoError(err, "%+v", err) - rootHash := latest.Header.AppHash + rootHash := latest.SignedHeader.AppHash // verify a query before the tx block has no data (and valid non-exist proof) bs, height, proof, err := GetWithProof(k, brh-1, cl, cert) fmt.Println(bs, height, proof, err) require.NotNil(err) - require.True(IsNoDataErr(err), err.Error()) + require.True(IsErrNoData(err), err.Error()) require.Nil(bs) // but given that block it is good bs, height, proof, err = GetWithProof(k, brh, cl, cert) require.NoError(err, "%+v", err) require.NotNil(proof) - require.True(height >= int64(latest.Header.Height)) + require.True(height >= int64(latest.Height())) // Alexis there is a bug here, somehow the above code gives us rootHash = nil // and proof.Verify doesn't care, while proofNotExists.Verify fails. @@ -92,7 +92,7 @@ func _TestAppProofs(t *testing.T) { // Test non-existing key. missing := []byte("my-missing-key") bs, _, proof, err = GetWithProof(missing, 0, cl, cert) - require.True(IsNoDataErr(err)) + require.True(IsErrNoData(err)) require.Nil(bs) require.NotNil(proof) err = proof.Verify(missing, nil, rootHash) @@ -114,10 +114,10 @@ func _TestTxProofs(t *testing.T) { require.EqualValues(0, br.DeliverTx.Code) brh := br.Height - source := certclient.NewProvider(cl) - seed, err := source.GetByHeight(brh - 2) + source := certclient.NewProvider(chainID, cl) + seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) require.NoError(err, "%+v", err) - cert := lite.NewStaticCertifier("my-chain", seed.Validators) + cert := lite.NewBaseCertifier("my-chain", seed.Height(), seed.Validators) // First let's make sure a bogus transaction hash returns a valid non-existence proof. 
key := types.Tx([]byte("bogus")).Hash() diff --git a/lite/proxy/validate_test.go b/lite/proxy/validate_test.go index 782a6aab..af4fc26f 100644 --- a/lite/proxy/validate_test.go +++ b/lite/proxy/validate_test.go @@ -6,7 +6,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/lite" "github.com/tendermint/tendermint/lite/proxy" "github.com/tendermint/tendermint/types" ) @@ -26,9 +25,9 @@ var hdrHeight11 = &types.Header{ func TestValidateBlock(t *testing.T) { tests := []struct { - block *types.Block - commit lite.Commit - wantErr string + block *types.Block + signedHeader types.SignedHeader + wantErr string }{ { block: nil, wantErr: "non-nil Block", @@ -37,32 +36,32 @@ func TestValidateBlock(t *testing.T) { block: &types.Block{}, wantErr: "nil Header", }, { - block: &types.Block{Header: new(types.Header)}, + block: &types.Block{Header: new(types.Header)}, wantErr: "unexpected empty SignedHeader", }, // Start Header.Height mismatch test { - block: &types.Block{Header: &types.Header{Height: 10}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "don't match - 10 vs 11", + block: &types.Block{Header: &types.Header{Height: 10}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Header heights mismatched", }, { - block: &types.Block{Header: &types.Header{Height: 11}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, + block: &types.Block{Header: &types.Header{Height: 11}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, }, // End Header.Height mismatch test // Start Header.Hash mismatch test { - block: &types.Block{Header: hdrHeight11}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "Headers don't match", + block: &types.Block{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Headers don't match", }, { - block: &types.Block{Header: hdrHeight11}, - commit: lite.Commit{Header: hdrHeight11}, + block: &types.Block{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: hdrHeight11}, }, // End Header.Hash mismatch test @@ -72,7 +71,7 @@ func TestValidateBlock(t *testing.T) { Header: &types.Header{Height: 11}, Data: &types.Data{Txs: []types.Tx{[]byte("0xDE"), []byte("AD")}}, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11}, Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("0xDEADBEEF")}}, }, @@ -83,7 +82,7 @@ func TestValidateBlock(t *testing.T) { Header: &types.Header{Height: 11, DataHash: deadBeefHash}, Data: &types.Data{Txs: deadBeefTxs}, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11}, Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}}, }, @@ -92,7 +91,7 @@ func TestValidateBlock(t *testing.T) { } for i, tt := range tests { - err := proxy.ValidateBlock(tt.block, tt.commit) + err := proxy.ValidateBlock(tt.block, tt.signedHeader) if tt.wantErr != "" { if err == nil { assert.FailNowf(t, "Unexpectedly passed", "#%d", i) @@ -108,9 +107,9 @@ func TestValidateBlock(t *testing.T) { func TestValidateBlockMeta(t *testing.T) { tests := []struct { - meta *types.BlockMeta - commit lite.Commit - wantErr string + meta *types.BlockMeta + signedHeader types.SignedHeader + wantErr string }{ { meta: nil, wantErr: "non-nil BlockMeta", @@ -119,32 +118,32 @@ func TestValidateBlockMeta(t *testing.T) { meta: &types.BlockMeta{}, wantErr: "non-nil Header", }, { - meta: &types.BlockMeta{Header: 
new(types.Header)}, + meta: &types.BlockMeta{Header: new(types.Header)}, wantErr: "unexpected empty SignedHeader", }, // Start Header.Height mismatch test { - meta: &types.BlockMeta{Header: &types.Header{Height: 10}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "don't match - 10 vs 11", + meta: &types.BlockMeta{Header: &types.Header{Height: 10}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Header heights mismatched", }, { - meta: &types.BlockMeta{Header: &types.Header{Height: 11}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, + meta: &types.BlockMeta{Header: &types.Header{Height: 11}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, }, // End Header.Height mismatch test // Start Headers don't match test { - meta: &types.BlockMeta{Header: hdrHeight11}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "Headers don't match", + meta: &types.BlockMeta{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Headers don't match", }, { - meta: &types.BlockMeta{Header: hdrHeight11}, - commit: lite.Commit{Header: hdrHeight11}, + meta: &types.BlockMeta{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: hdrHeight11}, }, { @@ -156,7 +155,7 @@ func TestValidateBlockMeta(t *testing.T) { Time: testTime1, }, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11, DataHash: deadBeefHash}, }, wantErr: "Headers don't match", @@ -170,7 +169,7 @@ func TestValidateBlockMeta(t *testing.T) { Time: testTime1, }, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{ Height: 11, DataHash: deadBeefHash, ValidatorsHash: []byte("Tendermint"), @@ -189,7 +188,7 @@ func TestValidateBlockMeta(t *testing.T) { Time: testTime2, }, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{ Height: 11, DataHash: deadBeefHash, ValidatorsHash: []byte("Tendermint-x"), @@ -203,7 +202,7 @@ func TestValidateBlockMeta(t *testing.T) { } for i, tt := range tests { - err := proxy.ValidateBlockMeta(tt.meta, tt.commit) + err := proxy.ValidateBlockMeta(tt.meta, tt.signedHeader) if tt.wantErr != "" { if err == nil { assert.FailNowf(t, "Unexpectedly passed", "#%d: wanted error %q", i, tt.wantErr) diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go index 5fb12a40..83fc96a1 100644 --- a/lite/proxy/wrapper.go +++ b/lite/proxy/wrapper.go @@ -4,7 +4,6 @@ import ( cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tendermint/lite" - certclient "github.com/tendermint/tendermint/lite/client" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" ) @@ -53,11 +52,11 @@ func (w Wrapper) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { return res, err } h := int64(res.Height) - check, err := GetCertifiedCommit(h, w.Client, w.cert) + sh, err := GetCertifiedCommit(h, w.Client, w.cert) if err != nil { return res, err } - err = res.Proof.Validate(check.Header.DataHash) + err = res.Proof.Validate(sh.DataHash) return res, err } @@ -74,12 +73,12 @@ func (w Wrapper) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlock // go and verify every blockmeta in the result.... 
for _, meta := range r.BlockMetas { // get a checkpoint to verify from - c, err := w.Commit(&meta.Header.Height) + res, err := w.Commit(&meta.Header.Height) if err != nil { return nil, err } - check := certclient.CommitFromResult(c) - err = ValidateBlockMeta(meta, check) + sh := res.SignedHeader + err = ValidateBlockMeta(meta, sh) if err != nil { return nil, err } @@ -95,18 +94,18 @@ func (w Wrapper) Block(height *int64) (*ctypes.ResultBlock, error) { return nil, err } // get a checkpoint to verify from - c, err := w.Commit(height) + res, err := w.Commit(height) if err != nil { return nil, err } - check := certclient.CommitFromResult(c) + sh := res.SignedHeader // now verify - err = ValidateBlockMeta(r.BlockMeta, check) + err = ValidateBlockMeta(r.BlockMeta, sh) if err != nil { return nil, err } - err = ValidateBlock(r.Block, check) + err = ValidateBlock(r.Block, sh) if err != nil { return nil, err } @@ -118,13 +117,13 @@ func (w Wrapper) Block(height *int64) (*ctypes.ResultBlock, error) { // This is the foundation for all other verification in this module func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) { rpcclient.WaitForHeight(w.Client, *height, nil) - r, err := w.Client.Commit(height) + res, err := w.Client.Commit(height) // if we got it, then certify it if err == nil { - check := certclient.CommitFromResult(r) - err = w.cert.Certify(check) + sh := res.SignedHeader + err = w.cert.Certify(sh) } - return r, err + return res, err } // // WrappedSwitch creates a websocket connection that auto-verifies any info diff --git a/lite/static_certifier.go b/lite/static_certifier.go deleted file mode 100644 index 1ec3b809..00000000 --- a/lite/static_certifier.go +++ /dev/null @@ -1,73 +0,0 @@ -package lite - -import ( - "bytes" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -var _ Certifier = (*StaticCertifier)(nil) - -// StaticCertifier assumes a static set of validators, set on -// initilization and checks against them. -// The signatures on every header is checked for > 2/3 votes -// against the known validator set upon Certify -// -// Good for testing or really simple chains. Building block -// to support real-world functionality. -type StaticCertifier struct { - chainID string - vSet *types.ValidatorSet - vhash []byte -} - -// NewStaticCertifier returns a new certifier with a static validator set. -func NewStaticCertifier(chainID string, vals *types.ValidatorSet) *StaticCertifier { - return &StaticCertifier{ - chainID: chainID, - vSet: vals, - } -} - -// ChainID returns the chain id. -// Implements Certifier. -func (sc *StaticCertifier) ChainID() string { - return sc.chainID -} - -// Validators returns the validator set. -func (sc *StaticCertifier) Validators() *types.ValidatorSet { - return sc.vSet -} - -// Hash returns the hash of the validator set. -func (sc *StaticCertifier) Hash() []byte { - if len(sc.vhash) == 0 { - sc.vhash = sc.vSet.Hash() - } - return sc.vhash -} - -// Certify makes sure that the commit is valid. -// Implements Certifier. 
-func (sc *StaticCertifier) Certify(commit Commit) error { - // do basic sanity checks - err := commit.ValidateBasic(sc.chainID) - if err != nil { - return err - } - - // make sure it has the same validator set we have (static means static) - if !bytes.Equal(sc.Hash(), commit.Header.ValidatorsHash) { - return liteErr.ErrValidatorsChanged() - } - - // then make sure we have the proper signatures for this - err = sc.vSet.VerifyCommit(sc.chainID, commit.Commit.BlockID, - commit.Header.Height, commit.Commit) - return errors.WithStack(err) -} diff --git a/lite/types.go b/lite/types.go new file mode 100644 index 00000000..1f479799 --- /dev/null +++ b/lite/types.go @@ -0,0 +1,13 @@ +package lite + +import ( + "github.com/tendermint/tendermint/types" +) + +// Certifier checks the votes to make sure the block really is signed properly. +// Certifier must know the current or recent set of validitors by some other +// means. +type Certifier interface { + Certify(sheader types.SignedHeader) error + ChainID() string +} diff --git a/privval/priv_validator_test.go b/privval/priv_validator_test.go index 4fc8f97f..31410163 100644 --- a/privval/priv_validator_test.go +++ b/privval/priv_validator_test.go @@ -183,7 +183,7 @@ func TestDifferByTimestamp(t *testing.T) { assert.NoError(t, err, "expected no error signing proposal") signBytes := proposal.SignBytes(chainID) sig := proposal.Signature - timeStamp := clipToMS(proposal.Timestamp) + timeStamp := proposal.Timestamp // manipulate the timestamp. should get changed back proposal.Timestamp = proposal.Timestamp.Add(time.Millisecond) @@ -207,7 +207,7 @@ func TestDifferByTimestamp(t *testing.T) { signBytes := vote.SignBytes(chainID) sig := vote.Signature - timeStamp := clipToMS(vote.Timestamp) + timeStamp := vote.Timestamp // manipulate the timestamp. 
should get changed back vote.Timestamp = vote.Timestamp.Add(time.Millisecond) @@ -242,10 +242,3 @@ func newProposal(height int64, round int, partsHeader types.PartSetHeader) *type Timestamp: time.Now().UTC(), } } - -func clipToMS(t time.Time) time.Time { - nano := t.UnixNano() - million := int64(1000000) - nano = (nano / million) * million - return time.Unix(0, nano).UTC() -} diff --git a/privval/socket_test.go b/privval/socket_test.go index fcf21e0c..1813893a 100644 --- a/privval/socket_test.go +++ b/privval/socket_test.go @@ -119,7 +119,7 @@ func TestSocketPVAcceptDeadline(t *testing.T) { SocketPVAcceptDeadline(time.Millisecond)(sc) - assert.Equal(t, sc.Start().(cmn.Error).Cause(), ErrConnWaitTimeout) + assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) } func TestSocketPVDeadline(t *testing.T) { @@ -165,7 +165,7 @@ func TestSocketPVDeadline(t *testing.T) { time.Sleep(20 * time.Microsecond) _, err := sc.getPubKey() - assert.Equal(t, err.(cmn.Error).Cause(), ErrConnTimeout) + assert.Equal(t, err.(cmn.Error).Data(), ErrConnTimeout) } func TestSocketPVWait(t *testing.T) { @@ -178,7 +178,7 @@ func TestSocketPVWait(t *testing.T) { SocketPVConnWait(time.Millisecond)(sc) - assert.Equal(t, sc.Start().(cmn.Error).Cause(), ErrConnWaitTimeout) + assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) } func TestRemoteSignerRetry(t *testing.T) { @@ -221,7 +221,7 @@ func TestRemoteSignerRetry(t *testing.T) { RemoteSignerConnDeadline(time.Millisecond)(rs) RemoteSignerConnRetries(retries)(rs) - assert.Equal(t, rs.Start().(cmn.Error).Cause(), ErrDialRetryMax) + assert.Equal(t, rs.Start().(cmn.Error).Data(), ErrDialRetryMax) select { case attempts := <-attemptc: diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index a5ad5b4c..4cf44914 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -349,16 +349,16 @@ func BlockResults(heightPtr *int64) (*ctypes.ResultBlockResults, error) { return res, nil } -func getHeight(storeHeight int64, heightPtr *int64) (int64, error) { +func getHeight(currentHeight int64, heightPtr *int64) (int64, error) { if heightPtr != nil { height := *heightPtr if height <= 0 { return 0, fmt.Errorf("Height must be greater than 0") } - if height > storeHeight { + if height > currentHeight { return 0, fmt.Errorf("Height must be less than or equal to the current blockchain height") } return height, nil } - return storeHeight, nil + return currentHeight, nil } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index c026cd91..4e4c54de 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -44,8 +44,10 @@ import ( // } // ``` func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) + // The latest validator that we know is the + // NextValidator of the last block. 
+ height := consensusState.GetState().LastBlockHeight + 1 + height, err := getHeight(height, heightPtr) if err != nil { return nil, err } diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 27302be1..516eced0 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -33,10 +33,8 @@ type ResultBlock struct { // Commit and Header type ResultCommit struct { - // SignedHeader is header and commit, embedded so we only have - // one level in the json output - types.SignedHeader - CanonicalCommit bool `json:"canonical"` + types.SignedHeader `json:"signed_header"` + CanonicalCommit bool `json:"canonical"` } // ABCI results from a block diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go index 9bdb4dff..8d011ce5 100644 --- a/rpc/lib/server/http_server.go +++ b/rpc/lib/server/http_server.go @@ -55,7 +55,7 @@ func StartHTTPServer( listener, RecoverAndLogHandler(handler, logger), ) - logger.Error("RPC HTTP server stopped", "err", err) + logger.Info("RPC HTTP server stopped", "err", err) }() return listener, nil } diff --git a/scripts/install_abci_apps.sh b/scripts/install_abci_apps.sh index eb70070d..ee4b9dde 100644 --- a/scripts/install_abci_apps.sh +++ b/scripts/install_abci_apps.sh @@ -4,8 +4,8 @@ COMMIT=$(bash scripts/dep_utils/parse.sh abci) echo "Checking out vendored commit for abci: $COMMIT" -go get -d github.com/tendermint/abci -cd "$GOPATH/src/github.com/tendermint/abci" || exit +go get -d github.com/tendermint/tendermint/abci +cd "$GOPATH/src/github.com/tendermint/tendermint/abci" || exit git checkout "$COMMIT" make get_tools make get_vendor_deps diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go index c55713c7..efcac0f0 100644 --- a/test/app/grpc_client.go +++ b/test/app/grpc_client.go @@ -2,12 +2,12 @@ package main import ( "encoding/hex" - "encoding/json" "fmt" "os" "context" + "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/rpc/grpc" ) @@ -33,7 +33,7 @@ func main() { os.Exit(1) } - bz, err := json.Marshal(res) + bz, err := amino.NewCodec().MarshalJSON(res) if err != nil { fmt.Println(err) os.Exit(1) diff --git a/types/block.go b/types/block.go index e72b5fc7..0faa24db 100644 --- a/types/block.go +++ b/types/block.go @@ -360,6 +360,7 @@ func (commit *Commit) IsCommit() bool { } // ValidateBasic performs basic validation that doesn't involve state data. +// Does not actually check the cryptographic signatures. func (commit *Commit) ValidateBasic() error { if commit.BlockID.IsZero() { return errors.New("Commit cannot be for nil block") @@ -369,23 +370,23 @@ func (commit *Commit) ValidateBasic() error { } height, round := commit.Height(), commit.Round() - // validate the precommits + // Validate the precommits. for _, precommit := range commit.Precommits { // It's OK for precommits to be missing. if precommit == nil { continue } - // Ensure that all votes are precommits + // Ensure that all votes are precommits. if precommit.Type != VoteTypePrecommit { return fmt.Errorf("Invalid commit vote. Expected precommit, got %v", precommit.Type) } - // Ensure that all heights are the same + // Ensure that all heights are the same. if precommit.Height != height { return fmt.Errorf("Invalid commit precommit height. Expected %v, got %v", height, precommit.Height) } - // Ensure that all rounds are the same + // Ensure that all rounds are the same. if precommit.Round != round { return fmt.Errorf("Invalid commit precommit round. 
Expected %v, got %v", round, precommit.Round) @@ -417,19 +418,77 @@ func (commit *Commit) StringIndented(indent string) string { } return fmt.Sprintf(`Commit{ %s BlockID: %v -%s Precommits: %v +%s Precommits: +%s %v %s}#%v`, indent, commit.BlockID, - indent, strings.Join(precommitStrings, "\n"+indent+" "), + indent, + indent, strings.Join(precommitStrings, "\n"+indent+" "), indent, commit.hash) } //----------------------------------------------------------------------------- -// SignedHeader is a header along with the commits that prove it +// SignedHeader is a header along with the commits that prove it. type SignedHeader struct { - Header *Header `json:"header"` - Commit *Commit `json:"commit"` + *Header `json:"header"` + Commit *Commit `json:"commit"` +} + +// ValidateBasic does basic consistency checks and makes sure the header +// and commit are consistent. +// +// NOTE: This does not actually check the cryptographic signatures. Make +// sure to use a Certifier to validate the signatures actually provide a +// significantly strong proof for this header's validity. +func (sh SignedHeader) ValidateBasic(chainID string) error { + + // Make sure the header is consistent with the commit. + if sh.Header == nil { + return errors.New("SignedHeader missing header.") + } + if sh.Commit == nil { + return errors.New("SignedHeader missing commit (precommit votes).") + } + // Check ChainID. + if sh.ChainID != chainID { + return fmt.Errorf("Header belongs to another chain '%s' not '%s'", + sh.ChainID, chainID) + } + // Check Height. + if sh.Commit.Height() != sh.Height { + return fmt.Errorf("SignedHeader header and commit height mismatch: %v vs %v", + sh.Height, sh.Commit.Height()) + } + // Check Hash. + hhash := sh.Hash() + chash := sh.Commit.BlockID.Hash + if !bytes.Equal(hhash, chash) { + return fmt.Errorf("SignedHeader commit signs block %X, header is block %X", + chash, hhash) + } + // ValidateBasic on the Commit. + err := sh.Commit.ValidateBasic() + if err != nil { + return cmn.ErrorWrap(err, "commit.ValidateBasic failed during SignedHeader.ValidateBasic") + } + return nil +} + +func (sh SignedHeader) String() string { + return sh.StringIndented("") +} + +// StringIndented returns a string representation of the SignedHeader. 
+func (sh SignedHeader) StringIndented(indent string) string { + return fmt.Sprintf(`SignedHeader{ +%s %v +%s %v +%s}`, + indent, sh.Header.StringIndented(indent+" "), + indent, sh.Commit.StringIndented(indent+" "), + indent) + return "" } //----------------------------------------------------------------------------- diff --git a/types/canonical_json.go b/types/canonical_json.go index 258f7714..14881f62 100644 --- a/types/canonical_json.go +++ b/types/canonical_json.go @@ -9,7 +9,7 @@ import ( // Canonical json is amino's json for structs with fields in alphabetical order // TimeFormat is used for generating the sigs -const TimeFormat = "2006-01-02T15:04:05.000Z" +const TimeFormat = time.RFC3339Nano type CanonicalJSONBlockID struct { Hash cmn.HexBytes `json:"hash,omitempty"` @@ -110,5 +110,5 @@ func CanonicalTime(t time.Time) string { // Note that sending time over amino resets it to // local time, we need to force UTC here, so the // signatures match - return t.UTC().Format(TimeFormat) + return t.Round(0).UTC().Format(TimeFormat) } diff --git a/types/proposal.go b/types/proposal.go index 52ce8756..964ca0ca 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -34,7 +34,7 @@ func NewProposal(height int64, round int, blockPartsHeader PartSetHeader, polRou return &Proposal{ Height: height, Round: round, - Timestamp: time.Now().UTC(), + Timestamp: time.Now().Round(0).UTC(), BlockPartsHeader: blockPartsHeader, POLRound: polRound, POLBlockID: polBlockID, diff --git a/types/validator_set.go b/types/validator_set.go index 8f085090..dc1d0e88 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -29,48 +29,51 @@ type ValidatorSet struct { totalVotingPower int64 } -func NewValidatorSet(vals []*Validator) *ValidatorSet { - validators := make([]*Validator, len(vals)) - for i, val := range vals { +func NewValidatorSet(valz []*Validator) *ValidatorSet { + if valz != nil && len(valz) == 0 { + panic("validator set initialization slice cannot be an empty slice (but it can be nil)") + } + validators := make([]*Validator, len(valz)) + for i, val := range valz { validators[i] = val.Copy() } sort.Sort(ValidatorsByAddress(validators)) - vs := &ValidatorSet{ + vals := &ValidatorSet{ Validators: validators, } - - if vals != nil { - vs.IncrementAccum(1) + if valz != nil { + vals.IncrementAccum(1) } - return vs + return vals } // Increment Accum and update the proposer on a copy, and return it. -func (valSet *ValidatorSet) CopyIncrementAccum(times int) *ValidatorSet { - copy := valSet.Copy() +func (vals *ValidatorSet) CopyIncrementAccum(times int) *ValidatorSet { + copy := vals.Copy() copy.IncrementAccum(times) return copy } // Increment Accum and update the proposer. -func (valSet *ValidatorSet) IncrementAccum(times int) { +func (vals *ValidatorSet) IncrementAccum(times int) { + // Add VotingPower * times to each validator and order into heap. validatorsHeap := cmn.NewHeap() - for _, val := range valSet.Validators { - // check for overflow both multiplication and sum + for _, val := range vals.Validators { + // Check for overflow both multiplication and sum. val.Accum = safeAddClip(val.Accum, safeMulClip(val.VotingPower, int64(times))) validatorsHeap.PushComparable(val, accumComparable{val}) } - // Decrement the validator with most accum times times + // Decrement the validator with most accum times times. 
for i := 0; i < times; i++ { mostest := validatorsHeap.Peek().(*Validator) // mind underflow - mostest.Accum = safeSubClip(mostest.Accum, valSet.TotalVotingPower()) + mostest.Accum = safeSubClip(mostest.Accum, vals.TotalVotingPower()) if i == times-1 { - valSet.Proposer = mostest + vals.Proposer = mostest } else { validatorsHeap.Update(mostest, accumComparable{mostest}) } @@ -78,36 +81,36 @@ func (valSet *ValidatorSet) IncrementAccum(times int) { } // Copy each validator into a new ValidatorSet -func (valSet *ValidatorSet) Copy() *ValidatorSet { - validators := make([]*Validator, len(valSet.Validators)) - for i, val := range valSet.Validators { +func (vals *ValidatorSet) Copy() *ValidatorSet { + validators := make([]*Validator, len(vals.Validators)) + for i, val := range vals.Validators { // NOTE: must copy, since IncrementAccum updates in place. validators[i] = val.Copy() } return &ValidatorSet{ Validators: validators, - Proposer: valSet.Proposer, - totalVotingPower: valSet.totalVotingPower, + Proposer: vals.Proposer, + totalVotingPower: vals.totalVotingPower, } } // HasAddress returns true if address given is in the validator set, false - // otherwise. -func (valSet *ValidatorSet) HasAddress(address []byte) bool { - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(address, valSet.Validators[i].Address) <= 0 +func (vals *ValidatorSet) HasAddress(address []byte) bool { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address, vals.Validators[i].Address) <= 0 }) - return idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) + return idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address, address) } // GetByAddress returns an index of the validator with address and validator // itself if found. Otherwise, -1 and nil are returned. -func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) { - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(address, valSet.Validators[i].Address) <= 0 +func (vals *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address, vals.Validators[i].Address) <= 0 }) - if idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) { - return idx, valSet.Validators[idx].Copy() + if idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address, address) { + return idx, vals.Validators[idx].Copy() } return -1, nil } @@ -115,45 +118,45 @@ func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Valida // GetByIndex returns the validator's address and validator itself by index. // It returns nil values if index is less than 0 or greater or equal to // len(ValidatorSet.Validators). -func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) { - if index < 0 || index >= len(valSet.Validators) { +func (vals *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) { + if index < 0 || index >= len(vals.Validators) { return nil, nil } - val = valSet.Validators[index] + val = vals.Validators[index] return val.Address, val.Copy() } // Size returns the length of the validator set. -func (valSet *ValidatorSet) Size() int { - return len(valSet.Validators) +func (vals *ValidatorSet) Size() int { + return len(vals.Validators) } // TotalVotingPower returns the sum of the voting powers of all validators. 
-func (valSet *ValidatorSet) TotalVotingPower() int64 { - if valSet.totalVotingPower == 0 { - for _, val := range valSet.Validators { +func (vals *ValidatorSet) TotalVotingPower() int64 { + if vals.totalVotingPower == 0 { + for _, val := range vals.Validators { // mind overflow - valSet.totalVotingPower = safeAddClip(valSet.totalVotingPower, val.VotingPower) + vals.totalVotingPower = safeAddClip(vals.totalVotingPower, val.VotingPower) } } - return valSet.totalVotingPower + return vals.totalVotingPower } // GetProposer returns the current proposer. If the validator set is empty, nil // is returned. -func (valSet *ValidatorSet) GetProposer() (proposer *Validator) { - if len(valSet.Validators) == 0 { +func (vals *ValidatorSet) GetProposer() (proposer *Validator) { + if len(vals.Validators) == 0 { return nil } - if valSet.Proposer == nil { - valSet.Proposer = valSet.findProposer() + if vals.Proposer == nil { + vals.Proposer = vals.findProposer() } - return valSet.Proposer.Copy() + return vals.Proposer.Copy() } -func (valSet *ValidatorSet) findProposer() *Validator { +func (vals *ValidatorSet) findProposer() *Validator { var proposer *Validator - for _, val := range valSet.Validators { + for _, val := range vals.Validators { if proposer == nil || !bytes.Equal(val.Address, proposer.Address) { proposer = proposer.CompareAccum(val) } @@ -163,12 +166,12 @@ func (valSet *ValidatorSet) findProposer() *Validator { // Hash returns the Merkle root hash build using validators (as leaves) in the // set. -func (valSet *ValidatorSet) Hash() []byte { - if len(valSet.Validators) == 0 { +func (vals *ValidatorSet) Hash() []byte { + if len(vals.Validators) == 0 { return nil } - hashers := make([]merkle.Hasher, len(valSet.Validators)) - for i, val := range valSet.Validators { + hashers := make([]merkle.Hasher, len(vals.Validators)) + for i, val := range vals.Validators { hashers[i] = val } return merkle.SimpleHashFromHashers(hashers) @@ -176,70 +179,70 @@ func (valSet *ValidatorSet) Hash() []byte { // Add adds val to the validator set and returns true. It returns false if val // is already in the set. 
-func (valSet *ValidatorSet) Add(val *Validator) (added bool) { +func (vals *ValidatorSet) Add(val *Validator) (added bool) { val = val.Copy() - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(val.Address, valSet.Validators[i].Address) <= 0 + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(val.Address, vals.Validators[i].Address) <= 0 }) - if idx >= len(valSet.Validators) { - valSet.Validators = append(valSet.Validators, val) + if idx >= len(vals.Validators) { + vals.Validators = append(vals.Validators, val) // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return true - } else if bytes.Equal(valSet.Validators[idx].Address, val.Address) { + } else if bytes.Equal(vals.Validators[idx].Address, val.Address) { return false } else { - newValidators := make([]*Validator, len(valSet.Validators)+1) - copy(newValidators[:idx], valSet.Validators[:idx]) + newValidators := make([]*Validator, len(vals.Validators)+1) + copy(newValidators[:idx], vals.Validators[:idx]) newValidators[idx] = val - copy(newValidators[idx+1:], valSet.Validators[idx:]) - valSet.Validators = newValidators + copy(newValidators[idx+1:], vals.Validators[idx:]) + vals.Validators = newValidators // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return true } } // Update updates val and returns true. It returns false if val is not present // in the set. -func (valSet *ValidatorSet) Update(val *Validator) (updated bool) { - index, sameVal := valSet.GetByAddress(val.Address) +func (vals *ValidatorSet) Update(val *Validator) (updated bool) { + index, sameVal := vals.GetByAddress(val.Address) if sameVal == nil { return false } - valSet.Validators[index] = val.Copy() + vals.Validators[index] = val.Copy() // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return true } // Remove deletes the validator with address. It returns the validator removed // and true. If returns nil and false if validator is not present in the set. -func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) { - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(address, valSet.Validators[i].Address) <= 0 +func (vals *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address, vals.Validators[i].Address) <= 0 }) - if idx >= len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) { + if idx >= len(vals.Validators) || !bytes.Equal(vals.Validators[idx].Address, address) { return nil, false } - removedVal := valSet.Validators[idx] - newValidators := valSet.Validators[:idx] - if idx+1 < len(valSet.Validators) { - newValidators = append(newValidators, valSet.Validators[idx+1:]...) + removedVal := vals.Validators[idx] + newValidators := vals.Validators[:idx] + if idx+1 < len(vals.Validators) { + newValidators = append(newValidators, vals.Validators[idx+1:]...) } - valSet.Validators = newValidators + vals.Validators = newValidators // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return removedVal, true } // Iterate will run the given function over the set. 
-func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { - for i, val := range valSet.Validators { +func (vals *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { + for i, val := range vals.Validators { stop := fn(i, val.Copy()) if stop { break @@ -247,87 +250,106 @@ func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { } } -// Verify that +2/3 of the set had signed the given signBytes -func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error { - if valSet.Size() != len(commit.Precommits) { - return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", valSet.Size(), len(commit.Precommits)) +// Verify that +2/3 of the set had signed the given signBytes. +func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error { + if vals.Size() != len(commit.Precommits) { + return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", vals.Size(), len(commit.Precommits)) } if height != commit.Height() { return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height()) } + if !blockID.Equals(commit.BlockID) { + return fmt.Errorf("Invalid commit -- wrong block id: want %v got %v", + blockID, commit.BlockID) + } talliedVotingPower := int64(0) round := commit.Round() for idx, precommit := range commit.Precommits { - // may be nil if validator skipped. if precommit == nil { - continue + continue // OK, some precommits can be missing. } if precommit.Height != height { - return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, precommit.Height) + return fmt.Errorf("Invalid commit -- wrong height: want %v got %v", height, precommit.Height) } if precommit.Round != round { - return fmt.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round) + return fmt.Errorf("Invalid commit -- wrong round: want %v got %v", round, precommit.Round) } if precommit.Type != VoteTypePrecommit { return fmt.Errorf("Invalid commit -- not precommit @ index %v", idx) } - _, val := valSet.GetByIndex(idx) - // Validate signature + // NOTE: This will go away when we refactor Commit. + if !blockID.Equals(precommit.BlockID) { + return fmt.Errorf("Invalid commit -- wrong block id @ index %v: want %v got %v", + idx, blockID, precommit.BlockID) + } + _, val := vals.GetByIndex(idx) + // Validate signature. precommitSignBytes := precommit.SignBytes(chainID) if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit) } - if !blockID.Equals(precommit.BlockID) { - continue // Not an error, but doesn't count - } // Good precommit! talliedVotingPower += val.VotingPower } - if talliedVotingPower > valSet.TotalVotingPower()*2/3 { + if talliedVotingPower > vals.TotalVotingPower()*2/3 { return nil } return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v", - talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1)) + talliedVotingPower, (vals.TotalVotingPower()*2/3 + 1)) } -// VerifyCommitAny will check to see if the set would -// be valid with a different validator set. +// VerifyFutureCommit will check to see if the set would be valid with a different +// validator set. // -// valSet is the validator set that we know -// * over 2/3 of the power in old signed this block +// vals is the old validator set that we know. Over 2/3 of the power in old +// signed this block. 
// -// newSet is the validator set that signed this block -// * only votes from old are sufficient for 2/3 majority -// in the new set as well +// In Tendermint, 1/3 of the voting power can halt or fork the chain, but 1/3 +// can't make arbitrary state transitions. You still need > 2/3 Byzantine to +// make arbitrary state transitions. // -// That means that: -// * 10% of the valset can't just declare themselves kings -// * If the validator set is 3x old size, we need more proof to trust -func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string, +// To preserve this property in the light client, we also require > 2/3 of the +// old vals to sign the future commit at H, that way we preserve the property +// that if they weren't being truthful about the validator set at H (block hash +// -> vals hash) or about the app state (block hash -> app hash) we can slash +// > 2/3. Otherwise, the lite client isn't providing the same security +// guarantees. +// +// Even if we added a slashing condition that if you sign a block header with +// the wrong validator set, then we would only need > 1/3 of signatures from +// the old vals on the new commit, it wouldn't be sufficient because the new +// vals can be arbitrary and commit some arbitrary app hash. +// +// newSet is the validator set that signed this block. Only votes from new are +// sufficient for 2/3 majority in the new set as well, for it to be a valid +// commit. +// +// NOTE: This doesn't check whether the commit is a future commit, because the +// current height isn't part of the ValidatorSet. Caller must check that the +// commit height is greater than the height for this validator set. +func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID string, blockID BlockID, height int64, commit *Commit) error { + oldVals := vals - if newSet.Size() != len(commit.Precommits) { - return cmn.NewError("Invalid commit -- wrong set size: %v vs %v", newSet.Size(), len(commit.Precommits)) - } - if height != commit.Height() { - return cmn.NewError("Invalid commit -- wrong height: %v vs %v", height, commit.Height()) + // Commit must be a valid commit for newSet. + err := newSet.VerifyCommit(chainID, blockID, height, commit) + if err != nil { + return err } + // Check old voting power. oldVotingPower := int64(0) - newVotingPower := int64(0) seen := map[int]bool{} round := commit.Round() for idx, precommit := range commit.Precommits { - // first check as in VerifyCommit if precommit == nil { continue } if precommit.Height != height { - // return certerr.ErrHeightMismatch(height, precommit.Height) return cmn.NewError("Blocks don't match - %d vs %d", round, precommit.Round) } if precommit.Round != round { @@ -336,54 +358,45 @@ func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string if precommit.Type != VoteTypePrecommit { return cmn.NewError("Invalid commit -- not precommit @ index %v", idx) } + // NOTE: This will go away when we refactor Commit. if !blockID.Equals(precommit.BlockID) { - continue // Not an error, but doesn't count + return fmt.Errorf("Invalid commit -- wrong block id @ index %v: want %v got %v", + idx, blockID, precommit.BlockID) } - - // we only grab by address, ignoring unknown validators - vi, ov := valSet.GetByAddress(precommit.ValidatorAddress) - if ov == nil || seen[vi] { + // See if this validator is in oldVals. + idx, val := oldVals.GetByAddress(precommit.ValidatorAddress) + if val == nil || seen[idx] { continue // missing or double vote... 
} - seen[vi] = true + seen[idx] = true - // Validate signature old school + // Validate signature. precommitSignBytes := precommit.SignBytes(chainID) - if !ov.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { + if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { return cmn.NewError("Invalid commit -- invalid signature: %v", precommit) } // Good precommit! - oldVotingPower += ov.VotingPower - - // check new school - _, cv := newSet.GetByIndex(idx) - if cv.PubKey.Equals(ov.PubKey) { - // make sure this is properly set in the current block as well - newVotingPower += cv.VotingPower - } + oldVotingPower += val.VotingPower } - if oldVotingPower <= valSet.TotalVotingPower()*2/3 { + if oldVotingPower <= oldVals.TotalVotingPower()*2/3 { return cmn.NewError("Invalid commit -- insufficient old voting power: got %v, needed %v", - oldVotingPower, (valSet.TotalVotingPower()*2/3 + 1)) - } else if newVotingPower <= newSet.TotalVotingPower()*2/3 { - return cmn.NewError("Invalid commit -- insufficient cur voting power: got %v, needed %v", - newVotingPower, (newSet.TotalVotingPower()*2/3 + 1)) + oldVotingPower, (oldVals.TotalVotingPower()*2/3 + 1)) } return nil } -func (valSet *ValidatorSet) String() string { - return valSet.StringIndented("") +func (vals *ValidatorSet) String() string { + return vals.StringIndented("") } // String -func (valSet *ValidatorSet) StringIndented(indent string) string { - if valSet == nil { +func (vals *ValidatorSet) StringIndented(indent string) string { + if vals == nil { return "nil-ValidatorSet" } valStrings := []string{} - valSet.Iterate(func(index int, val *Validator) bool { + vals.Iterate(func(index int, val *Validator) bool { valStrings = append(valStrings, val.String()) return false }) @@ -392,7 +405,7 @@ func (valSet *ValidatorSet) StringIndented(indent string) string { %s Validators: %s %v %s}`, - indent, valSet.GetProposer().String(), + indent, vals.GetProposer().String(), indent, indent, strings.Join(valStrings, "\n"+indent+" "), indent) @@ -405,18 +418,18 @@ func (valSet *ValidatorSet) StringIndented(indent string) string { // Sort validators by address type ValidatorsByAddress []*Validator -func (vs ValidatorsByAddress) Len() int { - return len(vs) +func (valz ValidatorsByAddress) Len() int { + return len(valz) } -func (vs ValidatorsByAddress) Less(i, j int) bool { - return bytes.Compare(vs[i].Address, vs[j].Address) == -1 +func (valz ValidatorsByAddress) Less(i, j int) bool { + return bytes.Compare(valz[i].Address, valz[j].Address) == -1 } -func (vs ValidatorsByAddress) Swap(i, j int) { - it := vs[i] - vs[i] = vs[j] - vs[j] = it +func (valz ValidatorsByAddress) Swap(i, j int) { + it := valz[i] + valz[i] = valz[j] + valz[j] = it } //------------------------------------- @@ -440,16 +453,16 @@ func (ac accumComparable) Less(o interface{}) bool { // NOTE: PrivValidator are in order. 
// UNSTABLE func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []PrivValidator) { - vals := make([]*Validator, numValidators) + valz := make([]*Validator, numValidators) privValidators := make([]PrivValidator, numValidators) for i := 0; i < numValidators; i++ { val, privValidator := RandValidator(false, votingPower) - vals[i] = val + valz[i] = val privValidators[i] = privValidator } - valSet := NewValidatorSet(vals) + vals := NewValidatorSet(valz) sort.Sort(PrivValidatorsByAddress(privValidators)) - return valSet, privValidators + return vals, privValidators } /////////////////////////////////////////////////////////////////////////////// diff --git a/types/vote_set.go b/types/vote_set.go index a60d95da..1c7fac19 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -170,7 +170,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { "Cannot find validator %d in valSet of size %d", valIndex, voteSet.valSet.Size()) } - // Ensure that the signer has the right address + // Ensure that the signer has the right address. if !bytes.Equal(valAddr, lookupAddr) { return false, errors.Wrapf(ErrVoteInvalidValidatorAddress, "vote.ValidatorAddress (%X) does not match address (%X) for vote.ValidatorIndex (%d)\nEnsure the genesis file is correct across all validators.", @@ -190,7 +190,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { return false, errors.Wrapf(err, "Failed to verify vote with ChainID %s and PubKey %s", voteSet.chainID, val.PubKey) } - // Add vote and get conflicting vote if any + // Add vote and get conflicting vote if any. added, conflicting := voteSet.addVerifiedVote(vote, blockKey, val.VotingPower) if conflicting != nil { return added, NewConflictingVoteError(val, conflicting, vote) @@ -201,7 +201,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { return added, nil } -// Returns (vote, true) if vote exists for valIndex and blockKey +// Returns (vote, true) if vote exists for valIndex and blockKey. func (voteSet *VoteSet) getVote(valIndex int, blockKey string) (vote *Vote, ok bool) { if existing := voteSet.votes[valIndex]; existing != nil && existing.BlockID.Key() == blockKey { return existing, true From 242a6037e88475eb5270382358060f13e2d24469 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 19 Jun 2018 23:55:15 -0700 Subject: [PATCH 03/27] Fixes from review --- consensus/reactor_test.go | 2 +- consensus/replay.go | 4 ++-- lite/client/provider.go | 8 ++++---- lite/commit.go | 4 ++-- lite/dbprovider.go | 6 +++--- lite/helpers.go | 14 +++++++------- lite/inquiring_certifier.go | 4 ++-- lite/inquiring_certifier_test.go | 8 ++++---- 8 files changed, 25 insertions(+), 25 deletions(-) diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 70af588a..6faea3f0 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -419,7 +419,7 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{} err := validateBlock(newBlock, activeVals) assert.Nil(t, err) for _, tx := range txs { - css[j].mempool.CheckTx(tx, nil) + err := css[j].mempool.CheckTx(tx, nil) assert.Nil(t, err) } }, css) diff --git a/consensus/replay.go b/consensus/replay.go index 75173061..8ecf88b8 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -266,13 +266,13 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight // If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain. 
if appBlockHeight == 0 { - nvals := types.TM2PB.Validators(state.Validators) // state.Validators would work too. + nextVals := types.TM2PB.Validators(state.Validators) // state.Validators would work too. csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams) req := abci.RequestInitChain{ Time: h.genDoc.GenesisTime.Unix(), // TODO ChainId: h.genDoc.ChainID, ConsensusParams: csParams, - Validators: nvals, + Validators: nextVals, AppStateBytes: h.genDoc.AppStateJSON, } res, err := proxyApp.Consensus().InitChainSync(req) diff --git a/lite/client/provider.go b/lite/client/provider.go index 188ce7d0..1612ddd7 100644 --- a/lite/client/provider.go +++ b/lite/client/provider.go @@ -53,7 +53,7 @@ func (p *provider) LatestFullCommit(chainID string, minHeight, maxHeight int64) return } if maxHeight != 0 && maxHeight < minHeight { - err = fmt.Errorf("need maxHeight == 0 or minHeight <= maxHeight, got %v and %v", + err = fmt.Errorf("need maxHeight == 0 or minHeight <= maxHeight, got min %v and max %v", minHeight, maxHeight) return } @@ -95,7 +95,7 @@ func (p *provider) getValidatorSet(chainID string, height int64) (valset *types. return } if height < 1 { - err = fmt.Errorf("expected height >= 1, got %v", height) + err = fmt.Errorf("expected height >= 1, got height %v", height) return } heightPtr := new(int64) @@ -122,11 +122,11 @@ func (p *provider) fillFullCommit(signedHeader types.SignedHeader) (fc lite.Full fc.Validators = valset // Get the next validators. - nvalset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height+1) + nextValset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height+1) if err != nil { return lite.FullCommit{}, err } else { - fc.NextValidators = nvalset + fc.NextValidators = nextValset } return fc, nil diff --git a/lite/commit.go b/lite/commit.go index 8449bf69..e62bd166 100644 --- a/lite/commit.go +++ b/lite/commit.go @@ -20,11 +20,11 @@ type FullCommit struct { } // NewFullCommit returns a new FullCommit. -func NewFullCommit(signedHeader types.SignedHeader, valset, nvalset *types.ValidatorSet) FullCommit { +func NewFullCommit(signedHeader types.SignedHeader, valset, nextValset *types.ValidatorSet) FullCommit { return FullCommit{ SignedHeader: signedHeader, Validators: valset, - NextValidators: nvalset, + NextValidators: nextValset, } } diff --git a/lite/dbprovider.go b/lite/dbprovider.go index 834bab66..149a0ed3 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -148,14 +148,14 @@ func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *ty func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) { var chainID = sh.ChainID var height = sh.Height - var valset, nvalset *types.ValidatorSet + var valset, nextValset *types.ValidatorSet // Load the validator set. valset, err := dbp.getValidatorSet(chainID, height) if err != nil { return FullCommit{}, err } // Load the next validator set. 
- nvalset, err = dbp.getValidatorSet(chainID, height+1) + nextValset, err = dbp.getValidatorSet(chainID, height+1) if err != nil { return FullCommit{}, err } @@ -163,6 +163,6 @@ func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) return FullCommit{ SignedHeader: sh, Validators: valset, - NextValidators: nvalset, + NextValidators: nextValset, }, nil } diff --git a/lite/helpers.go b/lite/helpers.go index 764df507..de02b739 100644 --- a/lite/helpers.go +++ b/lite/helpers.go @@ -96,7 +96,7 @@ func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivK } func genHeader(chainID string, height int64, txs types.Txs, - valset, nvalset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { + valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { return &types.Header{ ChainID: chainID, @@ -107,7 +107,7 @@ func genHeader(chainID string, height int64, txs types.Txs, // LastBlockID // LastCommitHash ValidatorsHash: valset.Hash(), - NextValidatorsHash: nvalset.Hash(), + NextValidatorsHash: nextValset.Hash(), DataHash: txs.Hash(), AppHash: appHash, ConsensusHash: consHash, @@ -117,9 +117,9 @@ func genHeader(chainID string, height int64, txs types.Txs, // GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. func (pkz privKeys) GenSignedHeader(chainID string, height int64, txs types.Txs, - valset, nvalset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader { + valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader { - header := genHeader(chainID, height, txs, valset, nvalset, appHash, consHash, resHash) + header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash) check := types.SignedHeader{ Header: header, Commit: pkz.signHeader(header, first, last), @@ -129,12 +129,12 @@ func (pkz privKeys) GenSignedHeader(chainID string, height int64, txs types.Txs, // GenFullCommit calls genHeader and signHeader and combines them into a FullCommit. func (pkz privKeys) GenFullCommit(chainID string, height int64, txs types.Txs, - valset, nvalset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit { + valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit { - header := genHeader(chainID, height, txs, valset, nvalset, appHash, consHash, resHash) + header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash) commit := types.SignedHeader{ Header: header, Commit: pkz.signHeader(header, first, last), } - return NewFullCommit(commit, valset, nvalset) + return NewFullCommit(commit, valset, nextValset) } diff --git a/lite/inquiring_certifier.go b/lite/inquiring_certifier.go index 049cd728..3e61b958 100644 --- a/lite/inquiring_certifier.go +++ b/lite/inquiring_certifier.go @@ -95,7 +95,7 @@ func (ic *InquiringCertifier) Certify(shdr types.SignedHeader) error { } // Get the next validator set. - nvalset, err := ic.source.ValidatorSet(ic.chainID, shdr.Height+1) + nextValset, err := ic.source.ValidatorSet(ic.chainID, shdr.Height+1) if lerr.IsErrMissingValidators(err) { // Ignore this error. return nil @@ -106,7 +106,7 @@ func (ic *InquiringCertifier) Certify(shdr types.SignedHeader) error { nfc := FullCommit{ SignedHeader: shdr, Validators: tfc.NextValidators, - NextValidators: nvalset, + NextValidators: nextValset, } // Validate the full commit. 
This checks the cryptographic // signatures of Commit against Validators. diff --git a/lite/inquiring_certifier_test.go b/lite/inquiring_certifier_test.go index b3d8edea..8da5a7c1 100644 --- a/lite/inquiring_certifier_test.go +++ b/lite/inquiring_certifier_test.go @@ -28,12 +28,12 @@ func TestInquirerValidPath(t *testing.T) { fcz := make([]FullCommit, count) for i := 0; i < count; i++ { vals := keys.ToValidators(vote, 0) - nvals := nkeys.ToValidators(vote, 0) + nextVals := nkeys.ToValidators(vote, 0) h := int64(1 + i) appHash := []byte(fmt.Sprintf("h=%d", h)) fcz[i] = keys.GenFullCommit( chainID, h, nil, - vals, nvals, + vals, nextVals, appHash, consHash, resHash, 0, len(keys)) // Extend the keys by 1 each time. keys = nkeys @@ -85,13 +85,13 @@ func TestInquirerVerifyHistorical(t *testing.T) { fcz := make([]FullCommit, count) for i := 0; i < count; i++ { vals := keys.ToValidators(vote, 0) - nvals := nkeys.ToValidators(vote, 0) + nextVals := nkeys.ToValidators(vote, 0) h := int64(1 + i) appHash := []byte(fmt.Sprintf("h=%d", h)) resHash := []byte(fmt.Sprintf("res=%d", h)) fcz[i] = keys.GenFullCommit( chainID, h, nil, - vals, nvals, + vals, nextVals, appHash, consHash, resHash, 0, len(keys)) // Extend the keys by 1 each time. keys = nkeys From c3296f2e01000b45f033fa355d5e477bece4b599 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 20 Jun 2018 01:42:37 -0700 Subject: [PATCH 04/27] Garbage collect DBProvider (unoptimized); Certifier creation takes a client --- cmd/tendermint/commands/lite.go | 2 +- lite/dbprovider.go | 117 +++++++++++++++++++++++++------- lite/proxy/certifier.go | 24 ++++--- 3 files changed, 105 insertions(+), 38 deletions(-) diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go index 6987b7f1..53b3ec18 100644 --- a/cmd/tendermint/commands/lite.go +++ b/cmd/tendermint/commands/lite.go @@ -68,7 +68,7 @@ func runProxy(cmd *cobra.Command, args []string) error { // First, connect a client node := rpcclient.NewHTTP(nodeAddr, "/websocket") - cert, err := proxy.GetCertifier(chainID, home, nodeAddr) + cert, err := proxy.GetCertifier(chainID, home, node) if err != nil { return err } diff --git a/lite/dbprovider.go b/lite/dbprovider.go index 149a0ed3..81710c9e 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -12,36 +12,11 @@ import ( dbm "github.com/tendermint/tmlibs/db" ) -func signedHeaderKey(chainID string, height int64) []byte { - return []byte(fmt.Sprintf("%s/%010d/sh", chainID, height)) -} - -var signedHeaderKeyPattern = regexp.MustCompile(`([^/]+)/([0-9]*)/sh`) - -func parseSignedHeaderKey(key []byte) (chainID string, height int64, ok bool) { - submatch := signedHeaderKeyPattern.FindSubmatch(key) - if submatch == nil { - return "", 0, false - } - chainID = string(submatch[1]) - heightStr := string(submatch[2]) - heightInt, err := strconv.Atoi(heightStr) - if err != nil { - return "", 0, false - } - height = int64(heightInt) - ok = true // good! - return -} - -func validatorSetKey(chainID string, height int64) []byte { - return []byte(fmt.Sprintf("%s/%010d/vs", chainID, height)) -} - type DBProvider struct { chainID string db dbm.DB cdc *amino.Codec + limit int } func NewDBProvider(db dbm.DB) *DBProvider { @@ -52,6 +27,11 @@ func NewDBProvider(db dbm.DB) *DBProvider { return dbp } +func (dbp *DBProvider) SetLimit(limit int) *DBProvider { + dbp.limit = limit + return dbp +} + // Implements PersistentProvider. 
func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { @@ -85,6 +65,13 @@ func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { // And write sync. batch.WriteSync() + + // Garbage collect. + // TODO: optimize later. + if dbp.limit > 0 { + dbp.deleteAfterN(fc.ChainID(), dbp.limit) + } + return nil } @@ -166,3 +153,81 @@ func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) NextValidators: nextValset, }, nil } + +func (dbp *DBProvider) deleteAfterN(chainID string, after int) error { + itr := dbp.db.ReverseIterator( + signedHeaderKey(chainID, 1<<63-1), + signedHeaderKey(chainID, 0), + ) + defer itr.Close() + + var lastHeight int64 = 1<<63 - 1 + var numSeen = 0 + + for itr.Valid() { + key := itr.Key() + _, height, ok := parseChainKeyPrefix(key) + if !ok { + return fmt.Errorf("unexpected key %v", key) + } else { + if height < lastHeight { + lastHeight = height + numSeen += 1 + } + if numSeen > after { + dbp.db.Delete(key) + } + } + } + return nil +} + +//---------------------------------------- + +func signedHeaderKey(chainID string, height int64) []byte { + return []byte(fmt.Sprintf("%s/%010d/sh", chainID, height)) +} + +var signedHeaderKeyPattern = regexp.MustCompile(`([^/]+)/([0-9]*)/sh`) + +func parseSignedHeaderKey(key []byte) (chainID string, height int64, ok bool) { + submatch := signedHeaderKeyPattern.FindSubmatch(key) + if submatch == nil { + return "", 0, false + } + chainID = string(submatch[1]) + heightStr := string(submatch[2]) + heightInt, err := strconv.Atoi(heightStr) + if err != nil { + return "", 0, false + } + height = int64(heightInt) + ok = true // good! + return +} + +func validatorSetKey(chainID string, height int64) []byte { + return []byte(fmt.Sprintf("%s/%010d/vs", chainID, height)) +} + +func chainKeyPrefix(chainID string, height int64) []byte { + return []byte(fmt.Sprintf("%s/%010d/", chainID, height)) +} + +var chainKeyPrefixPattern = regexp.MustCompile(`([^/]+)/([0-9]*)/`) + +func parseChainKeyPrefix(key []byte) (chainID string, height int64, ok bool) { + submatch := chainKeyPrefixPattern.FindSubmatch(key) + if submatch == nil { + return "", 0, false + } + chainID = string(submatch[1]) + heightStr := string(submatch[2]) + heightInt, err := strconv.Atoi(heightStr) + if err != nil { + return "", 0, false + } + height = int64(heightInt) + ok = true // good! + return +} diff --git a/lite/proxy/certifier.go b/lite/proxy/certifier.go index a6765402..772af58f 100644 --- a/lite/proxy/certifier.go +++ b/lite/proxy/certifier.go @@ -6,22 +6,24 @@ import ( dbm "github.com/tendermint/tmlibs/db" ) -func GetCertifier(chainID, rootDir, nodeAddr string) (*lite.InquiringCertifier, error) { +func GetCertifier(chainID, rootDir string, client lclient.SignStatusClient) (*lite.InquiringCertifier, error) { trust := lite.NewMultiProvider( - lite.NewDBProvider(dbm.NewMemDB()), + lite.NewDBProvider(dbm.NewMemDB()).SetLimit(10), lite.NewDBProvider(dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)), ) + source := lclient.NewProvider(chainID, client) - source := lclient.NewHTTPProvider(chainID, nodeAddr) - - // XXX: total insecure hack to avoid `init` - fc, err := source.LatestFullCommit(chainID, 1, 1) + // TODO: Make this more secure, e.g. make it interactive in the console? 
+ _, err := trust.LatestFullCommit(chainID, 1, 1<<63-1) if err != nil { - return nil, err - } - err = trust.SaveFullCommit(fc) - if err != nil { - return nil, err + fc, err := source.LatestFullCommit(chainID, 1, 1) + if err != nil { + return nil, err + } + err = trust.SaveFullCommit(fc) + if err != nil { + return nil, err + } } cert, err := lite.NewInquiringCertifier(chainID, trust, source) From 538c410bcdd9fbd8792c070554bd211e642e2f45 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 25 Jun 2018 16:31:59 -0700 Subject: [PATCH 05/27] Fixes from review --- Gopkg.lock | 31 +++------------- Gopkg.toml | 2 +- consensus/replay.go | 2 +- lite/client/provider.go | 7 +--- lite/client/provider_test.go | 2 +- lite/commit.go | 4 +-- lite/inquiring_certifier.go | 70 ++++++++++++++++++------------------ 7 files changed, 46 insertions(+), 72 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 9dfc2a5f..496e8967 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -7,12 +7,6 @@ packages = ["quantile"] revision = "3a771d992973f24aa725d07868b467d1ddfceafb" -[[projects]] - branch = "master" - name = "github.com/brejski/hid" - packages = ["."] - revision = "06112dcfcc50a7e0e4fd06e17f9791e788fdaafc" - [[projects]] branch = "master" name = "github.com/btcsuite/btcd" @@ -290,17 +284,7 @@ "leveldb/table", "leveldb/util" ] - revision = "e2150783cd35f5b607daca48afd8c57ec54cc995" - -[[projects]] - name = "github.com/tendermint/abci" - packages = [ - "example/code", - "example/kvstore", - "types" - ] - revision = "198dccf0ddfd1bb176f87657e3286a05a6ed9540" - version = "v0.12.0" + revision = "0d5a0ceb10cf9ab89fdd744cc8c50a83134f6697" [[projects]] branch = "master" @@ -333,13 +317,8 @@ "merkle/tmhash", "test" ] - revision = "fb7ec62b2925f48de159aeea73b254ae8c58a738" - version = "v0.9.0-rc1" - -[[projects]] - name = "github.com/zondax/ledger-goclient" - packages = ["."] - revision = "3e2146609cdb97894c064d59e9d00accd8c2b1dd" + revision = "49596e0a1f48866603813df843c9409fc19805c6" + version = "v0.9.0" [[projects]] branch = "master" @@ -374,7 +353,7 @@ "netutil", "trace" ] - revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196" + revision = "afe8f62b1d6bbd81f31868121a50b06d8188e1f9" [[projects]] branch = "master" @@ -444,6 +423,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "fcc5b0344f1e328b6abefa1a937d1161e14bbaef603e6f2065e6690531bc5de1" + inputs-digest = "c25289282b94abc7f0c390e592e5e1636b7f26cb4773863ac39cde7fdc7b5bdf" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index d892405b..dc56ae29 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -79,7 +79,7 @@ [[override]] name = "github.com/tendermint/tmlibs" - version = "0.9.0-rc1" + version = "~0.9.0" [[constraint]] name = "google.golang.org/grpc" diff --git a/consensus/replay.go b/consensus/replay.go index 8ecf88b8..6fdd9c0b 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -266,7 +266,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight // If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain. if appBlockHeight == 0 { - nextVals := types.TM2PB.Validators(state.Validators) // state.Validators would work too. + nextVals := types.TM2PB.Validators(state.NextValidators) // state.Validators would work too. 
csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams) req := abci.RequestInitChain{ Time: h.genDoc.GenesisTime.Unix(), // TODO diff --git a/lite/client/provider.go b/lite/client/provider.go index 1612ddd7..8175c5b5 100644 --- a/lite/client/provider.go +++ b/lite/client/provider.go @@ -106,28 +106,23 @@ func (p *provider) getValidatorSet(chainID string, height int64) (valset *types. return nil, lerr.ErrMissingValidators(chainID, height) } valset = types.NewValidatorSet(res.Validators) - valset.TotalVotingPower() // to test deep equality. return } // This does no validation. func (p *provider) fillFullCommit(signedHeader types.SignedHeader) (fc lite.FullCommit, err error) { - fc.SignedHeader = signedHeader // Get the validators. valset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height) if err != nil { return lite.FullCommit{}, err } - fc.Validators = valset // Get the next validators. nextValset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height+1) if err != nil { return lite.FullCommit{}, err - } else { - fc.NextValidators = nextValset } - return fc, nil + return lite.NewFullCommit(signedHeader, valset, nextValset), nil } diff --git a/lite/client/provider_test.go b/lite/client/provider_test.go index 2385bbbe..f4da423f 100644 --- a/lite/client/provider_test.go +++ b/lite/client/provider_test.go @@ -51,7 +51,7 @@ func TestProvider(t *testing.T) { assert.True(sh < 5000) // let's check this is valid somehow - assert.Nil(fc.ValidateBasic(chainID)) + assert.Nil(fc.ValidateFull(chainID)) // historical queries now work :) lower := sh - 5 diff --git a/lite/commit.go b/lite/commit.go index e62bd166..40c3534c 100644 --- a/lite/commit.go +++ b/lite/commit.go @@ -12,7 +12,7 @@ import ( // the validator set which signed the commit, and the next validator set. The // next validator set (which is proven from the block header) allows us to // revert to block-by-block updating of lite certifier's latest validator set, -// even in the face of arbitrarily power changes. +// even in the face of arbitrarily large power changes. type FullCommit struct { SignedHeader types.SignedHeader `json:"signed_header"` Validators *types.ValidatorSet `json:"validator_set"` @@ -33,7 +33,7 @@ func NewFullCommit(signedHeader types.SignedHeader, valset, nextValset *types.Va // signed the SignedHeader.Commit. // If > 2/3 did not sign the Commit from fc.Validators, it // is not a valid commit! -func (fc FullCommit) ValidateBasic(chainID string) error { +func (fc FullCommit) ValidateFull(chainID string) error { // Ensure that Validators exists and matches the header. if fc.Validators.Size() == 0 { return errors.New("need FullCommit.Validators") diff --git a/lite/inquiring_certifier.go b/lite/inquiring_certifier.go index 3e61b958..c4c6173b 100644 --- a/lite/inquiring_certifier.go +++ b/lite/inquiring_certifier.go @@ -101,21 +101,21 @@ func (ic *InquiringCertifier) Certify(shdr types.SignedHeader) error { return nil } else if err != nil { return err - } else { - // Create filled FullCommit. - nfc := FullCommit{ - SignedHeader: shdr, - Validators: tfc.NextValidators, - NextValidators: nextValset, - } - // Validate the full commit. This checks the cryptographic - // signatures of Commit against Validators. - if err := nfc.ValidateBasic(ic.chainID); err != nil { - return err - } - // Trust it. - return ic.trusted.SaveFullCommit(nfc) } + + // Create filled FullCommit. 
+ nfc := FullCommit{ + SignedHeader: shdr, + Validators: tfc.NextValidators, + NextValidators: nextValset, + } + // Validate the full commit. This checks the cryptographic + // signatures of Commit against Validators. + if err := nfc.ValidateFull(ic.chainID); err != nil { + return err + } + // Trust it. + return ic.trusted.SaveFullCommit(nfc) } // verifyAndSave will verify if this is a valid source full commit given the @@ -139,7 +139,7 @@ func (ic *InquiringCertifier) verifyAndSave(tfc, sfc FullCommit) error { } // updateToHeight will use divide-and-conquer to find a path to h. -// Returns nil iff we successfully verify and persist a full commit +// Returns nil error iff we successfully verify and persist a full commit // for height h, using repeated applications of bisection if necessary. // // Returns ErrCommitNotFound if source provider doesn't have the commit for h. @@ -153,7 +153,7 @@ func (ic *InquiringCertifier) updateToHeight(h int64) (FullCommit, error) { // Validate the full commit. This checks the cryptographic // signatures of Commit against Validators. - if err := sfc.ValidateBasic(ic.chainID); err != nil { + if err := sfc.ValidateFull(ic.chainID); err != nil { return FullCommit{}, err } @@ -169,9 +169,9 @@ FOR_LOOP: if err != nil { return FullCommit{}, err } - // Maybe we have nothing to do. + // We have nothing to do. if tfc.Height() == h { - return FullCommit{}, nil + return tfc, nil } // Try to update to full commit with checks. @@ -179,24 +179,24 @@ FOR_LOOP: if err == nil { // All good! return sfc, nil - } else { - // Handle special case when err is ErrTooMuchChange. - if lerr.IsErrTooMuchChange(err) { - // Divide and conquer. - start, end := tfc.Height(), sfc.Height() - if !(start < end) { - panic("should not happen") - } - mid := (start + end) / 2 - _, err = ic.updateToHeight(mid) - if err != nil { - return FullCommit{}, err - } - // If we made it to mid, we retry. - continue FOR_LOOP - } - return FullCommit{}, err } + + // Handle special case when err is ErrTooMuchChange. + if lerr.IsErrTooMuchChange(err) { + // Divide and conquer. + start, end := tfc.Height(), sfc.Height() + if !(start < end) { + panic("should not happen") + } + mid := (start + end) / 2 + _, err = ic.updateToHeight(mid) + if err != nil { + return FullCommit{}, err + } + // If we made it to mid, we retry. + continue FOR_LOOP + } + return FullCommit{}, err } } From 7f4498f8b1b2eee693a4d378fa8ef03a5e580b7e Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Tue, 26 Jun 2018 11:10:54 +0400 Subject: [PATCH 06/27] remove no longer needed install_abci_apps script Fixes https://circleci.com/gh/tendermint/tendermint/12923?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link --- scripts/dep_utils/parse.sh | 14 -------------- scripts/install_abci_apps.sh | 12 ------------ test/docker/Dockerfile | 7 +++---- 3 files changed, 3 insertions(+), 30 deletions(-) delete mode 100644 scripts/dep_utils/parse.sh delete mode 100644 scripts/install_abci_apps.sh diff --git a/scripts/dep_utils/parse.sh b/scripts/dep_utils/parse.sh deleted file mode 100644 index e6519efa..00000000 --- a/scripts/dep_utils/parse.sh +++ /dev/null @@ -1,14 +0,0 @@ -#! 
/bin/bash - -set +u -if [[ "$DEP" == "" ]]; then - DEP=$GOPATH/src/github.com/tendermint/tendermint/Gopkg.lock -fi -set -u - - -set -euo pipefail - -LIB=$1 - -grep -A100 "$LIB" "$DEP" | grep revision | head -n1 | grep -o '"[^"]\+"' | cut -d '"' -f 2 diff --git a/scripts/install_abci_apps.sh b/scripts/install_abci_apps.sh deleted file mode 100644 index ee4b9dde..00000000 --- a/scripts/install_abci_apps.sh +++ /dev/null @@ -1,12 +0,0 @@ -#! /bin/bash - -# get the abci commit used by tendermint -COMMIT=$(bash scripts/dep_utils/parse.sh abci) -echo "Checking out vendored commit for abci: $COMMIT" - -go get -d github.com/tendermint/tendermint/abci -cd "$GOPATH/src/github.com/tendermint/tendermint/abci" || exit -git checkout "$COMMIT" -make get_tools -make get_vendor_deps -make install diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index bc211ea4..70570e75 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -21,14 +21,13 @@ ADD Makefile Makefile RUN make get_tools RUN make get_vendor_deps -# Install the apps -ADD scripts scripts -RUN bash scripts/install_abci_apps.sh - # Now copy in the code # NOTE: this will overwrite whatever is in vendor/ COPY . $REPO +# install ABCI CLI +RUN cd abci && make install && cd - + RUN go install ./cmd/tendermint # expose the volume for debugging From 37ef5485b43e22eb2fdfa4b935308ca718affdfc Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 26 Jun 2018 16:52:38 -0700 Subject: [PATCH 07/27] Add logs to lite/*; Fix rpc status to return consensus height, not blockstore height --- .gitignore | 1 + cmd/tendermint/commands/lite.go | 13 +++++---- consensus/state.go | 9 +++++++ lite/client/provider.go | 24 +++++++++++------ lite/dbprovider.go | 45 +++++++++++++++++++++++++++----- lite/doc.go | 5 ++-- lite/inquiring_certifier.go | 19 +++++++++----- lite/inquiring_certifier_test.go | 17 ++++++------ lite/multiprovider.go | 31 +++++++++++++++------- lite/provider.go | 4 +++ lite/provider_test.go | 10 ++++--- lite/proxy/certifier.go | 26 +++++++++++------- lite/proxy/wrapper.go | 28 +++++++++++++++----- rpc/core/pipe.go | 3 ++- rpc/core/status.go | 7 ++++- 15 files changed, 174 insertions(+), 68 deletions(-) diff --git a/.gitignore b/.gitignore index bcfd36db..269066e3 100644 --- a/.gitignore +++ b/.gitignore @@ -27,3 +27,4 @@ scripts/cutWALUntil/cutWALUntil libs/pubsub/query/fuzz_test/output shunit2 +.tendermint-lite diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go index 53b3ec18..5fe99d36 100644 --- a/cmd/tendermint/commands/lite.go +++ b/cmd/tendermint/commands/lite.go @@ -6,10 +6,9 @@ import ( "github.com/spf13/cobra" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tendermint/lite/proxy" rpcclient "github.com/tendermint/tendermint/rpc/client" + cmn "github.com/tendermint/tmlibs/common" ) // LiteCmd represents the base command when called without any subcommands @@ -66,17 +65,21 @@ func runProxy(cmd *cobra.Command, args []string) error { } // First, connect a client + logger.Info("Connecting to source HTTP client...") node := rpcclient.NewHTTP(nodeAddr, "/websocket") - cert, err := proxy.GetCertifier(chainID, home, node) + logger.Info("Constructing certifier...") + cert, err := proxy.NewCertifier(chainID, home, node, logger) if err != nil { - return err + return cmn.ErrorWrap(err, "constructing certifier") } + cert.SetLogger(logger) sc := proxy.SecureClient(node, cert) + logger.Info("Starting proxy...") err = proxy.StartProxy(sc, listenAddr, logger) if err != nil { - return err + return 
cmn.ErrorWrap(err, "starting proxy") } cmn.TrapSignal(func() { diff --git a/consensus/state.go b/consensus/state.go index 93e1f6b4..7c18d8b0 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -196,6 +196,15 @@ func (cs *ConsensusState) GetState() sm.State { return cs.state.Copy() } +// GetLastHeight returns the last height committed. +// If there were no blocks, returns 0. +func (cs *ConsensusState) GetLastHeight() int64 { + cs.mtx.Lock() + defer cs.mtx.Unlock() + + return cs.RoundState.Height - 1 +} + // GetRoundState returns a shallow copy of the internal consensus state. func (cs *ConsensusState) GetRoundState() *cstypes.RoundState { cs.mtx.Lock() diff --git a/lite/client/provider.go b/lite/client/provider.go index 8175c5b5..3ef15344 100644 --- a/lite/client/provider.go +++ b/lite/client/provider.go @@ -8,12 +8,12 @@ package client import ( "fmt" + "github.com/tendermint/tendermint/lite" + lerr "github.com/tendermint/tendermint/lite/errors" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/lite" - lerr "github.com/tendermint/tendermint/lite/errors" + log "github.com/tendermint/tmlibs/log" ) // SignStatusClient combines a SignClient and StatusClient. @@ -23,22 +23,30 @@ type SignStatusClient interface { } type provider struct { + logger log.Logger chainID string client SignStatusClient } // NewProvider implements Provider (but not PersistentProvider). func NewProvider(chainID string, client SignStatusClient) lite.Provider { - return &provider{chainID: chainID, client: client} + return &provider{ + logger: log.NewNopLogger(), + chainID: chainID, + client: client, + } } // NewHTTPProvider can connect to a tendermint json-rpc endpoint // at the given url, and uses that as a read-only provider. func NewHTTPProvider(chainID, remote string) lite.Provider { - return &provider{ - chainID: chainID, - client: rpcclient.NewHTTP(remote, "/websocket"), - } + return NewProvider(chainID, rpcclient.NewHTTP(remote, "/websocket")) +} + +// Implements Provider. +func (p *provider) SetLogger(logger log.Logger) { + logger = logger.With("module", "lite/client") + p.logger = logger } // StatusClient returns the internal client as a StatusClient diff --git a/lite/dbprovider.go b/lite/dbprovider.go index 81710c9e..f39f033c 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -10,23 +10,34 @@ import ( lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tmlibs/db" + log "github.com/tendermint/tmlibs/log" ) type DBProvider struct { - chainID string - db dbm.DB - cdc *amino.Codec - limit int + logger log.Logger + label string + db dbm.DB + cdc *amino.Codec + limit int } -func NewDBProvider(db dbm.DB) *DBProvider { +func NewDBProvider(label string, db dbm.DB) *DBProvider { //db = dbm.NewDebugDB("db provider "+cmn.RandStr(4), db) cdc := amino.NewCodec() crypto.RegisterAmino(cdc) - dbp := &DBProvider{db: db, cdc: cdc} + dbp := &DBProvider{ + logger: log.NewNopLogger(), + label: label, + db: db, + cdc: cdc, + } return dbp } +func (dbp *DBProvider) SetLogger(logger log.Logger) { + dbp.logger = logger.With("label", dbp.label) +} + func (dbp *DBProvider) SetLimit(limit int) *DBProvider { dbp.limit = limit return dbp @@ -35,6 +46,7 @@ func (dbp *DBProvider) SetLimit(limit int) *DBProvider { // Implements PersistentProvider. 
func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { + dbp.logger.Info("DBProvider.SaveFullCommit()...", "fc", fc) batch := dbp.db.NewBatch() // Save the fc.validators. @@ -79,6 +91,9 @@ func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) ( FullCommit, error) { + dbp.logger.Info("DBProvider.LatestFullCommit()...", + "chainID", chainID, "minHeight", minHeight, "maxHeight", maxHeight) + if minHeight <= 0 { minHeight = 1 } @@ -107,7 +122,15 @@ func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int if err != nil { return FullCommit{}, err } else { - return dbp.fillFullCommit(sh) + lfc, err := dbp.fillFullCommit(sh) + if err == nil { + dbp.logger.Info("DBProvider.LatestFullCommit() found latest.", "height", lfc.Height()) + return lfc, nil + } else { + dbp.logger.Info("DBProvider.LatestFullCommit() got error", "lfc", lfc) + dbp.logger.Info(fmt.Sprintf("%+v", err)) + return lfc, err + } } } } @@ -155,6 +178,9 @@ func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) } func (dbp *DBProvider) deleteAfterN(chainID string, after int) error { + + dbp.logger.Info("DBProvider.deleteAfterN()...", "chainID", chainID, "after", after) + itr := dbp.db.ReverseIterator( signedHeaderKey(chainID, 1<<63-1), signedHeaderKey(chainID, 0), @@ -163,6 +189,7 @@ func (dbp *DBProvider) deleteAfterN(chainID string, after int) error { var lastHeight int64 = 1<<63 - 1 var numSeen = 0 + var numDeleted = 0 for itr.Valid() { key := itr.Key() @@ -176,9 +203,13 @@ func (dbp *DBProvider) deleteAfterN(chainID string, after int) error { } if numSeen > after { dbp.db.Delete(key) + numDeleted += 1 } } + itr.Next() } + + dbp.logger.Info(fmt.Sprintf("DBProvider.deleteAfterN() deleted %v items\n", numDeleted)) return nil } diff --git a/lite/doc.go b/lite/doc.go index 881880f6..07977ebe 100644 --- a/lite/doc.go +++ b/lite/doc.go @@ -92,8 +92,9 @@ type PersistentProvider interface { * MultiProvider - combine multiple providers. The suggested use for local light clients is client.NewHTTPProvider(...) for -getting new data (Source), and NewMultiProvider(NewDBProvider(dbm.NewMemDB()), -NewDBProvider(db.NewFileDB(...))) to store confirmed full commits (Trusted) +getting new data (Source), and NewMultiProvider(NewDBProvider("label", +dbm.NewMemDB()), NewDBProvider("label", db.NewFileDB(...))) to store confirmed +full commits (Trusted) # How We Track Validators diff --git a/lite/inquiring_certifier.go b/lite/inquiring_certifier.go index c4c6173b..f030ec57 100644 --- a/lite/inquiring_certifier.go +++ b/lite/inquiring_certifier.go @@ -3,9 +3,9 @@ package lite import ( "bytes" - "github.com/tendermint/tendermint/types" - lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" + log "github.com/tendermint/tmlibs/log" ) var _ Certifier = (*InquiringCertifier)(nil) @@ -15,6 +15,7 @@ var _ Certifier = (*InquiringCertifier)(nil) // validator set changes. It stores properly validated data on the // "trusted" local system. type InquiringCertifier struct { + logger log.Logger chainID string // These are only properly validated data, from local system. trusted PersistentProvider @@ -28,14 +29,20 @@ type InquiringCertifier struct { // // The trusted provider should a CacheProvider, MemProvider or // files.Provider. The source provider should be a client.HTTPProvider. 
-func NewInquiringCertifier(chainID string, trusted PersistentProvider, source Provider) ( - *InquiringCertifier, error) { - +func NewInquiringCertifier(chainID string, trusted PersistentProvider, source Provider) *InquiringCertifier { return &InquiringCertifier{ + logger: log.NewNopLogger(), chainID: chainID, trusted: trusted, source: source, - }, nil + } +} + +func (ic *InquiringCertifier) SetLogger(logger log.Logger) { + logger = logger.With("module", "lite") + ic.logger = logger + ic.trusted.SetLogger(logger) + ic.source.SetLogger(logger) } // Implements Certifier. diff --git a/lite/inquiring_certifier_test.go b/lite/inquiring_certifier_test.go index 8da5a7c1..23cb5488 100644 --- a/lite/inquiring_certifier_test.go +++ b/lite/inquiring_certifier_test.go @@ -8,12 +8,13 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tmlibs/db" + log "github.com/tendermint/tmlibs/log" ) func TestInquirerValidPath(t *testing.T) { assert, require := assert.New(t), require.New(t) - trust := NewDBProvider(dbm.NewMemDB()) - source := NewDBProvider(dbm.NewMemDB()) + trust := NewDBProvider("trust", dbm.NewMemDB()) + source := NewDBProvider("source", dbm.NewMemDB()) // Set up the validators to generate test blocks. var vote int64 = 10 @@ -43,8 +44,8 @@ func TestInquirerValidPath(t *testing.T) { // Initialize a certifier with the initial state. err := trust.SaveFullCommit(fcz[0]) require.Nil(err) - cert, err := NewInquiringCertifier(chainID, trust, source) - require.Nil(err) + cert := NewInquiringCertifier(chainID, trust, source) + cert.SetLogger(log.TestingLogger()) // This should fail validation: sh := fcz[count-1].SignedHeader @@ -70,8 +71,8 @@ func TestInquirerValidPath(t *testing.T) { func TestInquirerVerifyHistorical(t *testing.T) { assert, require := assert.New(t), require.New(t) - trust := NewDBProvider(dbm.NewMemDB()) - source := NewDBProvider(dbm.NewMemDB()) + trust := NewDBProvider("trust", dbm.NewMemDB()) + source := NewDBProvider("source", dbm.NewMemDB()) // Set up the validators to generate test blocks. var vote int64 = 10 @@ -101,8 +102,8 @@ func TestInquirerVerifyHistorical(t *testing.T) { // Initialize a certifier with the initial state. err := trust.SaveFullCommit(fcz[0]) require.Nil(err) - cert, err := NewInquiringCertifier(chainID, trust, source) - require.Nil(err) + cert := NewInquiringCertifier(chainID, trust, source) + cert.SetLogger(log.TestingLogger()) // Store a few full commits as trust. for _, i := range []int{2, 5} { diff --git a/lite/multiprovider.go b/lite/multiprovider.go index dcfd1318..8ff523b4 100644 --- a/lite/multiprovider.go +++ b/lite/multiprovider.go @@ -3,24 +3,35 @@ package lite import ( lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" + log "github.com/tendermint/tmlibs/log" ) // multiProvider allows you to place one or more caches in front of a source // Provider. It runs through them in order until a match is found. type multiProvider struct { - Providers []PersistentProvider + logger log.Logger + providers []PersistentProvider } // NewMultiProvider returns a new provider which wraps multiple other providers. -func NewMultiProvider(providers ...PersistentProvider) multiProvider { - return multiProvider{ - Providers: providers, +func NewMultiProvider(providers ...PersistentProvider) *multiProvider { + return &multiProvider{ + logger: log.NewNopLogger(), + providers: providers, + } +} + +// SetLogger sets logger on self and all subproviders. 
+func (mc *multiProvider) SetLogger(logger log.Logger) { + mc.logger = logger + for _, p := range mc.providers { + p.SetLogger(logger) } } // SaveFullCommit saves on all providers, and aborts on the first error. -func (mc multiProvider) SaveFullCommit(fc FullCommit) (err error) { - for _, p := range mc.Providers { +func (mc *multiProvider) SaveFullCommit(fc FullCommit) (err error) { + for _, p := range mc.providers { err = p.SaveFullCommit(fc) if err != nil { return @@ -32,8 +43,8 @@ func (mc multiProvider) SaveFullCommit(fc FullCommit) (err error) { // LatestFullCommit loads the latest from all providers and provides // the latest FullCommit that satisfies the conditions. // Returns the first error encountered. -func (mc multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc FullCommit, err error) { - for _, p := range mc.Providers { +func (mc *multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc FullCommit, err error) { + for _, p := range mc.providers { var fc_ FullCommit fc_, err = p.LatestFullCommit(chainID, minHeight, maxHeight) if lerr.IsErrCommitNotFound(err) { @@ -60,8 +71,8 @@ func (mc multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight in // ValidatorSet returns validator set at height as provided by the first // provider which has it, or an error otherwise. -func (mc multiProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { - for _, p := range mc.Providers { +func (mc *multiProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + for _, p := range mc.providers { valset, err = p.ValidatorSet(chainID, height) if err == nil { // TODO Log unexpected types of errors. diff --git a/lite/provider.go b/lite/provider.go index 34ba40d4..c31b2da4 100644 --- a/lite/provider.go +++ b/lite/provider.go @@ -2,6 +2,7 @@ package lite import ( "github.com/tendermint/tendermint/types" + log "github.com/tendermint/tmlibs/log" ) // Provider provides information for the lite client to sync validators. @@ -16,6 +17,9 @@ type Provider interface { // Get the valset that corresponds to chainID and height and return. // Height must be >= 1. ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) + + // Set a logger. + SetLogger(logger log.Logger) } // A provider that can also persist new information. diff --git a/lite/provider_test.go b/lite/provider_test.go index 96523d94..09e8119b 100644 --- a/lite/provider_test.go +++ b/lite/provider_test.go @@ -10,6 +10,7 @@ import ( lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tmlibs/db" + log "github.com/tendermint/tmlibs/log" ) // missingProvider doesn't store anything, always a miss. 
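
The hunks above rework the lite package constructors: NewDBProvider now takes a label (and an optional SetLimit for garbage collection), NewMultiProvider returns a pointer, NewInquiringCertifier no longer returns an error, and every Provider exposes a SetLogger that is propagated recursively. A minimal sketch of how these pieces compose, mirroring the NewCertifier helper that appears later in this patch; the package and function names here are illustrative only:

    package litexample

    import (
        "github.com/tendermint/tendermint/lite"
        lclient "github.com/tendermint/tendermint/lite/client"
        dbm "github.com/tendermint/tmlibs/db"
        log "github.com/tendermint/tmlibs/log"
    )

    // exampleCertifier wires a trusted store (an in-memory cache in front of a
    // LevelDB store) and an RPC-backed source into an InquiringCertifier, and
    // sets a logger once so it propagates to every sub-provider.
    func exampleCertifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger) *lite.InquiringCertifier {
        trust := lite.NewMultiProvider(
            // Keep only the most recent full commits in memory.
            lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(10),
            // Persist everything else to disk.
            lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)),
        )
        source := lclient.NewProvider(chainID, client)

        cert := lite.NewInquiringCertifier(chainID, trust, source)
        cert.SetLogger(logger) // recursively sets the logger on trust and source
        return cert
    }
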
@@ -28,16 +29,17 @@ func (missingProvider) LatestFullCommit(chainID string, minHeight, maxHeight int func (missingProvider) ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) { return nil, errors.New("missing validator set") } +func (missingProvider) SetLogger(_ log.Logger) {} func TestMemProvider(t *testing.T) { - p := NewDBProvider(dbm.NewMemDB()) + p := NewDBProvider("mem", dbm.NewMemDB()) checkProvider(t, p, "test-mem", "empty") } func TestMultiProvider(t *testing.T) { p := NewMultiProvider( NewMissingProvider(), - NewDBProvider(dbm.NewMemDB()), + NewDBProvider("mem", dbm.NewMemDB()), NewMissingProvider(), ) checkProvider(t, p, "test-cache", "kjfhekfhkewhgit") @@ -105,8 +107,8 @@ func TestMultiLatestFullCommit(t *testing.T) { // We will write data to the second level of the cache (p2), and see what // gets cached/stored in. - p := NewDBProvider(dbm.NewMemDB()) - p2 := NewDBProvider(dbm.NewMemDB()) + p := NewDBProvider("mem1", dbm.NewMemDB()) + p2 := NewDBProvider("mem2", dbm.NewMemDB()) cp := NewMultiProvider(p, p2) chainID := "cache-best-height" diff --git a/lite/proxy/certifier.go b/lite/proxy/certifier.go index 772af58f..a1ab02c5 100644 --- a/lite/proxy/certifier.go +++ b/lite/proxy/certifier.go @@ -3,33 +3,39 @@ package proxy import ( "github.com/tendermint/tendermint/lite" lclient "github.com/tendermint/tendermint/lite/client" + cmn "github.com/tendermint/tmlibs/common" dbm "github.com/tendermint/tmlibs/db" + log "github.com/tendermint/tmlibs/log" ) -func GetCertifier(chainID, rootDir string, client lclient.SignStatusClient) (*lite.InquiringCertifier, error) { +func NewCertifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger) (*lite.InquiringCertifier, error) { + + logger = logger.With("module", "lite/proxy") + logger.Info("lite/proxy/NewCertifier()...", "chainID", chainID, "rootDir", rootDir, "client", client) + + memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(10) + lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)) trust := lite.NewMultiProvider( - lite.NewDBProvider(dbm.NewMemDB()).SetLimit(10), - lite.NewDBProvider(dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)), + memProvider, + lvlProvider, ) source := lclient.NewProvider(chainID, client) + cert := lite.NewInquiringCertifier(chainID, trust, source) + cert.SetLogger(logger) // Sets logger recursively. // TODO: Make this more secure, e.g. make it interactive in the console? 
_, err := trust.LatestFullCommit(chainID, 1, 1<<63-1) if err != nil { + logger.Info("lite/proxy/NewCertifier found no trusted full commit, initializing from source from height 1...") fc, err := source.LatestFullCommit(chainID, 1, 1) if err != nil { - return nil, err + return nil, cmn.ErrorWrap(err, "fetching source full commit @ height 1") } err = trust.SaveFullCommit(fc) if err != nil { - return nil, err + return nil, cmn.ErrorWrap(err, "saving full commit to trusted") } } - cert, err := lite.NewInquiringCertifier(chainID, trust, source) - if err != nil { - return nil, err - } - return cert, nil } diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go index 83fc96a1..82b1fb09 100644 --- a/lite/proxy/wrapper.go +++ b/lite/proxy/wrapper.go @@ -89,33 +89,49 @@ func (w Wrapper) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlock // Block returns an entire block and verifies all signatures func (w Wrapper) Block(height *int64) (*ctypes.ResultBlock, error) { - r, err := w.Client.Block(height) + resBlock, err := w.Client.Block(height) if err != nil { return nil, err } // get a checkpoint to verify from - res, err := w.Commit(height) + resCommit, err := w.Commit(height) if err != nil { return nil, err } - sh := res.SignedHeader + sh := resCommit.SignedHeader // now verify - err = ValidateBlockMeta(r.BlockMeta, sh) + err = ValidateBlockMeta(resBlock.BlockMeta, sh) if err != nil { return nil, err } - err = ValidateBlock(r.Block, sh) + err = ValidateBlock(resBlock.Block, sh) if err != nil { return nil, err } - return r, nil + return resBlock, nil } // Commit downloads the Commit and certifies it with the lite. // // This is the foundation for all other verification in this module func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) { + if height == nil { + resStatus, err := w.Client.Status() + if err != nil { + return nil, err + } + // NOTE: If resStatus.CatchingUp, there is a race + // condition where the validator set for the next height + // isn't available until some time after the blockstore + // has height h on the remote node. This isn't an issue + // once the node has caught up, and a syncing node likely + // won't have this issue esp with the implementation we + // have here, but we may have to address this at some + // point. 
+ height = new(int64) + *height = resStatus.SyncInfo.LatestBlockHeight + } rpcclient.WaitForHeight(w.Client, *height, nil) res, err := w.Client.Commit(height) // if we got it, then certify it diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index 9fcb75e1..52fe1b55 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -3,8 +3,8 @@ package core import ( "time" - crypto "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/consensus" + crypto "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" @@ -28,6 +28,7 @@ var subscribeTimeout = 5 * time.Second type Consensus interface { GetState() sm.State GetValidators() (int64, []*types.Validator) + GetLastHeight() int64 GetRoundStateJSON() ([]byte, error) GetRoundStateSimpleJSON() ([]byte, error) } diff --git a/rpc/core/status.go b/rpc/core/status.go index 5738685b..00a14c54 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -64,7 +64,12 @@ import ( //} // ``` func Status() (*ctypes.ResultStatus, error) { - latestHeight := blockStore.Height() + var latestHeight int64 = -1 + if consensusReactor.FastSync() { + latestHeight = blockStore.Height() + } else { + latestHeight = consensusState.GetLastHeight() + } var ( latestBlockMeta *types.BlockMeta latestBlockHash cmn.HexBytes From acd976ad5b64e3952459353cb84b14af86d85bf9 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 26 Jun 2018 23:42:00 -0700 Subject: [PATCH 08/27] bump circle --- bump | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 bump diff --git a/bump b/bump new file mode 100644 index 00000000..e69de29b From 19fc4ac47c40f12567652abee3afe70cd5c5970c Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 26 Jun 2018 23:58:47 -0700 Subject: [PATCH 09/27] remove abci from gopkg.toml --- Gopkg.toml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/Gopkg.toml b/Gopkg.toml index dc56ae29..18e2767a 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -69,10 +69,6 @@ name = "github.com/stretchr/testify" version = "~1.2.1" -[[constraint]] - name = "github.com/tendermint/abci" - version = "~0.12.0" - [[constraint]] name = "github.com/tendermint/go-amino" version = "~0.10.1" From 835c2ee74a07814d690166bae3ee0bbc81b02f3e Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 27 Jun 2018 00:09:04 -0700 Subject: [PATCH 10/27] Print --- test/p2p/basic/test.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/test/p2p/basic/test.sh b/test/p2p/basic/test.sh index caf66512..423b5b01 100755 --- a/test/p2p/basic/test.sh +++ b/test/p2p/basic/test.sh @@ -56,6 +56,7 @@ for i in `seq 1 $N`; do # - assert block height is greater than 1 BLOCK_HEIGHT=`curl -s $addr/status | jq .result.sync_info.latest_block_height | jq fromjson` COUNT=0 + echo "$$BLOCK_HEIGHT IS $BLOCK_HEIGHT" while [ "$BLOCK_HEIGHT" -le 1 ]; do echo "Waiting for node $i to commit a block ..." 
sleep 1 From 8163b99a75f34a3eefc19bf0449405a701b82378 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 27 Jun 2018 00:37:53 -0700 Subject: [PATCH 11/27] print docker output to console to debug circle --- test/p2p/kill_all/check_peers.sh | 4 ++-- test/p2p/peer.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test/p2p/kill_all/check_peers.sh b/test/p2p/kill_all/check_peers.sh index 87a76811..95da7484 100644 --- a/test/p2p/kill_all/check_peers.sh +++ b/test/p2p/kill_all/check_peers.sh @@ -23,7 +23,7 @@ set -e # get the first peer's height addr=$(test/p2p/ip.sh 1):26657 -h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) +h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | sed -e "s/^\"\(.*\)\"$/\1/g") echo "1st peer is on height $h1" echo "Waiting until other peers reporting a height higher than the 1st one" @@ -33,7 +33,7 @@ for i in $(seq 2 "$NUM_OF_PEERS"); do while [[ $hi -le $h1 ]] ; do addr=$(test/p2p/ip.sh "$i"):26657 - hi=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) + hi=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | sed -e "s/^\"\(.*\)\"$/\1/g") echo "... peer $i is on height $hi" diff --git a/test/p2p/peer.sh b/test/p2p/peer.sh index 15d44ff3..59b55b1e 100644 --- a/test/p2p/peer.sh +++ b/test/p2p/peer.sh @@ -14,7 +14,7 @@ echo "starting tendermint peer ID=$ID" # start tendermint container on the network # NOTE: $NODE_FLAGS should be unescaped (no quotes). otherwise it will be # treated as one flag. -docker run -d \ +docker run \ --net="$NETWORK_NAME" \ --ip=$(test/p2p/ip.sh "$ID") \ --name "local_testnet_$ID" \ @@ -24,4 +24,4 @@ docker run -d \ --log-opt syslog-address=udp://127.0.0.1:5514 \ --log-opt syslog-facility=daemon \ --log-opt tag="{{.Name}}" \ - "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" + "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" & From ad1b722898aa435bb022b1641ebbc25365a17fac Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 27 Jun 2018 00:41:50 -0700 Subject: [PATCH 12/27] bump for circle --- bump | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bump b/bump index e69de29b..729353f7 100644 --- a/bump +++ b/bump @@ -0,0 +1,2 @@ + +booop From 363146dacf803a065d7bbe35994b145fae746ef3 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 27 Jun 2018 02:03:15 -0700 Subject: [PATCH 13/27] just print node1 --- test/p2p/peer.sh | 41 ++++++++++++++++++++++++++++++----------- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/test/p2p/peer.sh b/test/p2p/peer.sh index 59b55b1e..a718fd49 100644 --- a/test/p2p/peer.sh +++ b/test/p2p/peer.sh @@ -14,14 +14,33 @@ echo "starting tendermint peer ID=$ID" # start tendermint container on the network # NOTE: $NODE_FLAGS should be unescaped (no quotes). otherwise it will be # treated as one flag. 
-docker run \ - --net="$NETWORK_NAME" \ - --ip=$(test/p2p/ip.sh "$ID") \ - --name "local_testnet_$ID" \ - --entrypoint tendermint \ - -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ - --log-driver=syslog \ - --log-opt syslog-address=udp://127.0.0.1:5514 \ - --log-opt syslog-facility=daemon \ - --log-opt tag="{{.Name}}" \ - "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" & + + + +if [[ "$ID" == "1" ]]; then + docker run \ + --net="$NETWORK_NAME" \ + --ip=$(test/p2p/ip.sh "$ID") \ + --name "local_testnet_$ID" \ + --entrypoint tendermint \ + -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ + -e GOMAXPROCS=1 \ + --log-driver=syslog \ + --log-opt syslog-address=udp://127.0.0.1:5514 \ + --log-opt syslog-facility=daemon \ + --log-opt tag="{{.Name}}" \ + "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" & +else + docker run -d \ + --net="$NETWORK_NAME" \ + --ip=$(test/p2p/ip.sh "$ID") \ + --name "local_testnet_$ID" \ + --entrypoint tendermint \ + -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ + -e GOMAXPROCS=1 \ + --log-driver=syslog \ + --log-opt syslog-address=udp://127.0.0.1:5514 \ + --log-opt syslog-facility=daemon \ + --log-opt tag="{{.Name}}" \ + "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" +fi From 9184733261efc20d9c4b5328df03c8da8f2ff1ec Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 27 Jun 2018 02:34:11 -0700 Subject: [PATCH 14/27] try it with new consensus? --- consensus/state.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index 7c18d8b0..19c17f33 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1587,7 +1587,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, if prevotes.HasTwoThirdsMajority() { cs.enterPrecommit(height, vote.Round) } else { - cs.enterPrevote(height, vote.Round) // if the vote is ahead of us + cs.enterPropose(height, vote.Round) // we can't prevote until we wait for the proposal. cs.enterPrevoteWait(height, vote.Round) } } else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round { @@ -1621,7 +1621,8 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, } } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() { cs.enterNewRound(height, vote.Round) - cs.enterPrecommit(height, vote.Round) + cs.enterPrevote(height, vote.Round) + cs.enterPrevoteWait(height, vote.Round) cs.enterPrecommitWait(height, vote.Round) } default: From cfcbc614498b5c16e49e064eacb8dba168cb78c3 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 27 Jun 2018 04:04:33 -0700 Subject: [PATCH 15/27] oops --- consensus/state.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index 19c17f33..e3c9054a 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1021,9 +1021,11 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) { logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. 
Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } - if !cs.Votes.Prevotes(round).HasTwoThirdsAny() { - cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) - } + /* + if !cs.Votes.Prevotes(round).HasTwoThirdsAny() { + cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) + } + */ logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { From 8524a8da7fdb52e5396b360a8862d3466be9e2e8 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 26 Jun 2018 23:42:00 -0700 Subject: [PATCH 16/27] Try to fix circle... --- Gopkg.toml | 4 ---- bump | 2 ++ consensus/state.go | 13 ++++++---- foo.sh | 1 + test/p2p/basic/test.sh | 1 + test/p2p/kill_all/check_peers.sh | 4 ++-- test/p2p/peer.sh | 41 +++++++++++++++++++++++--------- types/validator_set.go | 24 +++++++++---------- 8 files changed, 56 insertions(+), 34 deletions(-) create mode 100644 bump create mode 100644 foo.sh diff --git a/Gopkg.toml b/Gopkg.toml index dc56ae29..18e2767a 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -69,10 +69,6 @@ name = "github.com/stretchr/testify" version = "~1.2.1" -[[constraint]] - name = "github.com/tendermint/abci" - version = "~0.12.0" - [[constraint]] name = "github.com/tendermint/go-amino" version = "~0.10.1" diff --git a/bump b/bump new file mode 100644 index 00000000..729353f7 --- /dev/null +++ b/bump @@ -0,0 +1,2 @@ + +booop diff --git a/consensus/state.go b/consensus/state.go index 7c18d8b0..e3c9054a 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1021,9 +1021,11 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) { logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } - if !cs.Votes.Prevotes(round).HasTwoThirdsAny() { - cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) - } + /* + if !cs.Votes.Prevotes(round).HasTwoThirdsAny() { + cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) + } + */ logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { @@ -1587,7 +1589,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, if prevotes.HasTwoThirdsMajority() { cs.enterPrecommit(height, vote.Round) } else { - cs.enterPrevote(height, vote.Round) // if the vote is ahead of us + cs.enterPropose(height, vote.Round) // we can't prevote until we wait for the proposal. 
cs.enterPrevoteWait(height, vote.Round) } } else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round { @@ -1621,7 +1623,8 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, } } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() { cs.enterNewRound(height, vote.Round) - cs.enterPrecommit(height, vote.Round) + cs.enterPrevote(height, vote.Round) + cs.enterPrevoteWait(height, vote.Round) cs.enterPrecommitWait(height, vote.Round) } default: diff --git a/foo.sh b/foo.sh new file mode 100644 index 00000000..be8b9d78 --- /dev/null +++ b/foo.sh @@ -0,0 +1 @@ +docker run -ti tester find diff --git a/test/p2p/basic/test.sh b/test/p2p/basic/test.sh index caf66512..423b5b01 100755 --- a/test/p2p/basic/test.sh +++ b/test/p2p/basic/test.sh @@ -56,6 +56,7 @@ for i in `seq 1 $N`; do # - assert block height is greater than 1 BLOCK_HEIGHT=`curl -s $addr/status | jq .result.sync_info.latest_block_height | jq fromjson` COUNT=0 + echo "$$BLOCK_HEIGHT IS $BLOCK_HEIGHT" while [ "$BLOCK_HEIGHT" -le 1 ]; do echo "Waiting for node $i to commit a block ..." sleep 1 diff --git a/test/p2p/kill_all/check_peers.sh b/test/p2p/kill_all/check_peers.sh index 87a76811..95da7484 100644 --- a/test/p2p/kill_all/check_peers.sh +++ b/test/p2p/kill_all/check_peers.sh @@ -23,7 +23,7 @@ set -e # get the first peer's height addr=$(test/p2p/ip.sh 1):26657 -h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) +h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | sed -e "s/^\"\(.*\)\"$/\1/g") echo "1st peer is on height $h1" echo "Waiting until other peers reporting a height higher than the 1st one" @@ -33,7 +33,7 @@ for i in $(seq 2 "$NUM_OF_PEERS"); do while [[ $hi -le $h1 ]] ; do addr=$(test/p2p/ip.sh "$i"):26657 - hi=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) + hi=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | sed -e "s/^\"\(.*\)\"$/\1/g") echo "... peer $i is on height $hi" diff --git a/test/p2p/peer.sh b/test/p2p/peer.sh index 15d44ff3..a718fd49 100644 --- a/test/p2p/peer.sh +++ b/test/p2p/peer.sh @@ -14,14 +14,33 @@ echo "starting tendermint peer ID=$ID" # start tendermint container on the network # NOTE: $NODE_FLAGS should be unescaped (no quotes). otherwise it will be # treated as one flag. 
-docker run -d \ - --net="$NETWORK_NAME" \ - --ip=$(test/p2p/ip.sh "$ID") \ - --name "local_testnet_$ID" \ - --entrypoint tendermint \ - -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ - --log-driver=syslog \ - --log-opt syslog-address=udp://127.0.0.1:5514 \ - --log-opt syslog-facility=daemon \ - --log-opt tag="{{.Name}}" \ - "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" + + + +if [[ "$ID" == "1" ]]; then + docker run \ + --net="$NETWORK_NAME" \ + --ip=$(test/p2p/ip.sh "$ID") \ + --name "local_testnet_$ID" \ + --entrypoint tendermint \ + -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ + -e GOMAXPROCS=1 \ + --log-driver=syslog \ + --log-opt syslog-address=udp://127.0.0.1:5514 \ + --log-opt syslog-facility=daemon \ + --log-opt tag="{{.Name}}" \ + "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" & +else + docker run -d \ + --net="$NETWORK_NAME" \ + --ip=$(test/p2p/ip.sh "$ID") \ + --name "local_testnet_$ID" \ + --entrypoint tendermint \ + -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ + -e GOMAXPROCS=1 \ + --log-driver=syslog \ + --log-opt syslog-address=udp://127.0.0.1:5514 \ + --log-opt syslog-facility=daemon \ + --log-opt tag="{{.Name}}" \ + "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" +fi diff --git a/types/validator_set.go b/types/validator_set.go index dc1d0e88..636c046a 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -279,11 +279,6 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i if precommit.Type != VoteTypePrecommit { return fmt.Errorf("Invalid commit -- not precommit @ index %v", idx) } - // NOTE: This will go away when we refactor Commit. - if !blockID.Equals(precommit.BlockID) { - return fmt.Errorf("Invalid commit -- wrong block id @ index %v: want %v got %v", - idx, blockID, precommit.BlockID) - } _, val := vals.GetByIndex(idx) // Validate signature. precommitSignBytes := precommit.SignBytes(chainID) @@ -291,7 +286,12 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit) } // Good precommit! - talliedVotingPower += val.VotingPower + if blockID.Equals(precommit.BlockID) { + talliedVotingPower += val.VotingPower + } else { + // It's OK that the BlockID doesn't match. We include stray + // precommits to measure validator availability. + } } if talliedVotingPower > vals.TotalVotingPower()*2/3 { @@ -358,11 +358,6 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin if precommit.Type != VoteTypePrecommit { return cmn.NewError("Invalid commit -- not precommit @ index %v", idx) } - // NOTE: This will go away when we refactor Commit. - if !blockID.Equals(precommit.BlockID) { - return fmt.Errorf("Invalid commit -- wrong block id @ index %v: want %v got %v", - idx, blockID, precommit.BlockID) - } // See if this validator is in oldVals. idx, val := oldVals.GetByAddress(precommit.ValidatorAddress) if val == nil || seen[idx] { @@ -376,7 +371,12 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin return cmn.NewError("Invalid commit -- invalid signature: %v", precommit) } // Good precommit! - oldVotingPower += val.VotingPower + if blockID.Equals(precommit.BlockID) { + oldVotingPower += val.VotingPower + } else { + // It's OK that the BlockID doesn't match. 
We include stray + // precommits to measure validator availability. + } } if oldVotingPower <= oldVals.TotalVotingPower()*2/3 { From b51ed132f7ac13792dd9b5d7dbb7c806364fdcc9 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 27 Jun 2018 14:31:42 -0700 Subject: [PATCH 17/27] Fix test/p2p/pex circle tests; update consensus --- .gitignore | 1 + consensus/state.go | 2 +- lite/commit.go | 8 +------- lite/dbprovider.go | 4 ---- test/p2p/peer.sh | 4 +--- test/p2p/pex/test_addrbook.sh | 15 ++++++++++++--- 6 files changed, 16 insertions(+), 18 deletions(-) diff --git a/.gitignore b/.gitignore index 269066e3..9337de17 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,4 @@ scripts/cutWALUntil/cutWALUntil libs/pubsub/query/fuzz_test/output shunit2 .tendermint-lite +addrbook.json diff --git a/consensus/state.go b/consensus/state.go index e3c9054a..4bb47045 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1623,7 +1623,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, } } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() { cs.enterNewRound(height, vote.Round) - cs.enterPrevote(height, vote.Round) + cs.enterPropose(height, vote.Round) cs.enterPrevoteWait(height, vote.Round) cs.enterPrecommitWait(height, vote.Round) } diff --git a/lite/commit.go b/lite/commit.go index 40c3534c..89f04417 100644 --- a/lite/commit.go +++ b/lite/commit.go @@ -65,15 +65,9 @@ func (fc FullCommit) ValidateFull(chainID string) error { } // Validate the signatures on the commit. hdr, cmt := fc.SignedHeader.Header, fc.SignedHeader.Commit - err = fc.Validators.VerifyCommit( + return fc.Validators.VerifyCommit( hdr.ChainID, cmt.BlockID, hdr.Height, cmt) - if err != nil { - return err - } - - // All good! - return nil } // Height returns the height of the header. diff --git a/lite/dbprovider.go b/lite/dbprovider.go index f39f033c..3ee3b062 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -241,10 +241,6 @@ func validatorSetKey(chainID string, height int64) []byte { return []byte(fmt.Sprintf("%s/%010d/vs", chainID, height)) } -func chainKeyPrefix(chainID string, height int64) []byte { - return []byte(fmt.Sprintf("%s/%010d/", chainID, height)) -} - var chainKeyPrefixPattern = regexp.MustCompile(`([^/]+)/([0-9]*)/`) func parseChainKeyPrefix(key []byte) (chainID string, height int64, ok bool) { diff --git a/test/p2p/peer.sh b/test/p2p/peer.sh index a718fd49..ad04d000 100644 --- a/test/p2p/peer.sh +++ b/test/p2p/peer.sh @@ -15,9 +15,7 @@ echo "starting tendermint peer ID=$ID" # NOTE: $NODE_FLAGS should be unescaped (no quotes). otherwise it will be # treated as one flag. - - -if [[ "$ID" == "1" ]]; then +if [[ "$ID" == "x" ]]; then # Set "x" to "1" to print to console. docker run \ --net="$NETWORK_NAME" \ --ip=$(test/p2p/ip.sh "$ID") \ diff --git a/test/p2p/pex/test_addrbook.sh b/test/p2p/pex/test_addrbook.sh index d54bcf42..9c58db30 100644 --- a/test/p2p/pex/test_addrbook.sh +++ b/test/p2p/pex/test_addrbook.sh @@ -16,6 +16,7 @@ CLIENT_NAME="pex_addrbook_$ID" echo "1. 
restart peer $ID" docker stop "local_testnet_$ID" +echo "stopped local_testnet_$ID" # preserve addrbook.json docker cp "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" "/tmp/addrbook.json" set +e #CIRCLE @@ -24,6 +25,13 @@ set -e # NOTE that we do not provide persistent_peers bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe" +echo "started local_testnet_$ID" + +# if the client runs forever, it means addrbook wasn't saved or was empty +bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N" + +# Now we know that the node is up. + docker cp "/tmp/addrbook.json" "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" echo "with the following addrbook:" cat /tmp/addrbook.json @@ -31,9 +39,6 @@ cat /tmp/addrbook.json # docker exec "local_testnet_$ID" cat "/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" echo "" -# if the client runs forever, it means addrbook wasn't saved or was empty -bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N" - echo "----------------------------------------------------------------------" echo "Testing other peers connect to us if we have neither persistent_peers nor the addrbook" echo "(assuming peers are started with pex enabled)" @@ -42,16 +47,20 @@ CLIENT_NAME="pex_no_addrbook_$ID" echo "1. restart peer $ID" docker stop "local_testnet_$ID" +echo "stopped local_testnet_$ID" set +e #CIRCLE docker rm -vf "local_testnet_$ID" set -e # NOTE that we do not provide persistent_peers bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe" +echo "started local_testnet_$ID" # if the client runs forever, it means other peers have removed us from their books (which should not happen) bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N" +# Now we know that the node is up. 
+ echo "" echo "PASS" echo "" From 9018acde5f4e77368490c6d497f0364ea396dd2a Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 2 Jul 2018 14:58:07 -0400 Subject: [PATCH 18/27] tmlibs -> tendermint/libs --- lite/base_certifier.go | 2 +- lite/client/provider.go | 2 +- lite/dbprovider.go | 4 ++-- lite/errors/errors.go | 2 +- lite/inquiring_certifier.go | 2 +- lite/inquiring_certifier_test.go | 4 ++-- lite/multiprovider.go | 2 +- lite/provider.go | 2 +- lite/provider_test.go | 4 ++-- lite/proxy/certifier.go | 6 +++--- lite/proxy/errors.go | 2 +- 11 files changed, 16 insertions(+), 16 deletions(-) diff --git a/lite/base_certifier.go b/lite/base_certifier.go index 6f2b3da9..0f9faba3 100644 --- a/lite/base_certifier.go +++ b/lite/base_certifier.go @@ -5,7 +5,7 @@ import ( lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) var _ Certifier = (*BaseCertifier)(nil) diff --git a/lite/client/provider.go b/lite/client/provider.go index 3ef15344..8087be71 100644 --- a/lite/client/provider.go +++ b/lite/client/provider.go @@ -13,7 +13,7 @@ import ( rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - log "github.com/tendermint/tmlibs/log" + log "github.com/tendermint/tendermint/libs/log" ) // SignStatusClient combines a SignClient and StatusClient. diff --git a/lite/dbprovider.go b/lite/dbprovider.go index 3ee3b062..13ad2c61 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -9,8 +9,8 @@ import ( crypto "github.com/tendermint/tendermint/crypto" lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" - log "github.com/tendermint/tmlibs/log" + dbm "github.com/tendermint/tendermint/libs/db" + log "github.com/tendermint/tendermint/libs/log" ) type DBProvider struct { diff --git a/lite/errors/errors.go b/lite/errors/errors.go index c38ecf88..96a5a02a 100644 --- a/lite/errors/errors.go +++ b/lite/errors/errors.go @@ -3,7 +3,7 @@ package errors import ( "fmt" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //---------------------------------------- diff --git a/lite/inquiring_certifier.go b/lite/inquiring_certifier.go index f030ec57..31637447 100644 --- a/lite/inquiring_certifier.go +++ b/lite/inquiring_certifier.go @@ -5,7 +5,7 @@ import ( lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" - log "github.com/tendermint/tmlibs/log" + log "github.com/tendermint/tendermint/libs/log" ) var _ Certifier = (*InquiringCertifier)(nil) diff --git a/lite/inquiring_certifier_test.go b/lite/inquiring_certifier_test.go index 23cb5488..5eb63727 100644 --- a/lite/inquiring_certifier_test.go +++ b/lite/inquiring_certifier_test.go @@ -7,8 +7,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tmlibs/db" - log "github.com/tendermint/tmlibs/log" + dbm "github.com/tendermint/tendermint/libs/db" + log "github.com/tendermint/tendermint/libs/log" ) func TestInquirerValidPath(t *testing.T) { diff --git a/lite/multiprovider.go b/lite/multiprovider.go index 8ff523b4..991a12d7 100644 --- a/lite/multiprovider.go +++ b/lite/multiprovider.go @@ -3,7 +3,7 @@ package lite import ( lerr "github.com/tendermint/tendermint/lite/errors" 
"github.com/tendermint/tendermint/types" - log "github.com/tendermint/tmlibs/log" + log "github.com/tendermint/tendermint/libs/log" ) // multiProvider allows you to place one or more caches in front of a source diff --git a/lite/provider.go b/lite/provider.go index c31b2da4..59e36a67 100644 --- a/lite/provider.go +++ b/lite/provider.go @@ -2,7 +2,7 @@ package lite import ( "github.com/tendermint/tendermint/types" - log "github.com/tendermint/tmlibs/log" + log "github.com/tendermint/tendermint/libs/log" ) // Provider provides information for the lite client to sync validators. diff --git a/lite/provider_test.go b/lite/provider_test.go index 09e8119b..e5547022 100644 --- a/lite/provider_test.go +++ b/lite/provider_test.go @@ -9,8 +9,8 @@ import ( lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" - log "github.com/tendermint/tmlibs/log" + dbm "github.com/tendermint/tendermint/libs/db" + log "github.com/tendermint/tendermint/libs/log" ) // missingProvider doesn't store anything, always a miss. diff --git a/lite/proxy/certifier.go b/lite/proxy/certifier.go index a1ab02c5..bd09b1ab 100644 --- a/lite/proxy/certifier.go +++ b/lite/proxy/certifier.go @@ -3,9 +3,9 @@ package proxy import ( "github.com/tendermint/tendermint/lite" lclient "github.com/tendermint/tendermint/lite/client" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - log "github.com/tendermint/tmlibs/log" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + log "github.com/tendermint/tendermint/libs/log" ) func NewCertifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger) (*lite.InquiringCertifier, error) { diff --git a/lite/proxy/errors.go b/lite/proxy/errors.go index 9af72a54..6a7c2354 100644 --- a/lite/proxy/errors.go +++ b/lite/proxy/errors.go @@ -1,7 +1,7 @@ package proxy import ( - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) type errNoData struct{} From f1093edbe2143cf89125052c119598c7acb5eed0 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 23 Jul 2018 23:11:53 -0400 Subject: [PATCH 19/27] remove accidental files --- bump | 2 -- foo.sh | 1 - 2 files changed, 3 deletions(-) delete mode 100644 bump delete mode 100644 foo.sh diff --git a/bump b/bump deleted file mode 100644 index 729353f7..00000000 --- a/bump +++ /dev/null @@ -1,2 +0,0 @@ - -booop diff --git a/foo.sh b/foo.sh deleted file mode 100644 index be8b9d78..00000000 --- a/foo.sh +++ /dev/null @@ -1 +0,0 @@ -docker run -ti tester find From f6705f02c7df53b9dc036a290ed0c31cc2f030e4 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 23 Jul 2018 23:31:47 -0400 Subject: [PATCH 20/27] fixes post merge --- lite/dbprovider.go | 8 ++-- lite/helpers.go | 4 +- lite/proxy/validate_test.go | 11 +---- types/validator_set_test.go | 95 +++++++++++++++++++------------------ 4 files changed, 56 insertions(+), 62 deletions(-) diff --git a/lite/dbprovider.go b/lite/dbprovider.go index 13ad2c61..8392fcea 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -6,11 +6,11 @@ import ( "strconv" amino "github.com/tendermint/go-amino" - crypto "github.com/tendermint/tendermint/crypto" - lerr "github.com/tendermint/tendermint/lite/errors" - "github.com/tendermint/tendermint/types" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" dbm "github.com/tendermint/tendermint/libs/db" log 
"github.com/tendermint/tendermint/libs/log" + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" ) type DBProvider struct { @@ -24,7 +24,7 @@ type DBProvider struct { func NewDBProvider(label string, db dbm.DB) *DBProvider { //db = dbm.NewDebugDB("db provider "+cmn.RandStr(4), db) cdc := amino.NewCodec() - crypto.RegisterAmino(cdc) + cryptoAmino.RegisterAmino(cdc) dbp := &DBProvider{ logger: log.NewNopLogger(), label: label, diff --git a/lite/helpers.go b/lite/helpers.go index c6fe1760..9265aeea 100644 --- a/lite/helpers.go +++ b/lite/helpers.go @@ -44,7 +44,7 @@ func (pkz privKeys) Extend(n int) privKeys { // GenSecpPrivKeys produces an array of secp256k1 private keys to generate commits. func GenSecpPrivKeys(n int) privKeys { - res := make(privKey, n) + res := make(privKeys, n) for i := range res { res[i] = secp256k1.GenPrivKey() } @@ -54,7 +54,7 @@ func GenSecpPrivKeys(n int) privKeys { // ExtendSecp adds n more secp256k1 keys (to remove, just take a slice). func (pkz privKeys) ExtendSecp(n int) privKeys { extra := GenSecpPrivKeys(n) - return append(v, extra...) + return append(pkz, extra...) } // ToValidators produces a valset from the set of keys. diff --git a/lite/proxy/validate_test.go b/lite/proxy/validate_test.go index 6ca9035c..1ce4d667 100644 --- a/lite/proxy/validate_test.go +++ b/lite/proxy/validate_test.go @@ -33,10 +33,7 @@ func TestValidateBlock(t *testing.T) { block: nil, wantErr: "non-nil Block", }, { - block: &types.Block{}, wantErr: "nil Header", - }, - { - block: &types.Block{Header: new(types.Header)}, wantErr: "unexpected empty SignedHeader", + block: &types.Block{}, wantErr: "unexpected empty SignedHeader", }, // Start Header.Height mismatch test @@ -115,11 +112,7 @@ func TestValidateBlockMeta(t *testing.T) { meta: nil, wantErr: "non-nil BlockMeta", }, { - meta: &types.BlockMeta{}, wantErr: "non-nil Header", - }, - { - meta: &types.BlockMeta{Header: new(types.Header)}, wantErr: "unexpected empty SignedHeader", - // meta: &types.BlockMeta{}, + meta: &types.BlockMeta{}, wantErr: "unexpected empty SignedHeader", }, // Start Header.Height mismatch test diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 1756f789..ba5fefa2 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -16,57 +16,58 @@ import ( ) func TestValidatorSetBasic(t *testing.T) { - for _, vset := range []*ValidatorSet{NewValidatorSet([]*Validator{}), NewValidatorSet(nil)} { - assert.Panics(t, func() { vset.IncrementAccum(1) }) + assert.Panics(t, func() { NewValidatorSet([]*Validator{}) }) - assert.EqualValues(t, vset, vset.Copy()) - assert.False(t, vset.HasAddress([]byte("some val"))) - idx, val := vset.GetByAddress([]byte("some val")) - assert.Equal(t, -1, idx) - assert.Nil(t, val) - addr, val := vset.GetByIndex(-100) - assert.Nil(t, addr) - assert.Nil(t, val) - addr, val = vset.GetByIndex(0) - assert.Nil(t, addr) - assert.Nil(t, val) - addr, val = vset.GetByIndex(100) - assert.Nil(t, addr) - assert.Nil(t, val) - assert.Zero(t, vset.Size()) - assert.Equal(t, int64(0), vset.TotalVotingPower()) - assert.Nil(t, vset.GetProposer()) - assert.Nil(t, vset.Hash()) + vset := NewValidatorSet(nil) + assert.Panics(t, func() { vset.IncrementAccum(1) }) - // add - val = randValidator_() - assert.True(t, vset.Add(val)) - assert.True(t, vset.HasAddress(val.Address)) - idx, val2 := vset.GetByAddress(val.Address) - assert.Equal(t, 0, idx) - assert.Equal(t, val, val2) - addr, val2 = vset.GetByIndex(0) - assert.Equal(t, 
[]byte(val.Address), addr) - assert.Equal(t, val, val2) - assert.Equal(t, 1, vset.Size()) - assert.Equal(t, val.VotingPower, vset.TotalVotingPower()) - assert.Equal(t, val, vset.GetProposer()) - assert.NotNil(t, vset.Hash()) - assert.NotPanics(t, func() { vset.IncrementAccum(1) }) + assert.EqualValues(t, vset, vset.Copy()) + assert.False(t, vset.HasAddress([]byte("some val"))) + idx, val := vset.GetByAddress([]byte("some val")) + assert.Equal(t, -1, idx) + assert.Nil(t, val) + addr, val := vset.GetByIndex(-100) + assert.Nil(t, addr) + assert.Nil(t, val) + addr, val = vset.GetByIndex(0) + assert.Nil(t, addr) + assert.Nil(t, val) + addr, val = vset.GetByIndex(100) + assert.Nil(t, addr) + assert.Nil(t, val) + assert.Zero(t, vset.Size()) + assert.Equal(t, int64(0), vset.TotalVotingPower()) + assert.Nil(t, vset.GetProposer()) + assert.Nil(t, vset.Hash()) - // update - assert.False(t, vset.Update(randValidator_())) - val.VotingPower = 100 - assert.True(t, vset.Update(val)) + // add + val = randValidator_() + assert.True(t, vset.Add(val)) + assert.True(t, vset.HasAddress(val.Address)) + idx, val2 := vset.GetByAddress(val.Address) + assert.Equal(t, 0, idx) + assert.Equal(t, val, val2) + addr, val2 = vset.GetByIndex(0) + assert.Equal(t, []byte(val.Address), addr) + assert.Equal(t, val, val2) + assert.Equal(t, 1, vset.Size()) + assert.Equal(t, val.VotingPower, vset.TotalVotingPower()) + assert.Equal(t, val, vset.GetProposer()) + assert.NotNil(t, vset.Hash()) + assert.NotPanics(t, func() { vset.IncrementAccum(1) }) - // remove - val2, removed := vset.Remove(randValidator_().Address) - assert.Nil(t, val2) - assert.False(t, removed) - val2, removed = vset.Remove(val.Address) - assert.Equal(t, val.Address, val2.Address) - assert.True(t, removed) - } + // update + assert.False(t, vset.Update(randValidator_())) + val.VotingPower = 100 + assert.True(t, vset.Update(val)) + + // remove + val2, removed := vset.Remove(randValidator_().Address) + assert.Nil(t, val2) + assert.False(t, removed) + val2, removed = vset.Remove(val.Address) + assert.Equal(t, val.Address, val2.Address) + assert.True(t, removed) } func TestCopy(t *testing.T) { From a657870b3d9d0cbda6387f373827b43d1e06dc21 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 24 Jul 2018 09:42:08 -0400 Subject: [PATCH 21/27] update dockerfile --- test/docker/Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index 7351ec9d..e2fc3da1 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -26,10 +26,10 @@ RUN make get_vendor_deps COPY . 
$REPO # install ABCI CLI -RUN cd abci && make install && cd - +RUN make install_abci -RUN go install ./cmd/tendermint -RUN go install ./abci/cmd/abci-cli +# install Tendermint +RUN make install # expose the volume for debugging VOLUME $REPO From 24ae878b9fdd8cbe02359e02ad24aefd9c2885bd Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Wed, 1 Aug 2018 13:29:41 +0200 Subject: [PATCH 22/27] update encoding test to how amino skips empty pointers --- Gopkg.lock | 173 ++++++++++++++++++++++++++++++++++++++----- types/proto3_test.go | 5 -- 2 files changed, 155 insertions(+), 23 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index bf98a0af..bf48ae38 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -3,48 +3,63 @@ [[projects]] branch = "master" + digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" name = "github.com/beorn7/perks" packages = ["quantile"] + pruneopts = "UT" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] branch = "master" + digest = "1:2c00f064ba355903866cbfbf3f7f4c0fe64af6638cc7d1b8bdcf3181bc67f1d8" name = "github.com/btcsuite/btcd" packages = ["btcec"] + pruneopts = "UT" revision = "9a2f9524024889e129a5422aca2cff73cb3eabf6" [[projects]] + digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf" name = "github.com/btcsuite/btcutil" packages = [ "base58", - "bech32" + "bech32", ] + pruneopts = "UT" revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" [[projects]] + digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39" name = "github.com/davecgh/go-spew" packages = ["spew"] + pruneopts = "UT" revision = "346938d642f2ec3594ed81d874461961cd0faa76" version = "v1.1.0" [[projects]] + digest = "1:c7644c73a3d23741fdba8a99b1464e021a224b7e205be497271a8003a15ca41b" name = "github.com/ebuchman/fail-test" packages = ["."] + pruneopts = "UT" revision = "95f809107225be108efcf10a3509e4ea6ceef3c4" [[projects]] + digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70" name = "github.com/fortytw2/leaktest" packages = ["."] + pruneopts = "UT" revision = "a5ef70473c97b71626b9abeda80ee92ba2a7de9e" version = "v1.2.0" [[projects]] + digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" name = "github.com/fsnotify/fsnotify" packages = ["."] + pruneopts = "UT" revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" version = "v1.4.7" [[projects]] + digest = "1:a6910e76d4a4aad1481edf7efb7831bd0aae11dddf2f21e5c3f1fe8c7046d3bd" name = "github.com/go-kit/kit" packages = [ "log", @@ -53,24 +68,30 @@ "metrics", "metrics/discard", "metrics/internal/lv", - "metrics/prometheus" + "metrics/prometheus", ] + pruneopts = "UT" revision = "ca4112baa34cb55091301bdc13b1420a122b1b9e" version = "v0.7.0" [[projects]] + digest = "1:31a18dae27a29aa074515e43a443abfd2ba6deb6d69309d8d7ce789c45f34659" name = "github.com/go-logfmt/logfmt" packages = ["."] + pruneopts = "UT" revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" version = "v0.3.0" [[projects]] + digest = "1:c4a2528ccbcabf90f9f3c464a5fc9e302d592861bbfd0b7135a7de8a943d0406" name = "github.com/go-stack/stack" packages = ["."] + pruneopts = "UT" revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc" version = "v1.7.0" [[projects]] + digest = "1:35621fe20f140f05a0c4ef662c26c0ab4ee50bca78aa30fe87d33120bd28165e" name = "github.com/gogo/protobuf" packages = [ "gogoproto", @@ -78,37 +99,45 @@ "proto", "protoc-gen-gogo/descriptor", "sortkeys", - "types" + "types", ] + pruneopts = "UT" revision = "636bf0302bc95575d69441b25a2603156ffdddf1" version = 
"v1.1.1" [[projects]] + digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260" name = "github.com/golang/protobuf" packages = [ "proto", "ptypes", "ptypes/any", "ptypes/duration", - "ptypes/timestamp" + "ptypes/timestamp", ] + pruneopts = "UT" revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" version = "v1.1.0" [[projects]] branch = "master" + digest = "1:4a0c6bb4805508a6287675fac876be2ac1182539ca8a32468d8128882e9d5009" name = "github.com/golang/snappy" packages = ["."] + pruneopts = "UT" revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" [[projects]] + digest = "1:43dd08a10854b2056e615d1b1d22ac94559d822e1f8b6fcc92c1a1057e85188e" name = "github.com/gorilla/websocket" packages = ["."] + pruneopts = "UT" revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b" version = "v1.2.0" [[projects]] branch = "master" + digest = "1:a361611b8c8c75a1091f00027767f7779b29cb37c456a71b8f2604c88057ab40" name = "github.com/hashicorp/hcl" packages = [ ".", @@ -120,153 +149,197 @@ "hcl/token", "json/parser", "json/scanner", - "json/token" + "json/token", ] + pruneopts = "UT" revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168" [[projects]] + digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" name = "github.com/inconshreveable/mousetrap" packages = ["."] + pruneopts = "UT" revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" version = "v1.0" [[projects]] + digest = "1:39b27d1381a30421f9813967a5866fba35dc1d4df43a6eefe3b7a5444cb07214" name = "github.com/jmhodges/levigo" packages = ["."] + pruneopts = "UT" revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9" [[projects]] branch = "master" + digest = "1:a64e323dc06b73892e5bb5d040ced475c4645d456038333883f58934abbf6f72" name = "github.com/kr/logfmt" packages = ["."] + pruneopts = "UT" revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" [[projects]] + digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7" name = "github.com/magiconair/properties" packages = ["."] + pruneopts = "UT" revision = "c2353362d570a7bfa228149c62842019201cfb71" version = "v1.8.0" [[projects]] + digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] + pruneopts = "UT" revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" version = "v1.0.1" [[projects]] branch = "master" + digest = "1:5ab79470a1d0fb19b041a624415612f8236b3c06070161a910562f2b2d064355" name = "github.com/mitchellh/mapstructure" packages = ["."] + pruneopts = "UT" revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac" [[projects]] + digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e" name = "github.com/pelletier/go-toml" packages = ["."] + pruneopts = "UT" revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194" version = "v1.2.0" [[projects]] + digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747" name = "github.com/pkg/errors" packages = ["."] + pruneopts = "UT" revision = "645ef00459ed84a119197bfb8d8205042c6df63d" version = "v0.8.0" [[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" name = "github.com/pmezard/go-difflib" packages = ["difflib"] + pruneopts = "UT" revision = "792786c7400a136282c1664665ae0a8db921c6c2" version = "v1.0.0" [[projects]] + digest = "1:c1a04665f9613e082e1209cf288bf64f4068dcd6c87a64bf1c4ff006ad422ba0" name = "github.com/prometheus/client_golang" packages = [ "prometheus", - "prometheus/promhttp" + "prometheus/promhttp", ] + pruneopts = 
"UT" revision = "ae27198cdd90bf12cd134ad79d1366a6cf49f632" [[projects]] branch = "master" + digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" name = "github.com/prometheus/client_model" packages = ["go"] + pruneopts = "UT" revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" [[projects]] branch = "master" + digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" name = "github.com/prometheus/common" packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", - "model" + "model", ] + pruneopts = "UT" revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" [[projects]] branch = "master" + digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" name = "github.com/prometheus/procfs" packages = [ ".", "internal/util", "nfs", - "xfs" + "xfs", ] + pruneopts = "UT" revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" [[projects]] + digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c" name = "github.com/rcrowley/go-metrics" packages = ["."] + pruneopts = "UT" revision = "e2704e165165ec55d062f5919b4b29494e9fa790" [[projects]] + digest = "1:bd1ae00087d17c5a748660b8e89e1043e1e5479d0fea743352cda2f8dd8c4f84" name = "github.com/spf13/afero" packages = [ ".", - "mem" + "mem", ] + pruneopts = "UT" revision = "787d034dfe70e44075ccc060d346146ef53270ad" version = "v1.1.1" [[projects]] + digest = "1:516e71bed754268937f57d4ecb190e01958452336fa73dbac880894164e91c1f" name = "github.com/spf13/cast" packages = ["."] + pruneopts = "UT" revision = "8965335b8c7107321228e3e3702cab9832751bac" version = "v1.2.0" [[projects]] + digest = "1:645cabccbb4fa8aab25a956cbcbdf6a6845ca736b2c64e197ca7cbb9d210b939" name = "github.com/spf13/cobra" packages = ["."] + pruneopts = "UT" revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" version = "v0.0.3" [[projects]] branch = "master" + digest = "1:080e5f630945ad754f4b920e60b4d3095ba0237ebf88dc462eb28002932e3805" name = "github.com/spf13/jwalterweatherman" packages = ["."] + pruneopts = "UT" revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394" [[projects]] + digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7" name = "github.com/spf13/pflag" packages = ["."] + pruneopts = "UT" revision = "583c0c0531f06d5278b7d917446061adc344b5cd" version = "v1.0.1" [[projects]] + digest = "1:59e7dceb53b4a1e57eb1eb0bf9951ff0c25912df7660004a789b62b4e8cdca3b" name = "github.com/spf13/viper" packages = ["."] + pruneopts = "UT" revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736" version = "v1.0.2" [[projects]] + digest = "1:c40d65817cdd41fac9aa7af8bed56927bb2d6d47e4fea566a74880f5c2b1c41e" name = "github.com/stretchr/testify" packages = [ "assert", - "require" + "require", ] + pruneopts = "UT" revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" version = "v1.2.2" [[projects]] branch = "master" + digest = "1:b3cfb8d82b1601a846417c3f31c03a7961862cb2c98dcf0959c473843e6d9a2b" name = "github.com/syndtr/goleveldb" packages = [ "leveldb", @@ -280,28 +353,34 @@ "leveldb/opt", "leveldb/storage", "leveldb/table", - "leveldb/util" + "leveldb/util", ] + pruneopts = "UT" revision = "c4c61651e9e37fa117f53c5a906d3b63090d8445" [[projects]] branch = "master" + digest = "1:087aaa7920e5d0bf79586feb57ce01c35c830396ab4392798112e8aae8c47722" name = "github.com/tendermint/ed25519" packages = [ ".", "edwards25519", - "extra25519" + "extra25519", ] + pruneopts = "UT" revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057" [[projects]] branch = "jae/writeemptyptr" + digest = 
"1:2851f999161ce484d09df830edace38654b711228155d476680577f47f2a5bff" name = "github.com/tendermint/go-amino" packages = ["."] + pruneopts = "UT" revision = "8202139066d340b77084a583e176e29fb28b42e9" [[projects]] branch = "master" + digest = "1:c31a37cafc12315b8bd745c8ad6a006ac25350472488162a821e557b3e739d67" name = "golang.org/x/crypto" packages = [ "bcrypt", @@ -317,11 +396,13 @@ "openpgp/errors", "poly1305", "ripemd160", - "salsa20/salsa" + "salsa20/salsa", ] + pruneopts = "UT" revision = "c126467f60eb25f8f27e5a981f32a87e3965053f" [[projects]] + digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1" name = "golang.org/x/net" packages = [ "context", @@ -331,20 +412,24 @@ "idna", "internal/timeseries", "netutil", - "trace" + "trace", ] + pruneopts = "UT" revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f" [[projects]] branch = "master" + digest = "1:5a955fee84608f39d7d6474a62cc8d5ec3f4a311b7f21e79c2ba4e1e16169d34" name = "golang.org/x/sys" packages = [ "cpu", - "unix" + "unix", ] + pruneopts = "UT" revision = "bd9dbc187b6e1dacfdd2722a87e83093c2d7bd6e" [[projects]] + digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" name = "golang.org/x/text" packages = [ "collate", @@ -360,17 +445,21 @@ "unicode/bidi", "unicode/cldr", "unicode/norm", - "unicode/rangetable" + "unicode/rangetable", ] + pruneopts = "UT" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] + digest = "1:cd018653a358d4b743a9d3bee89e825521f2ab2f2ec0770164bf7632d8d73ab7" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] + pruneopts = "UT" revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200" [[projects]] + digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74" name = "google.golang.org/grpc" packages = [ ".", @@ -397,20 +486,68 @@ "stats", "status", "tap", - "transport" + "transport", ] + pruneopts = "UT" revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" version = "v1.13.0" [[projects]] + digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" name = "gopkg.in/yaml.v2" packages = ["."] + pruneopts = "UT" revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" version = "v2.2.1" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "cb44aec2727610e0547ee75e2b4602266d85026bb47747f4fb8bdcef4709bdd1" + input-imports = [ + "github.com/btcsuite/btcd/btcec", + "github.com/btcsuite/btcutil/base58", + "github.com/btcsuite/btcutil/bech32", + "github.com/ebuchman/fail-test", + "github.com/fortytw2/leaktest", + "github.com/go-kit/kit/log", + "github.com/go-kit/kit/log/level", + "github.com/go-kit/kit/log/term", + "github.com/go-kit/kit/metrics", + "github.com/go-kit/kit/metrics/discard", + "github.com/go-kit/kit/metrics/prometheus", + "github.com/go-logfmt/logfmt", + "github.com/gogo/protobuf/gogoproto", + "github.com/gogo/protobuf/jsonpb", + "github.com/gogo/protobuf/proto", + "github.com/golang/protobuf/proto", + "github.com/gorilla/websocket", + "github.com/jmhodges/levigo", + "github.com/pkg/errors", + "github.com/prometheus/client_golang/prometheus", + "github.com/prometheus/client_golang/prometheus/promhttp", + "github.com/rcrowley/go-metrics", + "github.com/spf13/cobra", + "github.com/spf13/viper", + "github.com/stretchr/testify/assert", + "github.com/stretchr/testify/require", + "github.com/syndtr/goleveldb/leveldb", + "github.com/syndtr/goleveldb/leveldb/errors", + "github.com/syndtr/goleveldb/leveldb/iterator", + "github.com/syndtr/goleveldb/leveldb/opt", + 
"github.com/tendermint/ed25519", + "github.com/tendermint/ed25519/extra25519", + "github.com/tendermint/go-amino", + "golang.org/x/crypto/bcrypt", + "golang.org/x/crypto/chacha20poly1305", + "golang.org/x/crypto/hkdf", + "golang.org/x/crypto/nacl/box", + "golang.org/x/crypto/nacl/secretbox", + "golang.org/x/crypto/openpgp/armor", + "golang.org/x/crypto/ripemd160", + "golang.org/x/net/context", + "golang.org/x/net/netutil", + "google.golang.org/grpc", + "google.golang.org/grpc/credentials", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/types/proto3_test.go b/types/proto3_test.go index 19a624a6..50645abf 100644 --- a/types/proto3_test.go +++ b/types/proto3_test.go @@ -67,11 +67,6 @@ func TestProto3Compatibility(t *testing.T) { Height: 150, Time: &proto3.Timestamp{Seconds: seconds, Nanos: nanos}, NumTxs: 7, - // This is not fully skipped in amino (yet) although it is empty: - LastBlockID: &proto3.BlockID{ - PartsHeader: &proto3.PartSetHeader{ - }, - }, TotalTxs: 100, LastCommitHash: []byte("commit hash"), DataHash: []byte("data hash"), From eb9b37e19688b435d7163dcfd134d25b84201eef Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 2 Aug 2018 01:59:46 -0700 Subject: [PATCH 23/27] Pull out consensus liveness fix, which went to #1815 --- Gopkg.lock | 173 ++++----------------------------------- consensus/common_test.go | 31 +------ consensus/state.go | 17 ++-- consensus/state_test.go | 26 +++--- 4 files changed, 38 insertions(+), 209 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index bf48ae38..bf98a0af 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -3,63 +3,48 @@ [[projects]] branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" name = "github.com/beorn7/perks" packages = ["quantile"] - pruneopts = "UT" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] branch = "master" - digest = "1:2c00f064ba355903866cbfbf3f7f4c0fe64af6638cc7d1b8bdcf3181bc67f1d8" name = "github.com/btcsuite/btcd" packages = ["btcec"] - pruneopts = "UT" revision = "9a2f9524024889e129a5422aca2cff73cb3eabf6" [[projects]] - digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf" name = "github.com/btcsuite/btcutil" packages = [ "base58", - "bech32", + "bech32" ] - pruneopts = "UT" revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" [[projects]] - digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39" name = "github.com/davecgh/go-spew" packages = ["spew"] - pruneopts = "UT" revision = "346938d642f2ec3594ed81d874461961cd0faa76" version = "v1.1.0" [[projects]] - digest = "1:c7644c73a3d23741fdba8a99b1464e021a224b7e205be497271a8003a15ca41b" name = "github.com/ebuchman/fail-test" packages = ["."] - pruneopts = "UT" revision = "95f809107225be108efcf10a3509e4ea6ceef3c4" [[projects]] - digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70" name = "github.com/fortytw2/leaktest" packages = ["."] - pruneopts = "UT" revision = "a5ef70473c97b71626b9abeda80ee92ba2a7de9e" version = "v1.2.0" [[projects]] - digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd" name = "github.com/fsnotify/fsnotify" packages = ["."] - pruneopts = "UT" revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" version = "v1.4.7" [[projects]] - digest = "1:a6910e76d4a4aad1481edf7efb7831bd0aae11dddf2f21e5c3f1fe8c7046d3bd" name = "github.com/go-kit/kit" packages = [ "log", @@ -68,30 +53,24 @@ "metrics", "metrics/discard", "metrics/internal/lv", - "metrics/prometheus", + "metrics/prometheus" ] - pruneopts = 
"UT" revision = "ca4112baa34cb55091301bdc13b1420a122b1b9e" version = "v0.7.0" [[projects]] - digest = "1:31a18dae27a29aa074515e43a443abfd2ba6deb6d69309d8d7ce789c45f34659" name = "github.com/go-logfmt/logfmt" packages = ["."] - pruneopts = "UT" revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" version = "v0.3.0" [[projects]] - digest = "1:c4a2528ccbcabf90f9f3c464a5fc9e302d592861bbfd0b7135a7de8a943d0406" name = "github.com/go-stack/stack" packages = ["."] - pruneopts = "UT" revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc" version = "v1.7.0" [[projects]] - digest = "1:35621fe20f140f05a0c4ef662c26c0ab4ee50bca78aa30fe87d33120bd28165e" name = "github.com/gogo/protobuf" packages = [ "gogoproto", @@ -99,45 +78,37 @@ "proto", "protoc-gen-gogo/descriptor", "sortkeys", - "types", + "types" ] - pruneopts = "UT" revision = "636bf0302bc95575d69441b25a2603156ffdddf1" version = "v1.1.1" [[projects]] - digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260" name = "github.com/golang/protobuf" packages = [ "proto", "ptypes", "ptypes/any", "ptypes/duration", - "ptypes/timestamp", + "ptypes/timestamp" ] - pruneopts = "UT" revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" version = "v1.1.0" [[projects]] branch = "master" - digest = "1:4a0c6bb4805508a6287675fac876be2ac1182539ca8a32468d8128882e9d5009" name = "github.com/golang/snappy" packages = ["."] - pruneopts = "UT" revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" [[projects]] - digest = "1:43dd08a10854b2056e615d1b1d22ac94559d822e1f8b6fcc92c1a1057e85188e" name = "github.com/gorilla/websocket" packages = ["."] - pruneopts = "UT" revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b" version = "v1.2.0" [[projects]] branch = "master" - digest = "1:a361611b8c8c75a1091f00027767f7779b29cb37c456a71b8f2604c88057ab40" name = "github.com/hashicorp/hcl" packages = [ ".", @@ -149,197 +120,153 @@ "hcl/token", "json/parser", "json/scanner", - "json/token", + "json/token" ] - pruneopts = "UT" revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168" [[projects]] - digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" name = "github.com/inconshreveable/mousetrap" packages = ["."] - pruneopts = "UT" revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" version = "v1.0" [[projects]] - digest = "1:39b27d1381a30421f9813967a5866fba35dc1d4df43a6eefe3b7a5444cb07214" name = "github.com/jmhodges/levigo" packages = ["."] - pruneopts = "UT" revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9" [[projects]] branch = "master" - digest = "1:a64e323dc06b73892e5bb5d040ced475c4645d456038333883f58934abbf6f72" name = "github.com/kr/logfmt" packages = ["."] - pruneopts = "UT" revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" [[projects]] - digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7" name = "github.com/magiconair/properties" packages = ["."] - pruneopts = "UT" revision = "c2353362d570a7bfa228149c62842019201cfb71" version = "v1.8.0" [[projects]] - digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] - pruneopts = "UT" revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" version = "v1.0.1" [[projects]] branch = "master" - digest = "1:5ab79470a1d0fb19b041a624415612f8236b3c06070161a910562f2b2d064355" name = "github.com/mitchellh/mapstructure" packages = ["."] - pruneopts = "UT" revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac" [[projects]] - digest = 
"1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e" name = "github.com/pelletier/go-toml" packages = ["."] - pruneopts = "UT" revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194" version = "v1.2.0" [[projects]] - digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747" name = "github.com/pkg/errors" packages = ["."] - pruneopts = "UT" revision = "645ef00459ed84a119197bfb8d8205042c6df63d" version = "v0.8.0" [[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" name = "github.com/pmezard/go-difflib" packages = ["difflib"] - pruneopts = "UT" revision = "792786c7400a136282c1664665ae0a8db921c6c2" version = "v1.0.0" [[projects]] - digest = "1:c1a04665f9613e082e1209cf288bf64f4068dcd6c87a64bf1c4ff006ad422ba0" name = "github.com/prometheus/client_golang" packages = [ "prometheus", - "prometheus/promhttp", + "prometheus/promhttp" ] - pruneopts = "UT" revision = "ae27198cdd90bf12cd134ad79d1366a6cf49f632" [[projects]] branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" name = "github.com/prometheus/client_model" packages = ["go"] - pruneopts = "UT" revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" [[projects]] branch = "master" - digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" name = "github.com/prometheus/common" packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", - "model", + "model" ] - pruneopts = "UT" revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" [[projects]] branch = "master" - digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" name = "github.com/prometheus/procfs" packages = [ ".", "internal/util", "nfs", - "xfs", + "xfs" ] - pruneopts = "UT" revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" [[projects]] - digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c" name = "github.com/rcrowley/go-metrics" packages = ["."] - pruneopts = "UT" revision = "e2704e165165ec55d062f5919b4b29494e9fa790" [[projects]] - digest = "1:bd1ae00087d17c5a748660b8e89e1043e1e5479d0fea743352cda2f8dd8c4f84" name = "github.com/spf13/afero" packages = [ ".", - "mem", + "mem" ] - pruneopts = "UT" revision = "787d034dfe70e44075ccc060d346146ef53270ad" version = "v1.1.1" [[projects]] - digest = "1:516e71bed754268937f57d4ecb190e01958452336fa73dbac880894164e91c1f" name = "github.com/spf13/cast" packages = ["."] - pruneopts = "UT" revision = "8965335b8c7107321228e3e3702cab9832751bac" version = "v1.2.0" [[projects]] - digest = "1:645cabccbb4fa8aab25a956cbcbdf6a6845ca736b2c64e197ca7cbb9d210b939" name = "github.com/spf13/cobra" packages = ["."] - pruneopts = "UT" revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" version = "v0.0.3" [[projects]] branch = "master" - digest = "1:080e5f630945ad754f4b920e60b4d3095ba0237ebf88dc462eb28002932e3805" name = "github.com/spf13/jwalterweatherman" packages = ["."] - pruneopts = "UT" revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394" [[projects]] - digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7" name = "github.com/spf13/pflag" packages = ["."] - pruneopts = "UT" revision = "583c0c0531f06d5278b7d917446061adc344b5cd" version = "v1.0.1" [[projects]] - digest = "1:59e7dceb53b4a1e57eb1eb0bf9951ff0c25912df7660004a789b62b4e8cdca3b" name = "github.com/spf13/viper" packages = ["."] - pruneopts = "UT" revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736" version = "v1.0.2" [[projects]] - digest = 
"1:c40d65817cdd41fac9aa7af8bed56927bb2d6d47e4fea566a74880f5c2b1c41e" name = "github.com/stretchr/testify" packages = [ "assert", - "require", + "require" ] - pruneopts = "UT" revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" version = "v1.2.2" [[projects]] branch = "master" - digest = "1:b3cfb8d82b1601a846417c3f31c03a7961862cb2c98dcf0959c473843e6d9a2b" name = "github.com/syndtr/goleveldb" packages = [ "leveldb", @@ -353,34 +280,28 @@ "leveldb/opt", "leveldb/storage", "leveldb/table", - "leveldb/util", + "leveldb/util" ] - pruneopts = "UT" revision = "c4c61651e9e37fa117f53c5a906d3b63090d8445" [[projects]] branch = "master" - digest = "1:087aaa7920e5d0bf79586feb57ce01c35c830396ab4392798112e8aae8c47722" name = "github.com/tendermint/ed25519" packages = [ ".", "edwards25519", - "extra25519", + "extra25519" ] - pruneopts = "UT" revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057" [[projects]] branch = "jae/writeemptyptr" - digest = "1:2851f999161ce484d09df830edace38654b711228155d476680577f47f2a5bff" name = "github.com/tendermint/go-amino" packages = ["."] - pruneopts = "UT" revision = "8202139066d340b77084a583e176e29fb28b42e9" [[projects]] branch = "master" - digest = "1:c31a37cafc12315b8bd745c8ad6a006ac25350472488162a821e557b3e739d67" name = "golang.org/x/crypto" packages = [ "bcrypt", @@ -396,13 +317,11 @@ "openpgp/errors", "poly1305", "ripemd160", - "salsa20/salsa", + "salsa20/salsa" ] - pruneopts = "UT" revision = "c126467f60eb25f8f27e5a981f32a87e3965053f" [[projects]] - digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1" name = "golang.org/x/net" packages = [ "context", @@ -412,24 +331,20 @@ "idna", "internal/timeseries", "netutil", - "trace", + "trace" ] - pruneopts = "UT" revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f" [[projects]] branch = "master" - digest = "1:5a955fee84608f39d7d6474a62cc8d5ec3f4a311b7f21e79c2ba4e1e16169d34" name = "golang.org/x/sys" packages = [ "cpu", - "unix", + "unix" ] - pruneopts = "UT" revision = "bd9dbc187b6e1dacfdd2722a87e83093c2d7bd6e" [[projects]] - digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" name = "golang.org/x/text" packages = [ "collate", @@ -445,21 +360,17 @@ "unicode/bidi", "unicode/cldr", "unicode/norm", - "unicode/rangetable", + "unicode/rangetable" ] - pruneopts = "UT" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] - digest = "1:cd018653a358d4b743a9d3bee89e825521f2ab2f2ec0770164bf7632d8d73ab7" name = "google.golang.org/genproto" packages = ["googleapis/rpc/status"] - pruneopts = "UT" revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200" [[projects]] - digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74" name = "google.golang.org/grpc" packages = [ ".", @@ -486,68 +397,20 @@ "stats", "status", "tap", - "transport", + "transport" ] - pruneopts = "UT" revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" version = "v1.13.0" [[projects]] - digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202" name = "gopkg.in/yaml.v2" packages = ["."] - pruneopts = "UT" revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" version = "v2.2.1" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - input-imports = [ - "github.com/btcsuite/btcd/btcec", - "github.com/btcsuite/btcutil/base58", - "github.com/btcsuite/btcutil/bech32", - "github.com/ebuchman/fail-test", - "github.com/fortytw2/leaktest", - "github.com/go-kit/kit/log", - "github.com/go-kit/kit/log/level", - "github.com/go-kit/kit/log/term", - 
"github.com/go-kit/kit/metrics", - "github.com/go-kit/kit/metrics/discard", - "github.com/go-kit/kit/metrics/prometheus", - "github.com/go-logfmt/logfmt", - "github.com/gogo/protobuf/gogoproto", - "github.com/gogo/protobuf/jsonpb", - "github.com/gogo/protobuf/proto", - "github.com/golang/protobuf/proto", - "github.com/gorilla/websocket", - "github.com/jmhodges/levigo", - "github.com/pkg/errors", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "github.com/rcrowley/go-metrics", - "github.com/spf13/cobra", - "github.com/spf13/viper", - "github.com/stretchr/testify/assert", - "github.com/stretchr/testify/require", - "github.com/syndtr/goleveldb/leveldb", - "github.com/syndtr/goleveldb/leveldb/errors", - "github.com/syndtr/goleveldb/leveldb/iterator", - "github.com/syndtr/goleveldb/leveldb/opt", - "github.com/tendermint/ed25519", - "github.com/tendermint/ed25519/extra25519", - "github.com/tendermint/go-amino", - "golang.org/x/crypto/bcrypt", - "golang.org/x/crypto/chacha20poly1305", - "golang.org/x/crypto/hkdf", - "golang.org/x/crypto/nacl/box", - "golang.org/x/crypto/nacl/secretbox", - "golang.org/x/crypto/openpgp/armor", - "golang.org/x/crypto/ripemd160", - "golang.org/x/net/context", - "golang.org/x/net/netutil", - "google.golang.org/grpc", - "google.golang.org/grpc/credentials", - ] + inputs-digest = "cb44aec2727610e0547ee75e2b4602266d85026bb47747f4fb8bdcef4709bdd1" solver-name = "gps-cdcl" solver-version = 1 diff --git a/consensus/common_test.go b/consensus/common_test.go index 643185ea..e6033537 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -7,7 +7,6 @@ import ( "io/ioutil" "os" "path" - "reflect" "sort" "sync" "testing" @@ -18,14 +17,14 @@ import ( bc "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" cstypes "github.com/tendermint/tendermint/consensus/types" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/privval" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" - dbm "github.com/tendermint/tendermint/libs/db" - "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" @@ -326,30 +325,6 @@ func ensureNewStep(stepCh <-chan interface{}) { } } -func ensureVote(voteCh chan interface{}, height int64, round int, voteType byte) { - timer := time.NewTimer(ensureTimeout) - select { - case <-timer.C: - break - case v := <-voteCh: - edv, ok := v.(types.EventDataVote) - if !ok { - panic(fmt.Sprintf("expected a *types.Vote, got %v. 
wrong subscription channel?", - reflect.TypeOf(v))) - } - vote := edv.Vote - if vote.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height)) - } - if vote.Round != round { - panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round)) - } - if vote.Type != voteType { - panic(fmt.Sprintf("expected type %v, got %v", voteType, vote.Type)) - } - } -} - //------------------------------------------------------------------------------- // consensus nets diff --git a/consensus/state.go b/consensus/state.go index 031f5f82..add87691 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1014,11 +1014,9 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) { logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } - /* - if !cs.Votes.Prevotes(round).HasTwoThirdsAny() { - cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) - } - */ + if !cs.Votes.Prevotes(round).HasTwoThirdsAny() { + cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) + } logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { @@ -1587,7 +1585,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, if prevotes.HasTwoThirdsMajority() { cs.enterPrecommit(height, vote.Round) } else { - cs.enterPropose(height, vote.Round) // we can't prevote until we wait for the proposal. + cs.enterPrevote(height, vote.Round) // if the vote is ahead of us cs.enterPrevoteWait(height, vote.Round) } } else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round { @@ -1603,9 +1601,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, blockID, ok := precommits.TwoThirdsMajority() if ok { if len(blockID.Hash) == 0 { - cs.enterNewRound(height, vote.Round) - cs.enterPrecommit(height, vote.Round) - cs.enterPrecommitWait(height, vote.Round) + cs.enterNewRound(height, vote.Round+1) } else { cs.enterNewRound(height, vote.Round) cs.enterPrecommit(height, vote.Round) @@ -1621,8 +1617,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, } } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() { cs.enterNewRound(height, vote.Round) - cs.enterPrevote(height, vote.Round) - cs.enterPrevoteWait(height, vote.Round) + cs.enterPrecommit(height, vote.Round) cs.enterPrecommitWait(height, vote.Round) } default: diff --git a/consensus/state_test.go b/consensus/state_test.go index 425f6fad..6a14e17b 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -64,22 +64,22 @@ func TestStateProposerSelection0(t *testing.T) { startTestRound(cs1, height, round) - // Wait for new round so proposer is set. + // wait for new round so proposer is set <-newRoundCh - // Commit a block and ensure proposer for the next height is correct. + // lets commit a block and ensure proposer for the next height is correct prop := cs1.GetRoundState().Validators.GetProposer() if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) { t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address) } - // Wait for complete proposal. + // wait for complete proposal <-proposalCh rs := cs1.GetRoundState() signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...) 
- // Wait for new round so next validator is set. + // wait for new round so next validator is set <-newRoundCh prop = cs1.GetRoundState().Validators.GetProposer() @@ -718,8 +718,6 @@ func TestStateLockPOLUnlock(t *testing.T) { func TestStateLockPOLSafety1(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - h := cs1.GetRoundState().Height - r := cs1.GetRoundState().Round partSize := cs1.state.ConsensusParams.BlockPartSizeBytes @@ -736,7 +734,7 @@ func TestStateLockPOLSafety1(t *testing.T) { rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) propBlock := rs.ProposalBlock - ensureVote(voteCh, h, r, types.VoteTypePrevote) + <-voteCh // prevote validatePrevote(t, cs1, 0, vss[0], propBlock.Hash()) @@ -757,11 +755,6 @@ func TestStateLockPOLSafety1(t *testing.T) { // we do see them precommit nil signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) - ensureVote(voteCh, h, r, types.VoteTypePrecommit) - - <-newRoundCh - t.Log("### ONTO ROUND 1") - prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) propBlockHash := propBlock.Hash() propBlockParts := propBlock.MakePartSet(partSize) @@ -772,6 +765,9 @@ func TestStateLockPOLSafety1(t *testing.T) { if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } + + <-newRoundCh + t.Log("### ONTO ROUND 1") /*Round2 // we timeout and prevote our lock // a polka happened but we didn't see it! @@ -792,13 +788,13 @@ func TestStateLockPOLSafety1(t *testing.T) { } t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash)) // go to prevote, prevote for proposal block - ensureVote(voteCh, h, r+1, types.VoteTypePrevote) + <-voteCh validatePrevote(t, cs1, 1, vss[0], propBlockHash) // now we see the others prevote for it, so we should lock on it signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - ensureVote(voteCh, h, r+1, types.VoteTypePrecommit) + <-voteCh // precommit // we should have precommitted validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash) @@ -820,7 +816,7 @@ func TestStateLockPOLSafety1(t *testing.T) { <-timeoutProposeCh // finish prevote - ensureVote(voteCh, h, r+2, types.VoteTypePrevote) + <-voteCh // we should prevote what we're locked on validatePrevote(t, cs1, 2, vss[0], propBlockHash) From e719a93d1d1490a5f724978c09c8d084c0790c21 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 2 Aug 2018 03:10:50 -0700 Subject: [PATCH 24/27] Addressed review for #1815 except those marked as 'TODO make issue' --- CHANGELOG.md | 2 +- Gopkg.lock | 4 +- cmd/tendermint/commands/lite.go | 6 +- lite/{base_certifier.go => base_verifier.go} | 30 +++---- ...ertifier_test.go => base_verifier_test.go} | 6 +- lite/client/provider.go | 8 +- lite/client/provider_test.go | 12 --- lite/commit.go | 2 +- lite/dbprovider.go | 64 +++++++------ lite/doc.go | 18 ++-- ...iring_certifier.go => dynamic_verifier.go} | 90 +++++++++---------- ...ifier_test.go => dynamic_verifier_test.go} | 8 +- lite/errors/errors.go | 16 ++-- lite/multiprovider.go | 4 +- lite/proxy/query.go | 8 +- lite/proxy/query_test.go | 4 +- lite/proxy/{certifier.go => verifier.go} | 8 +- lite/proxy/wrapper.go | 8 +- lite/types.go | 6 +- types/block.go | 2 +- types/validator_set.go | 5 ++ 21 files changed, 154 insertions(+), 157 deletions(-) rename lite/{base_certifier.go => base_verifier.go} (63%) rename lite/{base_certifier_test.go => base_verifier_test.go} (89%) rename lite/{inquiring_certifier.go 
=> dynamic_verifier.go} (64%) rename lite/{inquiring_certifier_test.go => dynamic_verifier_test.go} (95%) rename lite/proxy/{certifier.go => verifier.go} (73%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 27d5656b..99c51a04 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -556,7 +556,7 @@ BREAKING CHANGES: - use scripts/wal2json to convert to json for debugging FEATURES: - - new `certifiers` pkg contains the tendermint light-client library (name subject to change)! + - new `Verifiers` pkg contains the tendermint light-client library (name subject to change)! - rpc: `/genesis` includes the `app_options` . - rpc: `/abci_query` takes an additional `height` parameter to support historical queries. - rpc/client: new ABCIQueryWithOptions supports options like `trusted` (set false to get a proof) and `height` to query a historical height. diff --git a/Gopkg.lock b/Gopkg.lock index bf98a0af..31d04b36 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -11,7 +11,7 @@ branch = "master" name = "github.com/btcsuite/btcd" packages = ["btcec"] - revision = "9a2f9524024889e129a5422aca2cff73cb3eabf6" + revision = "f5e261fc9ec3437697fb31d8b38453c293204b29" [[projects]] name = "github.com/btcsuite/btcutil" @@ -342,7 +342,7 @@ "cpu", "unix" ] - revision = "bd9dbc187b6e1dacfdd2722a87e83093c2d7bd6e" + revision = "3dc4335d56c789b04b0ba99b7a37249d9b614314" [[projects]] name = "golang.org/x/text" diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go index 14d584b7..edad4fbb 100644 --- a/cmd/tendermint/commands/lite.go +++ b/cmd/tendermint/commands/lite.go @@ -68,10 +68,10 @@ func runProxy(cmd *cobra.Command, args []string) error { logger.Info("Connecting to source HTTP client...") node := rpcclient.NewHTTP(nodeAddr, "/websocket") - logger.Info("Constructing certifier...") - cert, err := proxy.NewCertifier(chainID, home, node, logger) + logger.Info("Constructing Verifier...") + cert, err := proxy.NewVerifier(chainID, home, node, logger) if err != nil { - return cmn.ErrorWrap(err, "constructing certifier") + return cmn.ErrorWrap(err, "constructing Verifier") } cert.SetLogger(logger) sc := proxy.SecureClient(node, cert) diff --git a/lite/base_certifier.go b/lite/base_verifier.go similarity index 63% rename from lite/base_certifier.go rename to lite/base_verifier.go index 0f9faba3..e60d3953 100644 --- a/lite/base_certifier.go +++ b/lite/base_verifier.go @@ -3,48 +3,48 @@ package lite import ( "bytes" + cmn "github.com/tendermint/tendermint/libs/common" lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) -var _ Certifier = (*BaseCertifier)(nil) +var _ Verifier = (*BaseVerifier)(nil) -// BaseCertifier lets us check the validity of SignedHeaders at height or +// BaseVerifier lets us check the validity of SignedHeaders at height or // later, requiring sufficient votes (> 2/3) from the given valset. // To certify blocks produced by a blockchain with mutable validator sets, -// use the InquiringCertifier. +// use the DynamicVerifier. // TODO: Handle unbonding time. -type BaseCertifier struct { +type BaseVerifier struct { chainID string height int64 valset *types.ValidatorSet } -// NewBaseCertifier returns a new certifier initialized with a validator set at +// NewBaseVerifier returns a new Verifier initialized with a validator set at // some height. 
-func NewBaseCertifier(chainID string, height int64, valset *types.ValidatorSet) *BaseCertifier { - if valset == nil || len(valset.Hash()) == 0 { - panic("NewBaseCertifier requires a valid valset") +func NewBaseVerifier(chainID string, height int64, valset *types.ValidatorSet) *BaseVerifier { + if valset.IsNilOrEmpty() { + panic("NewBaseVerifier requires a valid valset") } - return &BaseCertifier{ + return &BaseVerifier{ chainID: chainID, height: height, valset: valset, } } -// Implements Certifier. -func (bc *BaseCertifier) ChainID() string { +// Implements Verifier. +func (bc *BaseVerifier) ChainID() string { return bc.chainID } -// Implements Certifier. -func (bc *BaseCertifier) Certify(signedHeader types.SignedHeader) error { +// Implements Verifier. +func (bc *BaseVerifier) Certify(signedHeader types.SignedHeader) error { // We can't certify commits older than bc.height. if signedHeader.Height < bc.height { - return cmn.NewError("BaseCertifier height is %v, cannot certify height %v", + return cmn.NewError("BaseVerifier height is %v, cannot certify height %v", bc.height, signedHeader.Height) } diff --git a/lite/base_certifier_test.go b/lite/base_verifier_test.go similarity index 89% rename from lite/base_certifier_test.go rename to lite/base_verifier_test.go index 20342c90..dab7885f 100644 --- a/lite/base_certifier_test.go +++ b/lite/base_verifier_test.go @@ -10,16 +10,14 @@ import ( ) func TestBaseCert(t *testing.T) { - // assert, require := assert.New(t), require.New(t) assert := assert.New(t) - // require := require.New(t) keys := genPrivKeys(4) // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! vals := keys.ToValidators(20, 10) - // and a certifier based on our known set + // and a Verifier based on our known set chainID := "test-static" - cert := NewBaseCertifier(chainID, 2, vals) + cert := NewBaseVerifier(chainID, 2, vals) cases := []struct { keys privKeys diff --git a/lite/client/provider.go b/lite/client/provider.go index 8087be71..e0c0a331 100644 --- a/lite/client/provider.go +++ b/lite/client/provider.go @@ -8,12 +8,12 @@ package client import ( "fmt" + log "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/lite" lerr "github.com/tendermint/tendermint/lite/errors" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - log "github.com/tendermint/tendermint/libs/log" ) // SignStatusClient combines a SignClient and StatusClient. @@ -106,12 +106,10 @@ func (p *provider) getValidatorSet(chainID string, height int64) (valset *types. err = fmt.Errorf("expected height >= 1, got height %v", height) return } - heightPtr := new(int64) - *heightPtr = height - res, err := p.client.Validators(heightPtr) + res, err := p.client.Validators(&height) if err != nil { // TODO pass through other types of errors. - return nil, lerr.ErrMissingValidators(chainID, height) + return nil, lerr.ErrUnknownValidators(chainID, height) } valset = types.NewValidatorSet(res.Validators) return diff --git a/lite/client/provider_test.go b/lite/client/provider_test.go index f4da423f..d8704a52 100644 --- a/lite/client/provider_test.go +++ b/lite/client/provider_test.go @@ -13,7 +13,6 @@ import ( "github.com/tendermint/tendermint/types" ) -// TODO fix tests!! 
func TestMain(m *testing.M) { app := kvstore.NewKVStoreApplication() node := rpctest.StartTendermint(app) @@ -59,15 +58,4 @@ func TestProvider(t *testing.T) { assert.Nil(err, "%+v", err) assert.Equal(lower, fc.Height()) - /* - // also get by hash (given the match) - fc, err = p.GetByHash(vhash) - require.Nil(err, "%+v", err) - require.Equal(vhash, fc.Header.ValidatorsHash) - - // get by hash fails without match - fc, err = p.GetByHash([]byte("foobar")) - assert.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - */ } diff --git a/lite/commit.go b/lite/commit.go index 89f04417..25efb8dc 100644 --- a/lite/commit.go +++ b/lite/commit.go @@ -11,7 +11,7 @@ import ( // FullCommit is a signed header (the block header and a commit that signs it), // the validator set which signed the commit, and the next validator set. The // next validator set (which is proven from the block header) allows us to -// revert to block-by-block updating of lite certifier's latest validator set, +// revert to block-by-block updating of lite Verifier's latest validator set, // even in the face of arbitrarily large power changes. type FullCommit struct { SignedHeader types.SignedHeader `json:"signed_header"` diff --git a/lite/dbprovider.go b/lite/dbprovider.go index 8392fcea..cab695b4 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -22,7 +22,10 @@ type DBProvider struct { } func NewDBProvider(label string, db dbm.DB) *DBProvider { + + // NOTE: when debugging, this type of construction might be useful. //db = dbm.NewDebugDB("db provider "+cmn.RandStr(4), db) + cdc := amino.NewCodec() cryptoAmino.RegisterAmino(cdc) dbp := &DBProvider{ @@ -127,8 +130,8 @@ func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int dbp.logger.Info("DBProvider.LatestFullCommit() found latest.", "height", lfc.Height()) return lfc, nil } else { - dbp.logger.Info("DBProvider.LatestFullCommit() got error", "lfc", lfc) - dbp.logger.Info(fmt.Sprintf("%+v", err)) + dbp.logger.Error("DBProvider.LatestFullCommit() got error", "lfc", lfc) + dbp.logger.Error(fmt.Sprintf("%+v", err)) return lfc, err } } @@ -144,14 +147,19 @@ func (dbp *DBProvider) ValidatorSet(chainID string, height int64) (valset *types func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { vsBz := dbp.db.Get(validatorSetKey(chainID, height)) if vsBz == nil { - err = lerr.ErrMissingValidators(chainID, height) + err = lerr.ErrUnknownValidators(chainID, height) return } err = dbp.cdc.UnmarshalBinary(vsBz, &valset) if err != nil { return } - valset.TotalVotingPower() // to test deep equality. + + // To test deep equality. This makes it easier to test for e.g. valset + // equivalence using assert.Equal (tests for deep equality) in our tests, + // which also tests for unexported/private field equivalence. 
+ valset.TotalVotingPower() + return } @@ -209,52 +217,52 @@ func (dbp *DBProvider) deleteAfterN(chainID string, after int) error { itr.Next() } - dbp.logger.Info(fmt.Sprintf("DBProvider.deleteAfterN() deleted %v items\n", numDeleted)) + dbp.logger.Info(fmt.Sprintf("DBProvider.deleteAfterN() deleted %v items", numDeleted)) return nil } //---------------------------------------- +// key encoding func signedHeaderKey(chainID string, height int64) []byte { return []byte(fmt.Sprintf("%s/%010d/sh", chainID, height)) } -var signedHeaderKeyPattern = regexp.MustCompile(`([^/]+)/([0-9]*)/sh`) - -func parseSignedHeaderKey(key []byte) (chainID string, height int64, ok bool) { - submatch := signedHeaderKeyPattern.FindSubmatch(key) - if submatch == nil { - return "", 0, false - } - chainID = string(submatch[1]) - heightStr := string(submatch[2]) - heightInt, err := strconv.Atoi(heightStr) - if err != nil { - return "", 0, false - } - height = int64(heightInt) - ok = true // good! - return -} - func validatorSetKey(chainID string, height int64) []byte { return []byte(fmt.Sprintf("%s/%010d/vs", chainID, height)) } -var chainKeyPrefixPattern = regexp.MustCompile(`([^/]+)/([0-9]*)/`) +//---------------------------------------- +// key parsing -func parseChainKeyPrefix(key []byte) (chainID string, height int64, ok bool) { - submatch := chainKeyPrefixPattern.FindSubmatch(key) +var keyPattern = regexp.MustCompile(`^([^/]+)/([0-9]*)/(.*)$`) + +func parseKey(key []byte) (chainID string, height int64, part string, ok bool) { + submatch := keyPattern.FindSubmatch(key) if submatch == nil { - return "", 0, false + return "", 0, "", false } chainID = string(submatch[1]) heightStr := string(submatch[2]) heightInt, err := strconv.Atoi(heightStr) if err != nil { - return "", 0, false + return "", 0, "", false } height = int64(heightInt) + part = string(submatch[3]) ok = true // good! return } + +func parseSignedHeaderKey(key []byte) (chainID string, height int64, ok bool) { + chainID, height, part, ok := parseKey(key) + if part != "sh" { + return "", 0, false + } + return chainID, height, true +} + +func parseChainKeyPrefix(key []byte) (chainID string, height int64, ok bool) { + chainID, height, _, ok = parseKey(key) + return chainID, height, true +} diff --git a/lite/doc.go b/lite/doc.go index 07977ebe..59f77056 100644 --- a/lite/doc.go +++ b/lite/doc.go @@ -35,29 +35,29 @@ change on the chain. In practice, most applications will not have frequent drastic updates to the validator set, so the logic defined in this package for lite client syncing is optimized to use intelligent bisection and block-skipping for efficient sourcing and verification of these data structures -and updates to the validator set (see the InquiringCertifier for more +and updates to the validator set (see the DynamicVerifier for more information). The FullCommit is also declared in this package as a convenience structure, which includes the SignedHeader along with the full current and next ValidatorSets. -## Certifier +## Verifier -A Certifier validates a new SignedHeader given the currently known state. There -are two different types of Certifiers provided. +A Verifier validates a new SignedHeader given the currently known state. There +are two different types of Verifiers provided. 
-BaseCertifier - given a validator set and a height, this Certifier verifies +BaseVerifier - given a validator set and a height, this Verifier verifies that > 2/3 of the voting power of the given validator set had signed the SignedHeader, and that the SignedHeader was to be signed by the exact given validator set, and that the height of the commit is at least height (or greater). SignedHeader.Commit may be signed by a different validator set, it can get -certified with a BaseCertifier as long as sufficient signatures from the +certified with a BaseVerifier as long as sufficient signatures from the previous validator set are present in the commit. -InquiringCertifier - this certifier implements an auto-update and persistence +DynamicVerifier - this Verifier implements an auto-update and persistence strategy to certify any SignedHeader of the blockchain. ## Provider and PersistentProvider @@ -77,7 +77,7 @@ type Provider interface { * client.NewHTTPProvider - query Tendermint rpc. A PersistentProvider is a Provider that also allows for saving state. This is -used by the InquiringCertifier for persistence. +used by the DynamicVerifier for persistence. ```go type PersistentProvider interface { @@ -131,7 +131,7 @@ important to verify that you have the proper validator set when initializing the client, as that is the root of all trust. The software currently assumes that the unbonding period is infinite in -duration. If the InquiringCertifier hasn't been updated in a while, you should +duration. If the DynamicVerifier hasn't been updated in a while, you should manually verify the block headers using other sources. TODO: Update the software to handle cases around the unbonding period. diff --git a/lite/inquiring_certifier.go b/lite/dynamic_verifier.go similarity index 64% rename from lite/inquiring_certifier.go rename to lite/dynamic_verifier.go index 31637447..3d1a70f2 100644 --- a/lite/inquiring_certifier.go +++ b/lite/dynamic_verifier.go @@ -3,18 +3,18 @@ package lite import ( "bytes" + log "github.com/tendermint/tendermint/libs/log" lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" - log "github.com/tendermint/tendermint/libs/log" ) -var _ Certifier = (*InquiringCertifier)(nil) +var _ Verifier = (*DynamicVerifier)(nil) -// InquiringCertifier implements an auto-updating certifier. It uses a +// DynamicVerifier implements an auto-updating Verifier. It uses a // "source" provider to obtain the needed FullCommits to securely sync with // validator set changes. It stores properly validated data on the // "trusted" local system. -type InquiringCertifier struct { +type DynamicVerifier struct { logger log.Logger chainID string // These are only properly validated data, from local system. @@ -23,14 +23,14 @@ type InquiringCertifier struct { source Provider } -// NewInquiringCertifier returns a new InquiringCertifier. It uses the +// NewDynamicVerifier returns a new DynamicVerifier. It uses the // trusted provider to store validated data and the source provider to // obtain missing data (e.g. FullCommits). // // The trusted provider should a CacheProvider, MemProvider or // files.Provider. The source provider should be a client.HTTPProvider. 
-func NewInquiringCertifier(chainID string, trusted PersistentProvider, source Provider) *InquiringCertifier { - return &InquiringCertifier{ +func NewDynamicVerifier(chainID string, trusted PersistentProvider, source Provider) *DynamicVerifier { + return &DynamicVerifier{ logger: log.NewNopLogger(), chainID: chainID, trusted: trusted, @@ -38,64 +38,64 @@ func NewInquiringCertifier(chainID string, trusted PersistentProvider, source Pr } } -func (ic *InquiringCertifier) SetLogger(logger log.Logger) { +func (ic *DynamicVerifier) SetLogger(logger log.Logger) { logger = logger.With("module", "lite") ic.logger = logger ic.trusted.SetLogger(logger) ic.source.SetLogger(logger) } -// Implements Certifier. -func (ic *InquiringCertifier) ChainID() string { +// Implements Verifier. +func (ic *DynamicVerifier) ChainID() string { return ic.chainID } -// Implements Certifier. +// Implements Verifier. // -// If the validators have changed since the last know time, it looks to +// If the validators have changed since the last known time, it looks to // ic.trusted and ic.source to prove the new validators. On success, it will // try to store the SignedHeader in ic.trusted if the next // validator can be sourced. -func (ic *InquiringCertifier) Certify(shdr types.SignedHeader) error { +func (ic *DynamicVerifier) Certify(shdr types.SignedHeader) error { // Get the latest known full commit <= h-1 from our trusted providers. // The full commit at h-1 contains the valset to sign for h. h := shdr.Height - 1 - tfc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h) + trustedFC, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h) if err != nil { return err } - if tfc.Height() == h { + if trustedFC.Height() == h { // Return error if valset doesn't match. if !bytes.Equal( - tfc.NextValidators.Hash(), + trustedFC.NextValidators.Hash(), shdr.Header.ValidatorsHash) { return lerr.ErrUnexpectedValidators( - tfc.NextValidators.Hash(), + trustedFC.NextValidators.Hash(), shdr.Header.ValidatorsHash) } } else { // If valset doesn't match... - if !bytes.Equal(tfc.NextValidators.Hash(), + if !bytes.Equal(trustedFC.NextValidators.Hash(), shdr.Header.ValidatorsHash) { // ... update. - tfc, err = ic.updateToHeight(h) + trustedFC, err = ic.updateToHeight(h) if err != nil { return err } // Return error if valset _still_ doesn't match. - if !bytes.Equal(tfc.NextValidators.Hash(), + if !bytes.Equal(trustedFC.NextValidators.Hash(), shdr.Header.ValidatorsHash) { return lerr.ErrUnexpectedValidators( - tfc.NextValidators.Hash(), + trustedFC.NextValidators.Hash(), shdr.Header.ValidatorsHash) } } } // Certify the signed header using the matching valset. - cert := NewBaseCertifier(ic.chainID, tfc.Height()+1, tfc.NextValidators) + cert := NewBaseVerifier(ic.chainID, trustedFC.Height()+1, trustedFC.NextValidators) err = cert.Certify(shdr) if err != nil { return err @@ -103,7 +103,7 @@ func (ic *InquiringCertifier) Certify(shdr types.SignedHeader) error { // Get the next validator set. nextValset, err := ic.source.ValidatorSet(ic.chainID, shdr.Height+1) - if lerr.IsErrMissingValidators(err) { + if lerr.IsErrUnknownValidators(err) { // Ignore this error. return nil } else if err != nil { @@ -113,7 +113,7 @@ func (ic *InquiringCertifier) Certify(shdr types.SignedHeader) error { // Create filled FullCommit. nfc := FullCommit{ SignedHeader: shdr, - Validators: tfc.NextValidators, + Validators: trustedFC.NextValidators, NextValidators: nextValset, } // Validate the full commit. 
This checks the cryptographic @@ -127,22 +127,22 @@ func (ic *InquiringCertifier) Certify(shdr types.SignedHeader) error { // verifyAndSave will verify if this is a valid source full commit given the // best match trusted full commit, and if good, persist to ic.trusted. -// Returns ErrTooMuchChange when >2/3 of tfc did not sign sfc. -// Panics if tfc.Height() >= sfc.Height(). -func (ic *InquiringCertifier) verifyAndSave(tfc, sfc FullCommit) error { - if tfc.Height() >= sfc.Height() { +// Returns ErrTooMuchChange when >2/3 of trustedFC did not sign sourceFC. +// Panics if trustedFC.Height() >= sourceFC.Height(). +func (ic *DynamicVerifier) verifyAndSave(trustedFC, sourceFC FullCommit) error { + if trustedFC.Height() >= sourceFC.Height() { panic("should not happen") } - err := tfc.NextValidators.VerifyFutureCommit( - sfc.Validators, - ic.chainID, sfc.SignedHeader.Commit.BlockID, - sfc.SignedHeader.Height, sfc.SignedHeader.Commit, + err := trustedFC.NextValidators.VerifyFutureCommit( + sourceFC.Validators, + ic.chainID, sourceFC.SignedHeader.Commit.BlockID, + sourceFC.SignedHeader.Height, sourceFC.SignedHeader.Commit, ) if err != nil { return err } - return ic.trusted.SaveFullCommit(sfc) + return ic.trusted.SaveFullCommit(sourceFC) } // updateToHeight will use divide-and-conquer to find a path to h. @@ -150,48 +150,48 @@ func (ic *InquiringCertifier) verifyAndSave(tfc, sfc FullCommit) error { // for height h, using repeated applications of bisection if necessary. // // Returns ErrCommitNotFound if source provider doesn't have the commit for h. -func (ic *InquiringCertifier) updateToHeight(h int64) (FullCommit, error) { +func (ic *DynamicVerifier) updateToHeight(h int64) (FullCommit, error) { // Fetch latest full commit from source. - sfc, err := ic.source.LatestFullCommit(ic.chainID, h, h) + sourceFC, err := ic.source.LatestFullCommit(ic.chainID, h, h) if err != nil { return FullCommit{}, err } // Validate the full commit. This checks the cryptographic // signatures of Commit against Validators. - if err := sfc.ValidateFull(ic.chainID); err != nil { + if err := sourceFC.ValidateFull(ic.chainID); err != nil { return FullCommit{}, err } - // If sfc.Height() != h, we can't do it. - if sfc.Height() != h { + // If sourceFC.Height() != h, we can't do it. + if sourceFC.Height() != h { return FullCommit{}, lerr.ErrCommitNotFound() } FOR_LOOP: for { // Fetch latest full commit from trusted. - tfc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h) + trustedFC, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h) if err != nil { return FullCommit{}, err } // We have nothing to do. - if tfc.Height() == h { - return tfc, nil + if trustedFC.Height() == h { + return trustedFC, nil } // Try to update to full commit with checks. - err = ic.verifyAndSave(tfc, sfc) + err = ic.verifyAndSave(trustedFC, sourceFC) if err == nil { // All good! - return sfc, nil + return sourceFC, nil } // Handle special case when err is ErrTooMuchChange. if lerr.IsErrTooMuchChange(err) { // Divide and conquer. 
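			// (Rough sketch of the strategy, per the updateToHeight doc above:
			// presumably pick an intermediate height between trustedFC.Height() and
			// sourceFC.Height(), update to it recursively, then loop and retry;
			// repeated bisection eventually bridges a large validator-set change.)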
- start, end := tfc.Height(), sfc.Height() + start, end := trustedFC.Height(), sourceFC.Height() if !(start < end) { panic("should not happen") } @@ -207,7 +207,7 @@ FOR_LOOP: } } -func (ic *InquiringCertifier) LastTrustedHeight() int64 { +func (ic *DynamicVerifier) LastTrustedHeight() int64 { fc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, 1<<63-1) if err != nil { panic("should not happen") diff --git a/lite/inquiring_certifier_test.go b/lite/dynamic_verifier_test.go similarity index 95% rename from lite/inquiring_certifier_test.go rename to lite/dynamic_verifier_test.go index 5eb63727..74e2d55a 100644 --- a/lite/inquiring_certifier_test.go +++ b/lite/dynamic_verifier_test.go @@ -41,10 +41,10 @@ func TestInquirerValidPath(t *testing.T) { nkeys = nkeys.Extend(1) } - // Initialize a certifier with the initial state. + // Initialize a Verifier with the initial state. err := trust.SaveFullCommit(fcz[0]) require.Nil(err) - cert := NewInquiringCertifier(chainID, trust, source) + cert := NewDynamicVerifier(chainID, trust, source) cert.SetLogger(log.TestingLogger()) // This should fail validation: @@ -99,10 +99,10 @@ func TestInquirerVerifyHistorical(t *testing.T) { nkeys = nkeys.Extend(1) } - // Initialize a certifier with the initial state. + // Initialize a Verifier with the initial state. err := trust.SaveFullCommit(fcz[0]) require.Nil(err) - cert := NewInquiringCertifier(chainID, trust, source) + cert := NewDynamicVerifier(chainID, trust, source) cert.SetLogger(log.TestingLogger()) // Store a few full commits as trust. diff --git a/lite/errors/errors.go b/lite/errors/errors.go index 96a5a02a..61426b23 100644 --- a/lite/errors/errors.go +++ b/lite/errors/errors.go @@ -31,12 +31,12 @@ func (e errTooMuchChange) Error() string { return "Insufficient signatures to validate due to valset changes" } -type errMissingValidators struct { +type errUnknownValidators struct { chainID string height int64 } -func (e errMissingValidators) Error() string { +func (e errUnknownValidators) Error() string { return fmt.Sprintf("Validators are unknown or missing for chain %s and height %d", e.chainID, e.height) } @@ -96,16 +96,16 @@ func IsErrTooMuchChange(err error) bool { } //----------------- -// ErrMissingValidators +// ErrUnknownValidators -// ErrMissingValidators indicates that some validator set was missing or unknown. -func ErrMissingValidators(chainID string, height int64) error { - return cmn.ErrorWrap(errMissingValidators{chainID, height}, "") +// ErrUnknownValidators indicates that some validator set was missing or unknown. 
+func ErrUnknownValidators(chainID string, height int64) error { + return cmn.ErrorWrap(errUnknownValidators{chainID, height}, "") } -func IsErrMissingValidators(err error) bool { +func IsErrUnknownValidators(err error) bool { if err_, ok := err.(cmn.Error); ok { - _, ok := err_.Data().(errMissingValidators) + _, ok := err_.Data().(errUnknownValidators) return ok } return false diff --git a/lite/multiprovider.go b/lite/multiprovider.go index 991a12d7..734d042c 100644 --- a/lite/multiprovider.go +++ b/lite/multiprovider.go @@ -1,9 +1,9 @@ package lite import ( + log "github.com/tendermint/tendermint/libs/log" lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" - log "github.com/tendermint/tendermint/libs/log" ) // multiProvider allows you to place one or more caches in front of a source @@ -79,5 +79,5 @@ func (mc *multiProvider) ValidatorSet(chainID string, height int64) (valset *typ return valset, nil } } - return nil, lerr.ErrMissingValidators(chainID, height) + return nil, lerr.ErrUnknownValidators(chainID, height) } diff --git a/lite/proxy/query.go b/lite/proxy/query.go index a7132223..6f5a2899 100644 --- a/lite/proxy/query.go +++ b/lite/proxy/query.go @@ -28,13 +28,13 @@ type KeyProof interface { } // GetWithProof will query the key on the given node, and verify it has -// a valid proof, as defined by the certifier. +// a valid proof, as defined by the Verifier. // // If there is any error in checking, returns an error. // If val is non-empty, proof should be KeyExistsProof // If val is empty, proof should be KeyMissingProof func GetWithProof(key []byte, reqHeight int64, node rpcclient.Client, - cert lite.Certifier) ( + cert lite.Verifier) ( val cmn.HexBytes, height int64, proof KeyProof, err error) { if reqHeight < 0 { @@ -54,7 +54,7 @@ func GetWithProof(key []byte, reqHeight int64, node rpcclient.Client, // GetWithProofOptions is useful if you want full access to the ABCIQueryOptions func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOptions, - node rpcclient.Client, cert lite.Certifier) ( + node rpcclient.Client, cert lite.Verifier) ( *ctypes.ResultABCIQuery, KeyProof, error) { _resp, err := node.ABCIQueryWithOptions(path, key, opts) @@ -128,7 +128,7 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption // GetCertifiedCommit gets the signed header for a given height and certifies // it. Returns error if unable to get a proven header. -func GetCertifiedCommit(h int64, client rpcclient.Client, cert lite.Certifier) (types.SignedHeader, error) { +func GetCertifiedCommit(h int64, client rpcclient.Client, cert lite.Verifier) (types.SignedHeader, error) { // FIXME: cannot use cert.GetByHeight for now, as it also requires // Validators and will fail on querying tendermint for non-current height. 
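To make the renamed API concrete, here is a minimal caller sketch wiring together the pieces this patch touches (`proxy.NewVerifier`, `proxy.GetWithProof`, `proxy.SecureClient`, `rpcclient.NewHTTP`). The chain ID, root directory, node address, and key are placeholder assumptions, error handling is reduced to panics, and a reachable full node is assumed; this is an illustration of the intended wiring rather than a definitive example.

```go
package main

import (
	"fmt"

	log "github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/lite/proxy"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
)

func main() {
	logger := log.NewNopLogger()

	// Untrusted full node to query (address is a placeholder).
	node := rpcclient.NewHTTP("tcp://localhost:26657", "/websocket")

	// DynamicVerifier with trusted state cached in memory and persisted under the root dir.
	cert, err := proxy.NewVerifier("my-chain", "/tmp/tendermint-lite", node, logger)
	if err != nil {
		panic(err)
	}
	cert.SetLogger(logger)

	// Query a single key; the result is only accepted together with a proof
	// checked against a header certified by the Verifier.
	val, height, _, err := proxy.GetWithProof([]byte("my-key"), 0, node, cert)
	if err != nil {
		panic(err)
	}
	fmt.Println("value:", val, "proven at height:", height)

	// Alternatively, wrap the whole RPC client so every provable response is verified.
	sc := proxy.SecureClient(node, cert)
	_ = sc
}
```

`GetWithProof` is the targeted call for a single key, while `SecureClient` returns a drop-in `rpcclient.Client` (the `Wrapper` below) that double-checks any provable response before handing it back.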
diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go index fcc6659a..7f759cc6 100644 --- a/lite/proxy/query_test.go +++ b/lite/proxy/query_test.go @@ -58,7 +58,7 @@ func _TestAppProofs(t *testing.T) { source := certclient.NewProvider(chainID, cl) seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) require.NoError(err, "%+v", err) - cert := lite.NewBaseCertifier("my-chain", seed.Height(), seed.Validators) + cert := lite.NewBaseVerifier("my-chain", seed.Height(), seed.Validators) client.WaitForHeight(cl, 3, nil) latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1) @@ -117,7 +117,7 @@ func _TestTxProofs(t *testing.T) { source := certclient.NewProvider(chainID, cl) seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) require.NoError(err, "%+v", err) - cert := lite.NewBaseCertifier("my-chain", seed.Height(), seed.Validators) + cert := lite.NewBaseVerifier("my-chain", seed.Height(), seed.Validators) // First let's make sure a bogus transaction hash returns a valid non-existence proof. key := types.Tx([]byte("bogus")).Hash() diff --git a/lite/proxy/certifier.go b/lite/proxy/verifier.go similarity index 73% rename from lite/proxy/certifier.go rename to lite/proxy/verifier.go index bd09b1ab..6686def0 100644 --- a/lite/proxy/certifier.go +++ b/lite/proxy/verifier.go @@ -8,10 +8,10 @@ import ( log "github.com/tendermint/tendermint/libs/log" ) -func NewCertifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger) (*lite.InquiringCertifier, error) { +func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger) (*lite.DynamicVerifier, error) { logger = logger.With("module", "lite/proxy") - logger.Info("lite/proxy/NewCertifier()...", "chainID", chainID, "rootDir", rootDir, "client", client) + logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client) memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(10) lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)) @@ -20,13 +20,13 @@ func NewCertifier(chainID, rootDir string, client lclient.SignStatusClient, logg lvlProvider, ) source := lclient.NewProvider(chainID, client) - cert := lite.NewInquiringCertifier(chainID, trust, source) + cert := lite.NewDynamicVerifier(chainID, trust, source) cert.SetLogger(logger) // Sets logger recursively. // TODO: Make this more secure, e.g. make it interactive in the console? _, err := trust.LatestFullCommit(chainID, 1, 1<<63-1) if err != nil { - logger.Info("lite/proxy/NewCertifier found no trusted full commit, initializing from source from height 1...") + logger.Info("lite/proxy/NewVerifier found no trusted full commit, initializing from source from height 1...") fc, err := source.LatestFullCommit(chainID, 1, 1) if err != nil { return nil, cmn.ErrorWrap(err, "fetching source full commit @ height 1") diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go index ac1d1dbc..522511a8 100644 --- a/lite/proxy/wrapper.go +++ b/lite/proxy/wrapper.go @@ -10,18 +10,18 @@ import ( var _ rpcclient.Client = Wrapper{} -// Wrapper wraps a rpcclient with a Certifier and double-checks any input that is +// Wrapper wraps a rpcclient with a Verifier and double-checks any input that is // provable before passing it along. Allows you to make any rpcclient fully secure. 
type Wrapper struct { rpcclient.Client - cert *lite.InquiringCertifier + cert *lite.DynamicVerifier } -// SecureClient uses a given certifier to wrap an connection to an untrusted +// SecureClient uses a given Verifier to wrap an connection to an untrusted // host and return a cryptographically secure rpc client. // // If it is wrapping an HTTP rpcclient, it will also wrap the websocket interface -func SecureClient(c rpcclient.Client, cert *lite.InquiringCertifier) Wrapper { +func SecureClient(c rpcclient.Client, cert *lite.DynamicVerifier) Wrapper { wrap := Wrapper{c, cert} // TODO: no longer possible as no more such interface exposed.... // if we wrap http client, then we can swap out the event switch to filter diff --git a/lite/types.go b/lite/types.go index 1f479799..7228c74a 100644 --- a/lite/types.go +++ b/lite/types.go @@ -4,10 +4,10 @@ import ( "github.com/tendermint/tendermint/types" ) -// Certifier checks the votes to make sure the block really is signed properly. -// Certifier must know the current or recent set of validitors by some other +// Verifier checks the votes to make sure the block really is signed properly. +// Verifier must know the current or recent set of validitors by some other // means. -type Certifier interface { +type Verifier interface { Certify(sheader types.SignedHeader) error ChainID() string } diff --git a/types/block.go b/types/block.go index 0b64c7b8..c112ee50 100644 --- a/types/block.go +++ b/types/block.go @@ -446,7 +446,7 @@ type SignedHeader struct { // and commit are consistent. // // NOTE: This does not actually check the cryptographic signatures. Make -// sure to use a Certifier to validate the signatures actually provide a +// sure to use a Verifier to validate the signatures actually provide a // significantly strong proof for this header's validity. func (sh SignedHeader) ValidateBasic(chainID string) error { diff --git a/types/validator_set.go b/types/validator_set.go index 6d580ace..4dab4d84 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -48,6 +48,11 @@ func NewValidatorSet(valz []*Validator) *ValidatorSet { return vals } +// Nil or empty validator sets are invalid. +func (vals *ValidatorSet) IsNilOrEmpty() bool { + return vals == nil || len(vals.Validators) == 0 +} + // Increment Accum and update the proposer on a copy, and return it. func (vals *ValidatorSet) CopyIncrementAccum(times int) *ValidatorSet { copy := vals.Copy() From 00ebdcd5819c0be39ceb08f276cf4f9c9f425ddd Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 24 Jul 2018 10:27:20 -0400 Subject: [PATCH 25/27] update pending changelog --- CHANGELOG_PENDING.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index c51a2ab6..1e002049 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -3,6 +3,12 @@ BREAKING CHANGES: - [types] CanonicalTime uses nanoseconds instead of clipping to ms - breaks serialization/signing of all messages with a timestamp +- [types] Header ... +- [state] Add NextValidatorSet, changes on-disk representation of state +- [state] Validator set changes are delayed by one block (!) 
+- [lite] Complete refactor of the package +- [rpc] `/commit` returns a `signed_header` field instead of everything being + top-level IMPROVEMENTS: - [blockchain] Improve fast-sync logic From fe5e7808f283203a7fa3db5ddee30255a35f1675 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 2 Aug 2018 19:15:32 -0400 Subject: [PATCH 26/27] fix Gopkg.lock --- Gopkg.lock | 111 ++++++++++++++--------------------------------------- 1 file changed, 29 insertions(+), 82 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index fc34e799..8e567aaf 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -11,18 +11,14 @@ [[projects]] branch = "master" - digest = "1:2c00f064ba355903866cbfbf3f7f4c0fe64af6638cc7d1b8bdcf3181bc67f1d8" + digest = "1:6aabc1566d6351115d561d038da82a4c19b46c3b6e17f4a0a2fa60260663dc79" name = "github.com/btcsuite/btcd" packages = ["btcec"] -<<<<<<< HEAD - revision = "f5e261fc9ec3437697fb31d8b38453c293204b29" -======= pruneopts = "UT" - revision = "9a2f9524024889e129a5422aca2cff73cb3eabf6" ->>>>>>> origin/develop + revision = "cf05f92c3f815bbd5091ed6c73eff51f7b1945e8" [[projects]] - digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf" + digest = "1:df684ed7fed3fb406ec421424aaf5fc9c63ccc2f428b25b842da78e634482e4b" name = "github.com/btcsuite/btcutil" packages = [ "base58", @@ -63,7 +59,7 @@ version = "v1.4.7" [[projects]] - digest = "1:fdf5169073fb0ad6dc12a70c249145e30f4058647bea25f0abd48b6d9f228a11" + digest = "1:fa30c0652956e159cdb97dcb2ef8b8db63ed668c02a5c3a40961c8f0641252fe" name = "github.com/go-kit/kit" packages = [ "log", @@ -74,14 +70,9 @@ "metrics/internal/lv", "metrics/prometheus", ] -<<<<<<< HEAD - revision = "ca4112baa34cb55091301bdc13b1420a122b1b9e" - version = "v0.7.0" -======= pruneopts = "UT" revision = "4dc7be5d2d12881735283bcab7352178e190fc71" version = "v0.6.0" ->>>>>>> origin/develop [[projects]] digest = "1:31a18dae27a29aa074515e43a443abfd2ba6deb6d69309d8d7ce789c45f34659" @@ -100,7 +91,7 @@ version = "v1.7.0" [[projects]] - digest = "1:35621fe20f140f05a0c4ef662c26c0ab4ee50bca78aa30fe87d33120bd28165e" + digest = "1:212285efb97b9ec2e20550d81f0446cb7897e57cbdfd7301b1363ab113d8be45" name = "github.com/gogo/protobuf" packages = [ "gogoproto", @@ -110,15 +101,12 @@ "sortkeys", "types", ] -<<<<<<< HEAD -======= pruneopts = "UT" ->>>>>>> origin/develop revision = "636bf0302bc95575d69441b25a2603156ffdddf1" version = "v1.1.1" [[projects]] - digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260" + digest = "1:cb22af0ed7c72d495d8be1106233ee553898950f15fd3f5404406d44c2e86888" name = "github.com/golang/protobuf" packages = [ "proto", @@ -149,13 +137,12 @@ [[projects]] branch = "master" - digest = "1:12247a2e99a060cc692f6680e5272c8adf0b8f572e6bce0d7095e624c958a240" + digest = "1:8951fe6e358876736d8fa1f3992624fdbb2dec6bc49401c1381d1ef8abbb544f" name = "github.com/hashicorp/hcl" packages = [ ".", "hcl/ast", "hcl/parser", - "hcl/printer", "hcl/scanner", "hcl/strconv", "hcl/token", @@ -207,8 +194,10 @@ [[projects]] branch = "master" + digest = "1:5ab79470a1d0fb19b041a624415612f8236b3c06070161a910562f2b2d064355" name = "github.com/mitchellh/mapstructure" packages = ["."] + pruneopts = "UT" revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac" [[projects]] @@ -236,7 +225,7 @@ version = "v1.0.0" [[projects]] - digest = "1:c1a04665f9613e082e1209cf288bf64f4068dcd6c87a64bf1c4ff006ad422ba0" + digest = "1:98225904b7abff96c052b669b25788f18225a36673fba022fb93514bb9a2a64e" name = "github.com/prometheus/client_golang" packages = [ "prometheus", @@ -247,33 
+236,27 @@ [[projects]] branch = "master" -<<<<<<< HEAD -======= - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" ->>>>>>> origin/develop + digest = "1:0f37e09b3e92aaeda5991581311f8dbf38944b36a3edec61cc2d1991f527554a" name = "github.com/prometheus/client_model" packages = ["go"] + pruneopts = "UT" revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" [[projects]] branch = "master" - digest = "1:e469cd65badf7694aeb44874518606d93c1d59e7735d3754ad442782437d3cc3" + digest = "1:dad2e5a2153ee7a6c9ab8fc13673a16ee4fb64434a7da980965a3741b0c981a3" name = "github.com/prometheus/common" packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", "model", ] -<<<<<<< HEAD - revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" -======= pruneopts = "UT" - revision = "7600349dcfe1abd18d72d3a1770870d9800a7801" ->>>>>>> origin/develop + revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" [[projects]] branch = "master" - digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" + digest = "1:a37c98f4b7a66bb5c539c0539f0915a74ef1c8e0b3b6f45735289d94cae92bfd" name = "github.com/prometheus/procfs" packages = [ ".", @@ -281,10 +264,7 @@ "nfs", "xfs", ] -<<<<<<< HEAD -======= pruneopts = "UT" ->>>>>>> origin/develop revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" [[projects]] @@ -295,7 +275,7 @@ revision = "e2704e165165ec55d062f5919b4b29494e9fa790" [[projects]] - digest = "1:bd1ae00087d17c5a748660b8e89e1043e1e5479d0fea743352cda2f8dd8c4f84" + digest = "1:37ace7f35375adec11634126944bdc45a673415e2fcc07382d03b75ec76ea94c" name = "github.com/spf13/afero" packages = [ ".", @@ -314,17 +294,12 @@ version = "v1.2.0" [[projects]] - digest = "1:7ffc0983035bc7e297da3688d9fe19d60a420e9c38bef23f845c53788ed6a05e" + digest = "1:627ab2f549a6a55c44f46fa24a4307f4d0da81bfc7934ed0473bf38b24051d26" name = "github.com/spf13/cobra" packages = ["."] -<<<<<<< HEAD - revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" - version = "v0.0.3" -======= pruneopts = "UT" revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b" version = "v0.0.1" ->>>>>>> origin/develop [[projects]] branch = "master" @@ -346,34 +321,24 @@ digest = "1:f8e1a678a2571e265f4bf91a3e5e32aa6b1474a55cb0ea849750cc177b664d96" name = "github.com/spf13/viper" packages = ["."] -<<<<<<< HEAD - revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736" - version = "v1.0.2" -======= pruneopts = "UT" revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7" version = "v1.0.0" ->>>>>>> origin/develop [[projects]] - digest = "1:7e8d267900c7fa7f35129a2a37596e38ed0f11ca746d6d9ba727980ee138f9f6" + digest = "1:73697231b93fb74a73ebd8384b68b9a60c57ea6b13c56d2425414566a72c8e6d" name = "github.com/stretchr/testify" packages = [ "assert", "require", ] -<<<<<<< HEAD - revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" - version = "v1.2.2" -======= pruneopts = "UT" revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" version = "v1.2.1" ->>>>>>> origin/develop [[projects]] branch = "master" - digest = "1:b3cfb8d82b1601a846417c3f31c03a7961862cb2c98dcf0959c473843e6d9a2b" + digest = "1:922191411ad8f61bcd8018ac127589bb489712c1d1a0ab2497aca4b16de417d2" name = "github.com/syndtr/goleveldb" packages = [ "leveldb", @@ -394,7 +359,7 @@ [[projects]] branch = "master" - digest = "1:087aaa7920e5d0bf79586feb57ce01c35c830396ab4392798112e8aae8c47722" + digest = "1:203b409c21115233a576f99e8f13d8e07ad82b25500491f7e1cca12588fb3232" name = "github.com/tendermint/ed25519" packages = [ ".", @@ -405,23 +370,16 @@ revision = 
"d8387025d2b9d158cf4efb07e7ebf814bcce2057" [[projects]] -<<<<<<< HEAD branch = "jae/writeemptyptr" - name = "github.com/tendermint/go-amino" - packages = ["."] - revision = "8202139066d340b77084a583e176e29fb28b42e9" -======= - digest = "1:e9113641c839c21d8eaeb2c907c7276af1eddeed988df8322168c56b7e06e0e1" + digest = "1:2851f999161ce484d09df830edace38654b711228155d476680577f47f2a5bff" name = "github.com/tendermint/go-amino" packages = ["."] pruneopts = "UT" - revision = "2106ca61d91029c931fd54968c2bb02dc96b1412" - version = "0.10.1" ->>>>>>> origin/develop + revision = "8202139066d340b77084a583e176e29fb28b42e9" [[projects]] branch = "master" - digest = "1:c31a37cafc12315b8bd745c8ad6a006ac25350472488162a821e557b3e739d67" + digest = "1:df132ec33d5acb4a1ab58d637f1bc3557be49456ca59b9198f5c1e7fa32e0d31" name = "golang.org/x/crypto" packages = [ "bcrypt", @@ -439,14 +397,11 @@ "ripemd160", "salsa20/salsa", ] -<<<<<<< HEAD -======= pruneopts = "UT" ->>>>>>> origin/develop - revision = "c126467f60eb25f8f27e5a981f32a87e3965053f" + revision = "56440b844dfe139a8ac053f4ecac0b20b79058f4" [[projects]] - digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1" + digest = "1:04dda8391c3e2397daf254ac68003f30141c069b228d06baec8324a5f81dc1e9" name = "golang.org/x/net" packages = [ "context", @@ -463,21 +418,17 @@ [[projects]] branch = "master" - digest = "1:12ff7b51d336ea7e8b182aa3313679a37d53de64f84d2c3cbfd6a0237877e20a" + digest = "1:8cf61f10625f94b618d574224a437fc22ca0f300a3bc03ecab23ab81d478e95c" name = "golang.org/x/sys" packages = [ "cpu", "unix", ] -<<<<<<< HEAD - revision = "3dc4335d56c789b04b0ba99b7a37249d9b614314" -======= pruneopts = "UT" - revision = "e072cadbbdc8dd3d3ffa82b8b4b9304c261d9311" ->>>>>>> origin/develop + revision = "0ffbfd41fbef8ffcf9b62b0b0aa3a5873ed7a4fe" [[projects]] - digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" + digest = "1:7509ba4347d1f8de6ae9be8818b0cd1abc3deeffe28aeaf4be6d4b6b5178d9ca" name = "golang.org/x/text" packages = [ "collate", @@ -507,7 +458,7 @@ revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200" [[projects]] - digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74" + digest = "1:4515e3030c440845b046354fd5d57671238428b820deebce2e9dabb5cd3c51ac" name = "google.golang.org/grpc" packages = [ ".", @@ -551,9 +502,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 -<<<<<<< HEAD - inputs-digest = "cb44aec2727610e0547ee75e2b4602266d85026bb47747f4fb8bdcef4709bdd1" -======= input-imports = [ "github.com/btcsuite/btcd/btcec", "github.com/btcsuite/btcutil/base58", @@ -603,6 +551,5 @@ "google.golang.org/grpc", "google.golang.org/grpc/credentials", ] ->>>>>>> origin/develop solver-name = "gps-cdcl" solver-version = 1 From ca9d07e5e46309473218e5ff0458da3e572df9b0 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 5 Aug 2018 12:39:08 -0400 Subject: [PATCH 27/27] update deps for amaino v0.12.0-rc0 --- Gopkg.lock | 6 +++--- Gopkg.toml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 8e567aaf..db035fc0 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -370,12 +370,12 @@ revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057" [[projects]] - branch = "jae/writeemptyptr" - digest = "1:2851f999161ce484d09df830edace38654b711228155d476680577f47f2a5bff" + digest = "1:e0a2a4be1e20c305badc2b0a7a9ab7fef6da500763bec23ab81df3b5f9eec9ee" name = "github.com/tendermint/go-amino" packages = ["."] pruneopts = "UT" - revision = "8202139066d340b77084a583e176e29fb28b42e9" 
+ revision = "a8328986c1608950fa5d3d1c0472cccc4f8fc02c" + version = "v0.12.0-rc0" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index a4d71aa8..5ec6a47c 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -58,7 +58,7 @@ [[constraint]] name = "github.com/tendermint/go-amino" - branch = "jae/writeemptyptr" + version = "v0.12.0-rc0" [[constraint]] name = "google.golang.org/grpc"