diff --git a/CHANGELOG.md b/CHANGELOG.md index c506a229..2ec6b1d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,13 +1,79 @@ # Changelog +## v0.27.0 + +*December 5th, 2018* + +Special thanks to external contributors on this release: +@danil-lashin, @srmo + +Special thanks to @dlguddus for discovering a [major +issue](https://github.com/tendermint/tendermint/issues/2718#issuecomment-440888677) +in the proposer selection algorithm. + +Friendly reminder, we have a [bug bounty +program](https://hackerone.com/tendermint). + +This release is primarily about fixes to the proposer selection algorithm +in preparation for the [Cosmos Game of +Stakes](https://blog.cosmos.network/the-game-of-stakes-is-open-for-registration-83a404746ee6). +It also makes use of the `ConsensusParams.Validator.PubKeyTypes` to restrict the +key types that can be used by validators, and removes the `Heartbeat` consensus +message. + +### BREAKING CHANGES: + +* CLI/RPC/Config + - [rpc] [\#2932](https://github.com/tendermint/tendermint/issues/2932) Rename `accum` to `proposer_priority` + +* Go API + - [db] [\#2913](https://github.com/tendermint/tendermint/pull/2913) + ReverseIterator API change: start < end, and end is exclusive. + - [types] [\#2932](https://github.com/tendermint/tendermint/issues/2932) Rename `Validator.Accum` to `Validator.ProposerPriority` + +* Blockchain Protocol + - [state] [\#2714](https://github.com/tendermint/tendermint/issues/2714) Validators can now only use pubkeys allowed within + ConsensusParams.Validator.PubKeyTypes + +* P2P Protocol + - [consensus] [\#2871](https://github.com/tendermint/tendermint/issues/2871) + Remove *ProposalHeartbeat* message as it serves no real purpose (@srmo) + - [state] Fixes for proposer selection: + - [\#2785](https://github.com/tendermint/tendermint/issues/2785) Accum for new validators is `-1.125*totalVotingPower` instead of 0 + - [\#2941](https://github.com/tendermint/tendermint/issues/2941) val.Accum is preserved during ValidatorSet.Update to avoid being + reset to 0 + +### IMPROVEMENTS: + +- [state] [\#2929](https://github.com/tendermint/tendermint/issues/2929) Minor refactor of updateState logic (@danil-lashin) +- [node] \#2959 Allow node to start even if software's BlockProtocol is + different from state's BlockProtocol +- [pex] \#2959 Pex reactor logger uses `module=pex` + +### BUG FIXES: + +- [p2p] \#2968 Panic on transport error rather than continuing to run but not + accept new connections +- [p2p] \#2969 Fix mismatch in peer count between `/net_info` and the prometheus + metrics +- [rpc] \#2408 `/broadcast_tx_commit`: Fix "interface conversion: interface {} in nil, not EventDataTx" panic (could happen if somebody sent a tx using `/broadcast_tx_commit` while Tendermint was being stopped) +- [state] [\#2785](https://github.com/tendermint/tendermint/issues/2785) Fix accum for new validators to be `-1.125*totalVotingPower` + instead of 0, forcing them to wait before becoming the proposer. 
Also: + - do not batch clip + - keep accums averaged near 0 +- [txindex/kv] [\#2925](https://github.com/tendermint/tendermint/issues/2925) Don't return false positives when range searching for a prefix of a tag value +- [types] [\#2938](https://github.com/tendermint/tendermint/issues/2938) Fix regression in v0.26.4 where we panic on empty + genDoc.Validators +- [types] [\#2941](https://github.com/tendermint/tendermint/issues/2941) Preserve val.Accum during ValidatorSet.Update to avoid it being + reset to 0 every time a validator is updated + ## v0.26.4 *November 27th, 2018* Special thanks to external contributors on this release: -ackratos, goolAdapter, james-ray, joe-bowman, kostko, -nagarajmanjunath, tomtau - +@ackratos, @goolAdapter, @james-ray, @joe-bowman, @kostko, +@nagarajmanjunath, @tomtau Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 2a2626a4..7063efc3 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,14 +1,9 @@ -# Pending - -## v0.27.0 +## v0.27.1 *TBD* Special thanks to external contributors on this release: -Friendly reminder, we have a [bug bounty -program](https://hackerone.com/tendermint). - ### BREAKING CHANGES: * CLI/RPC/Config diff --git a/Makefile b/Makefile index 294a319b..6f7874ee 100644 --- a/Makefile +++ b/Makefile @@ -32,6 +32,9 @@ build_race: install: CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint +install_c: + CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" ./cmd/tendermint + ######################################## ### Protobuf @@ -328,4 +331,4 @@ build-slate: # To avoid unintended conflicts with file names, always add to .PHONY # unless there is a reason not to. # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html -.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools get_dev_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all +.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools get_dev_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all build_c install_c diff --git a/UPGRADING.md b/UPGRADING.md index 055dbec4..63f000f5 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -3,9 +3,50 @@ This guide provides steps to be followed when you upgrade your applications to a newer version of Tendermint Core. +## v0.27.0 + +This release contains some breaking changes to the block and p2p protocols, +but does not change any core data structures, so it should be compatible with +existing blockchains from the v0.26 series that only used Ed25519 validator keys. +Blockchains using Secp256k1 for validators will not be compatible. This is due +to the fact that we now enforce which key types validators can use as a +consensus param. 
The default is Ed25519, and Secp256k1 must be activated +explicitly. + +It is recommended to upgrade all nodes at once to avoid incompatibilities at the +peer layer - namely, the heartbeat consensus message has been removed (only +relevant if `create_empty_blocks=false` or `create_empty_blocks_interval > 0`), +and the proposer selection algorithm has changed. Since proposer information is +never included in the blockchain, this change only affects the peer layer. + +### Go API Changes + +#### libs/db + +The ReverseIterator API has changed the meaning of `start` and `end`. +Before, iteration was from `start` to `end`, where +`start > end`. Now, iteration is from `end` to `start`, where `start < end`. +The iterator also excludes `end`. This change allows a simplified and more +intuitive logic, aligning the semantic meaning of `start` and `end` in the +`Iterator` and `ReverseIterator`. + +### Applications + +This release enforces a new consensus parameter, the +ValidatorParams.PubKeyTypes. Applications must ensure that they only return +validator updates with the allowed PubKeyTypes. If a validator update includes a +pubkey type that is not included in the ConsensusParams.Validator.PubKeyTypes, +block execution will fail and the consensus will halt. + +By default, only Ed25519 pubkeys may be used for validators. Enabling +Secp256k1 requires explicit modification of the ConsensusParams. +Please update your application accordingly (ie. restrict validators to only be +able to use Ed25519 keys, or explicitly add additional key types to the genesis +file). + ## v0.26.0 -New 0.26.0 release contains a lot of changes to core data types and protocols. It is not +This release contains a lot of changes to core data types and protocols. It is not compatible to the old versions and there is no straight forward way to update old data to be compatible with the new version. @@ -67,7 +108,7 @@ For more information, see: ### Go API Changes -#### crypto.merkle +#### crypto/merkle The `merkle.Hasher` interface was removed. Functions which used to take `Hasher` now simply take `[]byte`. This means that any objects being Merklized should be diff --git a/consensus/common_test.go b/consensus/common_test.go index 8a2d8a42..46be5cbd 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -425,20 +425,6 @@ func ensureNewRound(roundCh <-chan interface{}, height int64, round int) { } } -func ensureProposalHeartbeat(heartbeatCh <-chan interface{}) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for ProposalHeartbeat event") - case ev := <-heartbeatCh: - heartbeat, ok := ev.(types.EventDataProposalHeartbeat) - if !ok { - panic(fmt.Sprintf("expected a *types.EventDataProposalHeartbeat, "+ - "got %v. 
wrong subscription channel?", - reflect.TypeOf(heartbeat))) - } - } -} - func ensureNewTimeout(timeoutCh <-chan interface{}, height int64, round int, timeout int64) { timeoutDuration := time.Duration(timeout*3) * time.Nanosecond ensureNewEvent(timeoutCh, height, round, timeoutDuration, diff --git a/consensus/reactor.go b/consensus/reactor.go index b3298e9d..1f508319 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -8,7 +8,7 @@ import ( "github.com/pkg/errors" - amino "github.com/tendermint/go-amino" + "github.com/tendermint/go-amino" cstypes "github.com/tendermint/tendermint/consensus/types" cmn "github.com/tendermint/tendermint/libs/common" tmevents "github.com/tendermint/tendermint/libs/events" @@ -264,11 +264,6 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) BlockID: msg.BlockID, Votes: ourVotes, })) - case *ProposalHeartbeatMessage: - hb := msg.Heartbeat - conR.Logger.Debug("Received proposal heartbeat message", - "height", hb.Height, "round", hb.Round, "sequence", hb.Sequence, - "valIdx", hb.ValidatorIndex, "valAddr", hb.ValidatorAddress) default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } @@ -369,8 +364,8 @@ func (conR *ConsensusReactor) FastSync() bool { //-------------------------------------- -// subscribeToBroadcastEvents subscribes for new round steps, votes and -// proposal heartbeats using internal pubsub defined on state to broadcast +// subscribeToBroadcastEvents subscribes for new round steps and votes +// using internal pubsub defined on state to broadcast // them to peers upon receiving. func (conR *ConsensusReactor) subscribeToBroadcastEvents() { const subscriber = "consensus-reactor" @@ -389,10 +384,6 @@ func (conR *ConsensusReactor) subscribeToBroadcastEvents() { conR.broadcastHasVoteMessage(data.(*types.Vote)) }) - conR.conS.evsw.AddListenerForEvent(subscriber, types.EventProposalHeartbeat, - func(data tmevents.EventData) { - conR.broadcastProposalHeartbeatMessage(data.(*types.Heartbeat)) - }) } func (conR *ConsensusReactor) unsubscribeFromBroadcastEvents() { @@ -400,13 +391,6 @@ func (conR *ConsensusReactor) unsubscribeFromBroadcastEvents() { conR.conS.evsw.RemoveListener(subscriber) } -func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(hb *types.Heartbeat) { - conR.Logger.Debug("Broadcasting proposal heartbeat message", - "height", hb.Height, "round", hb.Round, "sequence", hb.Sequence, "address", hb.ValidatorAddress) - msg := &ProposalHeartbeatMessage{hb} - conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg)) -} - func (conR *ConsensusReactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) { nrsMsg := makeRoundStepMessage(rs) conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg)) @@ -1387,7 +1371,6 @@ func RegisterConsensusMessages(cdc *amino.Codec) { cdc.RegisterConcrete(&HasVoteMessage{}, "tendermint/HasVote", nil) cdc.RegisterConcrete(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23", nil) cdc.RegisterConcrete(&VoteSetBitsMessage{}, "tendermint/VoteSetBits", nil) - cdc.RegisterConcrete(&ProposalHeartbeatMessage{}, "tendermint/ProposalHeartbeat", nil) } func decodeMsg(bz []byte) (msg ConsensusMessage, err error) { @@ -1664,18 +1647,3 @@ func (m *VoteSetBitsMessage) String() string { } //------------------------------------- - -// ProposalHeartbeatMessage is sent to signal that a node is alive and waiting for transactions for a proposal. 
-type ProposalHeartbeatMessage struct { - Heartbeat *types.Heartbeat -} - -// ValidateBasic performs basic validation. -func (m *ProposalHeartbeatMessage) ValidateBasic() error { - return m.Heartbeat.ValidateBasic() -} - -// String returns a string representation. -func (m *ProposalHeartbeatMessage) String() string { - return fmt.Sprintf("[HEARTBEAT %v]", m.Heartbeat) -} diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 2758f3fa..1636785c 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -213,8 +213,8 @@ func (m *mockEvidencePool) Update(block *types.Block, state sm.State) { //------------------------------------ -// Ensure a testnet sends proposal heartbeats and makes blocks when there are txs -func TestReactorProposalHeartbeats(t *testing.T) { +// Ensure a testnet makes blocks when there are txs +func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { N := 4 css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter, func(c *cfg.Config) { @@ -222,17 +222,6 @@ func TestReactorProposalHeartbeats(t *testing.T) { }) reactors, eventChans, eventBuses := startConsensusNet(t, css, N) defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) - heartbeatChans := make([]chan interface{}, N) - var err error - for i := 0; i < N; i++ { - heartbeatChans[i] = make(chan interface{}, 1) - err = eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryProposalHeartbeat, heartbeatChans[i]) - require.NoError(t, err) - } - // wait till everyone sends a proposal heartbeat - timeoutWaitGroup(t, N, func(j int) { - <-heartbeatChans[j] - }, css) // send a tx if err := css[3].mempool.CheckTx([]byte{1, 2, 3}, nil); err != nil { diff --git a/consensus/replay_test.go b/consensus/replay_test.go index c261426c..7cd32c7a 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -17,7 +17,7 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" auto "github.com/tendermint/tendermint/libs/autofile" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/version" diff --git a/consensus/state.go b/consensus/state.go index 4b7aec2a..81bdce7d 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -22,13 +22,6 @@ import ( "github.com/tendermint/tendermint/types" ) -//----------------------------------------------------------------------------- -// Config - -const ( - proposalHeartbeatIntervalSeconds = 2 -) - //----------------------------------------------------------------------------- // Errors @@ -118,7 +111,7 @@ type ConsensusState struct { done chan struct{} // synchronous pubsub between consensus state and reactor. 
- // state only emits EventNewRoundStep, EventVote and EventProposalHeartbeat + // state only emits EventNewRoundStep and EventVote evsw tmevents.EventSwitch // for reporting metrics @@ -752,7 +745,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) { validators := cs.Validators if cs.Round < round { validators = validators.Copy() - validators.IncrementAccum(round - cs.Round) + validators.IncrementProposerPriority(round - cs.Round) } // Setup new round @@ -785,7 +778,6 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) { cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round, cstypes.RoundStepNewRound) } - go cs.proposalHeartbeat(height, round) } else { cs.enterPropose(height, round) } @@ -802,38 +794,6 @@ func (cs *ConsensusState) needProofBlock(height int64) bool { return !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash) } -func (cs *ConsensusState) proposalHeartbeat(height int64, round int) { - logger := cs.Logger.With("height", height, "round", round) - addr := cs.privValidator.GetAddress() - - if !cs.Validators.HasAddress(addr) { - logger.Debug("Not sending proposalHearbeat. This node is not a validator", "addr", addr, "vals", cs.Validators) - return - } - counter := 0 - valIndex, _ := cs.Validators.GetByAddress(addr) - chainID := cs.state.ChainID - for { - rs := cs.GetRoundState() - // if we've already moved on, no need to send more heartbeats - if rs.Step > cstypes.RoundStepNewRound || rs.Round > round || rs.Height > height { - return - } - heartbeat := &types.Heartbeat{ - Height: rs.Height, - Round: rs.Round, - Sequence: counter, - ValidatorAddress: addr, - ValidatorIndex: valIndex, - } - cs.privValidator.SignHeartbeat(chainID, heartbeat) - cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat}) - cs.evsw.FireEvent(types.EventProposalHeartbeat, heartbeat) - counter++ - time.Sleep(proposalHeartbeatIntervalSeconds * time.Second) - } -} - // Enter (CreateEmptyBlocks): from enterNewRound(height,round) // Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval // Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool diff --git a/consensus/state_test.go b/consensus/state_test.go index 941a99cd..ddab6404 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -11,8 +11,6 @@ import ( "github.com/stretchr/testify/require" cstypes "github.com/tendermint/tendermint/consensus/types" - tmevents "github.com/tendermint/tendermint/libs/events" - cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" @@ -1029,33 +1027,6 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { assert.True(t, rs.ValidRound == round) } -// regression for #2518 -func TestNoHearbeatWhenNotValidator(t *testing.T) { - cs, _ := randConsensusState(4) - cs.Validators = types.NewValidatorSet(nil) // make sure we are not in the validator set - - cs.evsw.AddListenerForEvent("testing", types.EventProposalHeartbeat, - func(data tmevents.EventData) { - t.Errorf("Should not have broadcasted heartbeat") - }) - go cs.proposalHeartbeat(10, 1) - - cs.Stop() - - // if a faulty implementation sends an event, we should wait here a little bit to make sure we don't miss it by prematurely leaving the test method - time.Sleep((proposalHeartbeatIntervalSeconds + 1) * time.Second) -} - -// regression for #2518 -func 
TestHearbeatWhenWeAreValidator(t *testing.T) { - cs, _ := randConsensusState(4) - heartbeatCh := subscribe(cs.eventBus, types.EventQueryProposalHeartbeat) - - go cs.proposalHeartbeat(10, 1) - ensureProposalHeartbeat(heartbeatCh) - -} - // What we want: // P0 miss to lock B as Proposal Block is missing, but set valid block to B after // receiving delayed Block Proposal. diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 263c2c5e..ba6f0589 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -11,13 +11,10 @@ Make sure you [have Go installed](https://golang.org/doc/install). Next, install the `abci-cli` tool and example applications: ``` -go get github.com/tendermint/tendermint -``` - -to get vendored dependencies: - -``` -cd $GOPATH/src/github.com/tendermint/tendermint +mkdir -p $GOPATH/src/github.com/tendermint +cd $GOPATH/src/github.com/tendermint +git clone https://github.com/tendermint/tendermint.git +cd tendermint make get_tools make get_vendor_deps make install_abci diff --git a/docs/app-dev/ecosystem.json b/docs/app-dev/ecosystem.json index f66ed28b..3f0fa334 100644 --- a/docs/app-dev/ecosystem.json +++ b/docs/app-dev/ecosystem.json @@ -122,7 +122,7 @@ ], "abciServers": [ { - "name": "abci", + "name": "go-abci", "url": "https://github.com/tendermint/tendermint/tree/master/abci", "language": "Go", "author": "Tendermint" @@ -133,6 +133,12 @@ "language": "Javascript", "author": "Tendermint" }, + { + "name": "rust-tsp", + "url": "https://github.com/tendermint/rust-tsp", + "language": "Rust", + "author": "Tendermint" + }, { "name": "cpp-tmsp", "url": "https://github.com/mdyring/cpp-tmsp", @@ -164,7 +170,7 @@ "author": "Dave Bryson" }, { - "name": "tm-abci", + "name": "tm-abci (fork of py-abci with async IO)", "url": "https://github.com/SoftblocksCo/tm-abci", "language": "Python", "author": "Softblocks" diff --git a/docs/app-dev/getting-started.md b/docs/app-dev/getting-started.md index 14aa0dc3..5509a701 100644 --- a/docs/app-dev/getting-started.md +++ b/docs/app-dev/getting-started.md @@ -252,14 +252,12 @@ we'll run a Javascript version of the `counter`. To run it, you'll need to [install node](https://nodejs.org/en/download/). You'll also need to fetch the relevant repository, from -[here](https://github.com/tendermint/js-abci) then install it. As go -devs, we keep all our code under the `$GOPATH`, so run: +[here](https://github.com/tendermint/js-abci), then install it: ``` -go get github.com/tendermint/js-abci &> /dev/null -cd $GOPATH/src/github.com/tendermint/js-abci/example -npm install -cd .. +git clone https://github.com/tendermint/js-abci.git +cd js-abci +npm install abci ``` Kill the previous `counter` and `tendermint` processes. Now run the app: @@ -276,13 +274,16 @@ tendermint node ``` Once again, you should see blocks streaming by - but now, our -application is written in javascript! Try sending some transactions, and +application is written in Javascript! Try sending some transactions, and like before - the results should be the same: ``` -curl localhost:26657/broadcast_tx_commit?tx=0x00 # ok -curl localhost:26657/broadcast_tx_commit?tx=0x05 # invalid nonce -curl localhost:26657/broadcast_tx_commit?tx=0x01 # ok +# ok +curl localhost:26657/broadcast_tx_commit?tx=0x00 +# invalid nonce +curl localhost:26657/broadcast_tx_commit?tx=0x05 +# ok +curl localhost:26657/broadcast_tx_commit?tx=0x01 ``` Neat, eh? 
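As an optional sanity check (not part of the original guide), you can confirm that Tendermint is actually talking to the Javascript app by hitting two of the standard RPC endpoints:

```
# node status, including the latest block height
curl localhost:26657/status
# information reported by the ABCI application itself
curl localhost:26657/abci_info
```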
diff --git a/docs/app-dev/subscribing-to-events-via-websocket.md b/docs/app-dev/subscribing-to-events-via-websocket.md index 49954809..d745769c 100644 --- a/docs/app-dev/subscribing-to-events-via-websocket.md +++ b/docs/app-dev/subscribing-to-events-via-websocket.md @@ -54,7 +54,7 @@ Response: "value": "ww0z4WaZ0Xg+YI10w43wTWbBmM3dpVza4mmSQYsd0ck=" }, "voting_power": "10", - "accum": "0" + "proposer_priority": "0" } ] } diff --git a/docs/introduction/install.md b/docs/introduction/install.md index c3395eff..3005a734 100644 --- a/docs/introduction/install.md +++ b/docs/introduction/install.md @@ -79,11 +79,9 @@ make install Install [LevelDB](https://github.com/google/leveldb) (minimum version is 1.7). -Build Tendermint with C libraries: `make build_c`. - ### Ubuntu -Install LevelDB with snappy: +Install LevelDB with snappy (optionally): ``` sudo apt-get update @@ -112,5 +110,13 @@ db_backend = "cleveldb" To install Tendermint, run ``` -CGO_LDFLAGS="-lsnappy" go install -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`" -tags "tendermint gcc" -o build/tendermint ./cmd/tendermint/ +CGO_LDFLAGS="-lsnappy" make install_c ``` + +or run + +``` +CGO_LDFLAGS="-lsnappy" make build_c +``` + +to put the binary in `./build`. diff --git a/docs/introduction/quick-start.md b/docs/introduction/quick-start.md index 05facadf..b77e7273 100644 --- a/docs/introduction/quick-start.md +++ b/docs/introduction/quick-start.md @@ -40,7 +40,11 @@ These files are found in `$HOME/.tendermint`: ``` $ ls $HOME/.tendermint -config.toml data genesis.json priv_validator.json +config data + +$ ls $HOME/.tendermint/config/ + +config.toml genesis.json node_key.json priv_validator.json ``` For a single, local node, no further configuration is required. @@ -110,7 +114,18 @@ source ~/.profile This will install `go` and other dependencies, get the Tendermint source code, then compile the `tendermint` binary. -Next, use the `tendermint testnet` command to create four directories of config files (found in `./mytestnet`) and copy each directory to the relevant machine in the cloud, so that each machine has `$HOME/mytestnet/node[0-3]` directory. Then from each machine, run: +Next, use the `tendermint testnet` command to create four directories of config files (found in `./mytestnet`) and copy each directory to the relevant machine in the cloud, so that each machine has `$HOME/mytestnet/node[0-3]` directory. + +Before you can start the network, you'll need peers identifiers (IPs are not enough and can change). We'll refer to them as ID1, ID2, ID3, ID4. + +``` +tendermint show_node_id --home ./mytestnet/node0 +tendermint show_node_id --home ./mytestnet/node1 +tendermint show_node_id --home ./mytestnet/node2 +tendermint show_node_id --home ./mytestnet/node3 +``` + +Finally, from each machine, run: ``` tendermint node --home ./mytestnet/node0 --proxy_app=kvstore --p2p.persistent_peers="ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656" @@ -121,6 +136,6 @@ tendermint node --home ./mytestnet/node3 --proxy_app=kvstore --p2p.persistent_pe Note that after the third node is started, blocks will start to stream in because >2/3 of validators (defined in the `genesis.json`) have come online. -Seeds can also be specified in the `config.toml`. See [here](../tendermint-core/configuration.md) for more information about configuration options. +Persistent peers can also be specified in the `config.toml`. See [here](../tendermint-core/configuration.md) for more information about configuration options. 
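For illustration, a sketch of what that entry might look like in the `[p2p]` section of each node's `config.toml`, reusing the same ID/IP placeholders as the commands above:

```
[p2p]
# comma-separated list of ID@host:port pairs
persistent_peers = "ID1@IP1:26656,ID2@IP2:26656,ID3@IP3:26656,ID4@IP4:26656"
```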
Transactions can then be sent as covered in the single, local node example above. diff --git a/docs/spec/blockchain/state.md b/docs/spec/blockchain/state.md index 0a07890f..0b46e503 100644 --- a/docs/spec/blockchain/state.md +++ b/docs/spec/blockchain/state.md @@ -98,6 +98,10 @@ type Evidence struct { type Validator struct { PubKeyTypes []string } + +type ValidatorParams struct { + PubKeyTypes []string +} ``` #### BlockSize diff --git a/docs/spec/reactors/consensus/consensus-reactor.md b/docs/spec/reactors/consensus/consensus-reactor.md index 23275b12..47c6949a 100644 --- a/docs/spec/reactors/consensus/consensus-reactor.md +++ b/docs/spec/reactors/consensus/consensus-reactor.md @@ -338,12 +338,11 @@ BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs` ## Broadcast routine -The Broadcast routine subscribes to an internal event bus to receive new round steps, votes messages and proposal -heartbeat messages, and broadcasts messages to peers upon receiving those events. +The Broadcast routine subscribes to an internal event bus to receive new round steps and votes messages, and broadcasts messages to peers upon receiving those +events. It broadcasts `NewRoundStepMessage` or `CommitStepMessage` upon new round state event. Note that broadcasting these messages does not depend on the PeerRoundState; it is sent on the StateChannel. Upon receiving VoteMessage it broadcasts `HasVoteMessage` message to its peers on the StateChannel. -`ProposalHeartbeatMessage` is sent the same way on the StateChannel. ## Channels diff --git a/docs/spec/reactors/consensus/consensus.md b/docs/spec/reactors/consensus/consensus.md index e5d1f4cc..55960874 100644 --- a/docs/spec/reactors/consensus/consensus.md +++ b/docs/spec/reactors/consensus/consensus.md @@ -89,33 +89,6 @@ type BlockPartMessage struct { } ``` -## ProposalHeartbeatMessage - -ProposalHeartbeatMessage is sent to signal that a node is alive and waiting for transactions -to be able to create a next block proposal. - -```go -type ProposalHeartbeatMessage struct { - Heartbeat Heartbeat -} -``` - -### Heartbeat - -Heartbeat contains validator information (address and index), -height, round and sequence number. It is signed by the private key of the validator. - -```go -type Heartbeat struct { - ValidatorAddress []byte - ValidatorIndex int - Height int64 - Round int - Sequence int - Signature Signature -} -``` - ## NewRoundStepMessage NewRoundStepMessage is sent for every step transition during the core consensus algorithm execution. diff --git a/docs/tendermint-core/using-tendermint.md b/docs/tendermint-core/using-tendermint.md index c9915042..148c874c 100644 --- a/docs/tendermint-core/using-tendermint.md +++ b/docs/tendermint-core/using-tendermint.md @@ -519,18 +519,16 @@ developers guide](../app-dev/app-development.md) for more details. ### Local Network -To run a network locally, say on a single machine, you must change the -`_laddr` fields in the `config.toml` (or using the flags) so that the -listening addresses of the various sockets don't conflict. Additionally, -you must set `addr_book_strict=false` in the `config.toml`, otherwise -Tendermint's p2p library will deny making connections to peers with the -same IP address. +To run a network locally, say on a single machine, you must change the `_laddr` +fields in the `config.toml` (or using the flags) so that the listening +addresses of the various sockets don't conflict. 
Additionally, you must set +`addr_book_strict=false` in the `config.toml`, otherwise Tendermint's p2p +library will deny making connections to peers with the same IP address. ### Upgrading -The Tendermint development cycle currently includes a lot of breaking changes. -Upgrading from an old version to a new version usually means throwing -away the chain data. Try out the -[tm-migrate](https://github.com/hxzqlh/tm-tools) tool written by -[@hxzqlh](https://github.com/hxzqlh) if you are keen to preserve the -state of your chain when upgrading to newer versions. +See the +[UPGRADING.md](https://github.com/tendermint/tendermint/blob/master/UPGRADING.md) +guide. You may need to reset your chain between major breaking releases. +Although, we expect Tendermint to have fewer breaking releases in the future +(especially after 1.0 release). diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 4e69596b..0640c1da 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -35,7 +35,7 @@ func initializeValidatorState(valAddr []byte, height int64) dbm.DB { LastBlockHeight: 0, LastBlockTime: tmtime.Now(), Validators: valSet, - NextValidators: valSet.CopyIncrementAccum(1), + NextValidators: valSet.CopyIncrementProposerPriority(1), LastHeightValidatorsChanged: 1, ConsensusParams: types.ConsensusParams{ Evidence: types.EvidenceParams{ diff --git a/libs/common/string.go b/libs/common/string.go index e125043d..ddf350b1 100644 --- a/libs/common/string.go +++ b/libs/common/string.go @@ -61,3 +61,16 @@ func ASCIITrim(s string) string { } return string(r) } + +// StringSliceEqual checks if string slices a and b are equal +func StringSliceEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/libs/common/string_test.go b/libs/common/string_test.go index 5e7ae98c..35b6fafc 100644 --- a/libs/common/string_test.go +++ b/libs/common/string_test.go @@ -3,6 +3,8 @@ package common import ( "testing" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/assert" ) @@ -35,3 +37,22 @@ func TestASCIITrim(t *testing.T) { assert.Equal(t, ASCIITrim(" a "), "a") assert.Panics(t, func() { ASCIITrim("\xC2\xA2") }) } + +func TestStringSliceEqual(t *testing.T) { + tests := []struct { + a []string + b []string + want bool + }{ + {[]string{"hello", "world"}, []string{"hello", "world"}, true}, + {[]string{"test"}, []string{"test"}, true}, + {[]string{"test1"}, []string{"test2"}, false}, + {[]string{"hello", "world."}, []string{"hello", "world!"}, false}, + {[]string{"only 1 word"}, []string{"two", "words!"}, false}, + {[]string{"two", "words!"}, []string{"only 1 word"}, false}, + } + for i, tt := range tests { + require.Equal(t, tt.want, StringSliceEqual(tt.a, tt.b), + "StringSliceEqual failed on test %d", i) + } +} diff --git a/libs/db/backend_test.go b/libs/db/backend_test.go index 2aebde1c..fb2a3d0b 100644 --- a/libs/db/backend_test.go +++ b/libs/db/backend_test.go @@ -180,13 +180,13 @@ func testDBIterator(t *testing.T, backend DBBackendType) { verifyIterator(t, db.ReverseIterator(nil, nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator") verifyIterator(t, db.Iterator(nil, int642Bytes(0)), []int64(nil), "forward iterator to 0") - verifyIterator(t, db.ReverseIterator(nil, int642Bytes(10)), []int64(nil), "reverse iterator 10") + verifyIterator(t, db.ReverseIterator(int642Bytes(10), nil), []int64(nil), "reverse iterator from 10 (ex)") verifyIterator(t, 
db.Iterator(int642Bytes(0), nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0") verifyIterator(t, db.Iterator(int642Bytes(1), nil), []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1") - verifyIterator(t, db.ReverseIterator(int642Bytes(10), nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10") - verifyIterator(t, db.ReverseIterator(int642Bytes(9), nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9") - verifyIterator(t, db.ReverseIterator(int642Bytes(8), nil), []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8") + verifyIterator(t, db.ReverseIterator(nil, int642Bytes(10)), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)") + verifyIterator(t, db.ReverseIterator(nil, int642Bytes(9)), []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)") + verifyIterator(t, db.ReverseIterator(nil, int642Bytes(8)), []int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)") verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "forward iterator from 5 to 6") verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "forward iterator from 5 to 7") @@ -195,20 +195,20 @@ func testDBIterator(t *testing.T, backend DBBackendType) { verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(8)), []int64{7}, "forward iterator from 6 to 8") verifyIterator(t, db.Iterator(int642Bytes(7), int642Bytes(8)), []int64{7}, "forward iterator from 7 to 8") - verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(4)), []int64{5}, "reverse iterator from 5 to 4") - verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(4)), []int64{5}, "reverse iterator from 6 to 4") - verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(4)), []int64{7, 5}, "reverse iterator from 7 to 4") - verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(5)), []int64(nil), "reverse iterator from 6 to 5") - verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(5)), []int64{7}, "reverse iterator from 7 to 5") - verifyIterator(t, db.ReverseIterator(int642Bytes(7), int642Bytes(6)), []int64{7}, "reverse iterator from 7 to 6") + verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(5)), []int64{4}, "reverse iterator from 5 (ex) to 4") + verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(6)), []int64{5, 4}, "reverse iterator from 6 (ex) to 4") + verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(7)), []int64{5, 4}, "reverse iterator from 7 (ex) to 4") + verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "reverse iterator from 6 (ex) to 5") + verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "reverse iterator from 7 (ex) to 5") + verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(7)), []int64(nil), "reverse iterator from 7 (ex) to 6") verifyIterator(t, db.Iterator(int642Bytes(0), int642Bytes(1)), []int64{0}, "forward iterator from 0 to 1") - verifyIterator(t, db.ReverseIterator(int642Bytes(9), int642Bytes(8)), []int64{9}, "reverse iterator from 9 to 8") + verifyIterator(t, db.ReverseIterator(int642Bytes(8), int642Bytes(9)), []int64{8}, "reverse iterator from 9 (ex) to 8") verifyIterator(t, db.Iterator(int642Bytes(2), int642Bytes(4)), []int64{2, 3}, "forward iterator from 2 to 4") verifyIterator(t, db.Iterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "forward iterator from 4 to 2") - verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(2)), []int64{4, 3}, 
"reverse iterator from 4 to 2") - verifyIterator(t, db.ReverseIterator(int642Bytes(2), int642Bytes(4)), []int64(nil), "reverse iterator from 2 to 4") + verifyIterator(t, db.ReverseIterator(int642Bytes(2), int642Bytes(4)), []int64{3, 2}, "reverse iterator from 4 (ex) to 2") + verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "reverse iterator from 2 (ex) to 4") } diff --git a/libs/db/c_level_db.go b/libs/db/c_level_db.go index 30746126..7f74b2a7 100644 --- a/libs/db/c_level_db.go +++ b/libs/db/c_level_db.go @@ -205,13 +205,13 @@ type cLevelDBIterator struct { func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator { if isReverse { - if start == nil { + if end == nil { source.SeekToLast() } else { - source.Seek(start) + source.Seek(end) if source.Valid() { - soakey := source.Key() // start or after key - if bytes.Compare(start, soakey) < 0 { + eoakey := source.Key() // end or after key + if bytes.Compare(end, eoakey) <= 0 { source.Prev() } } else { @@ -255,10 +255,11 @@ func (itr cLevelDBIterator) Valid() bool { } // If key is end or past it, invalid. + var start = itr.start var end = itr.end var key = itr.source.Key() if itr.isReverse { - if end != nil && bytes.Compare(key, end) <= 0 { + if start != nil && bytes.Compare(key, start) < 0 { itr.isInvalid = true return false } diff --git a/libs/db/fsdb.go b/libs/db/fsdb.go index b1d40c7b..2d82e774 100644 --- a/libs/db/fsdb.go +++ b/libs/db/fsdb.go @@ -161,7 +161,7 @@ func (db *FSDB) MakeIterator(start, end []byte, isReversed bool) Iterator { // We need a copy of all of the keys. // Not the best, but probably not a bottleneck depending. - keys, err := list(db.dir, start, end, isReversed) + keys, err := list(db.dir, start, end) if err != nil { panic(errors.Wrapf(err, "Listing keys in %s", db.dir)) } @@ -229,7 +229,7 @@ func remove(path string) error { // List keys in a directory, stripping of escape sequences and dir portions. // CONTRACT: returns os errors directly without wrapping. -func list(dirPath string, start, end []byte, isReversed bool) ([]string, error) { +func list(dirPath string, start, end []byte) ([]string, error) { dir, err := os.Open(dirPath) if err != nil { return nil, err @@ -247,7 +247,7 @@ func list(dirPath string, start, end []byte, isReversed bool) ([]string, error) return nil, fmt.Errorf("Failed to unescape %s while listing", name) } key := unescapeKey([]byte(n)) - if IsKeyInDomain(key, start, end, isReversed) { + if IsKeyInDomain(key, start, end) { keys = append(keys, string(key)) } } diff --git a/libs/db/go_level_db.go b/libs/db/go_level_db.go index 8a488792..fd487a4d 100644 --- a/libs/db/go_level_db.go +++ b/libs/db/go_level_db.go @@ -213,13 +213,13 @@ var _ Iterator = (*goLevelDBIterator)(nil) func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator { if isReverse { - if start == nil { + if end == nil { source.Last() } else { - valid := source.Seek(start) + valid := source.Seek(end) if valid { - soakey := source.Key() // start or after key - if bytes.Compare(start, soakey) < 0 { + eoakey := source.Key() // end or after key + if bytes.Compare(end, eoakey) <= 0 { source.Prev() } } else { @@ -265,11 +265,12 @@ func (itr *goLevelDBIterator) Valid() bool { } // If key is end or past it, invalid. 
+ var start = itr.start var end = itr.end var key = itr.source.Key() if itr.isReverse { - if end != nil && bytes.Compare(key, end) <= 0 { + if start != nil && bytes.Compare(key, start) < 0 { itr.isInvalid = true return false } diff --git a/libs/db/mem_db.go b/libs/db/mem_db.go index 58012301..ff516bc7 100644 --- a/libs/db/mem_db.go +++ b/libs/db/mem_db.go @@ -237,7 +237,7 @@ func (itr *memDBIterator) assertIsValid() { func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { keys := []string{} for key := range db.db { - inDomain := IsKeyInDomain([]byte(key), start, end, reverse) + inDomain := IsKeyInDomain([]byte(key), start, end) if inDomain { keys = append(keys, key) } diff --git a/libs/db/prefix_db.go b/libs/db/prefix_db.go index 9dc4ee97..40d72560 100644 --- a/libs/db/prefix_db.go +++ b/libs/db/prefix_db.go @@ -131,27 +131,13 @@ func (pdb *prefixDB) ReverseIterator(start, end []byte) Iterator { defer pdb.mtx.Unlock() var pstart, pend []byte - if start == nil { - // This may cause the underlying iterator to start with - // an item which doesn't start with prefix. We will skip - // that item later in this function. See 'skipOne'. - pstart = cpIncr(pdb.prefix) - } else { - pstart = append(cp(pdb.prefix), start...) - } + pstart = append(cp(pdb.prefix), start...) if end == nil { - // This may cause the underlying iterator to end with an - // item which doesn't start with prefix. The - // prefixIterator will terminate iteration - // automatically upon detecting this. - pend = cpDecr(pdb.prefix) + pend = cpIncr(pdb.prefix) } else { pend = append(cp(pdb.prefix), end...) } ritr := pdb.db.ReverseIterator(pstart, pend) - if start == nil { - skipOne(ritr, cpIncr(pdb.prefix)) - } return newPrefixIterator( pdb.prefix, start, @@ -310,7 +296,6 @@ func (itr *prefixIterator) Next() { } itr.source.Next() if !itr.source.Valid() || !bytes.HasPrefix(itr.source.Key(), itr.prefix) { - itr.source.Close() itr.valid = false return } @@ -345,13 +330,3 @@ func stripPrefix(key []byte, prefix []byte) (stripped []byte) { } return key[len(prefix):] } - -// If the first iterator item is skipKey, then -// skip it. 
-func skipOne(itr Iterator, skipKey []byte) { - if itr.Valid() { - if bytes.Equal(itr.Key(), skipKey) { - itr.Next() - } - } -} diff --git a/libs/db/prefix_db_test.go b/libs/db/prefix_db_test.go index 60809f15..e3e37c7d 100644 --- a/libs/db/prefix_db_test.go +++ b/libs/db/prefix_db_test.go @@ -113,8 +113,46 @@ func TestPrefixDBReverseIterator2(t *testing.T) { db := mockDBWithStuff() pdb := NewPrefixDB(db, bz("key")) + itr := pdb.ReverseIterator(bz(""), nil) + checkDomain(t, itr, bz(""), nil) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) + checkNext(t, itr, true) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, false) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator3(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + itr := pdb.ReverseIterator(nil, bz("")) checkDomain(t, itr, nil, bz("")) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator4(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.ReverseIterator(bz(""), bz("")) + checkDomain(t, itr, bz(""), bz("")) + checkInvalid(t, itr) + itr.Close() +} + +func TestPrefixDBReverseIterator5(t *testing.T) { + db := mockDBWithStuff() + pdb := NewPrefixDB(db, bz("key")) + + itr := pdb.ReverseIterator(bz("1"), nil) + checkDomain(t, itr, bz("1"), nil) checkItem(t, itr, bz("3"), bz("value3")) checkNext(t, itr, true) checkItem(t, itr, bz("2"), bz("value2")) @@ -125,23 +163,30 @@ func TestPrefixDBReverseIterator2(t *testing.T) { itr.Close() } -func TestPrefixDBReverseIterator3(t *testing.T) { +func TestPrefixDBReverseIterator6(t *testing.T) { db := mockDBWithStuff() pdb := NewPrefixDB(db, bz("key")) - itr := pdb.ReverseIterator(bz(""), nil) - checkDomain(t, itr, bz(""), nil) - checkItem(t, itr, bz(""), bz("value")) + itr := pdb.ReverseIterator(bz("2"), nil) + checkDomain(t, itr, bz("2"), nil) + checkItem(t, itr, bz("3"), bz("value3")) + checkNext(t, itr, true) + checkItem(t, itr, bz("2"), bz("value2")) checkNext(t, itr, false) checkInvalid(t, itr) itr.Close() } -func TestPrefixDBReverseIterator4(t *testing.T) { +func TestPrefixDBReverseIterator7(t *testing.T) { db := mockDBWithStuff() pdb := NewPrefixDB(db, bz("key")) - itr := pdb.ReverseIterator(bz(""), bz("")) + itr := pdb.ReverseIterator(nil, bz("2")) + checkDomain(t, itr, nil, bz("2")) + checkItem(t, itr, bz("1"), bz("value1")) + checkNext(t, itr, true) + checkItem(t, itr, bz(""), bz("value")) + checkNext(t, itr, false) checkInvalid(t, itr) itr.Close() } diff --git a/libs/db/types.go b/libs/db/types.go index ad78859a..9b9c6d0b 100644 --- a/libs/db/types.go +++ b/libs/db/types.go @@ -34,9 +34,9 @@ type DB interface { Iterator(start, end []byte) Iterator // Iterate over a domain of keys in descending order. End is exclusive. - // Start must be greater than end, or the Iterator is invalid. - // If start is nil, iterates from the last/greatest item (inclusive). - // If end is nil, iterates up to the first/least item (inclusive). + // Start must be less than end, or the Iterator is invalid. + // If start is nil, iterates up to the first/least item (inclusive). + // If end is nil, iterates from the last/greatest item (inclusive). // CONTRACT: No writes may happen within a domain while an iterator exists over it. 
// CONTRACT: start, end readonly []byte ReverseIterator(start, end []byte) Iterator diff --git a/libs/db/util.go b/libs/db/util.go index 51277ac4..e927c354 100644 --- a/libs/db/util.go +++ b/libs/db/util.go @@ -33,46 +33,13 @@ func cpIncr(bz []byte) (ret []byte) { return nil } -// Returns a slice of the same length (big endian) -// except decremented by one. -// Returns nil on underflow (e.g. if bz bytes are all 0x00) -// CONTRACT: len(bz) > 0 -func cpDecr(bz []byte) (ret []byte) { - if len(bz) == 0 { - panic("cpDecr expects non-zero bz length") - } - ret = cp(bz) - for i := len(bz) - 1; i >= 0; i-- { - if ret[i] > byte(0x00) { - ret[i]-- - return - } - ret[i] = byte(0xFF) - if i == 0 { - // Underflow - return nil - } - } - return nil -} - // See DB interface documentation for more information. -func IsKeyInDomain(key, start, end []byte, isReverse bool) bool { - if !isReverse { - if bytes.Compare(key, start) < 0 { - return false - } - if end != nil && bytes.Compare(end, key) <= 0 { - return false - } - return true - } else { - if start != nil && bytes.Compare(start, key) < 0 { - return false - } - if end != nil && bytes.Compare(key, end) <= 0 { - return false - } - return true +func IsKeyInDomain(key, start, end []byte) bool { + if bytes.Compare(key, start) < 0 { + return false } + if end != nil && bytes.Compare(end, key) <= 0 { + return false + } + return true } diff --git a/lite/dbprovider.go b/lite/dbprovider.go index e0c4e65b..9f4b264f 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -105,8 +105,8 @@ func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int } itr := dbp.db.ReverseIterator( - signedHeaderKey(chainID, maxHeight), - signedHeaderKey(chainID, minHeight-1), + signedHeaderKey(chainID, minHeight), + append(signedHeaderKey(chainID, maxHeight), byte(0x00)), ) defer itr.Close() @@ -190,8 +190,8 @@ func (dbp *DBProvider) deleteAfterN(chainID string, after int) error { dbp.logger.Info("DBProvider.deleteAfterN()...", "chainID", chainID, "after", after) itr := dbp.db.ReverseIterator( - signedHeaderKey(chainID, 1<<63-1), - signedHeaderKey(chainID, 0), + signedHeaderKey(chainID, 1), + append(signedHeaderKey(chainID, 1<<63-1), byte(0x00)), ) defer itr.Close() diff --git a/lite/doc.go b/lite/doc.go index 00dcce68..f68798dc 100644 --- a/lite/doc.go +++ b/lite/doc.go @@ -121,7 +121,7 @@ If we cannot update directly from H -> H' because there was too much change to the validator set, then we can look for some Hm (H < Hm < H') with a validator set Vm. Then we try to update H -> Hm and then Hm -> H' in two steps. If one of these steps doesn't work, then we continue bisecting, until we eventually -have to externally validate the valdiator set changes at every block. +have to externally validate the validator set changes at every block. Since we never trust any server in this protocol, only the signatures themselves, it doesn't matter if the seed comes from a (possibly malicious) diff --git a/node/node.go b/node/node.go index 8e41dfd1..b56a3594 100644 --- a/node/node.go +++ b/node/node.go @@ -210,13 +210,18 @@ func NewNode(config *cfg.Config, // what happened during block replay). state = sm.LoadState(stateDB) - // Ensure the state's block version matches that of the software. + // Log the version info. + logger.Info("Version info", + "software", version.TMCoreSemVer, + "block", version.BlockProtocol, + "p2p", version.P2PProtocol, + ) + + // If the state and software differ in block version, at least log it. 
if state.Version.Consensus.Block != version.BlockProtocol { - return nil, fmt.Errorf( - "Block version of the software does not match that of the state.\n"+ - "Got version.BlockProtocol=%v, state.Version.Consensus.Block=%v", - version.BlockProtocol, - state.Version.Consensus.Block, + logger.Info("Software and state have different block protocols", + "software", version.BlockProtocol, + "state", state.Version.Consensus.Block, ) } @@ -454,7 +459,7 @@ func NewNode(config *cfg.Config, Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "), SeedMode: config.P2P.SeedMode, }) - pexReactor.SetLogger(p2pLogger) + pexReactor.SetLogger(logger.With("module", "pex")) sw.AddReactor("PEX", pexReactor) } diff --git a/p2p/key.go b/p2p/key.go index fc64f27b..4e662f9f 100644 --- a/p2p/key.go +++ b/p2p/key.go @@ -6,7 +6,7 @@ import ( "fmt" "io/ioutil" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" ) diff --git a/p2p/metrics.go b/p2p/metrics.go index ed26d119..b066fb31 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -62,7 +62,7 @@ func PrometheusMetrics(namespace string) *Metrics { // NopMetrics returns no-op Metrics. func NopMetrics() *Metrics { return &Metrics{ - Peers: discard.NewGauge(), + Peers: discard.NewGauge(), PeerReceiveBytesTotal: discard.NewCounter(), PeerSendBytesTotal: discard.NewCounter(), PeerPendingSendBytes: discard.NewGauge(), diff --git a/p2p/peer_set.go b/p2p/peer_set.go index 25785615..87cf61da 100644 --- a/p2p/peer_set.go +++ b/p2p/peer_set.go @@ -98,13 +98,15 @@ func (ps *PeerSet) Get(peerKey ID) Peer { } // Remove discards peer by its Key, if the peer was previously memoized. -func (ps *PeerSet) Remove(peer Peer) { +// Returns true if the peer was removed, and false if it was not found. +// in the set. +func (ps *PeerSet) Remove(peer Peer) bool { ps.mtx.Lock() defer ps.mtx.Unlock() item := ps.lookup[peer.ID()] if item == nil { - return + return false } index := item.index @@ -116,7 +118,7 @@ func (ps *PeerSet) Remove(peer Peer) { if index == len(ps.list)-1 { ps.list = newList delete(ps.lookup, peer.ID()) - return + return true } // Replace the popped item with the last item in the old list. @@ -127,6 +129,7 @@ func (ps *PeerSet) Remove(peer Peer) { lastPeerItem.index = index ps.list = newList delete(ps.lookup, peer.ID()) + return true } // Size returns the number of unique items in the peerSet. diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index 04b877b0..3eb5357d 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -60,13 +60,15 @@ func TestPeerSetAddRemoveOne(t *testing.T) { n := len(peerList) // 1. 
Test removing from the front for i, peerAtFront := range peerList { - peerSet.Remove(peerAtFront) + removed := peerSet.Remove(peerAtFront) + assert.True(t, removed) wantSize := n - i - 1 for j := 0; j < 2; j++ { assert.Equal(t, false, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j) assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j) // Test the route of removing the now non-existent element - peerSet.Remove(peerAtFront) + removed := peerSet.Remove(peerAtFront) + assert.False(t, removed) } } @@ -81,7 +83,8 @@ func TestPeerSetAddRemoveOne(t *testing.T) { // b) In reverse, remove each element for i := n - 1; i >= 0; i-- { peerAtEnd := peerList[i] - peerSet.Remove(peerAtEnd) + removed := peerSet.Remove(peerAtEnd) + assert.True(t, removed) assert.Equal(t, false, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i) assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i) } @@ -105,7 +108,8 @@ func TestPeerSetAddRemoveMany(t *testing.T) { } for i, peer := range peers { - peerSet.Remove(peer) + removed := peerSet.Remove(peer) + assert.True(t, removed) if peerSet.Has(peer.ID()) { t.Errorf("Failed to remove peer") } diff --git a/p2p/peer_test.go b/p2p/peer_test.go index d3d9f0c7..e53d6013 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index e2fcc043..d8954f23 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -13,7 +13,7 @@ import ( "sync" "time" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" ) diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index 8f3ceb89..2e2f3f24 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" diff --git a/p2p/switch.go b/p2p/switch.go index 4996ebd9..0490eebb 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -211,7 +211,9 @@ func (sw *Switch) OnStop() { // Stop peers for _, p := range sw.peers.List() { p.Stop() - sw.peers.Remove(p) + if sw.peers.Remove(p) { + sw.metrics.Peers.Add(float64(-1)) + } } // Stop reactors @@ -299,8 +301,9 @@ func (sw *Switch) StopPeerGracefully(peer Peer) { } func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { - sw.peers.Remove(peer) - sw.metrics.Peers.Add(float64(-1)) + if sw.peers.Remove(peer) { + sw.metrics.Peers.Add(float64(-1)) + } peer.Stop() for _, reactor := range sw.reactors { reactor.RemovePeer(peer, reason) @@ -505,6 +508,12 @@ func (sw *Switch) acceptRoutine() { "err", err, "numPeers", sw.peers.Size(), ) + // We could instead have a retry loop around the acceptRoutine, + // but that would need to stop and let the node shutdown eventually. 
+ // So might as well panic and let process managers restart the node. + // There's no point in letting the node run without the acceptRoutine, + // since it won't be able to accept new connections. + panic(fmt.Errorf("accept routine exited: %v", err)) } break diff --git a/p2p/switch_test.go b/p2p/switch_test.go index f52e47f0..6c515be0 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -3,10 +3,17 @@ package p2p import ( "bytes" "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "regexp" + "strconv" "sync" "testing" "time" + stdprometheus "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -335,6 +342,54 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { assert.False(p.IsRunning()) } +func TestSwitchStopPeerForError(t *testing.T) { + s := httptest.NewServer(stdprometheus.UninstrumentedHandler()) + defer s.Close() + + scrapeMetrics := func() string { + resp, _ := http.Get(s.URL) + buf, _ := ioutil.ReadAll(resp.Body) + return string(buf) + } + + namespace, subsystem, name := config.TestInstrumentationConfig().Namespace, MetricsSubsystem, "peers" + re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`) + peersMetricValue := func() float64 { + matches := re.FindStringSubmatch(scrapeMetrics()) + f, _ := strconv.ParseFloat(matches[1], 64) + return f + } + + p2pMetrics := PrometheusMetrics(namespace) + + // make two connected switches + sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch { + // set metrics on sw1 + if i == 0 { + opt := WithMetrics(p2pMetrics) + opt(sw) + } + return initSwitchFunc(i, sw) + }) + + assert.Equal(t, len(sw1.Peers().List()), 1) + assert.EqualValues(t, 1, peersMetricValue()) + + // send messages to the peer from sw1 + p := sw1.Peers().List()[0] + p.Send(0x1, []byte("here's a message to send")) + + // stop sw2. this should cause the p to fail, + // which results in calling StopPeerForError internally + sw2.Stop() + + // now call StopPeerForError explicitly, eg. from a reactor + sw1.StopPeerForError(p, fmt.Errorf("some err")) + + assert.Equal(t, len(sw1.Peers().List()), 0) + assert.EqualValues(t, 0, peersMetricValue()) +} + func TestSwitchReconnectsToPersistentPeer(t *testing.T) { assert, require := assert.New(t), require.New(t) diff --git a/p2p/test_util.go b/p2p/test_util.go index b8a34600..44f27be4 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -5,7 +5,7 @@ import ( "net" "time" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" @@ -184,7 +184,7 @@ func MakeSwitch( // TODO: let the config be passed in? sw := initSwitch(i, NewSwitch(cfg, t, opts...)) - sw.SetLogger(log.TestingLogger()) + sw.SetLogger(log.TestingLogger().With("switch", i)) sw.SetNodeKey(&nodeKey) ni := nodeInfo.(DefaultNodeInfo) diff --git a/privval/priv_validator.go b/privval/priv_validator.go index a13f5426..ba777e1f 100644 --- a/privval/priv_validator.go +++ b/privval/priv_validator.go @@ -290,19 +290,6 @@ func (pv *FilePV) saveSigned(height int64, round int, step int8, pv.save() } -// SignHeartbeat signs a canonical representation of the heartbeat, along with the chainID. -// Implements PrivValidator. 
-func (pv *FilePV) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error {
-	pv.mtx.Lock()
-	defer pv.mtx.Unlock()
-	sig, err := pv.PrivKey.Sign(heartbeat.SignBytes(chainID))
-	if err != nil {
-		return err
-	}
-	heartbeat.Signature = sig
-	return nil
-}
-
 // String returns a string representation of the FilePV.
 func (pv *FilePV) String() string {
 	return fmt.Sprintf("PrivValidator{%v LH:%v, LR:%v, LS:%v}", pv.GetAddress(), pv.LastHeight, pv.LastRound, pv.LastStep)
diff --git a/privval/remote_signer.go b/privval/remote_signer.go
index eacc840c..5d6339c3 100644
--- a/privval/remote_signer.go
+++ b/privval/remote_signer.go
@@ -125,35 +125,6 @@ func (sc *RemoteSignerClient) SignProposal(
 	return nil
 }
 
-// SignHeartbeat implements PrivValidator.
-func (sc *RemoteSignerClient) SignHeartbeat(
-	chainID string,
-	heartbeat *types.Heartbeat,
-) error {
-	sc.lock.Lock()
-	defer sc.lock.Unlock()
-
-	err := writeMsg(sc.conn, &SignHeartbeatRequest{Heartbeat: heartbeat})
-	if err != nil {
-		return err
-	}
-
-	res, err := readMsg(sc.conn)
-	if err != nil {
-		return err
-	}
-	resp, ok := res.(*SignedHeartbeatResponse)
-	if !ok {
-		return ErrUnexpectedResponse
-	}
-	if resp.Error != nil {
-		return resp.Error
-	}
-	*heartbeat = *resp.Heartbeat
-
-	return nil
-}
-
 // Ping is used to check connection health.
 func (sc *RemoteSignerClient) Ping() error {
 	sc.lock.Lock()
@@ -186,8 +157,6 @@ func RegisterRemoteSignerMsg(cdc *amino.Codec) {
 	cdc.RegisterConcrete(&SignedVoteResponse{}, "tendermint/remotesigner/SignedVoteResponse", nil)
 	cdc.RegisterConcrete(&SignProposalRequest{}, "tendermint/remotesigner/SignProposalRequest", nil)
 	cdc.RegisterConcrete(&SignedProposalResponse{}, "tendermint/remotesigner/SignedProposalResponse", nil)
-	cdc.RegisterConcrete(&SignHeartbeatRequest{}, "tendermint/remotesigner/SignHeartbeatRequest", nil)
-	cdc.RegisterConcrete(&SignedHeartbeatResponse{}, "tendermint/remotesigner/SignedHeartbeatResponse", nil)
 	cdc.RegisterConcrete(&PingRequest{}, "tendermint/remotesigner/PingRequest", nil)
 	cdc.RegisterConcrete(&PingResponse{}, "tendermint/remotesigner/PingResponse", nil)
 }
@@ -218,16 +187,6 @@ type SignedProposalResponse struct {
 	Error *RemoteSignerError
 }
 
-// SignHeartbeatRequest is a PrivValidatorSocket message containing a Heartbeat.
-type SignHeartbeatRequest struct {
-	Heartbeat *types.Heartbeat
-}
-
-type SignedHeartbeatResponse struct {
-	Heartbeat *types.Heartbeat
-	Error *RemoteSignerError
-}
-
 // PingRequest is a PrivValidatorSocket message to keep the connection alive.
type PingRequest struct { } @@ -286,13 +245,6 @@ func handleRequest(req RemoteSignerMsg, chainID string, privVal types.PrivValida } else { res = &SignedProposalResponse{r.Proposal, nil} } - case *SignHeartbeatRequest: - err = privVal.SignHeartbeat(chainID, r.Heartbeat) - if err != nil { - res = &SignedHeartbeatResponse{nil, &RemoteSignerError{0, err.Error()}} - } else { - res = &SignedHeartbeatResponse{r.Heartbeat, nil} - } case *PingRequest: res = &PingResponse{} default: diff --git a/privval/tcp_test.go b/privval/tcp_test.go index 6549759d..d2489ad1 100644 --- a/privval/tcp_test.go +++ b/privval/tcp_test.go @@ -137,22 +137,6 @@ func TestSocketPVVoteKeepalive(t *testing.T) { assert.Equal(t, want.Signature, have.Signature) } -func TestSocketPVHeartbeat(t *testing.T) { - var ( - chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) - - want = &types.Heartbeat{} - have = &types.Heartbeat{} - ) - defer sc.Stop() - defer rs.Stop() - - require.NoError(t, rs.privVal.SignHeartbeat(chainID, want)) - require.NoError(t, sc.SignHeartbeat(chainID, have)) - assert.Equal(t, want.Signature, have.Signature) -} - func TestSocketPVDeadline(t *testing.T) { var ( addr = testFreeAddr(t) @@ -301,32 +285,6 @@ func TestRemoteSignProposalErrors(t *testing.T) { require.Error(t, err) } -func TestRemoteSignHeartbeatErrors(t *testing.T) { - var ( - chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV()) - hb = &types.Heartbeat{} - ) - defer sc.Stop() - defer rs.Stop() - - err := writeMsg(sc.conn, &SignHeartbeatRequest{Heartbeat: hb}) - require.NoError(t, err) - - res, err := readMsg(sc.conn) - require.NoError(t, err) - - resp := *res.(*SignedHeartbeatResponse) - require.NotNil(t, resp.Error) - require.Equal(t, resp.Error.Description, types.ErroringMockPVErr.Error()) - - err = rs.privVal.SignHeartbeat(chainID, hb) - require.Error(t, err) - - err = sc.SignHeartbeat(chainID, hb) - require.Error(t, err) -} - func TestErrUnexpectedResponse(t *testing.T) { var ( addr = testFreeAddr(t) @@ -362,22 +320,12 @@ func TestErrUnexpectedResponse(t *testing.T) { require.NotNil(t, rsConn) <-readyc - // Heartbeat: - go func(errc chan error) { - errc <- sc.SignHeartbeat(chainID, &types.Heartbeat{}) - }(errc) - // read request and write wrong response: - go testReadWriteResponse(t, &SignedVoteResponse{}, rsConn) - err = <-errc - require.Error(t, err) - require.Equal(t, err, ErrUnexpectedResponse) - // Proposal: go func(errc chan error) { errc <- sc.SignProposal(chainID, &types.Proposal{}) }(errc) // read request and write wrong response: - go testReadWriteResponse(t, &SignedHeartbeatResponse{}, rsConn) + go testReadWriteResponse(t, &SignedVoteResponse{}, rsConn) err = <-errc require.Error(t, err) require.Equal(t, err, ErrUnexpectedResponse) @@ -387,7 +335,7 @@ func TestErrUnexpectedResponse(t *testing.T) { errc <- sc.SignVote(chainID, &types.Vote{}) }(errc) // read request and write wrong response: - go testReadWriteResponse(t, &SignedHeartbeatResponse{}, rsConn) + go testReadWriteResponse(t, &SignedProposalResponse{}, rsConn) err = <-errc require.Error(t, err) require.Equal(t, err, ErrUnexpectedResponse) diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 2468a5f0..c9d516f9 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -15,6 +15,11 @@ import ( // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // result, err := client.ABCIQuery("", 
"abcd", true) // ``` // @@ -69,6 +74,11 @@ func ABCIQuery(path string, data cmn.HexBytes, height int64, prove bool) (*ctype // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // info, err := client.ABCIInfo() // ``` // diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index a9252f55..ee4009e5 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -18,6 +18,11 @@ import ( // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // info, err := client.BlockchainInfo(10, 10) // ``` // @@ -123,6 +128,11 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // info, err := client.Block(10) // ``` // @@ -235,6 +245,11 @@ func Block(heightPtr *int64) (*ctypes.ResultBlock, error) { // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // info, err := client.Commit(11) // ``` // @@ -329,6 +344,11 @@ func Commit(heightPtr *int64) (*ctypes.ResultCommit, error) { // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // info, err := client.BlockResults(10) // ``` // diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index 14662885..9968a1b2 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -16,6 +16,11 @@ import ( // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // state, err := client.Validators() // ``` // @@ -27,7 +32,7 @@ import ( // "result": { // "validators": [ // { -// "accum": "0", +// "proposer_priority": "0", // "voting_power": "10", // "pub_key": { // "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D", @@ -67,6 +72,11 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // state, err := client.DumpConsensusState() // ``` // @@ -92,7 +102,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" // }, // "voting_power": "10", -// "accum": "0" +// "proposer_priority": "0" // } // ], // "proposer": { @@ -102,7 +112,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" // }, // "voting_power": "10", -// "accum": "0" +// "proposer_priority": "0" // } // }, // "proposal": null, @@ -138,7 +148,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" // }, // "voting_power": "10", -// "accum": "0" +// "proposer_priority": "0" // } // ], // "proposer": { @@ -148,7 +158,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" // }, // "voting_power": "10", -// "accum": "0" 
+// "proposer_priority": "0" // } // } // }, @@ -225,6 +235,11 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // state, err := client.ConsensusState() // ``` // @@ -273,6 +288,11 @@ func ConsensusState() (*ctypes.ResultConsensusState, error) { // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // state, err := client.ConsensusParams() // ``` // diff --git a/rpc/core/events.go b/rpc/core/events.go index e7456f35..e4fd2041 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -54,11 +54,16 @@ import ( // import "github.com/tendermint/tendermint/types" // // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // ctx, cancel := context.WithTimeout(context.Background(), timeout) // defer cancel() // query := query.MustParse("tm.event = 'Tx' AND tx.height = 3") // txs := make(chan interface{}) -// err := client.Subscribe(ctx, "test-client", query, txs) +// err = client.Subscribe(ctx, "test-client", query, txs) // // go func() { // for e := range txs { @@ -116,7 +121,12 @@ func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscri // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Unsubscribe("test-client", query) +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() +// err = client.Unsubscribe("test-client", query) // ``` // // > The above command returns JSON structured like this: @@ -155,7 +165,12 @@ func Unsubscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultUnsub // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.UnsubscribeAll("test-client") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() +// err = client.UnsubscribeAll("test-client") // ``` // // > The above command returns JSON structured like this: diff --git a/rpc/core/health.go b/rpc/core/health.go index 0ec4b5b4..eeb8686b 100644 --- a/rpc/core/health.go +++ b/rpc/core/health.go @@ -13,6 +13,11 @@ import ( // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // result, err := client.Health() // ``` // diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 7b3c368a..ff6b029c 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -24,6 +24,11 @@ import ( // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // result, err := client.BroadcastTxAsync("123") // ``` // @@ -64,6 +69,11 @@ func BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // result, err := client.BroadcastTxSync("456") // ``` // @@ -118,6 +128,11 @@ func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err 
:= client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // result, err := client.BroadcastTxCommit("789") // ``` // @@ -198,7 +213,10 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { // TODO: configurable? var deliverTxTimeout = rpcserver.WriteTimeout / 2 select { - case deliverTxResMsg := <-deliverTxResCh: // The tx was included in a block. + case deliverTxResMsg, ok := <-deliverTxResCh: // The tx was included in a block. + if !ok { + return nil, errors.New("Error on broadcastTxCommit: expected DeliverTxResult, got nil. Did the Tendermint stop?") + } deliverTxRes := deliverTxResMsg.(types.EventDataTx) return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, @@ -225,6 +243,11 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // result, err := client.UnconfirmedTxs() // ``` // @@ -263,6 +286,11 @@ func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // result, err := client.UnconfirmedTxs() // ``` // diff --git a/rpc/core/net.go b/rpc/core/net.go index dbd4d8c0..b80902da 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -17,6 +17,11 @@ import ( // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // info, err := client.NetInfo() // ``` // @@ -95,6 +100,11 @@ func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // genesis, err := client.Genesis() // ``` // diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index ae8ae056..7f459654 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -2,7 +2,7 @@ package core import ( "github.com/tendermint/tendermint/consensus" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" mempl "github.com/tendermint/tendermint/mempool" diff --git a/rpc/core/status.go b/rpc/core/status.go index 793e1ade..224857d0 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -20,6 +20,11 @@ import ( // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // result, err := client.Status() // ``` // diff --git a/rpc/core/tx.go b/rpc/core/tx.go index ba632001..3bb0f28e 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -21,6 +21,11 @@ import ( // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer client.Stop() // tx, err := client.Tx([]byte("2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF"), true) // ``` // @@ -115,6 +120,11 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { // // ```go // client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// err := client.Start() +// if err != nil { +// // handle error +// } +// defer 
client.Stop() // q, err := tmquery.New("account.owner='Ivan'") // tx, err := client.TxSearch(q, true) // ``` diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 07628d1c..af5c4947 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -5,7 +5,7 @@ import ( "time" abci "github.com/tendermint/tendermint/abci/types" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" diff --git a/state/execution.go b/state/execution.go index b7c38f41..decadddf 100644 --- a/state/execution.go +++ b/state/execution.go @@ -107,8 +107,22 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b fail.Fail() // XXX + // validate the validator updates and convert to tendermint types + abciValUpdates := abciResponses.EndBlock.ValidatorUpdates + err = validateValidatorUpdates(abciValUpdates, state.ConsensusParams.Validator) + if err != nil { + return state, fmt.Errorf("Error in validator updates: %v", err) + } + validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates) + if err != nil { + return state, err + } + if len(validatorUpdates) > 0 { + blockExec.logger.Info("Updates to validators", "updates", makeValidatorUpdatesLogString(validatorUpdates)) + } + // Update the state with the block and responses. - state, err = updateState(blockExec.logger, state, blockID, &block.Header, abciResponses) + state, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates) if err != nil { return state, fmt.Errorf("Commit failed for application: %v", err) } @@ -132,7 +146,7 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b // Events are fired after everything else. // NOTE: if we crash between Commit and Save, events wont be fired during replay - fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses) + fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses, validatorUpdates) return state, nil } @@ -308,53 +322,98 @@ func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorS } +func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, + params types.ValidatorParams) error { + for _, valUpdate := range abciUpdates { + if valUpdate.GetPower() < 0 { + return fmt.Errorf("Voting power can't be negative %v", valUpdate) + } else if valUpdate.GetPower() == 0 { + // continue, since this is deleting the validator, and thus there is no + // pubkey to check + continue + } + + // Check if validator's pubkey matches an ABCI type in the consensus params + thisKeyType := valUpdate.PubKey.Type + if !params.IsValidPubkeyType(thisKeyType) { + return fmt.Errorf("Validator %v is using pubkey %s, which is unsupported for consensus", + valUpdate, thisKeyType) + } + } + return nil +} + // If more or equal than 1/3 of total voting power changed in one block, then // a light client could never prove the transition externally. See // ./lite/doc.go for details on how a light client tracks validators. 
-func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.ValidatorUpdate) ([]*types.Validator, error) { - updates, err := types.PB2TM.ValidatorUpdates(abciUpdates) - if err != nil { - return nil, err - } - - // these are tendermint types now +func updateValidators(currentSet *types.ValidatorSet, updates []*types.Validator) error { for _, valUpdate := range updates { + // should already have been checked if valUpdate.VotingPower < 0 { - return nil, fmt.Errorf("Voting power can't be negative %v", valUpdate) + return fmt.Errorf("Voting power can't be negative %v", valUpdate) } address := valUpdate.Address _, val := currentSet.GetByAddress(address) - if valUpdate.VotingPower == 0 { - // remove val + // valUpdate.VotingPower is ensured to be non-negative in validation method + if valUpdate.VotingPower == 0 { // remove val _, removed := currentSet.Remove(address) if !removed { - return nil, fmt.Errorf("Failed to remove validator %X", address) + return fmt.Errorf("Failed to remove validator %X", address) } - } else if val == nil { - // add val + } else if val == nil { // add val + // make sure we do not exceed MaxTotalVotingPower by adding this validator: + totalVotingPower := currentSet.TotalVotingPower() + updatedVotingPower := valUpdate.VotingPower + totalVotingPower + overflow := updatedVotingPower > types.MaxTotalVotingPower || updatedVotingPower < 0 + if overflow { + return fmt.Errorf( + "Failed to add new validator %v. Adding it would exceed max allowed total voting power %v", + valUpdate, + types.MaxTotalVotingPower) + } + // TODO: issue #1558 update spec according to the following: + // Set ProposerPriority to -C*totalVotingPower (with C ~= 1.125) to make sure validators can't + // unbond/rebond to reset their (potentially previously negative) ProposerPriority to zero. + // + // Contract: totalVotingPower < MaxTotalVotingPower to ensure ProposerPriority does + // not exceed the bounds of int64. + // + // Compute ProposerPriority = -1.125*totalVotingPower == -(totalVotingPower + (totalVotingPower >> 3)). + valUpdate.ProposerPriority = -(totalVotingPower + (totalVotingPower >> 3)) added := currentSet.Add(valUpdate) if !added { - return nil, fmt.Errorf("Failed to add new validator %v", valUpdate) + return fmt.Errorf("Failed to add new validator %v", valUpdate) } - } else { - // update val + } else { // update val + // make sure we do not exceed MaxTotalVotingPower by updating this validator: + totalVotingPower := currentSet.TotalVotingPower() + curVotingPower := val.VotingPower + updatedVotingPower := totalVotingPower - curVotingPower + valUpdate.VotingPower + overflow := updatedVotingPower > types.MaxTotalVotingPower || updatedVotingPower < 0 + if overflow { + return fmt.Errorf( + "Failed to update existing validator %v. Updating it would exceed max allowed total voting power %v", + valUpdate, + types.MaxTotalVotingPower) + } + updated := currentSet.Update(valUpdate) if !updated { - return nil, fmt.Errorf("Failed to update validator %X to %v", address, valUpdate) + return fmt.Errorf("Failed to update validator %X to %v", address, valUpdate) } } } - return updates, nil + return nil } // updateState returns a new State updated according to the header and responses. 
func updateState( - logger log.Logger, state State, blockID types.BlockID, header *types.Header, abciResponses *ABCIResponses, + validatorUpdates []*types.Validator, ) (State, error) { // Copy the valset so we can apply changes from EndBlock @@ -363,19 +422,17 @@ func updateState( // Update the validator set with the latest abciResponses. lastHeightValsChanged := state.LastHeightValidatorsChanged - if len(abciResponses.EndBlock.ValidatorUpdates) > 0 { - validatorUpdates, err := updateValidators(nValSet, abciResponses.EndBlock.ValidatorUpdates) + if len(validatorUpdates) > 0 { + err := updateValidators(nValSet, validatorUpdates) if err != nil { return state, fmt.Errorf("Error changing validator set: %v", err) } // Change results from this height but only applies to the next next height. lastHeightValsChanged = header.Height + 1 + 1 - - logger.Info("Updates to validators", "updates", makeValidatorUpdatesLogString(validatorUpdates)) } - // Update validator accums and set state variables. - nValSet.IncrementAccum(1) + // Update validator proposer priority and set state variables. + nValSet.IncrementProposerPriority(1) // Update the params with the latest abciResponses. nextParams := state.ConsensusParams @@ -417,7 +474,7 @@ func updateState( // Fire NewBlock, NewBlockHeader. // Fire TxEvent for every tx. // NOTE: if Tendermint crashes before commit, some or all of these events may be published again. -func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *types.Block, abciResponses *ABCIResponses) { +func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *types.Block, abciResponses *ABCIResponses, validatorUpdates []*types.Validator) { eventBus.PublishEventNewBlock(types.EventDataNewBlock{ Block: block, ResultBeginBlock: *abciResponses.BeginBlock, @@ -438,12 +495,9 @@ func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *ty }}) } - abciValUpdates := abciResponses.EndBlock.ValidatorUpdates - if len(abciValUpdates) > 0 { - // if there were an error, we would've stopped in updateValidators - updates, _ := types.PB2TM.ValidatorUpdates(abciValUpdates) + if len(validatorUpdates) > 0 { eventBus.PublishEventValidatorSetUpdates( - types.EventDataValidatorSetUpdates{ValidatorUpdates: updates}) + types.EventDataValidatorSetUpdates{ValidatorUpdates: validatorUpdates}) } } diff --git a/state/execution_test.go b/state/execution_test.go index 41d9a484..21df1ee5 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -12,6 +12,7 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" @@ -152,6 +153,76 @@ func TestBeginBlockByzantineValidators(t *testing.T) { } } +func TestValidateValidatorUpdates(t *testing.T) { + pubkey1 := ed25519.GenPrivKey().PubKey() + pubkey2 := ed25519.GenPrivKey().PubKey() + + secpKey := secp256k1.GenPrivKey().PubKey() + + defaultValidatorParams := types.ValidatorParams{[]string{types.ABCIPubKeyTypeEd25519}} + + testCases := []struct { + name string + + abciUpdates []abci.ValidatorUpdate + validatorParams types.ValidatorParams + + shouldErr bool + }{ + { + "adding a validator is OK", + + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: 20}}, + defaultValidatorParams, + + false, + }, 
+ { + "updating a validator is OK", + + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey1), Power: 20}}, + defaultValidatorParams, + + false, + }, + { + "removing a validator is OK", + + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: 0}}, + defaultValidatorParams, + + false, + }, + { + "adding a validator with negative power results in error", + + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: -100}}, + defaultValidatorParams, + + true, + }, + { + "adding a validator with pubkey thats not in validator params results in error", + + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(secpKey), Power: -100}}, + defaultValidatorParams, + + true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := validateValidatorUpdates(tc.abciUpdates, tc.validatorParams) + if tc.shouldErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + func TestUpdateValidators(t *testing.T) { pubkey1 := ed25519.GenPrivKey().PubKey() val1 := types.NewValidator(pubkey1, 10) @@ -194,7 +265,6 @@ func TestUpdateValidators(t *testing.T) { types.NewValidatorSet([]*types.Validator{val1}), false, }, - { "removing a non-existing validator results in error", @@ -204,24 +274,17 @@ func TestUpdateValidators(t *testing.T) { types.NewValidatorSet([]*types.Validator{val1}), true, }, - - { - "adding a validator with negative power results in error", - - types.NewValidatorSet([]*types.Validator{val1}), - []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: -100}}, - - types.NewValidatorSet([]*types.Validator{val1}), - true, - }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - _, err := updateValidators(tc.currentSet, tc.abciUpdates) + updates, err := types.PB2TM.ValidatorUpdates(tc.abciUpdates) + assert.NoError(t, err) + err = updateValidators(tc.currentSet, updates) if tc.shouldErr { assert.Error(t, err) } else { + assert.NoError(t, err) require.Equal(t, tc.resultingSet.Size(), tc.currentSet.Size()) assert.Equal(t, tc.resultingSet.TotalVotingPower(), tc.currentSet.TotalVotingPower()) diff --git a/state/state.go b/state/state.go index 451d6544..b6253b64 100644 --- a/state/state.go +++ b/state/state.go @@ -226,7 +226,7 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { validators[i] = types.NewValidator(val.PubKey, val.Power) } validatorSet = types.NewValidatorSet(validators) - nextValidatorSet = types.NewValidatorSet(validators).CopyIncrementAccum(1) + nextValidatorSet = types.NewValidatorSet(validators).CopyIncrementProposerPriority(1) } return State{ diff --git a/state/state_test.go b/state/state_test.go index 50346025..2ca5f8b2 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -5,12 +5,11 @@ import ( "fmt" "testing" - "github.com/tendermint/tendermint/libs/log" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" @@ -134,8 +133,8 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { {Code: 383}, {Data: []byte("Gotcha!"), Tags: []cmn.KVPair{ - cmn.KVPair{Key: []byte("a"), Value: []byte("1")}, - cmn.KVPair{Key: []byte("build"), Value: []byte("stuff")}, + {Key: []byte("a"), Value: []byte("1")}, + {Key: []byte("build"), Value: []byte("stuff")}, }}, 
}, types.ABCIResults{ @@ -223,6 +222,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { _, val := state.Validators.GetByIndex(0) power := val.VotingPower var err error + var validatorUpdates []*types.Validator for i := int64(1); i < highestHeight; i++ { // When we get to a change height, use the next pubkey. if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { @@ -230,8 +230,10 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { power++ } header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, i, power) - state, err = updateState(log.TestingLogger(), state, blockID, &header, responses) - assert.Nil(t, err) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) + require.NoError(t, err) + state, err = updateState(state, blockID, &header, responses, validatorUpdates) + require.NoError(t, err) nextHeight := state.LastBlockHeight + 1 saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) } @@ -261,11 +263,11 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } } -func TestStoreLoadValidatorsIncrementsAccum(t *testing.T) { +func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) { const valSetSize = 2 tearDown, stateDB, state := setupTestCase(t) state.Validators = genValSet(valSetSize) - state.NextValidators = state.Validators.CopyIncrementAccum(1) + state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) SaveState(stateDB, state) defer tearDown(t) @@ -273,13 +275,13 @@ func TestStoreLoadValidatorsIncrementsAccum(t *testing.T) { v0, err := LoadValidators(stateDB, nextHeight) assert.Nil(t, err) - acc0 := v0.Validators[0].Accum + acc0 := v0.Validators[0].ProposerPriority v1, err := LoadValidators(stateDB, nextHeight+1) assert.Nil(t, err) - acc1 := v1.Validators[0].Accum + acc1 := v1.Validators[0].ProposerPriority - assert.NotEqual(t, acc1, acc0, "expected Accum value to change between heights") + assert.NotEqual(t, acc1, acc0, "expected ProposerPriority value to change between heights") } // TestValidatorChangesSaveLoad tests saving and loading a validator set with @@ -289,7 +291,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) require.Equal(t, int64(0), state.LastBlockHeight) state.Validators = genValSet(valSetSize) - state.NextValidators = state.Validators.CopyIncrementAccum(1) + state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) SaveState(stateDB, state) defer tearDown(t) @@ -303,7 +305,10 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { // Save state etc. var err error - state, err = updateState(log.TestingLogger(), state, blockID, &header, responses) + var validatorUpdates []*types.Validator + validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) + require.NoError(t, err) + state, err = updateState(state, blockID, &header, responses, validatorUpdates) require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) @@ -375,6 +380,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { changeIndex := 0 cp := params[changeIndex] var err error + var validatorUpdates []*types.Validator for i := int64(1); i < highestHeight; i++ { // When we get to a change height, use the next params. 
if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { @@ -382,7 +388,9 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { cp = params[changeIndex] } header, blockID, responses := makeHeaderPartsResponsesParams(state, i, cp) - state, err = updateState(log.TestingLogger(), state, blockID, &header, responses) + validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) + require.NoError(t, err) + state, err = updateState(state, blockID, &header, responses, validatorUpdates) require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 diff --git a/state/store.go b/state/store.go index eb850fa7..6b01a829 100644 --- a/state/store.go +++ b/state/store.go @@ -194,7 +194,7 @@ func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { ), ) } - valInfo2.ValidatorSet.IncrementAccum(int(height - valInfo.LastHeightChanged)) // mutate + valInfo2.ValidatorSet.IncrementProposerPriority(int(height - valInfo.LastHeightChanged)) // mutate valInfo = valInfo2 } diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index a5913d5b..1e3733ba 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -172,10 +172,10 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) { for _, r := range ranges { if !hashesInitialized { - hashes = txi.matchRange(r, []byte(r.key)) + hashes = txi.matchRange(r, startKey(r.key)) hashesInitialized = true } else { - hashes = intersect(hashes, txi.matchRange(r, []byte(r.key))) + hashes = intersect(hashes, txi.matchRange(r, startKey(r.key))) } } } @@ -190,10 +190,10 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) { } if !hashesInitialized { - hashes = txi.match(c, startKey(c, height)) + hashes = txi.match(c, startKeyForCondition(c, height)) hashesInitialized = true } else { - hashes = intersect(hashes, txi.match(c, startKey(c, height))) + hashes = intersect(hashes, txi.match(c, startKeyForCondition(c, height))) } } @@ -359,14 +359,14 @@ func (txi *TxIndex) match(c query.Condition, startKey []byte) (hashes [][]byte) return } -func (txi *TxIndex) matchRange(r queryRange, prefix []byte) (hashes [][]byte) { +func (txi *TxIndex) matchRange(r queryRange, startKey []byte) (hashes [][]byte) { // create a map to prevent duplicates hashesMap := make(map[string][]byte) lowerBound := r.lowerBoundValue() upperBound := r.upperBoundValue() - it := dbm.IteratePrefix(txi.store, prefix) + it := dbm.IteratePrefix(txi.store, startKey) defer it.Close() LOOP: for ; it.Valid(); it.Next() { @@ -409,16 +409,6 @@ LOOP: /////////////////////////////////////////////////////////////////////////////// // Keys -func startKey(c query.Condition, height int64) []byte { - var key string - if height > 0 { - key = fmt.Sprintf("%s/%v/%d/", c.Tag, c.Operand, height) - } else { - key = fmt.Sprintf("%s/%v/", c.Tag, c.Operand) - } - return []byte(key) -} - func isTagKey(key []byte) bool { return strings.Count(string(key), tagKeySeparator) == 3 } @@ -429,11 +419,36 @@ func extractValueFromKey(key []byte) string { } func keyForTag(tag cmn.KVPair, result *types.TxResult) []byte { - return []byte(fmt.Sprintf("%s/%s/%d/%d", tag.Key, tag.Value, result.Height, result.Index)) + return []byte(fmt.Sprintf("%s/%s/%d/%d", + tag.Key, + tag.Value, + result.Height, + result.Index, + )) } func keyForHeight(result *types.TxResult) []byte { - return []byte(fmt.Sprintf("%s/%d/%d/%d", types.TxHeightKey, result.Height, result.Height, result.Index)) + return []byte(fmt.Sprintf("%s/%d/%d/%d", + types.TxHeightKey, + 
result.Height, + result.Height, + result.Index, + )) +} + +func startKeyForCondition(c query.Condition, height int64) []byte { + if height > 0 { + return startKey(c.Tag, c.Operand, height) + } + return startKey(c.Tag, c.Operand) +} + +func startKey(fields ...interface{}) []byte { + var b bytes.Buffer + for _, f := range fields { + b.Write([]byte(fmt.Sprintf("%v", f) + tagKeySeparator)) + } + return b.Bytes() } /////////////////////////////////////////////////////////////////////////////// diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index 7cf16dc5..343cfbb5 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -126,7 +126,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { } func TestTxSearchMultipleTxs(t *testing.T) { - allowedTags := []string{"account.number"} + allowedTags := []string{"account.number", "account.number.id"} indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags)) // indexed first, but bigger height (to test the order of transactions) @@ -160,6 +160,17 @@ func TestTxSearchMultipleTxs(t *testing.T) { err = indexer.Index(txResult3) require.NoError(t, err) + // indexed fourth (to test we don't include txs with similar tags) + // https://github.com/tendermint/tendermint/issues/2908 + txResult4 := txResultWithTags([]cmn.KVPair{ + {Key: []byte("account.number.id"), Value: []byte("1")}, + }) + txResult4.Tx = types.Tx("Mike's account") + txResult4.Height = 2 + txResult4.Index = 2 + err = indexer.Index(txResult4) + require.NoError(t, err) + results, err := indexer.Search(query.MustParse("account.number >= 1")) assert.NoError(t, err) diff --git a/tools/tm-monitor/monitor/node.go b/tools/tm-monitor/monitor/node.go index 8bc15a15..1dc113a6 100644 --- a/tools/tm-monitor/monitor/node.go +++ b/tools/tm-monitor/monitor/node.go @@ -7,7 +7,7 @@ import ( "github.com/pkg/errors" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/libs/events" "github.com/tendermint/tendermint/libs/log" ctypes "github.com/tendermint/tendermint/rpc/core/types" diff --git a/types/canonical.go b/types/canonical.go index a4f6f214..eabd7684 100644 --- a/types/canonical.go +++ b/types/canonical.go @@ -41,16 +41,6 @@ type CanonicalVote struct { ChainID string } -type CanonicalHeartbeat struct { - Type byte - Height int64 `binary:"fixed64"` - Round int `binary:"fixed64"` - Sequence int `binary:"fixed64"` - ValidatorAddress Address - ValidatorIndex int - ChainID string -} - //----------------------------------- // Canonicalize the structs @@ -91,18 +81,6 @@ func CanonicalizeVote(chainID string, vote *Vote) CanonicalVote { } } -func CanonicalizeHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalHeartbeat { - return CanonicalHeartbeat{ - Type: byte(HeartbeatType), - Height: heartbeat.Height, - Round: heartbeat.Round, - Sequence: heartbeat.Sequence, - ValidatorAddress: heartbeat.ValidatorAddress, - ValidatorIndex: heartbeat.ValidatorIndex, - ChainID: chainID, - } -} - // CanonicalTime can be used to stringify time in a canonical way. 
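The `startKey` rework in `state/txindex/kv` above terminates every key field with the tag separator, so a range search over `account.number` can no longer pick up entries indexed under `account.number.id` (issue #2908, exercised by the new `txResult4` test case). A small sketch of why the trailing separator matters, using plain strings instead of the real `dbm.IteratePrefix` iterator; the sample keys are illustrative only.

```go
package main

import (
	"fmt"
	"strings"
)

// Illustrative only: the kv indexer stores keys as tag/value/height/index.
const tagKeySeparator = "/"

// startKey mirrors the new variadic helper: every field is terminated with
// the separator, so "account.number" cannot prefix-match "account.number.id".
func startKey(fields ...interface{}) []byte {
	var b strings.Builder
	for _, f := range fields {
		b.WriteString(fmt.Sprintf("%v", f) + tagKeySeparator)
	}
	return []byte(b.String())
}

func main() {
	keys := []string{
		"account.number/1/1/0",    // should match a range query on account.number
		"account.number.id/1/2/2", // must NOT match (issue #2908)
	}

	oldPrefix := []byte("account.number")   // old behaviour: bare tag key
	newPrefix := startKey("account.number") // new behaviour: "account.number/"

	for _, k := range keys {
		fmt.Printf("%-25s old=%v new=%v\n", k,
			strings.HasPrefix(k, string(oldPrefix)),
			strings.HasPrefix(k, string(newPrefix)))
	}
	// old matches both keys (false positive); new matches only the first.
}
```

The real code builds the same separator-terminated prefix with a `bytes.Buffer` in the variadic `startKey` helper and hands it to `dbm.IteratePrefix` in `matchRange` and `match`.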
func CanonicalTime(t time.Time) string { // Note that sending time over amino resets it to diff --git a/types/event_bus.go b/types/event_bus.go index d941e9aa..055cbd3f 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -146,10 +146,6 @@ func (b *EventBus) PublishEventTx(data EventDataTx) error { return nil } -func (b *EventBus) PublishEventProposalHeartbeat(data EventDataProposalHeartbeat) error { - return b.Publish(EventProposalHeartbeat, data) -} - func (b *EventBus) PublishEventNewRoundStep(data EventDataRoundState) error { return b.Publish(EventNewRoundStep, data) } diff --git a/types/event_bus_test.go b/types/event_bus_test.go index 0af11ebd..6845927b 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -152,7 +152,7 @@ func TestEventBusPublish(t *testing.T) { err = eventBus.Subscribe(context.Background(), "test", tmquery.Empty{}, eventsCh) require.NoError(t, err) - const numEventsExpected = 15 + const numEventsExpected = 14 done := make(chan struct{}) go func() { numEvents := 0 @@ -172,8 +172,6 @@ func TestEventBusPublish(t *testing.T) { require.NoError(t, err) err = eventBus.PublishEventVote(EventDataVote{}) require.NoError(t, err) - err = eventBus.PublishEventProposalHeartbeat(EventDataProposalHeartbeat{}) - require.NoError(t, err) err = eventBus.PublishEventNewRoundStep(EventDataRoundState{}) require.NoError(t, err) err = eventBus.PublishEventTimeoutPropose(EventDataRoundState{}) diff --git a/types/events.go b/types/events.go index b22a1c8b..9b3b158d 100644 --- a/types/events.go +++ b/types/events.go @@ -18,7 +18,6 @@ const ( EventNewRound = "NewRound" EventNewRoundStep = "NewRoundStep" EventPolka = "Polka" - EventProposalHeartbeat = "ProposalHeartbeat" EventRelock = "Relock" EventTimeoutPropose = "TimeoutPropose" EventTimeoutWait = "TimeoutWait" @@ -47,7 +46,6 @@ func RegisterEventDatas(cdc *amino.Codec) { cdc.RegisterConcrete(EventDataNewRound{}, "tendermint/event/NewRound", nil) cdc.RegisterConcrete(EventDataCompleteProposal{}, "tendermint/event/CompleteProposal", nil) cdc.RegisterConcrete(EventDataVote{}, "tendermint/event/Vote", nil) - cdc.RegisterConcrete(EventDataProposalHeartbeat{}, "tendermint/event/ProposalHeartbeat", nil) cdc.RegisterConcrete(EventDataValidatorSetUpdates{}, "tendermint/event/ValidatorSetUpdates", nil) cdc.RegisterConcrete(EventDataString(""), "tendermint/event/ProposalString", nil) } @@ -75,10 +73,6 @@ type EventDataTx struct { TxResult } -type EventDataProposalHeartbeat struct { - Heartbeat *Heartbeat -} - // NOTE: This goes into the replay WAL type EventDataRoundState struct { Height int64 `json:"height"` @@ -143,7 +137,6 @@ var ( EventQueryNewRound = QueryForEvent(EventNewRound) EventQueryNewRoundStep = QueryForEvent(EventNewRoundStep) EventQueryPolka = QueryForEvent(EventPolka) - EventQueryProposalHeartbeat = QueryForEvent(EventProposalHeartbeat) EventQueryRelock = QueryForEvent(EventRelock) EventQueryTimeoutPropose = QueryForEvent(EventTimeoutPropose) EventQueryTimeoutWait = QueryForEvent(EventTimeoutWait) diff --git a/types/heartbeat.go b/types/heartbeat.go deleted file mode 100644 index 986e9384..00000000 --- a/types/heartbeat.go +++ /dev/null @@ -1,83 +0,0 @@ -package types - -import ( - "fmt" - - "github.com/pkg/errors" - "github.com/tendermint/tendermint/crypto" - cmn "github.com/tendermint/tendermint/libs/common" -) - -// Heartbeat is a simple vote-like structure so validators can -// alert others that they are alive and waiting for transactions. 
-// Note: We aren't adding ",omitempty" to Heartbeat's -// json field tags because we always want the JSON -// representation to be in its canonical form. -type Heartbeat struct { - ValidatorAddress Address `json:"validator_address"` - ValidatorIndex int `json:"validator_index"` - Height int64 `json:"height"` - Round int `json:"round"` - Sequence int `json:"sequence"` - Signature []byte `json:"signature"` -} - -// SignBytes returns the Heartbeat bytes for signing. -// It panics if the Heartbeat is nil. -func (heartbeat *Heartbeat) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeHeartbeat(chainID, heartbeat)) - if err != nil { - panic(err) - } - return bz -} - -// Copy makes a copy of the Heartbeat. -func (heartbeat *Heartbeat) Copy() *Heartbeat { - if heartbeat == nil { - return nil - } - heartbeatCopy := *heartbeat - return &heartbeatCopy -} - -// String returns a string representation of the Heartbeat. -func (heartbeat *Heartbeat) String() string { - if heartbeat == nil { - return "nil-heartbeat" - } - - return fmt.Sprintf("Heartbeat{%v:%X %v/%02d (%v) %v}", - heartbeat.ValidatorIndex, cmn.Fingerprint(heartbeat.ValidatorAddress), - heartbeat.Height, heartbeat.Round, heartbeat.Sequence, - fmt.Sprintf("/%X.../", cmn.Fingerprint(heartbeat.Signature[:]))) -} - -// ValidateBasic performs basic validation. -func (heartbeat *Heartbeat) ValidateBasic() error { - if len(heartbeat.ValidatorAddress) != crypto.AddressSize { - return fmt.Errorf("Expected ValidatorAddress size to be %d bytes, got %d bytes", - crypto.AddressSize, - len(heartbeat.ValidatorAddress), - ) - } - if heartbeat.ValidatorIndex < 0 { - return errors.New("Negative ValidatorIndex") - } - if heartbeat.Height < 0 { - return errors.New("Negative Height") - } - if heartbeat.Round < 0 { - return errors.New("Negative Round") - } - if heartbeat.Sequence < 0 { - return errors.New("Negative Sequence") - } - if len(heartbeat.Signature) == 0 { - return errors.New("Signature is missing") - } - if len(heartbeat.Signature) > MaxSignatureSize { - return fmt.Errorf("Signature is too big (max: %d)", MaxSignatureSize) - } - return nil -} diff --git a/types/heartbeat_test.go b/types/heartbeat_test.go deleted file mode 100644 index 0951c7b9..00000000 --- a/types/heartbeat_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/secp256k1" -) - -func TestHeartbeatCopy(t *testing.T) { - hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} - hbCopy := hb.Copy() - require.Equal(t, hbCopy, hb, "heartbeat copy should be the same") - hbCopy.Round = hb.Round + 10 - require.NotEqual(t, hbCopy, hb, "heartbeat copy mutation should not change original") - - var nilHb *Heartbeat - nilHbCopy := nilHb.Copy() - require.Nil(t, nilHbCopy, "copy of nil should also return nil") -} - -func TestHeartbeatString(t *testing.T) { - var nilHb *Heartbeat - require.Contains(t, nilHb.String(), "nil", "expecting a string and no panic") - - hb := &Heartbeat{ValidatorIndex: 1, Height: 11, Round: 2} - require.Equal(t, "Heartbeat{1:000000000000 11/02 (0) /000000000000.../}", hb.String()) - - var key ed25519.PrivKeyEd25519 - sig, err := key.Sign([]byte("Tendermint")) - require.NoError(t, err) - hb.Signature = sig - require.Equal(t, "Heartbeat{1:000000000000 11/02 (0) /FF41E371B9BF.../}", hb.String()) -} - -func TestHeartbeatWriteSignBytes(t 
*testing.T) { - chainID := "test_chain_id" - - { - testHeartbeat := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} - signBytes := testHeartbeat.SignBytes(chainID) - expected, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeHeartbeat(chainID, testHeartbeat)) - require.NoError(t, err) - require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Heartbeat") - } - - { - testHeartbeat := &Heartbeat{} - signBytes := testHeartbeat.SignBytes(chainID) - expected, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeHeartbeat(chainID, testHeartbeat)) - require.NoError(t, err) - require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Heartbeat") - } - - require.Panics(t, func() { - var nilHb *Heartbeat - signBytes := nilHb.SignBytes(chainID) - require.Equal(t, string(signBytes), "null") - }) -} - -func TestHeartbeatValidateBasic(t *testing.T) { - testCases := []struct { - testName string - malleateHeartBeat func(*Heartbeat) - expectErr bool - }{ - {"Good HeartBeat", func(hb *Heartbeat) {}, false}, - {"Invalid address size", func(hb *Heartbeat) { - hb.ValidatorAddress = nil - }, true}, - {"Negative validator index", func(hb *Heartbeat) { - hb.ValidatorIndex = -1 - }, true}, - {"Negative height", func(hb *Heartbeat) { - hb.Height = -1 - }, true}, - {"Negative round", func(hb *Heartbeat) { - hb.Round = -1 - }, true}, - {"Negative sequence", func(hb *Heartbeat) { - hb.Sequence = -1 - }, true}, - {"Missing signature", func(hb *Heartbeat) { - hb.Signature = nil - }, true}, - {"Signature too big", func(hb *Heartbeat) { - hb.Signature = make([]byte, MaxSignatureSize+1) - }, true}, - } - for _, tc := range testCases { - t.Run(tc.testName, func(t *testing.T) { - hb := &Heartbeat{ - ValidatorAddress: secp256k1.GenPrivKey().PubKey().Address(), - Signature: make([]byte, 4), - ValidatorIndex: 1, Height: 10, Round: 1} - - tc.malleateHeartBeat(hb) - assert.Equal(t, tc.expectErr, hb.ValidateBasic() != nil, "Validate Basic had an unexpected result") - }) - } -} diff --git a/types/params.go b/types/params.go index 81cf429f..ec8a8f57 100644 --- a/types/params.go +++ b/types/params.go @@ -69,6 +69,15 @@ func DefaultValidatorParams() ValidatorParams { return ValidatorParams{[]string{ABCIPubKeyTypeEd25519}} } +func (params *ValidatorParams) IsValidPubkeyType(pubkeyType string) bool { + for i := 0; i < len(params.PubKeyTypes); i++ { + if params.PubKeyTypes[i] == pubkeyType { + return true + } + } + return false +} + // Validate validates the ConsensusParams to ensure all values are within their // allowed limits, and returns an error if they are not. func (params *ConsensusParams) Validate() error { @@ -124,19 +133,7 @@ func (params *ConsensusParams) Hash() []byte { func (params *ConsensusParams) Equals(params2 *ConsensusParams) bool { return params.BlockSize == params2.BlockSize && params.Evidence == params2.Evidence && - stringSliceEqual(params.Validator.PubKeyTypes, params2.Validator.PubKeyTypes) -} - -func stringSliceEqual(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i := 0; i < len(a); i++ { - if a[i] != b[i] { - return false - } - } - return true + cmn.StringSliceEqual(params.Validator.PubKeyTypes, params2.Validator.PubKeyTypes) } // Update returns a copy of the params with updates from the non-zero fields of p2. 
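The `ValidatorParams.IsValidPubkeyType` helper added above is the hook that `validateValidatorUpdates` (in the `state/execution.go` changes earlier in this diff) uses to reject EndBlock validator updates whose key type is not listed in `ConsensusParams.Validator.PubKeyTypes`. A simplified sketch of that check follows; the structs and constant here are stand-ins for the real `types` and `abci` definitions.

```go
package main

import "fmt"

// Stand-in for types.ABCIPubKeyTypeEd25519.
const ABCIPubKeyTypeEd25519 = "ed25519"

// Stand-in for types.ValidatorParams.
type ValidatorParams struct {
	PubKeyTypes []string
}

func (params ValidatorParams) IsValidPubkeyType(pubkeyType string) bool {
	for _, t := range params.PubKeyTypes {
		if t == pubkeyType {
			return true
		}
	}
	return false
}

// Stand-in for abci.ValidatorUpdate.
type ValidatorUpdate struct {
	PubKeyType string
	Power      int64
}

// validateUpdate mirrors the checks in validateValidatorUpdates:
// negative power is rejected, zero power (a removal) skips the key check,
// and any other update must use an allowed key type.
func validateUpdate(u ValidatorUpdate, params ValidatorParams) error {
	if u.Power < 0 {
		return fmt.Errorf("voting power can't be negative: %d", u.Power)
	}
	if u.Power == 0 { // removal: nothing to check
		return nil
	}
	if !params.IsValidPubkeyType(u.PubKeyType) {
		return fmt.Errorf("pubkey type %q is unsupported for consensus", u.PubKeyType)
	}
	return nil
}

func main() {
	params := ValidatorParams{PubKeyTypes: []string{ABCIPubKeyTypeEd25519}}
	fmt.Println(validateUpdate(ValidatorUpdate{"ed25519", 20}, params))   // <nil>
	fmt.Println(validateUpdate(ValidatorUpdate{"secp256k1", 20}, params)) // error
}
```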
@@ -157,7 +154,9 @@ func (params ConsensusParams) Update(params2 *abci.ConsensusParams) ConsensusPar res.Evidence.MaxAge = params2.Evidence.MaxAge } if params2.Validator != nil { - res.Validator.PubKeyTypes = params2.Validator.PubKeyTypes + // Copy params2.Validator.PubkeyTypes, and set result's value to the copy. + // This avoids having to initialize the slice to 0 values, and then write to it again. + res.Validator.PubKeyTypes = append([]string{}, params2.Validator.PubKeyTypes...) } return res } diff --git a/types/priv_validator.go b/types/priv_validator.go index 25be5220..ebd64446 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -10,14 +10,13 @@ import ( ) // PrivValidator defines the functionality of a local Tendermint validator -// that signs votes, proposals, and heartbeats, and never double signs. +// that signs votes and proposals, and never double signs. type PrivValidator interface { GetAddress() Address // redundant since .PubKey().Address() GetPubKey() crypto.PubKey SignVote(chainID string, vote *Vote) error SignProposal(chainID string, proposal *Proposal) error - SignHeartbeat(chainID string, heartbeat *Heartbeat) error } //---------------------------------------- @@ -84,16 +83,6 @@ func (pv *MockPV) SignProposal(chainID string, proposal *Proposal) error { return nil } -// signHeartbeat signs the heartbeat without any checking. -func (pv *MockPV) SignHeartbeat(chainID string, heartbeat *Heartbeat) error { - sig, err := pv.privKey.Sign(heartbeat.SignBytes(chainID)) - if err != nil { - return err - } - heartbeat.Signature = sig - return nil -} - // String returns a string representation of the MockPV. func (pv *MockPV) String() string { return fmt.Sprintf("MockPV{%v}", pv.GetAddress()) @@ -121,11 +110,6 @@ func (pv *erroringMockPV) SignProposal(chainID string, proposal *Proposal) error return ErroringMockPVErr } -// signHeartbeat signs the heartbeat without any checking. -func (pv *erroringMockPV) SignHeartbeat(chainID string, heartbeat *Heartbeat) error { - return ErroringMockPVErr -} - // NewErroringMockPV returns a MockPV that fails on each signing request. Again, for testing only. func NewErroringMockPV() *erroringMockPV { return &erroringMockPV{&MockPV{ed25519.GenPrivKey()}} diff --git a/types/protobuf.go b/types/protobuf.go index 1535c1e3..0f0d25de 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -187,20 +187,19 @@ var PB2TM = pb2tm{} type pb2tm struct{} func (pb2tm) PubKey(pubKey abci.PubKey) (crypto.PubKey, error) { - // TODO: define these in crypto and use them - sizeEd := 32 - sizeSecp := 33 switch pubKey.Type { case ABCIPubKeyTypeEd25519: - if len(pubKey.Data) != sizeEd { - return nil, fmt.Errorf("Invalid size for PubKeyEd25519. Got %d, expected %d", len(pubKey.Data), sizeEd) + if len(pubKey.Data) != ed25519.PubKeyEd25519Size { + return nil, fmt.Errorf("Invalid size for PubKeyEd25519. Got %d, expected %d", + len(pubKey.Data), ed25519.PubKeyEd25519Size) } var pk ed25519.PubKeyEd25519 copy(pk[:], pubKey.Data) return pk, nil case ABCIPubKeyTypeSecp256k1: - if len(pubKey.Data) != sizeSecp { - return nil, fmt.Errorf("Invalid size for PubKeyEd25519. Got %d, expected %d", len(pubKey.Data), sizeSecp) + if len(pubKey.Data) != secp256k1.PubKeySecp256k1Size { + return nil, fmt.Errorf("Invalid size for PubKeySecp256k1. 
Got %d, expected %d", + len(pubKey.Data), secp256k1.PubKeySecp256k1Size) } var pk secp256k1.PubKeySecp256k1 copy(pk[:], pubKey.Data) diff --git a/types/signable.go b/types/signable.go index baabdff0..72d2ea9a 100644 --- a/types/signable.go +++ b/types/signable.go @@ -6,8 +6,8 @@ import ( ) var ( - // MaxSignatureSize is a maximum allowed signature size for the Heartbeat, - // Proposal and Vote. + // MaxSignatureSize is a maximum allowed signature size for the Proposal + // and Vote. // XXX: secp256k1 does not have Size nor MaxSize defined. MaxSignatureSize = cmn.MaxInt(ed25519.SignatureSize, 64) ) diff --git a/types/signed_msg_type.go b/types/signed_msg_type.go index 10e7c70c..6bd5f057 100644 --- a/types/signed_msg_type.go +++ b/types/signed_msg_type.go @@ -10,9 +10,6 @@ const ( // Proposals ProposalType SignedMsgType = 0x20 - - // Heartbeat - HeartbeatType SignedMsgType = 0x30 ) // IsVoteTypeValid returns true if t is a valid vote type. diff --git a/types/validator.go b/types/validator.go index 4bfd78a6..b7c6c679 100644 --- a/types/validator.go +++ b/types/validator.go @@ -11,40 +11,40 @@ import ( ) // Volatile state for each Validator -// NOTE: The Accum is not included in Validator.Hash(); +// NOTE: The ProposerPriority is not included in Validator.Hash(); // make sure to update that method if changes are made here type Validator struct { Address Address `json:"address"` PubKey crypto.PubKey `json:"pub_key"` VotingPower int64 `json:"voting_power"` - Accum int64 `json:"accum"` + ProposerPriority int64 `json:"proposer_priority"` } func NewValidator(pubKey crypto.PubKey, votingPower int64) *Validator { return &Validator{ - Address: pubKey.Address(), - PubKey: pubKey, - VotingPower: votingPower, - Accum: 0, + Address: pubKey.Address(), + PubKey: pubKey, + VotingPower: votingPower, + ProposerPriority: 0, } } -// Creates a new copy of the validator so we can mutate accum. +// Creates a new copy of the validator so we can mutate ProposerPriority. // Panics if the validator is nil. func (v *Validator) Copy() *Validator { vCopy := *v return &vCopy } -// Returns the one with higher Accum. -func (v *Validator) CompareAccum(other *Validator) *Validator { +// Returns the one with higher ProposerPriority. +func (v *Validator) CompareProposerPriority(other *Validator) *Validator { if v == nil { return other } - if v.Accum > other.Accum { + if v.ProposerPriority > other.ProposerPriority { return v - } else if v.Accum < other.Accum { + } else if v.ProposerPriority < other.ProposerPriority { return other } else { result := bytes.Compare(v.Address, other.Address) @@ -67,27 +67,27 @@ func (v *Validator) String() string { v.Address, v.PubKey, v.VotingPower, - v.Accum) + v.ProposerPriority) } // Hash computes the unique ID of a validator with a given voting power. -// It excludes the Accum value, which changes with every round. +// It excludes the ProposerPriority value, which changes with every round. func (v *Validator) Hash() []byte { return tmhash.Sum(v.Bytes()) } // Bytes computes the unique encoding of a validator with a given voting power. // These are the bytes that gets hashed in consensus. It excludes address -// as its redundant with the pubkey. This also excludes accum which changes -// every round. +// as its redundant with the pubkey. This also excludes ProposerPriority +// which changes every round. 
func (v *Validator) Bytes() []byte { - return cdcEncode((struct { + return cdcEncode(struct { PubKey crypto.PubKey VotingPower int64 }{ v.PubKey, v.VotingPower, - })) + }) } //---------------------------------------- diff --git a/types/validator_set.go b/types/validator_set.go index f5e57077..8b2d71b8 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "math" + "math/big" "sort" "strings" @@ -11,13 +12,22 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" ) +// The maximum allowed total voting power. +// We set the ProposerPriority of freshly added validators to -1.125*totalVotingPower. +// To compute 1.125*totalVotingPower efficiently, we do: +// totalVotingPower + (totalVotingPower >> 3) because +// x + (x >> 3) = x + x/8 = x * (1 + 0.125). +// MaxTotalVotingPower is the largest int64 `x` with the property that `x + (x >> 3)` is +// still in the bounds of int64. +const MaxTotalVotingPower = int64(8198552921648689607) + // ValidatorSet represent a set of *Validator at a given height. // The validators can be fetched by address or index. // The index is in order of .Address, so the indices are fixed // for all rounds of a given blockchain height. -// On the other hand, the .AccumPower of each validator and +// On the other hand, the .ProposerPriority of each validator and // the designated .GetProposer() of a set changes every round, -// upon calling .IncrementAccum(). +// upon calling .IncrementProposerPriority(). // NOTE: Not goroutine-safe. // NOTE: All get/set to validators should copy the value for safety. type ValidatorSet struct { @@ -29,10 +39,10 @@ type ValidatorSet struct { totalVotingPower int64 } +// NewValidatorSet initializes a ValidatorSet by copying over the +// values from `valz`, a list of Validators. If valz is nil or empty, +// the new ValidatorSet will have an empty list of Validators. func NewValidatorSet(valz []*Validator) *ValidatorSet { - if valz != nil && len(valz) == 0 { - panic("validator set initialization slice cannot be an empty slice (but it can be nil)") - } validators := make([]*Validator, len(valz)) for i, val := range valz { validators[i] = val.Copy() @@ -42,7 +52,7 @@ func NewValidatorSet(valz []*Validator) *ValidatorSet { Validators: validators, } if len(valz) > 0 { - vals.IncrementAccum(1) + vals.IncrementProposerPriority(1) } return vals @@ -53,48 +63,87 @@ func (vals *ValidatorSet) IsNilOrEmpty() bool { return vals == nil || len(vals.Validators) == 0 } -// Increment Accum and update the proposer on a copy, and return it. -func (vals *ValidatorSet) CopyIncrementAccum(times int) *ValidatorSet { +// Increment ProposerPriority and update the proposer on a copy, and return it. +func (vals *ValidatorSet) CopyIncrementProposerPriority(times int) *ValidatorSet { copy := vals.Copy() - copy.IncrementAccum(times) + copy.IncrementProposerPriority(times) return copy } -// IncrementAccum increments accum of each validator and updates the +// IncrementProposerPriority increments ProposerPriority of each validator and updates the // proposer. Panics if validator set is empty. // `times` must be positive. -func (vals *ValidatorSet) IncrementAccum(times int) { +func (vals *ValidatorSet) IncrementProposerPriority(times int) { if times <= 0 { - panic("Cannot call IncrementAccum with non-positive times") + panic("Cannot call IncrementProposerPriority with non-positive times") } - // Add VotingPower * times to each validator and order into heap. 
- validatorsHeap := cmn.NewHeap() - for _, val := range vals.Validators { - // Check for overflow both multiplication and sum. - val.Accum = safeAddClip(val.Accum, safeMulClip(val.VotingPower, int64(times))) - validatorsHeap.PushComparable(val, accumComparable{val}) - } - - // Decrement the validator with most accum times times. + const shiftEveryNthIter = 10 + var proposer *Validator + // call IncrementProposerPriority(1) times times: for i := 0; i < times; i++ { - mostest := validatorsHeap.Peek().(*Validator) - // mind underflow - mostest.Accum = safeSubClip(mostest.Accum, vals.TotalVotingPower()) + shiftByAvgProposerPriority := i%shiftEveryNthIter == 0 + proposer = vals.incrementProposerPriority(shiftByAvgProposerPriority) + } + isShiftedAvgOnLastIter := (times-1)%shiftEveryNthIter == 0 + if !isShiftedAvgOnLastIter { + validatorsHeap := cmn.NewHeap() + vals.shiftByAvgProposerPriority(validatorsHeap) + } + vals.Proposer = proposer +} - if i == times-1 { - vals.Proposer = mostest - } else { - validatorsHeap.Update(mostest, accumComparable{mostest}) +func (vals *ValidatorSet) incrementProposerPriority(subAvg bool) *Validator { + for _, val := range vals.Validators { + // Check for overflow for sum. + val.ProposerPriority = safeAddClip(val.ProposerPriority, val.VotingPower) + } + + validatorsHeap := cmn.NewHeap() + if subAvg { // shift by avg ProposerPriority + vals.shiftByAvgProposerPriority(validatorsHeap) + } else { // just update the heap + for _, val := range vals.Validators { + validatorsHeap.PushComparable(val, proposerPriorityComparable{val}) } } + + // Decrement the validator with most ProposerPriority: + mostest := validatorsHeap.Peek().(*Validator) + // mind underflow + mostest.ProposerPriority = safeSubClip(mostest.ProposerPriority, vals.TotalVotingPower()) + + return mostest +} + +func (vals *ValidatorSet) computeAvgProposerPriority() int64 { + n := int64(len(vals.Validators)) + sum := big.NewInt(0) + for _, val := range vals.Validators { + sum.Add(sum, big.NewInt(val.ProposerPriority)) + } + avg := sum.Div(sum, big.NewInt(n)) + if avg.IsInt64() { + return avg.Int64() + } + + // this should never happen: each val.ProposerPriority is in bounds of int64 + panic(fmt.Sprintf("Cannot represent avg ProposerPriority as an int64 %v", avg)) +} + +func (vals *ValidatorSet) shiftByAvgProposerPriority(validatorsHeap *cmn.Heap) { + avgProposerPriority := vals.computeAvgProposerPriority() + for _, val := range vals.Validators { + val.ProposerPriority = safeSubClip(val.ProposerPriority, avgProposerPriority) + validatorsHeap.PushComparable(val, proposerPriorityComparable{val}) + } } // Copy each validator into a new ValidatorSet func (vals *ValidatorSet) Copy() *ValidatorSet { validators := make([]*Validator, len(vals.Validators)) for i, val := range vals.Validators { - // NOTE: must copy, since IncrementAccum updates in place. + // NOTE: must copy, since IncrementProposerPriority updates in place. validators[i] = val.Copy() } return &ValidatorSet{ @@ -144,10 +193,18 @@ func (vals *ValidatorSet) Size() int { // TotalVotingPower returns the sum of the voting powers of all validators. 
func (vals *ValidatorSet) TotalVotingPower() int64 { if vals.totalVotingPower == 0 { + sum := int64(0) for _, val := range vals.Validators { // mind overflow - vals.totalVotingPower = safeAddClip(vals.totalVotingPower, val.VotingPower) + sum = safeAddClip(sum, val.VotingPower) } + if sum > MaxTotalVotingPower { + panic(fmt.Sprintf( + "Total voting power should be guarded to not exceed %v; got: %v", + MaxTotalVotingPower, + sum)) + } + vals.totalVotingPower = sum } return vals.totalVotingPower } @@ -168,7 +225,7 @@ func (vals *ValidatorSet) findProposer() *Validator { var proposer *Validator for _, val := range vals.Validators { if proposer == nil || !bytes.Equal(val.Address, proposer.Address) { - proposer = proposer.CompareAccum(val) + proposer = proposer.CompareProposerPriority(val) } } return proposer @@ -215,13 +272,22 @@ func (vals *ValidatorSet) Add(val *Validator) (added bool) { } } -// Update updates val and returns true. It returns false if val is not present -// in the set. +// Update updates the ValidatorSet by copying in the val. +// If the val is not found, it returns false; otherwise, +// it returns true. The val.ProposerPriority field is ignored +// and unchanged by this method. func (vals *ValidatorSet) Update(val *Validator) (updated bool) { index, sameVal := vals.GetByAddress(val.Address) if sameVal == nil { return false } + // Overwrite the ProposerPriority so it doesn't change. + // During block execution, the val passed in here comes + // from ABCI via PB2TM.ValidatorUpdates. Since ABCI + // doesn't know about ProposerPriority, PB2TM.ValidatorUpdates + // uses the default value of 0, which would cause issues for + // proposer selection every time a validator's voting power changes. + val.ProposerPriority = sameVal.ProposerPriority vals.Validators[index] = val.Copy() // Invalidate cache vals.Proposer = nil @@ -308,7 +374,7 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height i return nil } return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v", - talliedVotingPower, (vals.TotalVotingPower()*2/3 + 1)) + talliedVotingPower, vals.TotalVotingPower()*2/3+1) } // VerifyFutureCommit will check to see if the set would be valid with a different @@ -391,7 +457,7 @@ func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID strin if oldVotingPower <= oldVals.TotalVotingPower()*2/3 { return cmn.NewError("Invalid commit -- insufficient old voting power: got %v, needed %v", - oldVotingPower, (oldVals.TotalVotingPower()*2/3 + 1)) + oldVotingPower, oldVals.TotalVotingPower()*2/3+1) } return nil } @@ -405,7 +471,7 @@ func (vals *ValidatorSet) StringIndented(indent string) string { if vals == nil { return "nil-ValidatorSet" } - valStrings := []string{} + var valStrings []string vals.Iterate(func(index int, val *Validator) bool { valStrings = append(valStrings, val.String()) return false @@ -443,16 +509,16 @@ func (valz ValidatorsByAddress) Swap(i, j int) { } //------------------------------------- -// Use with Heap for sorting validators by accum +// Use with Heap for sorting validators by ProposerPriority -type accumComparable struct { +type proposerPriorityComparable struct { *Validator } -// We want to find the validator with the greatest accum. -func (ac accumComparable) Less(o interface{}) bool { - other := o.(accumComparable).Validator - larger := ac.CompareAccum(other) +// We want to find the validator with the greatest ProposerPriority. 
+func (ac proposerPriorityComparable) Less(o interface{}) bool { + other := o.(proposerPriorityComparable).Validator + larger := ac.CompareProposerPriority(other) return bytes.Equal(larger.Address, ac.Address) } @@ -476,24 +542,7 @@ func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []Pr } /////////////////////////////////////////////////////////////////////////////// -// Safe multiplication and addition/subtraction - -func safeMul(a, b int64) (int64, bool) { - if a == 0 || b == 0 { - return 0, false - } - if a == 1 { - return b, false - } - if b == 1 { - return a, false - } - if a == math.MinInt64 || b == math.MinInt64 { - return -1, true - } - c := a * b - return c, c/b != a -} +// Safe addition/subtraction func safeAdd(a, b int64) (int64, bool) { if b > 0 && a > math.MaxInt64-b { @@ -513,17 +562,6 @@ func safeSub(a, b int64) (int64, bool) { return a - b, false } -func safeMulClip(a, b int64) int64 { - c, overflow := safeMul(a, b) - if overflow { - if (a < 0 || b < 0) && !(a < 0 && b < 0) { - return math.MinInt64 - } - return math.MaxInt64 - } - return c -} - func safeAddClip(a, b int64) int64 { c, overflow := safeAdd(a, b) if overflow { diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 81124637..26793cc1 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -17,10 +17,13 @@ import ( ) func TestValidatorSetBasic(t *testing.T) { - assert.Panics(t, func() { NewValidatorSet([]*Validator{}) }) + // empty or nil validator lists are allowed, + // but attempting to IncrementProposerPriority on them will panic. + vset := NewValidatorSet([]*Validator{}) + assert.Panics(t, func() { vset.IncrementProposerPriority(1) }) - vset := NewValidatorSet(nil) - assert.Panics(t, func() { vset.IncrementAccum(1) }) + vset = NewValidatorSet(nil) + assert.Panics(t, func() { vset.IncrementProposerPriority(1) }) assert.EqualValues(t, vset, vset.Copy()) assert.False(t, vset.HasAddress([]byte("some val"))) @@ -42,7 +45,8 @@ func TestValidatorSetBasic(t *testing.T) { assert.Nil(t, vset.Hash()) // add - val = randValidator_() + + val = randValidator_(vset.TotalVotingPower()) assert.True(t, vset.Add(val)) assert.True(t, vset.HasAddress(val.Address)) idx, val2 := vset.GetByAddress(val.Address) @@ -55,15 +59,22 @@ func TestValidatorSetBasic(t *testing.T) { assert.Equal(t, val.VotingPower, vset.TotalVotingPower()) assert.Equal(t, val, vset.GetProposer()) assert.NotNil(t, vset.Hash()) - assert.NotPanics(t, func() { vset.IncrementAccum(1) }) + assert.NotPanics(t, func() { vset.IncrementProposerPriority(1) }) // update - assert.False(t, vset.Update(randValidator_())) - val.VotingPower = 100 + assert.False(t, vset.Update(randValidator_(vset.TotalVotingPower()))) + _, val = vset.GetByAddress(val.Address) + val.VotingPower += 100 + proposerPriority := val.ProposerPriority + // Mimic update from types.PB2TM.ValidatorUpdates which does not know about ProposerPriority + // and hence defaults to 0. + val.ProposerPriority = 0 assert.True(t, vset.Update(val)) + _, val = vset.GetByAddress(val.Address) + assert.Equal(t, proposerPriority, val.ProposerPriority) // remove - val2, removed := vset.Remove(randValidator_().Address) + val2, removed := vset.Remove(randValidator_(vset.TotalVotingPower()).Address) assert.Nil(t, val2) assert.False(t, removed) val2, removed = vset.Remove(val.Address) @@ -86,17 +97,17 @@ func TestCopy(t *testing.T) { } } -// Test that IncrementAccum requires positive times. 
-func TestIncrementAccumPositiveTimes(t *testing.T) { +// Test that IncrementProposerPriority requires positive times. +func TestIncrementProposerPriorityPositiveTimes(t *testing.T) { vset := NewValidatorSet([]*Validator{ newValidator([]byte("foo"), 1000), newValidator([]byte("bar"), 300), newValidator([]byte("baz"), 330), }) - assert.Panics(t, func() { vset.IncrementAccum(-1) }) - assert.Panics(t, func() { vset.IncrementAccum(0) }) - vset.IncrementAccum(1) + assert.Panics(t, func() { vset.IncrementProposerPriority(-1) }) + assert.Panics(t, func() { vset.IncrementProposerPriority(0) }) + vset.IncrementProposerPriority(1) } func BenchmarkValidatorSetCopy(b *testing.B) { @@ -125,11 +136,11 @@ func TestProposerSelection1(t *testing.T) { newValidator([]byte("bar"), 300), newValidator([]byte("baz"), 330), }) - proposers := []string{} + var proposers []string for i := 0; i < 99; i++ { val := vset.GetProposer() proposers = append(proposers, string(val.Address)) - vset.IncrementAccum(1) + vset.IncrementProposerPriority(1) } expected := `foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo foo baz bar foo foo foo baz foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo` if expected != strings.Join(proposers, " ") { @@ -152,18 +163,18 @@ func TestProposerSelection2(t *testing.T) { if !bytes.Equal(prop.Address, valList[ii].Address) { t.Fatalf("(%d): Expected %X. Got %X", i, valList[ii].Address, prop.Address) } - vals.IncrementAccum(1) + vals.IncrementProposerPriority(1) } // One validator has more than the others, but not enough to propose twice in a row *val2 = *newValidator(addr2, 400) vals = NewValidatorSet(valList) - // vals.IncrementAccum(1) + // vals.IncrementProposerPriority(1) prop := vals.GetProposer() if !bytes.Equal(prop.Address, addr2) { t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address) } - vals.IncrementAccum(1) + vals.IncrementProposerPriority(1) prop = vals.GetProposer() if !bytes.Equal(prop.Address, addr0) { t.Fatalf("Expected smallest address to be validator. Got %X", prop.Address) @@ -176,12 +187,12 @@ func TestProposerSelection2(t *testing.T) { if !bytes.Equal(prop.Address, addr2) { t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address) } - vals.IncrementAccum(1) + vals.IncrementProposerPriority(1) prop = vals.GetProposer() if !bytes.Equal(prop.Address, addr2) { t.Fatalf("Expected address with highest voting power to be second proposer. Got %X", prop.Address) } - vals.IncrementAccum(1) + vals.IncrementProposerPriority(1) prop = vals.GetProposer() if !bytes.Equal(prop.Address, addr0) { t.Fatalf("Expected smallest address to be validator. 
Got %X", prop.Address) @@ -197,7 +208,7 @@ func TestProposerSelection2(t *testing.T) { prop := vals.GetProposer() ii := prop.Address[19] propCount[ii]++ - vals.IncrementAccum(1) + vals.IncrementProposerPriority(1) } if propCount[0] != 40*N { @@ -222,12 +233,12 @@ func TestProposerSelection3(t *testing.T) { proposerOrder := make([]*Validator, 4) for i := 0; i < 4; i++ { proposerOrder[i] = vset.GetProposer() - vset.IncrementAccum(1) + vset.IncrementProposerPriority(1) } // i for the loop // j for the times - // we should go in order for ever, despite some IncrementAccums with times > 1 + // we should go in order for ever, despite some IncrementProposerPriority with times > 1 var i, j int for ; i < 10000; i++ { got := vset.GetProposer().Address @@ -254,7 +265,7 @@ func TestProposerSelection3(t *testing.T) { // sometimes its up to 5 times = (cmn.RandInt() % 4) + 1 } - vset.IncrementAccum(times) + vset.IncrementProposerPriority(times) j += times } @@ -270,16 +281,20 @@ func randPubKey() crypto.PubKey { return ed25519.PubKeyEd25519(pubKey) } -func randValidator_() *Validator { - val := NewValidator(randPubKey(), cmn.RandInt64()) - val.Accum = cmn.RandInt64() +func randValidator_(totalVotingPower int64) *Validator { + // this modulo limits the ProposerPriority/VotingPower to stay in the + // bounds of MaxTotalVotingPower minus the already existing voting power: + val := NewValidator(randPubKey(), cmn.RandInt64()%(MaxTotalVotingPower-totalVotingPower)) + val.ProposerPriority = cmn.RandInt64() % (MaxTotalVotingPower - totalVotingPower) return val } func randValidatorSet(numValidators int) *ValidatorSet { validators := make([]*Validator, numValidators) + totalVotingPower := int64(0) for i := 0; i < numValidators; i++ { - validators[i] = randValidator_() + validators[i] = randValidator_(totalVotingPower) + totalVotingPower += validators[i].VotingPower } return NewValidatorSet(validators) } @@ -302,52 +317,203 @@ func (valSet *ValidatorSet) fromBytes(b []byte) { //------------------------------------------------------------------- -func TestValidatorSetTotalVotingPowerOverflows(t *testing.T) { - vset := NewValidatorSet([]*Validator{ - {Address: []byte("a"), VotingPower: math.MaxInt64, Accum: 0}, - {Address: []byte("b"), VotingPower: math.MaxInt64, Accum: 0}, - {Address: []byte("c"), VotingPower: math.MaxInt64, Accum: 0}, - }) - - assert.EqualValues(t, math.MaxInt64, vset.TotalVotingPower()) -} - -func TestValidatorSetIncrementAccumOverflows(t *testing.T) { - // NewValidatorSet calls IncrementAccum(1) - vset := NewValidatorSet([]*Validator{ - // too much voting power - 0: {Address: []byte("a"), VotingPower: math.MaxInt64, Accum: 0}, - // too big accum - 1: {Address: []byte("b"), VotingPower: 10, Accum: math.MaxInt64}, - // almost too big accum - 2: {Address: []byte("c"), VotingPower: 10, Accum: math.MaxInt64 - 5}, - }) - - assert.Equal(t, int64(0), vset.Validators[0].Accum, "0") // because we decrement val with most voting power - assert.EqualValues(t, math.MaxInt64, vset.Validators[1].Accum, "1") - assert.EqualValues(t, math.MaxInt64, vset.Validators[2].Accum, "2") -} - -func TestValidatorSetIncrementAccumUnderflows(t *testing.T) { - // NewValidatorSet calls IncrementAccum(1) - vset := NewValidatorSet([]*Validator{ - 0: {Address: []byte("a"), VotingPower: math.MaxInt64, Accum: math.MinInt64}, - 1: {Address: []byte("b"), VotingPower: 1, Accum: math.MinInt64}, - }) - - vset.IncrementAccum(5) - - assert.EqualValues(t, math.MinInt64, vset.Validators[0].Accum, "0") - assert.EqualValues(t, math.MinInt64, 
vset.Validators[1].Accum, "1") -} - -func TestSafeMul(t *testing.T) { - f := func(a, b int64) bool { - c, overflow := safeMul(a, b) - return overflow || (!overflow && c == a*b) +func TestValidatorSetTotalVotingPowerPanicsOnOverflow(t *testing.T) { + // NewValidatorSet calls IncrementProposerPriority which calls TotalVotingPower() + // which should panic on overflows: + shouldPanic := func() { + NewValidatorSet([]*Validator{ + {Address: []byte("a"), VotingPower: math.MaxInt64, ProposerPriority: 0}, + {Address: []byte("b"), VotingPower: math.MaxInt64, ProposerPriority: 0}, + {Address: []byte("c"), VotingPower: math.MaxInt64, ProposerPriority: 0}, + }) } - if err := quick.Check(f, nil); err != nil { - t.Error(err) + + assert.Panics(t, shouldPanic) +} + +func TestAvgProposerPriority(t *testing.T) { + // Create Validator set without calling IncrementProposerPriority: + tcs := []struct { + vs ValidatorSet + want int64 + }{ + 0: {ValidatorSet{Validators: []*Validator{{ProposerPriority: 0}, {ProposerPriority: 0}, {ProposerPriority: 0}}}, 0}, + 1: {ValidatorSet{Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: 0}, {ProposerPriority: 0}}}, math.MaxInt64 / 3}, + 2: {ValidatorSet{Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: 0}}}, math.MaxInt64 / 2}, + 3: {ValidatorSet{Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: math.MaxInt64}}}, math.MaxInt64}, + 4: {ValidatorSet{Validators: []*Validator{{ProposerPriority: math.MinInt64}, {ProposerPriority: math.MinInt64}}}, math.MinInt64}, + } + for i, tc := range tcs { + got := tc.vs.computeAvgProposerPriority() + assert.Equal(t, tc.want, got, "test case: %v", i) + } +} + +func TestAveragingInIncrementProposerPriority(t *testing.T) { + // Test that the averaging works as expected inside of IncrementProposerPriority. + // Each validator comes with zero voting power which simplifies reasoning about + // the expected ProposerPriority. + tcs := []struct { + vs ValidatorSet + times int + avg int64 + }{ + 0: {ValidatorSet{ + Validators: []*Validator{ + {Address: []byte("a"), ProposerPriority: 1}, + {Address: []byte("b"), ProposerPriority: 2}, + {Address: []byte("c"), ProposerPriority: 3}}}, + 1, 2}, + 1: {ValidatorSet{ + Validators: []*Validator{ + {Address: []byte("a"), ProposerPriority: 10}, + {Address: []byte("b"), ProposerPriority: -10}, + {Address: []byte("c"), ProposerPriority: 1}}}, + // this should average twice but the average should be 0 after the first iteration + // (voting power is 0 -> no changes) + 11, 1 / 3}, + 2: {ValidatorSet{ + Validators: []*Validator{ + {Address: []byte("a"), ProposerPriority: 100}, + {Address: []byte("b"), ProposerPriority: -10}, + {Address: []byte("c"), ProposerPriority: 1}}}, + 1, 91 / 3}, + } + for i, tc := range tcs { + // work on copy to have the old ProposerPriorities: + newVset := tc.vs.CopyIncrementProposerPriority(tc.times) + for _, val := range tc.vs.Validators { + _, updatedVal := newVset.GetByAddress(val.Address) + assert.Equal(t, updatedVal.ProposerPriority, val.ProposerPriority-tc.avg, "test case: %v", i) + } + } +} + +func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) { + // Other than TestAveragingInIncrementProposerPriority this is a more complete test showing + // how each ProposerPriority changes in relation to the validator's voting power respectively. 
+ vals := ValidatorSet{Validators: []*Validator{ + {Address: []byte{0}, ProposerPriority: 0, VotingPower: 10}, + {Address: []byte{1}, ProposerPriority: 0, VotingPower: 1}, + {Address: []byte{2}, ProposerPriority: 0, VotingPower: 1}}} + tcs := []struct { + vals *ValidatorSet + wantProposerPrioritys []int64 + times int + wantProposer *Validator + }{ + + 0: { + vals.Copy(), + []int64{ + // Acumm+VotingPower-Avg: + 0 + 10 - 12 - 4, // mostest will be subtracted by total voting power (12) + 0 + 1 - 4, + 0 + 1 - 4}, + 1, + vals.Validators[0]}, + 1: { + vals.Copy(), + []int64{ + (0 + 10 - 12 - 4) + 10 - 12 + 4, // this will be mostest on 2nd iter, too + (0 + 1 - 4) + 1 + 4, + (0 + 1 - 4) + 1 + 4}, + 2, + vals.Validators[0]}, // increment twice -> expect average to be subtracted twice + 2: { + vals.Copy(), + []int64{ + ((0 + 10 - 12 - 4) + 10 - 12) + 10 - 12 + 4, // still mostest + ((0 + 1 - 4) + 1) + 1 + 4, + ((0 + 1 - 4) + 1) + 1 + 4}, + 3, + vals.Validators[0]}, + 3: { + vals.Copy(), + []int64{ + 0 + 4*(10-12) + 4 - 4, // still mostest + 0 + 4*1 + 4 - 4, + 0 + 4*1 + 4 - 4}, + 4, + vals.Validators[0]}, + 4: { + vals.Copy(), + []int64{ + 0 + 4*(10-12) + 10 + 4 - 4, // 4 iters was mostest + 0 + 5*1 - 12 + 4 - 4, // now this val is mostest for the 1st time (hence -12==totalVotingPower) + 0 + 5*1 + 4 - 4}, + 5, + vals.Validators[1]}, + 5: { + vals.Copy(), + []int64{ + 0 + 6*10 - 5*12 + 4 - 4, // mostest again + 0 + 6*1 - 12 + 4 - 4, // mostest once up to here + 0 + 6*1 + 4 - 4}, + 6, + vals.Validators[0]}, + 6: { + vals.Copy(), + []int64{ + 0 + 7*10 - 6*12 + 4 - 4, // in 7 iters this val is mostest 6 times + 0 + 7*1 - 12 + 4 - 4, // in 7 iters this val is mostest 1 time + 0 + 7*1 + 4 - 4}, + 7, + vals.Validators[0]}, + 7: { + vals.Copy(), + []int64{ + 0 + 8*10 - 7*12 + 4 - 4, // mostest + 0 + 8*1 - 12 + 4 - 4, + 0 + 8*1 + 4 - 4}, + 8, + vals.Validators[0]}, + 8: { + vals.Copy(), + []int64{ + 0 + 9*10 - 7*12 + 4 - 4, + 0 + 9*1 - 12 + 4 - 4, + 0 + 9*1 - 12 + 4 - 4}, // mostest + 9, + vals.Validators[2]}, + 9: { + vals.Copy(), + []int64{ + 0 + 10*10 - 8*12 + 4 - 4, // after 10 iters this is mostest again + 0 + 10*1 - 12 + 4 - 4, // after 6 iters this val is "mostest" once and not in between + 0 + 10*1 - 12 + 4 - 4}, // in between 10 iters this val is "mostest" once + 10, + vals.Validators[0]}, + 10: { + vals.Copy(), + []int64{ + // shift twice inside incrementProposerPriority (shift every 10th iter); + // don't shift at the end of IncremenctProposerPriority + // last avg should be zero because + // ProposerPriority of validator 0: (0 + 11*10 - 8*12 - 4) == 10 + // ProposerPriority of validator 1 and 2: (0 + 11*1 - 12 - 4) == -5 + // and (10 + 5 - 5) / 3 == 0 + 0 + 11*10 - 8*12 - 4 - 12 - 0, + 0 + 11*1 - 12 - 4 - 0, // after 6 iters this val is "mostest" once and not in between + 0 + 11*1 - 12 - 4 - 0}, // after 10 iters this val is "mostest" once + 11, + vals.Validators[0]}, + } + for i, tc := range tcs { + tc.vals.IncrementProposerPriority(tc.times) + + assert.Equal(t, tc.wantProposer.Address, tc.vals.GetProposer().Address, + "test case: %v", + i) + + for valIdx, val := range tc.vals.Validators { + assert.Equal(t, + tc.wantProposerPrioritys[valIdx], + val.ProposerPriority, + "test case: %v, validator: %v", + i, + valIdx) + } } } @@ -361,13 +527,6 @@ func TestSafeAdd(t *testing.T) { } } -func TestSafeMulClip(t *testing.T) { - assert.EqualValues(t, math.MaxInt64, safeMulClip(math.MinInt64, math.MinInt64)) - assert.EqualValues(t, math.MinInt64, safeMulClip(math.MaxInt64, math.MinInt64)) - 
assert.EqualValues(t, math.MinInt64, safeMulClip(math.MinInt64, math.MaxInt64))
-	assert.EqualValues(t, math.MaxInt64, safeMulClip(math.MaxInt64, 2))
-}
-
 func TestSafeAddClip(t *testing.T) {
 	assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, 10))
 	assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, math.MaxInt64))
diff --git a/version/version.go b/version/version.go
index 933328a6..921e1430 100644
--- a/version/version.go
+++ b/version/version.go
@@ -18,7 +18,7 @@ const (
 	// TMCoreSemVer is the current version of Tendermint Core.
 	// It's the Semantic Version of the software.
 	// Must be a string because scripts like dist.sh read this file.
-	TMCoreSemVer = "0.26.4"
+	TMCoreSemVer = "0.27.0"
 
 	// ABCISemVer is the semantic version of the ABCI library
 	ABCISemVer = "0.15.0"
@@ -36,10 +36,10 @@ func (p Protocol) Uint64() uint64 {
 var (
 	// P2PProtocol versions all p2p behaviour and msgs.
-	P2PProtocol Protocol = 4
+	P2PProtocol Protocol = 5
 
 	// BlockProtocol versions all block data structures and processing.
-	BlockProtocol Protocol = 7
+	BlockProtocol Protocol = 8
 )
 
 //------------------------------------------------------------------------
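
Below is a minimal, self-contained sketch of the proposer rotation that the `validator_set.go` changes above implement for the `times == 1` path: each round every validator's `ProposerPriority` grows by its voting power, priorities are re-centered around their average, and the highest-priority validator proposes and is decremented by the total voting power. The names (`val`, `valSet`, `increment`, `shiftByAvg`) are invented for this sketch and are not the Tendermint API; the real code additionally clips against int64 overflow, uses `big.Int` for the average, re-centers only every 10th iteration when `times > 1`, and breaks ties by address.

```go
package main

import "fmt"

type val struct {
	name     string
	power    int64
	priority int64
}

type valSet struct {
	vals []*val
}

func (s *valSet) totalPower() int64 {
	var sum int64
	for _, v := range s.vals {
		sum += v.power
	}
	return sum
}

// shiftByAvg re-centers priorities around zero, mirroring
// shiftByAvgProposerPriority in the diff above (without big.Int).
func (s *valSet) shiftByAvg() {
	var sum int64
	for _, v := range s.vals {
		sum += v.priority
	}
	avg := sum / int64(len(s.vals))
	for _, v := range s.vals {
		v.priority -= avg
	}
}

// increment runs one round: every validator gains its voting power,
// priorities are re-centered, and the highest-priority validator
// proposes and pays the total voting power.
func (s *valSet) increment() *val {
	for _, v := range s.vals {
		v.priority += v.power
	}
	s.shiftByAvg()
	proposer := s.vals[0]
	for _, v := range s.vals[1:] {
		if v.priority > proposer.priority {
			proposer = v
		}
	}
	proposer.priority -= s.totalPower()
	return proposer
}

func main() {
	s := &valSet{vals: []*val{
		{name: "foo", power: 1000},
		{name: "bar", power: 300},
		{name: "baz", power: 330},
	}}
	for i := 0; i < 10; i++ {
		fmt.Printf("%s ", s.increment().name)
	}
	fmt.Println()
}
```

Run as written, this prints `foo baz foo bar foo foo baz foo bar foo`, which matches the start of the expected sequence in `TestProposerSelection1` above: rotation stays proportional to voting power while the averaging keeps priorities bounded near zero.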