diff --git a/.codecov.yml b/.codecov.yml index 995865ee..7321557b 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -19,3 +19,8 @@ coverage: comment: layout: "header, diff" behavior: default # update if exists else create new + +ignore: + - "docs" + - "*.md" + - "*.rst" diff --git a/.editorconfig b/.editorconfig index 82f77436..481621f7 100644 --- a/.editorconfig +++ b/.editorconfig @@ -8,10 +8,7 @@ end_of_line = lf insert_final_newline = true trim_trailing_whitespace = true -[Makefile] -indent_style = tab - -[*.sh] +[*.{sh,Makefile}] indent_style = tab [*.proto] diff --git a/CHANGELOG.md b/CHANGELOG.md index 7bdaa30d..76a34d5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,38 @@ BUG FIXES: - Graceful handling/recovery for apps that have non-determinism or fail to halt - Graceful handling/recovery for violations of safety, or liveness +## 0.13.0 (December 6, 2017) + +BREAKING CHANGES: +- abci: update to v0.8 using gogo/protobuf; includes tx tags, vote info in RequestBeginBlock, data.Bytes everywhere, use int64, etc. 
+- types: block heights are now `int64` everywhere +- types & node: EventSwitch and EventCache have been replaced by EventBus and EventBuffer; event types have been overhauled +- node: EventSwitch methods now refer to EventBus +- rpc/lib/types: RPCResponse is no longer a pointer; WSRPCConnection interface has been modified +- rpc/client: WaitForOneEvent takes an EventsClient instead of types.EventSwitch +- rpc/client: Add/RemoveListenerForEvent are now Subscribe/Unsubscribe +- rpc/core/types: ResultABCIQuery wraps an abci.ResponseQuery +- rpc: `/subscribe` and `/unsubscribe` take `query` arg instead of `event` +- rpc: `/status` returns the LatestBlockTime in human readable form instead of in nanoseconds +- mempool: cached transactions return an error instead of an ABCI response with BadNonce + +FEATURES: +- rpc: new `/unsubscribe_all` WebSocket RPC endpoint +- rpc: new `/tx_search` endpoint for filtering transactions by more complex queries +- p2p/trust: new trust metric for tracking peers. See ADR-006 +- config: TxIndexConfig allows to set what DeliverTx tags to index + +IMPROVEMENTS: +- New asynchronous events system using `tmlibs/pubsub` +- logging: Various small improvements +- consensus: Graceful shutdown when app crashes +- tests: Fix various non-deterministic errors +- p2p: more defensive programming + +BUG FIXES: +- consensus: fix panic where prs.ProposalBlockParts is not initialized +- p2p: fix panic on bad channel + ## 0.12.1 (November 27, 2017) BUG FIXES: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0eb9f973..787fd718 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,9 +8,9 @@ Please make sure to use `gofmt` before every commit - the easiest way to do this ## Forking -Please note that Go requires code to live under absolute paths, which complicates forking. -While my fork lives at `https://github.com/ebuchman/tendermint`, -the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`. 
+Please note that Go requires code to live under absolute paths, which complicates forking. +While my fork lives at `https://github.com/ebuchman/tendermint`, +the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`. Instead, we use `git remote` to add the fork as a new remote for the original repo, `$GOPATH/src/github.com/tendermint/tendermint `, and do all the work there. @@ -38,11 +38,22 @@ We use [glide](https://github.com/masterminds/glide) to manage dependencies. That said, the master branch of every Tendermint repository should just build with `go get`, which means they should be kept up-to-date with their dependencies so we can get away with telling people they can just `go get` our software. Since some dependencies are not under our control, a third party may break our build, in which case we can fall back on `glide install`. Even for dependencies under our control, glide helps us keeps multiple repos in sync as they evolve. Anything with an executable, such as apps, tools, and the core, should use glide. -Run `bash scripts/glide/status.sh` to get a list of vendored dependencies that may not be up-to-date. +Run `bash scripts/glide/status.sh` to get a list of vendored dependencies that may not be up-to-date. + +## Vagrant + +If you are a [Vagrant](https://www.vagrantup.com/) user, all you have to do to get started hacking Tendermint is: + +``` +vagrant up +vagrant ssh +cd ~/go/src/github.com/tendermint/tendermint +make test +``` ## Testing -All repos should be hooked up to circle. +All repos should be hooked up to circle. If they have `.go` files in the root directory, they will be automatically tested by circle using `go test -v -race ./...`. If not, they will need a `circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and includes its continuous integration status using a badge in the `README.md`. 
## Branching Model and Release diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index 019e1f30..67d346b0 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -1,8 +1,8 @@ FROM alpine:3.6 # This is the release of tendermint to pull in. -ENV TM_VERSION 0.11.0 -ENV TM_SHA256SUM 7e443bac4d42f12e7beaf9cee63b4a565dad8c58895291fdedde8057088b70c5 +ENV TM_VERSION 0.12.0 +ENV TM_SHA256SUM be17469e92f04fc2a3663f891da28edbaa6c37c4d2f746736571887f4790555a # Tendermint will be looking for genesis file in /tendermint (unless you change # `genesis_file` in config.toml). You can put your config.toml and private diff --git a/DOCKER/README.md b/DOCKER/README.md index e5c6fee3..fd19c101 100644 --- a/DOCKER/README.md +++ b/DOCKER/README.md @@ -1,6 +1,7 @@ # Supported tags and respective `Dockerfile` links -- `0.11.0`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/9177cc1f64ca88a4a0243c5d1773d10fba67e201/DOCKER/Dockerfile) +- `0.12.0`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/70d8afa6e952e24c573ece345560a5971bf2cc0e/DOCKER/Dockerfile) +- `0.11.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/9177cc1f64ca88a4a0243c5d1773d10fba67e201/DOCKER/Dockerfile) - `0.10.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/e5342f4054ab784b2cd6150e14f01053d7c8deb2/DOCKER/Dockerfile) - `0.9.1`, `0.9`, [(Dockerfile)](https://github.com/tendermint/tendermint/blob/809e0e8c5933604ba8b2d096803ada7c5ec4dfd3/DOCKER/Dockerfile) - `0.9.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile) @@ -12,7 +13,7 @@ # Quick reference * **Where to get help:** - [Chat on Rocket](https://cosmos.rocket.chat/) + https://tendermint.com/community * **Where to file issues:** https://github.com/tendermint/tendermint/issues diff --git a/INSTALL.md b/INSTALL.md deleted file mode 100644 index 35b5ffec..00000000 --- a/INSTALL.md +++ /dev/null @@ -1 +0,0 @@ -The installation guide 
has moved to the [docs directory](docs/guides/install-from-source.md) in order to easily be rendered by the website. Please update your links accordingly. diff --git a/Makefile b/Makefile index 2271abeb..fb15dfc4 100644 --- a/Makefile +++ b/Makefile @@ -1,30 +1,32 @@ GOTOOLS = \ github.com/mitchellh/gox \ github.com/tcnksm/ghr \ - github.com/Masterminds/glide \ + github.com/alecthomas/gometalinter PACKAGES=$(shell go list ./... | grep -v '/vendor/') BUILD_TAGS?=tendermint TMHOME = $${TMHOME:-$$HOME/.tendermint} -all: install test +BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short HEAD`" -install: get_vendor_deps - @go install --ldflags '-extldflags "-static"' \ - --ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse HEAD`" ./cmd/tendermint +all: get_vendor_deps install test + +install: + CGO_ENABLED=0 go install $(BUILD_FLAGS) ./cmd/tendermint build: - go build \ - --ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse HEAD`" -o build/tendermint ./cmd/tendermint/ + CGO_ENABLED=0 go build $(BUILD_FLAGS) -o build/tendermint ./cmd/tendermint/ build_race: - go build -race -o build/tendermint ./cmd/tendermint + CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -o build/tendermint ./cmd/tendermint # dist builds binaries for all platforms and packages them for distribution dist: @BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'" test: + @echo "--> Running linter" + @make metalinter_test @echo "--> Running go test" @go test $(PACKAGES) @@ -35,7 +37,7 @@ test_race: test_integrations: @bash ./test/test.sh -release: +test_release: @go test -tags release $(PACKAGES) test100: @@ -58,29 +60,57 @@ get_deps: grep -v /vendor/ | sort | uniq | \ xargs go get -v -d -get_vendor_deps: ensure_tools +update_deps: + @echo "--> Updating dependencies" + @go get -d -u ./... 
+ +get_vendor_deps: + @hash glide 2>/dev/null || go get github.com/Masterminds/glide @rm -rf vendor/ @echo "--> Running glide install" @glide install -update_deps: tools - @echo "--> Updating dependencies" - @go get -d -u ./... - -revision: - -echo `git rev-parse --verify HEAD` > $(TMHOME)/revision - -echo `git rev-parse --verify HEAD` >> $(TMHOME)/revision_history +update_tools: + @echo "--> Updating tools" + @go get -u $(GOTOOLS) tools: - go get -u -v $(GOTOOLS) - -ensure_tools: - go get $(GOTOOLS) + @echo "--> Installing tools" + @go get $(GOTOOLS) + @gometalinter --install ### Formatting, linting, and vetting -megacheck: - @for pkg in ${PACKAGES}; do megacheck "$$pkg"; done +metalinter: + @gometalinter --vendor --deadline=600s --enable-all --disable=lll ./... +metalinter_test: + @gometalinter --vendor --deadline=600s --disable-all \ + --enable=deadcode \ + --enable=misspell \ + --enable=safesql \ + ./... -.PHONY: install build build_race dist test test_race test_integrations test100 draw_deps list_deps get_deps get_vendor_deps update_deps revision tools + # --enable=gas \ + #--enable=maligned \ + #--enable=dupl \ + #--enable=errcheck \ + #--enable=goconst \ + #--enable=gocyclo \ + #--enable=goimports \ + #--enable=golint \ <== comments on anything exported + #--enable=gosimple \ + #--enable=gotype \ + #--enable=ineffassign \ + #--enable=interfacer \ + #--enable=megacheck \ + #--enable=staticcheck \ + #--enable=structcheck \ + #--enable=unconvert \ + #--enable=unparam \ + #--enable=unused \ + #--enable=varcheck \ + #--enable=vet \ + #--enable=vetshadow \ + +.PHONY: install build build_race dist test test_race test_integrations test100 draw_deps list_deps get_deps get_vendor_deps update_deps update_tools tools test_release diff --git a/README.md b/README.md index 6aa4d878..c6ece82f 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short. 
[![API Reference]( https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 )](https://godoc.org/github.com/tendermint/tendermint) +[![Go version](https://img.shields.io/badge/go-1.9.2-blue.svg)](https://github.com/moovweb/gvm) [![Rocket.Chat](https://demo.rocket.chat/images/join-chat.svg)](https://cosmos.rocket.chat/) [![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE) [![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint) @@ -23,7 +24,7 @@ _NOTE: This is alpha software. Please contact us if you intend to run it in prod Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines. -For more information, from introduction to install to application development, [Read The Docs](http://tendermint.readthedocs.io/projects/tools/en/master). +For more information, from introduction to install to application development, [Read The Docs](https://tendermint.readthedocs.io/en/master/). ## Install @@ -33,13 +34,13 @@ To install from source, you should be able to: `go get -u github.com/tendermint/tendermint/cmd/tendermint` -For more details (or if it fails), [read the docs](http://tendermint.readthedocs.io/projects/tools/en/master/install.html). +For more details (or if it fails), [read the docs](https://tendermint.readthedocs.io/en/master/install.html). ## Resources ### Tendermint Core -All resources involving the use of, building application on, or developing for, tendermint, can be found at [Read The Docs](http://tendermint.readthedocs.io/projects/tools/en/master). Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs. 
+All resources involving the use of, building application on, or developing for, tendermint, can be found at [Read The Docs](https://tendermint.readthedocs.io/en/master/). Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs. ### Sub-projects diff --git a/Vagrantfile b/Vagrantfile index 0f69feed..ea804236 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -17,6 +17,7 @@ Vagrant.configure("2") do |config| usermod -a -G docker vagrant apt-get autoremove -y + apt-get install -y --no-install-recommends git curl -O https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz tar -xvf go1.9.linux-amd64.tar.gz rm -rf /usr/local/go diff --git a/benchmarks/codec_test.go b/benchmarks/codec_test.go index 7162e63d..631b303e 100644 --- a/benchmarks/codec_test.go +++ b/benchmarks/codec_test.go @@ -2,11 +2,13 @@ package benchmarks import ( "testing" + "time" "github.com/tendermint/go-crypto" - "github.com/tendermint/tendermint/p2p" "github.com/tendermint/go-wire" + proto "github.com/tendermint/tendermint/benchmarks/proto" + "github.com/tendermint/tendermint/p2p" ctypes "github.com/tendermint/tendermint/rpc/core/types" ) @@ -26,7 +28,7 @@ func BenchmarkEncodeStatusWire(b *testing.B) { PubKey: pubKey, LatestBlockHash: []byte("SOMEBYTES"), LatestBlockHeight: 123, - LatestBlockTime: 1234, + LatestBlockTime: time.Unix(0, 1234), } b.StartTimer() diff --git a/benchmarks/os_test.go b/benchmarks/os_test.go index 9c8fae65..dfadc312 100644 --- a/benchmarks/os_test.go +++ b/benchmarks/os_test.go @@ -18,12 +18,16 @@ func BenchmarkFileWrite(b *testing.B) { b.StartTimer() for i := 0; i < b.N; i++ { - file.Write([]byte(testString)) + _, err := file.Write([]byte(testString)) + if err != nil { + b.Error(err) + } } - file.Close() - err = os.Remove("benchmark_file_write.out") - if err != nil { + if err := file.Close(); err != nil { + b.Error(err) + } + if err := os.Remove("benchmark_file_write.out"); err != nil { b.Error(err) } } diff --git 
a/benchmarks/proto/test.pb.go b/benchmarks/proto/test.pb.go index 6539cae3..dc21a2a8 100644 --- a/benchmarks/proto/test.pb.go +++ b/benchmarks/proto/test.pb.go @@ -24,9 +24,6 @@ import bytes "bytes" import strings "strings" import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" -import sort "sort" -import strconv "strconv" -import reflect "reflect" import io "io" @@ -392,31 +389,6 @@ func (this *PubKeyEd25519) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func valueToGoStringTest(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func extensionToGoStringTest(e map[int32]github_com_gogo_protobuf_proto.Extension) string { - if e == nil { - return "nil" - } - s := "map[int32]proto.Extension{" - keys := make([]int, 0, len(e)) - for k := range e { - keys = append(keys, int(k)) - } - sort.Ints(keys) - ss := []string{} - for _, k := range keys { - ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) - } - s += strings.Join(ss, ",") + "}" - return s -} func (m *ResultStatus) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) @@ -586,24 +558,6 @@ func (m *PubKeyEd25519) MarshalTo(data []byte) (int, error) { return i, nil } -func encodeFixed64Test(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Test(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintTest(data []byte, offset int, v uint64) 
int { for v >= 1<<7 { data[offset] = uint8(v&0x7f | 0x80) @@ -689,9 +643,6 @@ func sovTest(x uint64) (n int) { } return n } -func sozTest(x uint64) (n int) { - return sovTest(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} func (this *ResultStatus) String() string { if this == nil { return "nil" @@ -742,14 +693,6 @@ func (this *PubKeyEd25519) String() string { }, "") return s } -func valueToStringTest(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} func (m *ResultStatus) Unmarshal(data []byte) error { var hasFields [1]uint64 l := len(data) diff --git a/benchmarks/simu/counter.go b/benchmarks/simu/counter.go index c6b4c161..b4d90325 100644 --- a/benchmarks/simu/counter.go +++ b/benchmarks/simu/counter.go @@ -12,7 +12,7 @@ import ( func main() { wsc := rpcclient.NewWSClient("127.0.0.1:46657", "/websocket") - _, err := wsc.Start() + err := wsc.Start() if err != nil { cmn.Exit(err.Error()) } diff --git a/blockchain/pool.go b/blockchain/pool.go index 47e59711..e39749dc 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -52,22 +52,22 @@ type BlockPool struct { mtx sync.Mutex // block requests - requesters map[int]*bpRequester - height int // the lowest key in requesters. + requesters map[int64]*bpRequester + height int64 // the lowest key in requesters. 
numPending int32 // number of requests pending assignment or block response // peers peers map[string]*bpPeer - maxPeerHeight int + maxPeerHeight int64 requestsCh chan<- BlockRequest timeoutsCh chan<- string } -func NewBlockPool(start int, requestsCh chan<- BlockRequest, timeoutsCh chan<- string) *BlockPool { +func NewBlockPool(start int64, requestsCh chan<- BlockRequest, timeoutsCh chan<- string) *BlockPool { bp := &BlockPool{ peers: make(map[string]*bpPeer), - requesters: make(map[int]*bpRequester), + requesters: make(map[int64]*bpRequester), height: start, numPending: 0, @@ -132,7 +132,7 @@ func (pool *BlockPool) removeTimedoutPeers() { } } -func (pool *BlockPool) GetStatus() (height int, numPending int32, lenRequesters int) { +func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -195,7 +195,7 @@ func (pool *BlockPool) PopRequest() { // Invalidates the block at pool.height, // Remove the peer and redo request from others. -func (pool *BlockPool) RedoRequest(height int) { +func (pool *BlockPool) RedoRequest(height int64) { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -232,15 +232,15 @@ func (pool *BlockPool) AddBlock(peerID string, block *types.Block, blockSize int } } -// MaxPeerHeight returns the heighest height reported by a peer -func (pool *BlockPool) MaxPeerHeight() int { +// MaxPeerHeight returns the highest height reported by a peer. +func (pool *BlockPool) MaxPeerHeight() int64 { pool.mtx.Lock() defer pool.mtx.Unlock() return pool.maxPeerHeight } // Sets the peer's alleged blockchain height. -func (pool *BlockPool) SetPeerHeight(peerID string, height int) { +func (pool *BlockPool) SetPeerHeight(peerID string, height int64) { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -279,7 +279,7 @@ func (pool *BlockPool) removePeer(peerID string) { // Pick an available peer with at least the given minHeight. // If no peers are available, returns nil. 
-func (pool *BlockPool) pickIncrAvailablePeer(minHeight int) *bpPeer { +func (pool *BlockPool) pickIncrAvailablePeer(minHeight int64) *bpPeer { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -304,17 +304,24 @@ func (pool *BlockPool) makeNextRequester() { pool.mtx.Lock() defer pool.mtx.Unlock() - nextHeight := pool.height + len(pool.requesters) + nextHeight := pool.height + pool.requestersLen() request := newBPRequester(pool, nextHeight) // request.SetLogger(pool.Logger.With("height", nextHeight)) pool.requesters[nextHeight] = request pool.numPending++ - request.Start() + err := request.Start() + if err != nil { + request.Logger.Error("Error starting request", "err", err) + } } -func (pool *BlockPool) sendRequest(height int, peerID string) { +func (pool *BlockPool) requestersLen() int64 { + return int64(len(pool.requesters)) +} + +func (pool *BlockPool) sendRequest(height int64, peerID string) { if !pool.IsRunning() { return } @@ -334,7 +341,8 @@ func (pool *BlockPool) debug() string { defer pool.mtx.Unlock() str := "" - for h := pool.height; h < pool.height+len(pool.requesters); h++ { + nextHeight := pool.height + pool.requestersLen() + for h := pool.height; h < nextHeight; h++ { if pool.requesters[h] == nil { str += cmn.Fmt("H(%v):X ", h) } else { @@ -352,7 +360,7 @@ type bpPeer struct { id string recvMonitor *flow.Monitor - height int + height int64 numPending int32 timeout *time.Timer didTimeout bool @@ -360,7 +368,7 @@ type bpPeer struct { logger log.Logger } -func newBPPeer(pool *BlockPool, peerID string, height int) *bpPeer { +func newBPPeer(pool *BlockPool, peerID string, height int64) *bpPeer { peer := &bpPeer{ pool: pool, id: peerID, @@ -421,7 +429,7 @@ func (peer *bpPeer) onTimeout() { type bpRequester struct { cmn.BaseService pool *BlockPool - height int + height int64 gotBlockCh chan struct{} redoCh chan struct{} @@ -430,7 +438,7 @@ type bpRequester struct { block *types.Block } -func newBPRequester(pool *BlockPool, height int) *bpRequester { +func 
newBPRequester(pool *BlockPool, height int64) *bpRequester { bpr := &bpRequester{ pool: pool, height: height, @@ -542,6 +550,6 @@ OUTER_LOOP: //------------------------------------- type BlockRequest struct { - Height int + Height int64 PeerID string } diff --git a/blockchain/pool_test.go b/blockchain/pool_test.go index a1fce2da..3e347fd2 100644 --- a/blockchain/pool_test.go +++ b/blockchain/pool_test.go @@ -16,27 +16,32 @@ func init() { type testPeer struct { id string - height int + height int64 } -func makePeers(numPeers int, minHeight, maxHeight int) map[string]testPeer { +func makePeers(numPeers int, minHeight, maxHeight int64) map[string]testPeer { peers := make(map[string]testPeer, numPeers) for i := 0; i < numPeers; i++ { peerID := cmn.RandStr(12) - height := minHeight + rand.Intn(maxHeight-minHeight) + height := minHeight + rand.Int63n(maxHeight-minHeight) peers[peerID] = testPeer{peerID, height} } return peers } func TestBasic(t *testing.T) { - start := 42 + start := int64(42) peers := makePeers(10, start+1, 1000) timeoutsCh := make(chan string, 100) requestsCh := make(chan BlockRequest, 100) pool := NewBlockPool(start, requestsCh, timeoutsCh) pool.SetLogger(log.TestingLogger()) - pool.Start() + + err := pool.Start() + if err != nil { + t.Error(err) + } + defer pool.Stop() // Introduce each peer. 
@@ -82,13 +87,16 @@ func TestBasic(t *testing.T) { } func TestTimeout(t *testing.T) { - start := 42 + start := int64(42) peers := makePeers(10, start+1, 1000) timeoutsCh := make(chan string, 100) requestsCh := make(chan BlockRequest, 100) pool := NewBlockPool(start, requestsCh, timeoutsCh) pool.SetLogger(log.TestingLogger()) - pool.Start() + err := pool.Start() + if err != nil { + t.Error(err) + } defer pool.Stop() for _, peer := range peers { diff --git a/blockchain/reactor.go b/blockchain/reactor.go index 9ac58031..60626b3d 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -49,14 +49,11 @@ type BlockchainReactor struct { requestsCh chan BlockRequest timeoutsCh chan string - evsw types.EventSwitch + eventBus *types.EventBus } // NewBlockchainReactor returns new reactor instance. func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConnConsensus, store *BlockStore, fastSync bool) *BlockchainReactor { - if state.LastBlockHeight == store.Height()-1 { - store.height-- // XXX HACK, make this better - } if state.LastBlockHeight != store.Height() { cmn.PanicSanity(cmn.Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())) } @@ -88,9 +85,11 @@ func (bcR *BlockchainReactor) SetLogger(l log.Logger) { // OnStart implements cmn.Service. 
func (bcR *BlockchainReactor) OnStart() error { - bcR.BaseReactor.OnStart() + if err := bcR.BaseReactor.OnStart(); err != nil { + return err + } if bcR.fastSync { - _, err := bcR.pool.Start() + err := bcR.pool.Start() if err != nil { return err } @@ -108,7 +107,7 @@ func (bcR *BlockchainReactor) OnStop() { // GetChannels implements Reactor func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { return []*p2p.ChannelDescriptor{ - &p2p.ChannelDescriptor{ + { ID: BlockchainChannel, Priority: 10, SendQueueCapacity: 1000, @@ -121,6 +120,8 @@ func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { if !peer.Send(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) { // doing nothing, will try later in `poolRoutine` } + // peer is added to the pool once we receive the first + // bcStatusResponseMessage from the peer and call pool.SetPeerHeight } // RemovePeer implements Reactor by removing peer from the pool. @@ -224,7 +225,7 @@ FOR_LOOP: } case <-statusUpdateTicker.C: // ask for status updates - go bcR.BroadcastStatusRequest() + go bcR.BroadcastStatusRequest() // nolint: errcheck case <-switchToConsensusTicker.C: height, numPending, lenRequesters := bcR.pool.GetStatus() outbound, inbound, _ := bcR.Switch.NumPeers() @@ -271,7 +272,7 @@ FOR_LOOP: // NOTE: we could improve performance if we // didn't make the app commit to disk every block // ... but we would need a way to get the hash without it persisting - err := bcR.state.ApplyBlock(bcR.evsw, bcR.proxyAppConn, first, firstPartsHeader, types.MockMempool{}) + err := bcR.state.ApplyBlock(bcR.eventBus, bcR.proxyAppConn, first, firstPartsHeader, types.MockMempool{}) if err != nil { // TODO This is bad, are we zombie? 
cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) @@ -299,9 +300,9 @@ func (bcR *BlockchainReactor) BroadcastStatusRequest() error { return nil } -// SetEventSwitch implements events.Eventable -func (bcR *BlockchainReactor) SetEventSwitch(evsw types.EventSwitch) { - bcR.evsw = evsw +// SetEventBus sets event bus. +func (bcR *BlockchainReactor) SetEventBus(b *types.EventBus) { + bcR.eventBus = b } //----------------------------------------------------------------------------- @@ -343,7 +344,7 @@ func DecodeMessage(bz []byte, maxSize int) (msgType byte, msg BlockchainMessage, //------------------------------------- type bcBlockRequestMessage struct { - Height int + Height int64 } func (m *bcBlockRequestMessage) String() string { @@ -351,7 +352,7 @@ func (m *bcBlockRequestMessage) String() string { } type bcNoBlockResponseMessage struct { - Height int + Height int64 } func (brm *bcNoBlockResponseMessage) String() string { @@ -372,7 +373,7 @@ func (m *bcBlockResponseMessage) String() string { //------------------------------------- type bcStatusRequestMessage struct { - Height int + Height int64 } func (m *bcStatusRequestMessage) String() string { @@ -382,7 +383,7 @@ func (m *bcStatusRequestMessage) String() string { //------------------------------------- type bcStatusResponseMessage struct { - Height int + Height int64 } func (m *bcStatusResponseMessage) String() string { diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go index 584aadf3..7342b72c 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/reactor_test.go @@ -14,7 +14,7 @@ import ( "github.com/tendermint/tendermint/types" ) -func newBlockchainReactor(maxBlockHeight int) *BlockchainReactor { +func newBlockchainReactor(maxBlockHeight int64) *BlockchainReactor { logger := log.TestingLogger() config := cfg.ResetTestRoot("blockchain_reactor_test") @@ -34,7 +34,7 @@ func newBlockchainReactor(maxBlockHeight int) *BlockchainReactor { 
bcReactor.Switch = p2p.NewSwitch(cfg.DefaultP2PConfig()) // Lastly: let's add some blocks in - for blockHeight := 1; blockHeight <= maxBlockHeight; blockHeight++ { + for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { firstBlock := makeBlock(blockHeight, state) secondBlock := makeBlock(blockHeight+1, state) firstParts := firstBlock.MakePartSet(state.Params.BlockGossipParams.BlockPartSizeBytes) @@ -45,7 +45,7 @@ func newBlockchainReactor(maxBlockHeight int) *BlockchainReactor { } func TestNoBlockMessageResponse(t *testing.T) { - maxBlockHeight := 20 + maxBlockHeight := int64(20) bcr := newBlockchainReactor(maxBlockHeight) bcr.Start() @@ -58,7 +58,7 @@ func TestNoBlockMessageResponse(t *testing.T) { chID := byte(0x01) tests := []struct { - height int + height int64 existent bool }{ {maxBlockHeight + 2, false}, @@ -93,19 +93,19 @@ func TestNoBlockMessageResponse(t *testing.T) { //---------------------------------------------- // utility funcs -func makeTxs(blockNumber int) (txs []types.Tx) { +func makeTxs(height int64) (txs []types.Tx) { for i := 0; i < 10; i++ { - txs = append(txs, types.Tx([]byte{byte(blockNumber), byte(i)})) + txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) } return txs } -func makeBlock(blockNumber int, state *sm.State) *types.Block { +func makeBlock(height int64, state *sm.State) *types.Block { prevHash := state.LastBlockID.Hash prevParts := types.PartSetHeader{} valHash := state.Validators.Hash() prevBlockID := types.BlockID{prevHash, prevParts} - block, _ := types.MakeBlock(blockNumber, "test_chain", makeTxs(blockNumber), + block, _ := types.MakeBlock(height, "test_chain", makeTxs(height), new(types.Commit), prevBlockID, valHash, state.AppHash, state.Params.BlockGossipParams.BlockPartSizeBytes) return block } diff --git a/blockchain/store.go b/blockchain/store.go index 5bf85477..c77f67ed 100644 --- a/blockchain/store.go +++ b/blockchain/store.go @@ -9,7 +9,7 @@ import ( wire "github.com/tendermint/go-wire" 
"github.com/tendermint/tendermint/types" - . "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tmlibs/common" dbm "github.com/tendermint/tmlibs/db" ) @@ -32,7 +32,7 @@ type BlockStore struct { db dbm.DB mtx sync.RWMutex - height int + height int64 } func NewBlockStore(db dbm.DB) *BlockStore { @@ -44,7 +44,7 @@ func NewBlockStore(db dbm.DB) *BlockStore { } // Height() returns the last known contiguous block height. -func (bs *BlockStore) Height() int { +func (bs *BlockStore) Height() int64 { bs.mtx.RLock() defer bs.mtx.RUnlock() return bs.height @@ -58,7 +58,7 @@ func (bs *BlockStore) GetReader(key []byte) io.Reader { return bytes.NewReader(bytez) } -func (bs *BlockStore) LoadBlock(height int) *types.Block { +func (bs *BlockStore) LoadBlock(height int64) *types.Block { var n int var err error r := bs.GetReader(calcBlockMetaKey(height)) @@ -67,7 +67,7 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block { } blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta) if err != nil { - PanicCrisis(Fmt("Error reading block meta: %v", err)) + cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err)) } bytez := []byte{} for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ { @@ -76,12 +76,12 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block { } block := wire.ReadBinary(&types.Block{}, bytes.NewReader(bytez), 0, &n, &err).(*types.Block) if err != nil { - PanicCrisis(Fmt("Error reading block: %v", err)) + cmn.PanicCrisis(cmn.Fmt("Error reading block: %v", err)) } return block } -func (bs *BlockStore) LoadBlockPart(height int, index int) *types.Part { +func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { var n int var err error r := bs.GetReader(calcBlockPartKey(height, index)) @@ -90,12 +90,12 @@ func (bs *BlockStore) LoadBlockPart(height int, index int) *types.Part { } part := wire.ReadBinary(&types.Part{}, r, 0, &n, &err).(*types.Part) if err != nil { - PanicCrisis(Fmt("Error reading 
block part: %v", err)) + cmn.PanicCrisis(cmn.Fmt("Error reading block part: %v", err)) } return part } -func (bs *BlockStore) LoadBlockMeta(height int) *types.BlockMeta { +func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { var n int var err error r := bs.GetReader(calcBlockMetaKey(height)) @@ -104,14 +104,14 @@ func (bs *BlockStore) LoadBlockMeta(height int) *types.BlockMeta { } blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta) if err != nil { - PanicCrisis(Fmt("Error reading block meta: %v", err)) + cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err)) } return blockMeta } // The +2/3 and other Precommit-votes for block at `height`. // This Commit comes from block.LastCommit for `height+1`. -func (bs *BlockStore) LoadBlockCommit(height int) *types.Commit { +func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { var n int var err error r := bs.GetReader(calcBlockCommitKey(height)) @@ -120,13 +120,13 @@ func (bs *BlockStore) LoadBlockCommit(height int) *types.Commit { } commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit) if err != nil { - PanicCrisis(Fmt("Error reading commit: %v", err)) + cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err)) } return commit } // NOTE: the Precommit-vote heights are for the block at `height` -func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit { +func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { var n int var err error r := bs.GetReader(calcSeenCommitKey(height)) @@ -135,7 +135,7 @@ func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit { } commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit) if err != nil { - PanicCrisis(Fmt("Error reading commit: %v", err)) + cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err)) } return commit } @@ -148,10 +148,10 @@ func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit { func (bs *BlockStore) SaveBlock(block *types.Block, 
blockParts *types.PartSet, seenCommit *types.Commit) { height := block.Height if height != bs.Height()+1 { - PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height)) + cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height)) } if !blockParts.IsComplete() { - PanicSanity(Fmt("BlockStore can only save complete block part sets")) + cmn.PanicSanity(cmn.Fmt("BlockStore can only save complete block part sets")) } // Save block meta @@ -185,9 +185,9 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s bs.db.SetSync(nil, nil) } -func (bs *BlockStore) saveBlockPart(height int, index int, part *types.Part) { +func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) { if height != bs.Height()+1 { - PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height)) + cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. 
Wanted %v, got %v", bs.Height()+1, height)) } partBytes := wire.BinaryBytes(part) bs.db.Set(calcBlockPartKey(height, index), partBytes) @@ -195,19 +195,19 @@ func (bs *BlockStore) saveBlockPart(height int, index int, part *types.Part) { //----------------------------------------------------------------------------- -func calcBlockMetaKey(height int) []byte { +func calcBlockMetaKey(height int64) []byte { return []byte(fmt.Sprintf("H:%v", height)) } -func calcBlockPartKey(height int, partIndex int) []byte { +func calcBlockPartKey(height int64, partIndex int) []byte { return []byte(fmt.Sprintf("P:%v:%v", height, partIndex)) } -func calcBlockCommitKey(height int) []byte { +func calcBlockCommitKey(height int64) []byte { return []byte(fmt.Sprintf("C:%v", height)) } -func calcSeenCommitKey(height int) []byte { +func calcSeenCommitKey(height int64) []byte { return []byte(fmt.Sprintf("SC:%v", height)) } @@ -216,13 +216,13 @@ func calcSeenCommitKey(height int) []byte { var blockStoreKey = []byte("blockStore") type BlockStoreStateJSON struct { - Height int + Height int64 } func (bsj BlockStoreStateJSON) Save(db dbm.DB) { bytes, err := json.Marshal(bsj) if err != nil { - PanicSanity(Fmt("Could not marshal state bytes: %v", err)) + cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err)) } db.SetSync(blockStoreKey, bytes) } @@ -237,7 +237,7 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON { bsj := BlockStoreStateJSON{} err := json.Unmarshal(bytes, &bsj) if err != nil { - PanicCrisis(Fmt("Could not unmarshal bytes: %X", bytes)) + cmn.PanicCrisis(cmn.Fmt("Could not unmarshal bytes: %X", bytes)) } return bsj } diff --git a/cmd/tendermint/commands/gen_validator.go b/cmd/tendermint/commands/gen_validator.go index 984176d2..59fe3012 100644 --- a/cmd/tendermint/commands/gen_validator.go +++ b/cmd/tendermint/commands/gen_validator.go @@ -19,7 +19,10 @@ var GenValidatorCmd = &cobra.Command{ func genValidator(cmd *cobra.Command, args []string) { privValidator := 
types.GenPrivValidatorFS("") - privValidatorJSONBytes, _ := json.MarshalIndent(privValidator, "", "\t") + privValidatorJSONBytes, err := json.MarshalIndent(privValidator, "", "\t") + if err != nil { + panic(err) + } fmt.Printf(`%v `, string(privValidatorJSONBytes)) } diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go index cbafac3e..e8f22eb1 100644 --- a/cmd/tendermint/commands/init.go +++ b/cmd/tendermint/commands/init.go @@ -28,12 +28,14 @@ func initFiles(cmd *cobra.Command, args []string) { genDoc := types.GenesisDoc{ ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)), } - genDoc.Validators = []types.GenesisValidator{types.GenesisValidator{ + genDoc.Validators = []types.GenesisValidator{{ PubKey: privValidator.GetPubKey(), Power: 10, }} - genDoc.SaveAs(genFile) + if err := genDoc.SaveAs(genFile); err != nil { + panic(err) + } } logger.Info("Initialized tendermint", "genesis", config.GenesisFile(), "priv_validator", config.PrivValidatorFile()) diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index b9c08715..51336523 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -25,10 +25,13 @@ var ResetPrivValidatorCmd = &cobra.Command{ } // ResetAll removes the privValidator files. -// Exported so other CLI tools can use it +// Exported so other CLI tools can use it. func ResetAll(dbDir, privValFile string, logger log.Logger) { resetPrivValidatorFS(privValFile, logger) - os.RemoveAll(dbDir) + if err := os.RemoveAll(dbDir); err != nil { + logger.Error("Error removing directory", "err", err) + return + } logger.Info("Removed all data", "dir", dbDir) } diff --git a/cmd/tendermint/commands/root_test.go b/cmd/tendermint/commands/root_test.go index 7c3bf801..b4e30d98 100644 --- a/cmd/tendermint/commands/root_test.go +++ b/cmd/tendermint/commands/root_test.go @@ -26,8 +26,12 @@ const ( // modify in the test cases. 
// NOTE: it unsets all TM* env variables. func isolate(cmds ...*cobra.Command) cli.Executable { - os.Unsetenv("TMHOME") - os.Unsetenv("TM_HOME") + if err := os.Unsetenv("TMHOME"); err != nil { + panic(err) + } + if err := os.Unsetenv("TM_HOME"); err != nil { + panic(err) + } viper.Reset() config = cfg.DefaultConfig() diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index f0a1eede..c71b4783 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -49,7 +49,7 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command { return fmt.Errorf("Failed to create node: %v", err) } - if _, err := n.Start(); err != nil { + if err := n.Start(); err != nil { return fmt.Errorf("Failed to start node: %v", err) } else { logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo()) diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index ac6f337a..2c859df2 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -63,7 +63,9 @@ func testnetFiles(cmd *cobra.Command, args []string) { // Write genesis file. 
for i := 0; i < nValidators; i++ { mach := cmn.Fmt("mach%d", i) - genDoc.SaveAs(path.Join(dataDir, mach, "genesis.json")) + if err := genDoc.SaveAs(path.Join(dataDir, mach, "genesis.json")); err != nil { + panic(err) + } } fmt.Println(cmn.Fmt("Successfully initialized %v node directories", nValidators)) diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index 86ca1531..a46f227c 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -37,5 +37,7 @@ func main() { rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc)) cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv("$HOME/.tendermint")) - cmd.Execute() + if err := cmd.Execute(); err != nil { + panic(err) + } } diff --git a/config/config.go b/config/config.go index 23da4f40..ea3fa13e 100644 --- a/config/config.go +++ b/config/config.go @@ -16,6 +16,7 @@ type Config struct { P2P *P2PConfig `mapstructure:"p2p"` Mempool *MempoolConfig `mapstructure:"mempool"` Consensus *ConsensusConfig `mapstructure:"consensus"` + TxIndex *TxIndexConfig `mapstructure:"tx_index"` } // DefaultConfig returns a default configuration for a Tendermint node @@ -26,6 +27,7 @@ func DefaultConfig() *Config { P2P: DefaultP2PConfig(), Mempool: DefaultMempoolConfig(), Consensus: DefaultConsensusConfig(), + TxIndex: DefaultTxIndexConfig(), } } @@ -37,6 +39,7 @@ func TestConfig() *Config { P2P: TestP2PConfig(), Mempool: DefaultMempoolConfig(), Consensus: TestConsensusConfig(), + TxIndex: DefaultTxIndexConfig(), } } @@ -93,9 +96,6 @@ type BaseConfig struct { // so the app can decide if we should keep the connection or not FilterPeers bool `mapstructure:"filter_peers"` // false - // What indexer to use for transactions - TxIndex string `mapstructure:"tx_index"` - // Database backend: leveldb | memdb DBBackend string `mapstructure:"db_backend"` @@ -115,7 +115,6 @@ func DefaultBaseConfig() BaseConfig { ProfListenAddress: "", FastSync: true, FilterPeers: false, - TxIndex: "kv", DBBackend: "leveldb", DBPath: "data", } @@ -255,7 +254,7 @@ 
func TestP2PConfig() *P2PConfig { return conf } -// AddrBookFile returns the full path to the address bool +// AddrBookFile returns the full path to the address book func (p *P2PConfig) AddrBookFile() string { return rootify(p.AddrBook, p.RootDir) } @@ -412,6 +411,41 @@ func (c *ConsensusConfig) SetWalFile(walFile string) { c.walFile = walFile } +//----------------------------------------------------------------------------- +// TxIndexConfig + +// TxIndexConfig defines the configuration for the transaction +// indexer, including tags to index. +type TxIndexConfig struct { + // What indexer to use for transactions + // + // Options: + // 1) "null" (default) + // 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). + Indexer string `mapstructure:"indexer"` + + // Comma-separated list of tags to index (by default the only tag is tx hash) + // + // It's recommended to index only a subset of tags due to possible memory + // bloat. This, of course, depends on the indexer's DB and the volume of + // transactions. + IndexTags string `mapstructure:"index_tags"` + + // When set to true, tells indexer to index all tags. Note this may not be + // desirable (see the comment above). IndexTags has precedence over + // IndexAllTags (i.e. when given both, IndexTags will be indexed). + IndexAllTags bool `mapstructure:"index_all_tags"` +} + +// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
+func DefaultTxIndexConfig() *TxIndexConfig { + return &TxIndexConfig{ + Indexer: "kv", + IndexTags: "", + IndexAllTags: false, + } +} + //----------------------------------------------------------------------------- // Utils diff --git a/config/toml.go b/config/toml.go index 5dcbe533..ec70ab75 100644 --- a/config/toml.go +++ b/config/toml.go @@ -12,8 +12,12 @@ import ( /****** these are for production settings ***********/ func EnsureRoot(rootDir string) { - cmn.EnsureDir(rootDir, 0700) - cmn.EnsureDir(rootDir+"/data", 0700) + if err := cmn.EnsureDir(rootDir, 0700); err != nil { + cmn.PanicSanity(err.Error()) + } + if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil { + cmn.PanicSanity(err.Error()) + } configFilePath := path.Join(rootDir, "config.toml") @@ -53,21 +57,23 @@ func ResetTestRoot(testName string) *Config { rootDir = filepath.Join(rootDir, testName) // Remove ~/.tendermint_test_bak if cmn.FileExists(rootDir + "_bak") { - err := os.RemoveAll(rootDir + "_bak") - if err != nil { + if err := os.RemoveAll(rootDir + "_bak"); err != nil { cmn.PanicSanity(err.Error()) } } // Move ~/.tendermint_test to ~/.tendermint_test_bak if cmn.FileExists(rootDir) { - err := os.Rename(rootDir, rootDir+"_bak") - if err != nil { + if err := os.Rename(rootDir, rootDir+"_bak"); err != nil { cmn.PanicSanity(err.Error()) } } // Create new dir - cmn.EnsureDir(rootDir, 0700) - cmn.EnsureDir(rootDir+"/data", 0700) + if err := cmn.EnsureDir(rootDir, 0700); err != nil { + cmn.PanicSanity(err.Error()) + } + if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil { + cmn.PanicSanity(err.Error()) + } configFilePath := path.Join(rootDir, "config.toml") genesisFilePath := path.Join(rootDir, "genesis.json") diff --git a/config/toml_test.go b/config/toml_test.go index d8f372ae..bf3bf58f 100644 --- a/config/toml_test.go +++ b/config/toml_test.go @@ -24,7 +24,7 @@ func TestEnsureRoot(t *testing.T) { // setup temp dir for test tmpDir, err := ioutil.TempDir("", "config-test") 
require.Nil(err) - defer os.RemoveAll(tmpDir) + defer os.RemoveAll(tmpDir) // nolint: errcheck // create root dir EnsureRoot(tmpDir) diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index c96ccf97..2f5f3f76 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -1,16 +1,17 @@ package consensus import ( + "context" "sync" "testing" "time" + "github.com/stretchr/testify/require" crypto "github.com/tendermint/go-crypto" data "github.com/tendermint/go-wire/data" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/events" ) func init() { @@ -41,7 +42,43 @@ func TestByzantine(t *testing.T) { switches[i].SetLogger(p2pLogger.With("validator", i)) } + eventChans := make([]chan interface{}, N) reactors := make([]p2p.Reactor, N) + for i := 0; i < N; i++ { + if i == 0 { + css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator) + // make byzantine + css[i].decideProposal = func(j int) func(int64, int) { + return func(height int64, round int) { + byzantineDecideProposalFunc(t, height, round, css[j], switches[j]) + } + }(i) + css[i].doPrevote = func(height int64, round int) {} + } + + eventBus := types.NewEventBus() + eventBus.SetLogger(logger.With("module", "events", "validator", i)) + err := eventBus.Start() + require.NoError(t, err) + defer eventBus.Stop() + + eventChans[i] = make(chan interface{}, 1) + err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i]) + require.NoError(t, err) + + conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states + conR.SetLogger(logger.With("validator", i)) + conR.SetEventBus(eventBus) + + var conRI p2p.Reactor // nolint: gotype, gosimple + conRI = conR + + if i == 0 { + conRI = NewByzantineReactor(conR) + } + reactors[i] = conRI + } + defer func() { for _, r := range reactors { if rr, ok := r.(*ByzantineReactor); 
ok { @@ -51,40 +88,6 @@ func TestByzantine(t *testing.T) { } } }() - eventChans := make([]chan interface{}, N) - eventLogger := logger.With("module", "events") - for i := 0; i < N; i++ { - if i == 0 { - css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator) - // make byzantine - css[i].decideProposal = func(j int) func(int, int) { - return func(height, round int) { - byzantineDecideProposalFunc(t, height, round, css[j], switches[j]) - } - }(i) - css[i].doPrevote = func(height, round int) {} - } - - eventSwitch := events.NewEventSwitch() - eventSwitch.SetLogger(eventLogger.With("validator", i)) - _, err := eventSwitch.Start() - if err != nil { - t.Fatalf("Failed to start switch: %v", err) - } - eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1) - - conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states - conR.SetLogger(logger.With("validator", i)) - conR.SetEventSwitch(eventSwitch) - - var conRI p2p.Reactor - conRI = conR - - if i == 0 { - conRI = NewByzantineReactor(conR) - } - reactors[i] = conRI - } p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch { // ignore new switch s, we already made ours @@ -159,7 +162,7 @@ func TestByzantine(t *testing.T) { //------------------------------- // byzantine consensus functions -func byzantineDecideProposalFunc(t *testing.T, height, round int, cs *ConsensusState, sw *p2p.Switch) { +func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *ConsensusState, sw *p2p.Switch) { // byzantine user should create two proposals and try to split the vote. // Avoid sending on internalMsgQueue and running consensus state. 
@@ -167,13 +170,17 @@ func byzantineDecideProposalFunc(t *testing.T, height, round int, cs *ConsensusS block1, blockParts1 := cs.createProposalBlock() polRound, polBlockID := cs.Votes.POLInfo() proposal1 := types.NewProposal(height, round, blockParts1.Header(), polRound, polBlockID) - cs.privValidator.SignProposal(cs.state.ChainID, proposal1) // byzantine doesnt err + if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal1); err != nil { + t.Error(err) + } // Create a new proposal block from state/txs from the mempool. block2, blockParts2 := cs.createProposalBlock() polRound, polBlockID = cs.Votes.POLInfo() proposal2 := types.NewProposal(height, round, blockParts2.Header(), polRound, polBlockID) - cs.privValidator.SignProposal(cs.state.ChainID, proposal2) // byzantine doesnt err + if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal2); err != nil { + t.Error(err) + } block1Hash := block1.Hash() block2Hash := block2.Hash() @@ -190,7 +197,7 @@ func byzantineDecideProposalFunc(t *testing.T, height, round int, cs *ConsensusS } } -func sendProposalAndParts(height, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) { +func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) { // proposal msg := &ProposalMessage{Proposal: proposal} peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) @@ -286,12 +293,12 @@ func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote } func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) (err error) { - proposal.Signature, err = privVal.Sign(types.SignBytes(chainID, proposal)) + proposal.Signature, _ = privVal.Sign(types.SignBytes(chainID, proposal)) return nil } func (privVal *ByzantinePrivValidator) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) (err error) { - heartbeat.Signature, err 
= privVal.Sign(types.SignBytes(chainID, heartbeat)) + heartbeat.Signature, _ = privVal.Sign(types.SignBytes(chainID, heartbeat)) return nil } diff --git a/consensus/common.go b/consensus/common.go deleted file mode 100644 index 1e16c4da..00000000 --- a/consensus/common.go +++ /dev/null @@ -1,35 +0,0 @@ -package consensus - -import ( - "github.com/tendermint/tendermint/types" -) - -// XXX: WARNING: these functions can halt the consensus as firing events is synchronous. -// Make sure to read off the channels, and in the case of subscribeToEventRespond, to write back on it - -// NOTE: if chanCap=0, this blocks on the event being consumed -func subscribeToEvent(evsw types.EventSwitch, receiver, eventID string, chanCap int) chan interface{} { - // listen for event - ch := make(chan interface{}, chanCap) - types.AddListenerForEvent(evsw, receiver, eventID, func(data types.TMEventData) { - ch <- data - }) - return ch -} - -// NOTE: this blocks on receiving a response after the event is consumed -func subscribeToEventRespond(evsw types.EventSwitch, receiver, eventID string) chan interface{} { - // listen for event - ch := make(chan interface{}) - types.AddListenerForEvent(evsw, receiver, eventID, func(data types.TMEventData) { - ch <- data - <-ch - }) - return ch -} - -func discardFromChan(ch chan interface{}, n int) { - for i := 0; i < n; i++ { - <-ch - } -} diff --git a/consensus/common_test.go b/consensus/common_test.go index 9810024d..23e56e67 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -2,6 +2,7 @@ package consensus import ( "bytes" + "context" "fmt" "io/ioutil" "os" @@ -30,6 +31,10 @@ import ( "github.com/go-kit/kit/log/term" ) +const ( + testSubscriber = "test-client" +) + // genesis, chain_id, priv_val var config *cfg.Config // NOTE: must be reset for each _test.go file var ensureTimeout = time.Second * 2 @@ -49,12 +54,12 @@ func ResetConfig(name string) *cfg.Config { type validatorStub struct { Index int // Validator index. 
NOTE: we don't assume validator set changes. - Height int + Height int64 Round int types.PrivValidator } -var testMinPower = 10 +var testMinPower int64 = 10 func NewValidatorStub(privValidator types.PrivValidator, valIndex int) *validatorStub { return &validatorStub{ @@ -108,13 +113,13 @@ func incrementRound(vss ...*validatorStub) { //------------------------------------------------------------------------------- // Functions for transitioning the consensus state -func startTestRound(cs *ConsensusState, height, round int) { +func startTestRound(cs *ConsensusState, height int64, round int) { cs.enterNewRound(height, round) cs.startRoutines(0) } // Create proposal block from cs1 but sign it with vs -func decideProposal(cs1 *ConsensusState, vs *validatorStub, height, round int) (proposal *types.Proposal, block *types.Block) { +func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round int) (proposal *types.Proposal, block *types.Block) { block, blockParts := cs1.createProposalBlock() if block == nil { // on error panic("error creating proposal block") @@ -208,11 +213,14 @@ func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lo // genesis func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} { - voteCh0 := subscribeToEvent(cs.evsw, "tester", types.EventStringVote(), 1) + voteCh0 := make(chan interface{}) + err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote, voteCh0) + if err != nil { + panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote)) + } voteCh := make(chan interface{}) go func() { - for { - v := <-voteCh0 + for v := range voteCh0 { vote := v.(types.TMEventData).Unwrap().(types.EventDataVote) // we only fire for our own votes if bytes.Equal(addr, vote.Vote.ValidatorAddress) { @@ -231,8 +239,12 @@ func newConsensusState(state *sm.State, pv types.PrivValidator, app abci.Applica } func newConsensusStateWithConfig(thisConfig 
*cfg.Config, state *sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState { - // Get BlockStore blockDB := dbm.NewMemDB() + return newConsensusStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB) +} + +func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state *sm.State, pv types.PrivValidator, app abci.Application, blockDB dbm.DB) *ConsensusState { + // Get BlockStore blockStore := bc.NewBlockStore(blockDB) // one for mempool, one for consensus @@ -252,10 +264,10 @@ func newConsensusStateWithConfig(thisConfig *cfg.Config, state *sm.State, pv typ cs.SetLogger(log.TestingLogger()) cs.SetPrivValidator(pv) - evsw := types.NewEventSwitch() - evsw.SetLogger(log.TestingLogger().With("module", "events")) - cs.SetEventSwitch(evsw) - evsw.Start() + eventBus := types.NewEventBus() + eventBus.SetLogger(log.TestingLogger().With("module", "events")) + eventBus.Start() + cs.SetEventBus(eventBus) return cs } @@ -267,13 +279,13 @@ func loadPrivValidator(config *cfg.Config) *types.PrivValidatorFS { return privValidator } -func fixedConsensusStateDummy() *ConsensusState { +func fixedConsensusStateDummy(config *cfg.Config, logger log.Logger) *ConsensusState { stateDB := dbm.NewMemDB() state, _ := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile()) - state.SetLogger(log.TestingLogger().With("module", "state")) + state.SetLogger(logger.With("module", "state")) privValidator := loadPrivValidator(config) cs := newConsensusState(state, privValidator, dummy.NewDummyApplication()) - cs.SetLogger(log.TestingLogger()) + cs.SetLogger(logger) return cs } @@ -297,7 +309,7 @@ func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) { //------------------------------------------------------------------------------- -func ensureNoNewStep(stepCh chan interface{}) { +func ensureNoNewStep(stepCh <-chan interface{}) { timer := time.NewTimer(ensureTimeout) select { case <-timer.C: @@ -307,7 +319,7 @@ func ensureNoNewStep(stepCh 
chan interface{}) { } } -func ensureNewStep(stepCh chan interface{}) { +func ensureNewStep(stepCh <-chan interface{}) { timer := time.NewTimer(ensureTimeout) select { case <-timer.C: @@ -360,12 +372,13 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou // nPeers = nValidators + nNotValidator func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState { - genDoc, privVals := randGenesisDoc(nValidators, false, int64(testMinPower)) + genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower) css := make([]*ConsensusState, nPeers) + logger := consensusLogger() for i := 0; i < nPeers; i++ { db := dbm.NewMemDB() // each state needs its own db state, _ := sm.MakeGenesisState(db, genDoc) - state.SetLogger(log.TestingLogger().With("module", "state")) + state.SetLogger(logger.With("module", "state", "validator", i)) state.Save() thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i)) ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal @@ -382,7 +395,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF app.InitChain(abci.RequestInitChain{Validators: vals}) css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app) - css[i].SetLogger(log.TestingLogger()) + css[i].SetLogger(logger.With("validator", i)) css[i].SetTimeoutTicker(tickerFunc()) } return css @@ -451,12 +464,12 @@ type mockTicker struct { fired bool } -func (m *mockTicker) Start() (bool, error) { - return true, nil +func (m *mockTicker) Start() error { + return nil } -func (m *mockTicker) Stop() bool { - return true +func (m *mockTicker) Stop() error { + return nil } func (m *mockTicker) ScheduleTimeout(ti timeoutInfo) { diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index 3a430ef2..b35bdc53 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -2,13 +2,17 @@ package consensus 
import ( "encoding/binary" + "fmt" "testing" "time" - abci "github.com/tendermint/abci/types" - "github.com/tendermint/tendermint/types" + "github.com/stretchr/testify/assert" + "github.com/tendermint/abci/example/code" + abci "github.com/tendermint/abci/types" cmn "github.com/tendermint/tmlibs/common" + + "github.com/tendermint/tendermint/types" ) func init() { @@ -22,16 +26,15 @@ func TestNoProgressUntilTxsAvailable(t *testing.T) { cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication()) cs.mempool.EnableTxsAvailable() height, round := cs.Height, cs.Round - newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1) + newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) startTestRound(cs, height, round) ensureNewStep(newBlockCh) // first block gets committed ensureNoNewStep(newBlockCh) - deliverTxsRange(cs, 0, 2) + deliverTxsRange(cs, 0, 1) ensureNewStep(newBlockCh) // commit txs ensureNewStep(newBlockCh) // commit updated app hash ensureNoNewStep(newBlockCh) - } func TestProgressAfterCreateEmptyBlocksInterval(t *testing.T) { @@ -41,7 +44,7 @@ func TestProgressAfterCreateEmptyBlocksInterval(t *testing.T) { cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication()) cs.mempool.EnableTxsAvailable() height, round := cs.Height, cs.Round - newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1) + newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) startTestRound(cs, height, round) ensureNewStep(newBlockCh) // first block gets committed @@ -56,9 +59,9 @@ func TestProgressInHigherRound(t *testing.T) { cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication()) cs.mempool.EnableTxsAvailable() height, round := cs.Height, cs.Round - newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1) - newRoundCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewRound(), 1) - timeoutCh := 
subscribeToEvent(cs.evsw, "tester", types.EventStringTimeoutPropose(), 1) + newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) + newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) + timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) cs.setProposal = func(proposal *types.Proposal) error { if cs.Height == 2 && cs.Round == 0 { // dont set the proposal in round 0 so we timeout and @@ -73,7 +76,7 @@ func TestProgressInHigherRound(t *testing.T) { ensureNewStep(newRoundCh) // first round at first height ensureNewStep(newBlockCh) // first block gets committed ensureNewStep(newRoundCh) // first round at next height - deliverTxsRange(cs, 0, 2) // we deliver txs, but dont set a proposal so we get the next round + deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round <-timeoutCh ensureNewStep(newRoundCh) // wait for the next round ensureNewStep(newBlockCh) // now we can commit the block @@ -92,11 +95,10 @@ func deliverTxsRange(cs *ConsensusState, start, end int) { } func TestTxConcurrentWithCommit(t *testing.T) { - state, privVals := randGenesisState(1, false, 10) cs := newConsensusState(state, privVals[0], NewCounterApplication()) height, round := cs.Height, cs.Round - newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1) + newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) NTxs := 10000 go deliverTxsRange(cs, 0, NTxs) @@ -121,41 +123,43 @@ func TestRmBadTx(t *testing.T) { // increment the counter by 1 txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(0)) - app.DeliverTx(txBytes) - app.Commit() - ch := make(chan struct{}) - cbCh := make(chan struct{}) + resDeliver := app.DeliverTx(txBytes) + assert.False(t, resDeliver.IsErr(), cmn.Fmt("expected no error. got %v", resDeliver)) + + resCommit := app.Commit() + assert.False(t, resCommit.IsErr(), cmn.Fmt("expected no error. 
got %v", resCommit)) + + emptyMempoolCh := make(chan struct{}) + checkTxRespCh := make(chan struct{}) go func() { // Try to send the tx through the mempool. // CheckTx should not err, but the app should return a bad abci code // and the tx should get removed from the pool err := cs.mempool.CheckTx(txBytes, func(r *abci.Response) { - if r.GetCheckTx().Code != abci.CodeType_BadNonce { + if r.GetCheckTx().Code != code.CodeTypeBadNonce { t.Fatalf("expected checktx to return bad nonce, got %v", r) } - cbCh <- struct{}{} + checkTxRespCh <- struct{}{} }) if err != nil { - t.Fatal("Error after CheckTx: %v", err) + t.Fatalf("Error after CheckTx: %v", err) } // check for the tx for { - time.Sleep(time.Second) txs := cs.mempool.Reap(1) if len(txs) == 0 { - ch <- struct{}{} - return + emptyMempoolCh <- struct{}{} } - + time.Sleep(10 * time.Millisecond) } }() // Wait until the tx returns ticker := time.After(time.Second * 5) select { - case <-cbCh: + case <-checkTxRespCh: // success case <-ticker: t.Fatalf("Timed out waiting for tx to return") @@ -164,7 +168,7 @@ func TestRmBadTx(t *testing.T) { // Wait until the tx is removed ticker = time.After(time.Second * 5) select { - case <-ch: + case <-emptyMempoolCh: // success case <-ticker: t.Fatalf("Timed out waiting for tx to be removed") @@ -187,33 +191,41 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo { return abci.ResponseInfo{Data: cmn.Fmt("txs:%v", app.txCount)} } -func (app *CounterApplication) DeliverTx(tx []byte) abci.Result { - return runTx(tx, &app.txCount) +func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx { + txValue := txAsUint64(tx) + if txValue != uint64(app.txCount) { + return abci.ResponseDeliverTx{ + Code: code.CodeTypeBadNonce, + Log: fmt.Sprintf("Invalid nonce. 
Expected %v, got %v", app.txCount, txValue)} + } + app.txCount += 1 + return abci.ResponseDeliverTx{Code: code.CodeTypeOK} } -func (app *CounterApplication) CheckTx(tx []byte) abci.Result { - return runTx(tx, &app.mempoolTxCount) +func (app *CounterApplication) CheckTx(tx []byte) abci.ResponseCheckTx { + txValue := txAsUint64(tx) + if txValue != uint64(app.mempoolTxCount) { + return abci.ResponseCheckTx{ + Code: code.CodeTypeBadNonce, + Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue)} + } + app.mempoolTxCount += 1 + return abci.ResponseCheckTx{Code: code.CodeTypeOK} } -func runTx(tx []byte, countPtr *int) abci.Result { - count := *countPtr +func txAsUint64(tx []byte) uint64 { tx8 := make([]byte, 8) copy(tx8[len(tx8)-len(tx):], tx) - txValue := binary.BigEndian.Uint64(tx8) - if txValue != uint64(count) { - return abci.ErrBadNonce.AppendLog(cmn.Fmt("Invalid nonce. Expected %v, got %v", count, txValue)) - } - *countPtr += 1 - return abci.OK + return binary.BigEndian.Uint64(tx8) } -func (app *CounterApplication) Commit() abci.Result { +func (app *CounterApplication) Commit() abci.ResponseCommit { app.mempoolTxCount = app.txCount if app.txCount == 0 { - return abci.OK + return abci.ResponseCommit{Code: code.CodeTypeOK} } else { hash := make([]byte, 8) binary.BigEndian.PutUint64(hash, uint64(app.txCount)) - return abci.NewResultOK(hash, "") + return abci.ResponseCommit{Code: code.CodeTypeOK, Data: hash} } } diff --git a/consensus/reactor.go b/consensus/reactor.go index e6849992..90dfa3b1 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -2,12 +2,14 @@ package consensus import ( "bytes" - "errors" + "context" "fmt" "reflect" "sync" "time" + "github.com/pkg/errors" + wire "github.com/tendermint/go-wire" cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tmlibs/log" @@ -34,10 +36,10 @@ type ConsensusReactor struct { p2p.BaseReactor // BaseService + p2p.Switch conS *ConsensusState - evsw types.EventSwitch mtx 
sync.RWMutex fastSync bool + eventBus *types.EventBus } // NewConsensusReactor returns a new ConsensusReactor with the given consensusState. @@ -53,18 +55,22 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *Consens // OnStart implements BaseService. func (conR *ConsensusReactor) OnStart() error { conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync()) - conR.BaseReactor.OnStart() + if err := conR.BaseReactor.OnStart(); err != nil { + return err + } - // callbacks for broadcasting new steps and votes to peers - // upon their respective events (ie. uses evsw) - conR.registerEventCallbacks() + err := conR.startBroadcastRoutine() + if err != nil { + return err + } if !conR.FastSync() { - _, err := conR.conS.Start() + err := conR.conS.Start() if err != nil { return err } } + return nil } @@ -91,31 +97,34 @@ func (conR *ConsensusReactor) SwitchToConsensus(state *sm.State, blocksSynced in // dont bother with the WAL if we fast synced conR.conS.doWALCatchup = false } - conR.conS.Start() + err := conR.conS.Start() + if err != nil { + conR.Logger.Error("Error starting conS", "err", err) + } } // GetChannels implements Reactor func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor { // TODO optimize return []*p2p.ChannelDescriptor{ - &p2p.ChannelDescriptor{ + { ID: StateChannel, Priority: 5, SendQueueCapacity: 100, }, - &p2p.ChannelDescriptor{ + { ID: DataChannel, // maybe split between gossiping current block and catchup stuff Priority: 10, // once we gossip the whole block there's nothing left to send until next height or round SendQueueCapacity: 100, RecvBufferCapacity: 50 * 4096, }, - &p2p.ChannelDescriptor{ + { ID: VoteChannel, Priority: 5, SendQueueCapacity: 100, RecvBufferCapacity: 100 * 100, }, - &p2p.ChannelDescriptor{ + { ID: VoteSetBitsChannel, Priority: 1, SendQueueCapacity: 2, @@ -306,10 +315,10 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) } } -// SetEventSwitch implements 
events.Eventable -func (conR *ConsensusReactor) SetEventSwitch(evsw types.EventSwitch) { - conR.evsw = evsw - conR.conS.SetEventSwitch(evsw) +// SetEventBus sets event bus. +func (conR *ConsensusReactor) SetEventBus(b *types.EventBus) { + conR.eventBus = b + conR.conS.SetEventBus(b) } // FastSync returns whether the consensus reactor is in fast-sync mode. @@ -321,24 +330,60 @@ func (conR *ConsensusReactor) FastSync() bool { //-------------------------------------- -// Listens for new steps and votes, -// broadcasting the result to peers -func (conR *ConsensusReactor) registerEventCallbacks() { +// startBroadcastRoutine subscribes for new round steps, votes and proposal +// heartbeats using the event bus and starts a go routine to broadcasts events +// to peers upon receiving them. +func (conR *ConsensusReactor) startBroadcastRoutine() error { + const subscriber = "consensus-reactor" + ctx := context.Background() - types.AddListenerForEvent(conR.evsw, "conR", types.EventStringNewRoundStep(), func(data types.TMEventData) { - rs := data.Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - conR.broadcastNewRoundStep(rs) - }) + // new round steps + stepsCh := make(chan interface{}) + err := conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, stepsCh) + if err != nil { + return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryNewRoundStep) + } - types.AddListenerForEvent(conR.evsw, "conR", types.EventStringVote(), func(data types.TMEventData) { - edv := data.Unwrap().(types.EventDataVote) - conR.broadcastHasVoteMessage(edv.Vote) - }) + // votes + votesCh := make(chan interface{}) + err = conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryVote, votesCh) + if err != nil { + return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryVote) + } - types.AddListenerForEvent(conR.evsw, "conR", types.EventStringProposalHeartbeat(), func(data types.TMEventData) { - heartbeat := 
data.Unwrap().(types.EventDataProposalHeartbeat) - conR.broadcastProposalHeartbeatMessage(heartbeat) - }) + // proposal heartbeats + heartbeatsCh := make(chan interface{}) + err = conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryProposalHeartbeat, heartbeatsCh) + if err != nil { + return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryProposalHeartbeat) + } + + go func() { + for { + select { + case data, ok := <-stepsCh: + if ok { // a receive from a closed channel returns the zero value immediately + edrs := data.(types.TMEventData).Unwrap().(types.EventDataRoundState) + conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState)) + } + case data, ok := <-votesCh: + if ok { + edv := data.(types.TMEventData).Unwrap().(types.EventDataVote) + conR.broadcastHasVoteMessage(edv.Vote) + } + case data, ok := <-heartbeatsCh: + if ok { + edph := data.(types.TMEventData).Unwrap().(types.EventDataProposalHeartbeat) + conR.broadcastProposalHeartbeatMessage(edph) + } + case <-conR.Quit: + conR.eventBus.UnsubscribeAll(ctx, subscriber) + return + } + } + }() + + return nil } func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(heartbeat types.EventDataProposalHeartbeat) { @@ -350,7 +395,6 @@ func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(heartbeat types. } func (conR *ConsensusReactor) broadcastNewRoundStep(rs *cstypes.RoundState) { - nrsMsg, csMsg := makeRoundStepMessages(rs) if nrsMsg != nil { conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{nrsMsg}) @@ -448,6 +492,18 @@ OUTER_LOOP: // If the peer is on a previous height, help catch up. 
if (0 < prs.Height) && (prs.Height < rs.Height) { heightLogger := logger.With("height", prs.Height) + + // if we never received the commit message from the peer, the block parts wont be initialized + if prs.ProposalBlockParts == nil { + blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) + if blockMeta == nil { + cmn.PanicCrisis(cmn.Fmt("Failed to load block %d when blockStore is at %d", + prs.Height, conR.conS.blockStore.Height())) + } + ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader) + // continue the loop since prs is a copy and not effected by this initialization + continue OUTER_LOOP + } conR.gossipDataForCatchup(heightLogger, rs, prs, ps, peer) continue OUTER_LOOP } @@ -527,9 +583,11 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype Round: prs.Round, // Not our height, so it doesn't matter. Part: part, } - logger.Debug("Sending block part for catchup", "round", prs.Round) + logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index) if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) { ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) + } else { + logger.Debug("Sending block part for catchup failed") } return } else { @@ -803,7 +861,7 @@ func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState { // GetHeight returns an atomic snapshot of the PeerRoundState's height // used by the mempool to ensure peers are caught up before broadcasting new txs -func (ps *PeerState) GetHeight() int { +func (ps *PeerState) GetHeight() int64 { ps.mtx.Lock() defer ps.mtx.Unlock() return ps.PeerRoundState.Height @@ -828,8 +886,21 @@ func (ps *PeerState) SetHasProposal(proposal *types.Proposal) { ps.ProposalPOL = nil // Nil until ProposalPOLMessage received. } +// InitProposalBlockParts initializes the peer's proposal block parts header and bit array. 
+func (ps *PeerState) InitProposalBlockParts(partsHeader types.PartSetHeader) { + ps.mtx.Lock() + defer ps.mtx.Unlock() + + if ps.ProposalBlockParts != nil { + return + } + + ps.ProposalBlockPartsHeader = partsHeader + ps.ProposalBlockParts = cmn.NewBitArray(partsHeader.Total) +} + // SetHasProposalBlockPart sets the given block part index as known for the peer. -func (ps *PeerState) SetHasProposalBlockPart(height int, round int, index int) { +func (ps *PeerState) SetHasProposalBlockPart(height int64, round int, index int) { ps.mtx.Lock() defer ps.mtx.Unlock() @@ -880,7 +951,7 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote return nil, false } -func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *cmn.BitArray { +func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.BitArray { if !types.IsVoteTypeValid(type_) { return nil } @@ -927,7 +998,7 @@ func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *cmn.BitArra } // 'round': A round for which we have a +2/3 commit. -func (ps *PeerState) ensureCatchupCommitRound(height, round int, numValidators int) { +func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValidators int) { if ps.Height != height { return } @@ -953,13 +1024,13 @@ func (ps *PeerState) ensureCatchupCommitRound(height, round int, numValidators i // what votes this peer has received. // NOTE: It's important to make sure that numValidators actually matches // what the node sees as the number of validators for height. 
-func (ps *PeerState) EnsureVoteBitArrays(height int, numValidators int) { +func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) { ps.mtx.Lock() defer ps.mtx.Unlock() ps.ensureVoteBitArrays(height, numValidators) } -func (ps *PeerState) ensureVoteBitArrays(height int, numValidators int) { +func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) { if ps.Height == height { if ps.Prevotes == nil { ps.Prevotes = cmn.NewBitArray(numValidators) @@ -988,9 +1059,9 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) { ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex) } -func (ps *PeerState) setHasVote(height int, round int, type_ byte, index int) { - logger := ps.logger.With("peerRound", ps.Round, "height", height, "round", round) - logger.Debug("setHasVote(LastCommit)", "lastCommit", ps.LastCommit, "index", index) +func (ps *PeerState) setHasVote(height int64, round int, type_ byte, index int) { + logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.Height, ps.Round), "H/R", cmn.Fmt("%d/%d", height, round)) + logger.Debug("setHasVote", "type", type_, "index", index) // NOTE: some may be nil BitArrays -> no side effects. psVotes := ps.getVoteBitArray(height, round, type_) @@ -1182,7 +1253,7 @@ func DecodeMessage(bz []byte) (msgType byte, msg ConsensusMessage, err error) { // NewRoundStepMessage is sent for every step taken in the ConsensusState. // For every height/round/step transition type NewRoundStepMessage struct { - Height int + Height int64 Round int Step cstypes.RoundStepType SecondsSinceStartTime int @@ -1199,7 +1270,7 @@ func (m *NewRoundStepMessage) String() string { // CommitStepMessage is sent when a block is committed. type CommitStepMessage struct { - Height int + Height int64 BlockPartsHeader types.PartSetHeader BlockParts *cmn.BitArray } @@ -1225,7 +1296,7 @@ func (m *ProposalMessage) String() string { // ProposalPOLMessage is sent when a previous proposal is re-proposed. 
type ProposalPOLMessage struct { - Height int + Height int64 ProposalPOLRound int ProposalPOL *cmn.BitArray } @@ -1239,7 +1310,7 @@ func (m *ProposalPOLMessage) String() string { // BlockPartMessage is sent when gossipping a piece of the proposed block. type BlockPartMessage struct { - Height int + Height int64 Round int Part *types.Part } @@ -1265,7 +1336,7 @@ func (m *VoteMessage) String() string { // HasVoteMessage is sent to indicate that a particular vote has been received. type HasVoteMessage struct { - Height int + Height int64 Round int Type byte Index int @@ -1280,7 +1351,7 @@ func (m *HasVoteMessage) String() string { // VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes. type VoteSetMaj23Message struct { - Height int + Height int64 Round int Type byte BlockID types.BlockID @@ -1295,7 +1366,7 @@ func (m *VoteSetMaj23Message) String() string { // VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the BlockID. type VoteSetBitsMessage struct { - Height int + Height int64 Round int Type byte BlockID types.BlockID diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index ed8fa87b..56ac17af 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -1,17 +1,21 @@ package consensus import ( + "context" "fmt" + "os" + "runtime/pprof" "sync" "testing" "time" "github.com/tendermint/abci/example/dummy" - "github.com/tendermint/tmlibs/events" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" + + "github.com/stretchr/testify/require" ) func init() { @@ -21,27 +25,30 @@ func init() { //---------------------------------------------- // in-process testnets -func startConsensusNet(t *testing.T, css []*ConsensusState, N int, subscribeEventRespond bool) ([]*ConsensusReactor, []chan interface{}) { +func startConsensusNet(t *testing.T, css []*ConsensusState, N int) ([]*ConsensusReactor, []chan interface{}, 
[]*types.EventBus) { reactors := make([]*ConsensusReactor, N) eventChans := make([]chan interface{}, N) + eventBuses := make([]*types.EventBus, N) logger := consensusLogger() for i := 0; i < N; i++ { + /*thisLogger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info") + if err != nil { t.Fatal(err)}*/ + thisLogger := logger + reactors[i] = NewConsensusReactor(css[i], true) // so we dont start the consensus states - reactors[i].SetLogger(logger.With("validator", i)) + reactors[i].conS.SetLogger(thisLogger.With("validator", i)) + reactors[i].SetLogger(thisLogger.With("validator", i)) - eventSwitch := events.NewEventSwitch() - eventSwitch.SetLogger(logger.With("module", "events", "validator", i)) - _, err := eventSwitch.Start() - if err != nil { - t.Fatalf("Failed to start switch: %v", err) - } + eventBuses[i] = types.NewEventBus() + eventBuses[i].SetLogger(thisLogger.With("module", "events", "validator", i)) + err := eventBuses[i].Start() + require.NoError(t, err) - reactors[i].SetEventSwitch(eventSwitch) - if subscribeEventRespond { - eventChans[i] = subscribeToEventRespond(eventSwitch, "tester", types.EventStringNewBlock()) - } else { - eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1) - } + reactors[i].SetEventBus(eventBuses[i]) + + eventChans[i] = make(chan interface{}, 1) + err = eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i]) + require.NoError(t, err) } // make connected switches and start all reactors p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch { @@ -52,25 +59,29 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int, subscribeEven // now that everyone is connected, start the state machines // If we started the state machines before everyone was connected, // we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors + // TODO: is this still true with new pubsub? 
for i := 0; i < N; i++ { s := reactors[i].conS.GetState() reactors[i].SwitchToConsensus(s, 0) } - return reactors, eventChans + return reactors, eventChans, eventBuses } -func stopConsensusNet(reactors []*ConsensusReactor) { +func stopConsensusNet(reactors []*ConsensusReactor, eventBuses []*types.EventBus) { for _, r := range reactors { r.Switch.Stop() } + for _, b := range eventBuses { + b.Stop() + } } // Ensure a testnet makes blocks func TestReactor(t *testing.T) { N := 4 css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter) - reactors, eventChans := startConsensusNet(t, css, N, false) - defer stopConsensusNet(reactors) + reactors, eventChans, eventBuses := startConsensusNet(t, css, N) + defer stopConsensusNet(reactors, eventBuses) // wait till everyone makes the first new block timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) { <-eventChans[j] @@ -85,11 +96,14 @@ func TestReactorProposalHeartbeats(t *testing.T) { func(c *cfg.Config) { c.Consensus.CreateEmptyBlocks = false }) - reactors, eventChans := startConsensusNet(t, css, N, false) - defer stopConsensusNet(reactors) + reactors, eventChans, eventBuses := startConsensusNet(t, css, N) + defer stopConsensusNet(reactors, eventBuses) heartbeatChans := make([]chan interface{}, N) + var err error for i := 0; i < N; i++ { - heartbeatChans[i] = subscribeToEvent(css[i].evsw, "tester", types.EventStringProposalHeartbeat(), 1) + heartbeatChans[i] = make(chan interface{}, 1) + err = eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryProposalHeartbeat, heartbeatChans[i]) + require.NoError(t, err) } // wait till everyone sends a proposal heartbeat timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) { @@ -98,7 +112,9 @@ func TestReactorProposalHeartbeats(t *testing.T) { }, css) // send a tx - css[3].mempool.CheckTx([]byte{1, 2, 3}, nil) + if err := css[3].mempool.CheckTx([]byte{1, 2, 3}, nil); err != nil { + //t.Fatal(err) + } // wait till everyone 
makes the first new block timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) { @@ -113,8 +129,8 @@ func TestReactorProposalHeartbeats(t *testing.T) { func TestVotingPowerChange(t *testing.T) { nVals := 4 css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentDummy) - reactors, eventChans := startConsensusNet(t, css, nVals, true) - defer stopConsensusNet(reactors) + reactors, eventChans, eventBuses := startConsensusNet(t, css, nVals) + defer stopConsensusNet(reactors, eventBuses) // map of active validators activeVals := make(map[string]struct{}) @@ -125,7 +141,6 @@ func TestVotingPowerChange(t *testing.T) { // wait till everyone makes block 1 timeoutWaitGroup(t, nVals, func(wg *sync.WaitGroup, j int) { <-eventChans[j] - eventChans[j] <- struct{}{} wg.Done() }, css) @@ -174,8 +189,9 @@ func TestValidatorSetChanges(t *testing.T) { nPeers := 7 nVals := 4 css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentDummy) - reactors, eventChans := startConsensusNet(t, css, nPeers, true) - defer stopConsensusNet(reactors) + + reactors, eventChans, eventBuses := startConsensusNet(t, css, nPeers) + defer stopConsensusNet(reactors, eventBuses) // map of active validators activeVals := make(map[string]struct{}) @@ -186,7 +202,6 @@ func TestValidatorSetChanges(t *testing.T) { // wait till everyone makes block 1 timeoutWaitGroup(t, nPeers, func(wg *sync.WaitGroup, j int) { <-eventChans[j] - eventChans[j] <- struct{}{} wg.Done() }, css) @@ -194,7 +209,7 @@ func TestValidatorSetChanges(t *testing.T) { t.Log("---------------------------- Testing adding one validator") newValidatorPubKey1 := css[nVals].privValidator.GetPubKey() - newValidatorTx1 := dummy.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), uint64(testMinPower)) + newValidatorTx1 := dummy.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), testMinPower) // wait till everyone makes block 2 // ensure the commit 
includes all validators @@ -214,7 +229,7 @@ func TestValidatorSetChanges(t *testing.T) { // wait till everyone makes block 5 // it includes the commit for block 4, which should have the updated validator set - waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) + waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css) //--------------------------------------------------------------------------- t.Log("---------------------------- Testing changing the voting power of one validator") @@ -226,7 +241,7 @@ func TestValidatorSetChanges(t *testing.T) { waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1) waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) - waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) + waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css) if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { t.Errorf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[nVals].GetRoundState().LastValidators.TotalVotingPower()) @@ -236,17 +251,17 @@ func TestValidatorSetChanges(t *testing.T) { t.Log("---------------------------- Testing adding two validators at once") newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey() - newValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), uint64(testMinPower)) + newValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), testMinPower) newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey() - newValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), uint64(testMinPower)) + newValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), testMinPower) waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3) waitForAndValidateBlock(t, nPeers, activeVals, eventChans, 
css) waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) activeVals[string(newValidatorPubKey2.Address())] = struct{}{} activeVals[string(newValidatorPubKey3.Address())] = struct{}{} - waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) + waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css) //--------------------------------------------------------------------------- t.Log("---------------------------- Testing removing two validators at once") @@ -259,7 +274,7 @@ func TestValidatorSetChanges(t *testing.T) { waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) delete(activeVals, string(newValidatorPubKey2.Address())) delete(activeVals, string(newValidatorPubKey3.Address())) - waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) + waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css) } // Check we can make blocks with skip_timeout_commit=false @@ -271,8 +286,8 @@ func TestReactorWithTimeoutCommit(t *testing.T) { css[i].config.SkipTimeoutCommit = false } - reactors, eventChans := startConsensusNet(t, css, N-1, false) - defer stopConsensusNet(reactors) + reactors, eventChans, eventBuses := startConsensusNet(t, css, N-1) + defer stopConsensusNet(reactors, eventBuses) // wait till everyone makes the first new block timeoutWaitGroup(t, N-1, func(wg *sync.WaitGroup, j int) { @@ -283,19 +298,50 @@ func TestReactorWithTimeoutCommit(t *testing.T) { func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) { timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) { - newBlockI := <-eventChans[j] + defer wg.Done() + + newBlockI, ok := <-eventChans[j] + if !ok { + return + } newBlock := newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block - t.Logf("[WARN] Got block height=%v validator=%v", newBlock.Height, j) + t.Logf("Got block height=%v validator=%v", newBlock.Height, 
j) err := validateBlock(newBlock, activeVals) if err != nil { t.Fatal(err) } for _, tx := range txs { - css[j].mempool.CheckTx(tx, nil) + if err = css[j].mempool.CheckTx(tx, nil); err != nil { + t.Fatal(err) + } + } + }, css) +} + +func waitForBlockWithUpdatedValsAndValidateIt(t *testing.T, n int, updatedVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState) { + timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) { + defer wg.Done() + + var newBlock *types.Block + LOOP: + for { + newBlockI, ok := <-eventChans[j] + if !ok { + return + } + newBlock = newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block + if newBlock.LastCommit.Size() == len(updatedVals) { + t.Logf("Block with new validators height=%v validator=%v", newBlock.Height, j) + break LOOP + } else { + t.Logf("Block with no new validators height=%v validator=%v. Skipping...", newBlock.Height, j) + } } - eventChans[j] <- struct{}{} - wg.Done() + err := validateBlock(newBlock, updatedVals) + if err != nil { + t.Fatal(err) + } }, css) } @@ -326,15 +372,20 @@ func timeoutWaitGroup(t *testing.T, n int, f func(*sync.WaitGroup, int), css []* close(done) }() + // we're running many nodes in-process, possibly in in a virtual machine, + // and spewing debug messages - making a block could take a while, + timeout := time.Second * 60 + select { case <-done: - case <-time.After(time.Second * 10): + case <-time.After(timeout): for i, cs := range css { - fmt.Println("#################") - fmt.Println("Validator", i) - fmt.Println(cs.GetRoundState()) - fmt.Println("") + t.Log("#################") + t.Log("Validator", i) + t.Log(cs.GetRoundState()) + t.Log("") } + pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) panic("Timed out waiting for all validators to commit a block") } } diff --git a/consensus/replay.go b/consensus/replay.go index d3c5cd5d..168404af 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -7,12 +7,12 @@ import ( "hash/crc32" "io" "reflect" - "strconv" 
- "strings" + //"strconv" + //"strings" "time" abci "github.com/tendermint/abci/types" - auto "github.com/tendermint/tmlibs/autofile" + //auto "github.com/tendermint/tmlibs/autofile" cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tmlibs/log" @@ -90,8 +90,7 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan // replay only those messages since the last block. // timeoutRoutine should run concurrently to read off tickChan -func (cs *ConsensusState) catchupReplay(csHeight int) error { - +func (cs *ConsensusState) catchupReplay(csHeight int64) error { // set replayMode cs.replayMode = true defer func() { cs.replayMode = false }() @@ -99,16 +98,21 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error { // Ensure that ENDHEIGHT for this height doesn't exist // NOTE: This is just a sanity check. As far as we know things work fine without it, // and Handshake could reuse ConsensusState if it weren't for this check (since we can crash after writing ENDHEIGHT). - gr, found, err := cs.wal.SearchForEndHeight(uint64(csHeight)) + gr, found, err := cs.wal.SearchForEndHeight(csHeight) + if err != nil { + return err + } if gr != nil { - gr.Close() + if err := gr.Close(); err != nil { + return err + } } if found { - return errors.New(cmn.Fmt("WAL should not contain #ENDHEIGHT %d.", csHeight)) + return fmt.Errorf("WAL should not contain #ENDHEIGHT %d.", csHeight) } // Search for last height marker - gr, found, err = cs.wal.SearchForEndHeight(uint64(csHeight - 1)) + gr, found, err = cs.wal.SearchForEndHeight(csHeight - 1) if err == io.EOF { cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", csHeight-1) } else if err != nil { @@ -117,7 +121,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error { if !found { return errors.New(cmn.Fmt("Cannot replay height %d. 
WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1)) } - defer gr.Close() + defer gr.Close() // nolint: errcheck cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight) @@ -146,7 +150,8 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error { // Parses marker lines of the form: // #ENDHEIGHT: 12345 -func makeHeightSearchFunc(height int) auto.SearchFunc { +/* +func makeHeightSearchFunc(height int64) auto.SearchFunc { return func(line string) (int, error) { line = strings.TrimRight(line, "\n") parts := strings.Split(line, " ") @@ -165,7 +170,7 @@ func makeHeightSearchFunc(height int) auto.SearchFunc { return -1, nil } } -} +}*/ //---------------------------------------------- // Recover from failure during block processing @@ -200,7 +205,10 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { return errors.New(cmn.Fmt("Error calling Info: %v", err)) } - blockHeight := int(res.LastBlockHeight) // XXX: beware overflow + blockHeight := int64(res.LastBlockHeight) + if blockHeight < 0 { + return fmt.Errorf("Got a negative last block height (%d) from the app", blockHeight) + } appHash := res.LastBlockAppHash h.logger.Info("ABCI Handshake", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash)) @@ -222,7 +230,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { // Replay all blocks since appBlockHeight and ensure the result matches the current state. 
// Returns the final AppHash or an error -func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp proxy.AppConns) ([]byte, error) { +func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) { storeBlockHeight := h.store.Height() stateBlockHeight := h.state.LastBlockHeight @@ -231,7 +239,9 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p // If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain if appBlockHeight == 0 { validators := types.TM2PB.Validators(h.state.Validators) - proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}) + if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}); err != nil { + return nil, err + } } // First handle edge cases and constraints on the storeBlockHeight @@ -295,7 +305,7 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p return nil, nil } -func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int, mutateState bool) ([]byte, error) { +func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int64, mutateState bool) ([]byte, error) { // App is further behind than it should be, so we need to replay blocks. // We replay all blocks from appBlockHeight+1. // @@ -331,14 +341,13 @@ func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, store } // ApplyBlock on the proxyApp with the last block. 
-func (h *Handshaker) replayBlock(height int, proxyApp proxy.AppConnConsensus) ([]byte, error) { +func (h *Handshaker) replayBlock(height int64, proxyApp proxy.AppConnConsensus) ([]byte, error) { mempool := types.MockMempool{} - var eventCache types.Fireable // nil block := h.store.LoadBlock(height) meta := h.store.LoadBlockMeta(height) - if err := h.state.ApplyBlock(eventCache, proxyApp, block, meta.BlockID.PartsHeader, mempool); err != nil { + if err := h.state.ApplyBlock(types.NopEventBus{}, proxyApp, block, meta.BlockID.PartsHeader, mempool); err != nil { return nil, err } @@ -350,7 +359,6 @@ func (h *Handshaker) replayBlock(height int, proxyApp proxy.AppConnConsensus) ([ func (h *Handshaker) checkAppHash(appHash []byte) error { if !bytes.Equal(h.state.AppHash, appHash) { panic(errors.New(cmn.Fmt("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, h.state.AppHash)).Error()) - return nil } return nil } @@ -365,7 +373,10 @@ func newMockProxyApp(appHash []byte, abciResponses *sm.ABCIResponses) proxy.AppC abciResponses: abciResponses, }) cli, _ := clientCreator.NewABCIClient() - cli.Start() + err := cli.Start() + if err != nil { + panic(err) + } return proxy.NewAppConnConsensus(cli) } @@ -377,21 +388,17 @@ type mockProxyApp struct { abciResponses *sm.ABCIResponses } -func (mock *mockProxyApp) DeliverTx(tx []byte) abci.Result { +func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx { r := mock.abciResponses.DeliverTx[mock.txCount] mock.txCount += 1 - return abci.Result{ - r.Code, - r.Data, - r.Log, - } + return *r } -func (mock *mockProxyApp) EndBlock(height uint64) abci.ResponseEndBlock { +func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { mock.txCount = 0 - return mock.abciResponses.EndBlock + return *mock.abciResponses.EndBlock } -func (mock *mockProxyApp) Commit() abci.Result { - return abci.NewResultOK(mock.appHash, "") +func (mock *mockProxyApp) Commit() 
abci.ResponseCommit { + return abci.ResponseCommit{Code: abci.CodeTypeOK, Data: mock.appHash} } diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 24df20fb..d291e87c 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -2,13 +2,15 @@ package consensus import ( "bufio" - "errors" + "context" "fmt" "io" "os" "strconv" "strings" + "github.com/pkg/errors" + bc "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/proxy" @@ -18,6 +20,11 @@ import ( dbm "github.com/tendermint/tmlibs/db" ) +const ( + // event bus subscriber + subscriber = "replay-file" +) + //-------------------------------------------------------- // replay messages interactively or all at once @@ -42,16 +49,23 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error { cs.startForReplay() // ensure all new step events are regenerated as expected - newStepCh := subscribeToEvent(cs.evsw, "replay-test", types.EventStringNewRoundStep(), 1) + newStepCh := make(chan interface{}, 1) + + ctx := context.Background() + err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh) + if err != nil { + return errors.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) + } + defer cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep) // just open the file for reading, no need to use wal - fp, err := os.OpenFile(file, os.O_RDONLY, 0666) + fp, err := os.OpenFile(file, os.O_RDONLY, 0600) if err != nil { return err } pb := newPlayback(file, fp, cs, cs.state.Copy()) - defer pb.fp.Close() + defer pb.fp.Close() // nolint: errcheck var nextN int // apply N msgs in a row var msg *TimedWALMessage @@ -106,16 +120,17 @@ func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState *sm. 
// go back count steps by resetting the state and running (pb.count - count) steps func (pb *playback) replayReset(count int, newStepCh chan interface{}) error { - pb.cs.Stop() pb.cs.Wait() newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.proxyAppConn, pb.cs.blockStore, pb.cs.mempool) - newCS.SetEventSwitch(pb.cs.evsw) + newCS.SetEventBus(pb.cs.eventBus) newCS.startForReplay() - pb.fp.Close() - fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 0666) + if err := pb.fp.Close(); err != nil { + return err + } + fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 0600) if err != nil { return err } @@ -196,10 +211,20 @@ func (pb *playback) replayConsoleLoop() int { // NOTE: "back" is not supported in the state machine design, // so we restart and replay up to + ctx := context.Background() // ensure all new step events are regenerated as expected - newStepCh := subscribeToEvent(pb.cs.evsw, "replay-test", types.EventStringNewRoundStep(), 1) + newStepCh := make(chan interface{}, 1) + + err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh) + if err != nil { + cmn.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)) + } + defer pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep) + if len(tokens) == 1 { - pb.replayReset(1, newStepCh) + if err := pb.replayReset(1, newStepCh); err != nil { + pb.cs.Logger.Error("Replay reset error", "err", err) + } } else { i, err := strconv.Atoi(tokens[1]) if err != nil { @@ -207,7 +232,9 @@ func (pb *playback) replayConsoleLoop() int { } else if i > pb.count { fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count) } else { - pb.replayReset(i, newStepCh) + if err := pb.replayReset(i, newStepCh); err != nil { + pb.cs.Logger.Error("Replay reset error", "err", err) + } } } @@ -265,19 +292,18 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo // Create proxyAppConn 
connection (consensus, mempool, query) clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()) proxyApp := proxy.NewAppConns(clientCreator, NewHandshaker(state, blockStore)) - _, err = proxyApp.Start() + err = proxyApp.Start() if err != nil { cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err)) } - // Make event switch - eventSwitch := types.NewEventSwitch() - if _, err := eventSwitch.Start(); err != nil { - cmn.Exit(cmn.Fmt("Failed to start event switch: %v", err)) + eventBus := types.NewEventBus() + if err := eventBus.Start(); err != nil { + cmn.Exit(cmn.Fmt("Failed to start event bus: %v", err)) } consensusState := NewConsensusState(csConfig, state.Copy(), proxyApp.Consensus(), blockStore, types.MockMempool{}) - consensusState.SetEventSwitch(eventSwitch) + consensusState.SetEventBus(eventBus) return consensusState } diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 7d882dc1..af0af3e7 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -2,19 +2,24 @@ package consensus import ( "bytes" + "context" "errors" "fmt" "io" "io/ioutil" "os" "path" + "runtime" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/tendermint/abci/example/dummy" abci "github.com/tendermint/abci/types" crypto "github.com/tendermint/go-crypto" wire "github.com/tendermint/go-wire" + auto "github.com/tendermint/tmlibs/autofile" cmn "github.com/tendermint/tmlibs/common" dbm "github.com/tendermint/tmlibs/db" @@ -25,8 +30,10 @@ import ( "github.com/tendermint/tmlibs/log" ) +var consensusReplayConfig *cfg.Config + func init() { - config = ResetConfig("consensus_replay_test") + consensusReplayConfig = ResetConfig("consensus_replay_test") } // These tests ensure we can always recover from failure at any part of the consensus process. @@ -39,8 +46,7 @@ func init() { // NOTE: Files in this dir are generated by running the `build.sh` therein. 
// It's a simple way to generate wals for a single block, or multiple blocks, with random transactions, -// and different part sizes. The output is not deterministic, and the stepChanges may need to be adjusted -// after running it (eg. sometimes small_block2 will have 5 block parts, sometimes 6). +// and different part sizes. The output is not deterministic. // It should only have to be re-run if there is some breaking change to the consensus data structures (eg. blocks, votes) // or to the behaviour of the app (eg. computes app hash differently) var data_dir = path.Join(cmn.GoPath(), "src/github.com/tendermint/tendermint/consensus", "test_data") @@ -52,230 +58,209 @@ var data_dir = path.Join(cmn.GoPath(), "src/github.com/tendermint/tendermint/con // and which ones we need the wal for - then we'd also be able to only flush the // wal writer when we need to, instead of with every message. -// the priv validator changes step at these lines for a block with 1 val and 1 part -var baseStepChanges = []int{3, 6, 8} +func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) { + logger := log.TestingLogger() + state, _ := sm.GetState(stateDB, consensusReplayConfig.GenesisFile()) + state.SetLogger(logger.With("module", "state")) + privValidator := loadPrivValidator(consensusReplayConfig) + cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, dummy.NewDummyApplication(), blockDB) + cs.SetLogger(logger) -// test recovery from each line in each testCase -var testCases = []*testCase{ - newTestCase("empty_block", baseStepChanges), // empty block (has 1 block part) - newTestCase("small_block1", baseStepChanges), // small block with txs in 1 block part - newTestCase("small_block2", []int{3, 12, 14}), // small block with txs across 6 smaller block parts -} + bytes, _ := ioutil.ReadFile(cs.config.WalFile()) + // fmt.Printf("====== WAL: \n\r%s\n", bytes) + t.Logf("====== WAL: \n\r%s\n", 
bytes) -type testCase struct { - name string - log []byte //full cs wal - stepMap map[int]int8 // map lines of log to privval step + err := cs.Start() + require.NoError(t, err) + defer func() { + cs.Stop() + }() - proposeLine int - prevoteLine int - precommitLine int -} - -func newTestCase(name string, stepChanges []int) *testCase { - if len(stepChanges) != 3 { - panic(cmn.Fmt("a full wal has 3 step changes! Got array %v", stepChanges)) - } - return &testCase{ - name: name, - log: readWAL(path.Join(data_dir, name+".cswal")), - stepMap: newMapFromChanges(stepChanges), - - proposeLine: stepChanges[0], - prevoteLine: stepChanges[1], - precommitLine: stepChanges[2], - } -} - -func newMapFromChanges(changes []int) map[int]int8 { - changes = append(changes, changes[2]+1) // so we add the last step change to the map - m := make(map[int]int8) - var count int - for changeNum, nextChange := range changes { - for ; count < nextChange; count++ { - m[count] = int8(changeNum) - } - } - return m -} - -func readWAL(p string) []byte { - b, err := ioutil.ReadFile(p) - if err != nil { - panic(err) - } - return b -} - -func writeWAL(walMsgs []byte) string { - walFile, err := ioutil.TempFile("", "wal") - if err != nil { - panic(fmt.Errorf("failed to create temp WAL file: %v", err)) - } - _, err = walFile.Write(walMsgs) - if err != nil { - panic(fmt.Errorf("failed to write to temp WAL file: %v", err)) - } - if err := walFile.Close(); err != nil { - panic(fmt.Errorf("failed to close temp WAL file: %v", err)) - } - return walFile.Name() -} - -func waitForBlock(newBlockCh chan interface{}, thisCase *testCase, i int) { - after := time.After(time.Second * 10) + // This is just a signal that we haven't halted; its not something contained + // in the WAL itself. Assuming the consensus state is running, replay of any + // WAL, including the empty one, should eventually be followed by a new + // block, or else something is wrong. 
+ newBlockCh := make(chan interface{}, 1) + err = cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, newBlockCh) + require.NoError(t, err) select { case <-newBlockCh: - case <-after: - panic(cmn.Fmt("Timed out waiting for new block for case '%s' line %d", thisCase.name, i)) + case <-time.After(10 * time.Second): + t.Fatalf("Timed out waiting for new block (see trace above)") } } -func runReplayTest(t *testing.T, cs *ConsensusState, walFile string, newBlockCh chan interface{}, - thisCase *testCase, i int) { - - cs.config.SetWalFile(walFile) - started, err := cs.Start() - if err != nil { - t.Fatalf("Cannot start consensus: %v", err) - } - if !started { - t.Error("Consensus did not start") - } - // Wait to make a new block. - // This is just a signal that we haven't halted; its not something contained in the WAL itself. - // Assuming the consensus state is running, replay of any WAL, including the empty one, - // should eventually be followed by a new block, or else something is wrong - waitForBlock(newBlockCh, thisCase, i) - cs.evsw.Stop() - cs.Stop() -LOOP: +func sendTxs(cs *ConsensusState, ctx context.Context) { + i := 0 for { select { - case <-newBlockCh: + case <-ctx.Done(): + return default: - break LOOP - } - } - cs.Wait() -} - -func toPV(pv types.PrivValidator) *types.PrivValidatorFS { - return pv.(*types.PrivValidatorFS) -} - -func setupReplayTest(t *testing.T, thisCase *testCase, nLines int, crashAfter bool) (*ConsensusState, chan interface{}, []byte, string) { - t.Log("-------------------------------------") - t.Logf("Starting replay test %v (of %d lines of WAL). Crash after = %v", thisCase.name, nLines, crashAfter) - - lineStep := nLines - if crashAfter { - lineStep -= 1 - } - - split := bytes.Split(thisCase.log, walSeparator) - lastMsg := split[nLines] - - // we write those lines up to (not including) one with the signature - b := bytes.Join(split[:nLines], walSeparator) - b = append(b, walSeparator...) 
- walFile := writeWAL(b) - - cs := fixedConsensusStateDummy() - - // set the last step according to when we crashed vs the wal - toPV(cs.privValidator).LastHeight = 1 // first block - toPV(cs.privValidator).LastStep = thisCase.stepMap[lineStep] - - t.Logf("[WARN] setupReplayTest LastStep=%v", toPV(cs.privValidator).LastStep) - - newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1) - - return cs, newBlockCh, lastMsg, walFile -} - -func readTimedWALMessage(t *testing.T, rawMsg []byte) TimedWALMessage { - b := bytes.NewBuffer(rawMsg) - // because rawMsg does not contain a separator and WALDecoder#Decode expects it - _, err := b.Write(walSeparator) - if err != nil { - t.Fatal(err) - } - dec := NewWALDecoder(b) - msg, err := dec.Decode() - if err != nil { - t.Fatalf("Error reading json data: %v", err) - } - return *msg -} - -//----------------------------------------------- -// Test the log at every iteration, and set the privVal last step -// as if the log was written after signing, before the crash - -func TestWALCrashAfterWrite(t *testing.T) { - for _, thisCase := range testCases { - splitSize := bytes.Count(thisCase.log, walSeparator) - for i := 0; i < splitSize-1; i++ { - t.Run(fmt.Sprintf("%s:%d", thisCase.name, i), func(t *testing.T) { - cs, newBlockCh, _, walFile := setupReplayTest(t, thisCase, i+1, true) - cs.config.TimeoutPropose = 100 - runReplayTest(t, cs, walFile, newBlockCh, thisCase, i+1) - // cleanup - os.Remove(walFile) - }) + cs.mempool.CheckTx([]byte{byte(i)}, nil) + i++ } } } -//----------------------------------------------- -// Test the log as if we crashed after signing but before writing. -// This relies on privValidator.LastSignature being set +// TestWALCrash uses crashing WAL to test we can recover from any WAL failure. 
+func TestWALCrash(t *testing.T) { + testCases := []struct { + name string + initFn func(*ConsensusState, context.Context) + heightToStop int64 + }{ + {"empty block", + func(cs *ConsensusState, ctx context.Context) {}, + 1}, + {"block with a smaller part size", + func(cs *ConsensusState, ctx context.Context) { + // XXX: is there a better way to change BlockPartSizeBytes? + params := cs.state.Params + params.BlockPartSizeBytes = 512 + cs.state.Params = params + sendTxs(cs, ctx) + }, + 1}, + {"many non-empty blocks", + sendTxs, + 3}, + } -func TestWALCrashBeforeWritePropose(t *testing.T) { - for _, thisCase := range testCases { - lineNum := thisCase.proposeLine - t.Run(fmt.Sprintf("%s:%d", thisCase.name, lineNum), func(t *testing.T) { - // setup replay test where last message is a proposal - cs, newBlockCh, proposalMsg, walFile := setupReplayTest(t, thisCase, lineNum, false) - cs.config.TimeoutPropose = 100 - msg := readTimedWALMessage(t, proposalMsg) - proposal := msg.Msg.(msgInfo).Msg.(*ProposalMessage) - // Set LastSig - toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, proposal.Proposal) - toPV(cs.privValidator).LastSignature = proposal.Proposal.Signature - runReplayTest(t, cs, walFile, newBlockCh, thisCase, lineNum) - // cleanup - os.Remove(walFile) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + crashWALandCheckLiveness(t, tc.initFn, tc.heightToStop) }) } } -func TestWALCrashBeforeWritePrevote(t *testing.T) { - for _, thisCase := range testCases { - testReplayCrashBeforeWriteVote(t, thisCase, thisCase.prevoteLine, types.EventStringCompleteProposal()) +func crashWALandCheckLiveness(t *testing.T, initFn func(*ConsensusState, context.Context), heightToStop int64) { + walPaniced := make(chan error) + crashingWal := &crashingWAL{panicCh: walPaniced, heightToStop: heightToStop} + + i := 1 +LOOP: + for { + // fmt.Printf("====== LOOP %d\n", i) + t.Logf("====== LOOP %d\n", i) + + // create consensus state from a clean 
slate + logger := log.NewNopLogger() + stateDB := dbm.NewMemDB() + state, _ := sm.MakeGenesisStateFromFile(stateDB, consensusReplayConfig.GenesisFile()) + state.SetLogger(logger.With("module", "state")) + privValidator := loadPrivValidator(consensusReplayConfig) + blockDB := dbm.NewMemDB() + cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, dummy.NewDummyApplication(), blockDB) + cs.SetLogger(logger) + + // start sending transactions + ctx, cancel := context.WithCancel(context.Background()) + go initFn(cs, ctx) + + // clean up WAL file from the previous iteration + walFile := cs.config.WalFile() + os.Remove(walFile) + + // set crashing WAL + csWal, err := cs.OpenWAL(walFile) + require.NoError(t, err) + crashingWal.next = csWal + // reset the message counter + crashingWal.msgIndex = 1 + cs.wal = crashingWal + + // start consensus state + err = cs.Start() + require.NoError(t, err) + + i++ + + select { + case err := <-walPaniced: + t.Logf("WAL paniced: %v", err) + + // make sure we can make blocks after a crash + startNewConsensusStateAndWaitForBlock(t, cs.Height, blockDB, stateDB) + + // stop consensus state and transactions sender (initFn) + cs.Stop() + cancel() + + // if we reached the required height, exit + if _, ok := err.(ReachedHeightToStopError); ok { + break LOOP + } + case <-time.After(10 * time.Second): + t.Fatal("WAL did not panic for 10 seconds (check the log)") + } } } -func TestWALCrashBeforeWritePrecommit(t *testing.T) { - for _, thisCase := range testCases { - testReplayCrashBeforeWriteVote(t, thisCase, thisCase.precommitLine, types.EventStringPolka()) +// crashingWAL is a WAL which crashes or rather simulates a crash during Save +// (before and after). It remembers a message for which we last panicked +// (lastPanicedForMsgIndex), so we don't panic for it in subsequent iterations. 
+type crashingWAL struct { + next WAL + panicCh chan error + heightToStop int64 + + msgIndex int // current message index + lastPanicedForMsgIndex int // last message for which we panicked +} + +// WALWriteError indicates a WAL crash. +type WALWriteError struct { + msg string +} + +func (e WALWriteError) Error() string { + return e.msg +} + +// ReachedHeightToStopError indicates we've reached the required consensus +// height and may exit. +type ReachedHeightToStopError struct { + height int64 +} + +func (e ReachedHeightToStopError) Error() string { + return fmt.Sprintf("reached height to stop %d", e.height) +} + +// Save simulate WAL's crashing by sending an error to the panicCh and then +// exiting the cs.receiveRoutine. +func (w *crashingWAL) Save(m WALMessage) { + if endMsg, ok := m.(EndHeightMessage); ok { + if endMsg.Height == w.heightToStop { + w.panicCh <- ReachedHeightToStopError{endMsg.Height} + runtime.Goexit() + } else { + w.next.Save(m) + } + return + } + + if w.msgIndex > w.lastPanicedForMsgIndex { + w.lastPanicedForMsgIndex = w.msgIndex + _, file, line, _ := runtime.Caller(1) + w.panicCh <- WALWriteError{fmt.Sprintf("failed to write %T to WAL (fileline: %s:%d)", m, file, line)} + runtime.Goexit() + } else { + w.msgIndex++ + w.next.Save(m) } } -func testReplayCrashBeforeWriteVote(t *testing.T, thisCase *testCase, lineNum int, eventString string) { - // setup replay test where last message is a vote - cs, newBlockCh, voteMsg, walFile := setupReplayTest(t, thisCase, lineNum, false) - types.AddListenerForEvent(cs.evsw, "tester", eventString, func(data types.TMEventData) { - msg := readTimedWALMessage(t, voteMsg) - vote := msg.Msg.(msgInfo).Msg.(*VoteMessage) - // Set LastSig - toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, vote.Vote) - toPV(cs.privValidator).LastSignature = vote.Vote.Signature - }) - runReplayTest(t, cs, walFile, newBlockCh, thisCase, lineNum) +func (w *crashingWAL) Group() *auto.Group { return w.next.Group() } 
+func (w *crashingWAL) SearchForEndHeight(height int64) (gr *auto.GroupReader, found bool, err error) { + return w.next.SearchForEndHeight(height) } +func (w *crashingWAL) Start() error { return w.next.Start() } +func (w *crashingWAL) Stop() error { return w.next.Stop() } +func (w *crashingWAL) Wait() { w.next.Wait() } + //------------------------------------------------------------------------------------------ // Handshake Tests @@ -320,6 +305,21 @@ func TestHandshakeReplayNone(t *testing.T) { } } +func writeWAL(walMsgs []byte) string { + walFile, err := ioutil.TempFile("", "wal") + if err != nil { + panic(fmt.Errorf("failed to create temp WAL file: %v", err)) + } + _, err = walFile.Write(walMsgs) + if err != nil { + panic(fmt.Errorf("failed to write to temp WAL file: %v", err)) + } + if err := walFile.Close(); err != nil { + panic(fmt.Errorf("failed to close temp WAL file: %v", err)) + } + return walFile.Name() +} + // Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { config := ResetConfig("proxy_test_") @@ -339,7 +339,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { t.Fatal(err) } wal.SetLogger(log.TestingLogger()) - if _, err := wal.Start(); err != nil { + if err := wal.Start(); err != nil { t.Fatal(err) } chain, commits, err := makeBlockchainFromWAL(wal) @@ -368,7 +368,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { // now start the app using the handshake - it should sync handshaker := NewHandshaker(state, store) proxyApp := proxy.NewAppConns(clientCreator2, handshaker) - if _, err := proxyApp.Start(); err != nil { + if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) } @@ -397,7 +397,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { func applyBlock(st *sm.State, blk *types.Block, proxyApp proxy.AppConns) { 
testPartSize := st.Params.BlockPartSizeBytes - err := st.ApplyBlock(nil, proxyApp.Consensus(), blk, blk.MakePartSet(testPartSize).Header(), mempool) + err := st.ApplyBlock(types.NopEventBus{}, proxyApp.Consensus(), blk, blk.MakePartSet(testPartSize).Header(), mempool) if err != nil { panic(err) } @@ -406,12 +406,14 @@ func applyBlock(st *sm.State, blk *types.Block, proxyApp proxy.AppConns) { func buildAppStateFromChain(proxyApp proxy.AppConns, state *sm.State, chain []*types.Block, nBlocks int, mode uint) { // start a new app without handshake, play nBlocks blocks - if _, err := proxyApp.Start(); err != nil { + if err := proxyApp.Start(); err != nil { panic(err) } validators := types.TM2PB.Validators(state.Validators) - proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}) + if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}); err != nil { + panic(err) + } defer proxyApp.Stop() switch mode { @@ -439,13 +441,15 @@ func buildTMStateFromChain(config *cfg.Config, state *sm.State, chain []*types.B // run the whole chain against this client to build up the tendermint state clientCreator := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.DBDir(), "1"))) proxyApp := proxy.NewAppConns(clientCreator, nil) // sm.NewHandshaker(config, state, store, ReplayLastBlock)) - if _, err := proxyApp.Start(); err != nil { + if err := proxyApp.Start(); err != nil { panic(err) } defer proxyApp.Stop() validators := types.TM2PB.Validators(state.Validators) - proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}) + if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}); err != nil { + panic(err) + } var latestAppHash []byte @@ -477,7 +481,7 @@ func buildTMStateFromChain(config *cfg.Config, state *sm.State, chain []*types.B //-------------------------- // utils for making blocks -func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) { +func 
makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { // Search for height marker gr, found, err := wal.SearchForEndHeight(0) if err != nil { @@ -486,7 +490,7 @@ func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) { if !found { return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1)) } - defer gr.Close() + defer gr.Close() // nolint: errcheck // log.Notice("Build a blockchain by reading from the WAL") @@ -586,21 +590,21 @@ func NewMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBl return &mockBlockStore{config, params, nil, nil} } -func (bs *mockBlockStore) Height() int { return len(bs.chain) } -func (bs *mockBlockStore) LoadBlock(height int) *types.Block { return bs.chain[height-1] } -func (bs *mockBlockStore) LoadBlockMeta(height int) *types.BlockMeta { +func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } +func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] } +func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { block := bs.chain[height-1] return &types.BlockMeta{ BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.params.BlockPartSizeBytes).Header()}, Header: block.Header, } } -func (bs *mockBlockStore) LoadBlockPart(height int, index int) *types.Part { return nil } +func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { } -func (bs *mockBlockStore) LoadBlockCommit(height int) *types.Commit { +func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { return bs.commits[height-1] } -func (bs *mockBlockStore) LoadSeenCommit(height int) *types.Commit { +func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { return bs.commits[height-1] } diff --git a/consensus/state.go b/consensus/state.go index e5b7641f..eedc30bc 100644 
--- a/consensus/state.go +++ b/consensus/state.go @@ -4,7 +4,6 @@ import ( "bytes" "errors" "fmt" - "path/filepath" "reflect" "runtime/debug" "sync" @@ -55,7 +54,7 @@ type msgInfo struct { // internally generated messages which may update the state type timeoutInfo struct { Duration time.Duration `json:"duration"` - Height int `json:"height"` + Height int64 `json:"height"` Round int `json:"round"` Step cstypes.RoundStepType `json:"step"` } @@ -91,13 +90,13 @@ type ConsensusState struct { internalMsgQueue chan msgInfo timeoutTicker TimeoutTicker - // we use PubSub to trigger msg broadcasts in the reactor, + // we use eventBus to trigger msg broadcasts in the reactor, // and to notify external subscribers, eg. through a websocket - evsw types.EventSwitch + eventBus *types.EventBus // a Write-Ahead Log ensures we can recover from any kind of crash // and helps us avoid signing conflicting votes - wal *WAL + wal WAL replayMode bool // so we don't log signing errors during replay doWALCatchup bool // determines if we even try to do the catchup @@ -105,8 +104,8 @@ type ConsensusState struct { nSteps int // some functions can be overwritten for testing - decideProposal func(height, round int) - doPrevote func(height, round int) + decideProposal func(height int64, round int) + doPrevote func(height int64, round int) setProposal func(proposal *types.Proposal) error // closed when we finish shutting down @@ -125,6 +124,7 @@ func NewConsensusState(config *cfg.ConsensusConfig, state *sm.State, proxyAppCon timeoutTicker: NewTimeoutTicker(), done: make(chan struct{}), doWALCatchup: true, + wal: nilWAL{}, } // set function defaults (may be overwritten before calling Start) cs.decideProposal = cs.defaultDecideProposal @@ -148,9 +148,9 @@ func (cs *ConsensusState) SetLogger(l log.Logger) { cs.timeoutTicker.SetLogger(l) } -// SetEventSwitch implements events.Eventable -func (cs *ConsensusState) SetEventSwitch(evsw types.EventSwitch) { - cs.evsw = evsw +// SetEventBus sets event bus. 
+func (cs *ConsensusState) SetEventBus(b *types.EventBus) { + cs.eventBus = b } // String returns a string. @@ -179,7 +179,7 @@ func (cs *ConsensusState) getRoundState() *cstypes.RoundState { } // GetValidators returns a copy of the current validators. -func (cs *ConsensusState) GetValidators() (int, []*types.Validator) { +func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) { cs.mtx.Lock() defer cs.mtx.Unlock() return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators @@ -200,7 +200,7 @@ func (cs *ConsensusState) SetTimeoutTicker(timeoutTicker TimeoutTicker) { } // LoadCommit loads the commit for a given height. -func (cs *ConsensusState) LoadCommit(height int) *types.Commit { +func (cs *ConsensusState) LoadCommit(height int64) *types.Commit { cs.mtx.Lock() defer cs.mtx.Unlock() if height == cs.blockStore.Height() { @@ -212,19 +212,27 @@ func (cs *ConsensusState) LoadCommit(height int) *types.Commit { // OnStart implements cmn.Service. // It loads the latest state via the WAL, and starts the timeout and receive routines. func (cs *ConsensusState) OnStart() error { - - walFile := cs.config.WalFile() - if err := cs.OpenWAL(walFile); err != nil { - cs.Logger.Error("Error loading ConsensusState wal", "err", err.Error()) - return err + // we may set the WAL in testing before calling Start, + // so only OpenWAL if its still the nilWAL + if _, ok := cs.wal.(nilWAL); ok { + walFile := cs.config.WalFile() + wal, err := cs.OpenWAL(walFile) + if err != nil { + cs.Logger.Error("Error loading ConsensusState wal", "err", err.Error()) + return err + } + cs.wal = wal } // we need the timeoutRoutine for replay so - // we don't block on the tick chan. + // we don't block on the tick chan. 
// NOTE: we will get a build up of garbage go routines - // firing on the tockChan until the receiveRoutine is started - // to deal with them (by that point, at most one will be valid) - cs.timeoutTicker.Start() + // firing on the tockChan until the receiveRoutine is started + // to deal with them (by that point, at most one will be valid) + err := cs.timeoutTicker.Start() + if err != nil { + return err + } // we may have lost some votes if the process crashed // reload from consensus log to catchup @@ -249,7 +257,11 @@ func (cs *ConsensusState) OnStart() error { // timeoutRoutine: receive requests for timeouts on tickChan and fire timeouts on tockChan // receiveRoutine: serializes processing of proposoals, block parts, votes; coordinates state transitions func (cs *ConsensusState) startRoutines(maxSteps int) { - cs.timeoutTicker.Start() + err := cs.timeoutTicker.Start() + if err != nil { + cs.Logger.Error("Error starting timeout ticker", "err", err) + return + } go cs.receiveRoutine(maxSteps) } @@ -260,7 +272,7 @@ func (cs *ConsensusState) OnStop() { cs.timeoutTicker.Stop() // Make BaseService.Wait() wait until cs.wal.Wait() - if cs.wal != nil && cs.IsRunning() { + if cs.IsRunning() { cs.wal.Wait() } } @@ -273,25 +285,17 @@ func (cs *ConsensusState) Wait() { } // OpenWAL opens a file to log all consensus messages and timeouts for deterministic accountability -func (cs *ConsensusState) OpenWAL(walFile string) (err error) { - err = cmn.EnsureDir(filepath.Dir(walFile), 0700) - if err != nil { - cs.Logger.Error("Error ensuring ConsensusState wal dir", "err", err.Error()) - return err - } - - cs.mtx.Lock() - defer cs.mtx.Unlock() +func (cs *ConsensusState) OpenWAL(walFile string) (WAL, error) { wal, err := NewWAL(walFile, cs.config.WalLight) if err != nil { - return err + cs.Logger.Error("Failed to open WAL for consensus state", "wal", walFile, "err", err) + return nil, err } wal.SetLogger(cs.Logger.With("wal", walFile)) - if _, err := wal.Start(); err != nil { - 
return err + if err := wal.Start(); err != nil { + return nil, err } - cs.wal = wal - return nil + return wal, nil } //------------------------------------------------------------ @@ -327,7 +331,7 @@ func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerKey string) } // AddProposalBlockPart inputs a part of the proposal block. -func (cs *ConsensusState) AddProposalBlockPart(height, round int, part *types.Part, peerKey string) error { +func (cs *ConsensusState) AddProposalBlockPart(height int64, round int, part *types.Part, peerKey string) error { if peerKey == "" { cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""} @@ -341,18 +345,22 @@ func (cs *ConsensusState) AddProposalBlockPart(height, round int, part *types.Pa // SetProposalAndBlock inputs the proposal and all block parts. func (cs *ConsensusState) SetProposalAndBlock(proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerKey string) error { - cs.SetProposal(proposal, peerKey) + if err := cs.SetProposal(proposal, peerKey); err != nil { + return err + } for i := 0; i < parts.Total(); i++ { part := parts.GetPart(i) - cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerKey) + if err := cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerKey); err != nil { + return err + } } - return nil // TODO errors + return nil } //------------------------------------------------------------ // internal functions for managing the state -func (cs *ConsensusState) updateHeight(height int) { +func (cs *ConsensusState) updateHeight(height int64) { cs.Height = height } @@ -364,12 +372,12 @@ func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType) // enterNewRound(height, 0) at cs.StartTime. 
func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) { //cs.Logger.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime) - sleepDuration := rs.StartTime.Sub(time.Now()) + sleepDuration := rs.StartTime.Sub(time.Now()) // nolint: gotype, gosimple cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight) } // Attempt to schedule a timeout (by sending timeoutInfo on the tickChan) -func (cs *ConsensusState) scheduleTimeout(duration time.Duration, height, round int, step cstypes.RoundStepType) { +func (cs *ConsensusState) scheduleTimeout(duration time.Duration, height int64, round int, step cstypes.RoundStepType) { cs.timeoutTicker.ScheduleTimeout(timeoutInfo{duration, height, round, step}) } @@ -480,9 +488,9 @@ func (cs *ConsensusState) newStep() { rs := cs.RoundStateEvent() cs.wal.Save(rs) cs.nSteps += 1 - // newStep is called by updateToStep in NewConsensusState before the evsw is set! - if cs.evsw != nil { - types.FireEventNewRoundStep(cs.evsw, rs) + // newStep is called by updateToStep in NewConsensusState before the eventBus is set! 
+ if cs.eventBus != nil { + cs.eventBus.PublishEventNewRoundStep(rs) } } @@ -536,9 +544,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) { // priv_val tracks LastSig // close wal now that we're done writing to it - if cs.wal != nil { - cs.wal.Stop() - } + cs.wal.Stop() close(cs.done) return @@ -607,13 +613,13 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { case cstypes.RoundStepNewRound: cs.enterPropose(ti.Height, 0) case cstypes.RoundStepPropose: - types.FireEventTimeoutPropose(cs.evsw, cs.RoundStateEvent()) + cs.eventBus.PublishEventTimeoutPropose(cs.RoundStateEvent()) cs.enterPrevote(ti.Height, ti.Round) case cstypes.RoundStepPrevoteWait: - types.FireEventTimeoutWait(cs.evsw, cs.RoundStateEvent()) + cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()) cs.enterPrecommit(ti.Height, ti.Round) case cstypes.RoundStepPrecommitWait: - types.FireEventTimeoutWait(cs.evsw, cs.RoundStateEvent()) + cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()) cs.enterNewRound(ti.Height, ti.Round+1) default: panic(cmn.Fmt("Invalid timeout step: %v", ti.Step)) @@ -621,7 +627,7 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { } -func (cs *ConsensusState) handleTxsAvailable(height int) { +func (cs *ConsensusState) handleTxsAvailable(height int64) { cs.mtx.Lock() defer cs.mtx.Unlock() // we only need to do this for round 0 @@ -638,7 +644,7 @@ func (cs *ConsensusState) handleTxsAvailable(height int) { // Enter: +2/3 precommits for nil at (height,round-1) // Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round) // NOTE: cs.StartTime was already set for height. -func (cs *ConsensusState) enterNewRound(height int, round int) { +func (cs *ConsensusState) enterNewRound(height int64, round int) { if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) { cs.Logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. 
Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return @@ -673,7 +679,7 @@ func (cs *ConsensusState) enterNewRound(height int, round int) { } cs.Votes.SetRound(round + 1) // also track next round (round+1) to allow round-skipping - types.FireEventNewRound(cs.evsw, cs.RoundStateEvent()) + cs.eventBus.PublishEventNewRound(cs.RoundStateEvent()) // Wait for txs to be available in the mempool // before we enterPropose in round 0. If the last block changed the app hash, @@ -691,19 +697,16 @@ func (cs *ConsensusState) enterNewRound(height int, round int) { // needProofBlock returns true on the first height (so the genesis app hash is signed right away) // and where the last block (height-1) caused the app hash to change -func (cs *ConsensusState) needProofBlock(height int) bool { +func (cs *ConsensusState) needProofBlock(height int64) bool { if height == 1 { return true } lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) - if !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash) { - return true - } - return false + return !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash) } -func (cs *ConsensusState) proposalHeartbeat(height, round int) { +func (cs *ConsensusState) proposalHeartbeat(height int64, round int) { counter := 0 addr := cs.privValidator.GetAddress() valIndex, v := cs.Validators.GetByAddress(addr) @@ -726,8 +729,7 @@ func (cs *ConsensusState) proposalHeartbeat(height, round int) { ValidatorIndex: valIndex, } cs.privValidator.SignHeartbeat(chainID, heartbeat) - heartbeatEvent := types.EventDataProposalHeartbeat{heartbeat} - types.FireEventProposalHeartbeat(cs.evsw, heartbeatEvent) + cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat}) counter += 1 time.Sleep(proposalHeartbeatIntervalSeconds * time.Second) } @@ -736,7 +738,7 @@ func (cs *ConsensusState) proposalHeartbeat(height, round int) { // Enter (CreateEmptyBlocks): from enterNewRound(height,round) // Enter (CreateEmptyBlocks, 
CreateEmptyBlocksInterval > 0 ): after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval // Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool -func (cs *ConsensusState) enterPropose(height int, round int) { +func (cs *ConsensusState) enterPropose(height int64, round int) { if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) { cs.Logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return @@ -783,7 +785,7 @@ func (cs *ConsensusState) isProposer() bool { return bytes.Equal(cs.Validators.GetProposer().Address, cs.privValidator.GetAddress()) } -func (cs *ConsensusState) defaultDecideProposal(height, round int) { +func (cs *ConsensusState) defaultDecideProposal(height int64, round int) { var block *types.Block var blockParts *types.PartSet @@ -871,7 +873,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts // Enter: any +2/3 prevotes for future round. // Prevote for LockedBlock if we're locked, or ProposalBlock if valid. // Otherwise vote nil. -func (cs *ConsensusState) enterPrevote(height int, round int) { +func (cs *ConsensusState) enterPrevote(height int64, round int) { if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) { cs.Logger.Debug(cmn.Fmt("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return @@ -885,7 +887,7 @@ func (cs *ConsensusState) enterPrevote(height int, round int) { // fire event for how we got here if cs.isProposalComplete() { - types.FireEventCompleteProposal(cs.evsw, cs.RoundStateEvent()) + cs.eventBus.PublishEventCompleteProposal(cs.RoundStateEvent()) } else { // we received +2/3 prevotes for a future round // TODO: catchup event? 
@@ -900,7 +902,7 @@ func (cs *ConsensusState) enterPrevote(height int, round int) { // (so we have more time to try and collect +2/3 prevotes for a single block) } -func (cs *ConsensusState) defaultDoPrevote(height int, round int) { +func (cs *ConsensusState) defaultDoPrevote(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) // If a block is locked, prevote that. if cs.LockedBlock != nil { @@ -933,7 +935,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int, round int) { } // Enter: any +2/3 prevotes at next round. -func (cs *ConsensusState) enterPrevoteWait(height int, round int) { +func (cs *ConsensusState) enterPrevoteWait(height int64, round int) { if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) { cs.Logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return @@ -959,7 +961,7 @@ func (cs *ConsensusState) enterPrevoteWait(height int, round int) { // Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round) // else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil, // else, precommit nil otherwise. -func (cs *ConsensusState) enterPrecommit(height int, round int) { +func (cs *ConsensusState) enterPrecommit(height int64, round int) { if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) { cs.Logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. 
Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return @@ -987,7 +989,7 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) { } // At this point +2/3 prevoted for a particular block or nil - types.FireEventPolka(cs.evsw, cs.RoundStateEvent()) + cs.eventBus.PublishEventPolka(cs.RoundStateEvent()) // the latest POLRound should be this round polRound, _ := cs.Votes.POLInfo() @@ -1004,7 +1006,7 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) { cs.LockedRound = 0 cs.LockedBlock = nil cs.LockedBlockParts = nil - types.FireEventUnlock(cs.evsw, cs.RoundStateEvent()) + cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) } cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{}) return @@ -1016,7 +1018,7 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) { if cs.LockedBlock.HashesTo(blockID.Hash) { cs.Logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking") cs.LockedRound = round - types.FireEventRelock(cs.evsw, cs.RoundStateEvent()) + cs.eventBus.PublishEventRelock(cs.RoundStateEvent()) cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader) return } @@ -1031,7 +1033,7 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) { cs.LockedRound = round cs.LockedBlock = cs.ProposalBlock cs.LockedBlockParts = cs.ProposalBlockParts - types.FireEventLock(cs.evsw, cs.RoundStateEvent()) + cs.eventBus.PublishEventLock(cs.RoundStateEvent()) cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader) return } @@ -1047,12 +1049,12 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) { cs.ProposalBlock = nil cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader) } - types.FireEventUnlock(cs.evsw, cs.RoundStateEvent()) + cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{}) } // Enter: any +2/3 precommits for next round. 
-func (cs *ConsensusState) enterPrecommitWait(height int, round int) { +func (cs *ConsensusState) enterPrecommitWait(height int64, round int) { if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) { cs.Logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return @@ -1074,7 +1076,7 @@ func (cs *ConsensusState) enterPrecommitWait(height int, round int) { } // Enter: +2/3 precommits for block -func (cs *ConsensusState) enterCommit(height int, commitRound int) { +func (cs *ConsensusState) enterCommit(height int64, commitRound int) { if cs.Height != height || cstypes.RoundStepCommit <= cs.Step { cs.Logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) return @@ -1120,7 +1122,7 @@ func (cs *ConsensusState) enterCommit(height int, commitRound int) { } // If we have the block AND +2/3 commits for it, finalize. -func (cs *ConsensusState) tryFinalizeCommit(height int) { +func (cs *ConsensusState) tryFinalizeCommit(height int64) { if cs.Height != height { cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) } @@ -1142,7 +1144,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int) { } // Increment height and goto cstypes.RoundStepNewHeight -func (cs *ConsensusState) finalizeCommit(height int) { +func (cs *ConsensusState) finalizeCommit(height int64) { if cs.Height != height || cs.Step != cstypes.RoundStepCommit { cs.Logger.Debug(cmn.Fmt("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step)) return @@ -1191,23 +1193,25 @@ func (cs *ConsensusState) finalizeCommit(height int) { // WAL replay for blocks with an #ENDHEIGHT // As is, ConsensusState should not be started again // until we successfully call ApplyBlock (ie. 
here or in Handshake after restart) - if cs.wal != nil { - cs.wal.Save(EndHeightMessage{uint64(height)}) - } + cs.wal.Save(EndHeightMessage{height}) fail.Fail() // XXX // Create a copy of the state for staging // and an event cache for txs stateCopy := cs.state.Copy() - eventCache := types.NewEventCache(cs.evsw) + txEventBuffer := types.NewTxEventBuffer(cs.eventBus, block.NumTxs) // Execute and commit the block, update and save the state, and update the mempool. // All calls to the proxyAppConn come here. // NOTE: the block.AppHash wont reflect these txs until the next block - err := stateCopy.ApplyBlock(eventCache, cs.proxyAppConn, block, blockParts.Header(), cs.mempool) + err := stateCopy.ApplyBlock(txEventBuffer, cs.proxyAppConn, block, blockParts.Header(), cs.mempool) if err != nil { cs.Logger.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "err", err) + err := cmn.Kill() + if err != nil { + cs.Logger.Error("Failed to kill this process - please do so manually", "err", err) + } return } @@ -1220,9 +1224,12 @@ func (cs *ConsensusState) finalizeCommit(height int) { // * Fire before persisting state, in ApplyBlock // * Fire on start up if we haven't written any new WAL msgs // Both options mean we may fire more than once. Is that fine ? - types.FireEventNewBlock(cs.evsw, types.EventDataNewBlock{block}) - types.FireEventNewBlockHeader(cs.evsw, types.EventDataNewBlockHeader{block.Header}) - eventCache.Flush() + cs.eventBus.PublishEventNewBlock(types.EventDataNewBlock{block}) + cs.eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{block.Header}) + err = txEventBuffer.Flush() + if err != nil { + cs.Logger.Error("Failed to flush event buffer", "err", err) + } fail.Fail() // XXX @@ -1278,7 +1285,7 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error { // NOTE: block is not necessarily valid. 
// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block. -func (cs *ConsensusState) addProposalBlockPart(height int, part *types.Part, verify bool) (added bool, err error) { +func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, verify bool) (added bool, err error) { // Blocks might be reused, so round mismatch is OK if cs.Height != height { return false, nil @@ -1357,7 +1364,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool, added, err = cs.LastCommit.AddVote(vote) if added { cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort())) - types.FireEventVote(cs.evsw, types.EventDataVote{vote}) + cs.eventBus.PublishEventVote(types.EventDataVote{vote}) // if we can skip timeoutCommit and have all the votes now, if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() { @@ -1375,7 +1382,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool, height := cs.Height added, err = cs.Votes.AddVote(vote, peerKey) if added { - types.FireEventVote(cs.evsw, types.EventDataVote{vote}) + cs.eventBus.PublishEventVote(types.EventDataVote{vote}) switch vote.Type { case types.VoteTypePrevote: @@ -1393,7 +1400,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool, cs.LockedRound = 0 cs.LockedBlock = nil cs.LockedBlockParts = nil - types.FireEventUnlock(cs.evsw, cs.RoundStateEvent()) + cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) } } if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() { @@ -1487,7 +1494,7 @@ func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.Part //--------------------------------------------------------- -func CompareHRS(h1, r1 int, s1 cstypes.RoundStepType, h2, r2 int, s2 cstypes.RoundStepType) int { +func CompareHRS(h1 int64, r1 int, s1 cstypes.RoundStepType, h2 int64, r2 int, s2 cstypes.RoundStepType) int { if h1 < h2 
{ return -1 } else if h1 > h2 { diff --git a/consensus/state_test.go b/consensus/state_test.go index 060e37d4..ecccafed 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -2,6 +2,7 @@ package consensus import ( "bytes" + "context" "fmt" "testing" "time" @@ -9,6 +10,8 @@ import ( cstypes "github.com/tendermint/tendermint/consensus/types" "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tmlibs/common" + "github.com/tendermint/tmlibs/log" + tmpubsub "github.com/tendermint/tmlibs/pubsub" ) func init() { @@ -56,8 +59,8 @@ func TestProposerSelection0(t *testing.T) { cs1, vss := randConsensusState(4) height, round := cs1.Height, cs1.Round - newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1) - proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) startTestRound(cs1, height, round) @@ -89,7 +92,7 @@ func TestProposerSelection0(t *testing.T) { func TestProposerSelection2(t *testing.T) { cs1, vss := randConsensusState(4) // test needs more work for more than 3 validators - newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) // this time we jump in at round 2 incrementRound(vss[1:]...) 
@@ -121,7 +124,7 @@ func TestEnterProposeNoPrivValidator(t *testing.T) { height, round := cs.Height, cs.Round // Listen for propose timeout event - timeoutCh := subscribeToEvent(cs.evsw, "tester", types.EventStringTimeoutPropose(), 1) + timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) startTestRound(cs, height, round) @@ -146,8 +149,8 @@ func TestEnterProposeYesPrivValidator(t *testing.T) { // Listen for propose timeout event - timeoutCh := subscribeToEvent(cs.evsw, "tester", types.EventStringTimeoutPropose(), 1) - proposalCh := subscribeToEvent(cs.evsw, "tester", types.EventStringCompleteProposal(), 1) + timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) + proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) cs.enterNewRound(height, round) cs.startRoutines(3) @@ -183,8 +186,8 @@ func TestBadProposal(t *testing.T) { partSize := cs1.state.Params.BlockPartSizeBytes - proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1) - voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, vs2) @@ -206,7 +209,9 @@ func TestBadProposal(t *testing.T) { } // set the proposal block - cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer") + if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } // start the machine startTestRound(cs1, height, round) @@ -238,9 +243,17 @@ func TestFullRound1(t *testing.T) { cs, vss := randConsensusState(1) height, round := cs.Height, cs.Round - voteCh := subscribeToEvent(cs.evsw, "tester", types.EventStringVote(), 0) - propCh := subscribeToEvent(cs.evsw, "tester", types.EventStringCompleteProposal(), 1) - newRoundCh := subscribeToEvent(cs.evsw, "tester", 
types.EventStringNewRound(), 1) + // NOTE: buffer capacity of 0 ensures we can validate prevote and last commit + // before consensus can move to the next height (and cause a race condition) + cs.eventBus.Stop() + eventBus := types.NewEventBusWithBufferCapacity(0) + eventBus.SetLogger(log.TestingLogger().With("module", "events")) + cs.SetEventBus(eventBus) + eventBus.Start() + + voteCh := subscribe(cs.eventBus, types.EventQueryVote) + propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) startTestRound(cs, height, round) @@ -251,8 +264,6 @@ func TestFullRound1(t *testing.T) { propBlockHash := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash() <-voteCh // wait for prevote - // NOTE: voteChan cap of 0 ensures we can complete this - // before consensus can move to the next height (and cause a race condition) validatePrevote(t, cs, round, vss[0], propBlockHash) <-voteCh // wait for precommit @@ -268,7 +279,7 @@ func TestFullRoundNil(t *testing.T) { cs, vss := randConsensusState(1) height, round := cs.Height, cs.Round - voteCh := subscribeToEvent(cs.evsw, "tester", types.EventStringVote(), 1) + voteCh := subscribe(cs.eventBus, types.EventQueryVote) cs.enterPrevote(height, round) cs.startRoutines(4) @@ -287,8 +298,8 @@ func TestFullRound2(t *testing.T) { vs2 := vss[1] height, round := cs1.Height, cs1.Round - voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1) - newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlock(), 1) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) // start round and wait for propose and prevote startTestRound(cs1, height, round) @@ -330,11 +341,11 @@ func TestLockNoPOL(t *testing.T) { partSize := cs1.state.Params.BlockPartSizeBytes - timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", 
types.EventStringTimeoutPropose(), 1) - timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1) - voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1) - proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1) - newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1) + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) /* Round1 (cs1, B) // B B // B B2 @@ -469,7 +480,9 @@ func TestLockNoPOL(t *testing.T) { // now we're on a new round and not the proposer // so set the proposal block - cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), "") + if err := cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), ""); err != nil { + t.Fatal(err) + } <-proposalCh <-voteCh // prevote @@ -496,12 +509,12 @@ func TestLockPOLRelock(t *testing.T) { partSize := cs1.state.Params.BlockPartSizeBytes - timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1) - timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1) - proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1) - voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1) - newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1) - newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlockHeader(), 1) + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(cs1.eventBus, 
types.EventQueryCompleteProposal) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) // everything done from perspective of cs1 @@ -546,7 +559,9 @@ func TestLockPOLRelock(t *testing.T) { <-timeoutWaitCh //XXX: this isnt guaranteed to get there before the timeoutPropose ... - cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer") + if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } <-newRoundCh t.Log("### ONTO ROUND 1") @@ -609,11 +624,11 @@ func TestLockPOLUnlock(t *testing.T) { partSize := cs1.state.Params.BlockPartSizeBytes - proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1) - timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1) - timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1) - newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1) - unlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringUnlock(), 1) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) // everything done from perspective of cs1 @@ -658,7 +673,9 @@ func TestLockPOLUnlock(t *testing.T) { lockedBlockHash := rs.LockedBlock.Hash() //XXX: this isnt guaranteed to get there before the timeoutPropose ... 
- cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer") + if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } <-newRoundCh t.Log("#### ONTO ROUND 1") @@ -704,10 +721,10 @@ func TestLockPOLSafety1(t *testing.T) { partSize := cs1.state.Params.BlockPartSizeBytes - proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1) - timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1) - timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1) - newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote @@ -745,7 +762,9 @@ func TestLockPOLSafety1(t *testing.T) { incrementRound(vs2, vs3, vs4) //XXX: this isnt guaranteed to get there before the timeoutPropose ... - cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer") + if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) + } <-newRoundCh t.Log("### ONTO ROUND 1") @@ -802,7 +821,7 @@ func TestLockPOLSafety1(t *testing.T) { // we should prevote what we're locked on validatePrevote(t, cs1, 2, vss[0], propBlockHash) - newStepCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRoundStep(), 1) + newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep) // add prevotes from the earlier round addVotes(cs1, prevotes...) 
@@ -825,11 +844,11 @@ func TestLockPOLSafety2(t *testing.T) { partSize := cs1.state.Params.BlockPartSizeBytes - proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1) - timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1) - timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1) - newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1) - unlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringUnlock(), 1) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) // the block for R0: gets polkad but we miss it @@ -857,7 +876,9 @@ func TestLockPOLSafety2(t *testing.T) { startTestRound(cs1, height, 1) <-newRoundCh - cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer") + if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil { + t.Fatal(err) + } <-proposalCh <-voteCh // prevote @@ -882,7 +903,9 @@ func TestLockPOLSafety2(t *testing.T) { if err := vs3.SignProposal(config.ChainID, newProp); err != nil { t.Fatal(err) } - cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer") + if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil { + t.Fatal(err) + } // Add the pol votes addVotes(cs1, prevotes...) 
@@ -919,9 +942,9 @@ func TestSlashingPrevotes(t *testing.T) { vs2 := vss[1] - proposalCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringCompleteProposal() , 1) - timeoutWaitCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringTimeoutWait() , 1) - newRoundCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringNewRound() , 1) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote @@ -954,9 +977,9 @@ func TestSlashingPrecommits(t *testing.T) { vs2 := vss[1] - proposalCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringCompleteProposal() , 1) - timeoutWaitCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringTimeoutWait() , 1) - newRoundCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringNewRound() , 1) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote @@ -1000,10 +1023,10 @@ func TestHalt1(t *testing.T) { partSize := cs1.state.Params.BlockPartSizeBytes - proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1) - timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1) - newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1) - newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlock(), 1) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, 
types.EventQueryNewRound) + newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote @@ -1057,3 +1080,20 @@ func TestHalt1(t *testing.T) { panic("expected height to increment") } } + +// subscribe subscribes test client to the given query and returns a channel with cap = 1. +func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan interface{} { + out := make(chan interface{}, 1) + err := eventBus.Subscribe(context.Background(), testSubscriber, q, out) + if err != nil { + panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) + } + return out +} + +// discardFromChan reads n values from the channel. +func discardFromChan(ch <-chan interface{}, n int) { + for i := 0; i < n; i++ { + <-ch + } +} diff --git a/consensus/test_data/build.sh b/consensus/test_data/build.sh index dcec6f2a..6f410c70 100755 --- a/consensus/test_data/build.sh +++ b/consensus/test_data/build.sh @@ -52,19 +52,19 @@ function reset(){ reset -function empty_block(){ - echo "==> Starting tendermint..." - tendermint node --proxy_app=persistent_dummy &> /dev/null & - sleep 5 - echo "==> Killing tendermint..." - killall tendermint +# function empty_block(){ +# echo "==> Starting tendermint..." +# tendermint node --proxy_app=persistent_dummy &> /dev/null & +# sleep 5 +# echo "==> Killing tendermint..." +# killall tendermint - echo "==> Copying WAL log..." - $cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_empty_block.cswal - mv consensus/test_data/new_empty_block.cswal consensus/test_data/empty_block.cswal +# echo "==> Copying WAL log..." 
+# $cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_empty_block.cswal +# mv consensus/test_data/new_empty_block.cswal consensus/test_data/empty_block.cswal - reset -} +# reset +# } function many_blocks(){ bash scripts/txs/random.sh 1000 36657 &> /dev/null & @@ -84,63 +84,63 @@ function many_blocks(){ } -function small_block1(){ - bash scripts/txs/random.sh 1000 36657 &> /dev/null & - PID=$! - echo "==> Starting tendermint..." - tendermint node --proxy_app=persistent_dummy &> /dev/null & - sleep 10 - echo "==> Killing tendermint..." - kill -9 $PID - killall tendermint +# function small_block1(){ +# bash scripts/txs/random.sh 1000 36657 &> /dev/null & +# PID=$! +# echo "==> Starting tendermint..." +# tendermint node --proxy_app=persistent_dummy &> /dev/null & +# sleep 10 +# echo "==> Killing tendermint..." +# kill -9 $PID +# killall tendermint - echo "==> Copying WAL log..." - $cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_small_block1.cswal - mv consensus/test_data/new_small_block1.cswal consensus/test_data/small_block1.cswal +# echo "==> Copying WAL log..." +# $cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_small_block1.cswal +# mv consensus/test_data/new_small_block1.cswal consensus/test_data/small_block1.cswal - reset -} +# reset +# } -# block part size = 512 -function small_block2(){ - cat "$TMHOME/genesis.json" | jq '. + {consensus_params: {block_size_params: {max_bytes: 22020096}, block_gossip_params: {block_part_size_bytes: 512}}}' > "$TMHOME/new_genesis.json" - mv "$TMHOME/new_genesis.json" "$TMHOME/genesis.json" - bash scripts/txs/random.sh 1000 36657 &> /dev/null & - PID=$! - echo "==> Starting tendermint..." - tendermint node --proxy_app=persistent_dummy &> /dev/null & - sleep 5 - echo "==> Killing tendermint..." - kill -9 $PID - killall tendermint +# # block part size = 512 +# function small_block2(){ +# cat "$TMHOME/genesis.json" | jq '. 
+ {consensus_params: {block_size_params: {max_bytes: 22020096}, block_gossip_params: {block_part_size_bytes: 512}}}' > "$TMHOME/new_genesis.json" +# mv "$TMHOME/new_genesis.json" "$TMHOME/genesis.json" +# bash scripts/txs/random.sh 1000 36657 &> /dev/null & +# PID=$! +# echo "==> Starting tendermint..." +# tendermint node --proxy_app=persistent_dummy &> /dev/null & +# sleep 5 +# echo "==> Killing tendermint..." +# kill -9 $PID +# killall tendermint - echo "==> Copying WAL log..." - $cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_small_block2.cswal - mv consensus/test_data/new_small_block2.cswal consensus/test_data/small_block2.cswal +# echo "==> Copying WAL log..." +# $cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_small_block2.cswal +# mv consensus/test_data/new_small_block2.cswal consensus/test_data/small_block2.cswal - reset -} +# reset +# } case "$1" in - "small_block1") - small_block1 - ;; - "small_block2") - small_block2 - ;; - "empty_block") - empty_block - ;; + # "small_block1") + # small_block1 + # ;; + # "small_block2") + # small_block2 + # ;; + # "empty_block") + # empty_block + # ;; "many_blocks") many_blocks ;; *) - small_block1 - small_block2 - empty_block + # small_block1 + # small_block2 + # empty_block many_blocks esac diff --git a/consensus/test_data/empty_block.cswal b/consensus/test_data/empty_block.cswal deleted file mode 100644 index 609f4ddf..00000000 Binary files a/consensus/test_data/empty_block.cswal and /dev/null differ diff --git a/consensus/test_data/many_blocks.cswal b/consensus/test_data/many_blocks.cswal index ab486b5a..2af0c3cc 100644 Binary files a/consensus/test_data/many_blocks.cswal and b/consensus/test_data/many_blocks.cswal differ diff --git a/consensus/test_data/small_block1.cswal b/consensus/test_data/small_block1.cswal deleted file mode 100644 index b7c7e777..00000000 Binary files a/consensus/test_data/small_block1.cswal and /dev/null differ diff --git 
a/consensus/test_data/small_block2.cswal b/consensus/test_data/small_block2.cswal deleted file mode 100644 index 2ef077dc..00000000 Binary files a/consensus/test_data/small_block2.cswal and /dev/null differ diff --git a/consensus/ticker.go b/consensus/ticker.go index 317268b7..4762becc 100644 --- a/consensus/ticker.go +++ b/consensus/ticker.go @@ -15,8 +15,8 @@ var ( // conditional on the height/round/step in the timeoutInfo. // The timeoutInfo.Duration may be non-positive. type TimeoutTicker interface { - Start() (bool, error) - Stop() bool + Start() error + Stop() error Chan() <-chan timeoutInfo // on which to receive a timeout ScheduleTimeout(ti timeoutInfo) // reset the timer diff --git a/consensus/types/height_vote_set.go b/consensus/types/height_vote_set.go index 18c1c78a..0a0a25fe 100644 --- a/consensus/types/height_vote_set.go +++ b/consensus/types/height_vote_set.go @@ -29,7 +29,7 @@ One for their LastCommit round, and another for the official commit round. */ type HeightVoteSet struct { chainID string - height int + height int64 valSet *types.ValidatorSet mtx sync.Mutex @@ -38,7 +38,7 @@ type HeightVoteSet struct { peerCatchupRounds map[string][]int // keys: peer.Key; values: at most 2 rounds } -func NewHeightVoteSet(chainID string, height int, valSet *types.ValidatorSet) *HeightVoteSet { +func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet { hvs := &HeightVoteSet{ chainID: chainID, } @@ -46,7 +46,7 @@ func NewHeightVoteSet(chainID string, height int, valSet *types.ValidatorSet) *H return hvs } -func (hvs *HeightVoteSet) Reset(height int, valSet *types.ValidatorSet) { +func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) { hvs.mtx.Lock() defer hvs.mtx.Unlock() @@ -59,7 +59,7 @@ func (hvs *HeightVoteSet) Reset(height int, valSet *types.ValidatorSet) { hvs.round = 0 } -func (hvs *HeightVoteSet) Height() int { +func (hvs *HeightVoteSet) Height() int64 { hvs.mtx.Lock() defer hvs.mtx.Unlock() 
return hvs.height diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index d5797368..e09d1419 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -47,7 +47,7 @@ func TestPeerCatchupRounds(t *testing.T) { } -func makeVoteHR(t *testing.T, height, round int, privVals []*types.PrivValidatorFS, valIndex int) *types.Vote { +func makeVoteHR(t *testing.T, height int64, round int, privVals []*types.PrivValidatorFS, valIndex int) *types.Vote { privVal := privVals[valIndex] vote := &types.Vote{ ValidatorAddress: privVal.GetAddress(), diff --git a/consensus/types/reactor.go b/consensus/types/reactor.go index 2306ee38..7dfeed92 100644 --- a/consensus/types/reactor.go +++ b/consensus/types/reactor.go @@ -13,7 +13,7 @@ import ( // PeerRoundState contains the known state of a peer. // NOTE: Read-only when returned by PeerState.GetRoundState(). type PeerRoundState struct { - Height int // Height peer is at + Height int64 // Height peer is at Round int // Round peer is at, -1 if unknown. Step RoundStepType // Step peer is at StartTime time.Time // Estimated start of round 0 at this height diff --git a/consensus/types/state.go b/consensus/types/state.go index 0e6b1577..da4df6a4 100644 --- a/consensus/types/state.go +++ b/consensus/types/state.go @@ -55,8 +55,10 @@ func (rs RoundStepType) String() string { // It is Immutable when returned from ConsensusState.GetRoundState() // TODO: Actually, only the top pointer is copied, // so access to field pointers is still racey +// NOTE: Not thread safe. Should only be manipulated by functions downstream +// of the cs.receiveRoutine type RoundState struct { - Height int // Height we are working on + Height int64 // Height we are working on Round int Step RoundStepType StartTime time.Time @@ -76,11 +78,14 @@ type RoundState struct { // RoundStateEvent returns the H/R/S of the RoundState as an event. 
func (rs *RoundState) RoundStateEvent() types.EventDataRoundState { + // XXX: copy the RoundState + // if we want to avoid this, we may need synchronous events after all + rs_ := *rs edrs := types.EventDataRoundState{ Height: rs.Height, Round: rs.Round, Step: rs.Step.String(), - RoundState: rs, + RoundState: &rs_, } return edrs } diff --git a/consensus/wal.go b/consensus/wal.go index 80f4b809..69519c16 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -6,8 +6,11 @@ import ( "fmt" "hash/crc32" "io" + "path/filepath" "time" + "github.com/pkg/errors" + wire "github.com/tendermint/go-wire" "github.com/tendermint/tendermint/types" auto "github.com/tendermint/tmlibs/autofile" @@ -29,7 +32,7 @@ type TimedWALMessage struct { // EndHeightMessage marks the end of the given height inside WAL. // @internal used by scripts/cutWALUntil util. type EndHeightMessage struct { - Height uint64 `json:"height"` + Height int64 `json:"height"` } type WALMessage interface{} @@ -45,11 +48,22 @@ var _ = wire.RegisterInterface( //-------------------------------------------------------- // Simple write-ahead logger +// WAL is an interface for any write-ahead logger. +type WAL interface { + Save(WALMessage) + Group() *auto.Group + SearchForEndHeight(height int64) (gr *auto.GroupReader, found bool, err error) + + Start() error + Stop() error + Wait() +} + // Write ahead logger writes msgs to disk before they are processed. 
// Can be used for crash-recovery and deterministic replay // TODO: currently the wal is overwritten during replay catchup // give it a mode so it's either reading or appending - must read to end to start appending again -type WAL struct { +type baseWAL struct { cmn.BaseService group *auto.Group @@ -58,38 +72,47 @@ type WAL struct { enc *WALEncoder } -func NewWAL(walFile string, light bool) (*WAL, error) { +func NewWAL(walFile string, light bool) (*baseWAL, error) { + err := cmn.EnsureDir(filepath.Dir(walFile), 0700) + if err != nil { + return nil, errors.Wrap(err, "failed to ensure WAL directory is in place") + } + group, err := auto.OpenGroup(walFile) if err != nil { return nil, err } - wal := &WAL{ + wal := &baseWAL{ group: group, light: light, enc: NewWALEncoder(group), } - wal.BaseService = *cmn.NewBaseService(nil, "WAL", wal) + wal.BaseService = *cmn.NewBaseService(nil, "baseWAL", wal) return wal, nil } -func (wal *WAL) OnStart() error { +func (wal *baseWAL) Group() *auto.Group { + return wal.group +} + +func (wal *baseWAL) OnStart() error { size, err := wal.group.Head.Size() if err != nil { return err } else if size == 0 { wal.Save(EndHeightMessage{0}) } - _, err = wal.group.Start() + err = wal.group.Start() return err } -func (wal *WAL) OnStop() { +func (wal *baseWAL) OnStop() { wal.BaseService.OnStop() wal.group.Stop() } // called in newStep and for each pass in receiveRoutine -func (wal *WAL) Save(msg WALMessage) { +func (wal *baseWAL) Save(msg WALMessage) { if wal == nil { return } @@ -119,7 +142,7 @@ func (wal *WAL) Save(msg WALMessage) { // Group reader will be nil if found equals false. // // CONTRACT: caller must close group reader. 
-func (wal *WAL) SearchForEndHeight(height uint64) (gr *auto.GroupReader, found bool, err error) { +func (wal *baseWAL) SearchForEndHeight(height int64) (gr *auto.GroupReader, found bool, err error) { var msg *TimedWALMessage // NOTE: starting from the last file in the group because we're usually @@ -151,7 +174,6 @@ func (wal *WAL) SearchForEndHeight(height uint64) (gr *auto.GroupReader, found b } } } - gr.Close() } @@ -250,7 +272,7 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) { } var nn int - var res *TimedWALMessage + var res *TimedWALMessage // nolint: gosimple res = wire.ReadBinary(&TimedWALMessage{}, bytes.NewBuffer(data), int(length), &nn, &err).(*TimedWALMessage) if err != nil { return nil, fmt.Errorf("failed to decode data: %v", err) @@ -277,3 +299,14 @@ func readSeparator(r io.Reader) error { } return nil } + +type nilWAL struct{} + +func (nilWAL) Save(m WALMessage) {} +func (nilWAL) Group() *auto.Group { return nil } +func (nilWAL) SearchForEndHeight(height int64) (gr *auto.GroupReader, found bool, err error) { + return nil, false, nil +} +func (nilWAL) Start() error { return nil } +func (nilWAL) Stop() error { return nil } +func (nilWAL) Wait() {} diff --git a/consensus/wal_test.go b/consensus/wal_test.go index 0235afab..38f2ce03 100644 --- a/consensus/wal_test.go +++ b/consensus/wal_test.go @@ -2,10 +2,13 @@ package consensus import ( "bytes" + "crypto/rand" "path" + "sync" "testing" "time" + wire "github.com/tendermint/go-wire" "github.com/tendermint/tendermint/consensus/types" tmtypes "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tmlibs/common" @@ -45,8 +48,8 @@ func TestSearchForEndHeight(t *testing.T) { t.Fatal(err) } - h := 3 - gr, found, err := wal.SearchForEndHeight(uint64(h)) + h := int64(3) + gr, found, err := wal.SearchForEndHeight(h) assert.NoError(t, err, cmn.Fmt("expected not to err on height %d", h)) assert.True(t, found, cmn.Fmt("expected to find end height for %d", h)) assert.NotNil(t, gr, "expected 
group not to be nil") @@ -58,5 +61,67 @@ func TestSearchForEndHeight(t *testing.T) { rs, ok := msg.Msg.(tmtypes.EventDataRoundState) assert.True(t, ok, "expected message of type EventDataRoundState") assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height")) - +} + +var initOnce sync.Once + +func registerInterfacesOnce() { + initOnce.Do(func() { + var _ = wire.RegisterInterface( + struct{ WALMessage }{}, + wire.ConcreteType{[]byte{}, 0x10}, + ) + }) +} + +func nBytes(n int) []byte { + buf := make([]byte, n) + n, _ = rand.Read(buf) + return buf[:n] +} + +func benchmarkWalDecode(b *testing.B, n int) { + registerInterfacesOnce() + + buf := new(bytes.Buffer) + enc := NewWALEncoder(buf) + + data := nBytes(n) + enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second)}) + + encoded := buf.Bytes() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + buf.Write(encoded) + dec := NewWALDecoder(buf) + if _, err := dec.Decode(); err != nil { + b.Fatal(err) + } + } + b.ReportAllocs() +} + +func BenchmarkWalDecode512B(b *testing.B) { + benchmarkWalDecode(b, 512) +} + +func BenchmarkWalDecode10KB(b *testing.B) { + benchmarkWalDecode(b, 10*1024) +} +func BenchmarkWalDecode100KB(b *testing.B) { + benchmarkWalDecode(b, 100*1024) +} +func BenchmarkWalDecode1MB(b *testing.B) { + benchmarkWalDecode(b, 1024*1024) +} +func BenchmarkWalDecode10MB(b *testing.B) { + benchmarkWalDecode(b, 10*1024*1024) +} +func BenchmarkWalDecode100MB(b *testing.B) { + benchmarkWalDecode(b, 100*1024*1024) +} +func BenchmarkWalDecode1GB(b *testing.B) { + benchmarkWalDecode(b, 1024*1024*1024) } diff --git a/docs/architecture/adr-006-trust-metric.md b/docs/architecture/adr-006-trust-metric.md new file mode 100644 index 00000000..ec8a0cce --- /dev/null +++ b/docs/architecture/adr-006-trust-metric.md @@ -0,0 +1,238 @@ +# ADR 006: Trust Metric Design + +## Context + +The proposed trust metric will allow Tendermint to maintain local trust rankings for peers it has directly interacted with, 
which can then be used to implement soft security controls. The calculations were obtained from the [TrustGuard](https://dl.acm.org/citation.cfm?id=1060808) project. + +### Background + +The Tendermint Core project developers would like to improve Tendermint security and reliability by keeping track of the level of trustworthiness peers have demonstrated within the peer-to-peer network. This way, undesirable outcomes from peers will not immediately result in them being dropped from the network (potentially causing drastic changes to take place). Instead, peers behavior can be monitored with appropriate metrics and be removed from the network once Tendermint Core is certain the peer is a threat. For example, when the PEXReactor makes a request for peers network addresses from a already known peer, and the returned network addresses are unreachable, this untrustworthy behavior should be tracked. Returning a few bad network addresses probably shouldn’t cause a peer to be dropped, while excessive amounts of this behavior does qualify the peer being dropped. + +Trust metrics can be circumvented by malicious nodes through the use of strategic oscillation techniques, which adapts the malicious node’s behavior pattern in order to maximize its goals. For instance, if the malicious node learns that the time interval of the Tendermint trust metric is *X* hours, then it could wait *X* hours in-between malicious activities. We could try to combat this issue by increasing the interval length, yet this will make the system less adaptive to recent events. + +Instead, having shorter intervals, but keeping a history of interval values, will give our metric the flexibility needed in order to keep the network stable, while also making it resilient against a strategic malicious node in the Tendermint peer-to-peer network. 
Also, the metric can access trust data over a rather long period of time while not greatly increasing its history size by aggregating older history values over a larger number of intervals, and at the same time, maintain great precision for the recent intervals. This approach is referred to as fading memories, and closely resembles the way human beings remember their experiences. The trade-off to using history data is that the interval values should be preserved in-between executions of the node. + +### References + +S. Mudhakar, L. Xiong, and L. Liu, “TrustGuard: Countering Vulnerabilities in Reputation Management for Decentralized Overlay Networks,” in *Proceedings of the 14th international conference on World Wide Web, pp. 422-431*, May 2005. + +## Decision + +The proposed trust metric will allow a developer to inform the trust metric store of all good and bad events relevant to a peer's behavior, and at any time, the metric can be queried for a peer's current trust ranking. + +The three subsections below will cover the process being considered for calculating the trust ranking, the concept of the trust metric store, and the interface for the trust metric. + +### Proposed Process + +The proposed trust metric will count good and bad events relevant to the object, and calculate the percent of counters that are good over an interval with a predefined duration. This is the procedure that will continue for the life of the trust metric. When the trust metric is queried for the current **trust value**, a resilient equation will be utilized to perform the calculation. + +The equation being proposed resembles a Proportional-Integral-Derivative (PID) controller used in control systems. The proportional component allows us to be sensitive to the value of the most recent interval, while the integral component allows us to incorporate trust values stored in the history data, and the derivative component allows us to give weight to sudden changes in the behavior of a peer. 
We compute the trust value of a peer in interval i based on its current trust ranking, its trust rating history prior to interval *i* (over the past *maxH* number of intervals) and its trust ranking fluctuation. We will break up the equation into the three components. + +```math +(1) Proportional Value = a * R[i] +``` + +where *R*[*i*] denotes the raw trust value at time interval *i* (where *i* == 0 being current time) and *a* is the weight applied to the contribution of the current reports. The next component of our equation uses a weighted sum over the last *maxH* intervals to calculate the history value for time *i*: + + +`H[i] = ` ![formula1](img/formula1.png "Weighted Sum Formula") + + +The weights can be chosen either optimistically or pessimistically. An optimistic weight creates larger weights for newer history data values, while the the pessimistic weight creates larger weights for time intervals with lower scores. The default weights used during the calculation of the history value are optimistic and calculated as *Wk* = 0.8^*k*, for time interval *k*. With the history value available, we can now finish calculating the integral value: + +```math +(2) Integral Value = b * H[i] +``` + +Where *H*[*i*] denotes the history value at time interval *i* and *b* is the weight applied to the contribution of past performance for the object being measured. The derivative component will be calculated as follows: + +```math +D[i] = R[i] – H[i] + +(3) Derivative Value = c(D[i]) * D[i] +``` + +Where the value of *c* is selected based on the *D*[*i*] value relative to zero. The default selection process makes *c* equal to 0 unless *D*[*i*] is a negative value, in which case c is equal to 1. The result is that the maximum penalty is applied when current behavior is lower than previously experienced behavior. If the current behavior is better than the previously experienced behavior, then the Derivative Value has no impact on the trust value. 
With the three components brought together, our trust value equation is calculated as follows: + +```math +TrustValue[i] = a * R[i] + b * H[i] + c(D[i]) * D[i] +``` + +As a performance optimization that will keep the amount of raw interval data being saved to a reasonable size of *m*, while allowing us to represent 2^*m* - 1 history intervals, we can employ the fading memories technique that will trade space and time complexity for the precision of the history data values by summarizing larger quantities of less recent values. While our equation above attempts to access up to *maxH* (which can be 2^*m* - 1), we will map those requests down to *m* values using equation 4 below: + +```math +(4) j = index, where index > 0 +``` + +Where *j* is one of *(0, 1, 2, … , m – 1)* indices used to access history interval data. Now we can access the raw intervals using the following calculations: + +```math +R[0] = raw data for current time interval +``` + +`R[j] = ` ![formula2](img/formula2.png "Fading Memories Formula") + +### Trust Metric Store + +Similar to the P2P subsystem AddrBook, the trust metric store will maintain information relevant to Tendermint peers. Additionally, the trust metric store will ensure that trust metrics will only be active for peers that a node is currently and directly engaged with. + +Reactors will provide a peer key to the trust metric store in order to retrieve the associated trust metric. The trust metric can then record new positive and negative events experienced by the reactor, as well as provided the current trust score calculated by the metric. + +When the node is shutting down, the trust metric store will save history data for trust metrics associated with all known peers. This saved information allows experiences with a peer to be preserved across node executions, which can span a tracking windows of days or weeks. The trust history data is loaded automatically during OnStart. 
+ +### Interface Detailed Design + +Each trust metric allows for the recording of positive/negative events, querying the current trust value/score, and the stopping/pausing of tracking over time intervals. This can be seen below: + + +```go + +// TrustMetric - keeps track of peer reliability +type TrustMetric struct { + // Private elements. +} + +// Pause tells the metric to pause recording data over time intervals. +// All method calls that indicate events will unpause the metric +func (tm *TrustMetric) Pause() {} + +// Stop tells the metric to stop recording data over time intervals +func (tm *TrustMetric) Stop() {} + +// BadEvents indicates that an undesirable event(s) took place +func (tm *TrustMetric) BadEvents(num int) {} + +// GoodEvents indicates that a desirable event(s) took place +func (tm *TrustMetric) GoodEvents(num int) {} + +// TrustValue gets the dependable trust value; always between 0 and 1 +func (tm *TrustMetric) TrustValue() float64 {} + +// TrustScore gets a score based on the trust value always between 0 and 100 +func (tm *TrustMetric) TrustScore() int {} + +// NewMetric returns a trust metric with the default configuration +func NewMetric() *TrustMetric {} + +//------------------------------------------------------------------------------------------------ +// For example + +tm := NewMetric() + +tm.BadEvents(1) +score := tm.TrustScore() + +tm.Stop() + +``` + +Some of the trust metric parameters can be configured. The weight values should probably be left alone in more cases, yet the time durations for the tracking window and individual time interval should be considered. + +```go + +// TrustMetricConfig - Configures the weight functions and time intervals for the metric +type TrustMetricConfig struct { + // Determines the percentage given to current behavior + ProportionalWeight float64 + + // Determines the percentage given to prior behavior + IntegralWeight float64 + + // The window of time that the trust metric will track events across. 
+ // This can be set to cover many days without issue + TrackingWindow time.Duration + + // Each interval should be short for adapability. + // Less than 30 seconds is too sensitive, + // and greater than 5 minutes will make the metric numb + IntervalLength time.Duration +} + +// DefaultConfig returns a config with values that have been tested and produce desirable results +func DefaultConfig() TrustMetricConfig {} + +// NewMetricWithConfig returns a trust metric with a custom configuration +func NewMetricWithConfig(tmc TrustMetricConfig) *TrustMetric {} + +//------------------------------------------------------------------------------------------------ +// For example + +config := TrustMetricConfig{ + TrackingWindow: time.Minute * 60 * 24, // one day + IntervalLength: time.Minute * 2, +} + +tm := NewMetricWithConfig(config) + +tm.BadEvents(10) +tm.Pause() +tm.GoodEvents(1) // becomes active again + +``` + +A trust metric store should be created with a DB that has persistent storage so it can save history data across node executions. All trust metrics instantiated by the store will be created with the provided TrustMetricConfig configuration. + +When you attempt to fetch the trust metric for a peer, and an entry does not exist in the trust metric store, a new metric is automatically created and the entry made within the store. + +In additional to the fetching method, GetPeerTrustMetric, the trust metric store provides a method to call when a peer has disconnected from the node. This is so the metric can be paused (history data will not be saved) for periods of time when the node is not having direct experiences with the peer. 
+ +```go + +// TrustMetricStore - Manages all trust metrics for peers +type TrustMetricStore struct { + cmn.BaseService + + // Private elements +} + +// OnStart implements Service +func (tms *TrustMetricStore) OnStart() error {} + +// OnStop implements Service +func (tms *TrustMetricStore) OnStop() {} + +// NewTrustMetricStore returns a store that saves data to the DB +// and uses the config when creating new trust metrics +func NewTrustMetricStore(db dbm.DB, tmc TrustMetricConfig) *TrustMetricStore {} + +// Size returns the number of entries in the trust metric store +func (tms *TrustMetricStore) Size() int {} + +// GetPeerTrustMetric returns a trust metric by peer key +func (tms *TrustMetricStore) GetPeerTrustMetric(key string) *TrustMetric {} + +// PeerDisconnected pauses the trust metric associated with the peer identified by the key +func (tms *TrustMetricStore) PeerDisconnected(key string) {} + +//------------------------------------------------------------------------------------------------ +// For example + +db := dbm.NewDB("trusthistory", "goleveldb", dirPathStr) +tms := NewTrustMetricStore(db, DefaultConfig()) + +tm := tms.GetPeerTrustMetric(key) +tm.BadEvents(1) + +tms.PeerDisconnected(key) + +``` + +## Status + +Approved. 
+ +## Consequences + +### Positive + +- The trust metric will allow Tendermint to make non-binary security and reliability decisions +- Will help Tendermint implement deterrents that provide soft security controls, yet avoids disruption on the network +- Will provide useful profiling information when analyzing performance over time related to peer interaction + +### Negative + +- Requires saving the trust metric history data across node executions + +### Neutral + +- Keep in mind that, good events need to be recorded just as bad events do using this implementation diff --git a/docs/architecture/img/formula1.png b/docs/architecture/img/formula1.png new file mode 100644 index 00000000..447ee30f Binary files /dev/null and b/docs/architecture/img/formula1.png differ diff --git a/docs/architecture/img/formula2.png b/docs/architecture/img/formula2.png new file mode 100644 index 00000000..081a1576 Binary files /dev/null and b/docs/architecture/img/formula2.png differ diff --git a/docs/ecosystem.rst b/docs/ecosystem.rst index dc643c5b..30ab9a35 100644 --- a/docs/ecosystem.rst +++ b/docs/ecosystem.rst @@ -106,7 +106,7 @@ ABCI Servers +------------------------------------------------------------------+--------------------+--------------+ | `Spearmint `__ | Dennis Mckinnon | Javascript | +------------------------------------------------------------------+--------------------+--------------+ -| `py-tendermint `__ | Dave Bryson | Python | +| `py-abci `__ | Dave Bryson | Python | +------------------------------------------------------------------+--------------------+--------------+ Deployment Tools diff --git a/docs/getting-started.rst b/docs/getting-started.rst index a9a391b0..26f6b789 100644 --- a/docs/getting-started.rst +++ b/docs/getting-started.rst @@ -5,7 +5,7 @@ As a general purpose blockchain engine, Tendermint is agnostic to the application you want to run. 
So, to run a complete blockchain that does something useful, you must start two programs: one is Tendermint Core, the other is your application, which can be written in any programming -language. Recall from `the intro to ABCI `__ that +language. Recall from `the intro to ABCI `__ that Tendermint Core handles all the p2p and consensus stuff, and just forwards transactions to the application when they need to be validated, or when they're ready to be committed to a block. diff --git a/docs/install.rst b/docs/install.rst index 36865594..64fae4cd 100644 --- a/docs/install.rst +++ b/docs/install.rst @@ -15,7 +15,7 @@ Install Go ^^^^^^^^^^ Make sure you have `installed Go `__ and -set the ``GOPATH``. +set the ``GOPATH``. You should also put ``GOPATH/bin`` on your ``PATH``. Get Source Code ^^^^^^^^^^^^^^^ diff --git a/docs/specification/block-structure.rst b/docs/specification/block-structure.rst index 92cd9d3f..f1bf4b0b 100644 --- a/docs/specification/block-structure.rst +++ b/docs/specification/block-structure.rst @@ -98,7 +98,7 @@ This is to protect anyone from swapping votes between chains to fake (or frame) a validator. Also note that this ``chainID`` is in the ``genesis.json`` from *Tendermint*, not the ``genesis.json`` from the basecoin app (`that is a different -chainID... `__). +chainID... `__). Once we have those votes, and we calculated the proper `sign bytes `__ @@ -136,7 +136,7 @@ Block Hash The `block hash `__ -is the `Simple Tree hash `__ +is the `Simple Tree hash <./merkle.html#simple-tree-with-dictionaries>`__ of the fields of the block ``Header`` encoded as a list of ``KVPair``\ s. diff --git a/docs/specification/merkle.rst b/docs/specification/merkle.rst index 64bf7eac..588f24a9 100644 --- a/docs/specification/merkle.rst +++ b/docs/specification/merkle.rst @@ -6,9 +6,9 @@ For an overview of Merkle trees, see There are two types of Merkle trees used in Tendermint. 
-- ```IAVL+ Tree`` <#iavl-tree>`__: An immutable self-balancing binary +- **IAVL+ Tree**: An immutable self-balancing binary tree for persistent application state -- ```Simple Tree`` <#simple-tree>`__: A simple compact binary tree for +- **Simple Tree**: A simple compact binary tree for a static list of items IAVL+ Tree diff --git a/glide.lock b/glide.lock index 9814bb33..82846067 100644 --- a/glide.lock +++ b/glide.lock @@ -1,8 +1,8 @@ -hash: dce4a972f0e46b3c5e2b2b12913cde282eaaa7f5d7146def47fa509ceccbfe95 -updated: 2017-11-28T04:03:55.53240986Z +hash: 09fc7f59ca6b718fe236368bb55f4801455295cfe455ea5865d544ee4dcfdc08 +updated: 2017-12-06T03:31:34.476581624-05:00 imports: - name: github.com/btcsuite/btcd - version: 8cea3866d0f7fb12d567a20744942c0d078c7d15 + version: 2e60448ffcc6bf78332d1fe590260095f554dd78 subpackages: - btcec - name: github.com/ebuchman/fail-test @@ -28,7 +28,12 @@ imports: - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: + - gogoproto + - jsonpb - proto + - protoc-gen-gogo/descriptor + - sortkeys + - types - name: github.com/golang/protobuf version: 1e59b77b52bf8e4b449a57e6f79f21226d571845 subpackages: @@ -67,7 +72,7 @@ imports: - name: github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/rcrowley/go-metrics - version: 1f30fe9094a513ce4c700b9a54458bbb0c96996c + version: e181e095bae94582363434144c61a9653aff6e50 - name: github.com/spf13/afero version: 8d919cbe7e2627e417f3e45c3c0e489a5b7e2536 subpackages: @@ -98,9 +103,10 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/abci - version: 76ef8a0697c6179220a74c479b36c27a5b53008a + version: fca2b508c185b855af1446ec4afc19bdfc7b315d subpackages: - client + - example/code - example/counter - example/dummy - server @@ -113,16 +119,17 @@ imports: - name: github.com/tendermint/go-crypto version: dd20358a264c772b4a83e477b0cfce4c88a7001d - name: github.com/tendermint/go-wire - version: 
2baffcb6b690057568bc90ef1d457efb150b979a + version: b6fc872b42d41158a60307db4da051dd6f179415 subpackages: - data - data/base58 + - nowriter/tmlegacy - name: github.com/tendermint/iavl version: 594cc0c062a7174475f0ab654384038d77067917 subpackages: - iavl - name: github.com/tendermint/tmlibs - version: b854baa1fce7101c90b1d301b3359bb412f981c0 + version: bfcc0217f120d3bee6730ba0789d2eb72fc2e889 subpackages: - autofile - cli @@ -130,13 +137,14 @@ imports: - clist - common - db - - events - flowrate - log - merkle + - pubsub + - pubsub/query - test - name: golang.org/x/crypto - version: 9f005a07e0d31d45e6656d241bb5c0f2efd4bc94 + version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122 subpackages: - curve25519 - nacl/box @@ -147,7 +155,7 @@ imports: - ripemd160 - salsa20/salsa - name: golang.org/x/net - version: 9dfe39835686865bff950a07b394c12a98ddc811 + version: a8b9294777976932365dabb6640cf1468d95c70f subpackages: - context - http2 @@ -157,22 +165,22 @@ imports: - lex/httplex - trace - name: golang.org/x/sys - version: b98136db334ff9cb24f28a68e3be3cb6608f7630 + version: 8b4580aae2a0dd0c231a45d3ccb8434ff533b840 subpackages: - unix - name: golang.org/x/text - version: 88f656faf3f37f690df1a32515b479415e1a6769 + version: 75cc3cad82b5f47d3fb229ddda8c5167da14f294 subpackages: - secure/bidirule - transform - unicode/bidi - unicode/norm - name: google.golang.org/genproto - version: 891aceb7c239e72692819142dfca057bdcbfcb96 + version: 7f0da29060c682909f650ad8ed4e515bd74fa12a subpackages: - googleapis/rpc/status - name: google.golang.org/grpc - version: f7bf885db0b7479a537ec317c6e48ce53145f3db + version: 401e0e00e4bb830a10496d64cd95e068c5bf50de subpackages: - balancer - codes diff --git a/glide.yaml b/glide.yaml index 5c76f5e6..3f20a468 100644 --- a/glide.yaml +++ b/glide.yaml @@ -18,7 +18,7 @@ import: - package: github.com/spf13/viper version: v1.0.0 - package: github.com/tendermint/abci - version: ~0.7.0 + version: ~v0.8.0 subpackages: - client - example/dummy @@ -26,7 +26,7 @@ 
import: - package: github.com/tendermint/go-crypto version: ~0.4.1 - package: github.com/tendermint/go-wire - version: ~0.7.1 + version: ~0.7.2 subpackages: - data - package: github.com/tendermint/iavl @@ -34,7 +34,7 @@ import: subpackages: - iavl - package: github.com/tendermint/tmlibs - version: ~0.4.1 + version: ~0.5.0 subpackages: - autofile - cli @@ -45,6 +45,7 @@ import: - flowrate - log - merkle + - pubsub - package: golang.org/x/crypto subpackages: - nacl/box @@ -54,7 +55,7 @@ import: subpackages: - context - package: google.golang.org/grpc - version: v1.7.0 + version: v1.7.3 testImport: - package: github.com/go-kit/kit subpackages: diff --git a/certifiers/client/main_test.go b/lite/client/main_test.go similarity index 100% rename from certifiers/client/main_test.go rename to lite/client/main_test.go diff --git a/certifiers/client/provider.go b/lite/client/provider.go similarity index 68% rename from certifiers/client/provider.go rename to lite/client/provider.go index 6240da11..c98297de 100644 --- a/certifiers/client/provider.go +++ b/lite/client/provider.go @@ -12,10 +12,11 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/certifiers" - certerr "github.com/tendermint/tendermint/certifiers/errors" + "github.com/tendermint/tendermint/lite" + liteErr "github.com/tendermint/tendermint/lite/errors" ) +// SignStatusClient combines a SignClient and StatusClient. type SignStatusClient interface { rpcclient.SignClient rpcclient.StatusClient @@ -23,31 +24,36 @@ type SignStatusClient interface { type provider struct { node SignStatusClient - lastHeight int + lastHeight int64 } // NewProvider can wrap any rpcclient to expose it as // a read-only provider. 
-func NewProvider(node SignStatusClient) certifiers.Provider { +func NewProvider(node SignStatusClient) lite.Provider { return &provider{node: node} } -// NewProvider can connects to a tendermint json-rpc endpoint +// NewHTTPProvider can connect to a tendermint json-rpc endpoint // at the given url, and uses that as a read-only provider. -func NewHTTPProvider(remote string) certifiers.Provider { +func NewHTTPProvider(remote string) lite.Provider { return &provider{ node: rpcclient.NewHTTP(remote, "/websocket"), } } +// StatusClient returns the internal node as a StatusClient +func (p *provider) StatusClient() rpcclient.StatusClient { + return p.node +} + // StoreCommit is a noop, as clients can only read from the chain... -func (p *provider) StoreCommit(_ certifiers.FullCommit) error { return nil } +func (p *provider) StoreCommit(_ lite.FullCommit) error { return nil } // GetHash gets the most recent validator and sees if it matches // // TODO: improve when the rpc interface supports more functionality -func (p *provider) GetByHash(hash []byte) (certifiers.FullCommit, error) { - var fc certifiers.FullCommit +func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { + var fc lite.FullCommit vals, err := p.node.Validators(nil) // if we get no validators, or a different height, return an error if err != nil { @@ -56,13 +62,13 @@ func (p *provider) GetByHash(hash []byte) (certifiers.FullCommit, error) { p.updateHeight(vals.BlockHeight) vhash := types.NewValidatorSet(vals.Validators).Hash() if !bytes.Equal(hash, vhash) { - return fc, certerr.ErrCommitNotFound() + return fc, liteErr.ErrCommitNotFound() } return p.seedFromVals(vals) } // GetByHeight gets the validator set by height -func (p *provider) GetByHeight(h int) (fc certifiers.FullCommit, err error) { +func (p *provider) GetByHeight(h int64) (fc lite.FullCommit, err error) { commit, err := p.node.Commit(&h) if err != nil { return fc, err @@ -70,7 +76,8 @@ func (p *provider) GetByHeight(h int) (fc 
certifiers.FullCommit, err error) { return p.seedFromCommit(commit) } -func (p *provider) LatestCommit() (fc certifiers.FullCommit, err error) { +// LatestCommit returns the newest commit stored. +func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { commit, err := p.GetLatestCommit() if err != nil { return fc, err @@ -89,24 +96,25 @@ func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) { return p.node.Commit(&status.LatestBlockHeight) } -func CommitFromResult(result *ctypes.ResultCommit) certifiers.Commit { - return (certifiers.Commit)(result.SignedHeader) +// CommitFromResult ... +func CommitFromResult(result *ctypes.ResultCommit) lite.Commit { + return (lite.Commit)(result.SignedHeader) } -func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (certifiers.FullCommit, error) { +func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (lite.FullCommit, error) { // now get the commits and build a full commit commit, err := p.node.Commit(&vals.BlockHeight) if err != nil { - return certifiers.FullCommit{}, err + return lite.FullCommit{}, err } - fc := certifiers.NewFullCommit( + fc := lite.NewFullCommit( CommitFromResult(commit), types.NewValidatorSet(vals.Validators), ) return fc, nil } -func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc certifiers.FullCommit, err error) { +func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc lite.FullCommit, err error) { fc.Commit = CommitFromResult(commit) // now get the proper validators @@ -118,7 +126,7 @@ func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc certifiers.Fu // make sure they match the commit (as we cannot enforce height) vset := types.NewValidatorSet(vals.Validators) if !bytes.Equal(vset.Hash(), commit.Header.ValidatorsHash) { - return fc, certerr.ErrValidatorsChanged() + return fc, liteErr.ErrValidatorsChanged() } p.updateHeight(commit.Header.Height) @@ -126,7 +134,7 @@ func (p *provider) seedFromCommit(commit 
*ctypes.ResultCommit) (fc certifiers.Fu return fc, nil } -func (p *provider) updateHeight(h int) { +func (p *provider) updateHeight(h int64) { if h > p.lastHeight { p.lastHeight = h } diff --git a/certifiers/client/provider_test.go b/lite/client/provider_test.go similarity index 76% rename from certifiers/client/provider_test.go rename to lite/client/provider_test.go index c63cd6a1..0bebfced 100644 --- a/certifiers/client/provider_test.go +++ b/lite/client/provider_test.go @@ -1,17 +1,15 @@ -package client_test +package client import ( "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/lite" + liteErr "github.com/tendermint/tendermint/lite/errors" + rpcclient "github.com/tendermint/tendermint/rpc/client" rpctest "github.com/tendermint/tendermint/rpc/test" - - "github.com/tendermint/tendermint/certifiers" - "github.com/tendermint/tendermint/certifiers/client" - certerr "github.com/tendermint/tendermint/certifiers/errors" ) func TestProvider(t *testing.T) { @@ -20,11 +18,12 @@ func TestProvider(t *testing.T) { cfg := rpctest.GetConfig() rpcAddr := cfg.RPC.ListenAddress chainID := cfg.ChainID - p := client.NewHTTPProvider(rpcAddr) + p := NewHTTPProvider(rpcAddr) require.NotNil(t, p) // let it produce some blocks - time.Sleep(500 * time.Millisecond) + err := rpcclient.WaitForHeight(p.(*provider).node, 6, nil) + require.Nil(err) // let's get the highest block seed, err := p.LatestCommit() @@ -36,7 +35,7 @@ func TestProvider(t *testing.T) { // let's check this is valid somehow assert.Nil(seed.ValidateBasic(chainID)) - cert := certifiers.NewStatic(chainID, seed.Validators) + cert := lite.NewStatic(chainID, seed.Validators) // historical queries now work :) lower := sh - 5 @@ -54,7 +53,7 @@ func TestProvider(t *testing.T) { // get by hash fails without match seed, err = p.GetByHash([]byte("foobar")) assert.NotNil(err) - assert.True(certerr.IsCommitNotFoundErr(err)) + 
assert.True(liteErr.IsCommitNotFoundErr(err)) // storing the seed silently ignored err = p.StoreCommit(seed) diff --git a/certifiers/commit.go b/lite/commit.go similarity index 88% rename from certifiers/commit.go rename to lite/commit.go index 464a48ba..11ae6d7f 100644 --- a/certifiers/commit.go +++ b/lite/commit.go @@ -1,4 +1,4 @@ -package certifiers +package lite import ( "bytes" @@ -7,7 +7,7 @@ import ( "github.com/tendermint/tendermint/types" - certerr "github.com/tendermint/tendermint/certifiers/errors" + liteErr "github.com/tendermint/tendermint/lite/errors" ) // Certifier checks the votes to make sure the block really is signed properly. @@ -33,6 +33,7 @@ type FullCommit struct { Validators *types.ValidatorSet `json:"validator_set"` } +// NewFullCommit returns a new FullCommit. func NewFullCommit(commit Commit, vals *types.ValidatorSet) FullCommit { return FullCommit{ Commit: commit, @@ -40,13 +41,15 @@ func NewFullCommit(commit Commit, vals *types.ValidatorSet) FullCommit { } } -func (c Commit) Height() int { +// Height returns the height of the header. +func (c Commit) Height() int64 { if c.Header == nil { return 0 } return c.Header.Height } +// ValidatorsHash returns the hash of the validator set. 
func (c Commit) ValidatorsHash() []byte { if c.Header == nil { return nil @@ -75,7 +78,7 @@ func (c Commit) ValidateBasic(chainID string) error { // make sure the header and commit match (height and hash) if c.Commit.Height() != c.Header.Height { - return certerr.ErrHeightMismatch(c.Commit.Height(), c.Header.Height) + return liteErr.ErrHeightMismatch(c.Commit.Height(), c.Header.Height) } hhash := c.Header.Hash() chash := c.Commit.BlockID.Hash diff --git a/certifiers/doc.go b/lite/doc.go similarity index 98% rename from certifiers/doc.go rename to lite/doc.go index 7566405b..89dc702f 100644 --- a/certifiers/doc.go +++ b/lite/doc.go @@ -1,5 +1,5 @@ /* -Package certifiers allows you to securely validate headers +Package lite allows you to securely validate headers without a full node. This library pulls together all the crypto and algorithms, @@ -130,4 +130,4 @@ to manually verify the new validator set hash using off-chain means (the same as getting the initial hash). */ -package certifiers +package lite diff --git a/certifiers/dynamic.go b/lite/dynamic.go similarity index 76% rename from certifiers/dynamic.go rename to lite/dynamic.go index b4017794..231aed7a 100644 --- a/certifiers/dynamic.go +++ b/lite/dynamic.go @@ -1,9 +1,9 @@ -package certifiers +package lite import ( "github.com/tendermint/tendermint/types" - certerr "github.com/tendermint/tendermint/certifiers/errors" + liteErr "github.com/tendermint/tendermint/lite/errors" ) var _ Certifier = &Dynamic{} @@ -19,33 +19,39 @@ var _ Certifier = &Dynamic{} // going forward. type Dynamic struct { cert *Static - lastHeight int + lastHeight int64 } -func NewDynamic(chainID string, vals *types.ValidatorSet, height int) *Dynamic { +// NewDynamic returns a new dynamic certifier. +func NewDynamic(chainID string, vals *types.ValidatorSet, height int64) *Dynamic { return &Dynamic{ cert: NewStatic(chainID, vals), lastHeight: height, } } +// ChainID returns the chain id of this certifier. 
func (c *Dynamic) ChainID() string { return c.cert.ChainID() } +// Validators returns the validators of this certifier. func (c *Dynamic) Validators() *types.ValidatorSet { return c.cert.vSet } +// Hash returns the hash of this certifier. func (c *Dynamic) Hash() []byte { return c.cert.Hash() } -func (c *Dynamic) LastHeight() int { +// LastHeight returns the last height of this certifier. +func (c *Dynamic) LastHeight() int64 { return c.lastHeight } -// Certify handles this with +// Certify will verify whether the commit is valid and will update the height if it is or return an +// error if it is not. func (c *Dynamic) Certify(check Commit) error { err := c.cert.Certify(check) if err == nil { @@ -63,7 +69,7 @@ func (c *Dynamic) Update(fc FullCommit) error { // ignore all checkpoints in the past -> only to the future h := fc.Height() if h <= c.lastHeight { - return certerr.ErrPastTime() + return liteErr.ErrPastTime() } // first, verify if the input is self-consistent.... @@ -79,7 +85,7 @@ func (c *Dynamic) Update(fc FullCommit) error { err = c.Validators().VerifyCommitAny(fc.Validators, c.ChainID(), commit.BlockID, h, commit) if err != nil { - return certerr.ErrTooMuchChange() + return liteErr.ErrTooMuchChange() } // looks good, we can update diff --git a/certifiers/dynamic_test.go b/lite/dynamic_test.go similarity index 90% rename from certifiers/dynamic_test.go rename to lite/dynamic_test.go index 2c921099..12db1946 100644 --- a/certifiers/dynamic_test.go +++ b/lite/dynamic_test.go @@ -1,4 +1,4 @@ -package certifiers_test +package lite_test import ( "testing" @@ -8,8 +8,8 @@ import ( "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/certifiers" - "github.com/tendermint/tendermint/certifiers/errors" + "github.com/tendermint/tendermint/lite" + "github.com/tendermint/tendermint/lite/errors" ) // TestDynamicCert just makes sure it still works like StaticCert @@ -18,17 +18,17 @@ func TestDynamicCert(t *testing.T) { assert := assert.New(t) // 
require := require.New(t) - keys := certifiers.GenValKeys(4) + keys := lite.GenValKeys(4) // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! vals := keys.ToValidators(20, 10) // and a certifier based on our known set chainID := "test-dyno" - cert := certifiers.NewDynamic(chainID, vals, 0) + cert := lite.NewDynamic(chainID, vals, 0) cases := []struct { - keys certifiers.ValKeys + keys lite.ValKeys vals *types.ValidatorSet - height int + height int64 first, last int // who actually signs proper bool // true -> expect no error changed bool // true -> expect validator change error @@ -65,12 +65,12 @@ func TestDynamicUpdate(t *testing.T) { assert, require := assert.New(t), require.New(t) chainID := "test-dyno-up" - keys := certifiers.GenValKeys(5) + keys := lite.GenValKeys(5) vals := keys.ToValidators(20, 0) - cert := certifiers.NewDynamic(chainID, vals, 40) + cert := lite.NewDynamic(chainID, vals, 40) // one valid block to give us a sense of time - h := 100 + h := int64(100) good := keys.GenCommit(chainID, h, nil, vals, []byte("foo"), 0, len(keys)) err := cert.Certify(good) require.Nil(err, "%+v", err) @@ -81,9 +81,9 @@ func TestDynamicUpdate(t *testing.T) { // we try to update with some blocks cases := []struct { - keys certifiers.ValKeys + keys lite.ValKeys vals *types.ValidatorSet - height int + height int64 first, last int // who actually signs proper bool // true -> expect no error changed bool // true -> expect too much change error diff --git a/certifiers/errors/errors.go b/lite/errors/errors.go similarity index 83% rename from certifiers/errors/errors.go rename to lite/errors/errors.go index c716c8fc..99e42a0b 100644 --- a/certifiers/errors/errors.go +++ b/lite/errors/errors.go @@ -19,34 +19,39 @@ func IsCommitNotFoundErr(err error) bool { return err != nil && (errors.Cause(err) == errCommitNotFound) } +// ErrCommitNotFound indicates that the requested commit was not found.
func ErrCommitNotFound() error { return errors.WithStack(errCommitNotFound) } // IsValidatorsChangedErr checks whether an error is due -// to a differing validator set +// to a differing validator set. func IsValidatorsChangedErr(err error) bool { return err != nil && (errors.Cause(err) == errValidatorsChanged) } +// ErrValidatorsChanged indicates that the validator set was changed between two commits. func ErrValidatorsChanged() error { return errors.WithStack(errValidatorsChanged) } // IsTooMuchChangeErr checks whether an error is due to too much change -// between these validators sets +// between these validators sets. func IsTooMuchChangeErr(err error) bool { return err != nil && (errors.Cause(err) == errTooMuchChange) } +// ErrTooMuchChange indicates that the underlying validator set was changed by >1/3. func ErrTooMuchChange() error { return errors.WithStack(errTooMuchChange) } +// IsPastTimeErr ... func IsPastTimeErr(err error) bool { return err != nil && (errors.Cause(err) == errPastTime) } +// ErrPastTime ... func ErrPastTime() error { return errors.WithStack(errPastTime) } @@ -57,6 +62,7 @@ func IsNoPathFoundErr(err error) bool { return err != nil && (errors.Cause(err) == errNoPathFound) } +// ErrNoPathFound ... 
func ErrNoPathFound() error { return errors.WithStack(errNoPathFound) } @@ -64,7 +70,7 @@ func ErrNoPathFound() error { //-------------------------------------------- type errHeightMismatch struct { - h1, h2 int + h1, h2 int64 } func (e errHeightMismatch) Error() string { @@ -81,6 +87,6 @@ func IsHeightMismatchErr(err error) bool { } // ErrHeightMismatch returns an mismatch error with stack-trace -func ErrHeightMismatch(h1, h2 int) error { +func ErrHeightMismatch(h1, h2 int64) error { return errors.WithStack(errHeightMismatch{h1, h2}) } diff --git a/certifiers/errors/errors_test.go b/lite/errors/errors_test.go similarity index 100% rename from certifiers/errors/errors_test.go rename to lite/errors/errors_test.go diff --git a/certifiers/files/commit.go b/lite/files/commit.go similarity index 66% rename from certifiers/files/commit.go rename to lite/files/commit.go index 18994f0f..33f5bb67 100644 --- a/certifiers/files/commit.go +++ b/lite/files/commit.go @@ -8,8 +8,8 @@ import ( wire "github.com/tendermint/go-wire" - "github.com/tendermint/tendermint/certifiers" - certerr "github.com/tendermint/tendermint/certifiers/errors" + "github.com/tendermint/tendermint/lite" + liteErr "github.com/tendermint/tendermint/lite/errors" ) const ( @@ -20,7 +20,7 @@ const ( ) // SaveFullCommit exports the seed in binary / go-wire style -func SaveFullCommit(fc certifiers.FullCommit, path string) error { +func SaveFullCommit(fc lite.FullCommit, path string) error { f, err := os.Create(path) if err != nil { return errors.WithStack(err) @@ -33,7 +33,7 @@ func SaveFullCommit(fc certifiers.FullCommit, path string) error { } // SaveFullCommitJSON exports the seed in a json format -func SaveFullCommitJSON(fc certifiers.FullCommit, path string) error { +func SaveFullCommitJSON(fc lite.FullCommit, path string) error { f, err := os.Create(path) if err != nil { return errors.WithStack(err) @@ -44,12 +44,13 @@ func SaveFullCommitJSON(fc certifiers.FullCommit, path string) error { return 
errors.WithStack(err) } -func LoadFullCommit(path string) (certifiers.FullCommit, error) { - var fc certifiers.FullCommit +// LoadFullCommit loads the full commit from the file system. +func LoadFullCommit(path string) (lite.FullCommit, error) { + var fc lite.FullCommit f, err := os.Open(path) if err != nil { if os.IsNotExist(err) { - return fc, certerr.ErrCommitNotFound() + return fc, liteErr.ErrCommitNotFound() } return fc, errors.WithStack(err) } @@ -60,12 +61,13 @@ func LoadFullCommit(path string) (certifiers.FullCommit, error) { return fc, errors.WithStack(err) } -func LoadFullCommitJSON(path string) (certifiers.FullCommit, error) { - var fc certifiers.FullCommit +// LoadFullCommitJSON loads the commit from the file system in JSON format. +func LoadFullCommitJSON(path string) (lite.FullCommit, error) { + var fc lite.FullCommit f, err := os.Open(path) if err != nil { if os.IsNotExist(err) { - return fc, certerr.ErrCommitNotFound() + return fc, liteErr.ErrCommitNotFound() } return fc, errors.WithStack(err) } diff --git a/certifiers/files/commit_test.go b/lite/files/commit_test.go similarity index 94% rename from certifiers/files/commit_test.go rename to lite/files/commit_test.go index 934ab7b6..c2124379 100644 --- a/certifiers/files/commit_test.go +++ b/lite/files/commit_test.go @@ -10,7 +10,7 @@ import ( cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tendermint/certifiers" + "github.com/tendermint/tendermint/lite" ) func tmpFile() string { @@ -24,10 +24,10 @@ func TestSerializeFullCommits(t *testing.T) { // some constants appHash := []byte("some crazy thing") chainID := "ser-ial" - h := 25 + h := int64(25) // build a fc - keys := certifiers.GenValKeys(5) + keys := lite.GenValKeys(5) vals := keys.ToValidators(10, 0) fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, 5) diff --git a/certifiers/files/provider.go b/lite/files/provider.go similarity index 76% rename from certifiers/files/provider.go rename to lite/files/provider.go index 
9dcfb169..327b0331 100644 --- a/certifiers/files/provider.go +++ b/lite/files/provider.go @@ -24,16 +24,17 @@ import ( "github.com/pkg/errors" - "github.com/tendermint/tendermint/certifiers" - certerr "github.com/tendermint/tendermint/certifiers/errors" + "github.com/tendermint/tendermint/lite" + liteErr "github.com/tendermint/tendermint/lite/errors" ) +// nolint const ( Ext = ".tsd" ValDir = "validators" CheckDir = "checkpoints" dirPerm = os.FileMode(0755) - filePerm = os.FileMode(0644) + //filePerm = os.FileMode(0644) ) type provider struct { @@ -43,7 +44,7 @@ type provider struct { // NewProvider creates the parent dir and subdirs // for validators and checkpoints as needed -func NewProvider(dir string) certifiers.Provider { +func NewProvider(dir string) lite.Provider { valDir := filepath.Join(dir, ValDir) checkDir := filepath.Join(dir, CheckDir) for _, d := range []string{valDir, checkDir} { @@ -59,12 +60,13 @@ func (p *provider) encodeHash(hash []byte) string { return hex.EncodeToString(hash) + Ext } -func (p *provider) encodeHeight(h int) string { +func (p *provider) encodeHeight(h int64) string { // pad up to 10^12 for height... return fmt.Sprintf("%012d%s", h, Ext) } -func (p *provider) StoreCommit(fc certifiers.FullCommit) error { +// StoreCommit saves a full commit after it has been verified. +func (p *provider) StoreCommit(fc lite.FullCommit) error { // make sure the fc is self-consistent before saving err := fc.ValidateBasic(fc.Commit.Header.ChainID) if err != nil { @@ -85,11 +87,12 @@ func (p *provider) StoreCommit(fc certifiers.FullCommit) error { return nil } -func (p *provider) GetByHeight(h int) (certifiers.FullCommit, error) { +// GetByHeight returns the closest commit with height <= h. +func (p *provider) GetByHeight(h int64) (lite.FullCommit, error) { // first we look for exact match, then search... 
path := filepath.Join(p.checkDir, p.encodeHeight(h)) fc, err := LoadFullCommit(path) - if certerr.IsCommitNotFoundErr(err) { + if liteErr.IsCommitNotFoundErr(err) { path, err = p.searchForHeight(h) if err == nil { fc, err = LoadFullCommit(path) @@ -98,14 +101,15 @@ func (p *provider) GetByHeight(h int) (certifiers.FullCommit, error) { return fc, err } -func (p *provider) LatestCommit() (fc certifiers.FullCommit, err error) { +// LatestCommit returns the newest commit stored. +func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { // Note to future: please update by 2077 to avoid rollover return p.GetByHeight(math.MaxInt32 - 1) } // search for height, looks for a file with highest height < h // return certifiers.ErrCommitNotFound() if not there... -func (p *provider) searchForHeight(h int) (string, error) { +func (p *provider) searchForHeight(h int64) (string, error) { d, err := os.Open(p.checkDir) if err != nil { return "", errors.WithStack(err) @@ -121,14 +125,15 @@ func (p *provider) searchForHeight(h int) (string, error) { sort.Strings(files) i := sort.SearchStrings(files, desired) if i == 0 { - return "", certerr.ErrCommitNotFound() + return "", liteErr.ErrCommitNotFound() } found := files[i-1] path := filepath.Join(p.checkDir, found) return path, errors.WithStack(err) } -func (p *provider) GetByHash(hash []byte) (certifiers.FullCommit, error) { +// GetByHash returns a commit exactly matching this validator hash. 
+func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { path := filepath.Join(p.valDir, p.encodeHash(hash)) return LoadFullCommit(path) } diff --git a/certifiers/files/provider_test.go b/lite/files/provider_test.go similarity index 77% rename from certifiers/files/provider_test.go rename to lite/files/provider_test.go index 05e8f59d..b8d8e88b 100644 --- a/certifiers/files/provider_test.go +++ b/lite/files/provider_test.go @@ -10,12 +10,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/certifiers" - certerr "github.com/tendermint/tendermint/certifiers/errors" - "github.com/tendermint/tendermint/certifiers/files" + "github.com/tendermint/tendermint/lite" + liteErr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/lite/files" ) -func checkEqual(stored, loaded certifiers.FullCommit, chainID string) error { +func checkEqual(stored, loaded lite.FullCommit, chainID string) error { err := loaded.ValidateBasic(chainID) if err != nil { return err @@ -36,28 +36,28 @@ func TestFileProvider(t *testing.T) { chainID := "test-files" appHash := []byte("some-data") - keys := certifiers.GenValKeys(5) + keys := lite.GenValKeys(5) count := 10 // make a bunch of seeds... - seeds := make([]certifiers.FullCommit, count) + seeds := make([]lite.FullCommit, count) for i := 0; i < count; i++ { // two seeds for each validator, to check how we handle dups // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... 
vals := keys.ToValidators(10, int64(count/2)) - h := 20 + 10*i + h := int64(20 + 10*i) check := keys.GenCommit(chainID, h, nil, vals, appHash, 0, 5) - seeds[i] = certifiers.NewFullCommit(check, vals) + seeds[i] = lite.NewFullCommit(check, vals) } // check provider is empty seed, err := p.GetByHeight(20) require.NotNil(err) - assert.True(certerr.IsCommitNotFoundErr(err)) + assert.True(liteErr.IsCommitNotFoundErr(err)) seed, err = p.GetByHash(seeds[3].ValidatorsHash()) require.NotNil(err) - assert.True(certerr.IsCommitNotFoundErr(err)) + assert.True(liteErr.IsCommitNotFoundErr(err)) // now add them all to the provider for _, s := range seeds { @@ -86,11 +86,11 @@ func TestFileProvider(t *testing.T) { seed, err = p.GetByHeight(47) if assert.Nil(err, "%+v", err) { // we only step by 10, so 40 must be the one below this - assert.Equal(40, seed.Height()) + assert.EqualValues(40, seed.Height()) } // and proper error for too low _, err = p.GetByHeight(5) assert.NotNil(err) - assert.True(certerr.IsCommitNotFoundErr(err)) + assert.True(liteErr.IsCommitNotFoundErr(err)) } diff --git a/certifiers/helper.go b/lite/helpers.go similarity index 86% rename from certifiers/helper.go rename to lite/helpers.go index 6f2daa63..9319c459 100644 --- a/certifiers/helper.go +++ b/lite/helpers.go @@ -1,4 +1,4 @@ -package certifiers +package lite import ( "time" @@ -12,14 +12,14 @@ import ( // // It lets us simulate signing with many keys, either ed25519 or secp256k1. // The main use case is to create a set, and call GenCommit -// to get propely signed header for testing. +// to get properly signed header for testing. // // You can set different weights of validators each time you call // ToValidators, and can optionally extend the validator set later // with Extend or ExtendSecp type ValKeys []crypto.PrivKey -// GenValKeys produces an array of private keys to generate commits +// GenValKeys produces an array of private keys to generate commits. 
func GenValKeys(n int) ValKeys { res := make(ValKeys, n) for i := range res { @@ -28,7 +28,7 @@ func GenValKeys(n int) ValKeys { return res } -// Change replaces the key at index i +// Change replaces the key at index i. func (v ValKeys) Change(i int) ValKeys { res := make(ValKeys, len(v)) copy(res, v) @@ -36,13 +36,13 @@ func (v ValKeys) Change(i int) ValKeys { return res } -// Extend adds n more keys (to remove, just take a slice) +// Extend adds n more keys (to remove, just take a slice). func (v ValKeys) Extend(n int) ValKeys { extra := GenValKeys(n) return append(v, extra...) } -// GenSecpValKeys produces an array of secp256k1 private keys to generate commits +// GenSecpValKeys produces an array of secp256k1 private keys to generate commits. func GenSecpValKeys(n int) ValKeys { res := make(ValKeys, n) for i := range res { @@ -51,7 +51,7 @@ func GenSecpValKeys(n int) ValKeys { return res } -// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice) +// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice). func (v ValKeys) ExtendSecp(n int) ValKeys { extra := GenSecpValKeys(n) return append(v, extra...) @@ -60,7 +60,7 @@ func (v ValKeys) ExtendSecp(n int) ValKeys { // ToValidators produces a list of validators from the set of keys // The first key has weight `init` and it increases by `inc` every step // so we can have all the same weight, or a simple linear distribution -// (should be enough for testing) +// (should be enough for testing). func (v ValKeys) ToValidators(init, inc int64) *types.ValidatorSet { res := make([]*types.Validator, len(v)) for i, k := range v { @@ -69,7 +69,7 @@ func (v ValKeys) ToValidators(init, inc int64) *types.ValidatorSet { return types.NewValidatorSet(res) } -// signHeader properly signs the header with all keys from first to last exclusive +// signHeader properly signs the header with all keys from first to last exclusive. 
func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit { votes := make([]*types.Vote, len(v)) @@ -106,7 +106,9 @@ func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey return vote } -func genHeader(chainID string, height int, txs types.Txs, +// Silences warning that vals can also be merkle.Hashable +// nolint: interfacer +func genHeader(chainID string, height int64, txs types.Txs, vals *types.ValidatorSet, appHash []byte) *types.Header { return &types.Header{ @@ -122,8 +124,8 @@ func genHeader(chainID string, height int, txs types.Txs, } } -// GenCommit calls genHeader and signHeader and combines them into a Commit -func (v ValKeys) GenCommit(chainID string, height int, txs types.Txs, +// GenCommit calls genHeader and signHeader and combines them into a Commit. +func (v ValKeys) GenCommit(chainID string, height int64, txs types.Txs, vals *types.ValidatorSet, appHash []byte, first, last int) Commit { header := genHeader(chainID, height, txs, vals, appHash) @@ -134,8 +136,8 @@ func (v ValKeys) GenCommit(chainID string, height int, txs types.Txs, return check } -// GenFullCommit calls genHeader and signHeader and combines them into a Commit -func (v ValKeys) GenFullCommit(chainID string, height int, txs types.Txs, +// GenFullCommit calls genHeader and signHeader and combines them into a Commit. 
+func (v ValKeys) GenFullCommit(chainID string, height int64, txs types.Txs, vals *types.ValidatorSet, appHash []byte, first, last int) FullCommit { header := genHeader(chainID, height, txs, vals, appHash) diff --git a/certifiers/inquirer.go b/lite/inquirer.go similarity index 65% rename from certifiers/inquirer.go rename to lite/inquirer.go index 460b622a..5d6ce60c 100644 --- a/certifiers/inquirer.go +++ b/lite/inquirer.go @@ -1,11 +1,15 @@ -package certifiers +package lite import ( "github.com/tendermint/tendermint/types" - certerr "github.com/tendermint/tendermint/certifiers/errors" + liteErr "github.com/tendermint/tendermint/lite/errors" ) +// Inquiring wraps a dynamic certifier and implements an auto-update strategy. If a call to Certify +// fails due to a change in its validator set, Inquiring will try and find a previous FullCommit which +// it can use to safely update the validator set. It uses a source provider to obtain the needed +// FullCommits. It stores properly validated data on the local system. type Inquiring struct { cert *Dynamic // These are only properly validated data, from local system @@ -14,8 +18,14 @@ type Inquiring struct { Source Provider } +// NewInquiring returns a new Inquiring object. It uses the trusted provider to store validated +// data and the source provider to obtain missing FullCommits. +// +// Example: The trusted provider should be a CacheProvider, MemProvider or files.Provider. The source +// provider should be a client.HTTPProvider. func NewInquiring(chainID string, fc FullCommit, trusted Provider, source Provider) *Inquiring { // store the data in trusted + // TODO: StoreCommit() can return an error and we need to handle this. trusted.StoreCommit(fc) return &Inquiring{ @@ -25,15 +35,18 @@ func NewInquiring(chainID string, fc FullCommit, trusted Provider, source Provid } } +// ChainID returns the chain id. func (c *Inquiring) ChainID() string { return c.cert.ChainID() } +// Validators returns the validator set.
func (c *Inquiring) Validators() *types.ValidatorSet { return c.cert.cert.vSet } -func (c *Inquiring) LastHeight() int { +// LastHeight returns the last height. +func (c *Inquiring) LastHeight() int64 { return c.cert.lastHeight } @@ -50,7 +63,7 @@ func (c *Inquiring) Certify(commit Commit) error { } err = c.cert.Certify(commit) - if !certerr.IsValidatorsChangedErr(err) { + if !liteErr.IsValidatorsChangedErr(err) { return err } err = c.updateToHash(commit.Header.ValidatorsHash) @@ -64,11 +77,11 @@ func (c *Inquiring) Certify(commit Commit) error { } // store the new checkpoint - c.trusted.StoreCommit( - NewFullCommit(commit, c.Validators())) - return nil + return c.trusted.StoreCommit(NewFullCommit(commit, c.Validators())) } +// Update will verify if this is a valid change and update +// the certifying validator set if safe to do so. func (c *Inquiring) Update(fc FullCommit) error { err := c.useClosestTrust(fc.Height()) if err != nil { @@ -77,12 +90,12 @@ func (c *Inquiring) Update(fc FullCommit) error { err = c.cert.Update(fc) if err == nil { - c.trusted.StoreCommit(fc) + err = c.trusted.StoreCommit(fc) } return err } -func (c *Inquiring) useClosestTrust(h int) error { +func (c *Inquiring) useClosestTrust(h int64) error { closest, err := c.trusted.GetByHeight(h) if err != nil { return err @@ -106,14 +119,14 @@ func (c *Inquiring) updateToHash(vhash []byte) error { } err = c.cert.Update(fc) // handle IsTooMuchChangeErr by using divide and conquer - if certerr.IsTooMuchChangeErr(err) { + if liteErr.IsTooMuchChangeErr(err) { err = c.updateToHeight(fc.Height()) } return err } // updateToHeight will use divide-and-conquer to find a path to h -func (c *Inquiring) updateToHeight(h int) error { +func (c *Inquiring) updateToHeight(h int64) error { // try to update to this height (with checks) fc, err := c.Source.GetByHeight(h) if err != nil { @@ -121,12 +134,12 @@ func (c *Inquiring) updateToHeight(h int) error { } start, end := c.LastHeight(), fc.Height() if end <= start { 
- return certerr.ErrNoPathFound() + return liteErr.ErrNoPathFound() } err = c.Update(fc) // we can handle IsTooMuchChangeErr specially - if !certerr.IsTooMuchChangeErr(err) { + if !liteErr.IsTooMuchChangeErr(err) { return err } diff --git a/certifiers/inquirer_test.go b/lite/inquirer_test.go similarity index 77% rename from certifiers/inquirer_test.go rename to lite/inquirer_test.go index 2a0ee555..c30d8209 100644 --- a/certifiers/inquirer_test.go +++ b/lite/inquirer_test.go @@ -1,4 +1,5 @@ -package certifiers_test +// nolint: vetshadow +package lite_test import ( "fmt" @@ -7,34 +8,33 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/certifiers" + "github.com/tendermint/tendermint/lite" ) func TestInquirerValidPath(t *testing.T) { assert, require := assert.New(t), require.New(t) - trust := certifiers.NewMemStoreProvider() - source := certifiers.NewMemStoreProvider() + trust := lite.NewMemStoreProvider() + source := lite.NewMemStoreProvider() // set up the validators to generate test blocks var vote int64 = 10 - keys := certifiers.GenValKeys(5) - vals := keys.ToValidators(vote, 0) + keys := lite.GenValKeys(5) // construct a bunch of commits, each with one more height than the last chainID := "inquiry-test" count := 50 - commits := make([]certifiers.FullCommit, count) + commits := make([]lite.FullCommit, count) for i := 0; i < count; i++ { // extend the keys by 1 each time keys = keys.Extend(1) - vals = keys.ToValidators(vote, 0) - h := 20 + 10*i + vals := keys.ToValidators(vote, 0) + h := int64(20 + 10*i) appHash := []byte(fmt.Sprintf("h=%d", h)) commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, len(keys)) } // initialize a certifier with the initial state - cert := certifiers.NewInquiring(chainID, commits[0], trust, source) + cert := lite.NewInquiring(chainID, commits[0], trust, source) // this should fail validation.... 
commit := commits[count-1].Commit @@ -60,29 +60,28 @@ func TestInquirerValidPath(t *testing.T) { func TestInquirerMinimalPath(t *testing.T) { assert, require := assert.New(t), require.New(t) - trust := certifiers.NewMemStoreProvider() - source := certifiers.NewMemStoreProvider() + trust := lite.NewMemStoreProvider() + source := lite.NewMemStoreProvider() // set up the validators to generate test blocks var vote int64 = 10 - keys := certifiers.GenValKeys(5) - vals := keys.ToValidators(vote, 0) + keys := lite.GenValKeys(5) // construct a bunch of commits, each with one more height than the last chainID := "minimal-path" count := 12 - commits := make([]certifiers.FullCommit, count) + commits := make([]lite.FullCommit, count) for i := 0; i < count; i++ { // extend the validators, so we are just below 2/3 keys = keys.Extend(len(keys)/2 - 1) - vals = keys.ToValidators(vote, 0) - h := 5 + 10*i + vals := keys.ToValidators(vote, 0) + h := int64(5 + 10*i) appHash := []byte(fmt.Sprintf("h=%d", h)) commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, len(keys)) } // initialize a certifier with the initial state - cert := certifiers.NewInquiring(chainID, commits[0], trust, source) + cert := lite.NewInquiring(chainID, commits[0], trust, source) // this should fail validation.... 
commit := commits[count-1].Commit @@ -108,29 +107,28 @@ func TestInquirerMinimalPath(t *testing.T) { func TestInquirerVerifyHistorical(t *testing.T) { assert, require := assert.New(t), require.New(t) - trust := certifiers.NewMemStoreProvider() - source := certifiers.NewMemStoreProvider() + trust := lite.NewMemStoreProvider() + source := lite.NewMemStoreProvider() // set up the validators to generate test blocks var vote int64 = 10 - keys := certifiers.GenValKeys(5) - vals := keys.ToValidators(vote, 0) + keys := lite.GenValKeys(5) // construct a bunch of commits, each with one more height than the last chainID := "inquiry-test" count := 10 - commits := make([]certifiers.FullCommit, count) + commits := make([]lite.FullCommit, count) for i := 0; i < count; i++ { // extend the keys by 1 each time keys = keys.Extend(1) - vals = keys.ToValidators(vote, 0) - h := 20 + 10*i + vals := keys.ToValidators(vote, 0) + h := int64(20 + 10*i) appHash := []byte(fmt.Sprintf("h=%d", h)) commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, len(keys)) } // initialize a certifier with the initial state - cert := certifiers.NewInquiring(chainID, commits[0], trust, source) + cert := lite.NewInquiring(chainID, commits[0], trust, source) // store a few commits as trust for _, i := range []int{2, 5} { diff --git a/certifiers/memprovider.go b/lite/memprovider.go similarity index 70% rename from certifiers/memprovider.go rename to lite/memprovider.go index cdad75e4..9c454be0 100644 --- a/certifiers/memprovider.go +++ b/lite/memprovider.go @@ -1,10 +1,10 @@ -package certifiers +package lite import ( "encoding/hex" "sort" - certerr "github.com/tendermint/tendermint/certifiers/errors" + liteErr "github.com/tendermint/tendermint/lite/errors" ) type memStoreProvider struct { @@ -23,6 +23,7 @@ func (s fullCommits) Less(i, j int) bool { return s[i].Height() < s[j].Height() } +// NewMemStoreProvider returns a new in-memory provider. 
func NewMemStoreProvider() Provider { return &memStoreProvider{ byHeight: fullCommits{}, @@ -34,6 +35,7 @@ func (m *memStoreProvider) encodeHash(hash []byte) string { return hex.EncodeToString(hash) } +// StoreCommit stores a FullCommit after verifying it. func (m *memStoreProvider) StoreCommit(fc FullCommit) error { // make sure the fc is self-consistent before saving err := fc.ValidateBasic(fc.Commit.Header.ChainID) @@ -49,7 +51,8 @@ func (m *memStoreProvider) StoreCommit(fc FullCommit) error { return nil } -func (m *memStoreProvider) GetByHeight(h int) (FullCommit, error) { +// GetByHeight returns the FullCommit for height h or an error if the commit is not found. +func (m *memStoreProvider) GetByHeight(h int64) (FullCommit, error) { // search from highest to lowest for i := len(m.byHeight) - 1; i >= 0; i-- { fc := m.byHeight[i] @@ -57,22 +60,24 @@ func (m *memStoreProvider) GetByHeight(h int) (FullCommit, error) { return fc, nil } } - return FullCommit{}, certerr.ErrCommitNotFound() + return FullCommit{}, liteErr.ErrCommitNotFound() } +// GetByHash returns the FullCommit for the hash or an error if the commit is not found. func (m *memStoreProvider) GetByHash(hash []byte) (FullCommit, error) { var err error fc, ok := m.byHash[m.encodeHash(hash)] if !ok { - err = certerr.ErrCommitNotFound() + err = liteErr.ErrCommitNotFound() } return fc, err } +// LatestCommit returns the latest FullCommit or an error if no commits exist. 
func (m *memStoreProvider) LatestCommit() (FullCommit, error) { l := len(m.byHeight) if l == 0 { - return FullCommit{}, certerr.ErrCommitNotFound() + return FullCommit{}, liteErr.ErrCommitNotFound() } return m.byHeight[l-1], nil } diff --git a/certifiers/performance_test.go b/lite/performance_test.go similarity index 74% rename from certifiers/performance_test.go rename to lite/performance_test.go index 2a6c6ced..e01b8993 100644 --- a/certifiers/performance_test.go +++ b/lite/performance_test.go @@ -1,37 +1,37 @@ -package certifiers_test +package lite_test import ( "fmt" "testing" - "github.com/tendermint/tendermint/certifiers" + "github.com/tendermint/tendermint/lite" ) func BenchmarkGenCommit20(b *testing.B) { - keys := certifiers.GenValKeys(20) + keys := lite.GenValKeys(20) benchmarkGenCommit(b, keys) } func BenchmarkGenCommit100(b *testing.B) { - keys := certifiers.GenValKeys(100) + keys := lite.GenValKeys(100) benchmarkGenCommit(b, keys) } func BenchmarkGenCommitSec20(b *testing.B) { - keys := certifiers.GenSecpValKeys(20) + keys := lite.GenSecpValKeys(20) benchmarkGenCommit(b, keys) } func BenchmarkGenCommitSec100(b *testing.B) { - keys := certifiers.GenSecpValKeys(100) + keys := lite.GenSecpValKeys(100) benchmarkGenCommit(b, keys) } -func benchmarkGenCommit(b *testing.B, keys certifiers.ValKeys) { +func benchmarkGenCommit(b *testing.B, keys lite.ValKeys) { chainID := fmt.Sprintf("bench-%d", len(keys)) vals := keys.ToValidators(20, 10) for i := 0; i < b.N; i++ { - h := 1 + i + h := int64(1 + i) appHash := []byte(fmt.Sprintf("h=%d", h)) keys.GenCommit(chainID, h, nil, vals, appHash, 0, len(keys)) } @@ -39,7 +39,7 @@ func benchmarkGenCommit(b *testing.B, keys certifiers.ValKeys) { // this benchmarks generating one key func BenchmarkGenValKeys(b *testing.B) { - keys := certifiers.GenValKeys(20) + keys := lite.GenValKeys(20) for i := 0; i < b.N; i++ { keys = keys.Extend(1) } @@ -47,7 +47,7 @@ func BenchmarkGenValKeys(b *testing.B) { // this benchmarks generating 
one key func BenchmarkGenSecpValKeys(b *testing.B) { - keys := certifiers.GenSecpValKeys(20) + keys := lite.GenSecpValKeys(20) for i := 0; i < b.N; i++ { keys = keys.Extend(1) } @@ -63,7 +63,7 @@ func BenchmarkToValidators100(b *testing.B) { // this benchmarks constructing the validator set (.PubKey() * nodes) func benchmarkToValidators(b *testing.B, nodes int) { - keys := certifiers.GenValKeys(nodes) + keys := lite.GenValKeys(nodes) for i := 1; i <= b.N; i++ { keys.ToValidators(int64(2*i), int64(i)) } @@ -75,36 +75,36 @@ func BenchmarkToValidatorsSec100(b *testing.B) { // this benchmarks constructing the validator set (.PubKey() * nodes) func benchmarkToValidatorsSec(b *testing.B, nodes int) { - keys := certifiers.GenSecpValKeys(nodes) + keys := lite.GenSecpValKeys(nodes) for i := 1; i <= b.N; i++ { keys.ToValidators(int64(2*i), int64(i)) } } func BenchmarkCertifyCommit20(b *testing.B) { - keys := certifiers.GenValKeys(20) + keys := lite.GenValKeys(20) benchmarkCertifyCommit(b, keys) } func BenchmarkCertifyCommit100(b *testing.B) { - keys := certifiers.GenValKeys(100) + keys := lite.GenValKeys(100) benchmarkCertifyCommit(b, keys) } func BenchmarkCertifyCommitSec20(b *testing.B) { - keys := certifiers.GenSecpValKeys(20) + keys := lite.GenSecpValKeys(20) benchmarkCertifyCommit(b, keys) } func BenchmarkCertifyCommitSec100(b *testing.B) { - keys := certifiers.GenSecpValKeys(100) + keys := lite.GenSecpValKeys(100) benchmarkCertifyCommit(b, keys) } -func benchmarkCertifyCommit(b *testing.B, keys certifiers.ValKeys) { +func benchmarkCertifyCommit(b *testing.B, keys lite.ValKeys) { chainID := "bench-certify" vals := keys.ToValidators(20, 10) - cert := certifiers.NewStatic(chainID, vals) + cert := lite.NewStatic(chainID, vals) check := keys.GenCommit(chainID, 123, nil, vals, []byte("foo"), 0, len(keys)) for i := 0; i < b.N; i++ { err := cert.Certify(check) diff --git a/certifiers/provider.go b/lite/provider.go similarity index 53% rename from certifiers/provider.go rename 
to lite/provider.go index 64b4212d..22dc964a 100644 --- a/certifiers/provider.go +++ b/lite/provider.go @@ -1,22 +1,18 @@ -package certifiers +package lite -import ( - certerr "github.com/tendermint/tendermint/certifiers/errors" -) - -// Provider is used to get more validators by other means +// Provider is used to get more validators by other means. // -// Examples: MemProvider, files.Provider, client.Provider.... +// Examples: MemProvider, files.Provider, client.Provider, CacheProvider.... type Provider interface { // StoreCommit saves a FullCommit after we have verified it, // so we can query for it later. Important for updating our - // store of trusted commits + // store of trusted commits. StoreCommit(fc FullCommit) error - // GetByHeight returns the closest commit with height <= h - GetByHeight(h int) (FullCommit, error) - // GetByHash returns a commit exactly matching this validator hash + // GetByHeight returns the closest commit with height <= h. + GetByHeight(h int64) (FullCommit, error) + // GetByHash returns a commit exactly matching this validator hash. GetByHash(hash []byte) (FullCommit, error) - // LatestCommit returns the newest commit stored + // LatestCommit returns the newest commit stored. LatestCommit() (FullCommit, error) } @@ -28,6 +24,7 @@ type cacheProvider struct { Providers []Provider } +// NewCacheProvider returns a new provider which wraps multiple other providers. func NewCacheProvider(providers ...Provider) Provider { return cacheProvider{ Providers: providers, @@ -47,20 +44,18 @@ func (c cacheProvider) StoreCommit(fc FullCommit) (err error) { return err } -/* -GetByHeight should return the closest possible match from all providers. - -The Cache is usually organized in order from cheapest call (memory) -to most expensive calls (disk/network). 
However, since GetByHeight returns -a FullCommit at h' <= h, if the memory has a seed at h-10, but the network would -give us the exact match, a naive "stop at first non-error" would hide -the actual desired results. - -Thus, we query each provider in order until we find an exact match -or we finished querying them all. If at least one returned a non-error, -then this returns the best match (minimum h-h'). -*/ -func (c cacheProvider) GetByHeight(h int) (fc FullCommit, err error) { +// GetByHeight should return the closest possible match from all providers. +// +// The Cache is usually organized in order from cheapest call (memory) +// to most expensive calls (disk/network). However, since GetByHeight returns +// a FullCommit at h' <= h, if the memory has a seed at h-10, but the network would +// give us the exact match, a naive "stop at first non-error" would hide +// the actual desired results. +// +// Thus, we query each provider in order until we find an exact match +// or we finished querying them all. If at least one returned a non-error, +// then this returns the best match (minimum h-h'). +func (c cacheProvider) GetByHeight(h int64) (fc FullCommit, err error) { for _, p := range c.Providers { var tfc FullCommit tfc, err = p.GetByHeight(h) @@ -80,6 +75,7 @@ func (c cacheProvider) GetByHeight(h int) (fc FullCommit, err error) { return fc, err } +// GetByHash returns the FullCommit for the hash or an error if the commit is not found. func (c cacheProvider) GetByHash(hash []byte) (fc FullCommit, err error) { for _, p := range c.Providers { fc, err = p.GetByHash(hash) @@ -90,6 +86,7 @@ func (c cacheProvider) GetByHash(hash []byte) (fc FullCommit, err error) { return fc, err } +// LatestCommit returns the latest FullCommit or an error if no commit exists. 
func (c cacheProvider) LatestCommit() (fc FullCommit, err error) { for _, p := range c.Providers { var tfc FullCommit @@ -104,22 +101,3 @@ func (c cacheProvider) LatestCommit() (fc FullCommit, err error) { } return fc, err } - -// missingProvider doens't store anything, always a miss -// Designed as a mock for testing -type missingProvider struct{} - -func NewMissingProvider() Provider { - return missingProvider{} -} - -func (missingProvider) StoreCommit(_ FullCommit) error { return nil } -func (missingProvider) GetByHeight(_ int) (FullCommit, error) { - return FullCommit{}, certerr.ErrCommitNotFound() -} -func (missingProvider) GetByHash(_ []byte) (FullCommit, error) { - return FullCommit{}, certerr.ErrCommitNotFound() -} -func (missingProvider) LatestCommit() (FullCommit, error) { - return FullCommit{}, certerr.ErrCommitNotFound() -} diff --git a/certifiers/provider_test.go b/lite/provider_test.go similarity index 63% rename from certifiers/provider_test.go rename to lite/provider_test.go index c1e9ae51..f1165619 100644 --- a/certifiers/provider_test.go +++ b/lite/provider_test.go @@ -1,4 +1,5 @@ -package certifiers_test +// nolint: vetshadow +package lite_test import ( "testing" @@ -6,48 +7,68 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/certifiers" - "github.com/tendermint/tendermint/certifiers/errors" + "github.com/tendermint/tendermint/lite" + liteErr "github.com/tendermint/tendermint/lite/errors" ) +// missingProvider doesn't store anything, always a miss +// Designed as a mock for testing +type missingProvider struct{} + +// NewMissingProvider returns a provider which does not store anything and always misses. 
+func NewMissingProvider() lite.Provider { + return missingProvider{} +} + +func (missingProvider) StoreCommit(lite.FullCommit) error { return nil } +func (missingProvider) GetByHeight(int64) (lite.FullCommit, error) { + return lite.FullCommit{}, liteErr.ErrCommitNotFound() +} +func (missingProvider) GetByHash([]byte) (lite.FullCommit, error) { + return lite.FullCommit{}, liteErr.ErrCommitNotFound() +} +func (missingProvider) LatestCommit() (lite.FullCommit, error) { + return lite.FullCommit{}, liteErr.ErrCommitNotFound() +} + func TestMemProvider(t *testing.T) { - p := certifiers.NewMemStoreProvider() + p := lite.NewMemStoreProvider() checkProvider(t, p, "test-mem", "empty") } func TestCacheProvider(t *testing.T) { - p := certifiers.NewCacheProvider( - certifiers.NewMissingProvider(), - certifiers.NewMemStoreProvider(), - certifiers.NewMissingProvider(), + p := lite.NewCacheProvider( + NewMissingProvider(), + lite.NewMemStoreProvider(), + NewMissingProvider(), ) checkProvider(t, p, "test-cache", "kjfhekfhkewhgit") } -func checkProvider(t *testing.T, p certifiers.Provider, chainID, app string) { +func checkProvider(t *testing.T, p lite.Provider, chainID, app string) { assert, require := assert.New(t), require.New(t) appHash := []byte(app) - keys := certifiers.GenValKeys(5) + keys := lite.GenValKeys(5) count := 10 // make a bunch of commits... - commits := make([]certifiers.FullCommit, count) + commits := make([]lite.FullCommit, count) for i := 0; i < count; i++ { // two commits for each validator, to check how we handle dups // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... 
vals := keys.ToValidators(10, int64(count/2)) - h := 20 + 10*i + h := int64(20 + 10*i) commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, 5) } // check provider is empty fc, err := p.GetByHeight(20) require.NotNil(err) - assert.True(errors.IsCommitNotFoundErr(err)) + assert.True(liteErr.IsCommitNotFoundErr(err)) fc, err = p.GetByHash(commits[3].ValidatorsHash()) require.NotNil(err) - assert.True(errors.IsCommitNotFoundErr(err)) + assert.True(liteErr.IsCommitNotFoundErr(err)) // now add them all to the provider for _, s := range commits { @@ -74,13 +95,13 @@ func checkProvider(t *testing.T, p certifiers.Provider, chainID, app string) { fc, err = p.GetByHeight(47) if assert.Nil(err) { // we only step by 10, so 40 must be the one below this - assert.Equal(40, fc.Height()) + assert.EqualValues(40, fc.Height()) } } // this will make a get height, and if it is good, set the data as well -func checkGetHeight(t *testing.T, p certifiers.Provider, ask, expect int) { +func checkGetHeight(t *testing.T, p lite.Provider, ask, expect int64) { fc, err := p.GetByHeight(ask) require.Nil(t, err, "%+v", err) if assert.Equal(t, expect, fc.Height()) { @@ -95,19 +116,19 @@ func TestCacheGetsBestHeight(t *testing.T) { // we will write data to the second level of the cache (p2), // and see what gets cached, stored in - p := certifiers.NewMemStoreProvider() - p2 := certifiers.NewMemStoreProvider() - cp := certifiers.NewCacheProvider(p, p2) + p := lite.NewMemStoreProvider() + p2 := lite.NewMemStoreProvider() + cp := lite.NewCacheProvider(p, p2) chainID := "cache-best-height" appHash := []byte("01234567") - keys := certifiers.GenValKeys(5) + keys := lite.GenValKeys(5) count := 10 // set a bunch of commits for i := 0; i < count; i++ { vals := keys.ToValidators(10, int64(count/2)) - h := 10 * (i + 1) + h := int64(10 * (i + 1)) fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, 5) err := p2.StoreCommit(fc) require.NoError(err) diff --git a/certifiers/static.go 
b/lite/static.go similarity index 79% rename from certifiers/static.go rename to lite/static.go index 787aecb3..abbef578 100644 --- a/certifiers/static.go +++ b/lite/static.go @@ -1,4 +1,4 @@ -package certifiers +package lite import ( "bytes" @@ -7,7 +7,7 @@ import ( "github.com/tendermint/tendermint/types" - certerr "github.com/tendermint/tendermint/certifiers/errors" + liteErr "github.com/tendermint/tendermint/lite/errors" ) var _ Certifier = &Static{} @@ -25,6 +25,7 @@ type Static struct { vhash []byte } +// NewStatic returns a new certifier with a static validator set. func NewStatic(chainID string, vals *types.ValidatorSet) *Static { return &Static{ chainID: chainID, @@ -32,14 +33,17 @@ func NewStatic(chainID string, vals *types.ValidatorSet) *Static { } } +// ChainID returns the chain id. func (c *Static) ChainID() string { return c.chainID } +// Validators returns the validator set. func (c *Static) Validators() *types.ValidatorSet { return c.vSet } +// Hash returns the hash of the validator set. func (c *Static) Hash() []byte { if len(c.vhash) == 0 { c.vhash = c.vSet.Hash() @@ -47,6 +51,7 @@ func (c *Static) Hash() []byte { return c.vhash } +// Certify makes sure that the commit is valid. 
func (c *Static) Certify(commit Commit) error { // do basic sanity checks err := commit.ValidateBasic(c.chainID) @@ -56,7 +61,7 @@ func (c *Static) Certify(commit Commit) error { // make sure it has the same validator set we have (static means static) if !bytes.Equal(c.Hash(), commit.Header.ValidatorsHash) { - return certerr.ErrValidatorsChanged() + return liteErr.ErrValidatorsChanged() } // then make sure we have the proper signatures for this diff --git a/certifiers/static_test.go b/lite/static_test.go similarity index 80% rename from certifiers/static_test.go rename to lite/static_test.go index f1f40c6c..e4bf435c 100644 --- a/certifiers/static_test.go +++ b/lite/static_test.go @@ -1,4 +1,4 @@ -package certifiers_test +package lite_test import ( "testing" @@ -7,8 +7,8 @@ import ( "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/certifiers" - errors "github.com/tendermint/tendermint/certifiers/errors" + "github.com/tendermint/tendermint/lite" + liteErr "github.com/tendermint/tendermint/lite/errors" ) func TestStaticCert(t *testing.T) { @@ -16,17 +16,17 @@ func TestStaticCert(t *testing.T) { assert := assert.New(t) // require := require.New(t) - keys := certifiers.GenValKeys(4) + keys := lite.GenValKeys(4) // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! 
vals := keys.ToValidators(20, 10) // and a certifier based on our known set chainID := "test-static" - cert := certifiers.NewStatic(chainID, vals) + cert := lite.NewStatic(chainID, vals) cases := []struct { - keys certifiers.ValKeys + keys lite.ValKeys vals *types.ValidatorSet - height int + height int64 first, last int // who actually signs proper bool // true -> expect no error changed bool // true -> expect validator change error @@ -51,7 +51,7 @@ func TestStaticCert(t *testing.T) { } else { assert.NotNil(err) if tc.changed { - assert.True(errors.IsValidatorsChangedErr(err), "%+v", err) + assert.True(liteErr.IsValidatorsChangedErr(err), "%+v", err) } } } diff --git a/mempool/mempool.go b/mempool/mempool.go index caaa034e..ccd615ac 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -3,6 +3,7 @@ package mempool import ( "bytes" "container/list" + "fmt" "sync" "sync/atomic" "time" @@ -61,12 +62,12 @@ type Mempool struct { proxyAppConn proxy.AppConnMempool txs *clist.CList // concurrent linked-list of good txs counter int64 // simple incrementing counter - height int // the last block Update()'d to + height int64 // the last block Update()'d to rechecking int32 // for re-checking filtered txs on Update() recheckCursor *clist.CElement // next expected response recheckEnd *clist.CElement // re-checking stops here notifiedTxsAvailable bool // true if fired on txsAvailable for this height - txsAvailable chan int // fires the next height once for each height, when the mempool is not empty + txsAvailable chan int64 // fires the next height once for each height, when the mempool is not empty // Keep a cache of already-seen txs. // This reduces the pressure on the proxyApp. @@ -80,7 +81,7 @@ type Mempool struct { // NewMempool returns a new Mempool with the given configuration and connection to an application. // TODO: Extract logger into arguments. 
-func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, height int) *Mempool { +func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, height int64) *Mempool { mempool := &Mempool{ config: config, proxyAppConn: proxyAppConn, @@ -102,7 +103,7 @@ func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, he // ensuring it will trigger once every height when transactions are available. // NOTE: not thread safe - should only be called once, on startup func (mem *Mempool) EnableTxsAvailable() { - mem.txsAvailable = make(chan int, 1) + mem.txsAvailable = make(chan int64, 1) } // SetLogger sets the Logger. @@ -110,6 +111,26 @@ func (mem *Mempool) SetLogger(l log.Logger) { mem.logger = l } +// CloseWAL closes and discards the underlying WAL file. +// Any further writes will not be relayed to disk. +func (mem *Mempool) CloseWAL() bool { + if mem == nil { + return false + } + + mem.proxyMtx.Lock() + defer mem.proxyMtx.Unlock() + + if mem.wal == nil { + return false + } + if err := mem.wal.Close(); err != nil && mem.logger != nil { + mem.logger.Error("Mempool.CloseWAL", "err", err) + } + mem.wal = nil + return true +} + func (mem *Mempool) initWAL() { walDir := mem.config.WalDir() if walDir != "" { @@ -171,17 +192,7 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { // CACHE if mem.cache.Exists(tx) { - if cb != nil { - cb(&abci.Response{ - Value: &abci.Response_CheckTx{ - &abci.ResponseCheckTx{ - Code: abci.CodeType_BadNonce, // TODO or duplicate tx - Log: "Duplicate transaction (ignored)", - }, - }, - }) - } - return nil // TODO: return an error (?) 
+ return fmt.Errorf("Tx already exists in cache") } mem.cache.Push(tx) // END CACHE @@ -189,8 +200,14 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { // WAL if mem.wal != nil { // TODO: Notify administrators when WAL fails - mem.wal.Write([]byte(tx)) - mem.wal.Write([]byte("\n")) + _, err := mem.wal.Write([]byte(tx)) + if err != nil { + mem.logger.Error("Error writing to WAL", "err", err) + } + _, err = mem.wal.Write([]byte("\n")) + if err != nil { + mem.logger.Error("Error writing to WAL", "err", err) + } } // END WAL @@ -219,11 +236,11 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { switch r := res.Value.(type) { case *abci.Response_CheckTx: tx := req.GetCheckTx().Tx - if r.CheckTx.Code == abci.CodeType_OK { + if r.CheckTx.Code == abci.CodeTypeOK { mem.counter++ memTx := &mempoolTx{ counter: mem.counter, - height: int64(mem.height), + height: mem.height, tx: tx, } mem.txs.PushBack(memTx) @@ -251,7 +268,7 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { cmn.PanicSanity(cmn.Fmt("Unexpected tx response from proxy during recheck\n"+ "Expected %X, got %X", r.CheckTx.Data, memTx.tx)) } - if r.CheckTx.Code == abci.CodeType_OK { + if r.CheckTx.Code == abci.CodeTypeOK { // Good, nothing to do. } else { // Tx became invalidated due to newly committed block. @@ -284,7 +301,7 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { // TxsAvailable returns a channel which fires once for every height, // and only when transactions are available in the mempool. // NOTE: the returned channel may be nil if EnableTxsAvailable was not called. -func (mem *Mempool) TxsAvailable() <-chan int { +func (mem *Mempool) TxsAvailable() <-chan int64 { return mem.txsAvailable } @@ -331,10 +348,10 @@ func (mem *Mempool) collectTxs(maxTxs int) types.Txs { // Update informs the mempool that the given txs were committed and can be discarded. 
// NOTE: this should be called *after* block is committed by consensus. // NOTE: unsafe; Lock/Unlock must be managed by caller -func (mem *Mempool) Update(height int, txs types.Txs) { - // TODO: check err ? - mem.proxyAppConn.FlushSync() // To flush async resCb calls e.g. from CheckTx - +func (mem *Mempool) Update(height int64, txs types.Txs) error { + if err := mem.proxyAppConn.FlushSync(); err != nil { // To flush async resCb calls e.g. from CheckTx + return err + } // First, create a lookup map of txns in new txs. txsMap := make(map[string]struct{}) for _, tx := range txs { @@ -357,6 +374,7 @@ func (mem *Mempool) Update(height int, txs types.Txs) { // mem.recheckCursor re-scans mem.txs and possibly removes some txs. // Before mem.Reap(), we should wait for mem.recheckCursor to be nil. } + return nil } func (mem *Mempool) filterTxs(blockTxsMap map[string]struct{}) []types.Tx { @@ -405,8 +423,8 @@ type mempoolTx struct { } // Height returns the height for this transaction -func (memTx *mempoolTx) Height() int { - return int(atomic.LoadInt64(&memTx.height)) +func (memTx *mempoolTx) Height() int64 { + return atomic.LoadInt64(&memTx.height) } //-------------------------------------------------------------------------------- diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index 46401e88..4d75cc58 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -1,18 +1,27 @@ package mempool import ( + "crypto/md5" "crypto/rand" "encoding/binary" + "fmt" + "io/ioutil" + "os" + "path/filepath" "testing" "time" "github.com/tendermint/abci/example/counter" "github.com/tendermint/abci/example/dummy" + abci "github.com/tendermint/abci/types" + cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tmlibs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" + + "github.com/stretchr/testify/require" ) func newMempoolWithApp(cc proxy.ClientCreator) *Mempool { @@ -20,13 
+29,16 @@ func newMempoolWithApp(cc proxy.ClientCreator) *Mempool { appConnMem, _ := cc.NewABCIClient() appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) - appConnMem.Start() + err := appConnMem.Start() + if err != nil { + panic(err) + } mempool := NewMempool(config.Mempool, appConnMem, 0) mempool.SetLogger(log.TestingLogger()) return mempool } -func ensureNoFire(t *testing.T, ch <-chan int, timeoutMS int) { +func ensureNoFire(t *testing.T, ch <-chan int64, timeoutMS int) { timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond) select { case <-ch: @@ -35,7 +47,7 @@ func ensureNoFire(t *testing.T, ch <-chan int, timeoutMS int) { } } -func ensureFire(t *testing.T, ch <-chan int, timeoutMS int) { +func ensureFire(t *testing.T, ch <-chan int64, timeoutMS int) { timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond) select { case <-ch: @@ -49,10 +61,12 @@ func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs { for i := 0; i < count; i++ { txBytes := make([]byte, 20) txs[i] = txBytes - rand.Read(txBytes) - err := mempool.CheckTx(txBytes, nil) + _, err := rand.Read(txBytes) if err != nil { - t.Fatal("Error after CheckTx: %v", err) + t.Error(err) + } + if err := mempool.CheckTx(txBytes, nil); err != nil { + t.Fatalf("Error after CheckTx: %v", err) } } return txs @@ -78,7 +92,9 @@ func TestTxsAvailable(t *testing.T) { // it should fire once now for the new height // since there are still txs left committedTxs, txs := txs[:50], txs[50:] - mempool.Update(1, committedTxs) + if err := mempool.Update(1, committedTxs); err != nil { + t.Error(err) + } ensureFire(t, mempool.TxsAvailable(), timeoutMS) ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) @@ -88,7 +104,9 @@ func TestTxsAvailable(t *testing.T) { // now call update with all the txs. it should not fire as there are no txs left committedTxs = append(txs, moreTxs...) 
- mempool.Update(2, committedTxs) + if err := mempool.Update(2, committedTxs); err != nil { + t.Error(err) + } ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) // send a bunch more txs, it should only fire once @@ -99,16 +117,16 @@ func TestTxsAvailable(t *testing.T) { func TestSerialReap(t *testing.T) { app := counter.NewCounterApplication(true) - app.SetOption("serial", "on") + app.SetOption(abci.RequestSetOption{"serial", "on"}) cc := proxy.NewLocalClientCreator(app) mempool := newMempoolWithApp(cc) appConnCon, _ := cc.NewABCIClient() appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) - if _, err := appConnCon.Start(); err != nil { - t.Fatalf("Error starting ABCI client: %v", err.Error()) - } + err := appConnCon.Start() + require.Nil(t, err) + cacheMap := make(map[string]struct{}) deliverTxsRange := func(start, end int) { // Deliver some txs. for i := start; i < end; i++ { @@ -117,26 +135,23 @@ func TestSerialReap(t *testing.T) { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(i)) err := mempool.CheckTx(txBytes, nil) - if err != nil { - t.Fatal("Error after CheckTx: %v", err) + _, cached := cacheMap[string(txBytes)] + if cached { + require.NotNil(t, err, "expected error for cached tx") + } else { + require.Nil(t, err, "expected no err for uncached tx") } + cacheMap[string(txBytes)] = struct{}{} - // This will fail because not serial (incrementing) - // However, error should still be nil. - // It just won't show up on Reap(). 
+ // Duplicates are cached and should return error err = mempool.CheckTx(txBytes, nil) - if err != nil { - t.Fatal("Error after CheckTx: %v", err) - } - + require.NotNil(t, err, "Expected error after CheckTx on duplicated tx") } } reapCheck := func(exp int) { txs := mempool.Reap(-1) - if len(txs) != exp { - t.Fatalf("Expected to reap %v txs but got %v", exp, len(txs)) - } + require.Equal(t, len(txs), exp, cmn.Fmt("Expected to reap %v txs but got %v", exp, len(txs))) } updateRange := func(start, end int) { @@ -146,7 +161,9 @@ func TestSerialReap(t *testing.T) { binary.BigEndian.PutUint64(txBytes, uint64(i)) txs = append(txs, txBytes) } - mempool.Update(0, txs) + if err := mempool.Update(0, txs); err != nil { + t.Error(err) + } } commitRange := func(start, end int) { @@ -154,13 +171,19 @@ func TestSerialReap(t *testing.T) { for i := start; i < end; i++ { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(i)) - res := appConnCon.DeliverTxSync(txBytes) - if !res.IsOK() { + res, err := appConnCon.DeliverTxSync(txBytes) + if err != nil { + t.Errorf("Client error committing tx: %v", err) + } + if res.IsErr() { t.Errorf("Error committing tx. Code:%v result:%X log:%v", res.Code, res.Data, res.Log) } } - res := appConnCon.CommitSync() + res, err := appConnCon.CommitSync() + if err != nil { + t.Errorf("Client error committing: %v", err) + } if len(res.Data) != 8 { t.Errorf("Error committing. Hash:%X log:%v", res.Data, res.Log) } @@ -200,3 +223,63 @@ func TestSerialReap(t *testing.T) { // We should have 600 now. reapCheck(600) } + +func TestMempoolCloseWAL(t *testing.T) { + // 1. Create the temporary directory for mempool and WAL testing. + rootDir, err := ioutil.TempDir("", "mempool-test") + require.Nil(t, err, "expecting successful tmpdir creation") + defer os.RemoveAll(rootDir) + + // 2. 
Ensure that it doesn't contain any elements -- Sanity check + m1, err := filepath.Glob(filepath.Join(rootDir, "*")) + require.Nil(t, err, "successful globbing expected") + require.Equal(t, 0, len(m1), "no matches yet") + + // 3. Create the mempool + wcfg := *(cfg.DefaultMempoolConfig()) + wcfg.RootDir = rootDir + app := dummy.NewDummyApplication() + cc := proxy.NewLocalClientCreator(app) + appConnMem, _ := cc.NewABCIClient() + mempool := NewMempool(&wcfg, appConnMem, 10) + + // 4. Ensure that the directory contains the WAL file + m2, err := filepath.Glob(filepath.Join(rootDir, "*")) + require.Nil(t, err, "successful globbing expected") + require.Equal(t, 1, len(m2), "expecting the wal match in") + + // 5. Write some contents to the WAL + mempool.CheckTx(types.Tx([]byte("foo")), nil) + walFilepath := mempool.wal.Path + sum1 := checksumFile(walFilepath, t) + + // 6. Sanity check to ensure that the written TX matches the expectation. + require.Equal(t, sum1, checksumIt([]byte("foo\n")), "foo with a newline should be written") + + // 7. Invoke CloseWAL() and ensure it discards the + // WAL thus any other write won't go through. + require.True(t, mempool.CloseWAL(), "CloseWAL should CloseWAL") + mempool.CheckTx(types.Tx([]byte("bar")), nil) + sum2 := checksumFile(walFilepath, t) + require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded") + + // 8. Second CloseWAL should do nothing + require.False(t, mempool.CloseWAL(), "CloseWAL should CloseWAL") + + // 9. 
Sanity check to ensure that the WAL file still exists + m3, err := filepath.Glob(filepath.Join(rootDir, "*")) + require.Nil(t, err, "successful globbing expected") + require.Equal(t, 1, len(m3), "expecting the wal match in") +} + +func checksumIt(data []byte) string { + h := md5.New() + h.Write(data) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func checksumFile(p string, t *testing.T) string { + data, err := ioutil.ReadFile(p) + require.Nil(t, err, "expecting successful read of %q", p) + return checksumIt(data) +} diff --git a/mempool/reactor.go b/mempool/reactor.go index 87bac5d9..9aed416f 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -28,7 +28,6 @@ type MempoolReactor struct { p2p.BaseReactor config *cfg.MempoolConfig Mempool *Mempool - evsw types.EventSwitch } // NewMempoolReactor returns a new MempoolReactor with the given config and mempool. @@ -51,7 +50,7 @@ func (memR *MempoolReactor) SetLogger(l log.Logger) { // It returns the list of channels for this reactor. func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor { return []*p2p.ChannelDescriptor{ - &p2p.ChannelDescriptor{ + { ID: MempoolChannel, Priority: 5, }, @@ -98,7 +97,7 @@ func (memR *MempoolReactor) BroadcastTx(tx types.Tx, cb func(*abci.Response)) er // PeerState describes the state of a peer. type PeerState interface { - GetHeight() int + GetHeight() int64 } // Peer describes a peer. @@ -150,11 +149,6 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer Peer) { } } -// SetEventSwitch implements events.Eventable. 
-func (memR *MempoolReactor) SetEventSwitch(evsw types.EventSwitch) { - memR.evsw = evsw -} - //----------------------------------------------------------------------------- // Messages diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index a2f0f272..45458a98 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -81,7 +81,7 @@ func _waitForTxs(t *testing.T, wg *sync.WaitGroup, txs types.Txs, reactorIdx int mempool := reactors[reactorIdx].Mempool for mempool.Size() != len(txs) { - time.Sleep(time.Second) + time.Sleep(time.Millisecond * 100) } reapedTxs := mempool.Reap(len(txs)) diff --git a/node/node.go b/node/node.go index 7bb71449..eb550971 100644 --- a/node/node.go +++ b/node/node.go @@ -2,6 +2,7 @@ package node import ( "bytes" + "context" "encoding/json" "errors" "fmt" @@ -21,6 +22,7 @@ import ( "github.com/tendermint/tendermint/consensus" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/p2p/trust" "github.com/tendermint/tendermint/proxy" rpccore "github.com/tendermint/tendermint/rpc/core" grpccore "github.com/tendermint/tendermint/rpc/grpc" @@ -94,12 +96,13 @@ type Node struct { privValidator types.PrivValidator // local node's validator key // network - privKey crypto.PrivKeyEd25519 // local node's p2p key - sw *p2p.Switch // p2p connections - addrBook *p2p.AddrBook // known peers + privKey crypto.PrivKeyEd25519 // local node's p2p key + sw *p2p.Switch // p2p connections + addrBook *p2p.AddrBook // known peers + trustMetricStore *trust.TrustMetricStore // trust metrics for all peers // services - evsw types.EventSwitch // pub/sub for services + eventBus *types.EventBus // pub/sub for services blockStore *bc.BlockStore // store the blockchain to disk bcReactor *bc.BlockchainReactor // for fast-syncing mempoolReactor *mempl.MempoolReactor // for gossipping transactions @@ -108,6 +111,7 @@ type Node struct { proxyApp proxy.AppConns // connection to the 
application rpcListeners []net.Listener // rpc servers txIndexer txindex.TxIndexer + indexerService *txindex.IndexerService } // NewNode returns a new, ready to go, Tendermint Node. @@ -162,7 +166,7 @@ func NewNode(config *cfg.Config, handshaker.SetLogger(consensusLogger) proxyApp := proxy.NewAppConns(clientCreator, handshaker) proxyApp.SetLogger(logger.With("module", "proxy")) - if _, err := proxyApp.Start(); err != nil { + if err := proxyApp.Start(); err != nil { return nil, fmt.Errorf("Error starting proxy app connections: %v", err) } @@ -170,30 +174,9 @@ func NewNode(config *cfg.Config, state = sm.LoadState(stateDB) state.SetLogger(stateLogger) - // Transaction indexing - var txIndexer txindex.TxIndexer - switch config.TxIndex { - case "kv": - store, err := dbProvider(&DBContext{"tx_index", config}) - if err != nil { - return nil, err - } - txIndexer = kv.NewTxIndex(store) - default: - txIndexer = &null.TxIndex{} - } - state.TxIndexer = txIndexer - // Generate node PrivKey privKey := crypto.GenPrivKeyEd25519() - // Make event switch - eventSwitch := types.NewEventSwitch() - eventSwitch.SetLogger(logger.With("module", "types")) - if _, err := eventSwitch.Start(); err != nil { - return nil, fmt.Errorf("Failed to start switch: %v", err) - } - // Decide whether to fast-sync or not // We don't fast-sync when the only validator is us. 
fastSync := config.FastSync @@ -245,9 +228,19 @@ func NewNode(config *cfg.Config, // Optionally, start the pex reactor var addrBook *p2p.AddrBook + var trustMetricStore *trust.TrustMetricStore if config.P2P.PexReactor { addrBook = p2p.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) + + // Get the trust metric history data + trustHistoryDB, err := dbProvider(&DBContext{"trusthistory", config}) + if err != nil { + return nil, err + } + trustMetricStore = trust.NewTrustMetricStore(trustHistoryDB, trust.DefaultConfig()) + trustMetricStore.SetLogger(p2pLogger) + pexReactor := p2p.NewPEXReactor(addrBook) pexReactor.SetLogger(p2pLogger) sw.AddReactor("PEX", pexReactor) @@ -263,31 +256,54 @@ func NewNode(config *cfg.Config, if err != nil { return err } - if resQuery.Code.IsOK() { - return nil + if resQuery.IsErr() { + return resQuery } - return errors.New(resQuery.Code.String()) + return nil }) sw.SetPubKeyFilter(func(pubkey crypto.PubKeyEd25519) error { resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/pubkey/%X", pubkey.Bytes())}) if err != nil { return err } - if resQuery.Code.IsOK() { - return nil + if resQuery.IsErr() { + return resQuery } - return errors.New(resQuery.Code.String()) + return nil }) } - // add the event switch to all services - // they should all satisfy events.Eventable - SetEventSwitch(eventSwitch, bcReactor, mempoolReactor, consensusReactor) + eventBus := types.NewEventBus() + eventBus.SetLogger(logger.With("module", "events")) + + // services which will be publishing and/or subscribing for messages (events) + bcReactor.SetEventBus(eventBus) + consensusReactor.SetEventBus(eventBus) + + // Transaction indexing + var txIndexer txindex.TxIndexer + switch config.TxIndex.Indexer { + case "kv": + store, err := dbProvider(&DBContext{"tx_index", config}) + if err != nil { + return nil, err + } + if config.TxIndex.IndexTags != "" { + 
txIndexer = kv.NewTxIndex(store, kv.IndexTags(strings.Split(config.TxIndex.IndexTags, ","))) + } else if config.TxIndex.IndexAllTags { + txIndexer = kv.NewTxIndex(store, kv.IndexAllTags()) + } else { + txIndexer = kv.NewTxIndex(store) + } + default: + txIndexer = &null.TxIndex{} + } + + indexerService := txindex.NewIndexerService(txIndexer, eventBus) // run the profile server profileHost := config.ProfListenAddress if profileHost != "" { - go func() { logger.Error("Profile server", "err", http.ListenAndServe(profileHost, nil)) }() @@ -298,11 +314,11 @@ func NewNode(config *cfg.Config, genesisDoc: genDoc, privValidator: privValidator, - privKey: privKey, - sw: sw, - addrBook: addrBook, + privKey: privKey, + sw: sw, + addrBook: addrBook, + trustMetricStore: trustMetricStore, - evsw: eventSwitch, blockStore: blockStore, bcReactor: bcReactor, mempoolReactor: mempoolReactor, @@ -310,6 +326,8 @@ func NewNode(config *cfg.Config, consensusReactor: consensusReactor, proxyApp: proxyApp, txIndexer: txIndexer, + indexerService: indexerService, + eventBus: eventBus, } node.BaseService = *cmn.NewBaseService(logger, "Node", node) return node, nil @@ -317,6 +335,11 @@ func NewNode(config *cfg.Config, // OnStart starts the Node. It implements cmn.Service. func (n *Node) OnStart() error { + err := n.eventBus.Start() + if err != nil { + return err + } + // Run the RPC server first // so we can eg. 
receive txs for the first block if n.config.RPC.ListenAddress != "" { @@ -335,7 +358,7 @@ func (n *Node) OnStart() error { // Start the switch n.sw.SetNodeInfo(n.makeNodeInfo()) n.sw.SetNodePrivKey(n.privKey) - _, err := n.sw.Start() + err = n.sw.Start() if err != nil { return err } @@ -349,6 +372,12 @@ func (n *Node) OnStart() error { } } + // start tx indexer + err = n.indexerService.Start() + if err != nil { + return err + } + return nil } @@ -366,9 +395,13 @@ func (n *Node) OnStop() { n.Logger.Error("Error closing listener", "listener", l, "err", err) } } + + n.eventBus.Stop() + + n.indexerService.Stop() } -// RunForever waits for an interupt signal and stops the node. +// RunForever waits for an interrupt signal and stops the node. func (n *Node) RunForever() { // Sleep forever and then... cmn.TrapSignal(func() { @@ -376,13 +409,6 @@ func (n *Node) RunForever() { }) } -// SetEventSwitch adds the event switch to reactors, mempool, etc. -func SetEventSwitch(evsw types.EventSwitch, eventables ...types.Eventable) { - for _, e := range eventables { - e.SetEventSwitch(evsw) - } -} - // AddListener adds a listener to accept inbound peer connections. // It should be called before starting the Node. 
// The first listener is the primary listener (in NodeInfo) @@ -393,7 +419,6 @@ func (n *Node) AddListener(l p2p.Listener) { // ConfigureRPC sets all variables in rpccore so they will serve // rpc calls from this node func (n *Node) ConfigureRPC() { - rpccore.SetEventSwitch(n.evsw) rpccore.SetBlockStore(n.blockStore) rpccore.SetConsensusState(n.consensusState) rpccore.SetMempool(n.mempoolReactor.Mempool) @@ -404,6 +429,7 @@ func (n *Node) ConfigureRPC() { rpccore.SetProxyAppQuery(n.proxyApp.Query()) rpccore.SetTxIndexer(n.txIndexer) rpccore.SetConsensusReactor(n.consensusReactor) + rpccore.SetEventBus(n.eventBus) rpccore.SetLogger(n.Logger.With("module", "rpc")) } @@ -420,7 +446,13 @@ func (n *Node) startRPC() ([]net.Listener, error) { for i, listenAddr := range listenAddrs { mux := http.NewServeMux() rpcLogger := n.Logger.With("module", "rpc-server") - wm := rpcserver.NewWebsocketManager(rpccore.Routes, n.evsw) + onDisconnect := rpcserver.OnDisconnect(func(remoteAddr string) { + err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr) + if err != nil { + rpcLogger.Error("Error unsubsribing from all on disconnect", "err", err) + } + }) + wm := rpcserver.NewWebsocketManager(rpccore.Routes, onDisconnect) wm.SetLogger(rpcLogger.With("protocol", "websocket")) mux.HandleFunc("/websocket", wm.WebsocketHandler) rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger) @@ -469,9 +501,9 @@ func (n *Node) MempoolReactor() *mempl.MempoolReactor { return n.mempoolReactor } -// EventSwitch returns the Node's EventSwitch. -func (n *Node) EventSwitch() types.EventSwitch { - return n.evsw +// EventBus returns the Node's EventBus. +func (n *Node) EventBus() *types.EventBus { + return n.eventBus } // PrivValidator returns the Node's PrivValidator. 
@@ -509,11 +541,8 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo { }, } - // include git hash in the nodeInfo if available - // TODO: use ld-flags - /*if rev, err := cmn.ReadFile(n.config.GetString("revision_file")); err == nil { - nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("revision=%v", string(rev))) - }*/ + rpcListenAddr := n.config.RPC.ListenAddress + nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("rpc_addr=%v", rpcListenAddr)) if !n.sw.IsListening() { return nodeInfo @@ -522,13 +551,8 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo { p2pListener := n.sw.Listeners()[0] p2pHost := p2pListener.ExternalAddress().IP.String() p2pPort := p2pListener.ExternalAddress().Port - rpcListenAddr := n.config.RPC.ListenAddress - - // We assume that the rpcListener has the same ExternalAddress. - // This is probably true because both P2P and RPC listeners use UPnP, - // except of course if the rpc is only bound to localhost nodeInfo.ListenAddr = cmn.Fmt("%v:%v", p2pHost, p2pPort) - nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("rpc_addr=%v", rpcListenAddr)) + return nodeInfo } diff --git a/node/node_test.go b/node/node_test.go index 641e606c..eb8d109f 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -1,6 +1,7 @@ package node import ( + "context" "testing" "time" @@ -9,30 +10,39 @@ import ( "github.com/tendermint/tmlibs/log" cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/types" ) func TestNodeStartStop(t *testing.T) { config := cfg.ResetTestRoot("node_node_test") - // Create & start node + // create & start node n, err := DefaultNewNode(config, log.TestingLogger()) assert.NoError(t, err, "expected no err on DefaultNewNode") - n.Start() + err1 := n.Start() + if err1 != nil { + t.Error(err1) + } t.Logf("Started node %v", n.sw.NodeInfo()) - // Wait a bit to initialize - // TODO remove time.Sleep(), make asynchronous. 
- time.Sleep(time.Second * 2) + // wait for the node to produce a block + blockCh := make(chan interface{}) + err = n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock, blockCh) + assert.NoError(t, err) + select { + case <-blockCh: + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for the node to produce a block") + } - ch := make(chan struct{}, 1) + // stop the node go func() { n.Stop() - ch <- struct{}{} }() - ticker := time.NewTicker(time.Second * 5) + select { - case <-ch: - case <-ticker.C: + case <-n.Quit: + case <-time.After(5 * time.Second): t.Fatal("timed out waiting for shutdown") } } diff --git a/p2p/README.md b/p2p/README.md index bf0a5c4d..d653b2ca 100644 --- a/p2p/README.md +++ b/p2p/README.md @@ -4,9 +4,9 @@ `tendermint/tendermint/p2p` provides an abstraction around peer-to-peer communication.
-## Peer/MConnection/Channel +## MConnection -Each peer has one `MConnection` (multiplex connection) instance. +`MConnection` is a multiplex connection: __multiplex__ *noun* a system or signal involving simultaneous transmission of several messages along a single channel of communication. @@ -16,6 +16,43 @@ Each `MConnection` handles message transmission on multiple abstract communicati The byte id and the relative priorities of each `Channel` are configured upon initialization of the connection. +The `MConnection` supports three packet types: Ping, Pong, and Msg. + +### Ping and Pong + +The ping and pong messages consist of writing a single byte to the connection; 0x1 and 0x2, respectively + +When we haven't received any messages on an `MConnection` in a time `pingTimeout`, we send a ping message. +When a ping is received on the `MConnection`, a pong is sent in response. + +If a pong is not received in sufficient time, the peer's score should be decremented (TODO). + +### Msg + +Messages in channels are chopped into smaller msgPackets for multiplexing. + +``` +type msgPacket struct { + ChannelID byte + EOF byte // 1 means message ends here. + Bytes []byte +} +``` + +The msgPacket is serialized using go-wire, and prefixed with a 0x3. +The received `Bytes` of a sequential set of packets are appended together +until a packet with `EOF=1` is received, at which point the complete serialized message +is returned for processing by the corresponding channels `onReceive` function. + +### Multiplexing + +Messages are sent from a single `sendRoutine`, which loops over a select statement that results in the sending +of a ping, a pong, or a batch of data messages. The batch of data messages may include messages from multiple channels. +Message bytes are queued for sending in their respective channel, with each channel holding one unsent message at a time. 
+Messages are chosen for a batch one a time from the channel with the lowest ratio of recently sent bytes to channel priority. + +## Sending Messages + There are two methods for sending messages: ```go func (m MConnection) Send(chID byte, msg interface{}) bool {} @@ -31,6 +68,12 @@ queue is full. `Send()` and `TrySend()` are also exposed for each `Peer`. +## Peer + +Each peer has one `MConnection` instance, and includes other information such as whether the connection +was outbound, whether the connection should be recreated if it closes, various identity information about the node, +and other higher level thread-safe data used by the reactors. + ## Switch/Reactor The `Switch` handles peer connections and exposes an API to receive incoming messages diff --git a/p2p/addrbook.go b/p2p/addrbook.go index 62b25a71..8f924d12 100644 --- a/p2p/addrbook.go +++ b/p2p/addrbook.go @@ -7,6 +7,7 @@ package p2p import ( "encoding/binary" "encoding/json" + "fmt" "math" "math/rand" "net" @@ -40,7 +41,7 @@ const ( // old buckets over which an address group will be spread. oldBucketsPerGroup = 4 - // new buckets over which an source address group will be spread. + // new buckets over which a source address group will be spread. newBucketsPerGroup = 32 // buckets a frequently seen new address may end up in. 
@@ -79,18 +80,22 @@ const ( type AddrBook struct { cmn.BaseService - mtx sync.Mutex + // immutable after creation filePath string routabilityStrict bool - rand *rand.Rand key string - ourAddrs map[string]*NetAddress - addrLookup map[string]*knownAddress // new & old - addrNew []map[string]*knownAddress - addrOld []map[string]*knownAddress - wg sync.WaitGroup - nOld int - nNew int + + // accessed concurrently + mtx sync.Mutex + rand *rand.Rand + ourAddrs map[string]*NetAddress + addrLookup map[string]*knownAddress // new & old + bucketsOld []map[string]*knownAddress + bucketsNew []map[string]*knownAddress + nOld int + nNew int + + wg sync.WaitGroup } // NewAddrBook creates a new address book. @@ -112,23 +117,29 @@ func NewAddrBook(filePath string, routabilityStrict bool) *AddrBook { func (a *AddrBook) init() { a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits // New addr buckets - a.addrNew = make([]map[string]*knownAddress, newBucketCount) - for i := range a.addrNew { - a.addrNew[i] = make(map[string]*knownAddress) + a.bucketsNew = make([]map[string]*knownAddress, newBucketCount) + for i := range a.bucketsNew { + a.bucketsNew[i] = make(map[string]*knownAddress) } // Old addr buckets - a.addrOld = make([]map[string]*knownAddress, oldBucketCount) - for i := range a.addrOld { - a.addrOld[i] = make(map[string]*knownAddress) + a.bucketsOld = make([]map[string]*knownAddress, oldBucketCount) + for i := range a.bucketsOld { + a.bucketsOld[i] = make(map[string]*knownAddress) } } // OnStart implements Service. func (a *AddrBook) OnStart() error { - a.BaseService.OnStart() + if err := a.BaseService.OnStart(); err != nil { + return err + } a.loadFromFile(a.filePath) + + // wg.Add to ensure that any invocation of .Wait() + // later on will wait for saveRoutine to terminate. a.wg.Add(1) go a.saveRoutine() + return nil } @@ -141,6 +152,7 @@ func (a *AddrBook) Wait() { a.wg.Wait() } +// AddOurAddress adds another one of our addresses. 
func (a *AddrBook) AddOurAddress(addr *NetAddress) { a.mtx.Lock() defer a.mtx.Unlock() @@ -148,6 +160,7 @@ func (a *AddrBook) AddOurAddress(addr *NetAddress) { a.ourAddrs[addr.String()] = addr } +// OurAddresses returns a list of our addresses. func (a *AddrBook) OurAddresses() []*NetAddress { addrs := []*NetAddress{} for _, addr := range a.ourAddrs { @@ -156,18 +169,20 @@ func (a *AddrBook) OurAddresses() []*NetAddress { return addrs } +// AddAddress adds the given address as received from the given source. // NOTE: addr must not be nil -func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) { +func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) error { a.mtx.Lock() defer a.mtx.Unlock() - a.Logger.Info("Add address to book", "addr", addr, "src", src) - a.addAddress(addr, src) + return a.addAddress(addr, src) } +// NeedMoreAddrs returns true if there are not have enough addresses in the book. func (a *AddrBook) NeedMoreAddrs() bool { return a.Size() < needAddressThreshold } +// Size returns the number of addresses in the book. func (a *AddrBook) Size() int { a.mtx.Lock() defer a.mtx.Unlock() @@ -178,7 +193,12 @@ func (a *AddrBook) size() int { return a.nNew + a.nOld } -// Pick an address to connect to with new/old bias. +// PickAddress picks an address to connect to. +// The address is picked randomly from an old or new bucket according +// to the newBias argument, which must be between [0, 100] (or else is truncated to that range) +// and determines how biased we are to pick an address from a new bucket. +// PickAddress returns nil if the AddrBook is empty or if we try to pick +// from an empty bucket. 
func (a *AddrBook) PickAddress(newBias int) *NetAddress { a.mtx.Lock() defer a.mtx.Unlock() @@ -197,40 +217,34 @@ func (a *AddrBook) PickAddress(newBias int) *NetAddress { oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias)) newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias) - if (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation { - // pick random Old bucket. - var bucket map[string]*knownAddress = nil - for len(bucket) == 0 { - bucket = a.addrOld[a.rand.Intn(len(a.addrOld))] + // pick a random peer from a random bucket + var bucket map[string]*knownAddress + pickFromOldBucket := (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation + if (pickFromOldBucket && a.nOld == 0) || + (!pickFromOldBucket && a.nNew == 0) { + return nil + } + // loop until we pick a random non-empty bucket + for len(bucket) == 0 { + if pickFromOldBucket { + bucket = a.bucketsOld[a.rand.Intn(len(a.bucketsOld))] + } else { + bucket = a.bucketsNew[a.rand.Intn(len(a.bucketsNew))] } - // pick a random ka from bucket. - randIndex := a.rand.Intn(len(bucket)) - for _, ka := range bucket { - if randIndex == 0 { - return ka.Addr - } - randIndex-- + } + // pick a random index and loop over the map to return that index + randIndex := a.rand.Intn(len(bucket)) + for _, ka := range bucket { + if randIndex == 0 { + return ka.Addr } - cmn.PanicSanity("Should not happen") - } else { - // pick random New bucket. - var bucket map[string]*knownAddress = nil - for len(bucket) == 0 { - bucket = a.addrNew[a.rand.Intn(len(a.addrNew))] - } - // pick a random ka from bucket. - randIndex := a.rand.Intn(len(bucket)) - for _, ka := range bucket { - if randIndex == 0 { - return ka.Addr - } - randIndex-- - } - cmn.PanicSanity("Should not happen") + randIndex-- } return nil } +// MarkGood marks the peer as good and moves it into an "old" bucket. +// XXX: we never call this! 
func (a *AddrBook) MarkGood(addr *NetAddress) { a.mtx.Lock() defer a.mtx.Unlock() @@ -244,6 +258,7 @@ func (a *AddrBook) MarkGood(addr *NetAddress) { } } +// MarkAttempt marks that an attempt was made to connect to the address. func (a *AddrBook) MarkAttempt(addr *NetAddress) { a.mtx.Lock() defer a.mtx.Unlock() @@ -297,6 +312,7 @@ func (a *AddrBook) GetSelection() []*NetAddress { // Fisher-Yates shuffle the array. We only need to do the first // `numAddresses' since we are throwing the rest. + // XXX: What's the point of this if we already loop randomly through addrLookup ? for i := 0; i < numAddresses; i++ { // pick a number between current index and the end j := rand.Intn(len(allAddr)-i) + i @@ -355,7 +371,7 @@ func (a *AddrBook) loadFromFile(filePath string) bool { if err != nil { cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err)) } - defer r.Close() + defer r.Close() // nolint: errcheck aJSON := &addrBookJSON{} dec := json.NewDecoder(r) err = dec.Decode(aJSON) @@ -366,7 +382,7 @@ func (a *AddrBook) loadFromFile(filePath string) bool { // Restore all the fields... 
// Restore the key a.key = aJSON.Key - // Restore .addrNew & .addrOld + // Restore .bucketsNew & .bucketsOld for _, ka := range aJSON.Addrs { for _, bucketIndex := range ka.Buckets { bucket := a.getBucket(ka.BucketType, bucketIndex) @@ -391,28 +407,29 @@ func (a *AddrBook) Save() { /* Private methods */ func (a *AddrBook) saveRoutine() { - dumpAddressTicker := time.NewTicker(dumpAddressInterval) + defer a.wg.Done() + + saveFileTicker := time.NewTicker(dumpAddressInterval) out: for { select { - case <-dumpAddressTicker.C: + case <-saveFileTicker.C: a.saveToFile(a.filePath) case <-a.Quit: break out } } - dumpAddressTicker.Stop() + saveFileTicker.Stop() a.saveToFile(a.filePath) - a.wg.Done() a.Logger.Info("Address handler done") } func (a *AddrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress { switch bucketType { case bucketTypeNew: - return a.addrNew[bucketIdx] + return a.bucketsNew[bucketIdx] case bucketTypeOld: - return a.addrOld[bucketIdx] + return a.bucketsOld[bucketIdx] default: cmn.PanicSanity("Should not happen") return nil @@ -467,7 +484,7 @@ func (a *AddrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool { } addrStr := ka.Addr.String() - bucket := a.getBucket(bucketTypeNew, bucketIdx) + bucket := a.getBucket(bucketTypeOld, bucketIdx) // Already exists? if _, ok := bucket[addrStr]; ok { @@ -533,14 +550,13 @@ func (a *AddrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress { return oldest } -func (a *AddrBook) addAddress(addr, src *NetAddress) { +func (a *AddrBook) addAddress(addr, src *NetAddress) error { if a.routabilityStrict && !addr.Routable() { - a.Logger.Error(cmn.Fmt("Cannot add non-routable address %v", addr)) - return + return fmt.Errorf("Cannot add non-routable address %v", addr) } if _, ok := a.ourAddrs[addr.String()]; ok { // Ignore our own listener address. 
- return + return fmt.Errorf("Cannot add ourselves with address %v", addr) } ka := a.addrLookup[addr.String()] @@ -548,16 +564,16 @@ func (a *AddrBook) addAddress(addr, src *NetAddress) { if ka != nil { // Already old. if ka.isOld() { - return + return nil } // Already in max new buckets. if len(ka.Buckets) == maxNewBucketsPerAddress { - return + return nil } // The more entries we have, the less likely we are to add more. factor := int32(2 * len(ka.Buckets)) if a.rand.Int31n(factor) != 0 { - return + return nil } } else { ka = newKnownAddress(addr, src) @@ -567,12 +583,13 @@ func (a *AddrBook) addAddress(addr, src *NetAddress) { a.addToNewBucket(ka, bucket) a.Logger.Info("Added new address", "address", addr, "total", a.size()) + return nil } // Make space in the new buckets by expiring the really bad entries. // If no bad entries are available we remove the oldest. func (a *AddrBook) expireNew(bucketIdx int) { - for addrStr, ka := range a.addrNew[bucketIdx] { + for addrStr, ka := range a.bucketsNew[bucketIdx] { // If an entry is bad, throw it away if ka.isBad() { a.Logger.Info(cmn.Fmt("expiring bad address %v", addrStr)) @@ -674,8 +691,8 @@ func (a *AddrBook) calcOldBucket(addr *NetAddress) int { } // Return a string representing the network group of this address. -// This is the /16 for IPv6, the /32 (/36 for he.net) for IPv6, the string -// "local" for a local address and the string "unroutable for an unroutable +// This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string +// "local" for a local address and the string "unroutable" for an unroutable // address. 
func (a *AddrBook) groupKey(na *NetAddress) string { if a.routabilityStrict && na.Local() { @@ -801,8 +818,8 @@ func (ka *knownAddress) removeBucketRef(bucketIdx int) int { } /* - An address is bad if the address in question has not been tried in the last - minute and meets one of the following criteria: + An address is bad if the address in question is a New address, has not been tried in the last + minute, and meets one of the following criteria: 1) It claims to be from the future 2) It hasn't been seen in over a month @@ -811,14 +828,23 @@ func (ka *knownAddress) removeBucketRef(bucketIdx int) int { All addresses that meet these criteria are assumed to be worthless and not worth keeping hold of. + + XXX: so a good peer needs us to call MarkGood before the conditions above are reached! */ func (ka *knownAddress) isBad() bool { + // Is Old --> good + if ka.BucketType == bucketTypeOld { + return false + } + // Has been attempted in the last minute --> good if ka.LastAttempt.Before(time.Now().Add(-1 * time.Minute)) { return false } - // Over a month old? + // Too old? + // XXX: does this mean if we've kept a connection up for this long we'll disconnect?! + // and shouldn't it be .Before ? if ka.LastAttempt.After(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) { return true } @@ -829,6 +855,7 @@ func (ka *knownAddress) isBad() bool { } // Hasn't succeeded in too long? + // XXX: does this mean if we've kept a connection up for this long we'll disconnect?! 
if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) && ka.Attempts >= maxFailures { return true diff --git a/p2p/addrbook_test.go b/p2p/addrbook_test.go index 9b83be18..d84c008e 100644 --- a/p2p/addrbook_test.go +++ b/p2p/addrbook_test.go @@ -23,6 +23,42 @@ func createTempFileName(prefix string) string { return fname } +func TestAddrBookPickAddress(t *testing.T) { + assert := assert.New(t) + fname := createTempFileName("addrbook_test") + + // 0 addresses + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + assert.Zero(book.Size()) + + addr := book.PickAddress(50) + assert.Nil(addr, "expected no address") + + randAddrs := randNetAddressPairs(t, 1) + addrSrc := randAddrs[0] + book.AddAddress(addrSrc.addr, addrSrc.src) + + // pick an address when we only have new address + addr = book.PickAddress(0) + assert.NotNil(addr, "expected an address") + addr = book.PickAddress(50) + assert.NotNil(addr, "expected an address") + addr = book.PickAddress(100) + assert.NotNil(addr, "expected an address") + + // pick an address when we only have old address + book.MarkGood(addrSrc.addr) + addr = book.PickAddress(0) + assert.NotNil(addr, "expected an address") + addr = book.PickAddress(50) + assert.NotNil(addr, "expected an address") + + // in this case, nNew==0 but we biased 100% to new, so we return nil + addr = book.PickAddress(100) + assert.Nil(addr, "did not expected an address") +} + func TestAddrBookSaveLoad(t *testing.T) { fname := createTempFileName("addrbook_test") @@ -76,6 +112,7 @@ func TestAddrBookLookup(t *testing.T) { } func TestAddrBookPromoteToOld(t *testing.T) { + assert := assert.New(t) fname := createTempFileName("addrbook_test") randAddrs := randNetAddressPairs(t, 100) @@ -106,6 +143,8 @@ func TestAddrBookPromoteToOld(t *testing.T) { if len(selection) > book.Size() { t.Errorf("selection could not be bigger than the book") } + + assert.Equal(book.Size(), 100, "expecting book size to be 100") } func 
TestAddrBookHandlesDuplicates(t *testing.T) { diff --git a/p2p/conn_go110.go b/p2p/conn_go110.go new file mode 100644 index 00000000..2fca7c3d --- /dev/null +++ b/p2p/conn_go110.go @@ -0,0 +1,15 @@ +// +build go1.10 + +package p2p + +// Go1.10 has a proper net.Conn implementation that +// has the SetDeadline method implemented as per +// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706 +// lest we run into problems like +// https://github.com/tendermint/tendermint/issues/851 + +import "net" + +func netPipe() (net.Conn, net.Conn) { + return net.Pipe() +} diff --git a/p2p/conn_notgo110.go b/p2p/conn_notgo110.go new file mode 100644 index 00000000..a5c2f741 --- /dev/null +++ b/p2p/conn_notgo110.go @@ -0,0 +1,32 @@ +// +build !go1.10 + +package p2p + +import ( + "net" + "time" +) + +// Only Go1.10 has a proper net.Conn implementation that +// has the SetDeadline method implemented as per +// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706 +// lest we run into problems like +// https://github.com/tendermint/tendermint/issues/851 +// so for go versions < Go1.10 use our custom net.Conn creator +// that doesn't return an `Unimplemented error` for net.Conn. +// Before https://github.com/tendermint/tendermint/commit/49faa79bdce5663894b3febbf4955fb1d172df04 +// we hadn't cared about errors from SetDeadline so swallow them up anyways. 
+type pipe struct { + net.Conn +} + +func (p *pipe) SetDeadline(t time.Time) error { + return nil +} + +func netPipe() (net.Conn, net.Conn) { + p1, p2 := net.Pipe() + return &pipe{p1}, &pipe{p2} +} + +var _ net.Conn = (*pipe)(nil) diff --git a/p2p/connection.go b/p2p/connection.go index 97d54635..b0167403 100644 --- a/p2p/connection.go +++ b/p2p/connection.go @@ -11,10 +11,13 @@ import ( "time" wire "github.com/tendermint/go-wire" + tmlegacy "github.com/tendermint/go-wire/nowriter/tmlegacy" cmn "github.com/tendermint/tmlibs/common" flow "github.com/tendermint/tmlibs/flowrate" ) +var legacy = tmlegacy.TMEncoderLegacy{} + const ( numBatchMsgPackets = 10 minReadBufferSize = 1024 @@ -146,9 +149,8 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec var channels = []*Channel{} for _, desc := range chDescs { - descCopy := *desc // copy the desc else unsafe access across connections - channel := newChannel(mconn, &descCopy) - channelsIdx[channel.id] = channel + channel := newChannel(mconn, *desc) + channelsIdx[channel.desc.ID] = channel channels = append(channels, channel) } mconn.channels = channels @@ -161,7 +163,9 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec // OnStart implements BaseService func (c *MConnection) OnStart() error { - c.BaseService.OnStart() + if err := c.BaseService.OnStart(); err != nil { + return err + } c.quit = make(chan struct{}) c.flushTimer = cmn.NewThrottleTimer("flush", c.config.flushThrottle) c.pingTimer = cmn.NewRepeatTimer("ping", pingTimeout) @@ -180,7 +184,7 @@ func (c *MConnection) OnStop() { if c.quit != nil { close(c.quit) } - c.conn.Close() + c.conn.Close() // nolint: errcheck // We can't close pong safely here because // recvRoutine may write to it after we've stopped. 
// Though it doesn't need to get closed at all, @@ -308,12 +312,12 @@ FOR_LOOP: } case <-c.pingTimer.Ch: c.Logger.Debug("Send Ping") - wire.WriteByte(packetTypePing, c.bufWriter, &n, &err) + legacy.WriteOctet(packetTypePing, c.bufWriter, &n, &err) c.sendMonitor.Update(int(n)) c.flush() case <-c.pong: c.Logger.Debug("Send Pong") - wire.WriteByte(packetTypePong, c.bufWriter, &n, &err) + legacy.WriteOctet(packetTypePong, c.bufWriter, &n, &err) c.sendMonitor.Update(int(n)) c.flush() case <-c.quit: @@ -372,7 +376,7 @@ func (c *MConnection) sendMsgPacket() bool { continue } // Get ratio, and keep track of lowest ratio. - ratio := float32(channel.recentlySent) / float32(channel.priority) + ratio := float32(channel.recentlySent) / float32(channel.desc.Priority) if ratio < leastRatio { leastRatio = ratio leastChannel = channel @@ -413,7 +417,7 @@ FOR_LOOP: // Peek into bufReader for debugging if numBytes := c.bufReader.Buffered(); numBytes > 0 { log.Info("Peek connection buffer", "numBytes", numBytes, "bytes", log15.Lazy{func() []byte { - bytes, err := c.bufReader.Peek(MinInt(numBytes, 100)) + bytes, err := c.bufReader.Peek(cmn.MinInt(numBytes, 100)) if err == nil { return bytes } else { @@ -459,8 +463,11 @@ FOR_LOOP: } channel, ok := c.channelsIdx[pkt.ChannelID] if !ok || channel == nil { - cmn.PanicQ(cmn.Fmt("Unknown channel %X", pkt.ChannelID)) + err := fmt.Errorf("Unknown channel %X", pkt.ChannelID) + c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) + c.stopForError(err) } + msgBytes, err := channel.recvMsgPacket(pkt) if err != nil { if c.IsRunning() { @@ -475,7 +482,9 @@ FOR_LOOP: c.onReceive(pkt.ChannelID, msgBytes) } default: - cmn.PanicSanity(cmn.Fmt("Unknown message type %X", pktType)) + err := fmt.Errorf("Unknown message type %X", pktType) + c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) + c.stopForError(err) } // TODO: shouldn't this go in the sendRoutine? 
@@ -511,10 +520,10 @@ func (c *MConnection) Status() ConnectionStatus { status.Channels = make([]ChannelStatus, len(c.channels)) for i, channel := range c.channels { status.Channels[i] = ChannelStatus{ - ID: channel.id, + ID: channel.desc.ID, SendQueueCapacity: cap(channel.sendQueue), SendQueueSize: int(channel.sendQueueSize), // TODO use atomic - Priority: channel.priority, + Priority: channel.desc.Priority, RecentlySent: channel.recentlySent, } } @@ -531,7 +540,7 @@ type ChannelDescriptor struct { RecvMessageCapacity int } -func (chDesc *ChannelDescriptor) FillDefaults() { +func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { if chDesc.SendQueueCapacity == 0 { chDesc.SendQueueCapacity = defaultSendQueueCapacity } @@ -541,36 +550,34 @@ func (chDesc *ChannelDescriptor) FillDefaults() { if chDesc.RecvMessageCapacity == 0 { chDesc.RecvMessageCapacity = defaultRecvMessageCapacity } + filled = chDesc + return } // TODO: lowercase. // NOTE: not goroutine-safe. type Channel struct { conn *MConnection - desc *ChannelDescriptor - id byte + desc ChannelDescriptor sendQueue chan []byte sendQueueSize int32 // atomic. 
recving []byte sending []byte - priority int recentlySent int64 // exponential moving average maxMsgPacketPayloadSize int } -func newChannel(conn *MConnection, desc *ChannelDescriptor) *Channel { - desc.FillDefaults() +func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel { + desc = desc.FillDefaults() if desc.Priority <= 0 { - cmn.PanicSanity("Channel default priority must be a postive integer") + cmn.PanicSanity("Channel default priority must be a positive integer") } return &Channel{ conn: conn, desc: desc, - id: desc.ID, sendQueue: make(chan []byte, desc.SendQueueCapacity), recving: make([]byte, 0, desc.RecvBufferCapacity), - priority: desc.Priority, maxMsgPacketPayloadSize: conn.config.maxMsgPacketPayloadSize, } } @@ -629,7 +636,7 @@ func (ch *Channel) isSendPending() bool { // Not goroutine-safe func (ch *Channel) nextMsgPacket() msgPacket { packet := msgPacket{} - packet.ChannelID = byte(ch.id) + packet.ChannelID = byte(ch.desc.ID) maxSize := ch.maxMsgPacketPayloadSize packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))] if len(ch.sending) <= maxSize { @@ -648,14 +655,18 @@ func (ch *Channel) nextMsgPacket() msgPacket { func (ch *Channel) writeMsgPacketTo(w io.Writer) (n int, err error) { packet := ch.nextMsgPacket() // log.Debug("Write Msg Packet", "conn", ch.conn, "packet", packet) - wire.WriteByte(packetTypeMsg, w, &n, &err) - wire.WriteBinary(packet, w, &n, &err) + writeMsgPacketTo(packet, w, &n, &err) if err == nil { ch.recentlySent += int64(n) } return } +func writeMsgPacketTo(packet msgPacket, w io.Writer, n *int, err *error) { + legacy.WriteOctet(packetTypeMsg, w, n, err) + wire.WriteBinary(packet, w, n, err) +} + // Handles incoming msgPackets. Returns a msg bytes if msg is complete. 
// Not goroutine-safe func (ch *Channel) recvMsgPacket(packet msgPacket) ([]byte, error) { diff --git a/p2p/connection_test.go b/p2p/connection_test.go index 71c3d64c..2a64764e 100644 --- a/p2p/connection_test.go +++ b/p2p/connection_test.go @@ -1,4 +1,4 @@ -package p2p_test +package p2p import ( "net" @@ -7,11 +7,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - p2p "github.com/tendermint/tendermint/p2p" + wire "github.com/tendermint/go-wire" "github.com/tendermint/tmlibs/log" ) -func createMConnection(conn net.Conn) *p2p.MConnection { +func createTestMConnection(conn net.Conn) *MConnection { onReceive := func(chID byte, msgBytes []byte) { } onError := func(r interface{}) { @@ -21,9 +21,9 @@ func createMConnection(conn net.Conn) *p2p.MConnection { return c } -func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msgBytes []byte), onError func(r interface{})) *p2p.MConnection { - chDescs := []*p2p.ChannelDescriptor{&p2p.ChannelDescriptor{ID: 0x01, Priority: 1, SendQueueCapacity: 1}} - c := p2p.NewMConnection(conn, chDescs, onReceive, onError) +func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msgBytes []byte), onError func(r interface{})) *MConnection { + chDescs := []*ChannelDescriptor{&ChannelDescriptor{ID: 0x01, Priority: 1, SendQueueCapacity: 1}} + c := NewMConnection(conn, chDescs, onReceive, onError) c.SetLogger(log.TestingLogger()) return c } @@ -31,12 +31,12 @@ func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msg func TestMConnectionSend(t *testing.T) { assert, require := assert.New(t), require.New(t) - server, client := net.Pipe() - defer server.Close() - defer client.Close() + server, client := netPipe() + defer server.Close() // nolint: errcheck + defer client.Close() // nolint: errcheck - mconn := createMConnection(client) - _, err := mconn.Start() + mconn := createTestMConnection(client) + err := mconn.Start() require.Nil(err) defer 
mconn.Stop() @@ -44,12 +44,18 @@ func TestMConnectionSend(t *testing.T) { assert.True(mconn.Send(0x01, msg)) // Note: subsequent Send/TrySend calls could pass because we are reading from // the send queue in a separate goroutine. - server.Read(make([]byte, len(msg))) + _, err = server.Read(make([]byte, len(msg))) + if err != nil { + t.Error(err) + } assert.True(mconn.CanSend(0x01)) msg = "Spider-Man" assert.True(mconn.TrySend(0x01, msg)) - server.Read(make([]byte, len(msg))) + _, err = server.Read(make([]byte, len(msg))) + if err != nil { + t.Error(err) + } assert.False(mconn.CanSend(0x05), "CanSend should return false because channel is unknown") assert.False(mconn.Send(0x05, "Absorbing Man"), "Send should return false because channel is unknown") @@ -58,9 +64,9 @@ func TestMConnectionSend(t *testing.T) { func TestMConnectionReceive(t *testing.T) { assert, require := assert.New(t), require.New(t) - server, client := net.Pipe() - defer server.Close() - defer client.Close() + server, client := netPipe() + defer server.Close() // nolint: errcheck + defer client.Close() // nolint: errcheck receivedCh := make(chan []byte) errorsCh := make(chan interface{}) @@ -71,12 +77,12 @@ func TestMConnectionReceive(t *testing.T) { errorsCh <- r } mconn1 := createMConnectionWithCallbacks(client, onReceive, onError) - _, err := mconn1.Start() + err := mconn1.Start() require.Nil(err) defer mconn1.Stop() - mconn2 := createMConnection(server) - _, err = mconn2.Start() + mconn2 := createTestMConnection(server) + err = mconn2.Start() require.Nil(err) defer mconn2.Stop() @@ -96,12 +102,12 @@ func TestMConnectionReceive(t *testing.T) { func TestMConnectionStatus(t *testing.T) { assert, require := assert.New(t), require.New(t) - server, client := net.Pipe() - defer server.Close() - defer client.Close() + server, client := netPipe() + defer server.Close() // nolint: errcheck + defer client.Close() // nolint: errcheck - mconn := createMConnection(client) - _, err := mconn.Start() + mconn := 
createTestMConnection(client) + err := mconn.Start() require.Nil(err) defer mconn.Stop() @@ -113,9 +119,9 @@ func TestMConnectionStatus(t *testing.T) { func TestMConnectionStopsAndReturnsError(t *testing.T) { assert, require := assert.New(t), require.New(t) - server, client := net.Pipe() - defer server.Close() - defer client.Close() + server, client := netPipe() + defer server.Close() // nolint: errcheck + defer client.Close() // nolint: errcheck receivedCh := make(chan []byte) errorsCh := make(chan interface{}) @@ -126,11 +132,13 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { errorsCh <- r } mconn := createMConnectionWithCallbacks(client, onReceive, onError) - _, err := mconn.Start() + err := mconn.Start() require.Nil(err) defer mconn.Stop() - client.Close() + if err := client.Close(); err != nil { + t.Error(err) + } select { case receivedBytes := <-receivedCh: @@ -142,3 +150,166 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { t.Fatal("Did not receive error in 500ms") } } + +func newClientAndServerConnsForReadErrors(require *require.Assertions, chOnErr chan struct{}) (*MConnection, *MConnection) { + server, client := netPipe() + + onReceive := func(chID byte, msgBytes []byte) {} + onError := func(r interface{}) {} + + // create client conn with two channels + chDescs := []*ChannelDescriptor{ + {ID: 0x01, Priority: 1, SendQueueCapacity: 1}, + {ID: 0x02, Priority: 1, SendQueueCapacity: 1}, + } + mconnClient := NewMConnection(client, chDescs, onReceive, onError) + mconnClient.SetLogger(log.TestingLogger().With("module", "client")) + err := mconnClient.Start() + require.Nil(err) + + // create server conn with 1 channel + // it fires on chOnErr when there's an error + serverLogger := log.TestingLogger().With("module", "server") + onError = func(r interface{}) { + chOnErr <- struct{}{} + } + mconnServer := createMConnectionWithCallbacks(server, onReceive, onError) + mconnServer.SetLogger(serverLogger) + err = mconnServer.Start() + 
require.Nil(err) + return mconnClient, mconnServer +} + +func expectSend(ch chan struct{}) bool { + after := time.After(time.Second * 5) + select { + case <-ch: + return true + case <-after: + return false + } +} + +func TestMConnectionReadErrorBadEncoding(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + chOnErr := make(chan struct{}) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr) + defer mconnClient.Stop() + defer mconnServer.Stop() + + client := mconnClient.conn + msg := "Ant-Man" + + // send badly encoded msgPacket + var n int + var err error + wire.WriteByte(packetTypeMsg, client, &n, &err) + wire.WriteByteSlice([]byte(msg), client, &n, &err) + assert.True(expectSend(chOnErr), "badly encoded msgPacket") +} + +func TestMConnectionReadErrorUnknownChannel(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + chOnErr := make(chan struct{}) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr) + defer mconnClient.Stop() + defer mconnServer.Stop() + + msg := "Ant-Man" + + // fail to send msg on channel unknown by client + assert.False(mconnClient.Send(0x03, msg)) + + // send msg on channel unknown by the server. 
+ // should cause an error + assert.True(mconnClient.Send(0x02, msg)) + assert.True(expectSend(chOnErr), "unknown channel") +} + +func TestMConnectionReadErrorLongMessage(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + chOnErr := make(chan struct{}) + chOnRcv := make(chan struct{}) + + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr) + defer mconnClient.Stop() + defer mconnServer.Stop() + + mconnServer.onReceive = func(chID byte, msgBytes []byte) { + chOnRcv <- struct{}{} + } + + client := mconnClient.conn + + // send msg thats just right + var n int + var err error + packet := msgPacket{ + ChannelID: 0x01, + Bytes: make([]byte, mconnClient.config.maxMsgPacketTotalSize()-5), + EOF: 1, + } + writeMsgPacketTo(packet, client, &n, &err) + assert.True(expectSend(chOnRcv), "msg just right") + + // send msg thats too long + packet = msgPacket{ + ChannelID: 0x01, + Bytes: make([]byte, mconnClient.config.maxMsgPacketTotalSize()-4), + EOF: 1, + } + writeMsgPacketTo(packet, client, &n, &err) + assert.True(expectSend(chOnErr), "msg too long") +} + +func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + chOnErr := make(chan struct{}) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr) + defer mconnClient.Stop() + defer mconnServer.Stop() + + // send msg with unknown msg type + var n int + var err error + wire.WriteByte(0x04, mconnClient.conn, &n, &err) + assert.True(expectSend(chOnErr), "unknown msg type") +} + +func TestMConnectionTrySend(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + server, client := netPipe() + defer server.Close() + defer client.Close() + + mconn := createTestMConnection(client) + err := mconn.Start() + require.Nil(err) + defer mconn.Stop() + + msg := "Semicolon-Woman" + resultCh := make(chan string, 2) + assert.True(mconn.TrySend(0x01, msg)) + server.Read(make([]byte, len(msg))) + 
assert.True(mconn.CanSend(0x01)) + assert.True(mconn.TrySend(0x01, msg)) + assert.False(mconn.CanSend(0x01)) + go func() { + mconn.TrySend(0x01, msg) + resultCh <- "TrySend" + }() + go func() { + mconn.Send(0x01, msg) + resultCh <- "Send" + }() + assert.False(mconn.CanSend(0x01)) + assert.False(mconn.TrySend(0x01, msg)) + assert.Equal("TrySend", <-resultCh) + server.Read(make([]byte, len(msg))) + assert.Equal("Send", <-resultCh) // Order constrained by parallel blocking above +} diff --git a/p2p/fuzz.go b/p2p/fuzz.go index aefac986..fa16e4a2 100644 --- a/p2p/fuzz.go +++ b/p2p/fuzz.go @@ -124,7 +124,7 @@ func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error { func (fc *FuzzedConnection) randomDuration() time.Duration { maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000) - return time.Millisecond * time.Duration(rand.Int()%maxDelayMillis) + return time.Millisecond * time.Duration(rand.Int()%maxDelayMillis) // nolint: gas } // implements the fuzz (delay, kill conn) @@ -143,7 +143,7 @@ func (fc *FuzzedConnection) fuzz() bool { } else if r < fc.config.ProbDropRW+fc.config.ProbDropConn { // XXX: can't this fail because machine precision? // XXX: do we need an error? 
- fc.Close() + fc.Close() // nolint: errcheck, gas return true } else if r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep { time.Sleep(fc.randomDuration()) diff --git a/p2p/listener.go b/p2p/listener.go index 97139097..a0cc2732 100644 --- a/p2p/listener.go +++ b/p2p/listener.go @@ -16,7 +16,7 @@ type Listener interface { InternalAddress() *NetAddress ExternalAddress() *NetAddress String() string - Stop() bool + Stop() error } // Implements Listener @@ -100,19 +100,24 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log connections: make(chan net.Conn, numBufferedConnections), } dl.BaseService = *cmn.NewBaseService(logger, "DefaultListener", dl) - dl.Start() // Started upon construction + err = dl.Start() // Started upon construction + if err != nil { + logger.Error("Error starting base service", "err", err) + } return dl } func (l *DefaultListener) OnStart() error { - l.BaseService.OnStart() + if err := l.BaseService.OnStart(); err != nil { + return err + } go l.listenRoutine() return nil } func (l *DefaultListener) OnStop() { l.BaseService.OnStop() - l.listener.Close() + l.listener.Close() // nolint: errcheck } // Accept connections and pass on the channel diff --git a/p2p/listener_test.go b/p2p/listener_test.go index c3d33a9a..92018e0a 100644 --- a/p2p/listener_test.go +++ b/p2p/listener_test.go @@ -25,7 +25,12 @@ func TestListener(t *testing.T) { } msg := []byte("hi!") - go connIn.Write(msg) + go func() { + _, err := connIn.Write(msg) + if err != nil { + t.Error(err) + } + }() b := make([]byte, 32) n, err := connOut.Read(b) if err != nil { diff --git a/p2p/netaddress_test.go b/p2p/netaddress_test.go index 8c60da25..7e899a31 100644 --- a/p2p/netaddress_test.go +++ b/p2p/netaddress_test.go @@ -31,9 +31,9 @@ func TestNewNetAddressString(t *testing.T) { }{ {"127.0.0.1:8080", true}, // {"127.0.0:8080", false}, - {"a", false}, - {"127.0.0.1:a", false}, - {"a:8080", false}, + {"notahost", false}, + 
{"127.0.0.1:notapath", false}, + {"notahost:8080", false}, {"8082", false}, {"127.0.0:8080000", false}, } diff --git a/p2p/peer.go b/p2p/peer.go index 3652c465..cc9c14c3 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -11,6 +11,7 @@ import ( crypto "github.com/tendermint/go-crypto" wire "github.com/tendermint/go-wire" cmn "github.com/tendermint/tmlibs/common" + "github.com/tendermint/tmlibs/log" ) // Peer is an interface representing a peer connected on a reactor. @@ -87,7 +88,9 @@ func newOutboundPeer(addr *NetAddress, reactorsByCh map[byte]Reactor, chDescs [] peer, err := newPeerFromConnAndConfig(conn, true, reactorsByCh, chDescs, onPeerError, ourNodePrivKey, config) if err != nil { - conn.Close() + if err := conn.Close(); err != nil { + return nil, err + } return nil, err } return peer, nil @@ -112,7 +115,9 @@ func newPeerFromConnAndConfig(rawConn net.Conn, outbound bool, reactorsByCh map[ // Encrypt connection if config.AuthEnc { - conn.SetDeadline(time.Now().Add(config.HandshakeTimeout * time.Second)) + if err := conn.SetDeadline(time.Now().Add(config.HandshakeTimeout * time.Second)); err != nil { + return nil, errors.Wrap(err, "Error setting deadline while encrypting connection") + } var err error conn, err = MakeSecretConnection(conn, ourNodePrivKey) @@ -136,9 +141,14 @@ func newPeerFromConnAndConfig(rawConn net.Conn, outbound bool, reactorsByCh map[ return p, nil } +func (p *peer) SetLogger(l log.Logger) { + p.Logger = l + p.mconn.SetLogger(l) +} + // CloseConn should be used when the peer was created, but never started. func (p *peer) CloseConn() { - p.conn.Close() + p.conn.Close() // nolint: errcheck } // makePersistent marks the peer as persistent. 
@@ -159,7 +169,9 @@ func (p *peer) IsPersistent() bool { // NOTE: blocking func (p *peer) HandshakeTimeout(ourNodeInfo *NodeInfo, timeout time.Duration) error { // Set deadline for handshake so we don't block forever on conn.ReadFull - p.conn.SetDeadline(time.Now().Add(timeout)) + if err := p.conn.SetDeadline(time.Now().Add(timeout)); err != nil { + return errors.Wrap(err, "Error setting deadline") + } var peerNodeInfo = new(NodeInfo) var err1 error @@ -190,7 +202,9 @@ func (p *peer) HandshakeTimeout(ourNodeInfo *NodeInfo, timeout time.Duration) er } // Remove deadline - p.conn.SetDeadline(time.Time{}) + if err := p.conn.SetDeadline(time.Time{}); err != nil { + return errors.Wrap(err, "Error removing deadline") + } peerNodeInfo.RemoteAddr = p.Addr().String() @@ -210,7 +224,7 @@ func (p *peer) PubKey() crypto.PubKeyEd25519 { if p.config.AuthEnc { return p.conn.(*SecretConnection).RemotePubKey() } - if p.NodeInfo == nil { + if p.NodeInfo() == nil { panic("Attempt to get peer's PubKey before calling Handshake") } return p.PubKey() @@ -218,8 +232,10 @@ func (p *peer) PubKey() crypto.PubKeyEd25519 { // OnStart implements BaseService. func (p *peer) OnStart() error { - p.BaseService.OnStart() - _, err := p.mconn.Start() + if err := p.BaseService.OnStart(); err != nil { + return err + } + err := p.mconn.Start() return err } @@ -306,6 +322,9 @@ func (p *peer) Key() string { // NodeInfo returns a copy of the peer's NodeInfo. 
func (p *peer) NodeInfo() *NodeInfo { + if p.nodeInfo == nil { + return nil + } n := *p.nodeInfo // copy return &n } diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index e3745525..69430052 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -28,7 +28,9 @@ func TestPeerSetAddRemoveOne(t *testing.T) { var peerList []Peer for i := 0; i < 5; i++ { p := randPeer() - peerSet.Add(p) + if err := peerSet.Add(p); err != nil { + t.Error(err) + } peerList = append(peerList, p) } @@ -48,7 +50,9 @@ func TestPeerSetAddRemoveOne(t *testing.T) { // 2. Next we are testing removing the peer at the end // a) Replenish the peerSet for _, peer := range peerList { - peerSet.Add(peer) + if err := peerSet.Add(peer); err != nil { + t.Error(err) + } } // b) In reverse, remove each element diff --git a/p2p/peer_test.go b/p2p/peer_test.go index ba52b22a..b53b0bb1 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -23,7 +23,8 @@ func TestPeerBasic(t *testing.T) { p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), DefaultPeerConfig()) require.Nil(err) - p.Start() + err = p.Start() + require.Nil(err) defer p.Stop() assert.True(p.IsRunning()) @@ -49,7 +50,8 @@ func TestPeerWithoutAuthEnc(t *testing.T) { p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config) require.Nil(err) - p.Start() + err = p.Start() + require.Nil(err) defer p.Stop() assert.True(p.IsRunning()) @@ -69,7 +71,9 @@ func TestPeerSend(t *testing.T) { p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config) require.Nil(err) - p.Start() + err = p.Start() + require.Nil(err) + defer p.Stop() assert.True(p.CanSend(0x01)) @@ -78,7 +82,7 @@ func TestPeerSend(t *testing.T) { func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig) (*peer, error) { chDescs := []*ChannelDescriptor{ - &ChannelDescriptor{ID: 0x01, Priority: 1}, + {ID: 0x01, Priority: 1}, } reactorsByCh := map[byte]Reactor{0x01: NewTestReactor(chDescs, true)} pk := crypto.GenPrivKeyEd25519() @@ 
-148,7 +152,9 @@ func (p *remotePeer) accept(l net.Listener) { } select { case <-p.quit: - conn.Close() + if err := conn.Close(); err != nil { + golog.Fatal(err) + } return default: } diff --git a/p2p/pex_reactor.go b/p2p/pex_reactor.go index 54c2d06b..6e49f6d0 100644 --- a/p2p/pex_reactor.go +++ b/p2p/pex_reactor.go @@ -66,8 +66,13 @@ func NewPEXReactor(b *AddrBook) *PEXReactor { // OnStart implements BaseService func (r *PEXReactor) OnStart() error { - r.BaseReactor.OnStart() - r.book.Start() + if err := r.BaseReactor.OnStart(); err != nil { + return err + } + err := r.book.Start() + if err != nil && err != cmn.ErrAlreadyStarted { + return err + } go r.ensurePeersRoutine() go r.flushMsgCountByPeer() return nil @@ -82,7 +87,7 @@ func (r *PEXReactor) OnStop() { // GetChannels implements Reactor func (r *PEXReactor) GetChannels() []*ChannelDescriptor { return []*ChannelDescriptor{ - &ChannelDescriptor{ + { ID: PexChannel, Priority: 1, SendQueueCapacity: 10, @@ -103,7 +108,7 @@ func (r *PEXReactor) AddPeer(p Peer) { } else { // For inbound connections, the peer is its own source addr, err := NewNetAddressString(p.NodeInfo().ListenAddr) if err != nil { - // this should never happen + // peer gave us a bad ListenAddr. TODO: punish r.Logger.Error("Error in AddPeer: invalid peer address", "addr", p.NodeInfo().ListenAddr, "err", err) return } @@ -120,7 +125,12 @@ func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) { // Receive implements Reactor by handling incoming PEX messages. func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) { srcAddrStr := src.NodeInfo().RemoteAddr - srcAddr, _ := NewNetAddressString(srcAddrStr) + srcAddr, err := NewNetAddressString(srcAddrStr) + if err != nil { + // this should never happen. 
TODO: cancel conn + r.Logger.Error("Error in Receive: invalid peer address", "addr", srcAddrStr, "err", err) + return + } r.IncrementMsgCountForPeer(srcAddrStr) if r.ReachedMaxMsgCountForPeer(srcAddrStr) { @@ -143,7 +153,7 @@ func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) { r.SendAddrs(src, r.book.GetSelection()) case *pexAddrsMessage: // We received some peer addresses from src. - // (We don't want to get spammed with bad peers) + // TODO: (We don't want to get spammed with bad peers) for _, addr := range msg.Addrs { if addr != nil { r.book.AddAddress(addr, srcAddr) @@ -235,43 +245,29 @@ func (r *PEXReactor) ensurePeers() { return } - toDial := make(map[string]*NetAddress) + // bias to prefer more vetted peers when we have fewer connections. + // not perfect, but somewhate ensures that we prioritize connecting to more-vetted + newBias := cmn.MinInt(numOutPeers, 8)*10 + 10 - // Try to pick numToDial addresses to dial. - for i := 0; i < numToDial; i++ { - // The purpose of newBias is to first prioritize old (more vetted) peers - // when we have few connections, but to allow for new (less vetted) peers - // if we already have many connections. This algorithm isn't perfect, but - // it somewhat ensures that we prioritize connecting to more-vetted - // peers. - newBias := cmn.MinInt(numOutPeers, 8)*10 + 10 - var picked *NetAddress - // Try to fetch a new peer 3 times. - // This caps the maximum number of tries to 3 * numToDial. 
- for j := 0; j < 3; j++ { - try := r.book.PickAddress(newBias) - if try == nil { - break - } - _, alreadySelected := toDial[try.IP.String()] - alreadyDialing := r.Switch.IsDialing(try) - alreadyConnected := r.Switch.Peers().Has(try.IP.String()) - if alreadySelected || alreadyDialing || alreadyConnected { - // r.Logger.Info("Cannot dial address", "addr", try, - // "alreadySelected", alreadySelected, - // "alreadyDialing", alreadyDialing, - // "alreadyConnected", alreadyConnected) - continue - } else { - r.Logger.Info("Will dial address", "addr", try) - picked = try - break - } - } - if picked == nil { + toDial := make(map[string]*NetAddress) + // Try maxAttempts times to pick numToDial addresses to dial + maxAttempts := numToDial * 3 + for i := 0; i < maxAttempts && len(toDial) < numToDial; i++ { + try := r.book.PickAddress(newBias) + if try == nil { continue } - toDial[picked.IP.String()] = picked + if _, selected := toDial[try.IP.String()]; selected { + continue + } + if dialling := r.Switch.IsDialing(try); dialling { + continue + } + if connected := r.Switch.Peers().Has(try.IP.String()); connected { + continue + } + r.Logger.Info("Will dial address", "addr", try) + toDial[try.IP.String()] = try } // Dial picked addresses @@ -287,7 +283,7 @@ func (r *PEXReactor) ensurePeers() { // If we need more addresses, pick a random peer and ask for more. if r.book.NeedMoreAddrs() { if peers := r.Switch.Peers().List(); len(peers) > 0 { - i := rand.Int() % len(peers) + i := rand.Int() % len(peers) // nolint: gas peer := peers[i] r.Logger.Info("No addresses to dial. 
Sending pexRequest to random peer", "peer", peer) r.RequestPEX(peer) diff --git a/p2p/pex_reactor_test.go b/p2p/pex_reactor_test.go index b2c15ed8..e80840b1 100644 --- a/p2p/pex_reactor_test.go +++ b/p2p/pex_reactor_test.go @@ -1,6 +1,7 @@ package p2p import ( + "fmt" "io/ioutil" "math/rand" "os" @@ -19,7 +20,7 @@ func TestPEXReactorBasic(t *testing.T) { dir, err := ioutil.TempDir("", "pex_reactor") require.Nil(err) - defer os.RemoveAll(dir) + defer os.RemoveAll(dir) // nolint: errcheck book := NewAddrBook(dir+"addrbook.json", true) book.SetLogger(log.TestingLogger()) @@ -35,7 +36,7 @@ func TestPEXReactorAddRemovePeer(t *testing.T) { dir, err := ioutil.TempDir("", "pex_reactor") require.Nil(err) - defer os.RemoveAll(dir) + defer os.RemoveAll(dir) // nolint: errcheck book := NewAddrBook(dir+"addrbook.json", true) book.SetLogger(log.TestingLogger()) @@ -61,14 +62,12 @@ func TestPEXReactorAddRemovePeer(t *testing.T) { } func TestPEXReactorRunning(t *testing.T) { - require := require.New(t) - N := 3 switches := make([]*Switch, N) dir, err := ioutil.TempDir("", "pex_reactor") - require.Nil(err) - defer os.RemoveAll(dir) + require.Nil(t, err) + defer os.RemoveAll(dir) // nolint: errcheck book := NewAddrBook(dir+"addrbook.json", false) book.SetLogger(log.TestingLogger()) @@ -94,19 +93,11 @@ func TestPEXReactorRunning(t *testing.T) { // start switches for _, s := range switches { - _, err := s.Start() // start switch and reactors - require.Nil(err) + err := s.Start() // start switch and reactors + require.Nil(t, err) } - time.Sleep(1 * time.Second) - - // check peers are connected after some time - for _, s := range switches { - outbound, inbound, _ := s.NumPeers() - if outbound+inbound == 0 { - t.Errorf("%v expected to be connected to at least one peer", s.NodeInfo().ListenAddr) - } - } + assertSomePeersWithTimeout(t, switches, 10*time.Millisecond, 10*time.Second) // stop them for _, s := range switches { @@ -114,12 +105,39 @@ func TestPEXReactorRunning(t *testing.T) { } 
} +func assertSomePeersWithTimeout(t *testing.T, switches []*Switch, checkPeriod, timeout time.Duration) { + ticker := time.NewTicker(checkPeriod) + for { + select { + case <-ticker.C: + // check peers are connected + allGood := true + for _, s := range switches { + outbound, inbound, _ := s.NumPeers() + if outbound+inbound == 0 { + allGood = false + } + } + if allGood { + return + } + case <-time.After(timeout): + numPeersStr := "" + for i, s := range switches { + outbound, inbound, _ := s.NumPeers() + numPeersStr += fmt.Sprintf("%d => {outbound: %d, inbound: %d}, ", i, outbound, inbound) + } + t.Errorf("expected all switches to be connected to at least one peer (switches: %s)", numPeersStr) + } + } +} + func TestPEXReactorReceive(t *testing.T) { assert, require := assert.New(t), require.New(t) dir, err := ioutil.TempDir("", "pex_reactor") require.Nil(err) - defer os.RemoveAll(dir) + defer os.RemoveAll(dir) // nolint: errcheck book := NewAddrBook(dir+"addrbook.json", false) book.SetLogger(log.TestingLogger()) @@ -144,7 +162,7 @@ func TestPEXReactorAbuseFromPeer(t *testing.T) { dir, err := ioutil.TempDir("", "pex_reactor") require.Nil(err) - defer os.RemoveAll(dir) + defer os.RemoveAll(dir) // nolint: errcheck book := NewAddrBook(dir+"addrbook.json", true) book.SetLogger(log.TestingLogger()) @@ -162,9 +180,19 @@ func TestPEXReactorAbuseFromPeer(t *testing.T) { assert.True(r.ReachedMaxMsgCountForPeer(peer.NodeInfo().ListenAddr)) } +func createRoutableAddr() (addr string, netAddr *NetAddress) { + for { + addr = cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256) + netAddr, _ = NewNetAddressString(addr) + if netAddr.Routable() { + break + } + } + return +} + func createRandomPeer(outbound bool) *peer { - addr := cmn.Fmt("%v.%v.%v.%v:46656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256) - netAddr, _ := NewNetAddressString(addr) + addr, netAddr := createRoutableAddr() p := &peer{ key: cmn.RandStr(12), nodeInfo: 
&NodeInfo{ diff --git a/p2p/secret_connection.go b/p2p/secret_connection.go index 06c28317..aec0a751 100644 --- a/p2p/secret_connection.go +++ b/p2p/secret_connection.go @@ -67,8 +67,12 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKeyEd25 // Sort by lexical order. loEphPub, hiEphPub := sort32(locEphPub, remEphPub) + // Check if the local ephemeral public key + // was the least, lexicographically sorted. + locIsLeast := bytes.Equal(locEphPub[:], loEphPub[:]) + // Generate nonces to use for secretbox. - recvNonce, sendNonce := genNonces(loEphPub, hiEphPub, locEphPub == loEphPub) + recvNonce, sendNonce := genNonces(loEphPub, hiEphPub, locIsLeast) // Generate common challenge to sign. challenge := genChallenge(loEphPub, hiEphPub) @@ -298,7 +302,7 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKeyEd25519, signa // sha256 func hash32(input []byte) (res *[32]byte) { hasher := sha256.New() - hasher.Write(input) // does not error + hasher.Write(input) // nolint: errcheck, gas resSlice := hasher.Sum(nil) res = new([32]byte) copy(res[:], resSlice) @@ -308,7 +312,7 @@ func hash32(input []byte) (res *[32]byte) { // We only fill in the first 20 bytes with ripemd160 func hash24(input []byte) (res *[24]byte) { hasher := ripemd160.New() - hasher.Write(input) // does not error + hasher.Write(input) // nolint: errcheck, gas resSlice := hasher.Sum(nil) res = new([24]byte) copy(res[:], resSlice) diff --git a/p2p/secret_connection_test.go b/p2p/secret_connection_test.go index d0d00852..8b58fb41 100644 --- a/p2p/secret_connection_test.go +++ b/p2p/secret_connection_test.go @@ -70,8 +70,12 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection func TestSecretConnectionHandshake(t *testing.T) { fooSecConn, barSecConn := makeSecretConnPair(t) - fooSecConn.Close() - barSecConn.Close() + if err := fooSecConn.Close(); err != nil { + t.Error(err) + } + if err := barSecConn.Close(); err != nil { + t.Error(err) + } } 
func TestSecretConnectionReadWrite(t *testing.T) { @@ -110,7 +114,9 @@ func TestSecretConnectionReadWrite(t *testing.T) { return } } - nodeConn.PipeWriter.Close() + if err := nodeConn.PipeWriter.Close(); err != nil { + t.Error(err) + } }, func() { // Node reads @@ -125,7 +131,9 @@ func TestSecretConnectionReadWrite(t *testing.T) { } *nodeReads = append(*nodeReads, string(readBuffer[:n])) } - nodeConn.PipeReader.Close() + if err := nodeConn.PipeReader.Close(); err != nil { + t.Error(err) + } }) } } @@ -197,6 +205,8 @@ func BenchmarkSecretConnection(b *testing.B) { } b.StopTimer() - fooSecConn.Close() + if err := fooSecConn.Close(); err != nil { + b.Error(err) + } //barSecConn.Close() race condition } diff --git a/p2p/switch.go b/p2p/switch.go index 9ede8c10..f41b8295 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -1,12 +1,13 @@ package p2p import ( - "errors" "fmt" "math/rand" "net" "time" + "github.com/pkg/errors" + crypto "github.com/tendermint/go-crypto" cfg "github.com/tendermint/tendermint/config" cmn "github.com/tendermint/tmlibs/common" @@ -24,7 +25,7 @@ type Reactor interface { GetChannels() []*ChannelDescriptor AddPeer(peer Peer) RemovePeer(peer Peer, reason interface{}) - Receive(chID byte, peer Peer, msgBytes []byte) + Receive(chID byte, peer Peer, msgBytes []byte) // CONTRACT: msgBytes are not nil } //-------------------------------------- @@ -162,7 +163,7 @@ func (sw *Switch) NodeInfo() *NodeInfo { return sw.nodeInfo } -// SetNodePrivKey sets the switche's private key for authenticated encryption. +// SetNodePrivKey sets the switch's private key for authenticated encryption. // NOTE: Overwrites sw.nodeInfo.PubKey. // NOTE: Not goroutine safe. func (sw *Switch) SetNodePrivKey(nodePrivKey crypto.PrivKeyEd25519) { @@ -174,15 +175,13 @@ func (sw *Switch) SetNodePrivKey(nodePrivKey crypto.PrivKeyEd25519) { // OnStart implements BaseService. It starts all the reactors, peers, and listeners. 
func (sw *Switch) OnStart() error { - sw.BaseService.OnStart() // Start reactors for _, reactor := range sw.reactors { - _, err := reactor.Start() + err := reactor.Start() if err != nil { - return err + return errors.Wrapf(err, "failed to start %v", reactor) } } - // Start listeners for _, listener := range sw.listeners { go sw.listenerRoutine(listener) @@ -192,7 +191,6 @@ func (sw *Switch) OnStart() error { // OnStop implements BaseService. It stops all listeners, peers, and reactors. func (sw *Switch) OnStop() { - sw.BaseService.OnStop() // Stop listeners for _, listener := range sw.listeners { listener.Stop() @@ -209,10 +207,10 @@ func (sw *Switch) OnStop() { } } -// addPeer checks the given peer's validity, performs a handshake, and adds the peer to the switch -// and to all registered reactors. +// addPeer checks the given peer's validity, performs a handshake, and adds the +// peer to the switch and to all registered reactors. // NOTE: This performs a blocking handshake before the peer is added. -// CONTRACT: If error is returned, peer is nil, and conn is immediately closed. +// NOTE: If error is returned, caller is responsible for calling peer.CloseConn() func (sw *Switch) addPeer(peer *peer) error { if err := sw.FilterConnByAddr(peer.Addr()); err != nil { @@ -250,7 +248,7 @@ func (sw *Switch) addPeer(peer *peer) error { // Add the peer to .peers. // We start it first so that a peer in the list is safe to Stop. - // It should not err since we already checked peers.Has() + // It should not err since we already checked peers.Has(). 
if err := sw.peers.Add(peer); err != nil { return err } @@ -287,15 +285,19 @@ func (sw *Switch) SetPubKeyFilter(f func(crypto.PubKeyEd25519) error) { } func (sw *Switch) startInitPeer(peer *peer) { - peer.Start() // spawn send/recv routines + err := peer.Start() // spawn send/recv routines + if err != nil { + // Should never happen + sw.Logger.Error("Error starting peer", "peer", peer, "err", err) + } + for _, reactor := range sw.reactors { reactor.AddPeer(peer) } } -// DialSeeds dials a list of seeds asynchronously in random order +// DialSeeds dials a list of seeds asynchronously in random order. func (sw *Switch) DialSeeds(addrBook *AddrBook, seeds []string) error { - netAddrs, err := NewNetAddressStrings(seeds) if err != nil { return err @@ -315,11 +317,15 @@ func (sw *Switch) DialSeeds(addrBook *AddrBook, seeds []string) error { addrBook.Save() } + // Ensure we have a completely undeterministic PRNG. cmd.RandInt64() draws + // from a seed that's initialized with OS entropy on process start. + rng := rand.New(rand.NewSource(cmn.RandInt64())) + // permute the list, dial them in random order. - perm := rand.Perm(len(netAddrs)) + perm := rng.Perm(len(netAddrs)) for i := 0; i < len(perm); i++ { go func(i int) { - time.Sleep(time.Duration(rand.Int63n(3000)) * time.Millisecond) + time.Sleep(time.Duration(rng.Int63n(3000)) * time.Millisecond) j := perm[i] sw.dialSeed(netAddrs[j]) }(i) @@ -369,7 +375,7 @@ func (sw *Switch) IsDialing(addr *NetAddress) bool { // Broadcast runs a go routine for each attempted send, which will block // trying to send for defaultSendTimeoutSeconds. Returns a channel -// which receives success values for each attempted send (false if times out) +// which receives success values for each attempted send (false if times out). // NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved. // TODO: Something more intelligent. 
func (sw *Switch) Broadcast(chID byte, msg interface{}) chan bool { @@ -398,7 +404,7 @@ func (sw *Switch) NumPeers() (outbound, inbound, dialing int) { return } -// Peers returns the set of peers the switch is connected to. +// Peers returns the set of peers that are connected to the switch. func (sw *Switch) Peers() IPeerSet { return sw.peers } @@ -475,29 +481,18 @@ func (sw *Switch) listenerRoutine(l Listener) { // NOTE: We don't yet have the listening port of the // remote (if they have a listener at all). - // The peerHandshake will handle that + // The peerHandshake will handle that. } // cleanup } -//----------------------------------------------------------------------------- - -type SwitchEventNewPeer struct { - Peer Peer -} - -type SwitchEventDonePeer struct { - Peer Peer - Error interface{} -} - //------------------------------------------------------------------ -// Switches connected via arbitrary net.Conn; useful for testing +// Connects switches via arbitrary net.Conn. Used for testing. // MakeConnectedSwitches returns n switches, connected according to the connect func. // If connect==Connect2Switches, the switches will be fully connected. -// initSwitch defines how the ith switch should be initialized (ie. with what reactors). +// initSwitch defines how the i'th switch should be initialized (ie. with what reactors). // NOTE: panics if any switch fails to start. 
func MakeConnectedSwitches(cfg *cfg.P2PConfig, n int, initSwitch func(int, *Switch) *Switch, connect func([]*Switch, int, int)) []*Switch { switches := make([]*Switch, n) @@ -510,7 +505,7 @@ func MakeConnectedSwitches(cfg *cfg.P2PConfig, n int, initSwitch func(int, *Swit } for i := 0; i < n; i++ { - for j := i; j < n; j++ { + for j := i + 1; j < n; j++ { connect(switches, i, j) } } @@ -518,26 +513,24 @@ func MakeConnectedSwitches(cfg *cfg.P2PConfig, n int, initSwitch func(int, *Swit return switches } -var PanicOnAddPeerErr = false - -// Connect2Switches will connect switches i and j via net.Pipe() -// Blocks until a conection is established. -// NOTE: caller ensures i and j are within bounds +// Connect2Switches will connect switches i and j via net.Pipe(). +// Blocks until a connection is established. +// NOTE: caller ensures i and j are within bounds. func Connect2Switches(switches []*Switch, i, j int) { switchI := switches[i] switchJ := switches[j] - c1, c2 := net.Pipe() + c1, c2 := netPipe() doneCh := make(chan struct{}) go func() { err := switchI.addPeerWithConnection(c1) - if PanicOnAddPeerErr && err != nil { + if err != nil { panic(err) } doneCh <- struct{}{} }() go func() { err := switchJ.addPeerWithConnection(c2) - if PanicOnAddPeerErr && err != nil { + if err != nil { panic(err) } doneCh <- struct{}{} @@ -550,7 +543,7 @@ func Connect2Switches(switches []*Switch, i, j int) { // It returns the first encountered error. 
func StartSwitches(switches []*Switch) error { for _, s := range switches { - _, err := s.Start() // start switch and reactors + err := s.Start() // start switch and reactors if err != nil { return err } @@ -578,12 +571,14 @@ func makeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch f func (sw *Switch) addPeerWithConnection(conn net.Conn) error { peer, err := newInboundPeer(conn, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, sw.peerConfig) if err != nil { - conn.Close() + if err := conn.Close(); err != nil { + sw.Logger.Error("Error closing connection", "err", err) + } return err } peer.SetLogger(sw.Logger.With("peer", conn.RemoteAddr())) if err = sw.addPeer(peer); err != nil { - conn.Close() + peer.CloseConn() return err } @@ -593,12 +588,14 @@ func (sw *Switch) addPeerWithConnection(conn net.Conn) error { func (sw *Switch) addPeerWithConnectionAndConfig(conn net.Conn, config *PeerConfig) error { peer, err := newInboundPeer(conn, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, config) if err != nil { - conn.Close() + if err := conn.Close(); err != nil { + sw.Logger.Error("Error closing connection", "err", err) + } return err } peer.SetLogger(sw.Logger.With("peer", conn.RemoteAddr())) if err = sw.addPeer(peer); err != nil { - conn.Close() + peer.CloseConn() return err } diff --git a/p2p/switch_test.go b/p2p/switch_test.go index 115811b0..3ce24d08 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -10,11 +10,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + crypto "github.com/tendermint/go-crypto" wire "github.com/tendermint/go-wire" + "github.com/tendermint/tmlibs/log" cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tmlibs/log" ) var ( @@ -100,12 +101,12 @@ func makeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switc func initSwitchFunc(i int, sw *Switch) *Switch { // Make two reactors of two channels each 
sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{ - &ChannelDescriptor{ID: byte(0x00), Priority: 10}, - &ChannelDescriptor{ID: byte(0x01), Priority: 10}, + {ID: byte(0x00), Priority: 10}, + {ID: byte(0x01), Priority: 10}, }, true)) sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{ - &ChannelDescriptor{ID: byte(0x02), Priority: 10}, - &ChannelDescriptor{ID: byte(0x03), Priority: 10}, + {ID: byte(0x02), Priority: 10}, + {ID: byte(0x03), Priority: 10}, }, true)) return sw } @@ -131,43 +132,36 @@ func TestSwitches(t *testing.T) { s1.Broadcast(byte(0x01), ch1Msg) s1.Broadcast(byte(0x02), ch2Msg) - // Wait for things to settle... - time.Sleep(5000 * time.Millisecond) + assertMsgReceivedWithTimeout(t, ch0Msg, byte(0x00), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) + assertMsgReceivedWithTimeout(t, ch1Msg, byte(0x01), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) + assertMsgReceivedWithTimeout(t, ch2Msg, byte(0x02), s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second) +} - // Check message on ch0 - ch0Msgs := s2.Reactor("foo").(*TestReactor).getMsgs(byte(0x00)) - if len(ch0Msgs) != 1 { - t.Errorf("Expected to have received 1 message in ch0") +func assertMsgReceivedWithTimeout(t *testing.T, msg string, channel byte, reactor *TestReactor, checkPeriod, timeout time.Duration) { + ticker := time.NewTicker(checkPeriod) + for { + select { + case <-ticker.C: + msgs := reactor.getMsgs(channel) + if len(msgs) > 0 { + if !bytes.Equal(msgs[0].Bytes, wire.BinaryBytes(msg)) { + t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(msg), msgs[0].Bytes) + } + return + } + case <-time.After(timeout): + t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel) + } } - if !bytes.Equal(ch0Msgs[0].Bytes, wire.BinaryBytes(ch0Msg)) { - t.Errorf("Unexpected message bytes. 
Wanted: %X, Got: %X", wire.BinaryBytes(ch0Msg), ch0Msgs[0].Bytes) - } - - // Check message on ch1 - ch1Msgs := s2.Reactor("foo").(*TestReactor).getMsgs(byte(0x01)) - if len(ch1Msgs) != 1 { - t.Errorf("Expected to have received 1 message in ch1") - } - if !bytes.Equal(ch1Msgs[0].Bytes, wire.BinaryBytes(ch1Msg)) { - t.Errorf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(ch1Msg), ch1Msgs[0].Bytes) - } - - // Check message on ch2 - ch2Msgs := s2.Reactor("bar").(*TestReactor).getMsgs(byte(0x02)) - if len(ch2Msgs) != 1 { - t.Errorf("Expected to have received 1 message in ch2") - } - if !bytes.Equal(ch2Msgs[0].Bytes, wire.BinaryBytes(ch2Msg)) { - t.Errorf("Unexpected message bytes. Wanted: %X, Got: %X", wire.BinaryBytes(ch2Msg), ch2Msgs[0].Bytes) - } - } func TestConnAddrFilter(t *testing.T) { s1 := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc) s2 := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc) + defer s1.Stop() + defer s2.Stop() - c1, c2 := net.Pipe() + c1, c2 := netPipe() s1.SetAddrFilter(func(addr net.Addr) error { if addr.String() == c1.RemoteAddr().String() { @@ -178,30 +172,32 @@ func TestConnAddrFilter(t *testing.T) { // connect to good peer go func() { - s1.addPeerWithConnection(c1) + err := s1.addPeerWithConnection(c1) + assert.NotNil(t, err, "expected err") }() go func() { - s2.addPeerWithConnection(c2) + err := s2.addPeerWithConnection(c2) + assert.NotNil(t, err, "expected err") }() - // Wait for things to happen, peers to get added... 
- time.Sleep(100 * time.Millisecond * time.Duration(4)) + assertNoPeersAfterTimeout(t, s1, 400*time.Millisecond) + assertNoPeersAfterTimeout(t, s2, 400*time.Millisecond) +} - defer s1.Stop() - defer s2.Stop() - if s1.Peers().Size() != 0 { - t.Errorf("Expected s1 not to connect to peers, got %d", s1.Peers().Size()) - } - if s2.Peers().Size() != 0 { - t.Errorf("Expected s2 not to connect to peers, got %d", s2.Peers().Size()) +func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) { + time.Sleep(timeout) + if sw.Peers().Size() != 0 { + t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size()) } } func TestConnPubKeyFilter(t *testing.T) { s1 := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc) s2 := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc) + defer s1.Stop() + defer s2.Stop() - c1, c2 := net.Pipe() + c1, c2 := netPipe() // set pubkey filter s1.SetPubKeyFilter(func(pubkey crypto.PubKeyEd25519) error { @@ -213,30 +209,26 @@ func TestConnPubKeyFilter(t *testing.T) { // connect to good peer go func() { - s1.addPeerWithConnection(c1) + err := s1.addPeerWithConnection(c1) + assert.NotNil(t, err, "expected error") }() go func() { - s2.addPeerWithConnection(c2) + err := s2.addPeerWithConnection(c2) + assert.NotNil(t, err, "expected error") }() - // Wait for things to happen, peers to get added... 
- time.Sleep(100 * time.Millisecond * time.Duration(4)) - - defer s1.Stop() - defer s2.Stop() - if s1.Peers().Size() != 0 { - t.Errorf("Expected s1 not to connect to peers, got %d", s1.Peers().Size()) - } - if s2.Peers().Size() != 0 { - t.Errorf("Expected s2 not to connect to peers, got %d", s2.Peers().Size()) - } + assertNoPeersAfterTimeout(t, s1, 400*time.Millisecond) + assertNoPeersAfterTimeout(t, s2, 400*time.Millisecond) } func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { assert, require := assert.New(t), require.New(t) sw := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc) - sw.Start() + err := sw.Start() + if err != nil { + t.Error(err) + } defer sw.Stop() // simulate remote peer @@ -252,9 +244,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { // simulate failure by closing connection peer.CloseConn() - time.Sleep(100 * time.Millisecond) - - assert.Zero(sw.Peers().Size()) + assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond) assert.False(peer.IsRunning()) } @@ -262,7 +252,10 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) { assert, require := assert.New(t), require.New(t) sw := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc) - sw.Start() + err := sw.Start() + if err != nil { + t.Error(err) + } defer sw.Stop() // simulate remote peer @@ -280,24 +273,45 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) { peer.CloseConn() // TODO: actually detect the disconnection and wait for reconnect - time.Sleep(100 * time.Millisecond) - - assert.NotZero(sw.Peers().Size()) + npeers := sw.Peers().Size() + for i := 0; i < 20; i++ { + time.Sleep(100 * time.Millisecond) + npeers = sw.Peers().Size() + if npeers > 0 { + break + } + } + assert.NotZero(npeers) assert.False(peer.IsRunning()) } +func TestSwitchFullConnectivity(t *testing.T) { + switches := MakeConnectedSwitches(config, 3, initSwitchFunc, Connect2Switches) + defer func() { + for _, sw := range switches { + sw.Stop() + } + }() + + for i, 
sw := range switches { + if sw.Peers().Size() != 2 { + t.Fatalf("Expected each switch to be connected to 2 other, but %d switch only connected to %d", sw.Peers().Size(), i) + } + } +} + func BenchmarkSwitches(b *testing.B) { b.StopTimer() s1, s2 := makeSwitchPair(b, func(i int, sw *Switch) *Switch { // Make bar reactors of bar channels each sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{ - &ChannelDescriptor{ID: byte(0x00), Priority: 10}, - &ChannelDescriptor{ID: byte(0x01), Priority: 10}, + {ID: byte(0x00), Priority: 10}, + {ID: byte(0x01), Priority: 10}, }, false)) sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{ - &ChannelDescriptor{ID: byte(0x02), Priority: 10}, - &ChannelDescriptor{ID: byte(0x03), Priority: 10}, + {ID: byte(0x02), Priority: 10}, + {ID: byte(0x03), Priority: 10}, }, false)) return sw }) @@ -305,7 +319,7 @@ func BenchmarkSwitches(b *testing.B) { defer s2.Stop() // Allow time for goroutines to boot up - time.Sleep(1000 * time.Millisecond) + time.Sleep(1 * time.Second) b.StartTimer() numSuccess, numFailure := 0, 0 @@ -327,5 +341,4 @@ func BenchmarkSwitches(b *testing.B) { // Allow everything to flush before stopping switches & closing connections. b.StopTimer() - time.Sleep(1000 * time.Millisecond) } diff --git a/p2p/trust/config.go b/p2p/trust/config.go new file mode 100644 index 00000000..6fb0e681 --- /dev/null +++ b/p2p/trust/config.go @@ -0,0 +1,56 @@ +package trust + +import "time" + +// TrustMetricConfig - Configures the weight functions and time intervals for the metric +type TrustMetricConfig struct { + // Determines the percentage given to current behavior + ProportionalWeight float64 + + // Determines the percentage given to prior behavior + IntegralWeight float64 + + // The window of time that the trust metric will track events across. + // This can be set to cover many days without issue + TrackingWindow time.Duration + + // Each interval should be short for adapability. 
+ // Less than 30 seconds is too sensitive, + // and greater than 5 minutes will make the metric numb + IntervalLength time.Duration +} + +// DefaultConfig returns a config with values that have been tested and produce desirable results +func DefaultConfig() TrustMetricConfig { + return TrustMetricConfig{ + ProportionalWeight: 0.4, + IntegralWeight: 0.6, + TrackingWindow: (time.Minute * 60 * 24) * 14, // 14 days. + IntervalLength: 1 * time.Minute, + } +} + +// Ensures that all configuration elements have valid values +func customConfig(tmc TrustMetricConfig) TrustMetricConfig { + config := DefaultConfig() + + // Check the config for set values, and setup appropriately + if tmc.ProportionalWeight > 0 { + config.ProportionalWeight = tmc.ProportionalWeight + } + + if tmc.IntegralWeight > 0 { + config.IntegralWeight = tmc.IntegralWeight + } + + if tmc.IntervalLength > time.Duration(0) { + config.IntervalLength = tmc.IntervalLength + } + + if tmc.TrackingWindow > time.Duration(0) && + tmc.TrackingWindow >= config.IntervalLength { + config.TrackingWindow = tmc.TrackingWindow + } + + return config +} diff --git a/p2p/trust/metric.go b/p2p/trust/metric.go new file mode 100644 index 00000000..beb462b2 --- /dev/null +++ b/p2p/trust/metric.go @@ -0,0 +1,383 @@ +// Copyright 2017 Tendermint. All rights reserved. +// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. 
+ +package trust + +import ( + "math" + "sync" + "time" +) + +//--------------------------------------------------------------------------------------- + +const ( + // The weight applied to the derivative when current behavior is >= previous behavior + defaultDerivativeGamma1 = 0 + + // The weight applied to the derivative when current behavior is less than previous behavior + defaultDerivativeGamma2 = 1.0 + + // The weight applied to history data values when calculating the history value + defaultHistoryDataWeight = 0.8 +) + +// MetricHistoryJSON - history data necessary to save the trust metric +type MetricHistoryJSON struct { + NumIntervals int `json:"intervals"` + History []float64 `json:"history"` +} + +// TrustMetric - keeps track of peer reliability +// See tendermint/docs/architecture/adr-006-trust-metric.md for details +type TrustMetric struct { + // Mutex that protects the metric from concurrent access + mtx sync.Mutex + + // Determines the percentage given to current behavior + proportionalWeight float64 + + // Determines the percentage given to prior behavior + integralWeight float64 + + // Count of how many time intervals this metric has been tracking + numIntervals int + + // Size of the time interval window for this trust metric + maxIntervals int + + // The time duration for a single time interval + intervalLen time.Duration + + // Stores the trust history data for this metric + history []float64 + + // Weights applied to the history data when calculating the history value + historyWeights []float64 + + // The sum of the history weights used when calculating the history value + historyWeightSum float64 + + // The current number of history data elements + historySize int + + // The maximum number of history data elements + historyMaxSize int + + // The calculated history value for the current time interval + historyValue float64 + + // The number of recorded good and bad events for the current time interval + bad, good float64 + + // While true, 
history data is not modified + paused bool + + // Signal channel for stopping the trust metric go-routine + stop chan struct{} +} + +// NewMetric returns a trust metric with the default configuration +func NewMetric() *TrustMetric { + return NewMetricWithConfig(DefaultConfig()) +} + +// NewMetricWithConfig returns a trust metric with a custom configuration +func NewMetricWithConfig(tmc TrustMetricConfig) *TrustMetric { + tm := new(TrustMetric) + config := customConfig(tmc) + + // Setup using the configuration values + tm.proportionalWeight = config.ProportionalWeight + tm.integralWeight = config.IntegralWeight + tm.intervalLen = config.IntervalLength + // The maximum number of time intervals is the tracking window / interval length + tm.maxIntervals = int(config.TrackingWindow / tm.intervalLen) + // The history size will be determined by the maximum number of time intervals + tm.historyMaxSize = intervalToHistoryOffset(tm.maxIntervals) + 1 + // This metric has a perfect history so far + tm.historyValue = 1.0 + // Setup the stop channel + tm.stop = make(chan struct{}) + + go tm.processRequests() + return tm +} + +// Returns a snapshot of the trust metric history data +func (tm *TrustMetric) HistoryJSON() MetricHistoryJSON { + tm.mtx.Lock() + defer tm.mtx.Unlock() + + return MetricHistoryJSON{ + NumIntervals: tm.numIntervals, + History: tm.history, + } +} + +// Instantiates a trust metric by loading the history data for a single peer. 
+// This is called only once and only right after creation, which is why the +// lock is not held while accessing the trust metric struct members +func (tm *TrustMetric) Init(hist MetricHistoryJSON) { + // Restore the number of time intervals we have previously tracked + if hist.NumIntervals > tm.maxIntervals { + hist.NumIntervals = tm.maxIntervals + } + tm.numIntervals = hist.NumIntervals + // Restore the history and its current size + if len(hist.History) > tm.historyMaxSize { + // Keep the history no larger than historyMaxSize + last := len(hist.History) - tm.historyMaxSize + hist.History = hist.History[last:] + } + tm.history = hist.History + tm.historySize = len(tm.history) + // Create the history weight values and weight sum + for i := 1; i <= tm.numIntervals; i++ { + x := math.Pow(defaultHistoryDataWeight, float64(i)) // Optimistic weight + tm.historyWeights = append(tm.historyWeights, x) + } + + for _, v := range tm.historyWeights { + tm.historyWeightSum += v + } + // Calculate the history value based on the loaded history data + tm.historyValue = tm.calcHistoryValue() +} + +// Pause tells the metric to pause recording data over time intervals. 
+// All method calls that indicate events will unpause the metric +func (tm *TrustMetric) Pause() { + tm.mtx.Lock() + defer tm.mtx.Unlock() + + // Pause the metric for now + tm.paused = true +} + +// Stop tells the metric to stop recording data over time intervals +func (tm *TrustMetric) Stop() { + tm.stop <- struct{}{} +} + +// BadEvents indicates that an undesirable event(s) took place +func (tm *TrustMetric) BadEvents(num int) { + tm.mtx.Lock() + defer tm.mtx.Unlock() + + tm.unpause() + tm.bad += float64(num) +} + +// GoodEvents indicates that a desirable event(s) took place +func (tm *TrustMetric) GoodEvents(num int) { + tm.mtx.Lock() + defer tm.mtx.Unlock() + + tm.unpause() + tm.good += float64(num) +} + +// TrustValue gets the dependable trust value; always between 0 and 1 +func (tm *TrustMetric) TrustValue() float64 { + tm.mtx.Lock() + defer tm.mtx.Unlock() + + return tm.calcTrustValue() +} + +// TrustScore gets a score based on the trust value always between 0 and 100 +func (tm *TrustMetric) TrustScore() int { + score := tm.TrustValue() * 100 + + return int(math.Floor(score)) +} + +// NextTimeInterval saves current time interval data and prepares for the following interval +func (tm *TrustMetric) NextTimeInterval() { + tm.mtx.Lock() + defer tm.mtx.Unlock() + + if tm.paused { + // Do not prepare for the next time interval while paused + return + } + + // Add the current trust value to the history data + newHist := tm.calcTrustValue() + tm.history = append(tm.history, newHist) + + // Update history and interval counters + if tm.historySize < tm.historyMaxSize { + tm.historySize++ + } else { + // Keep the history no larger than historyMaxSize + last := len(tm.history) - tm.historyMaxSize + tm.history = tm.history[last:] + } + + if tm.numIntervals < tm.maxIntervals { + tm.numIntervals++ + // Add the optimistic weight for the new time interval + wk := math.Pow(defaultHistoryDataWeight, float64(tm.numIntervals)) + tm.historyWeights = append(tm.historyWeights, wk) 
+ tm.historyWeightSum += wk + } + + // Update the history data using Faded Memories + tm.updateFadedMemory() + // Calculate the history value for the upcoming time interval + tm.historyValue = tm.calcHistoryValue() + tm.good = 0 + tm.bad = 0 +} + +// Copy returns a new trust metric with members containing the same values +func (tm *TrustMetric) Copy() *TrustMetric { + if tm == nil { + return nil + } + tm.mtx.Lock() + defer tm.mtx.Unlock() + + return &TrustMetric{ + proportionalWeight: tm.proportionalWeight, + integralWeight: tm.integralWeight, + numIntervals: tm.numIntervals, + maxIntervals: tm.maxIntervals, + intervalLen: tm.intervalLen, + history: tm.history, + historyWeights: tm.historyWeights, + historyWeightSum: tm.historyWeightSum, + historySize: tm.historySize, + historyMaxSize: tm.historyMaxSize, + historyValue: tm.historyValue, + good: tm.good, + bad: tm.bad, + paused: tm.paused, + stop: make(chan struct{}), + } +} + +/* Private methods */ + +// This method is for a goroutine that handles all requests on the metric +func (tm *TrustMetric) processRequests() { + t := time.NewTicker(tm.intervalLen) + defer t.Stop() +loop: + for { + select { + case <-t.C: + tm.NextTimeInterval() + case <-tm.stop: + // Stop all further tracking for this metric + break loop + } + } +} + +// Wakes the trust metric up if it is currently paused +// This method needs to be called with the mutex locked +func (tm *TrustMetric) unpause() { + // Check if this is the first experience with + // what we are tracking since being paused + if tm.paused { + tm.good = 0 + tm.bad = 0 + // New events cause us to unpause the metric + tm.paused = false + } +} + +// Calculates the trust value for the request processing +func (tm *TrustMetric) calcTrustValue() float64 { + weightedP := tm.proportionalWeight * tm.proportionalValue() + weightedI := tm.integralWeight * tm.historyValue + weightedD := tm.weightedDerivative() + + tv := weightedP + weightedI + weightedD + // Do not return a negative value. 
+ if tv < 0 { + tv = 0 + } + return tv +} + +// Calculates the current score for good/bad experiences +func (tm *TrustMetric) proportionalValue() float64 { + value := 1.0 + + total := tm.good + tm.bad + if total > 0 { + value = tm.good / total + } + return value +} + +// Strengthens the derivative component when the change is negative +func (tm *TrustMetric) weightedDerivative() float64 { + var weight float64 = defaultDerivativeGamma1 + + d := tm.derivativeValue() + if d < 0 { + weight = defaultDerivativeGamma2 + } + return weight * d +} + +// Calculates the derivative component +func (tm *TrustMetric) derivativeValue() float64 { + return tm.proportionalValue() - tm.historyValue +} + +// Calculates the integral (history) component of the trust value +func (tm *TrustMetric) calcHistoryValue() float64 { + var hv float64 + + for i := 0; i < tm.numIntervals; i++ { + hv += tm.fadedMemoryValue(i) * tm.historyWeights[i] + } + + return hv / tm.historyWeightSum +} + +// Retrieves the actual history data value that represents the requested time interval +func (tm *TrustMetric) fadedMemoryValue(interval int) float64 { + first := tm.historySize - 1 + + if interval == 0 { + // Base case + return tm.history[first] + } + + offset := intervalToHistoryOffset(interval) + return tm.history[first-offset] +} + +// Performs the update for our Faded Memories process, which allows the +// trust metric tracking window to be large while maintaining a small +// number of history data values +func (tm *TrustMetric) updateFadedMemory() { + if tm.historySize < 2 { + return + } + + end := tm.historySize - 1 + // Keep the most recent history element + for count := 1; count < tm.historySize; count++ { + i := end - count + // The older the data is, the more we spread it out + x := math.Pow(2, float64(count)) + // Two history data values are merged into a single value + tm.history[i] = ((tm.history[i] * (x - 1)) + tm.history[i+1]) / x + } +} + +// Map the interval value down to an offset from the 
beginning of history +func intervalToHistoryOffset(interval int) int { + // The system maintains 2^m interval values in the form of m history + // data values. Therefore, we access the ith interval by obtaining + // the history data index = the floor of log2(i) + return int(math.Floor(math.Log2(float64(interval)))) +} diff --git a/p2p/trust/metric_test.go b/p2p/trust/metric_test.go new file mode 100644 index 00000000..92272615 --- /dev/null +++ b/p2p/trust/metric_test.go @@ -0,0 +1,90 @@ +package trust + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestTrustMetricScores(t *testing.T) { + tm := NewMetric() + + // Perfect score + tm.GoodEvents(1) + score := tm.TrustScore() + assert.Equal(t, 100, score) + + // Less than perfect score + tm.BadEvents(10) + score = tm.TrustScore() + assert.NotEqual(t, 100, score) + tm.Stop() +} + +func TestTrustMetricConfig(t *testing.T) { + // 7 days + window := time.Minute * 60 * 24 * 7 + config := TrustMetricConfig{ + TrackingWindow: window, + IntervalLength: 2 * time.Minute, + } + + tm := NewMetricWithConfig(config) + + // The max time intervals should be the TrackingWindow / IntervalLen + assert.Equal(t, int(config.TrackingWindow/config.IntervalLength), tm.maxIntervals) + + dc := DefaultConfig() + // These weights should still be the default values + assert.Equal(t, dc.ProportionalWeight, tm.proportionalWeight) + assert.Equal(t, dc.IntegralWeight, tm.integralWeight) + tm.Stop() + + config.ProportionalWeight = 0.3 + config.IntegralWeight = 0.7 + tm = NewMetricWithConfig(config) + + // These weights should be equal to our custom values + assert.Equal(t, config.ProportionalWeight, tm.proportionalWeight) + assert.Equal(t, config.IntegralWeight, tm.integralWeight) + tm.Stop() +} + +func TestTrustMetricStopPause(t *testing.T) { + // Cause time intervals to pass quickly + config := TrustMetricConfig{ + TrackingWindow: 5 * time.Minute, + IntervalLength: 10 * time.Millisecond, + } + + tm := 
NewMetricWithConfig(config) + + // Allow some time intervals to pass and pause + time.Sleep(50 * time.Millisecond) + tm.Pause() + // Give the pause some time to take place + time.Sleep(10 * time.Millisecond) + + first := tm.Copy().numIntervals + // Allow more time to pass and check the intervals are unchanged + time.Sleep(50 * time.Millisecond) + assert.Equal(t, first, tm.numIntervals) + + // Get the trust metric activated again + tm.GoodEvents(5) + // Allow some time intervals to pass and stop + time.Sleep(50 * time.Millisecond) + tm.Stop() + // Give the stop some time to take place + time.Sleep(10 * time.Millisecond) + + second := tm.Copy().numIntervals + // Allow more time to pass and check the intervals are unchanged + time.Sleep(50 * time.Millisecond) + assert.Equal(t, second, tm.numIntervals) + + if first >= second { + t.Fatalf("numIntervals should always increase or stay the same over time") + } +} diff --git a/p2p/trust/store.go b/p2p/trust/store.go new file mode 100644 index 00000000..e86aecd2 --- /dev/null +++ b/p2p/trust/store.go @@ -0,0 +1,192 @@ +// Copyright 2017 Tendermint. All rights reserved. +// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. 
+ +package trust + +import ( + "encoding/json" + "sync" + "time" + + cmn "github.com/tendermint/tmlibs/common" + dbm "github.com/tendermint/tmlibs/db" +) + +const defaultStorePeriodicSaveInterval = 1 * time.Minute + +var trustMetricKey = []byte("trustMetricStore") + +// TrustMetricStore - Manages all trust metrics for peers +type TrustMetricStore struct { + cmn.BaseService + + // Maps a Peer.Key to that peer's TrustMetric + peerMetrics map[string]*TrustMetric + + // Mutex that protects the map and history data file + mtx sync.Mutex + + // The db where peer trust metric history data will be stored + db dbm.DB + + // This configuration will be used when creating new TrustMetrics + config TrustMetricConfig +} + +// NewTrustMetricStore returns a store that saves data to the DB +// and uses the config when creating new trust metrics +func NewTrustMetricStore(db dbm.DB, tmc TrustMetricConfig) *TrustMetricStore { + tms := &TrustMetricStore{ + peerMetrics: make(map[string]*TrustMetric), + db: db, + config: tmc, + } + + tms.BaseService = *cmn.NewBaseService(nil, "TrustMetricStore", tms) + return tms +} + +// OnStart implements Service +func (tms *TrustMetricStore) OnStart() error { + if err := tms.BaseService.OnStart(); err != nil { + return err + } + + tms.mtx.Lock() + defer tms.mtx.Unlock() + + tms.loadFromDB() + go tms.saveRoutine() + return nil +} + +// OnStop implements Service +func (tms *TrustMetricStore) OnStop() { + tms.BaseService.OnStop() + + tms.mtx.Lock() + defer tms.mtx.Unlock() + + // Stop all trust metric go-routines + for _, tm := range tms.peerMetrics { + tm.Stop() + } + + // Make the final trust history data save + tms.saveToDB() +} + +// Size returns the number of entries in the trust metric store +func (tms *TrustMetricStore) Size() int { + tms.mtx.Lock() + defer tms.mtx.Unlock() + + return tms.size() +} + +// GetPeerTrustMetric returns a trust metric by peer key +func (tms *TrustMetricStore) GetPeerTrustMetric(key string) *TrustMetric { + 
tms.mtx.Lock() + defer tms.mtx.Unlock() + + tm, ok := tms.peerMetrics[key] + if !ok { + // If the metric is not available, we will create it + tm = NewMetricWithConfig(tms.config) + // The metric needs to be in the map + tms.peerMetrics[key] = tm + } + return tm +} + +// PeerDisconnected pauses the trust metric associated with the peer identified by the key +func (tms *TrustMetricStore) PeerDisconnected(key string) { + tms.mtx.Lock() + defer tms.mtx.Unlock() + + // If the Peer that disconnected has a metric, pause it + if tm, ok := tms.peerMetrics[key]; ok { + tm.Pause() + } +} + +// Saves the history data for all peers to the store DB. +// This public method acquires the trust metric store lock +func (tms *TrustMetricStore) SaveToDB() { + tms.mtx.Lock() + defer tms.mtx.Unlock() + + tms.saveToDB() +} + +/* Private methods */ + +// size returns the number of entries in the store without acquiring the mutex +func (tms *TrustMetricStore) size() int { + return len(tms.peerMetrics) +} + +/* Loading & Saving */ +/* Both loadFromDB and saveToDB assume the mutex has been acquired */ + +// Loads the history data for all peers from the store DB +// cmn.Panics if file is corrupt +func (tms *TrustMetricStore) loadFromDB() bool { + // Obtain the history data we have so far + bytes := tms.db.Get(trustMetricKey) + if bytes == nil { + return false + } + + peers := make(map[string]MetricHistoryJSON, 0) + err := json.Unmarshal(bytes, &peers) + if err != nil { + cmn.PanicCrisis(cmn.Fmt("Could not unmarshal Trust Metric Store DB data: %v", err)) + } + + // If history data exists in the file, + // load it into trust metric + for key, p := range peers { + tm := NewMetricWithConfig(tms.config) + + tm.Init(p) + // Load the peer trust metric into the store + tms.peerMetrics[key] = tm + } + return true +} + +// Saves the history data for all peers to the store DB +func (tms *TrustMetricStore) saveToDB() { + tms.Logger.Debug("Saving TrustHistory to DB", "size", tms.size()) + + peers := 
make(map[string]MetricHistoryJSON, 0) + + for key, tm := range tms.peerMetrics { + // Add an entry for the peer identified by key + peers[key] = tm.HistoryJSON() + } + + // Write all the data back to the DB + bytes, err := json.Marshal(peers) + if err != nil { + tms.Logger.Error("Failed to encode the TrustHistory", "err", err) + return + } + tms.db.SetSync(trustMetricKey, bytes) +} + +// Periodically saves the trust history data to the DB +func (tms *TrustMetricStore) saveRoutine() { + t := time.NewTicker(defaultStorePeriodicSaveInterval) + defer t.Stop() +loop: + for { + select { + case <-t.C: + tms.SaveToDB() + case <-tms.Quit: + break loop + } + } +} diff --git a/p2p/trust/store_test.go b/p2p/trust/store_test.go new file mode 100644 index 00000000..c0306bba --- /dev/null +++ b/p2p/trust/store_test.go @@ -0,0 +1,152 @@ +// Copyright 2017 Tendermint. All rights reserved. +// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. + +package trust + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tmlibs/db" + "github.com/tendermint/tmlibs/log" +) + +func TestTrustMetricStoreSaveLoad(t *testing.T) { + dir, err := ioutil.TempDir("", "trust_test") + if err != nil { + panic(err) + } + defer os.RemoveAll(dir) + + historyDB := dbm.NewDB("trusthistory", "goleveldb", dir) + + config := TrustMetricConfig{ + TrackingWindow: 5 * time.Minute, + IntervalLength: 50 * time.Millisecond, + } + + // 0 peers saved + store := NewTrustMetricStore(historyDB, config) + store.SetLogger(log.TestingLogger()) + store.saveToDB() + // Load the data from the file + store = NewTrustMetricStore(historyDB, config) + store.SetLogger(log.TestingLogger()) + store.loadFromDB() + // Make sure we still have 0 entries + assert.Zero(t, store.Size()) + + // 100 peers + for i := 0; i < 100; i++ { + key := fmt.Sprintf("peer_%d", i) + tm := store.GetPeerTrustMetric(key) + + tm.BadEvents(10) + 
tm.GoodEvents(1) + } + + // Check that we have 100 entries and save + assert.Equal(t, 100, store.Size()) + // Give the metrics time to process the history data + time.Sleep(1 * time.Second) + + // Stop all the trust metrics and save + for _, tm := range store.peerMetrics { + tm.Stop() + } + store.saveToDB() + + // Load the data from the DB + store = NewTrustMetricStore(historyDB, config) + store.SetLogger(log.TestingLogger()) + store.loadFromDB() + + // Check that we still have 100 peers with imperfect trust values + assert.Equal(t, 100, store.Size()) + for _, tm := range store.peerMetrics { + assert.NotEqual(t, 1.0, tm.TrustValue()) + } + + // Stop all the trust metrics + for _, tm := range store.peerMetrics { + tm.Stop() + } +} + +func TestTrustMetricStoreConfig(t *testing.T) { + historyDB := dbm.NewDB("", "memdb", "") + + config := TrustMetricConfig{ + ProportionalWeight: 0.5, + IntegralWeight: 0.5, + } + + // Create a store with custom config + store := NewTrustMetricStore(historyDB, config) + store.SetLogger(log.TestingLogger()) + + // Have the store make us a metric with the config + tm := store.GetPeerTrustMetric("TestKey") + + // Check that the options made it to the metric + assert.Equal(t, 0.5, tm.proportionalWeight) + assert.Equal(t, 0.5, tm.integralWeight) + tm.Stop() +} + +func TestTrustMetricStoreLookup(t *testing.T) { + historyDB := dbm.NewDB("", "memdb", "") + + store := NewTrustMetricStore(historyDB, DefaultConfig()) + store.SetLogger(log.TestingLogger()) + + // Create 100 peers in the trust metric store + for i := 0; i < 100; i++ { + key := fmt.Sprintf("peer_%d", i) + store.GetPeerTrustMetric(key) + + // Check that the trust metric was successfully entered + ktm := store.peerMetrics[key] + assert.NotNil(t, ktm, "Expected to find TrustMetric %s but wasn't there.", key) + } + + // Stop all the trust metrics + for _, tm := range store.peerMetrics { + tm.Stop() + } +} + +func TestTrustMetricStorePeerScore(t *testing.T) { + historyDB := dbm.NewDB("", 
"memdb", "") + + store := NewTrustMetricStore(historyDB, DefaultConfig()) + store.SetLogger(log.TestingLogger()) + + key := "TestKey" + tm := store.GetPeerTrustMetric(key) + + // This peer is innocent so far + first := tm.TrustScore() + assert.Equal(t, 100, first) + + // Add some undesirable events and disconnect + tm.BadEvents(1) + first = tm.TrustScore() + assert.NotEqual(t, 100, first) + tm.BadEvents(10) + second := tm.TrustScore() + + if second > first { + t.Errorf("A greater number of bad events should lower the trust score") + } + store.PeerDisconnected(key) + + // We will remember our experiences with this peer + tm = store.GetPeerTrustMetric(key) + assert.NotEqual(t, 100, tm.TrustScore()) + tm.Stop() +} diff --git a/p2p/types.go b/p2p/types.go index 1d3770b5..4e0994b7 100644 --- a/p2p/types.go +++ b/p2p/types.go @@ -55,12 +55,12 @@ func (info *NodeInfo) CompatibleWith(other *NodeInfo) error { } func (info *NodeInfo) ListenHost() string { - host, _, _ := net.SplitHostPort(info.ListenAddr) + host, _, _ := net.SplitHostPort(info.ListenAddr) // nolint: errcheck, gas return host } func (info *NodeInfo) ListenPort() int { - _, port, _ := net.SplitHostPort(info.ListenAddr) + _, port, _ := net.SplitHostPort(info.ListenAddr) // nolint: errcheck, gas port_i, err := strconv.Atoi(port) if err != nil { return -1 diff --git a/p2p/upnp/probe.go b/p2p/upnp/probe.go index 74d4d4c5..d2338b95 100644 --- a/p2p/upnp/probe.go +++ b/p2p/upnp/probe.go @@ -97,11 +97,12 @@ func Probe(logger log.Logger) (caps UPNPCapabilities, err error) { // Deferred cleanup defer func() { - err = nat.DeletePortMapping("tcp", intPort, extPort) - if err != nil { + if err := nat.DeletePortMapping("tcp", intPort, extPort); err != nil { logger.Error(cmn.Fmt("Port mapping delete error: %v", err)) } - listener.Close() + if err := listener.Close(); err != nil { + logger.Error(cmn.Fmt("Listener closing error: %v", err)) + } }() supportsHairpin := testHairpin(listener, fmt.Sprintf("%v:%v", ext, extPort), 
logger) diff --git a/p2p/upnp/upnp.go b/p2p/upnp/upnp.go index 7d44d1e3..cac67a73 100644 --- a/p2p/upnp/upnp.go +++ b/p2p/upnp/upnp.go @@ -40,11 +40,10 @@ func Discover() (nat NAT, err error) { return } socket := conn.(*net.UDPConn) - defer socket.Close() + defer socket.Close() // nolint: errcheck - err = socket.SetDeadline(time.Now().Add(3 * time.Second)) - if err != nil { - return + if err := socket.SetDeadline(time.Now().Add(3 * time.Second)); err != nil { + return nil, err } st := "InternetGatewayDevice:1" @@ -64,6 +63,9 @@ func Discover() (nat NAT, err error) { } var n int _, _, err = socket.ReadFromUDP(answerBytes) + if err != nil { + return + } for { n, _, err = socket.ReadFromUDP(answerBytes) if err != nil { @@ -198,7 +200,8 @@ func getServiceURL(rootURL string) (url, urnDomain string, err error) { if err != nil { return } - defer r.Body.Close() + defer r.Body.Close() // nolint: errcheck + if r.StatusCode >= 400 { err = errors.New(string(r.StatusCode)) return @@ -296,15 +299,21 @@ func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) { var response *http.Response response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain) if response != nil { - defer response.Body.Close() + defer response.Body.Close() // nolint: errcheck } if err != nil { return } var envelope Envelope data, err := ioutil.ReadAll(response.Body) + if err != nil { + return + } reader := bytes.NewReader(data) - xml.NewDecoder(reader).Decode(&envelope) + err = xml.NewDecoder(reader).Decode(&envelope) + if err != nil { + return + } info = statusInfo{envelope.Soap.ExternalIP.IPAddress} @@ -339,7 +348,7 @@ func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int var response *http.Response response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain) if response != nil { - defer response.Body.Close() + defer response.Body.Close() // nolint: errcheck } if err != nil { return @@ -365,7 +374,7 @@ func (n *upnpNAT) 
DeletePortMapping(protocol string, externalPort, internalPort var response *http.Response response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain) if response != nil { - defer response.Body.Close() + defer response.Body.Close() // nolint: errcheck } if err != nil { return diff --git a/p2p/util.go b/p2p/util.go index 2be32026..a4c3ad58 100644 --- a/p2p/util.go +++ b/p2p/util.go @@ -7,9 +7,9 @@ import ( // doubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes. func doubleSha256(b []byte) []byte { hasher := sha256.New() - hasher.Write(b) + hasher.Write(b) // nolint: errcheck, gas sum := hasher.Sum(nil) hasher.Reset() - hasher.Write(sum) + hasher.Write(sum) // nolint: errcheck, gas return hasher.Sum(nil) } diff --git a/proxy/app_conn.go b/proxy/app_conn.go index 9121e8db..2319fed8 100644 --- a/proxy/app_conn.go +++ b/proxy/app_conn.go @@ -12,12 +12,12 @@ type AppConnConsensus interface { SetResponseCallback(abcicli.Callback) Error() error - InitChainSync(types.RequestInitChain) (err error) + InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error) - BeginBlockSync(types.RequestBeginBlock) (err error) + BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error) DeliverTxAsync(tx []byte) *abcicli.ReqRes - EndBlockSync(height uint64) (types.ResponseEndBlock, error) - CommitSync() (res types.Result) + EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error) + CommitSync() (*types.ResponseCommit, error) } type AppConnMempool interface { @@ -33,9 +33,9 @@ type AppConnMempool interface { type AppConnQuery interface { Error() error - EchoSync(string) (res types.Result) - InfoSync(types.RequestInfo) (types.ResponseInfo, error) - QuerySync(types.RequestQuery) (types.ResponseQuery, error) + EchoSync(string) (*types.ResponseEcho, error) + InfoSync(types.RequestInfo) (*types.ResponseInfo, error) + QuerySync(types.RequestQuery) (*types.ResponseQuery, error) // SetOptionSync(key string, value 
string) (res types.Result) } @@ -61,11 +61,11 @@ func (app *appConnConsensus) Error() error { return app.appConn.Error() } -func (app *appConnConsensus) InitChainSync(req types.RequestInitChain) (err error) { +func (app *appConnConsensus) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) { return app.appConn.InitChainSync(req) } -func (app *appConnConsensus) BeginBlockSync(req types.RequestBeginBlock) (err error) { +func (app *appConnConsensus) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { return app.appConn.BeginBlockSync(req) } @@ -73,11 +73,11 @@ func (app *appConnConsensus) DeliverTxAsync(tx []byte) *abcicli.ReqRes { return app.appConn.DeliverTxAsync(tx) } -func (app *appConnConsensus) EndBlockSync(height uint64) (types.ResponseEndBlock, error) { - return app.appConn.EndBlockSync(height) +func (app *appConnConsensus) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) { + return app.appConn.EndBlockSync(req) } -func (app *appConnConsensus) CommitSync() (res types.Result) { +func (app *appConnConsensus) CommitSync() (*types.ResponseCommit, error) { return app.appConn.CommitSync() } @@ -131,14 +131,14 @@ func (app *appConnQuery) Error() error { return app.appConn.Error() } -func (app *appConnQuery) EchoSync(msg string) (res types.Result) { +func (app *appConnQuery) EchoSync(msg string) (*types.ResponseEcho, error) { return app.appConn.EchoSync(msg) } -func (app *appConnQuery) InfoSync(req types.RequestInfo) (types.ResponseInfo, error) { +func (app *appConnQuery) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { return app.appConn.InfoSync(req) } -func (app *appConnQuery) QuerySync(reqQuery types.RequestQuery) (types.ResponseQuery, error) { +func (app *appConnQuery) QuerySync(reqQuery types.RequestQuery) (*types.ResponseQuery, error) { return app.appConn.QuerySync(reqQuery) } diff --git a/proxy/app_conn_test.go b/proxy/app_conn_test.go index 0c700140..0fbad602 100644 --- 
a/proxy/app_conn_test.go +++ b/proxy/app_conn_test.go @@ -17,7 +17,7 @@ import ( type AppConnTest interface { EchoAsync(string) *abcicli.ReqRes FlushSync() error - InfoSync(types.RequestInfo) (types.ResponseInfo, error) + InfoSync(types.RequestInfo) (*types.ResponseInfo, error) } type appConnTest struct { @@ -36,7 +36,7 @@ func (app *appConnTest) FlushSync() error { return app.appConn.FlushSync() } -func (app *appConnTest) InfoSync(req types.RequestInfo) (types.ResponseInfo, error) { +func (app *appConnTest) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { return app.appConn.InfoSync(req) } @@ -51,7 +51,7 @@ func TestEcho(t *testing.T) { // Start server s := server.NewSocketServer(sockPath, dummy.NewDummyApplication()) s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if _, err := s.Start(); err != nil { + if err := s.Start(); err != nil { t.Fatalf("Error starting socket server: %v", err.Error()) } defer s.Stop() @@ -62,7 +62,7 @@ func TestEcho(t *testing.T) { t.Fatalf("Error creating ABCI client: %v", err.Error()) } cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if _, err := cli.Start(); err != nil { + if err := cli.Start(); err != nil { t.Fatalf("Error starting ABCI client: %v", err.Error()) } @@ -72,7 +72,9 @@ func TestEcho(t *testing.T) { for i := 0; i < 1000; i++ { proxy.EchoAsync(cmn.Fmt("echo-%v", i)) } - proxy.FlushSync() + if err := proxy.FlushSync(); err != nil { + t.Error(err) + } } func BenchmarkEcho(b *testing.B) { @@ -83,7 +85,7 @@ func BenchmarkEcho(b *testing.B) { // Start server s := server.NewSocketServer(sockPath, dummy.NewDummyApplication()) s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if _, err := s.Start(); err != nil { + if err := s.Start(); err != nil { b.Fatalf("Error starting socket server: %v", err.Error()) } defer s.Stop() @@ -94,7 +96,7 @@ func BenchmarkEcho(b *testing.B) { b.Fatalf("Error creating ABCI client: %v", err.Error()) } 
cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if _, err := cli.Start(); err != nil { + if err := cli.Start(); err != nil { b.Fatalf("Error starting ABCI client: %v", err.Error()) } @@ -106,7 +108,9 @@ func BenchmarkEcho(b *testing.B) { for i := 0; i < b.N; i++ { proxy.EchoAsync(echoString) } - proxy.FlushSync() + if err := proxy.FlushSync(); err != nil { + b.Error(err) + } b.StopTimer() // info := proxy.InfoSync(types.RequestInfo{""}) @@ -120,7 +124,7 @@ func TestInfo(t *testing.T) { // Start server s := server.NewSocketServer(sockPath, dummy.NewDummyApplication()) s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if _, err := s.Start(); err != nil { + if err := s.Start(); err != nil { t.Fatalf("Error starting socket server: %v", err.Error()) } defer s.Stop() @@ -131,7 +135,7 @@ func TestInfo(t *testing.T) { t.Fatalf("Error creating ABCI client: %v", err.Error()) } cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if _, err := cli.Start(); err != nil { + if err := cli.Start(); err != nil { t.Fatalf("Error starting ABCI client: %v", err.Error()) } diff --git a/proxy/multi_app_conn.go b/proxy/multi_app_conn.go index 32c61520..5d89ef19 100644 --- a/proxy/multi_app_conn.go +++ b/proxy/multi_app_conn.go @@ -76,7 +76,7 @@ func (app *multiAppConn) OnStart() error { return errors.Wrap(err, "Error creating ABCI client (query connection)") } querycli.SetLogger(app.Logger.With("module", "abci-client", "connection", "query")) - if _, err := querycli.Start(); err != nil { + if err := querycli.Start(); err != nil { return errors.Wrap(err, "Error starting ABCI client (query connection)") } app.queryConn = NewAppConnQuery(querycli) @@ -87,7 +87,7 @@ func (app *multiAppConn) OnStart() error { return errors.Wrap(err, "Error creating ABCI client (mempool connection)") } memcli.SetLogger(app.Logger.With("module", "abci-client", "connection", "mempool")) - if _, err := memcli.Start(); err != nil { + if err := memcli.Start(); err != 
nil { return errors.Wrap(err, "Error starting ABCI client (mempool connection)") } app.mempoolConn = NewAppConnMempool(memcli) @@ -98,7 +98,7 @@ func (app *multiAppConn) OnStart() error { return errors.Wrap(err, "Error creating ABCI client (consensus connection)") } concli.SetLogger(app.Logger.With("module", "abci-client", "connection", "consensus")) - if _, err := concli.Start(); err != nil { + if err := concli.Start(); err != nil { return errors.Wrap(err, "Error starting ABCI client (consensus connection)") } app.consensusConn = NewAppConnConsensus(concli) diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index a1002182..40a42c18 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -6,12 +6,15 @@ import ( "github.com/stretchr/testify/require" + abci "github.com/tendermint/abci/types" cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/types" ) +var waitForEventTimeout = 5 * time.Second + // MakeTxKV returns a text transaction, allong with expected key, value pair func MakeTxKV() ([]byte, []byte, []byte) { k := []byte(cmn.RandStr(8)) @@ -25,14 +28,13 @@ func TestHeaderEvents(t *testing.T) { // start for this test it if it wasn't already running if !c.IsRunning() { // if so, then we start it, listen, and stop it. - st, err := c.Start() + err := c.Start() require.Nil(err, "%d: %+v", i, err) - require.True(st, "%d", i) defer c.Stop() } - evtTyp := types.EventStringNewBlockHeader() - evt, err := client.WaitForOneEvent(c, evtTyp, 1*time.Second) + evtTyp := types.EventNewBlockHeader + evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout) require.Nil(err, "%d: %+v", i, err) _, ok := evt.Unwrap().(types.EventDataNewBlockHeader) require.True(ok, "%d: %#v", i, evt) @@ -46,28 +48,27 @@ func TestBlockEvents(t *testing.T) { // start for this test it if it wasn't already running if !c.IsRunning() { // if so, then we start it, listen, and stop it. 
- st, err := c.Start() + err := c.Start() require.Nil(err, "%d: %+v", i, err) - require.True(st, "%d", i) defer c.Stop() } // listen for a new block; ensure height increases by 1 - var firstBlockHeight int - for i := 0; i < 3; i++ { - evtTyp := types.EventStringNewBlock() - evt, err := client.WaitForOneEvent(c, evtTyp, 1*time.Second) - require.Nil(err, "%d: %+v", i, err) + var firstBlockHeight int64 + for j := 0; j < 3; j++ { + evtTyp := types.EventNewBlock + evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout) + require.Nil(err, "%d: %+v", j, err) blockEvent, ok := evt.Unwrap().(types.EventDataNewBlock) - require.True(ok, "%d: %#v", i, evt) + require.True(ok, "%d: %#v", j, evt) block := blockEvent.Block - if i == 0 { + if j == 0 { firstBlockHeight = block.Header.Height continue } - require.Equal(block.Header.Height, firstBlockHeight+i) + require.Equal(block.Header.Height, firstBlockHeight+int64(j)) } } } @@ -78,30 +79,29 @@ func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { // start for this test it if it wasn't already running if !c.IsRunning() { // if so, then we start it, listen, and stop it. 
- st, err := c.Start() + err := c.Start() require.Nil(err, "%d: %+v", i, err) - require.True(st, "%d", i) defer c.Stop() } // make the tx _, _, tx := MakeTxKV() - evtTyp := types.EventStringTx(types.Tx(tx)) + evtTyp := types.EventTx // send async txres, err := c.BroadcastTxAsync(tx) require.Nil(err, "%+v", err) - require.True(txres.Code.IsOK()) + require.Equal(txres.Code, abci.CodeTypeOK) // FIXME // and wait for confirmation - evt, err := client.WaitForOneEvent(c, evtTyp, 1*time.Second) + evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout) require.Nil(err, "%d: %+v", i, err) // and make sure it has the proper info txe, ok := evt.Unwrap().(types.EventDataTx) require.True(ok, "%d: %#v", i, evt) // make sure this is the proper tx require.EqualValues(tx, txe.Tx) - require.True(txe.Code.IsOK()) + require.True(txe.Result.IsOK()) } } @@ -111,29 +111,28 @@ func TestTxEventsSentWithBroadcastTxSync(t *testing.T) { // start for this test it if it wasn't already running if !c.IsRunning() { // if so, then we start it, listen, and stop it. 
- st, err := c.Start() + err := c.Start() require.Nil(err, "%d: %+v", i, err) - require.True(st, "%d", i) defer c.Stop() } // make the tx _, _, tx := MakeTxKV() - evtTyp := types.EventStringTx(types.Tx(tx)) + evtTyp := types.EventTx - // send async + // send sync txres, err := c.BroadcastTxSync(tx) require.Nil(err, "%+v", err) - require.True(txres.Code.IsOK()) + require.Equal(txres.Code, abci.CodeTypeOK) // FIXME // and wait for confirmation - evt, err := client.WaitForOneEvent(c, evtTyp, 1*time.Second) + evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout) require.Nil(err, "%d: %+v", i, err) // and make sure it has the proper info txe, ok := evt.Unwrap().(types.EventDataTx) require.True(ok, "%d: %#v", i, evt) // make sure this is the proper tx require.EqualValues(tx, txe.Tx) - require.True(txe.Code.IsOK()) + require.True(txe.Result.IsOK()) } } diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index bc26ea57..e41c2d65 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -1,20 +1,20 @@ package client import ( + "context" + "fmt" "time" "github.com/pkg/errors" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - events "github.com/tendermint/tmlibs/events" ) // Waiter is informed of current height, decided whether to quit early -type Waiter func(delta int) (abort error) +type Waiter func(delta int64) (abort error) // DefaultWaitStrategy is the standard backoff algorithm, // but you can plug in another one -func DefaultWaitStrategy(delta int) (abort error) { +func DefaultWaitStrategy(delta int64) (abort error) { if delta > 10 { return errors.Errorf("Waiting for %d blocks... 
aborting", delta) } else if delta > 0 { @@ -32,11 +32,11 @@ func DefaultWaitStrategy(delta int) (abort error) { // // If waiter is nil, we use DefaultWaitStrategy, but you can also // provide your own implementation -func WaitForHeight(c StatusClient, h int, waiter Waiter) error { +func WaitForHeight(c StatusClient, h int64, waiter Waiter) error { if waiter == nil { waiter = DefaultWaitStrategy } - delta := 1 + delta := int64(1) for delta > 0 { s, err := c.Status() if err != nil { @@ -56,33 +56,25 @@ func WaitForHeight(c StatusClient, h int, waiter Waiter) error { // when the timeout duration has expired. // // This handles subscribing and unsubscribing under the hood -func WaitForOneEvent(evsw types.EventSwitch, - evtTyp string, timeout time.Duration) (types.TMEventData, error) { - listener := cmn.RandStr(12) - - evts, quit := make(chan events.EventData, 10), make(chan bool, 1) - // start timeout count-down - go func() { - time.Sleep(timeout) - quit <- true - }() +func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (types.TMEventData, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + evts := make(chan interface{}, 1) // register for the next event of this type - evsw.AddListenerForEvent(listener, evtTyp, func(data events.EventData) { - evts <- data - }) + query := fmt.Sprintf("%s='%s'", types.EventTypeKey, evtTyp) + err := c.Subscribe(ctx, query, evts) + if err != nil { + return types.TMEventData{}, errors.Wrap(err, "failed to subscribe") + } + // make sure to unregister after the test is over - defer evsw.RemoveListenerForEvent(evtTyp, listener) - // defer evsw.RemoveListener(listener) // this also works + defer c.Unsubscribe(ctx, query) select { - case <-quit: - return types.TMEventData{}, errors.New("timed out waiting for event") case evt := <-evts: - tmevt, ok := evt.(types.TMEventData) - if ok { - return tmevt, nil - } - return types.TMEventData{}, errors.Errorf("Got unexpected event type: 
%#v", evt) + return evt.(types.TMEventData), nil + case <-ctx.Done(): + return types.TMEventData{}, errors.New("timed out waiting for event") } } diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go index fe186122..cef46247 100644 --- a/rpc/client/helpers_test.go +++ b/rpc/client/helpers_test.go @@ -50,7 +50,7 @@ func TestWaitForHeight(t *testing.T) { // since we can't update in a background goroutine (test --race) // we use the callback to update the status height - myWaiter := func(delta int) error { + myWaiter := func(delta int64) error { // update the height for the next call m.Call.Response = &ctypes.ResultStatus{LatestBlockHeight: 15} return client.DefaultWaitStrategy(delta) @@ -66,11 +66,11 @@ func TestWaitForHeight(t *testing.T) { require.Nil(pre.Error) prer, ok := pre.Response.(*ctypes.ResultStatus) require.True(ok) - assert.Equal(10, prer.LatestBlockHeight) + assert.Equal(int64(10), prer.LatestBlockHeight) post := r.Calls[4] require.Nil(post.Error) postr, ok := post.Response.(*ctypes.ResultStatus) require.True(ok) - assert.Equal(15, postr.LatestBlockHeight) + assert.Equal(int64(15), postr.LatestBlockHeight) } diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index e63fcd4b..1f49ea4d 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "sync" "github.com/pkg/errors" @@ -11,7 +12,7 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" rpcclient "github.com/tendermint/tendermint/rpc/lib/client" "github.com/tendermint/tendermint/types" - events "github.com/tendermint/tmlibs/events" + cmn "github.com/tendermint/tmlibs/common" ) /* @@ -40,10 +41,9 @@ func NewHTTP(remote, wsEndpoint string) *HTTP { } var ( - _ Client = (*HTTP)(nil) - _ NetworkClient = (*HTTP)(nil) - _ types.EventSwitch = (*HTTP)(nil) - _ types.EventSwitch = (*WSEvents)(nil) + _ Client = (*HTTP)(nil) + _ NetworkClient = (*HTTP)(nil) + _ EventsClient = (*HTTP)(nil) ) func (c 
*HTTP) Status() (*ctypes.ResultStatus, error) { @@ -123,7 +123,7 @@ func (c *HTTP) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { return result, nil } -func (c *HTTP) BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) { +func (c *HTTP) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { result := new(ctypes.ResultBlockchainInfo) _, err := c.rpc.Call("blockchain", map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, @@ -143,7 +143,7 @@ func (c *HTTP) Genesis() (*ctypes.ResultGenesis, error) { return result, nil } -func (c *HTTP) Block(height *int) (*ctypes.ResultBlock, error) { +func (c *HTTP) Block(height *int64) (*ctypes.ResultBlock, error) { result := new(ctypes.ResultBlock) _, err := c.rpc.Call("block", map[string]interface{}{"height": height}, result) if err != nil { @@ -152,7 +152,7 @@ func (c *HTTP) Block(height *int) (*ctypes.ResultBlock, error) { return result, nil } -func (c *HTTP) Commit(height *int) (*ctypes.ResultCommit, error) { +func (c *HTTP) Commit(height *int64) (*ctypes.ResultCommit, error) { result := new(ctypes.ResultCommit) _, err := c.rpc.Call("commit", map[string]interface{}{"height": height}, result) if err != nil { @@ -163,18 +163,31 @@ func (c *HTTP) Commit(height *int) (*ctypes.ResultCommit, error) { func (c *HTTP) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { result := new(ctypes.ResultTx) - query := map[string]interface{}{ + params := map[string]interface{}{ "hash": hash, "prove": prove, } - _, err := c.rpc.Call("tx", query, result) + _, err := c.rpc.Call("tx", params, result) if err != nil { return nil, errors.Wrap(err, "Tx") } return result, nil } -func (c *HTTP) Validators(height *int) (*ctypes.ResultValidators, error) { +func (c *HTTP) TxSearch(query string, prove bool) ([]*ctypes.ResultTx, error) { + results := new([]*ctypes.ResultTx) + params := map[string]interface{}{ + "query": query, + "prove": prove, + } + _, err := 
c.rpc.Call("tx_search", params, results) + if err != nil { + return nil, errors.Wrap(err, "TxSearch") + } + return *results, nil +} + +func (c *HTTP) Validators(height *int64) (*ctypes.ResultValidators, error) { result := new(ctypes.ResultValidators) _, err := c.rpc.Call("validators", map[string]interface{}{"height": height}, result) if err != nil { @@ -186,128 +199,113 @@ func (c *HTTP) Validators(height *int) (*ctypes.ResultValidators, error) { /** websocket event stuff here... **/ type WSEvents struct { - types.EventSwitch + cmn.BaseService remote string endpoint string ws *rpcclient.WSClient + subscriptions map[string]chan<- interface{} + mtx sync.RWMutex + // used for signaling the goroutine that feeds ws -> EventSwitch quit chan bool done chan bool - - // used to maintain counts of actively listened events - // so we can properly subscribe/unsubscribe - // FIXME: thread-safety??? - // FIXME: reuse code from tmlibs/events??? - evtCount map[string]int // count how many time each event is subscribed - listeners map[string][]string // keep track of which events each listener is listening to } func newWSEvents(remote, endpoint string) *WSEvents { - return &WSEvents{ - EventSwitch: types.NewEventSwitch(), - endpoint: endpoint, - remote: remote, - quit: make(chan bool, 1), - done: make(chan bool, 1), - evtCount: map[string]int{}, - listeners: map[string][]string{}, + wsEvents := &WSEvents{ + endpoint: endpoint, + remote: remote, + quit: make(chan bool, 1), + done: make(chan bool, 1), + subscriptions: make(map[string]chan<- interface{}), } + + wsEvents.BaseService = *cmn.NewBaseService(nil, "WSEvents", wsEvents) + return wsEvents } // Start is the only way I could think the extend OnStart from // events.eventSwitch. If only it wasn't private... // BaseService.Start -> eventSwitch.OnStart -> WSEvents.Start -func (w *WSEvents) Start() (bool, error) { - st, err := w.EventSwitch.Start() - // if we did start, then OnStart here... 
- if st && err == nil { - ws := rpcclient.NewWSClient(w.remote, w.endpoint, rpcclient.OnReconnect(func() { - w.redoSubscriptions() - })) - _, err = ws.Start() - if err == nil { - w.ws = ws - go w.eventListener() - } +func (w *WSEvents) Start() error { + ws := rpcclient.NewWSClient(w.remote, w.endpoint, rpcclient.OnReconnect(func() { + w.redoSubscriptions() + })) + err := ws.Start() + if err == nil { + w.ws = ws + go w.eventListener() } - return st, errors.Wrap(err, "StartWSEvent") + return err } // Stop wraps the BaseService/eventSwitch actions as Start does -func (w *WSEvents) Stop() bool { - stop := w.EventSwitch.Stop() - if stop { - // send a message to quit to stop the eventListener - w.quit <- true - <-w.done - w.ws.Stop() - w.ws = nil - } - return stop +func (w *WSEvents) Stop() error { + // send a message to quit to stop the eventListener + w.quit <- true + <-w.done + w.ws.Stop() + w.ws = nil + return nil } -/** TODO: more intelligent subscriptions! **/ -func (w *WSEvents) AddListenerForEvent(listenerID, event string, cb events.EventCallback) { - // no one listening -> subscribe - if w.evtCount[event] == 0 { - w.subscribe(event) +func (w *WSEvents) Subscribe(ctx context.Context, query string, out chan<- interface{}) error { + if ch := w.getSubscription(query); ch != nil { + return errors.New("already subscribed") } - // if this listener was already listening to this event, return early - for _, s := range w.listeners[listenerID] { - if event == s { - return - } + + err := w.ws.Subscribe(ctx, query) + if err != nil { + return errors.Wrap(err, "failed to subscribe") } - // otherwise, add this event to this listener - w.evtCount[event] += 1 - w.listeners[listenerID] = append(w.listeners[listenerID], event) - w.EventSwitch.AddListenerForEvent(listenerID, event, cb) + + w.mtx.Lock() + w.subscriptions[query] = out + w.mtx.Unlock() + + return nil } -func (w *WSEvents) RemoveListenerForEvent(event string, listenerID string) { - // if this listener is listening 
already, splice it out - found := false - l := w.listeners[listenerID] - for i, s := range l { - if event == s { - found = true - w.listeners[listenerID] = append(l[:i], l[i+1:]...) - break - } - } - // if the listener wasn't already listening to the event, exit early - if !found { - return +func (w *WSEvents) Unsubscribe(ctx context.Context, query string) error { + err := w.ws.Unsubscribe(ctx, query) + if err != nil { + return err } - // now we can update the subscriptions - w.evtCount[event] -= 1 - if w.evtCount[event] == 0 { - w.unsubscribe(event) + w.mtx.Lock() + defer w.mtx.Unlock() + ch, ok := w.subscriptions[query] + if ok { + close(ch) + delete(w.subscriptions, query) } - w.EventSwitch.RemoveListenerForEvent(event, listenerID) + + return nil } -func (w *WSEvents) RemoveListener(listenerID string) { - // remove all counts for this listener - for _, s := range w.listeners[listenerID] { - w.evtCount[s] -= 1 - if w.evtCount[s] == 0 { - w.unsubscribe(s) - } +func (w *WSEvents) UnsubscribeAll(ctx context.Context) error { + err := w.ws.UnsubscribeAll(ctx) + if err != nil { + return err } - w.listeners[listenerID] = nil - // then let the switch do it's magic - w.EventSwitch.RemoveListener(listenerID) + w.mtx.Lock() + defer w.mtx.Unlock() + for _, ch := range w.subscriptions { + close(ch) + } + w.subscriptions = make(map[string]chan<- interface{}) + return nil } -// After being reconnected, it is necessary to redo subscription -// to server otherwise no data will be automatically received +// After being reconnected, it is necessary to redo subscription to server +// otherwise no data will be automatically received. func (w *WSEvents) redoSubscriptions() { - for event, _ := range w.evtCount { - w.subscribe(event) + for query := range w.subscriptions { + // NOTE: no timeout for resubscribing + // FIXME: better logging/handling of errors?? 
+ w.ws.Subscribe(context.Background(), query) } } @@ -325,10 +323,15 @@ func (w *WSEvents) eventListener() { fmt.Printf("ws err: %+v\n", resp.Error.Error()) continue } - err := w.parseEvent(*resp.Result) + result := new(ctypes.ResultEvent) + err := json.Unmarshal(resp.Result, result) if err != nil { - // FIXME: better logging/handling of errors?? - fmt.Printf("ws result: %+v\n", err) + // ignore silently (eg. subscribe, unsubscribe and maybe other events) + // TODO: ? + continue + } + if ch := w.getSubscription(result.Query); ch != nil { + ch <- result.Data } case <-w.quit: // send a message so we can wait for the routine to exit @@ -339,34 +342,8 @@ func (w *WSEvents) eventListener() { } } -// parseEvent unmarshals the json message and converts it into -// some implementation of types.TMEventData, and sends it off -// on the merry way to the EventSwitch -func (w *WSEvents) parseEvent(data []byte) (err error) { - result := new(ctypes.ResultEvent) - err = json.Unmarshal(data, result) - if err != nil { - // ignore silently (eg. subscribe, unsubscribe and maybe other events) - // TODO: ? - return nil - } - // looks good! let's fire this baby! - w.EventSwitch.FireEvent(result.Name, result.Data) - return nil -} - -// no way of exposing these failures, so we panic. -// is this right? or silently ignore??? -func (w *WSEvents) subscribe(event string) { - err := w.ws.Subscribe(context.TODO(), event) - if err != nil { - panic(err) - } -} - -func (w *WSEvents) unsubscribe(event string) { - err := w.ws.Unsubscribe(context.TODO(), event) - if err != nil { - panic(err) - } +func (w *WSEvents) getSubscription(query string) chan<- interface{} { + w.mtx.RLock() + defer w.mtx.RUnlock() + return w.subscriptions[query] } diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 10689a56..c38f188e 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -20,9 +20,12 @@ implementation. 
package client import ( + "context" + data "github.com/tendermint/go-wire/data" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tmlibs/common" ) // ABCIClient groups together the functionality that principally @@ -43,16 +46,17 @@ type ABCIClient interface { // SignClient groups together the interfaces need to get valid // signatures and prove anything about the chain type SignClient interface { - Block(height *int) (*ctypes.ResultBlock, error) - Commit(height *int) (*ctypes.ResultCommit, error) - Validators(height *int) (*ctypes.ResultValidators, error) + Block(height *int64) (*ctypes.ResultBlock, error) + Commit(height *int64) (*ctypes.ResultCommit, error) + Validators(height *int64) (*ctypes.ResultValidators, error) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) + TxSearch(query string, prove bool) ([]*ctypes.ResultTx, error) } // HistoryClient shows us data from genesis to now in large chunks. type HistoryClient interface { Genesis() (*ctypes.ResultGenesis, error) - BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) + BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) } type StatusClient interface { @@ -64,14 +68,12 @@ type StatusClient interface { // if you want to listen for events, test if it also // implements events.EventSwitch type Client interface { + cmn.Service ABCIClient SignClient HistoryClient StatusClient - - // this Client is reactive, you can subscribe to any TMEventData - // type, given the proper string. see tendermint/types/events.go - types.EventSwitch + EventsClient } // NetworkClient is general info about the network state. May not @@ -83,3 +85,11 @@ type NetworkClient interface { NetInfo() (*ctypes.ResultNetInfo, error) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) } + +// EventsClient is reactive, you can subscribe to any message, given the proper +// string. 
see tendermint/types/events.go +type EventsClient interface { + Subscribe(ctx context.Context, query string, out chan<- interface{}) error + Unsubscribe(ctx context.Context, query string) error + UnsubscribeAll(ctx context.Context) error +} diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go index c6adfc5f..40c24912 100644 --- a/rpc/client/localclient.go +++ b/rpc/client/localclient.go @@ -1,22 +1,32 @@ package client import ( + "context" + + "github.com/pkg/errors" + data "github.com/tendermint/go-wire/data" nm "github.com/tendermint/tendermint/node" "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" + tmquery "github.com/tendermint/tmlibs/pubsub/query" +) + +const ( + // event bus subscriber + subscriber = "rpc-localclient" ) /* Local is a Client implementation that directly executes the rpc -functions on a given node, without going through HTTP or GRPC +functions on a given node, without going through HTTP or GRPC. This implementation is useful for: * Running tests against a node in-process without the overhead of going through an http server -* Communication between an ABCI app and tendermin core when they +* Communication between an ABCI app and Tendermint core when they are compiled in process. For real clients, you probably want to use client.HTTP. For more @@ -24,7 +34,9 @@ powerful control during testing, you probably want the "client/mock" package. */ type Local struct { node *nm.Node - types.EventSwitch + + *types.EventBus + subscriptions map[string]*tmquery.Query } // NewLocal configures a client that calls the Node directly. @@ -33,24 +45,26 @@ type Local struct { // you can only have one node per process. So make sure test cases // don't run in parallel, or try to simulate an entire network in // one process... 
-func NewLocal(node *nm.Node) Local { +func NewLocal(node *nm.Node) *Local { node.ConfigureRPC() - return Local{ - node: node, - EventSwitch: node.EventSwitch(), + return &Local{ + node: node, + EventBus: node.EventBus(), + subscriptions: make(map[string]*tmquery.Query), } } var ( - _ Client = Local{} + _ Client = (*Local)(nil) _ NetworkClient = Local{} + _ EventsClient = (*Local)(nil) ) -func (c Local) Status() (*ctypes.ResultStatus, error) { +func (Local) Status() (*ctypes.ResultStatus, error) { return core.Status() } -func (c Local) ABCIInfo() (*ctypes.ResultABCIInfo, error) { +func (Local) ABCIInfo() (*ctypes.ResultABCIInfo, error) { return core.ABCIInfo() } @@ -58,54 +72,90 @@ func (c Local) ABCIQuery(path string, data data.Bytes) (*ctypes.ResultABCIQuery, return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) } -func (c Local) ABCIQueryWithOptions(path string, data data.Bytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { +func (Local) ABCIQueryWithOptions(path string, data data.Bytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { return core.ABCIQuery(path, data, opts.Height, opts.Trusted) } -func (c Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { return core.BroadcastTxCommit(tx) } -func (c Local) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (Local) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return core.BroadcastTxAsync(tx) } -func (c Local) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (Local) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return core.BroadcastTxSync(tx) } -func (c Local) NetInfo() (*ctypes.ResultNetInfo, error) { +func (Local) NetInfo() (*ctypes.ResultNetInfo, error) { return core.NetInfo() } -func (c Local) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { +func (Local) 
DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { return core.DumpConsensusState() } -func (c Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { +func (Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { return core.UnsafeDialSeeds(seeds) } -func (c Local) BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) { +func (Local) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { return core.BlockchainInfo(minHeight, maxHeight) } -func (c Local) Genesis() (*ctypes.ResultGenesis, error) { +func (Local) Genesis() (*ctypes.ResultGenesis, error) { return core.Genesis() } -func (c Local) Block(height *int) (*ctypes.ResultBlock, error) { +func (Local) Block(height *int64) (*ctypes.ResultBlock, error) { return core.Block(height) } -func (c Local) Commit(height *int) (*ctypes.ResultCommit, error) { +func (Local) Commit(height *int64) (*ctypes.ResultCommit, error) { return core.Commit(height) } -func (c Local) Validators(height *int) (*ctypes.ResultValidators, error) { +func (Local) Validators(height *int64) (*ctypes.ResultValidators, error) { return core.Validators(height) } -func (c Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { +func (Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { return core.Tx(hash, prove) } + +func (Local) TxSearch(query string, prove bool) ([]*ctypes.ResultTx, error) { + return core.TxSearch(query, prove) +} + +func (c *Local) Subscribe(ctx context.Context, query string, out chan<- interface{}) error { + q, err := tmquery.New(query) + if err != nil { + return errors.Wrap(err, "failed to subscribe") + } + if err = c.EventBus.Subscribe(ctx, subscriber, q, out); err != nil { + return errors.Wrap(err, "failed to subscribe") + } + c.subscriptions[query] = q + return nil +} + +func (c *Local) Unsubscribe(ctx context.Context, query string) error { + q, ok := c.subscriptions[query] + if !ok { + return errors.New("subscription 
not found") + } + if err := c.EventBus.Unsubscribe(ctx, subscriber, q); err != nil { + return errors.Wrap(err, "failed to unsubscribe") + } + delete(c.subscriptions, query) + return nil +} + +func (c *Local) UnsubscribeAll(ctx context.Context) error { + if err := c.EventBus.UnsubscribeAll(ctx, subscriber); err != nil { + return errors.Wrap(err, "failed to unsubscribe") + } + c.subscriptions = make(map[string]*tmquery.Query) + return nil +} diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 2ed012e4..4593d059 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -32,13 +32,13 @@ func (a ABCIApp) ABCIQuery(path string, data data.Bytes) (*ctypes.ResultABCIQuer func (a ABCIApp) ABCIQueryWithOptions(path string, data data.Bytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { q := a.App.Query(abci.RequestQuery{data, path, opts.Height, opts.Trusted}) - return &ctypes.ResultABCIQuery{q.Result()}, nil + return &ctypes.ResultABCIQuery{q}, nil } func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { res := ctypes.ResultBroadcastTxCommit{} res.CheckTx = a.App.CheckTx(tx) - if !res.CheckTx.IsOK() { + if res.CheckTx.IsErr() { return &res, nil } res.DeliverTx = a.App.DeliverTx(tx) @@ -48,8 +48,8 @@ func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { c := a.App.CheckTx(tx) // and this gets written in a background thread... - if c.IsOK() { - go func() { a.App.DeliverTx(tx) }() + if !c.IsErr() { + go func() { a.App.DeliverTx(tx) }() // nolint: errcheck } return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil } @@ -57,8 +57,8 @@ func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { c := a.App.CheckTx(tx) // and this gets written in a background thread... 
- if c.IsOK() { - go func() { a.App.DeliverTx(tx) }() + if !c.IsErr() { + go func() { a.App.DeliverTx(tx) }() // nolint: errcheck } return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil } @@ -91,7 +91,7 @@ func (m ABCIMock) ABCIQueryWithOptions(path string, data data.Bytes, opts client return nil, err } resQuery := res.(abci.ResponseQuery) - return &ctypes.ResultABCIQuery{resQuery.Result()}, nil + return &ctypes.ResultABCIQuery{resQuery}, nil } func (m ABCIMock) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { @@ -135,7 +135,7 @@ func NewABCIRecorder(client client.ABCIClient) *ABCIRecorder { type QueryArgs struct { Path string Data data.Bytes - Height uint64 + Height int64 Trusted bool } diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index a7afa089..0f83cc5f 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -22,7 +22,7 @@ func TestABCIMock(t *testing.T) { assert, require := assert.New(t), require.New(t) key, value := []byte("foo"), []byte("bar") - height := uint64(10) + height := int64(10) goodTx := types.Tx{0x01, 0xff} badTx := types.Tx{0x12, 0x21} @@ -37,8 +37,8 @@ func TestABCIMock(t *testing.T) { BroadcastCommit: mock.Call{ Args: goodTx, Response: &ctypes.ResultBroadcastTxCommit{ - CheckTx: abci.Result{Data: data.Bytes("stand")}, - DeliverTx: abci.Result{Data: data.Bytes("deliver")}, + CheckTx: abci.ResponseCheckTx{Data: data.Bytes("stand")}, + DeliverTx: abci.ResponseDeliverTx{Data: data.Bytes("deliver")}, }, Error: errors.New("bad tx"), }, @@ -51,7 +51,8 @@ func TestABCIMock(t *testing.T) { assert.Equal("foobar", err.Error()) // query always returns the response - query, err := m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Trusted: true}) + _query, err := m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Trusted: true}) + query := _query.Response require.Nil(err) require.NotNil(query) assert.EqualValues(key, query.Key) @@ -79,6 +80,8 @@ func 
TestABCIMock(t *testing.T) { func TestABCIRecorder(t *testing.T) { assert, require := assert.New(t), require.New(t) + + // This mock returns errors on everything but Query m := mock.ABCIMock{ Info: mock.Call{Response: abci.ResponseInfo{ Data: "data", @@ -92,8 +95,11 @@ func TestABCIRecorder(t *testing.T) { require.Equal(0, len(r.Calls)) - r.ABCIInfo() - r.ABCIQueryWithOptions("path", data.Bytes("data"), client.ABCIQueryOptions{Trusted: false}) + _, err := r.ABCIInfo() + assert.Nil(err, "expected no err on info") + + _, err = r.ABCIQueryWithOptions("path", data.Bytes("data"), client.ABCIQueryOptions{Trusted: false}) + assert.NotNil(err, "expected error on query") require.Equal(2, len(r.Calls)) info := r.Calls[0] @@ -118,11 +124,14 @@ func TestABCIRecorder(t *testing.T) { assert.EqualValues("data", qa.Data) assert.False(qa.Trusted) - // now add some broadcasts + // now add some broadcasts (should all err) txs := []types.Tx{{1}, {2}, {3}} - r.BroadcastTxCommit(txs[0]) - r.BroadcastTxSync(txs[1]) - r.BroadcastTxAsync(txs[2]) + _, err = r.BroadcastTxCommit(txs[0]) + assert.NotNil(err, "expected err on broadcast") + _, err = r.BroadcastTxSync(txs[1]) + assert.NotNil(err, "expected err on broadcast") + _, err = r.BroadcastTxAsync(txs[2]) + assert.NotNil(err, "expected err on broadcast") require.Equal(5, len(r.Calls)) @@ -160,12 +169,13 @@ func TestABCIApp(t *testing.T) { tx := fmt.Sprintf("%s=%s", key, value) res, err := m.BroadcastTxCommit(types.Tx(tx)) require.Nil(err) - assert.True(res.CheckTx.Code.IsOK()) + assert.True(res.CheckTx.IsOK()) require.NotNil(res.DeliverTx) - assert.True(res.DeliverTx.Code.IsOK()) + assert.True(res.DeliverTx.IsOK()) // check the key - qres, err := m.ABCIQueryWithOptions("/key", data.Bytes(key), client.ABCIQueryOptions{Trusted: true}) + _qres, err := m.ABCIQueryWithOptions("/key", data.Bytes(key), client.ABCIQueryOptions{Trusted: true}) + qres := _qres.Response require.Nil(err) assert.EqualValues(value, qres.Value) } diff --git 
a/rpc/client/mock/client.go b/rpc/client/mock/client.go index b5973474..dc75e04c 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -21,6 +21,7 @@ import ( "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tmlibs/common" ) // Client wraps arbitrary implementations of the various interfaces. @@ -33,8 +34,8 @@ type Client struct { client.SignClient client.HistoryClient client.StatusClient - // create a mock with types.NewEventSwitch() - types.EventSwitch + client.EventsClient + cmn.Service } var _ client.Client = Client{} @@ -110,7 +111,7 @@ func (c Client) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { return core.UnsafeDialSeeds(seeds) } -func (c Client) BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) { +func (c Client) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { return core.BlockchainInfo(minHeight, maxHeight) } @@ -118,14 +119,14 @@ func (c Client) Genesis() (*ctypes.ResultGenesis, error) { return core.Genesis() } -func (c Client) Block(height *int) (*ctypes.ResultBlock, error) { +func (c Client) Block(height *int64) (*ctypes.ResultBlock, error) { return core.Block(height) } -func (c Client) Commit(height *int) (*ctypes.ResultCommit, error) { +func (c Client) Commit(height *int64) (*ctypes.ResultCommit, error) { return core.Commit(height) } -func (c Client) Validators(height *int) (*ctypes.ResultValidators, error) { +func (c Client) Validators(height *int64) (*ctypes.ResultValidators, error) { return core.Validators(height) } diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index d329a120..c32d08bd 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -1,13 +1,16 @@ package client_test import ( + "fmt" "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + abci 
"github.com/tendermint/abci/types" "github.com/tendermint/iavl" + "github.com/tendermint/tendermint/rpc/client" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" @@ -18,7 +21,7 @@ func getHTTPClient() *client.HTTP { return client.NewHTTP(rpcAddr, "/websocket") } -func getLocalClient() client.Local { +func getLocalClient() *client.Local { return client.NewLocal(node) } @@ -108,8 +111,9 @@ func TestABCIQuery(t *testing.T) { // wait before querying client.WaitForHeight(c, apph, nil) - qres, err := c.ABCIQuery("/key", k) - if assert.Nil(t, err) && assert.True(t, qres.Code.IsOK()) { + res, err := c.ABCIQuery("/key", k) + qres := res.Response + if assert.Nil(t, err) && assert.True(t, qres.IsOK()) { assert.EqualValues(t, v, qres.Value) } } @@ -135,14 +139,17 @@ func TestAppCalls(t *testing.T) { k, v, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(tx) require.Nil(err, "%d: %+v", i, err) - require.True(bres.DeliverTx.Code.IsOK()) + require.True(bres.DeliverTx.IsOK()) txh := bres.Height apph := txh + 1 // this is where the tx will be applied to the state // wait before querying - client.WaitForHeight(c, apph, nil) - qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: true}) - if assert.Nil(err) && assert.True(qres.Code.IsOK()) { + if err := client.WaitForHeight(c, apph, nil); err != nil { + t.Error(err) + } + _qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: true}) + qres := _qres.Response + if assert.Nil(err) && assert.True(qres.IsOK()) { // assert.Equal(k, data.GetKey()) // only returned for proofs assert.EqualValues(v, qres.Value) } @@ -151,7 +158,7 @@ func TestAppCalls(t *testing.T) { // ptx, err := c.Tx(bres.Hash, true) ptx, err := c.Tx(bres.Hash, true) require.Nil(err, "%d: %+v", i, err) - assert.Equal(txh, ptx.Height) + assert.EqualValues(txh, ptx.Height) assert.EqualValues(tx, ptx.Tx) // and we can even check the block is added @@ -189,8 +196,9 @@ func 
TestAppCalls(t *testing.T) { assert.Equal(block.Block.LastCommit, commit2.Commit) // and we got a proof that works! - pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: false}) - if assert.Nil(err) && assert.True(pres.Code.IsOK()) { + _pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: false}) + pres := _pres.Response + if assert.Nil(err) && assert.True(pres.IsOK()) { proof, err := iavl.ReadKeyExistsProof(pres.Proof) if assert.Nil(err) { key := pres.Key @@ -213,7 +221,7 @@ func TestBroadcastTxSync(t *testing.T) { _, _, tx := MakeTxKV() bres, err := c.BroadcastTxSync(tx) require.Nil(err, "%d: %+v", i, err) - require.True(bres.Code.IsOK()) + require.Equal(bres.Code, abci.CodeTypeOK) // FIXME require.Equal(initMempoolSize+1, mempool.Size()) @@ -231,8 +239,8 @@ func TestBroadcastTxCommit(t *testing.T) { _, _, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(tx) require.Nil(err, "%d: %+v", i, err) - require.True(bres.CheckTx.Code.IsOK()) - require.True(bres.DeliverTx.Code.IsOK()) + require.True(bres.CheckTx.IsOK()) + require.True(bres.DeliverTx.IsOK()) require.Equal(0, mempool.Size()) } @@ -278,10 +286,10 @@ func TestTx(t *testing.T) { require.NotNil(err) } else { require.Nil(err, "%+v", err) - assert.Equal(txHeight, ptx.Height) + assert.EqualValues(txHeight, ptx.Height) assert.EqualValues(tx, ptx.Tx) - assert.Equal(0, ptx.Index) - assert.True(ptx.TxResult.Code.IsOK()) + assert.Zero(ptx.Index) + assert.True(ptx.TxResult.IsOK()) // time to verify the proof proof := ptx.Proof @@ -292,3 +300,50 @@ func TestTx(t *testing.T) { } } } + +func TestTxSearch(t *testing.T) { + // first we broadcast a tx + c := getHTTPClient() + _, _, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(tx) + require.Nil(t, err, "%+v", err) + + txHeight := bres.Height + txHash := bres.Hash + + anotherTxHash := types.Tx("a different tx").Hash() + + for i, c := range GetClients() { + t.Logf("client %d", i) + + // now we query for the tx. 
+ // since there's only one tx, we know index=0. + results, err := c.TxSearch(fmt.Sprintf("tx.hash='%v'", txHash), true) + require.Nil(t, err, "%+v", err) + require.Len(t, results, 1) + + ptx := results[0] + assert.EqualValues(t, txHeight, ptx.Height) + assert.EqualValues(t, tx, ptx.Tx) + assert.Zero(t, ptx.Index) + assert.True(t, ptx.TxResult.IsOK()) + + // time to verify the proof + proof := ptx.Proof + if assert.EqualValues(t, tx, proof.Data) { + assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) + } + + // we query for non existing tx + results, err = c.TxSearch(fmt.Sprintf("tx.hash='%X'", anotherTxHash), false) + require.Nil(t, err, "%+v", err) + require.Len(t, results, 0) + + // we query using a tag (see dummy application) + results, err = c.TxSearch("app.creator='jae'", false) + require.Nil(t, err, "%+v", err) + if len(results) == 0 { + t.Fatal("expected a lot of transactions") + } + } +} diff --git a/rpc/client/types.go b/rpc/client/types.go index dc573edd..89bd2f98 100644 --- a/rpc/client/types.go +++ b/rpc/client/types.go @@ -3,7 +3,7 @@ package client // ABCIQueryOptions can be used to provide options for ABCIQuery call other // than the DefaultABCIQueryOptions. 
type ABCIQueryOptions struct { - Height uint64 + Height int64 Trusted bool } diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 564c0bc6..a49b52b6 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -45,9 +45,9 @@ import ( // |-----------+--------+---------+----------+------------------------------------------------| // | path | string | false | false | Path to the data ("/a/b/c") | // | data | []byte | false | true | Data | -// | height | uint64 | 0 | false | Height (0 means latest) | +// | height | int64 | 0 | false | Height (0 means latest) | // | trusted | bool | false | false | Does not include a proof of the data inclusion | -func ABCIQuery(path string, data data.Bytes, height uint64, trusted bool) (*ctypes.ResultABCIQuery, error) { +func ABCIQuery(path string, data data.Bytes, height int64, trusted bool) (*ctypes.ResultABCIQuery, error) { resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{ Path: path, Data: data, @@ -58,9 +58,7 @@ func ABCIQuery(path string, data data.Bytes, height uint64, trusted bool) (*ctyp return nil, err } logger.Info("ABCIQuery", "path", path, "data", data, "result", resQuery) - return &ctypes.ResultABCIQuery{ - resQuery.Result(), - }, nil + return &ctypes.ResultABCIQuery{*resQuery}, nil } // Get some info about the application. 
@@ -93,5 +91,5 @@ func ABCIInfo() (*ctypes.ResultABCIInfo, error) { if err != nil { return nil, err } - return &ctypes.ResultABCIInfo{resInfo}, nil + return &ctypes.ResultABCIInfo{*resInfo}, nil } diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index 6b5e2166..9d409845 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -61,20 +61,27 @@ import ( // ``` // // -func BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, error) { +func BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + if minHeight == 0 { + minHeight = 1 + } + if maxHeight == 0 { maxHeight = blockStore.Height() } else { - maxHeight = cmn.MinInt(blockStore.Height(), maxHeight) - } - if minHeight == 0 { - minHeight = cmn.MaxInt(1, maxHeight-20) - } else { - minHeight = cmn.MaxInt(minHeight, maxHeight-20) + maxHeight = cmn.MinInt64(blockStore.Height(), maxHeight) } + // maximum 20 block metas + const limit int64 = 20 + minHeight = cmn.MaxInt64(minHeight, maxHeight-limit) + logger.Debug("BlockchainInfoHandler", "maxHeight", maxHeight, "minHeight", minHeight) + if minHeight > maxHeight { + return nil, fmt.Errorf("min height %d can't be greater than max height %d", minHeight, maxHeight) + } + blockMetas := []*types.BlockMeta{} for height := maxHeight; height >= minHeight; height-- { blockMeta := blockStore.LoadBlockMeta(height) @@ -184,7 +191,7 @@ func BlockchainInfo(minHeight, maxHeight int) (*ctypes.ResultBlockchainInfo, err // "jsonrpc": "2.0" // } // ``` -func Block(heightPtr *int) (*ctypes.ResultBlock, error) { +func Block(heightPtr *int64) (*ctypes.ResultBlock, error) { if heightPtr == nil { height := blockStore.Height() blockMeta := blockStore.LoadBlockMeta(height) @@ -275,7 +282,7 @@ func Block(heightPtr *int) (*ctypes.ResultBlock, error) { // "jsonrpc": "2.0" // } // ``` -func Commit(heightPtr *int) (*ctypes.ResultCommit, error) { +func Commit(heightPtr *int64) (*ctypes.ResultCommit, error) { if heightPtr == nil { height := 
blockStore.Height() header := blockStore.LoadBlockMeta(height).Header diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index 75ce08a9..755f1589 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -42,7 +42,7 @@ import ( // "jsonrpc": "2.0" // } // ``` -func Validators(heightPtr *int) (*ctypes.ResultValidators, error) { +func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { if heightPtr == nil { blockHeight, validators := consensusState.GetValidators() return &ctypes.ResultValidators{blockHeight, validators}, nil diff --git a/rpc/core/dev.go b/rpc/core/dev.go index a3c970d4..0b515476 100644 --- a/rpc/core/dev.go +++ b/rpc/core/dev.go @@ -29,7 +29,9 @@ func UnsafeStartCPUProfiler(filename string) (*ctypes.ResultUnsafeProfile, error func UnsafeStopCPUProfiler() (*ctypes.ResultUnsafeProfile, error) { pprof.StopCPUProfile() - profFile.Close() + if err := profFile.Close(); err != nil { + return nil, err + } return &ctypes.ResultUnsafeProfile{}, nil } @@ -38,8 +40,12 @@ func UnsafeWriteHeapProfile(filename string) (*ctypes.ResultUnsafeProfile, error if err != nil { return nil, err } - pprof.WriteHeapProfile(memProfFile) - memProfFile.Close() + if err := pprof.WriteHeapProfile(memProfFile); err != nil { + return nil, err + } + if err := memProfFile.Close(); err != nil { + return nil, err + } return &ctypes.ResultUnsafeProfile{}, nil } diff --git a/rpc/core/events.go b/rpc/core/events.go index 00fd9a08..81f1c919 100644 --- a/rpc/core/events.go +++ b/rpc/core/events.go @@ -1,9 +1,14 @@ package core import ( + "context" + + "github.com/pkg/errors" + ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/lib/types" - "github.com/tendermint/tendermint/types" + tmtypes "github.com/tendermint/tendermint/types" + tmquery "github.com/tendermint/tmlibs/pubsub/query" ) // Subscribe for events via WebSocket. 
@@ -33,14 +38,35 @@ import ( // | event | string | "" | true | Event name | // // -func Subscribe(wsCtx rpctypes.WSRPCContext, event string) (*ctypes.ResultSubscribe, error) { - logger.Info("Subscribe to event", "remote", wsCtx.GetRemoteAddr(), "event", event) - types.AddListenerForEvent(wsCtx.GetEventSwitch(), wsCtx.GetRemoteAddr(), event, func(msg types.TMEventData) { - // NOTE: EventSwitch callbacks must be nonblocking - // NOTE: RPCResponses of subscribed events have id suffix "#event" - tmResult := &ctypes.ResultEvent{event, msg} - wsCtx.TryWriteRPCResponse(rpctypes.NewRPCSuccessResponse(wsCtx.Request.ID+"#event", tmResult)) - }) +func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscribe, error) { + addr := wsCtx.GetRemoteAddr() + logger.Info("Subscribe to query", "remote", addr, "query", query) + + q, err := tmquery.New(query) + if err != nil { + return nil, errors.Wrap(err, "failed to parse a query") + } + + err = wsCtx.AddSubscription(query, q) + if err != nil { + return nil, errors.Wrap(err, "failed to add subscription") + } + + ctx, cancel := context.WithTimeout(context.Background(), subscribeTimeout) + defer cancel() + ch := make(chan interface{}) + err = eventBus.Subscribe(ctx, addr, q, ch) + if err != nil { + return nil, errors.Wrap(err, "failed to subscribe") + } + + go func() { + for event := range ch { + tmResult := &ctypes.ResultEvent{query, event.(tmtypes.TMEventData)} + wsCtx.TryWriteRPCResponse(rpctypes.NewRPCSuccessResponse(wsCtx.Request.ID+"#event", tmResult)) + } + }() + return &ctypes.ResultSubscribe{}, nil } @@ -71,8 +97,21 @@ func Subscribe(wsCtx rpctypes.WSRPCContext, event string) (*ctypes.ResultSubscri // | event | string | "" | true | Event name | // // -func Unsubscribe(wsCtx rpctypes.WSRPCContext, event string) (*ctypes.ResultUnsubscribe, error) { - logger.Info("Unsubscribe to event", "remote", wsCtx.GetRemoteAddr(), "event", event) - wsCtx.GetEventSwitch().RemoveListenerForEvent(event, wsCtx.GetRemoteAddr()) 
+func Unsubscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultUnsubscribe, error) { + addr := wsCtx.GetRemoteAddr() + logger.Info("Unsubscribe from query", "remote", addr, "query", query) + q, ok := wsCtx.DeleteSubscription(query) + if !ok { + return nil, errors.New("subscription not found") + } + eventBus.Unsubscribe(context.Background(), addr, q.(*tmquery.Query)) + return &ctypes.ResultUnsubscribe{}, nil +} + +func UnsubscribeAll(wsCtx rpctypes.WSRPCContext) (*ctypes.ResultUnsubscribe, error) { + addr := wsCtx.GetRemoteAddr() + logger.Info("Unsubscribe from all", "remote", addr) + eventBus.UnsubscribeAll(context.Background(), addr) + wsCtx.DeleteAllSubscriptions() return &ctypes.ResultUnsubscribe{}, nil } diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 94fc0efc..3f663c37 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -1,9 +1,12 @@ package core import ( + "context" "fmt" "time" + "github.com/pkg/errors" + abci "github.com/tendermint/abci/types" data "github.com/tendermint/go-wire/data" ctypes "github.com/tendermint/tendermint/rpc/core/types" @@ -147,29 +150,35 @@ func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { // |-----------+------+---------+----------+-----------------| // | tx | Tx | nil | true | The transaction | func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - // subscribe to tx being committed in block - deliverTxResCh := make(chan types.EventDataTx, 1) - types.AddListenerForEvent(eventSwitch, "rpc", types.EventStringTx(tx), func(data types.TMEventData) { - deliverTxResCh <- data.Unwrap().(types.EventDataTx) - }) + ctx, cancel := context.WithTimeout(context.Background(), subscribeTimeout) + defer cancel() + deliverTxResCh := make(chan interface{}) + q := types.EventQueryTxFor(tx) + err := eventBus.Subscribe(ctx, "mempool", q, deliverTxResCh) + if err != nil { + err = errors.Wrap(err, "failed to subscribe to tx") + logger.Error("Error on broadcastTxCommit", "err", 
err) + return nil, fmt.Errorf("Error on broadcastTxCommit: %v", err) + } + defer eventBus.Unsubscribe(context.Background(), "mempool", q) // broadcast the tx and register checktx callback checkTxResCh := make(chan *abci.Response, 1) - err := mempool.CheckTx(tx, func(res *abci.Response) { + err = mempool.CheckTx(tx, func(res *abci.Response) { checkTxResCh <- res }) if err != nil { - logger.Error("err", "err", err) - return nil, fmt.Errorf("Error broadcasting transaction: %v", err) + logger.Error("Error on broadcastTxCommit", "err", err) + return nil, fmt.Errorf("Error on broadcastTxCommit: %v", err) } checkTxRes := <-checkTxResCh checkTxR := checkTxRes.GetCheckTx() - if checkTxR.Code != abci.CodeType_OK { + if checkTxR.Code != abci.CodeTypeOK { // CheckTx failed! return &ctypes.ResultBroadcastTxCommit{ - CheckTx: checkTxR.Result(), - DeliverTx: abci.Result{}, + CheckTx: *checkTxR, + DeliverTx: abci.ResponseDeliverTx{}, Hash: tx.Hash(), }, nil } @@ -179,30 +188,25 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { // TODO: configurable? timer := time.NewTimer(60 * 2 * time.Second) select { - case deliverTxRes := <-deliverTxResCh: + case deliverTxResMsg := <-deliverTxResCh: + deliverTxRes := deliverTxResMsg.(types.TMEventData).Unwrap().(types.EventDataTx) // The tx was included in a block. 
- deliverTxR := &abci.ResponseDeliverTx{ - Code: deliverTxRes.Code, - Data: deliverTxRes.Data, - Log: deliverTxRes.Log, - } + deliverTxR := deliverTxRes.Result logger.Info("DeliverTx passed ", "tx", data.Bytes(tx), "response", deliverTxR) return &ctypes.ResultBroadcastTxCommit{ - CheckTx: checkTxR.Result(), - DeliverTx: deliverTxR.Result(), + CheckTx: *checkTxR, + DeliverTx: deliverTxR, Hash: tx.Hash(), Height: deliverTxRes.Height, }, nil case <-timer.C: logger.Error("failed to include tx") return &ctypes.ResultBroadcastTxCommit{ - CheckTx: checkTxR.Result(), - DeliverTx: abci.Result{}, + CheckTx: *checkTxR, + DeliverTx: abci.ResponseDeliverTx{}, Hash: tx.Hash(), }, fmt.Errorf("Timed out waiting for transaction to be included in a block") } - - panic("Should never happen!") } // Get unconfirmed transactions including their number. diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index 20141cb9..d0b0f87d 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -1,6 +1,8 @@ package core import ( + "time" + crypto "github.com/tendermint/go-crypto" "github.com/tendermint/tendermint/consensus" cstypes "github.com/tendermint/tendermint/consensus/types" @@ -12,12 +14,14 @@ import ( "github.com/tendermint/tmlibs/log" ) +var subscribeTimeout = 5 * time.Second + //---------------------------------------------- // These interfaces are used by RPC and must be thread safe type Consensus interface { GetState() *sm.State - GetValidators() (int, []*types.Validator) + GetValidators() (int64, []*types.Validator) GetRoundState() *cstypes.RoundState } @@ -36,7 +40,6 @@ type P2P interface { var ( // external, thread safe interfaces - eventSwitch types.EventSwitch proxyAppQuery proxy.AppConnQuery // interfaces defined in types and above @@ -51,14 +54,11 @@ var ( addrBook *p2p.AddrBook txIndexer txindex.TxIndexer consensusReactor *consensus.ConsensusReactor + eventBus *types.EventBus // thread safe logger log.Logger ) -func SetEventSwitch(evsw types.EventSwitch) { - eventSwitch = evsw -} 
- func SetBlockStore(bs types.BlockStore) { blockStore = bs } @@ -102,3 +102,7 @@ func SetConsensusReactor(conR *consensus.ConsensusReactor) { func SetLogger(l log.Logger) { logger = l } + +func SetEventBus(b *types.EventBus) { + eventBus = b +} diff --git a/rpc/core/routes.go b/rpc/core/routes.go index b1dbd378..111c010a 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -7,8 +7,9 @@ import ( // TODO: better system than "unsafe" prefix var Routes = map[string]*rpc.RPCFunc{ // subscribe/unsubscribe are reserved for websocket events. - "subscribe": rpc.NewWSRPCFunc(Subscribe, "event"), - "unsubscribe": rpc.NewWSRPCFunc(Unsubscribe, "event"), + "subscribe": rpc.NewWSRPCFunc(Subscribe, "query"), + "unsubscribe": rpc.NewWSRPCFunc(Unsubscribe, "query"), + "unsubscribe_all": rpc.NewWSRPCFunc(UnsubscribeAll, ""), // info API "status": rpc.NewRPCFunc(Status, ""), @@ -18,6 +19,7 @@ var Routes = map[string]*rpc.RPCFunc{ "block": rpc.NewRPCFunc(Block, "height"), "commit": rpc.NewRPCFunc(Commit, "height"), "tx": rpc.NewRPCFunc(Tx, "hash,prove"), + "tx_search": rpc.NewRPCFunc(TxSearch, "query,prove"), "validators": rpc.NewRPCFunc(Validators, "height"), "dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""), "unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, ""), diff --git a/rpc/core/status.go b/rpc/core/status.go index 4a8d84ec..0cb7acc1 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -1,6 +1,8 @@ package core import ( + "time" + data "github.com/tendermint/go-wire/data" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" @@ -56,18 +58,20 @@ import ( func Status() (*ctypes.ResultStatus, error) { latestHeight := blockStore.Height() var ( - latestBlockMeta *types.BlockMeta - latestBlockHash data.Bytes - latestAppHash data.Bytes - latestBlockTime int64 + latestBlockMeta *types.BlockMeta + latestBlockHash data.Bytes + latestAppHash data.Bytes + latestBlockTimeNano int64 ) if latestHeight != 0 { 
latestBlockMeta = blockStore.LoadBlockMeta(latestHeight) latestBlockHash = latestBlockMeta.BlockID.Hash latestAppHash = latestBlockMeta.Header.AppHash - latestBlockTime = latestBlockMeta.Header.Time.UnixNano() + latestBlockTimeNano = latestBlockMeta.Header.Time.UnixNano() } + latestBlockTime := time.Unix(0, latestBlockTimeNano) + return &ctypes.ResultStatus{ NodeInfo: p2pSwitch.NodeInfo(), PubKey: pubKey, diff --git a/rpc/core/tx.go b/rpc/core/tx.go index 03a911e2..f592326b 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -6,6 +6,7 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/state/txindex/null" "github.com/tendermint/tendermint/types" + tmquery "github.com/tendermint/tmlibs/pubsub/query" ) // Tx allows you to query the transaction results. `nil` could mean the @@ -82,20 +83,121 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { return nil, fmt.Errorf("Tx (%X) not found", hash) } - height := int(r.Height) // XXX - index := int(r.Index) + height := r.Height + index := r.Index var proof types.TxProof if prove { block := blockStore.LoadBlock(height) - proof = block.Data.Txs.Proof(index) + proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines } return &ctypes.ResultTx{ Height: height, - Index: index, - TxResult: r.Result.Result(), + Index: uint32(index), + TxResult: r.Result, Tx: r.Tx, Proof: proof, }, nil } + +// TxSearch allows you to query for multiple transactions results. 
+// +// ```shell +// curl "localhost:46657/tx_search?query=\"account.owner='Ivan'\"&prove=true" +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:46657", "/websocket") +// q, err := tmquery.New("account.owner='Ivan'") +// tx, err := client.TxSearch(q, true) +// ``` +// +// > The above command returns JSON structured like this: +// +// ```json +// { +// "result": [ +// { +// "proof": { +// "Proof": { +// "aunts": [ +// "J3LHbizt806uKnABNLwG4l7gXCA=", +// "iblMO/M1TnNtlAefJyNCeVhjAb0=", +// "iVk3ryurVaEEhdeS0ohAJZ3wtB8=", +// "5hqMkTeGqpct51ohX0lZLIdsn7Q=", +// "afhsNxFnLlZgFDoyPpdQSe0bR8g=" +// ] +// }, +// "Data": "mvZHHa7HhZ4aRT0xMDA=", +// "RootHash": "F6541223AA46E428CB1070E9840D2C3DF3B6D776", +// "Total": 32, +// "Index": 31 +// }, +// "tx": "mvZHHa7HhZ4aRT0xMDA=", +// "tx_result": {}, +// "index": 31, +// "height": 12 +// } +// ], +// "id": "", +// "jsonrpc": "2.0" +// } +// ``` +// +// Returns transactions matching the given query. +// +// ### Query Parameters +// +// | Parameter | Type | Default | Required | Description | +// |-----------+--------+---------+----------+-----------------------------------------------------------| +// | query | string | "" | true | Query | +// | prove | bool | false | false | Include proofs of the transactions inclusion in the block | +// +// ### Returns +// +// - `proof`: the `types.TxProof` object +// - `tx`: `[]byte` - the transaction +// - `tx_result`: the `abci.Result` object +// - `index`: `int` - index of the transaction +// - `height`: `int` - height of the block where this transaction was in +func TxSearch(query string, prove bool) ([]*ctypes.ResultTx, error) { + // if index is disabled, return error + if _, ok := txIndexer.(*null.TxIndex); ok { + return nil, fmt.Errorf("Transaction indexing is disabled.") + } + + q, err := tmquery.New(query) + if err != nil { + return nil, err + } + + results, err := txIndexer.Search(q) + if err != nil { + return nil, err + } + + // TODO: we may want to consider 
putting a maximum on this length and somehow + // informing the user that things were truncated. + apiResults := make([]*ctypes.ResultTx, len(results)) + var proof types.TxProof + for i, r := range results { + height := r.Height + index := r.Index + + if prove { + block := blockStore.LoadBlock(height) + proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines + } + + apiResults[i] = &ctypes.ResultTx{ + Height: height, + Index: index, + TxResult: r.Result, + Tx: r.Tx, + Proof: proof, + } + } + + return apiResults, nil +} diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 874e351d..3d1e7a21 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -2,6 +2,7 @@ package core_types import ( "strings" + "time" abci "github.com/tendermint/abci/types" crypto "github.com/tendermint/go-crypto" @@ -12,7 +13,7 @@ import ( ) type ResultBlockchainInfo struct { - LastHeight int `json:"last_height"` + LastHeight int64 `json:"last_height"` BlockMetas []*types.BlockMeta `json:"block_metas"` } @@ -51,8 +52,8 @@ type ResultStatus struct { PubKey crypto.PubKey `json:"pub_key"` LatestBlockHash data.Bytes `json:"latest_block_hash"` LatestAppHash data.Bytes `json:"latest_app_hash"` - LatestBlockHeight int `json:"latest_block_height"` - LatestBlockTime int64 `json:"latest_block_time"` // nano + LatestBlockHeight int64 `json:"latest_block_height"` + LatestBlockTime time.Time `json:"latest_block_time"` Syncing bool `json:"syncing"` } @@ -86,7 +87,7 @@ type Peer struct { } type ResultValidators struct { - BlockHeight int `json:"block_height"` + BlockHeight int64 `json:"block_height"` Validators []*types.Validator `json:"validators"` } @@ -96,26 +97,26 @@ type ResultDumpConsensusState struct { } type ResultBroadcastTx struct { - Code abci.CodeType `json:"code"` - Data data.Bytes `json:"data"` - Log string `json:"log"` + Code uint32 `json:"code"` + Data data.Bytes `json:"data"` + Log string `json:"log"` Hash data.Bytes 
`json:"hash"` } type ResultBroadcastTxCommit struct { - CheckTx abci.Result `json:"check_tx"` - DeliverTx abci.Result `json:"deliver_tx"` - Hash data.Bytes `json:"hash"` - Height int `json:"height"` + CheckTx abci.ResponseCheckTx `json:"check_tx"` + DeliverTx abci.ResponseDeliverTx `json:"deliver_tx"` + Hash data.Bytes `json:"hash"` + Height int64 `json:"height"` } type ResultTx struct { - Height int `json:"height"` - Index int `json:"index"` - TxResult abci.Result `json:"tx_result"` - Tx types.Tx `json:"tx"` - Proof types.TxProof `json:"proof,omitempty"` + Height int64 `json:"height"` + Index uint32 `json:"index"` + TxResult abci.ResponseDeliverTx `json:"tx_result"` + Tx types.Tx `json:"tx"` + Proof types.TxProof `json:"proof,omitempty"` } type ResultUnconfirmedTxs struct { @@ -128,7 +129,7 @@ type ResultABCIInfo struct { } type ResultABCIQuery struct { - *abci.ResultQuery `json:"response"` + Response abci.ResponseQuery `json:"response"` } type ResultUnsafeFlushMempool struct{} @@ -140,6 +141,6 @@ type ResultSubscribe struct{} type ResultUnsubscribe struct{} type ResultEvent struct { - Name string `json:"name"` - Data types.TMEventData `json:"data"` + Query string `json:"query"` + Data types.TMEventData `json:"data"` } diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go index b08a7833..f36b5800 100644 --- a/rpc/grpc/api.go +++ b/rpc/grpc/api.go @@ -1,7 +1,7 @@ package core_grpc import ( - context "golang.org/x/net/context" + "context" abci "github.com/tendermint/abci/types" core "github.com/tendermint/tendermint/rpc/core" @@ -10,6 +10,11 @@ import ( type broadcastAPI struct { } +func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { + // dummy so we can check if the server is up + return &ResponsePing{}, nil +} + func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { res, err := core.BroadcastTxCommit(req.Tx) if err != nil { diff --git a/rpc/grpc/client_server.go 
b/rpc/grpc/client_server.go index 1c6498df..80d736f5 100644 --- a/rpc/grpc/client_server.go +++ b/rpc/grpc/client_server.go @@ -25,7 +25,7 @@ func StartGRPCServer(protoAddr string) (net.Listener, error) { grpcServer := grpc.NewServer() RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{}) - go grpcServer.Serve(ln) + go grpcServer.Serve(ln) // nolint: errcheck return ln, nil } diff --git a/rpc/grpc/grpc_test.go b/rpc/grpc/grpc_test.go index b62006a1..030a22b8 100644 --- a/rpc/grpc/grpc_test.go +++ b/rpc/grpc/grpc_test.go @@ -1,11 +1,11 @@ package core_grpc_test import ( + "context" "os" "testing" "github.com/stretchr/testify/require" - "golang.org/x/net/context" "github.com/tendermint/abci/example/dummy" "github.com/tendermint/tendermint/rpc/grpc" diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go index d373f097..cf7a5ec7 100644 --- a/rpc/grpc/types.pb.go +++ b/rpc/grpc/types.pb.go @@ -9,7 +9,9 @@ It is generated from these files: types.proto It has these top-level messages: + RequestPing RequestBroadcastTx + ResponsePing ResponseBroadcastTx */ package core_grpc @@ -20,7 +22,8 @@ import math "math" import types "github.com/tendermint/abci/types" import ( - context "golang.org/x/net/context" + "context" + grpc "google.golang.org/grpc" ) @@ -35,6 +38,14 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +type RequestPing struct { +} + +func (m *RequestPing) Reset() { *m = RequestPing{} } +func (m *RequestPing) String() string { return proto.CompactTextString(m) } +func (*RequestPing) ProtoMessage() {} +func (*RequestPing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + type RequestBroadcastTx struct { Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` } @@ -42,7 +53,7 @@ type RequestBroadcastTx struct { func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } func (*RequestBroadcastTx) ProtoMessage() {} -func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *RequestBroadcastTx) GetTx() []byte { if m != nil { @@ -51,15 +62,23 @@ func (m *RequestBroadcastTx) GetTx() []byte { return nil } +type ResponsePing struct { +} + +func (m *ResponsePing) Reset() { *m = ResponsePing{} } +func (m *ResponsePing) String() string { return proto.CompactTextString(m) } +func (*ResponsePing) ProtoMessage() {} +func (*ResponsePing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + type ResponseBroadcastTx struct { - CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx" json:"check_tx,omitempty"` + CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx" json:"check_tx,omitempty"` DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx" json:"deliver_tx,omitempty"` } func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } func (*ResponseBroadcastTx) ProtoMessage() {} -func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{1} } +func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func (m *ResponseBroadcastTx) GetCheckTx() *types.ResponseCheckTx { if m != nil { @@ -76,7 +95,9 @@ func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx { } func init() { + proto.RegisterType((*RequestPing)(nil), "core_grpc.RequestPing") proto.RegisterType((*RequestBroadcastTx)(nil), "core_grpc.RequestBroadcastTx") + proto.RegisterType((*ResponsePing)(nil), "core_grpc.ResponsePing") proto.RegisterType((*ResponseBroadcastTx)(nil), "core_grpc.ResponseBroadcastTx") } @@ -91,6 +112,7 @@ const _ = grpc.SupportPackageIsVersion4 // Client API for BroadcastAPI service type BroadcastAPIClient interface { + Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) } @@ -102,6 +124,15 @@ func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { return &broadcastAPIClient{cc} } +func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { + out := new(ResponsePing) + err := grpc.Invoke(ctx, "/core_grpc.BroadcastAPI/Ping", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { out := new(ResponseBroadcastTx) err := grpc.Invoke(ctx, "/core_grpc.BroadcastAPI/BroadcastTx", in, out, c.cc, opts...) 
@@ -114,6 +145,7 @@ func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadca // Server API for BroadcastAPI service type BroadcastAPIServer interface { + Ping(context.Context, *RequestPing) (*ResponsePing, error) BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) } @@ -121,6 +153,24 @@ func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) { s.RegisterService(&_BroadcastAPI_serviceDesc, srv) } +func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestPing) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BroadcastAPIServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/core_grpc.BroadcastAPI/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) + } + return interceptor(ctx, in, info, handler) +} + func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RequestBroadcastTx) if err := dec(in); err != nil { @@ -143,6 +193,10 @@ var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ ServiceName: "core_grpc.BroadcastAPI", HandlerType: (*BroadcastAPIServer)(nil), Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _BroadcastAPI_Ping_Handler, + }, { MethodName: "BroadcastTx", Handler: _BroadcastAPI_BroadcastTx_Handler, @@ -155,20 +209,22 @@ var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("types.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 226 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2e, 0xa9, 0x2c, 0x48, + // 264 bytes of a gzipped 
FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2e, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4c, 0xce, 0x2f, 0x4a, 0x8d, 0x4f, 0x2f, 0x2a, 0x48, 0x96, 0xd2, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x2f, - 0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xc9, 0x2d, 0x2e, 0xd0, 0x07, - 0x6b, 0xd1, 0x47, 0xd2, 0xa8, 0xa4, 0xc2, 0x25, 0x14, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0xe2, - 0x54, 0x94, 0x9f, 0x98, 0x92, 0x9c, 0x58, 0x5c, 0x12, 0x52, 0x21, 0xc4, 0xc7, 0xc5, 0x54, 0x52, - 0x21, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x13, 0xc4, 0x54, 0x52, 0xa1, 0x54, 0xc7, 0x25, 0x1c, 0x94, - 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x8a, 0xac, 0xcc, 0x90, 0x8b, 0x23, 0x39, 0x23, 0x35, 0x39, - 0x3b, 0x1e, 0xaa, 0x98, 0xdb, 0x48, 0x4c, 0x0f, 0x62, 0x38, 0x4c, 0xb5, 0x33, 0x48, 0x3a, 0xa4, - 0x22, 0x88, 0x3d, 0x19, 0xc2, 0x10, 0x32, 0xe1, 0xe2, 0x4c, 0x2c, 0x28, 0x48, 0xcd, 0x4b, 0x01, - 0xe9, 0x61, 0x02, 0xeb, 0x11, 0x47, 0xd3, 0xe3, 0x08, 0x96, 0x0f, 0xa9, 0x08, 0xe2, 0x48, 0x84, - 0xb2, 0x8c, 0x62, 0xb8, 0x78, 0xe0, 0xf6, 0x3a, 0x06, 0x78, 0x0a, 0xf9, 0x70, 0x71, 0x23, 0xbb, - 0x43, 0x56, 0x0f, 0xee, 0x7d, 0x3d, 0x4c, 0xdf, 0x48, 0xc9, 0xa1, 0x48, 0x63, 0x78, 0x23, 0x89, - 0x0d, 0x1c, 0x14, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x68, 0x73, 0x87, 0xb0, 0x52, 0x01, - 0x00, 0x00, + 0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x4f, 0x4c, 0x4a, 0xce, 0xd4, 0x07, + 0x6b, 0xd1, 0x47, 0xd2, 0xa8, 0xc4, 0xcb, 0xc5, 0x1d, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x12, + 0x90, 0x99, 0x97, 0xae, 0xa4, 0xc2, 0x25, 0x04, 0xe5, 0x3a, 0x15, 0xe5, 0x27, 0xa6, 0x24, 0x27, + 0x16, 0x97, 0x84, 0x54, 0x08, 0xf1, 0x71, 0x31, 0x95, 0x54, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, + 0x04, 0x31, 0x95, 0x54, 0x28, 0xf1, 0x71, 0xf1, 0x04, 0xa5, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, + 0x82, 0x75, 0x35, 0x32, 0x72, 0x09, 0xc3, 0x04, 0x90, 0xf5, 0x19, 0x72, 0x71, 0x24, 0x67, 0xa4, + 0x26, 
0x67, 0xc7, 0x43, 0x75, 0x73, 0x1b, 0x89, 0xe9, 0x41, 0x2c, 0x87, 0xa9, 0x76, 0x06, 0x49, + 0x87, 0x54, 0x04, 0xb1, 0x27, 0x43, 0x18, 0x42, 0xe6, 0x5c, 0x5c, 0x29, 0xa9, 0x39, 0x99, 0x65, + 0xa9, 0x45, 0x20, 0x4d, 0x4c, 0x60, 0x4d, 0x12, 0x68, 0x9a, 0x5c, 0x20, 0x0a, 0x42, 0x2a, 0x82, + 0x38, 0x53, 0x60, 0x4c, 0xa3, 0xa9, 0x8c, 0x5c, 0x3c, 0x70, 0xbb, 0x1d, 0x03, 0x3c, 0x85, 0xcc, + 0xb9, 0x58, 0x40, 0x8e, 0x13, 0x12, 0xd3, 0x83, 0x87, 0x8d, 0x1e, 0x92, 0x57, 0xa5, 0xc4, 0x51, + 0xc4, 0x11, 0xbe, 0x11, 0xf2, 0xe1, 0xe2, 0x46, 0xf6, 0x84, 0x2c, 0xa6, 0x7e, 0x24, 0x69, 0x29, + 0x39, 0x2c, 0xc6, 0x20, 0xc9, 0x27, 0xb1, 0x81, 0xc3, 0xd9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, + 0x92, 0x29, 0xd9, 0x42, 0xaf, 0x01, 0x00, 0x00, } diff --git a/rpc/grpc/types.proto b/rpc/grpc/types.proto index a7d18dae..35462594 100644 --- a/rpc/grpc/types.proto +++ b/rpc/grpc/types.proto @@ -1,7 +1,7 @@ syntax = "proto3"; package core_grpc; -import "github.com/tendermint/abci/blob/master/types/types.proto"; +import "github.com/tendermint/abci/types/types.proto"; //---------------------------------------- // Message types @@ -9,6 +9,9 @@ import "github.com/tendermint/abci/blob/master/types/types.proto"; //---------------------------------------- // Request types +message RequestPing { +} + message RequestBroadcastTx { bytes tx = 1; } @@ -16,6 +19,9 @@ message RequestBroadcastTx { //---------------------------------------- // Response types +message ResponsePing{ +} + message ResponseBroadcastTx{ types.ResponseCheckTx check_tx = 1; types.ResponseDeliverTx deliver_tx = 2; @@ -25,5 +31,6 @@ message ResponseBroadcastTx{ // Service Definition service BroadcastAPI { + rpc Ping(RequestPing) returns (ResponsePing) ; rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx) ; } diff --git a/rpc/lib/client/http_client.go b/rpc/lib/client/http_client.go index 1f06112d..a1b23a25 100644 --- a/rpc/lib/client/http_client.go +++ b/rpc/lib/client/http_client.go @@ -93,7 +93,8 @@ func (c *JSONRPCClient) 
Call(method string, params map[string]interface{}, resul if err != nil { return nil, err } - defer httpResponse.Body.Close() + defer httpResponse.Body.Close() // nolint: errcheck + responseBytes, err := ioutil.ReadAll(httpResponse.Body) if err != nil { return nil, err @@ -128,7 +129,8 @@ func (c *URIClient) Call(method string, params map[string]interface{}, result in if err != nil { return nil, err } - defer resp.Body.Close() + defer resp.Body.Close() // nolint: errcheck + responseBytes, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err @@ -153,7 +155,7 @@ func unmarshalResponseBytes(responseBytes []byte, result interface{}) (interface return nil, errors.Errorf("Response error: %v", response.Error) } // unmarshal the RawMessage into the result - err = json.Unmarshal(*response.Result, result) + err = json.Unmarshal(response.Result, result) if err != nil { return nil, errors.Errorf("Error unmarshalling rpc response result: %v", err) } diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go index 6e924290..e4ed442e 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/lib/client/ws_client.go @@ -47,10 +47,10 @@ type WSClient struct { onReconnect func() // internal channels - send chan types.RPCRequest // user requests - backlog chan types.RPCRequest // stores a single user request received during a conn failure - reconnectAfter chan error // reconnect requests - readRoutineQuit chan struct{} // a way for readRoutine to close writeRoutine + send chan types.RPCRequest // user requests + backlog chan types.RPCRequest // stores a single user request received during a conn failure + reconnectAfter chan error // reconnect requests + readRoutineQuit chan struct{} // a way for readRoutine to close writeRoutine wg sync.WaitGroup @@ -168,12 +168,14 @@ func (c *WSClient) OnStop() {} // Stop overrides cmn.Service#Stop. There is no other way to wait until Quit // channel is closed. 
-func (c *WSClient) Stop() bool { - success := c.BaseService.Stop() - // only close user-facing channels when we can't write to them - c.wg.Wait() - close(c.ResponsesCh) - return success +func (c *WSClient) Stop() error { + err := c.BaseService.Stop() + if err == nil { + // only close user-facing channels when we can't write to them + c.wg.Wait() + close(c.ResponsesCh) + } + return err } // IsReconnecting returns true if the client is reconnecting right now. @@ -290,10 +292,11 @@ func (c *WSClient) processBacklog() error { select { case request := <-c.backlog: if c.writeWait > 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)) + if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { + c.Logger.Error("failed to set write deadline", "err", err) + } } - err := c.conn.WriteJSON(request) - if err != nil { + if err := c.conn.WriteJSON(request); err != nil { c.Logger.Error("failed to resend request", "err", err) c.reconnectAfter <- err // requeue request @@ -312,8 +315,7 @@ func (c *WSClient) reconnectRoutine() { case originalError := <-c.reconnectAfter: // wait until writeRoutine and readRoutine finish c.wg.Wait() - err := c.reconnect() - if err != nil { + if err := c.reconnect(); err != nil { c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError) c.Stop() return @@ -352,7 +354,10 @@ func (c *WSClient) writeRoutine() { defer func() { ticker.Stop() - c.conn.Close() + if err := c.conn.Close(); err != nil { + // ignore error; it will trigger in tests + // likely because it's closing an already closed connection + } c.wg.Done() }() @@ -360,10 +365,11 @@ func (c *WSClient) writeRoutine() { select { case request := <-c.send: if c.writeWait > 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)) + if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { + c.Logger.Error("failed to set write deadline", "err", err) + } } - err := c.conn.WriteJSON(request) - if err != nil { + if err := 
c.conn.WriteJSON(request); err != nil { c.Logger.Error("failed to send request", "err", err) c.reconnectAfter <- err // add request to the backlog, so we don't lose it @@ -372,10 +378,11 @@ func (c *WSClient) writeRoutine() { } case <-ticker.C: if c.writeWait > 0 { - c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)) + if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { + c.Logger.Error("failed to set write deadline", "err", err) + } } - err := c.conn.WriteMessage(websocket.PingMessage, []byte{}) - if err != nil { + if err := c.conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil { c.Logger.Error("failed to write ping", "err", err) c.reconnectAfter <- err return @@ -387,7 +394,9 @@ func (c *WSClient) writeRoutine() { case <-c.readRoutineQuit: return case <-c.Quit: - c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) + if err := c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")); err != nil { + c.Logger.Error("failed to write message", "err", err) + } return } } @@ -397,7 +406,10 @@ func (c *WSClient) writeRoutine() { // executing all reads from this goroutine. 
func (c *WSClient) readRoutine() { defer func() { - c.conn.Close() + if err := c.conn.Close(); err != nil { + // ignore error; it will trigger in tests + // likely because it's closing an already closed connection + } c.wg.Done() }() @@ -415,7 +427,9 @@ func (c *WSClient) readRoutine() { for { // reset deadline for every message type (control or data) if c.readWait > 0 { - c.conn.SetReadDeadline(time.Now().Add(c.readWait)) + if err := c.conn.SetReadDeadline(time.Now().Add(c.readWait)); err != nil { + c.Logger.Error("failed to set read deadline", "err", err) + } } _, data, err := c.conn.ReadMessage() if err != nil { @@ -449,17 +463,17 @@ func (c *WSClient) readRoutine() { /////////////////////////////////////////////////////////////////////////////// // Predefined methods -// Subscribe to an event. Note the server must have a "subscribe" route +// Subscribe to a query. Note the server must have a "subscribe" route // defined. -func (c *WSClient) Subscribe(ctx context.Context, eventType string) error { - params := map[string]interface{}{"event": eventType} +func (c *WSClient) Subscribe(ctx context.Context, query string) error { + params := map[string]interface{}{"query": query} return c.Call(ctx, "subscribe", params) } -// Unsubscribe from an event. Note the server must have a "unsubscribe" route +// Unsubscribe from a query. Note the server must have a "unsubscribe" route // defined. 
-func (c *WSClient) Unsubscribe(ctx context.Context, eventType string) error { - params := map[string]interface{}{"event": eventType} +func (c *WSClient) Unsubscribe(ctx context.Context, query string) error { + params := map[string]interface{}{"query": query} return c.Call(ctx, "unsubscribe", params) } diff --git a/rpc/lib/client/ws_client_test.go b/rpc/lib/client/ws_client_test.go index 23f19dc0..cc789728 100644 --- a/rpc/lib/client/ws_client_test.go +++ b/rpc/lib/client/ws_client_test.go @@ -17,6 +17,8 @@ import ( types "github.com/tendermint/tendermint/rpc/lib/types" ) +var wsCallTimeout = 5 * time.Second + type myHandler struct { closeConnAfterRead bool mtx sync.RWMutex @@ -32,7 +34,7 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { panic(err) } - defer conn.Close() + defer conn.Close() // nolint: errcheck for { messageType, _, err := conn.ReadMessage() if err != nil { @@ -41,12 +43,14 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.mtx.RLock() if h.closeConnAfterRead { - conn.Close() + if err := conn.Close(); err != nil { + panic(err) + } } h.mtx.RUnlock() res := json.RawMessage(`{}`) - emptyRespBytes, _ := json.Marshal(types.RPCResponse{Result: &res}) + emptyRespBytes, _ := json.Marshal(types.RPCResponse{Result: res}) if err := conn.WriteMessage(messageType, emptyRespBytes); err != nil { return } @@ -100,7 +104,9 @@ func TestWSClientReconnectsAfterWriteFailure(t *testing.T) { go callWgDoneOnResult(t, c, &wg) // hacky way to abort the connection before write - c.conn.Close() + if err := c.conn.Close(); err != nil { + t.Error(err) + } // results in WS write error, the client should resend on reconnect call(t, "a", c) @@ -133,14 +139,18 @@ func TestWSClientReconnectFailure(t *testing.T) { }() // hacky way to abort the connection before write - c.conn.Close() + if err := c.conn.Close(); err != nil { + t.Error(err) + } s.Close() // results in WS write error // provide timeout to avoid blocking 
- ctx, cancel := context.WithTimeout(context.Background(), time.Second) + ctx, cancel := context.WithTimeout(context.Background(), wsCallTimeout) defer cancel() - c.Call(ctx, "a", make(map[string]interface{})) + if err := c.Call(ctx, "a", make(map[string]interface{})); err != nil { + t.Error(err) + } // expect to reconnect almost immediately time.Sleep(10 * time.Millisecond) @@ -186,7 +196,7 @@ func TestNotBlockingOnStop(t *testing.T) { func startClient(t *testing.T, addr net.Addr) *WSClient { c := NewWSClient(addr.String(), "/websocket") - _, err := c.Start() + err := c.Start() require.Nil(t, err) c.SetLogger(log.TestingLogger()) return c @@ -204,7 +214,7 @@ func callWgDoneOnResult(t *testing.T, c *WSClient, wg *sync.WaitGroup) { if resp.Error != nil { t.Fatalf("unexpected error: %v", resp.Error) } - if *resp.Result != nil { + if resp.Result != nil { wg.Done() } case <-c.Quit: diff --git a/rpc/lib/doc.go b/rpc/lib/doc.go index 0ea4e5c6..2bc43859 100644 --- a/rpc/lib/doc.go +++ b/rpc/lib/doc.go @@ -77,7 +77,7 @@ Now start the server: ``` mux := http.NewServeMux() rpcserver.RegisterRPCFuncs(mux, Routes) -wm := rpcserver.NewWebsocketManager(Routes, nil) +wm := rpcserver.NewWebsocketManager(Routes) mux.HandleFunc("/websocket", wm.WebsocketHandler) logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) go func() { diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go index 2ec3014d..be170985 100644 --- a/rpc/lib/rpc_test.go +++ b/rpc/lib/rpc_test.go @@ -114,7 +114,7 @@ func setup() { tcpLogger := logger.With("socket", "tcp") mux := http.NewServeMux() server.RegisterRPCFuncs(mux, Routes, tcpLogger) - wm := server.NewWebsocketManager(Routes, nil, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second)) + wm := server.NewWebsocketManager(Routes, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second)) wm.SetLogger(tcpLogger) mux.HandleFunc(websocketEndpoint, wm.WebsocketHandler) go func() { @@ -127,7 +127,7 @@ func setup() { unixLogger := 
logger.With("socket", "unix") mux2 := http.NewServeMux() server.RegisterRPCFuncs(mux2, Routes, unixLogger) - wm = server.NewWebsocketManager(Routes, nil) + wm = server.NewWebsocketManager(Routes) wm.SetLogger(unixLogger) mux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler) go func() { @@ -216,19 +216,17 @@ func echoViaWS(cl *client.WSClient, val string) (string, error) { return "", err } - select { - case msg := <-cl.ResponsesCh: - if msg.Error != nil { - return "", err + msg := <-cl.ResponsesCh + if msg.Error != nil { + return "", err - } - result := new(ResultEcho) - err = json.Unmarshal(*msg.Result, result) - if err != nil { - return "", nil - } - return result.Value, nil } + result := new(ResultEcho) + err = json.Unmarshal(msg.Result, result) + if err != nil { + return "", nil + } + return result.Value, nil } func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) { @@ -240,19 +238,17 @@ func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) { return []byte{}, err } - select { - case msg := <-cl.ResponsesCh: - if msg.Error != nil { - return []byte{}, msg.Error + msg := <-cl.ResponsesCh + if msg.Error != nil { + return []byte{}, msg.Error - } - result := new(ResultEchoBytes) - err = json.Unmarshal(*msg.Result, result) - if err != nil { - return []byte{}, nil - } - return result.Value, nil } + result := new(ResultEchoBytes) + err = json.Unmarshal(msg.Result, result) + if err != nil { + return []byte{}, nil + } + return result.Value, nil } func testWithWSClient(t *testing.T, cl *client.WSClient) { @@ -282,7 +278,7 @@ func TestServersAndClientsBasic(t *testing.T) { cl3 := client.NewWSClient(addr, websocketEndpoint) cl3.SetLogger(log.TestingLogger()) - _, err := cl3.Start() + err := cl3.Start() require.Nil(t, err) fmt.Printf("=== testing server on %s using %v client", addr, cl3) testWithWSClient(t, cl3) @@ -311,7 +307,7 @@ func TestQuotedStringArg(t *testing.T) { func TestWSNewWSRPCFunc(t *testing.T) { cl := 
client.NewWSClient(tcpAddr, websocketEndpoint) cl.SetLogger(log.TestingLogger()) - _, err := cl.Start() + err := cl.Start() require.Nil(t, err) defer cl.Stop() @@ -322,23 +318,21 @@ func TestWSNewWSRPCFunc(t *testing.T) { err = cl.Call(context.Background(), "echo_ws", params) require.Nil(t, err) - select { - case msg := <-cl.ResponsesCh: - if msg.Error != nil { - t.Fatal(err) - } - result := new(ResultEcho) - err = json.Unmarshal(*msg.Result, result) - require.Nil(t, err) - got := result.Value - assert.Equal(t, got, val) + msg := <-cl.ResponsesCh + if msg.Error != nil { + t.Fatal(err) } + result := new(ResultEcho) + err = json.Unmarshal(msg.Result, result) + require.Nil(t, err) + got := result.Value + assert.Equal(t, got, val) } func TestWSHandlesArrayParams(t *testing.T) { cl := client.NewWSClient(tcpAddr, websocketEndpoint) cl.SetLogger(log.TestingLogger()) - _, err := cl.Start() + err := cl.Start() require.Nil(t, err) defer cl.Stop() @@ -347,17 +341,15 @@ func TestWSHandlesArrayParams(t *testing.T) { err = cl.CallWithArrayParams(context.Background(), "echo_ws", params) require.Nil(t, err) - select { - case msg := <-cl.ResponsesCh: - if msg.Error != nil { - t.Fatalf("%+v", err) - } - result := new(ResultEcho) - err = json.Unmarshal(*msg.Result, result) - require.Nil(t, err) - got := result.Value - assert.Equal(t, got, val) + msg := <-cl.ResponsesCh + if msg.Error != nil { + t.Fatalf("%+v", err) } + result := new(ResultEcho) + err = json.Unmarshal(msg.Result, result) + require.Nil(t, err) + got := result.Value + assert.Equal(t, got, val) } // TestWSClientPingPong checks that a client & server exchange pings @@ -365,7 +357,7 @@ func TestWSHandlesArrayParams(t *testing.T) { func TestWSClientPingPong(t *testing.T) { cl := client.NewWSClient(tcpAddr, websocketEndpoint) cl.SetLogger(log.TestingLogger()) - _, err := cl.Start() + err := cl.Start() require.Nil(t, err) defer cl.Stop() diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go index 
3a3c48f0..c8182169 100644 --- a/rpc/lib/server/handlers.go +++ b/rpc/lib/server/handlers.go @@ -18,7 +18,6 @@ import ( types "github.com/tendermint/tendermint/rpc/lib/types" cmn "github.com/tendermint/tmlibs/common" - events "github.com/tendermint/tmlibs/events" "github.com/tendermint/tmlibs/log" ) @@ -100,7 +99,11 @@ func funcReturnTypes(f interface{}) []reflect.Type { // jsonrpc calls grab the given method's function info and runs reflect.Call func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - b, _ := ioutil.ReadAll(r.Body) + b, err := ioutil.ReadAll(r.Body) + if err != nil { + WriteRPCResponseHTTP(w, types.RPCInvalidRequestError("", errors.Wrap(err, "Error reading request body"))) + return + } // if its an empty request (like from a browser), // just display a list of functions if len(b) == 0 { @@ -109,7 +112,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han } var request types.RPCRequest - err := json.Unmarshal(b, &request) + err = json.Unmarshal(b, &request) if err != nil { WriteRPCResponseHTTP(w, types.RPCParseError("", errors.Wrap(err, "Error unmarshalling request"))) return @@ -350,9 +353,10 @@ const ( defaultWSPingPeriod = (defaultWSReadWait * 9) / 10 ) -// a single websocket connection -// contains listener id, underlying ws connection, -// and the event switch for subscribing to events +// a single websocket connection contains listener id, underlying ws +// connection, and the event switch for subscribing to events. +// +// In case of an error, the connection is stopped. type wsConnection struct { cmn.BaseService @@ -361,7 +365,8 @@ type wsConnection struct { writeChan chan types.RPCResponse funcMap map[string]*RPCFunc - evsw events.EventSwitch + + subscriptions map[string]interface{} // write channel capacity writeChanCapacity int @@ -374,19 +379,23 @@ type wsConnection struct { // Send pings to server with this period. 
Must be less than readWait, but greater than zero. pingPeriod time.Duration + + // called before stopping the connection. + onDisconnect func(remoteAddr string) } -// NewWSConnection wraps websocket.Conn. See the commentary on the -// func(*wsConnection) functions for a detailed description of how to configure -// ping period and pong wait time. -// NOTE: if the write buffer is full, pongs may be dropped, which may cause clients to disconnect. -// see https://github.com/gorilla/websocket/issues/97 -func NewWSConnection(baseConn *websocket.Conn, funcMap map[string]*RPCFunc, evsw events.EventSwitch, options ...func(*wsConnection)) *wsConnection { +// NewWSConnection wraps websocket.Conn. +// +// See the commentary on the func(*wsConnection) functions for a detailed +// description of how to configure ping period and pong wait time. NOTE: if the +// write buffer is full, pongs may be dropped, which may cause clients to +// disconnect. see https://github.com/gorilla/websocket/issues/97 +func NewWSConnection(baseConn *websocket.Conn, funcMap map[string]*RPCFunc, options ...func(*wsConnection)) *wsConnection { wsc := &wsConnection{ remoteAddr: baseConn.RemoteAddr().String(), baseConn: baseConn, funcMap: funcMap, - evsw: evsw, + subscriptions: make(map[string]interface{}), writeWait: defaultWSWriteWait, writeChanCapacity: defaultWSWriteChanCapacity, readWait: defaultWSReadWait, @@ -431,7 +440,16 @@ func PingPeriod(pingPeriod time.Duration) func(*wsConnection) { } } -// OnStart starts the read and write routines. It blocks until the connection closes. +// OnDisconnect called before stopping the connection. +// It should only be used in the constructor - not Goroutine-safe. +func OnDisconnect(cb func(remoteAddr string)) func(*wsConnection) { + return func(wsc *wsConnection) { + wsc.onDisconnect = cb + } +} + +// OnStart implements cmn.Service by starting the read and write routines. It +// blocks until the connection closes. 
func (wsc *wsConnection) OnStart() error { wsc.writeChan = make(chan types.RPCResponse, wsc.writeChanCapacity) @@ -443,13 +461,13 @@ func (wsc *wsConnection) OnStart() error { return nil } -// OnStop unsubscribes from all events. +// OnStop implements cmn.Service by calling OnDisconnect callback. func (wsc *wsConnection) OnStop() { - if wsc.evsw != nil { - wsc.evsw.RemoveListener(wsc.remoteAddr) - } // Both read and write loops close the websocket connection when they exit their loops. // The writeChan is never closed, to allow WriteRPCResponse() to fail. + if wsc.onDisconnect != nil { + wsc.onDisconnect(wsc.remoteAddr) + } } // GetRemoteAddr returns the remote address of the underlying connection. @@ -458,12 +476,6 @@ func (wsc *wsConnection) GetRemoteAddr() string { return wsc.remoteAddr } -// GetEventSwitch returns the event switch. -// It implements WSRPCConnection -func (wsc *wsConnection) GetEventSwitch() events.EventSwitch { - return wsc.evsw -} - // WriteRPCResponse pushes a response to the writeChan, and blocks until it is accepted. // It implements WSRPCConnection. It is Goroutine-safe. 
func (wsc *wsConnection) WriteRPCResponse(resp types.RPCResponse) { @@ -487,6 +499,28 @@ func (wsc *wsConnection) TryWriteRPCResponse(resp types.RPCResponse) bool { } } +func (wsc *wsConnection) AddSubscription(query string, data interface{}) error { + if _, ok := wsc.subscriptions[query]; ok { + return errors.New("Already subscribed") + } + + wsc.subscriptions[query] = data + return nil +} + +func (wsc *wsConnection) DeleteSubscription(query string) (interface{}, bool) { + data, ok := wsc.subscriptions[query] + if ok { + delete(wsc.subscriptions, query) + return data, true + } + return nil, false +} + +func (wsc *wsConnection) DeleteAllSubscriptions() { + wsc.subscriptions = make(map[string]interface{}) +} + // Read from the socket and subscribe to or unsubscribe from events func (wsc *wsConnection) readRoutine() { defer func() { @@ -499,7 +533,7 @@ func (wsc *wsConnection) readRoutine() { wsc.WriteRPCResponse(types.RPCInternalError("unknown", err)) go wsc.readRoutine() } else { - wsc.baseConn.Close() + wsc.baseConn.Close() // nolint: errcheck } }() @@ -513,7 +547,9 @@ func (wsc *wsConnection) readRoutine() { return default: // reset deadline for every type of message (control or data) - wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait)) + if err := wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait)); err != nil { + wsc.Logger.Error("failed to set read deadline", "err", err) + } var in []byte _, in, err := wsc.baseConn.ReadMessage() if err != nil { @@ -585,7 +621,9 @@ func (wsc *wsConnection) writeRoutine() { pingTicker := time.NewTicker(wsc.pingPeriod) defer func() { pingTicker.Stop() - wsc.baseConn.Close() + if err := wsc.baseConn.Close(); err != nil { + wsc.Logger.Error("Error closing connection", "err", err) + } }() // https://github.com/gorilla/websocket/issues/97 @@ -632,7 +670,9 @@ func (wsc *wsConnection) writeRoutine() { // All writes to the websocket must (re)set the write deadline. 
// If some writes don't set it while others do, they may timeout incorrectly (https://github.com/tendermint/tendermint/issues/553) func (wsc *wsConnection) writeMessageWithDeadline(msgType int, msg []byte) error { - wsc.baseConn.SetWriteDeadline(time.Now().Add(wsc.writeWait)) + if err := wsc.baseConn.SetWriteDeadline(time.Now().Add(wsc.writeWait)); err != nil { + return err + } return wsc.baseConn.WriteMessage(msgType, msg) } @@ -644,17 +684,16 @@ func (wsc *wsConnection) writeMessageWithDeadline(msgType int, msg []byte) error type WebsocketManager struct { websocket.Upgrader funcMap map[string]*RPCFunc - evsw events.EventSwitch logger log.Logger wsConnOptions []func(*wsConnection) } -// NewWebsocketManager returns a new WebsocketManager that routes according to the given funcMap, listens on the given event switch, -// and connects to the server with the given connection options. -func NewWebsocketManager(funcMap map[string]*RPCFunc, evsw events.EventSwitch, wsConnOptions ...func(*wsConnection)) *WebsocketManager { +// NewWebsocketManager returns a new WebsocketManager that routes according to +// the given funcMap and connects to the server with the given connection +// options. +func NewWebsocketManager(funcMap map[string]*RPCFunc, wsConnOptions ...func(*wsConnection)) *WebsocketManager { return &WebsocketManager{ funcMap: funcMap, - evsw: evsw, Upgrader: websocket.Upgrader{ CheckOrigin: func(r *http.Request) bool { // TODO ??? @@ -681,10 +720,13 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ } // register connection - con := NewWSConnection(wsConn, wm.funcMap, wm.evsw, wm.wsConnOptions...) + con := NewWSConnection(wsConn, wm.funcMap, wm.wsConnOptions...) 
con.SetLogger(wm.logger.With("remote", wsConn.RemoteAddr())) wm.logger.Info("New websocket connection", "remote", con.remoteAddr) - con.Start() // Blocking + err = con.Start() // Blocking + if err != nil { + wm.logger.Error("Error starting connection", "err", err) + } } // rpc.websocket @@ -741,5 +783,5 @@ func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[st buf.WriteString("") w.Header().Set("Content-Type", "text/html") w.WriteHeader(200) - w.Write(buf.Bytes()) + w.Write(buf.Bytes()) // nolint: errcheck } diff --git a/rpc/lib/server/handlers_test.go b/rpc/lib/server/handlers_test.go new file mode 100644 index 00000000..664bbd91 --- /dev/null +++ b/rpc/lib/server/handlers_test.go @@ -0,0 +1,97 @@ +package rpcserver_test + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + rs "github.com/tendermint/tendermint/rpc/lib/server" + types "github.com/tendermint/tendermint/rpc/lib/types" + "github.com/tendermint/tmlibs/log" +) + +func testMux() *http.ServeMux { + funcMap := map[string]*rs.RPCFunc{ + "c": rs.NewRPCFunc(func(s string, i int) (string, error) { return "foo", nil }, "s,i"), + } + mux := http.NewServeMux() + buf := new(bytes.Buffer) + logger := log.NewTMLogger(buf) + rs.RegisterRPCFuncs(mux, funcMap, logger) + + return mux +} + +func statusOK(code int) bool { return code >= 200 && code <= 299 } + +// Ensure that nefarious/unintended inputs to `params` +// do not crash our RPC handlers. +// See Issue https://github.com/tendermint/tendermint/issues/708. 
+func TestRPCParams(t *testing.T) { + mux := testMux() + tests := []struct { + payload string + wantErr string + }{ + // bad + {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found"}, + {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found"}, + {`{"method": "c", "id": "0", "params": a}`, "invalid character"}, + {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1"}, + {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "of type int"}, + {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string"}, + + // good + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, ""}, + {`{"method": "c", "id": "0", "params": {}}`, ""}, + {`{"method": "c", "id": "0", "params": ["a", 10]}`, ""}, + } + + for i, tt := range tests { + req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res := rec.Result() + // Always expecting back a JSONRPCResponse + assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) + blob, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("#%d: err reading body: %v", i, err) + continue + } + + recv := new(types.RPCResponse) + assert.Nil(t, json.Unmarshal(blob, recv), "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) + assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + + if tt.wantErr == "" { + assert.Nil(t, recv.Error, "#%d: not expecting an error", i) + } else { + assert.True(t, recv.Error.Code < 0, "#%d: not expecting a positive JSONRPC code", i) + // The wanted error is either in the message or the data + assert.Contains(t, recv.Error.Message+recv.Error.Data, tt.wantErr, "#%d: expected substring", i) + } + } +} + +func TestRPCNotification(t *testing.T) { + mux := testMux() + body := strings.NewReader(`{"jsonrpc": "2.0"}`) + req, _ := http.NewRequest("POST", "http://localhost/", body) + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, 
req) + res := rec.Result() + + // Always expecting back a JSONRPCResponse + require.True(t, statusOK(res.StatusCode), "should always return 2XX") + blob, err := ioutil.ReadAll(res.Body) + require.Nil(t, err, "reading from the body should not give back an error") + require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server") +} diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go index 7623337d..515baf5d 100644 --- a/rpc/lib/server/http_server.go +++ b/rpc/lib/server/http_server.go @@ -56,7 +56,7 @@ func WriteRPCResponseHTTPError(w http.ResponseWriter, httpCode int, res types.RP w.Header().Set("Content-Type", "application/json") w.WriteHeader(httpCode) - w.Write(jsonBytes) + w.Write(jsonBytes) // nolint: errcheck, gas } func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) { @@ -66,7 +66,7 @@ func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) { } w.Header().Set("Content-Type", "application/json") w.WriteHeader(200) - w.Write(jsonBytes) + w.Write(jsonBytes) // nolint: errcheck, gas } //----------------------------------------------------------------------------- diff --git a/rpc/lib/server/parse_test.go b/rpc/lib/server/parse_test.go index 3c6d6edd..a86226f2 100644 --- a/rpc/lib/server/parse_test.go +++ b/rpc/lib/server/parse_test.go @@ -150,7 +150,7 @@ func TestParseRPC(t *testing.T) { {`{"name": "john", "height": 22}`, 22, "john", false}, // defaults {`{"name": "solo", "unused": "stuff"}`, 0, "solo", false}, - // should fail - wrong types/lenght + // should fail - wrong types/length {`["flew", 7]`, 0, "", true}, {`[7,"flew",100]`, 0, "", true}, {`{"name": -12, "height": "fred"}`, 0, "", true}, diff --git a/rpc/lib/types/types.go b/rpc/lib/types/types.go index 86f9264d..bac7c240 100644 --- a/rpc/lib/types/types.go +++ b/rpc/lib/types/types.go @@ -6,7 +6,6 @@ import ( "strings" "github.com/pkg/errors" - events "github.com/tendermint/tmlibs/events" ) 
//---------------------------------------- @@ -68,14 +67,14 @@ func (err RPCError) Error() string { } type RPCResponse struct { - JSONRPC string `json:"jsonrpc"` - ID string `json:"id"` - Result *json.RawMessage `json:"result,omitempty"` - Error *RPCError `json:"error,omitempty"` + JSONRPC string `json:"jsonrpc"` + ID string `json:"id"` + Result json.RawMessage `json:"result,omitempty"` + Error *RPCError `json:"error,omitempty"` } func NewRPCSuccessResponse(id string, res interface{}) RPCResponse { - var raw *json.RawMessage + var rawMsg json.RawMessage if res != nil { var js []byte @@ -83,11 +82,10 @@ func NewRPCSuccessResponse(id string, res interface{}) RPCResponse { if err != nil { return RPCInternalError(id, errors.Wrap(err, "Error marshalling response")) } - rawMsg := json.RawMessage(js) - raw = &rawMsg + rawMsg = json.RawMessage(js) } - return RPCResponse{JSONRPC: "2.0", ID: id, Result: raw} + return RPCResponse{JSONRPC: "2.0", ID: id, Result: rawMsg} } func NewRPCErrorResponse(id string, code int, msg string, data string) RPCResponse { @@ -135,9 +133,12 @@ func RPCServerError(id string, err error) RPCResponse { // *wsConnection implements this interface. type WSRPCConnection interface { GetRemoteAddr() string - GetEventSwitch() events.EventSwitch WriteRPCResponse(resp RPCResponse) TryWriteRPCResponse(resp RPCResponse) bool + + AddSubscription(string, interface{}) error + DeleteSubscription(string) (interface{}, bool) + DeleteAllSubscriptions() } // websocket-only RPCFuncs take this as the first parameter. 
diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 55e27f5b..73da30ad 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -1,6 +1,7 @@ package rpctest import ( + "context" "fmt" "math/rand" "os" @@ -13,11 +14,35 @@ import ( cfg "github.com/tendermint/tendermint/config" nm "github.com/tendermint/tendermint/node" "github.com/tendermint/tendermint/proxy" + ctypes "github.com/tendermint/tendermint/rpc/core/types" core_grpc "github.com/tendermint/tendermint/rpc/grpc" + rpcclient "github.com/tendermint/tendermint/rpc/lib/client" "github.com/tendermint/tendermint/types" ) -var config *cfg.Config +var globalConfig *cfg.Config + +func waitForRPC() { + laddr := GetConfig().RPC.ListenAddress + client := rpcclient.NewJSONRPCClient(laddr) + result := new(ctypes.ResultStatus) + for { + _, err := client.Call("status", map[string]interface{}{}, result) + if err == nil { + return + } + } +} + +func waitForGRPC() { + client := GetGRPCClient() + for { + _, err := client.Ping(context.Background(), &core_grpc.RequestPing{}) + if err == nil { + return + } + } +} // f**ing long, but unique for each test func makePathname() string { @@ -46,29 +71,39 @@ func makeAddrs() (string, string, string) { // GetConfig returns a config for the test cases as a singleton func GetConfig() *cfg.Config { - if config == nil { + if globalConfig == nil { pathname := makePathname() - config = cfg.ResetTestRoot(pathname) + globalConfig = cfg.ResetTestRoot(pathname) // and we use random ports to run in parallel tm, rpc, grpc := makeAddrs() - config.P2P.ListenAddress = tm - config.RPC.ListenAddress = rpc - config.RPC.GRPCListenAddress = grpc + globalConfig.P2P.ListenAddress = tm + globalConfig.RPC.ListenAddress = rpc + globalConfig.RPC.GRPCListenAddress = grpc + globalConfig.TxIndex.IndexTags = "app.creator" // see dummy application } - return config + return globalConfig } func GetGRPCClient() core_grpc.BroadcastAPIClient { - grpcAddr := config.RPC.GRPCListenAddress + grpcAddr := 
globalConfig.RPC.GRPCListenAddress return core_grpc.StartGRPCClient(grpcAddr) } // StartTendermint starts a test tendermint server in a go routine and returns when it is initialized func StartTendermint(app abci.Application) *nm.Node { node := NewTendermint(app) - node.Start() + err := node.Start() + if err != nil { + panic(err) + } + + // wait for rpc + waitForRPC() + waitForGRPC() + fmt.Println("Tendermint running!") + return node } diff --git a/scripts/cutWALUntil/main.go b/scripts/cutWALUntil/main.go index a7948a26..84336895 100644 --- a/scripts/cutWALUntil/main.go +++ b/scripts/cutWALUntil/main.go @@ -22,9 +22,9 @@ func main() { os.Exit(1) } - var heightToStop uint64 + var heightToStop int64 var err error - if heightToStop, err = strconv.ParseUint(os.Args[2], 10, 64); err != nil { + if heightToStop, err = strconv.ParseInt(os.Args[2], 10, 64); err != nil { panic(fmt.Errorf("failed to parse height: %v", err)) } diff --git a/scripts/dist_build.sh b/scripts/dist_build.sh index 3e6d5abc..873bacf1 100755 --- a/scripts/dist_build.sh +++ b/scripts/dist_build.sh @@ -11,7 +11,6 @@ cd "$DIR" # Get the git commit GIT_COMMIT="$(git rev-parse --short HEAD)" -GIT_DESCRIBE="$(git describe --tags --always)" GIT_IMPORT="github.com/tendermint/tendermint/version" # Determine the arch/os combos we're building for @@ -25,12 +24,14 @@ make tools make get_vendor_deps # Build! +# ldflags: -s Omit the symbol table and debug information. +# -w Omit the DWARF symbol table. echo "==> Building..." 
"$(which gox)" \ -os="${XC_OS}" \ -arch="${XC_ARCH}" \ -osarch="!darwin/arm !solaris/amd64 !freebsd/amd64" \ - -ldflags "-X ${GIT_IMPORT}.GitCommit='${GIT_COMMIT}' -X ${GIT_IMPORT}.GitDescribe='${GIT_DESCRIBE}'" \ + -ldflags "-s -w -X ${GIT_IMPORT}.GitCommit='${GIT_COMMIT}'" \ -output "build/pkg/{{.OS}}_{{.Arch}}/tendermint" \ -tags="${BUILD_TAGS}" \ github.com/tendermint/tendermint/cmd/tendermint diff --git a/scripts/tendermint-builder/Dockerfile b/scripts/tendermint-builder/Dockerfile index 0c5130c5..2d3c0ef5 100644 --- a/scripts/tendermint-builder/Dockerfile +++ b/scripts/tendermint-builder/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.9.0 +FROM golang:1.9.2 RUN apt-get update && apt-get install -y --no-install-recommends \ zip \ diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go index 2cf40c57..e44ed4b1 100644 --- a/scripts/wal2json/main.go +++ b/scripts/wal2json/main.go @@ -41,10 +41,18 @@ func main() { panic(fmt.Errorf("failed to marshal msg: %v", err)) } - os.Stdout.Write(json) - os.Stdout.Write([]byte("\n")) - if end, ok := msg.Msg.(cs.EndHeightMessage); ok { - os.Stdout.Write([]byte(fmt.Sprintf("ENDHEIGHT %d\n", end.Height))) + _, err = os.Stdout.Write(json) + if err == nil { + _, err = os.Stdout.Write([]byte("\n")) + } + if err == nil { + if end, ok := msg.Msg.(cs.EndHeightMessage); ok { + _, err = os.Stdout.Write([]byte(fmt.Sprintf("ENDHEIGHT %d\n", end.Height))) // nolint: errcheck, gas + } + } + if err != nil { + fmt.Println("Failed to write message", err) + os.Exit(1) } } } diff --git a/state/errors.go b/state/errors.go index 4a87384a..f7520cf6 100644 --- a/state/errors.go +++ b/state/errors.go @@ -9,22 +9,22 @@ type ( ErrProxyAppConn error ErrUnknownBlock struct { - Height int + Height int64 } ErrBlockHashMismatch struct { CoreHash []byte AppHash []byte - Height int + Height int64 } ErrAppBlockHeightTooHigh struct { - CoreHeight int - AppHeight int + CoreHeight int64 + AppHeight int64 } ErrLastStateMismatch struct { - Height int + Height 
int64 Core []byte App []byte } @@ -35,7 +35,7 @@ type ( } ErrNoValSetForHeight struct { - Height int + Height int64 } ) diff --git a/state/execution.go b/state/execution.go index b917bfbe..1905b62f 100644 --- a/state/execution.go +++ b/state/execution.go @@ -8,7 +8,6 @@ import ( abci "github.com/tendermint/abci/types" crypto "github.com/tendermint/go-crypto" "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tmlibs/log" @@ -20,14 +19,14 @@ import ( // ValExecBlock executes the block, but does NOT mutate State. // + validates the block // + executes block.Txs on the proxyAppConn -func (s *State) ValExecBlock(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus, block *types.Block) (*ABCIResponses, error) { +func (s *State) ValExecBlock(txEventPublisher types.TxEventPublisher, proxyAppConn proxy.AppConnConsensus, block *types.Block) (*ABCIResponses, error) { // Validate the block. if err := s.validateBlock(block); err != nil { return nil, ErrInvalidBlock(err) } // Execute the block txs - abciResponses, err := execBlockOnProxyApp(eventCache, proxyAppConn, block, s.logger) + abciResponses, err := execBlockOnProxyApp(txEventPublisher, proxyAppConn, block, s.logger) if err != nil { // There was some error in proxyApp // TODO Report error and wait for proxyApp to be available. @@ -40,7 +39,7 @@ func (s *State) ValExecBlock(eventCache types.Fireable, proxyAppConn proxy.AppCo // Executes block's transactions on proxyAppConn. // Returns a list of transaction results and updates to the validator set // TODO: Generate a bitmap or otherwise store tx validity in state. 
-func execBlockOnProxyApp(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus, block *types.Block, logger log.Logger) (*ABCIResponses, error) { +func execBlockOnProxyApp(txEventPublisher types.TxEventPublisher, proxyAppConn proxy.AppConnConsensus, block *types.Block, logger log.Logger) (*ABCIResponses, error) { var validTxs, invalidTxs = 0, 0 txIndex := 0 @@ -54,38 +53,35 @@ func execBlockOnProxyApp(eventCache types.Fireable, proxyAppConn proxy.AppConnCo // TODO: make use of this info // Blocks may include invalid txs. // reqDeliverTx := req.(abci.RequestDeliverTx) - txError := "" txResult := r.DeliverTx - if txResult.Code == abci.CodeType_OK { + if txResult.Code == abci.CodeTypeOK { validTxs++ } else { logger.Debug("Invalid tx", "code", txResult.Code, "log", txResult.Log) invalidTxs++ - txError = txResult.Code.String() } - abciResponses.DeliverTx[txIndex] = txResult - txIndex++ - // NOTE: if we count we can access the tx from the block instead of // pulling it from the req - event := types.EventDataTx{ + txEventPublisher.PublishEventTx(types.EventDataTx{types.TxResult{ Height: block.Height, + Index: uint32(txIndex), Tx: types.Tx(req.GetDeliverTx().Tx), - Data: txResult.Data, - Code: txResult.Code, - Log: txResult.Log, - Error: txError, - } - types.FireEventTx(eventCache, event) + Result: *txResult, + }}) + + abciResponses.DeliverTx[txIndex] = txResult + txIndex++ } } proxyAppConn.SetResponseCallback(proxyCb) // Begin block - err := proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{ + _, err := proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{ block.Hash(), types.TM2PB.Header(block.Header), + nil, + nil, }) if err != nil { logger.Error("Error in proxyAppConn.BeginBlock", "err", err) @@ -101,7 +97,7 @@ func execBlockOnProxyApp(eventCache types.Fireable, proxyAppConn proxy.AppConnCo } // End block - abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(uint64(block.Height)) + abciResponses.EndBlock, err = 
proxyAppConn.EndBlockSync(abci.RequestEndBlock{block.Height}) if err != nil { logger.Error("Error in proxyAppConn.EndBlock", "err", err) return nil, err @@ -128,7 +124,7 @@ func updateValidators(validators *types.ValidatorSet, changedValidators []*abci. address := pubkey.Address() power := int64(v.Power) - // mind the overflow from uint64 + // mind the overflow from int64 if power < 0 { return errors.New(cmn.Fmt("Power (%d) overflows int64", v.Power)) } @@ -160,6 +156,7 @@ func updateValidators(validators *types.ValidatorSet, changedValidators []*abci. // return a bit array of validators that signed the last commit // NOTE: assumes commits have already been authenticated +/* function is currently unused func commitBitArrayFromBlock(block *types.Block) *cmn.BitArray { signed := cmn.NewBitArray(len(block.LastCommit.Precommits)) for i, precommit := range block.LastCommit.Precommits { @@ -169,6 +166,7 @@ func commitBitArrayFromBlock(block *types.Block) *cmn.BitArray { } return signed } +*/ //----------------------------------------------------- // Validate block @@ -208,24 +206,20 @@ func (s *State) validateBlock(block *types.Block) error { //----------------------------------------------------------------------------- // ApplyBlock validates & executes the block, updates state w/ ABCI responses, // then commits and updates the mempool atomically, then saves state. -// Transaction results are optionally indexed. // ApplyBlock validates the block against the state, executes it against the app, // commits it, and saves the block and state. It's the only function that needs to be called // from outside this package to process and commit an entire block. 
-func (s *State) ApplyBlock(eventCache types.Fireable, proxyAppConn proxy.AppConnConsensus, +func (s *State) ApplyBlock(txEventPublisher types.TxEventPublisher, proxyAppConn proxy.AppConnConsensus, block *types.Block, partsHeader types.PartSetHeader, mempool types.Mempool) error { - abciResponses, err := s.ValExecBlock(eventCache, proxyAppConn, block) + abciResponses, err := s.ValExecBlock(txEventPublisher, proxyAppConn, block) if err != nil { return fmt.Errorf("Exec failed for application: %v", err) } fail.Fail() // XXX - // index txs. This could run in the background - s.indexTxs(abciResponses) - // save the results before we commit s.SaveABCIResponses(abciResponses) @@ -256,7 +250,11 @@ func (s *State) CommitStateUpdateMempool(proxyAppConn proxy.AppConnConsensus, bl defer mempool.Unlock() // Commit block, get hash back - res := proxyAppConn.CommitSync() + res, err := proxyAppConn.CommitSync() + if err != nil { + s.logger.Error("Client error during proxyAppConn.CommitSync", "err", err) + return err + } if res.IsErr() { s.logger.Error("Error in proxyAppConn.CommitSync", "err", res) return res @@ -271,38 +269,23 @@ func (s *State) CommitStateUpdateMempool(proxyAppConn proxy.AppConnConsensus, bl s.AppHash = res.Data // Update mempool. - mempool.Update(block.Height, block.Txs) - - return nil -} - -func (s *State) indexTxs(abciResponses *ABCIResponses) { - // save the tx results using the TxIndexer - // NOTE: these may be overwriting, but the values should be the same. - batch := txindex.NewBatch(len(abciResponses.DeliverTx)) - for i, d := range abciResponses.DeliverTx { - tx := abciResponses.txs[i] - batch.Add(types.TxResult{ - Height: uint64(abciResponses.Height), - Index: uint32(i), - Tx: tx, - Result: *d, - }) - } - s.TxIndexer.AddBatch(batch) + return mempool.Update(block.Height, block.Txs) } // ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state. // It returns the application root hash (result of abci.Commit). 
func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block, logger log.Logger) ([]byte, error) { - var eventCache types.Fireable // nil - _, err := execBlockOnProxyApp(eventCache, appConnConsensus, block, logger) + _, err := execBlockOnProxyApp(types.NopEventBus{}, appConnConsensus, block, logger) if err != nil { logger.Error("Error executing block on proxy app", "height", block.Height, "err", err) return nil, err } // Commit block, get hash back - res := appConnConsensus.CommitSync() + res, err := appConnConsensus.CommitSync() + if err != nil { + logger.Error("Client error during proxyAppConn.CommitSync", "err", res) + return nil, err + } if res.IsErr() { logger.Error("Error in proxyAppConn.CommitSync", "err", res) return nil, res diff --git a/state/execution_test.go b/state/execution_test.go index 425f59ed..64f17094 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -3,13 +3,11 @@ package state import ( "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/abci/example/dummy" crypto "github.com/tendermint/go-crypto" "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/log" @@ -25,22 +23,19 @@ var ( func TestApplyBlock(t *testing.T) { cc := proxy.NewLocalClientCreator(dummy.NewDummyApplication()) proxyApp := proxy.NewAppConns(cc, nil) - _, err := proxyApp.Start() + err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() state := state() state.SetLogger(log.TestingLogger()) - indexer := &dummyIndexer{0} - state.TxIndexer = indexer // make block block := makeBlock(1, state) - err = state.ApplyBlock(nil, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), types.MockMempool{}) + err = state.ApplyBlock(types.NopEventBus{}, proxyApp.Consensus(), block, block.MakePartSet(testPartSize).Header(), 
types.MockMempool{}) require.Nil(t, err) - assert.Equal(t, nTxsPerBlock, indexer.Indexed) // test indexing works // TODO check state and mempool } @@ -48,9 +43,9 @@ func TestApplyBlock(t *testing.T) { //---------------------------------------------------------------------------- // make some bogus txs -func makeTxs(blockNum int) (txs []types.Tx) { +func makeTxs(height int64) (txs []types.Tx) { for i := 0; i < nTxsPerBlock; i++ { - txs = append(txs, types.Tx([]byte{byte(blockNum), byte(i)})) + txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) } return txs } @@ -59,32 +54,19 @@ func state() *State { s, _ := MakeGenesisState(dbm.NewMemDB(), &types.GenesisDoc{ ChainID: chainID, Validators: []types.GenesisValidator{ - types.GenesisValidator{privKey.PubKey(), 10000, "test"}, + {privKey.PubKey(), 10000, "test"}, }, AppHash: nil, }) return s } -func makeBlock(num int, state *State) *types.Block { +func makeBlock(height int64, state *State) *types.Block { prevHash := state.LastBlockID.Hash prevParts := types.PartSetHeader{} valHash := state.Validators.Hash() prevBlockID := types.BlockID{prevHash, prevParts} - block, _ := types.MakeBlock(num, chainID, makeTxs(num), new(types.Commit), + block, _ := types.MakeBlock(height, chainID, makeTxs(height), new(types.Commit), prevBlockID, valHash, state.AppHash, testPartSize) return block } - -// dummyIndexer increments counter every time we index transaction. 
-type dummyIndexer struct { - Indexed int -} - -func (indexer *dummyIndexer) Get(hash []byte) (*types.TxResult, error) { - return nil, nil -} -func (indexer *dummyIndexer) AddBatch(batch *txindex.Batch) error { - indexer.Indexed += batch.Size() - return nil -} diff --git a/state/state.go b/state/state.go index 4241f9de..47de859e 100644 --- a/state/state.go +++ b/state/state.go @@ -15,8 +15,6 @@ import ( wire "github.com/tendermint/go-wire" - "github.com/tendermint/tendermint/state/txindex" - "github.com/tendermint/tendermint/state/txindex/null" "github.com/tendermint/tendermint/types" ) @@ -25,7 +23,7 @@ var ( abciResponsesKey = []byte("abciResponsesKey") ) -func calcValidatorsKey(height int) []byte { +func calcValidatorsKey(height int64) []byte { return []byte(cmn.Fmt("validatorsKey:%v", height)) } @@ -47,7 +45,7 @@ type State struct { // These fields are updated by SetBlockAndValidators. // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) // LastValidators is used to validate block.LastCommit. - LastBlockHeight int + LastBlockHeight int64 LastBlockID types.BlockID LastBlockTime time.Time Validators *types.ValidatorSet @@ -56,14 +54,11 @@ type State struct { // the change only applies to the next block. // So, if s.LastBlockHeight causes a valset change, // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 - LastHeightValidatorsChanged int + LastHeightValidatorsChanged int64 // AppHash is updated after Commit AppHash []byte - // TxIndexer indexes transactions - TxIndexer txindex.TxIndexer `json:"-"` - logger log.Logger } @@ -95,7 +90,7 @@ func loadState(db dbm.DB, key []byte) *State { return nil } - s := &State{db: db, TxIndexer: &null.TxIndex{}} + s := &State{db: db} r, n, err := bytes.NewReader(buf), new(int), new(error) wire.ReadBinaryPtr(&s, r, 0, n, err) if *err != nil { @@ -114,8 +109,6 @@ func (s *State) SetLogger(l log.Logger) { } // Copy makes a copy of the State for mutating. -// NOTE: Does not create a copy of TxIndexer. 
It creates a new pointer that points to the same -// underlying TxIndexer. func (s *State) Copy() *State { return &State{ db: s.db, @@ -125,7 +118,6 @@ func (s *State) Copy() *State { Validators: s.Validators.Copy(), LastValidators: s.LastValidators.Copy(), AppHash: s.AppHash, - TxIndexer: s.TxIndexer, LastHeightValidatorsChanged: s.LastHeightValidatorsChanged, logger: s.logger, ChainID: s.ChainID, @@ -171,7 +163,7 @@ func (s *State) LoadABCIResponses() *ABCIResponses { } // LoadValidators loads the ValidatorSet for a given height. -func (s *State) LoadValidators(height int) (*types.ValidatorSet, error) { +func (s *State) LoadValidators(height int64) (*types.ValidatorSet, error) { valInfo := s.loadValidators(height) if valInfo == nil { return nil, ErrNoValSetForHeight{height} @@ -188,7 +180,7 @@ func (s *State) LoadValidators(height int) (*types.ValidatorSet, error) { return valInfo.ValidatorSet, nil } -func (s *State) loadValidators(height int) *ValidatorsInfo { +func (s *State) loadValidators(height int64) *ValidatorsInfo { buf := s.db.Get(calcValidatorsKey(height)) if len(buf) == 0 { return nil @@ -264,7 +256,7 @@ func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader typ } -func (s *State) setBlockAndValidators(height int, blockID types.BlockID, blockTime time.Time, +func (s *State) setBlockAndValidators(height int64, blockID types.BlockID, blockTime time.Time, prevValSet, nextValSet *types.ValidatorSet) { s.LastBlockHeight = height @@ -284,10 +276,10 @@ func (s *State) GetValidators() (last *types.ValidatorSet, current *types.Valida // ABCIResponses retains the responses of the various ABCI calls during block processing. // It is persisted to disk before calling Commit. 
type ABCIResponses struct { - Height int + Height int64 DeliverTx []*abci.ResponseDeliverTx - EndBlock abci.ResponseEndBlock + EndBlock *abci.ResponseEndBlock txs types.Txs // reference for indexing results by hash } @@ -311,7 +303,7 @@ func (a *ABCIResponses) Bytes() []byte { // ValidatorsInfo represents the latest validator set, or the last height it changed type ValidatorsInfo struct { ValidatorSet *types.ValidatorSet - LastHeightChanged int + LastHeightChanged int64 } // Bytes serializes the ValidatorsInfo using go-wire @@ -368,7 +360,6 @@ func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) (*State, error) { } } - // we do not need indexer during replay and in tests return &State{ db: db, @@ -381,7 +372,6 @@ func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) (*State, error) { Validators: types.NewValidatorSet(validators), LastValidators: types.NewValidatorSet(nil), AppHash: genDoc.AppHash, - TxIndexer: &null.TxIndex{}, LastHeightValidatorsChanged: 1, }, nil } diff --git a/state/state_test.go b/state/state_test.go index 7bb43afa..9b78b387 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -78,9 +78,9 @@ func TestABCIResponsesSaveLoad(t *testing.T) { // build mock responses block := makeBlock(2, state) abciResponses := NewABCIResponses(block) - abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo")} - abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok"} - abciResponses.EndBlock = abci.ResponseEndBlock{Diffs: []*abci.Validator{ + abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Tags: []*abci.KVPair{}} + abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Tags: []*abci.KVPair{}} + abciResponses.EndBlock = &abci.ResponseEndBlock{Diffs: []*abci.Validator{ { PubKey: crypto.GenPrivKeyEd25519().PubKey().Bytes(), Power: 10, @@ -138,7 +138,7 @@ func TestValidatorChangesSaveLoad(t *testing.T) { assert := assert.New(t) // change vals at these 
heights - changeHeights := []int{1, 2, 4, 5, 10, 15, 16, 17, 20} + changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} N := len(changeHeights) // each valset is just one validator. @@ -155,7 +155,7 @@ func TestValidatorChangesSaveLoad(t *testing.T) { highestHeight := changeHeights[N-1] + 5 changeIndex := 0 pubkey := pubkeys[changeIndex] - for i := 1; i < highestHeight; i++ { + for i := int64(1); i < highestHeight; i++ { // when we get to a change height, // use the next pubkey if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { @@ -171,7 +171,7 @@ func TestValidatorChangesSaveLoad(t *testing.T) { testCases := make([]valChangeTestCase, highestHeight) changeIndex = 0 pubkey = pubkeys[changeIndex] - for i := 1; i < highestHeight+1; i++ { + for i := int64(1); i < highestHeight+1; i++ { // we we get to the height after a change height // use the next pubkey (note our counter starts at 0 this time) if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 { @@ -192,18 +192,19 @@ func TestValidatorChangesSaveLoad(t *testing.T) { } } -func makeHeaderPartsResponses(state *State, height int, +func makeHeaderPartsResponses(state *State, height int64, pubkey crypto.PubKey) (*types.Header, types.PartSetHeader, *ABCIResponses) { block := makeBlock(height, state) _, val := state.Validators.GetByIndex(0) abciResponses := &ABCIResponses{ - Height: height, + Height: height, + EndBlock: &abci.ResponseEndBlock{Diffs: []*abci.Validator{}}, } // if the pubkey is new, remove the old and add the new if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { - abciResponses.EndBlock = abci.ResponseEndBlock{ + abciResponses.EndBlock = &abci.ResponseEndBlock{ Diffs: []*abci.Validator{ {val.PubKey.Bytes(), 0}, {pubkey.Bytes(), 10}, @@ -215,6 +216,6 @@ func makeHeaderPartsResponses(state *State, height int, } type valChangeTestCase struct { - height int + height int64 vals crypto.PubKey } diff --git a/state/txindex/indexer.go b/state/txindex/indexer.go index 
66897905..bd51fbb2 100644 --- a/state/txindex/indexer.go +++ b/state/txindex/indexer.go @@ -4,20 +4,24 @@ import ( "errors" "github.com/tendermint/tendermint/types" + "github.com/tendermint/tmlibs/pubsub/query" ) // TxIndexer interface defines methods to index and search transactions. type TxIndexer interface { - // AddBatch analyzes, indexes or stores a batch of transactions. - // NOTE: We do not specify Index method for analyzing a single transaction - // here because it bears heavy perfomance loses. Almost all advanced indexers - // support batching. + // AddBatch analyzes, indexes and stores a batch of transactions. AddBatch(b *Batch) error + // Index analyzes, indexes and stores a single transaction. + Index(result *types.TxResult) error + // Get returns the transaction specified by hash or nil if the transaction is not indexed // or stored. Get(hash []byte) (*types.TxResult, error) + + // Search allows you to query for transactions. + Search(q *query.Query) ([]*types.TxResult, error) } //---------------------------------------------------- @@ -26,18 +30,18 @@ type TxIndexer interface { // Batch groups together multiple Index operations to be performed at the same time. // NOTE: Batch is NOT thread-safe and must not be modified after starting its execution. type Batch struct { - Ops []types.TxResult + Ops []*types.TxResult } // NewBatch creates a new Batch. func NewBatch(n int) *Batch { return &Batch{ - Ops: make([]types.TxResult, n), + Ops: make([]*types.TxResult, n), } } // Add or update an entry for the given result.Index. 
-func (b *Batch) Add(result types.TxResult) error { +func (b *Batch) Add(result *types.TxResult) error { b.Ops[result.Index] = result return nil } diff --git a/state/txindex/indexer_service.go b/state/txindex/indexer_service.go new file mode 100644 index 00000000..3e5fab12 --- /dev/null +++ b/state/txindex/indexer_service.go @@ -0,0 +1,49 @@ +package txindex + +import ( + "context" + + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tmlibs/common" +) + +const ( + subscriber = "IndexerService" +) + +type IndexerService struct { + cmn.BaseService + + idr TxIndexer + eventBus *types.EventBus +} + +func NewIndexerService(idr TxIndexer, eventBus *types.EventBus) *IndexerService { + is := &IndexerService{idr: idr, eventBus: eventBus} + is.BaseService = *cmn.NewBaseService(nil, "IndexerService", is) + return is +} + +// OnStart implements cmn.Service by subscribing for all transactions +// and indexing them by tags. +func (is *IndexerService) OnStart() error { + ch := make(chan interface{}) + if err := is.eventBus.Subscribe(context.Background(), subscriber, types.EventQueryTx, ch); err != nil { + return err + } + go func() { + for event := range ch { + // TODO: may be not perfomant to write one event at a time + txResult := event.(types.TMEventData).Unwrap().(types.EventDataTx).TxResult + is.idr.Index(&txResult) + } + }() + return nil +} + +// OnStop implements cmn.Service by unsubscribing from all transactions. 
+func (is *IndexerService) OnStop() { + if is.eventBus.IsRunning() { + _ = is.eventBus.UnsubscribeAll(context.Background(), subscriber) + } +} diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index db075e54..d40fe80f 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -2,25 +2,57 @@ package kv import ( "bytes" + "encoding/hex" "fmt" + "strconv" + "strings" + "time" - "github.com/tendermint/go-wire" - - db "github.com/tendermint/tmlibs/db" + "github.com/pkg/errors" + abci "github.com/tendermint/abci/types" + wire "github.com/tendermint/go-wire" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tmlibs/common" + db "github.com/tendermint/tmlibs/db" + "github.com/tendermint/tmlibs/pubsub/query" ) -// TxIndex is the simplest possible indexer, backed by Key-Value storage (levelDB). -// It can only index transaction by its identifier. +const ( + tagKeySeparator = "/" +) + +var _ txindex.TxIndexer = (*TxIndex)(nil) + +// TxIndex is the simplest possible indexer, backed by key-value storage (levelDB). type TxIndex struct { - store db.DB + store db.DB + tagsToIndex []string + indexAllTags bool } -// NewTxIndex returns new instance of TxIndex. -func NewTxIndex(store db.DB) *TxIndex { - return &TxIndex{store: store} +// NewTxIndex creates new KV indexer. +func NewTxIndex(store db.DB, options ...func(*TxIndex)) *TxIndex { + txi := &TxIndex{store: store, tagsToIndex: make([]string, 0), indexAllTags: false} + for _, o := range options { + o(txi) + } + return txi +} + +// IndexTags is an option for setting which tags to index. +func IndexTags(tags []string) func(*TxIndex) { + return func(txi *TxIndex) { + txi.tagsToIndex = tags + } +} + +// IndexAllTags is an option for indexing all tags. 
+func IndexAllTags() func(*TxIndex) { + return func(txi *TxIndex) { + txi.indexAllTags = true + } } // Get gets transaction from the TxIndex storage and returns it or nil if the @@ -46,13 +78,328 @@ func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) { return txResult, nil } -// AddBatch writes a batch of transactions into the TxIndex storage. +// AddBatch indexes a batch of transactions using the given list of tags. func (txi *TxIndex) AddBatch(b *txindex.Batch) error { storeBatch := txi.store.NewBatch() + for _, result := range b.Ops { - rawBytes := wire.BinaryBytes(&result) - storeBatch.Set(result.Tx.Hash(), rawBytes) + hash := result.Tx.Hash() + + // index tx by tags + for _, tag := range result.Result.Tags { + if txi.indexAllTags || cmn.StringInSlice(tag.Key, txi.tagsToIndex) { + storeBatch.Set(keyForTag(tag, result), hash) + } + } + + // index tx by hash + rawBytes := wire.BinaryBytes(result) + storeBatch.Set(hash, rawBytes) } + storeBatch.Write() return nil } + +// Index indexes a single transaction using the given list of tags. +func (txi *TxIndex) Index(result *types.TxResult) error { + b := txi.store.NewBatch() + + hash := result.Tx.Hash() + + // index tx by tags + for _, tag := range result.Result.Tags { + if txi.indexAllTags || cmn.StringInSlice(tag.Key, txi.tagsToIndex) { + b.Set(keyForTag(tag, result), hash) + } + } + + // index tx by hash + rawBytes := wire.BinaryBytes(result) + b.Set(hash, rawBytes) + + b.Write() + return nil +} + +// Search performs a search using the given query. It breaks the query into +// conditions (like "tx.height > 5"). For each condition, it queries the DB +// index. One special use cases here: (1) if "tx.hash" is found, it returns tx +// result for it (2) for range queries it is better for the client to provide +// both lower and upper bounds, so we are not performing a full scan. Results +// from querying indexes are then intersected and returned to the caller. 
+func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) { + var hashes [][]byte + var hashesInitialized bool + + // get a list of conditions (like "tx.height > 5") + conditions := q.Conditions() + + // if there is a hash condition, return the result immediately + hash, err, ok := lookForHash(conditions) + if err != nil { + return nil, errors.Wrap(err, "error during searching for a hash in the query") + } else if ok { + res, err := txi.Get(hash) + if res == nil { + return []*types.TxResult{}, nil + } else { + return []*types.TxResult{res}, errors.Wrap(err, "error while retrieving the result") + } + } + + // conditions to skip because they're handled before "everything else" + skipIndexes := make([]int, 0) + + // if there is a height condition ("tx.height=3"), extract it for faster lookups + height, heightIndex := lookForHeight(conditions) + if heightIndex >= 0 { + skipIndexes = append(skipIndexes, heightIndex) + } + + // extract ranges + // if both upper and lower bounds exist, it's better to get them in order not + // no iterate over kvs that are not within range. + ranges, rangeIndexes := lookForRanges(conditions) + if len(ranges) > 0 { + skipIndexes = append(skipIndexes, rangeIndexes...) 
+ + for _, r := range ranges { + if !hashesInitialized { + hashes = txi.matchRange(r, startKeyForRange(r, height)) + hashesInitialized = true + } else { + hashes = intersect(hashes, txi.matchRange(r, startKeyForRange(r, height))) + } + } + } + + // for all other conditions + for i, c := range conditions { + if cmn.IntInSlice(i, skipIndexes) { + continue + } + + if !hashesInitialized { + hashes = txi.match(c, startKey(c, height)) + hashesInitialized = true + } else { + hashes = intersect(hashes, txi.match(c, startKey(c, height))) + } + } + + results := make([]*types.TxResult, len(hashes)) + i := 0 + for _, h := range hashes { + results[i], err = txi.Get(h) + if err != nil { + return nil, errors.Wrapf(err, "failed to get Tx{%X}", h) + } + i++ + } + + return results, nil +} + +func lookForHash(conditions []query.Condition) (hash []byte, err error, ok bool) { + for _, c := range conditions { + if c.Tag == types.TxHashKey { + decoded, err := hex.DecodeString(c.Operand.(string)) + return decoded, err, true + } + } + return +} + +func lookForHeight(conditions []query.Condition) (height int64, index int) { + for i, c := range conditions { + if c.Tag == types.TxHeightKey { + return c.Operand.(int64), i + } + } + return 0, -1 +} + +// special map to hold range conditions +// Example: account.number => queryRange{lowerBound: 1, upperBound: 5} +type queryRanges map[string]queryRange + +type queryRange struct { + key string + lowerBound interface{} // int || time.Time + includeLowerBound bool + upperBound interface{} // int || time.Time + includeUpperBound bool +} + +func lookForRanges(conditions []query.Condition) (ranges queryRanges, indexes []int) { + ranges = make(queryRanges) + for i, c := range conditions { + if isRangeOperation(c.Op) { + r, ok := ranges[c.Tag] + if !ok { + r = queryRange{key: c.Tag} + } + switch c.Op { + case query.OpGreater: + r.lowerBound = c.Operand + case query.OpGreaterEqual: + r.includeLowerBound = true + r.lowerBound = c.Operand + case 
query.OpLess: + r.upperBound = c.Operand + case query.OpLessEqual: + r.includeUpperBound = true + r.upperBound = c.Operand + } + ranges[c.Tag] = r + indexes = append(indexes, i) + } + } + return ranges, indexes +} + +func isRangeOperation(op query.Operator) bool { + switch op { + case query.OpGreater, query.OpGreaterEqual, query.OpLess, query.OpLessEqual: + return true + default: + return false + } +} + +func (txi *TxIndex) match(c query.Condition, startKey []byte) (hashes [][]byte) { + if c.Op == query.OpEqual { + it := txi.store.IteratorPrefix(startKey) + defer it.Release() + for it.Next() { + hashes = append(hashes, it.Value()) + } + } else if c.Op == query.OpContains { + // XXX: doing full scan because startKey does not apply here + // For example, if startKey = "account.owner=an" and search query = "accoutn.owner CONSISTS an" + // we can't iterate with prefix "account.owner=an" because we might miss keys like "account.owner=Ulan" + it := txi.store.Iterator() + defer it.Release() + for it.Next() { + if !isTagKey(it.Key()) { + continue + } + if strings.Contains(extractValueFromKey(it.Key()), c.Operand.(string)) { + hashes = append(hashes, it.Value()) + } + } + } else { + panic("other operators should be handled already") + } + return +} + +func (txi *TxIndex) matchRange(r queryRange, startKey []byte) (hashes [][]byte) { + it := txi.store.IteratorPrefix(startKey) + defer it.Release() +LOOP: + for it.Next() { + if !isTagKey(it.Key()) { + continue + } + if r.upperBound != nil { + // no other way to stop iterator other than checking for upperBound + switch (r.upperBound).(type) { + case int64: + v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) + if err == nil && v == r.upperBound { + if r.includeUpperBound { + hashes = append(hashes, it.Value()) + } + break LOOP + } + // XXX: passing time in a ABCI Tags is not yet implemented + // case time.Time: + // v := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) + // if v == r.upperBound { + // break 
+ // } + } + } + hashes = append(hashes, it.Value()) + } + return +} + +/////////////////////////////////////////////////////////////////////////////// +// Keys + +func startKey(c query.Condition, height int64) []byte { + var key string + if height > 0 { + key = fmt.Sprintf("%s/%v/%d", c.Tag, c.Operand, height) + } else { + key = fmt.Sprintf("%s/%v", c.Tag, c.Operand) + } + return []byte(key) +} + +func startKeyForRange(r queryRange, height int64) []byte { + if r.lowerBound == nil { + return []byte(fmt.Sprintf("%s", r.key)) + } + + var lowerBound interface{} + if r.includeLowerBound { + lowerBound = r.lowerBound + } else { + switch t := r.lowerBound.(type) { + case int64: + lowerBound = t + 1 + case time.Time: + lowerBound = t.Unix() + 1 + default: + panic("not implemented") + } + } + var key string + if height > 0 { + key = fmt.Sprintf("%s/%v/%d", r.key, lowerBound, height) + } else { + key = fmt.Sprintf("%s/%v", r.key, lowerBound) + } + return []byte(key) +} + +func isTagKey(key []byte) bool { + return strings.Count(string(key), tagKeySeparator) == 3 +} + +func extractValueFromKey(key []byte) string { + parts := strings.SplitN(string(key), tagKeySeparator, 3) + return parts[1] +} + +func keyForTag(tag *abci.KVPair, result *types.TxResult) []byte { + switch tag.ValueType { + case abci.KVPair_STRING: + return []byte(fmt.Sprintf("%s/%v/%d/%d", tag.Key, tag.ValueString, result.Height, result.Index)) + case abci.KVPair_INT: + return []byte(fmt.Sprintf("%s/%v/%d/%d", tag.Key, tag.ValueInt, result.Height, result.Index)) + // case abci.KVPair_TIME: + // return []byte(fmt.Sprintf("%s/%d/%d/%d", tag.Key, tag.ValueTime.Unix(), result.Height, result.Index)) + default: + panic(fmt.Sprintf("Undefined value type: %v", tag.ValueType)) + } +} + +/////////////////////////////////////////////////////////////////////////////// +// Utils + +func intersect(as, bs [][]byte) [][]byte { + i := make([][]byte, 0, cmn.MinInt(len(as), len(bs))) + for _, a := range as { + for _, b := range bs 
{ + if bytes.Equal(a, b) { + i = append(i, a) + } + } + } + return i +} diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index 903189c2..0eac1760 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -1,6 +1,7 @@ package kv import ( + "fmt" "io/ioutil" "os" "testing" @@ -11,41 +12,160 @@ import ( "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" db "github.com/tendermint/tmlibs/db" + "github.com/tendermint/tmlibs/pubsub/query" ) func TestTxIndex(t *testing.T) { - indexer := &TxIndex{store: db.NewMemDB()} + indexer := NewTxIndex(db.NewMemDB()) tx := types.Tx("HELLO WORLD") - txResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeType_OK, Log: ""}} + txResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: []*abci.KVPair{}}} hash := tx.Hash() batch := txindex.NewBatch(1) - batch.Add(*txResult) + if err := batch.Add(txResult); err != nil { + t.Error(err) + } err := indexer.AddBatch(batch) - require.Nil(t, err) + require.NoError(t, err) loadedTxResult, err := indexer.Get(hash) - require.Nil(t, err) + require.NoError(t, err) assert.Equal(t, txResult, loadedTxResult) + + tx2 := types.Tx("BYE BYE WORLD") + txResult2 := &types.TxResult{1, 0, tx2, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: []*abci.KVPair{}}} + hash2 := tx2.Hash() + + err = indexer.Index(txResult2) + require.NoError(t, err) + + loadedTxResult2, err := indexer.Get(hash2) + require.NoError(t, err) + assert.Equal(t, txResult2, loadedTxResult2) +} + +func TestTxSearch(t *testing.T) { + allowedTags := []string{"account.number", "account.owner", "account.date"} + indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags)) + + txResult := txResultWithTags([]*abci.KVPair{ + {Key: "account.number", ValueType: abci.KVPair_INT, ValueInt: 1}, + {Key: "account.owner", ValueType: abci.KVPair_STRING, 
ValueString: "Ivan"}, + {Key: "not_allowed", ValueType: abci.KVPair_STRING, ValueString: "Vlad"}, + }) + hash := txResult.Tx.Hash() + + err := indexer.Index(txResult) + require.NoError(t, err) + + testCases := []struct { + q string + resultsLength int + }{ + // search by hash + {fmt.Sprintf("tx.hash = '%X'", hash), 1}, + // search by exact match (one tag) + {"account.number = 1", 1}, + // search by exact match (two tags) + {"account.number = 1 AND account.owner = 'Ivan'", 1}, + // search by exact match (two tags) + {"account.number = 1 AND account.owner = 'Vlad'", 0}, + // search by range + {"account.number >= 1 AND account.number <= 5", 1}, + // search by range (lower bound) + {"account.number >= 1", 1}, + // search by range (upper bound) + {"account.number <= 5", 1}, + // search using not allowed tag + {"not_allowed = 'boom'", 0}, + // search for not existing tx result + {"account.number >= 2 AND account.number <= 5", 0}, + // search using not existing tag + {"account.date >= TIME 2013-05-03T14:45:00Z", 0}, + // search using CONTAINS + {"account.owner CONTAINS 'an'", 1}, + // search using CONTAINS + {"account.owner CONTAINS 'Vlad'", 0}, + } + + for _, tc := range testCases { + t.Run(tc.q, func(t *testing.T) { + results, err := indexer.Search(query.MustParse(tc.q)) + assert.NoError(t, err) + + assert.Len(t, results, tc.resultsLength) + if tc.resultsLength > 0 { + assert.Equal(t, []*types.TxResult{txResult}, results) + } + }) + } +} + +func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { + allowedTags := []string{"account.number"} + indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags)) + + txResult := txResultWithTags([]*abci.KVPair{ + {Key: "account.number", ValueType: abci.KVPair_INT, ValueInt: 1}, + {Key: "account.number", ValueType: abci.KVPair_INT, ValueInt: 2}, + }) + + err := indexer.Index(txResult) + require.NoError(t, err) + + results, err := indexer.Search(query.MustParse("account.number >= 1")) + assert.NoError(t, err) + + 
assert.Len(t, results, 1) + assert.Equal(t, []*types.TxResult{txResult}, results) +} + +func TestIndexAllTags(t *testing.T) { + indexer := NewTxIndex(db.NewMemDB(), IndexAllTags()) + + txResult := txResultWithTags([]*abci.KVPair{ + abci.KVPairString("account.owner", "Ivan"), + abci.KVPairInt("account.number", 1), + }) + + err := indexer.Index(txResult) + require.NoError(t, err) + + results, err := indexer.Search(query.MustParse("account.number >= 1")) + assert.NoError(t, err) + assert.Len(t, results, 1) + assert.Equal(t, []*types.TxResult{txResult}, results) + + results, err = indexer.Search(query.MustParse("account.owner = 'Ivan'")) + assert.NoError(t, err) + assert.Len(t, results, 1) + assert.Equal(t, []*types.TxResult{txResult}, results) +} + +func txResultWithTags(tags []*abci.KVPair) *types.TxResult { + tx := types.Tx("HELLO WORLD") + return &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: tags}} } func benchmarkTxIndex(txsCount int, b *testing.B) { tx := types.Tx("HELLO WORLD") - txResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeType_OK, Log: ""}} + txResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: []*abci.KVPair{}}} dir, err := ioutil.TempDir("", "tx_index_db") if err != nil { b.Fatal(err) } - defer os.RemoveAll(dir) + defer os.RemoveAll(dir) // nolint: errcheck store := db.NewDB("tx_index", "leveldb", dir) - indexer := &TxIndex{store: store} + indexer := NewTxIndex(store) batch := txindex.NewBatch(txsCount) for i := 0; i < txsCount; i++ { - batch.Add(*txResult) + if err := batch.Add(txResult); err != nil { + b.Fatal(err) + } txResult.Index += 1 } diff --git a/state/txindex/null/null.go b/state/txindex/null/null.go index 4939d6d8..0764faa9 100644 --- a/state/txindex/null/null.go +++ b/state/txindex/null/null.go @@ -5,8 +5,11 @@ import ( "github.com/tendermint/tendermint/state/txindex" 
"github.com/tendermint/tendermint/types" + "github.com/tendermint/tmlibs/pubsub/query" ) +var _ txindex.TxIndexer = (*TxIndex)(nil) + // TxIndex acts as a /dev/null. type TxIndex struct{} @@ -19,3 +22,12 @@ func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) { func (txi *TxIndex) AddBatch(batch *txindex.Batch) error { return nil } + +// Index is a noop and always returns nil. +func (txi *TxIndex) Index(result *types.TxResult) error { + return nil +} + +func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) { + return []*types.TxResult{}, nil +} diff --git a/test/app/counter_test.sh b/test/app/counter_test.sh index 0198f85c..22a3ddb8 100644 --- a/test/app/counter_test.sh +++ b/test/app/counter_test.sh @@ -14,20 +14,41 @@ TESTNAME=$1 # Send some txs function getCode() { + set +u R=$1 - if [[ "$R" == "{}" ]]; then + set -u + if [[ "$R" == "" ]]; then + echo -1 + fi + + if [[ $(echo $R | jq 'has("code")') == "true" ]]; then + # this wont actually work if theres an error ... + echo "$R" | jq ".code" + else # protobuf auto adds `omitempty` to everything so code OK and empty data/log # will not even show when marshalled into json # apparently we can use github.com/golang/protobuf/jsonpb to do the marshalling ... echo 0 - else - # this wont actually work if theres an error ... - echo "$R" | jq .code fi } +# build grpc client if needed +if [[ "$GRPC_BROADCAST_TX" != "" ]]; then + if [ -f grpc_client ]; then + rm grpc_client + fi + echo "... building grpc_client" + go build -o grpc_client grpc_client.go +fi + function sendTx() { TX=$1 + set +u + SHOULD_ERR=$2 + if [ "$SHOULD_ERR" == "" ]; then + SHOULD_ERR=false + fi + set -u if [[ "$GRPC_BROADCAST_TX" == "" ]]; then RESPONSE=$(curl -s localhost:46657/broadcast_tx_commit?tx=0x"$TX") IS_ERR=$(echo "$RESPONSE" | jq 'has("error")') @@ -36,11 +57,6 @@ function sendTx() { RESPONSE=$(echo "$RESPONSE" | jq '.result') else - if [ -f grpc_client ]; then - rm grpc_client - fi - echo "... 
building grpc_client" - go build -o grpc_client grpc_client.go RESPONSE=$(./grpc_client "$TX") IS_ERR=false ERROR="" @@ -64,11 +80,20 @@ function sendTx() { echo "TX $TX" echo "RESPONSE $RESPONSE" echo "ERROR $ERROR" + echo "IS_ERR $IS_ERR" echo "----" - if $IS_ERR; then - echo "Unexpected error sending tx ($TX): $ERROR" - exit 1 + if $SHOULD_ERR; then + if [[ "$IS_ERR" != "true" ]]; then + echo "Expected error sending tx ($TX)" + exit 1 + fi + else + if [[ "$IS_ERR" == "true" ]]; then + echo "Unexpected error sending tx ($TX)" + exit 1 + fi + fi } @@ -86,12 +111,7 @@ fi echo "... sending tx. expect error" # second time should get rejected by the mempool (return error and non-zero code) -sendTx $TX -echo "CHECKTX CODE: $CHECK_TX_CODE" -if [[ "$CHECK_TX_CODE" == 0 ]]; then - echo "Got zero exit code for $TX. Expected tx to be rejected by mempool. $RESPONSE" - exit 1 -fi +sendTx $TX true echo "... sending tx. expect no error" diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go index e43b8ae3..9d024b1b 100644 --- a/test/app/grpc_client.go +++ b/test/app/grpc_client.go @@ -5,7 +5,7 @@ import ( "fmt" "os" - "golang.org/x/net/context" + "context" "github.com/tendermint/go-wire" "github.com/tendermint/tendermint/rpc/grpc" diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index 7e5cecef..4e98ecc7 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.9.0 +FROM golang:1.9.2 # Add testing deps for curl RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list @@ -17,6 +17,7 @@ WORKDIR $REPO ADD glide.yaml glide.yaml ADD glide.lock glide.lock ADD Makefile Makefile +RUN make tools RUN make get_vendor_deps # Install the apps diff --git a/test/p2p/atomic_broadcast/test.sh b/test/p2p/atomic_broadcast/test.sh index 00b33963..3224c042 100644 --- a/test/p2p/atomic_broadcast/test.sh +++ b/test/p2p/atomic_broadcast/test.sh @@ -13,47 +13,61 @@ N=$1 echo "" # run the test on 
each of them -for i in `seq 1 $N`; do - addr=$(test/p2p/ip.sh $i):46657 +for i in $(seq 1 "$N"); do + addr=$(test/p2p/ip.sh "$i"):46657 - # current state - HASH1=`curl -s $addr/status | jq .result.latest_app_hash` - - # - send a tx - TX=aadeadbeefbeefbeef0$i - echo "Broadcast Tx $TX" - curl -s $addr/broadcast_tx_commit?tx=0x$TX - echo "" + # current state + HASH1=$(curl -s "$addr/status" | jq .result.latest_app_hash) - # we need to wait another block to get the new app_hash - h1=`curl -s $addr/status | jq .result.latest_block_height` - h2=$h1 - while [ "$h2" == "$h1" ]; do - sleep 1 - h2=`curl -s $addr/status | jq .result.latest_block_height` - done + # - send a tx + TX=aadeadbeefbeefbeef0$i + echo "Broadcast Tx $TX" + curl -s "$addr/broadcast_tx_commit?tx=0x$TX" + echo "" - # check that hash was updated - HASH2=`curl -s $addr/status | jq .result.latest_app_hash` - if [[ "$HASH1" == "$HASH2" ]]; then - echo "Expected state hash to update from $HASH1. Got $HASH2" - exit 1 - fi + # we need to wait another block to get the new app_hash + h1=$(curl -s "$addr/status" | jq .result.latest_block_height) + h2=$h1 + while [ "$h2" == "$h1" ]; do + sleep 1 + h2=$(curl -s "$addr/status" | jq .result.latest_block_height) + done - # check we get the same new hash on all other nodes - for j in `seq 1 $N`; do - if [[ "$i" != "$j" ]]; then - addrJ=$(test/p2p/ip.sh $j):46657 - HASH3=`curl -s $addrJ/status | jq .result.latest_app_hash` - - if [[ "$HASH2" != "$HASH3" ]]; then - echo "App hash for node $j doesn't match. 
Got $HASH3, expected $HASH2" - exit 1 - fi - fi - done + # wait for all other peers to get to this height + minHeight=$h2 + for j in $(seq 1 "$N"); do + if [[ "$i" != "$j" ]]; then + addrJ=$(test/p2p/ip.sh "$j"):46657 - echo "All nodes are up to date" + h=$(curl -s "$addrJ/status" | jq .result.latest_block_height) + while [ "$h" -lt "$minHeight" ]; do + sleep 1 + h=$(curl -s "$addrJ/status" | jq .result.latest_block_height) + done + fi + done + + # check that hash was updated + HASH2=$(curl -s "$addr/status" | jq .result.latest_app_hash) + if [[ "$HASH1" == "$HASH2" ]]; then + echo "Expected state hash to update from $HASH1. Got $HASH2" + exit 1 + fi + + # check we get the same new hash on all other nodes + for j in $(seq 1 "$N"); do + if [[ "$i" != "$j" ]]; then + addrJ=$(test/p2p/ip.sh "$j"):46657 + HASH3=$(curl -s "$addrJ/status" | jq .result.latest_app_hash) + + if [[ "$HASH2" != "$HASH3" ]]; then + echo "App hash for node $j doesn't match. Got $HASH3, expected $HASH2" + exit 1 + fi + fi + done + + echo "All nodes are up to date" done echo "" diff --git a/test/p2p/peer.sh b/test/p2p/peer.sh index 283228f7..d5ede231 100644 --- a/test/p2p/peer.sh +++ b/test/p2p/peer.sh @@ -14,27 +14,17 @@ set +eu echo "starting tendermint peer ID=$ID" # start tendermint container on the network -if [[ "$CIRCLECI" == true ]]; then - set -u - docker run -d \ - --net="$NETWORK_NAME" \ - --ip=$(test/p2p/ip.sh $ID) \ - --name "local_testnet_$ID" \ - --entrypoint tendermint \ - -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ - --log-driver=syslog \ - --log-opt syslog-address=udp://127.0.0.1:5514 \ - --log-opt syslog-facility=daemon \ - --log-opt tag="{{.Name}}" \ - "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" -else - set -u - docker run -d \ - --net="$NETWORK_NAME" \ - --ip=$(test/p2p/ip.sh $ID) \ - --name "local_testnet_$ID" \ - --entrypoint tendermint \ - -e 
TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ - "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=info --proxy_app="$APP_PROXY" -fi - +# NOTE: $NODE_FLAGS should be unescaped (no quotes). otherwise it will be +# treated as one flag. +set -u +docker run -d \ + --net="$NETWORK_NAME" \ + --ip=$(test/p2p/ip.sh "$ID") \ + --name "local_testnet_$ID" \ + --entrypoint tendermint \ + -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ + --log-driver=syslog \ + --log-opt syslog-address=udp://127.0.0.1:5514 \ + --log-opt syslog-facility=daemon \ + --log-opt tag="{{.Name}}" \ + "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" diff --git a/test/run_test.sh b/test/run_test.sh index 6e4823f1..cecd2c72 100644 --- a/test/run_test.sh +++ b/test/run_test.sh @@ -6,6 +6,10 @@ pwd BRANCH=$(git rev-parse --abbrev-ref HEAD) echo "Current branch: $BRANCH" +# run the linter +# TODO: drop the `_test` once we're ballin' enough +make metalinter_test + # run the go unit tests with coverage bash test/test_cover.sh diff --git a/test/test.sh b/test/test.sh index 2e164fb3..64d7bfc7 100755 --- a/test/test.sh +++ b/test/test.sh @@ -18,14 +18,12 @@ echo "* [$(date +"%T")] removing run_test container" docker rm -vf run_test set -e -set +u -if [[ "$CIRCLECI" == true ]]; then - echo - echo "* [$(date +"%T")] starting rsyslog container" - docker rm -f rsyslog || true - docker run -d -v "$LOGS_DIR:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog -fi +echo +echo "* [$(date +"%T")] starting rsyslog container" +docker rm -f rsyslog || true +docker run -d -v "$LOGS_DIR:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog +set +u if [[ "$SKIP_BUILD" == "" ]]; then echo echo "* [$(date +"%T")] building docker image" diff --git a/types/block.go b/types/block.go index 738e5a00..4c91c5fe 100644 --- a/types/block.go +++ b/types/block.go @@ -23,7 +23,7 @@ type Block struct { // MakeBlock returns a new 
block and corresponding partset from the given information. // TODO: Add version information to the Block struct. -func MakeBlock(height int, chainID string, txs []Tx, commit *Commit, +func MakeBlock(height int64, chainID string, txs []Tx, commit *Commit, prevBlockID BlockID, valHash, appHash []byte, partSize int) (*Block, *PartSet) { block := &Block{ Header: &Header{ @@ -45,7 +45,7 @@ func MakeBlock(height int, chainID string, txs []Tx, commit *Commit, } // ValidateBasic performs basic validation that doesn't involve state data. -func (b *Block) ValidateBasic(chainID string, lastBlockHeight int, lastBlockID BlockID, +func (b *Block) ValidateBasic(chainID string, lastBlockHeight int64, lastBlockID BlockID, lastBlockTime time.Time, appHash []byte) error { if b.ChainID != chainID { return errors.New(cmn.Fmt("Wrong Block.Header.ChainID. Expected %v, got %v", chainID, b.ChainID)) @@ -158,7 +158,7 @@ func (b *Block) StringShort() string { // Header defines the structure of a Tendermint block header type Header struct { ChainID string `json:"chain_id"` - Height int `json:"height"` + Height int64 `json:"height"` Time time.Time `json:"time"` NumTxs int `json:"num_txs"` // XXX: Can we get rid of this? 
LastBlockID BlockID `json:"last_block_id"` @@ -250,7 +250,7 @@ func (commit *Commit) FirstPrecommit() *Vote { } // Height returns the height of the commit -func (commit *Commit) Height() int { +func (commit *Commit) Height() int64 { if len(commit.Precommits) == 0 { return 0 } diff --git a/types/canonical_json.go b/types/canonical_json.go index 5f1a0aca..a2e91164 100644 --- a/types/canonical_json.go +++ b/types/canonical_json.go @@ -18,7 +18,7 @@ type CanonicalJSONPartSetHeader struct { type CanonicalJSONProposal struct { BlockPartsHeader CanonicalJSONPartSetHeader `json:"block_parts_header"` - Height int `json:"height"` + Height int64 `json:"height"` POLBlockID CanonicalJSONBlockID `json:"pol_block_id"` POLRound int `json:"pol_round"` Round int `json:"round"` @@ -26,13 +26,13 @@ type CanonicalJSONProposal struct { type CanonicalJSONVote struct { BlockID CanonicalJSONBlockID `json:"block_id"` - Height int `json:"height"` + Height int64 `json:"height"` Round int `json:"round"` Type byte `json:"type"` } type CanonicalJSONHeartbeat struct { - Height int `json:"height"` + Height int64 `json:"height"` Round int `json:"round"` Sequence int `json:"sequence"` ValidatorAddress data.Bytes `json:"validator_address"` diff --git a/types/event_buffer.go b/types/event_buffer.go new file mode 100644 index 00000000..6f236e8e --- /dev/null +++ b/types/event_buffer.go @@ -0,0 +1,46 @@ +package types + +// Interface assertions +var _ TxEventPublisher = (*TxEventBuffer)(nil) + +// TxEventBuffer is a buffer of events, which uses a slice to temporarily store +// events. +type TxEventBuffer struct { + next TxEventPublisher + capacity int + events []EventDataTx +} + +// NewTxEventBuffer accepts a TxEventPublisher and returns a new buffer with the given +// capacity. 
+func NewTxEventBuffer(next TxEventPublisher, capacity int) *TxEventBuffer { + return &TxEventBuffer{ + next: next, + capacity: capacity, + events: make([]EventDataTx, 0, capacity), + } +} + +// Len returns the number of events cached. +func (b TxEventBuffer) Len() int { + return len(b.events) +} + +// PublishEventTx buffers an event to be fired upon finality. +func (b *TxEventBuffer) PublishEventTx(e EventDataTx) error { + b.events = append(b.events, e) + return nil +} + +// Flush publishes events by running next.PublishWithTags on all cached events. +// Blocks. Clears cached events. +func (b *TxEventBuffer) Flush() error { + for _, e := range b.events { + err := b.next.PublishEventTx(e) + if err != nil { + return err + } + } + b.events = make([]EventDataTx, 0, b.capacity) + return nil +} diff --git a/types/event_buffer_test.go b/types/event_buffer_test.go new file mode 100644 index 00000000..74ae9da2 --- /dev/null +++ b/types/event_buffer_test.go @@ -0,0 +1,21 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type eventBusMock struct{} + +func (eventBusMock) PublishEventTx(e EventDataTx) error { + return nil +} + +func TestEventBuffer(t *testing.T) { + b := NewTxEventBuffer(eventBusMock{}, 1) + b.PublishEventTx(EventDataTx{}) + assert.Equal(t, 1, b.Len()) + b.Flush() + assert.Equal(t, 0, b.Len()) +} diff --git a/types/event_bus.go b/types/event_bus.go new file mode 100644 index 00000000..6cee1d82 --- /dev/null +++ b/types/event_bus.go @@ -0,0 +1,168 @@ +package types + +import ( + "context" + "fmt" + + abci "github.com/tendermint/abci/types" + cmn "github.com/tendermint/tmlibs/common" + "github.com/tendermint/tmlibs/log" + tmpubsub "github.com/tendermint/tmlibs/pubsub" +) + +const defaultCapacity = 1000 + +// EventBus is a common bus for all events going through the system. All calls +// are proxied to underlying pubsub server. All events must be published using +// EventBus to ensure correct data types. 
+type EventBus struct { + cmn.BaseService + pubsub *tmpubsub.Server +} + +// NewEventBus returns a new event bus. +func NewEventBus() *EventBus { + return NewEventBusWithBufferCapacity(defaultCapacity) +} + +// NewEventBusWithBufferCapacity returns a new event bus with the given buffer capacity. +func NewEventBusWithBufferCapacity(cap int) *EventBus { + // capacity could be exposed later if needed + pubsub := tmpubsub.NewServer(tmpubsub.BufferCapacity(cap)) + b := &EventBus{pubsub: pubsub} + b.BaseService = *cmn.NewBaseService(nil, "EventBus", b) + return b +} + +func (b *EventBus) SetLogger(l log.Logger) { + b.BaseService.SetLogger(l) + b.pubsub.SetLogger(l.With("module", "pubsub")) +} + +func (b *EventBus) OnStart() error { + return b.pubsub.OnStart() +} + +func (b *EventBus) OnStop() { + b.pubsub.OnStop() +} + +func (b *EventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { + return b.pubsub.Subscribe(ctx, subscriber, query, out) +} + +func (b *EventBus) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { + return b.pubsub.Unsubscribe(ctx, subscriber, query) +} + +func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { + return b.pubsub.UnsubscribeAll(ctx, subscriber) +} + +func (b *EventBus) Publish(eventType string, eventData TMEventData) error { + // no explicit deadline for publishing events + ctx := context.Background() + b.pubsub.PublishWithTags(ctx, eventData, map[string]interface{}{EventTypeKey: eventType}) + return nil +} + +//--- block, tx, and vote events + +func (b *EventBus) PublishEventNewBlock(event EventDataNewBlock) error { + return b.Publish(EventNewBlock, TMEventData{event}) +} + +func (b *EventBus) PublishEventNewBlockHeader(event EventDataNewBlockHeader) error { + return b.Publish(EventNewBlockHeader, TMEventData{event}) +} + +func (b *EventBus) PublishEventVote(event EventDataVote) error { + return b.Publish(EventVote, 
TMEventData{event}) +} + +// PublishEventTx publishes tx event with tags from Result. Note it will add +// predefined tags (EventTypeKey, TxHashKey). Existing tags with the same names +// will be overwritten. +func (b *EventBus) PublishEventTx(event EventDataTx) error { + // no explicit deadline for publishing events + ctx := context.Background() + + tags := make(map[string]interface{}) + + // validate and fill tags from tx result + for _, tag := range event.Result.Tags { + // basic validation + if tag.Key == "" { + b.Logger.Info("Got tag with an empty key (skipping)", "tag", tag, "tx", event.Tx) + continue + } + + switch tag.ValueType { + case abci.KVPair_STRING: + tags[tag.Key] = tag.ValueString + case abci.KVPair_INT: + tags[tag.Key] = tag.ValueInt + } + } + + // add predefined tags + logIfTagExists(EventTypeKey, tags, b.Logger) + tags[EventTypeKey] = EventTx + + logIfTagExists(TxHashKey, tags, b.Logger) + tags[TxHashKey] = fmt.Sprintf("%X", event.Tx.Hash()) + + logIfTagExists(TxHeightKey, tags, b.Logger) + tags[TxHeightKey] = event.Height + + b.pubsub.PublishWithTags(ctx, TMEventData{event}, tags) + return nil +} + +func (b *EventBus) PublishEventProposalHeartbeat(event EventDataProposalHeartbeat) error { + return b.Publish(EventProposalHeartbeat, TMEventData{event}) +} + +//--- EventDataRoundState events + +func (b *EventBus) PublishEventNewRoundStep(event EventDataRoundState) error { + return b.Publish(EventNewRoundStep, TMEventData{event}) +} + +func (b *EventBus) PublishEventTimeoutPropose(event EventDataRoundState) error { + return b.Publish(EventTimeoutPropose, TMEventData{event}) +} + +func (b *EventBus) PublishEventTimeoutWait(event EventDataRoundState) error { + return b.Publish(EventTimeoutWait, TMEventData{event}) +} + +func (b *EventBus) PublishEventNewRound(event EventDataRoundState) error { + return b.Publish(EventNewRound, TMEventData{event}) +} + +func (b *EventBus) PublishEventCompleteProposal(event EventDataRoundState) error { + return 
b.Publish(EventCompleteProposal, TMEventData{event}) +} + +func (b *EventBus) PublishEventPolka(event EventDataRoundState) error { + return b.Publish(EventPolka, TMEventData{event}) +} + +func (b *EventBus) PublishEventUnlock(event EventDataRoundState) error { + return b.Publish(EventUnlock, TMEventData{event}) +} + +func (b *EventBus) PublishEventRelock(event EventDataRoundState) error { + return b.Publish(EventRelock, TMEventData{event}) +} + +func (b *EventBus) PublishEventLock(event EventDataRoundState) error { + return b.Publish(EventLock, TMEventData{event}) +} + +func logIfTagExists(tag string, tags map[string]interface{}, logger log.Logger) { + if value, ok := tags[tag]; ok { + logger.Error("Found predefined tag (value will be overwritten)", "tag", tag, "value", value) + } +} diff --git a/types/event_bus_test.go b/types/event_bus_test.go new file mode 100644 index 00000000..aa97092f --- /dev/null +++ b/types/event_bus_test.go @@ -0,0 +1,122 @@ +package types + +import ( + "context" + "fmt" + "math/rand" + "testing" + "time" + + tmpubsub "github.com/tendermint/tmlibs/pubsub" +) + +func BenchmarkEventBus(b *testing.B) { + benchmarks := []struct { + name string + numClients int + randQueries bool + randEvents bool + }{ + {"10Clients1Query1Event", 10, false, false}, + {"100Clients", 100, false, false}, + {"1000Clients", 1000, false, false}, + + {"10ClientsRandQueries1Event", 10, true, false}, + {"100Clients", 100, true, false}, + {"1000Clients", 1000, true, false}, + + {"10ClientsRandQueriesRandEvents", 10, true, true}, + {"100Clients", 100, true, true}, + {"1000Clients", 1000, true, true}, + + {"10Clients1QueryRandEvents", 10, false, true}, + {"100Clients", 100, false, true}, + {"1000Clients", 1000, false, true}, + } + + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + benchmarkEventBus(bm.numClients, bm.randQueries, bm.randEvents, b) + }) + } +} + +func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *testing.B) { 
+ // for random* functions + rand.Seed(time.Now().Unix()) + + eventBus := NewEventBusWithBufferCapacity(0) // set buffer capacity to 0 so we are not testing cache + eventBus.Start() + defer eventBus.Stop() + + ctx := context.Background() + q := EventQueryNewBlock + + for i := 0; i < numClients; i++ { + ch := make(chan interface{}) + go func() { + for range ch { + } + }() + if randQueries { + q = randQuery() + } + eventBus.Subscribe(ctx, fmt.Sprintf("client-%d", i), q, ch) + } + + eventType := EventNewBlock + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if randEvents { + eventType = randEvent() + } + + eventBus.Publish(eventType, TMEventData{"Gamora"}) + } +} + +var events = []string{EventBond, + EventUnbond, + EventRebond, + EventDupeout, + EventFork, + EventNewBlock, + EventNewBlockHeader, + EventNewRound, + EventNewRoundStep, + EventTimeoutPropose, + EventCompleteProposal, + EventPolka, + EventUnlock, + EventLock, + EventRelock, + EventTimeoutWait, + EventVote} + +func randEvent() string { + return events[rand.Intn(len(events))] +} + +var queries = []tmpubsub.Query{EventQueryBond, + EventQueryUnbond, + EventQueryRebond, + EventQueryDupeout, + EventQueryFork, + EventQueryNewBlock, + EventQueryNewBlockHeader, + EventQueryNewRound, + EventQueryNewRoundStep, + EventQueryTimeoutPropose, + EventQueryCompleteProposal, + EventQueryPolka, + EventQueryUnlock, + EventQueryLock, + EventQueryRelock, + EventQueryTimeoutWait, + EventQueryVote} + +func randQuery() tmpubsub.Query { + return queries[rand.Intn(len(queries))] +} diff --git a/types/events.go b/types/events.go index 79e17fe0..08ebf46d 100644 --- a/types/events.go +++ b/types/events.go @@ -1,55 +1,52 @@ package types import ( - // for registering TMEventData as events.EventData - abci "github.com/tendermint/abci/types" + "fmt" + "github.com/tendermint/go-wire/data" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/events" + tmpubsub "github.com/tendermint/tmlibs/pubsub" + 
tmquery "github.com/tendermint/tmlibs/pubsub/query" ) -// Functions to generate eventId strings +// Reserved event types +const ( + EventBond = "Bond" + EventCompleteProposal = "CompleteProposal" + EventDupeout = "Dupeout" + EventFork = "Fork" + EventLock = "Lock" + EventNewBlock = "NewBlock" + EventNewBlockHeader = "NewBlockHeader" + EventNewRound = "NewRound" + EventNewRoundStep = "NewRoundStep" + EventPolka = "Polka" + EventRebond = "Rebond" + EventRelock = "Relock" + EventTimeoutPropose = "TimeoutPropose" + EventTimeoutWait = "TimeoutWait" + EventTx = "Tx" + EventUnbond = "Unbond" + EventUnlock = "Unlock" + EventVote = "Vote" + EventProposalHeartbeat = "ProposalHeartbeat" +) -// Reserved -func EventStringBond() string { return "Bond" } -func EventStringUnbond() string { return "Unbond" } -func EventStringRebond() string { return "Rebond" } -func EventStringDupeout() string { return "Dupeout" } -func EventStringFork() string { return "Fork" } -func EventStringTx(tx Tx) string { return cmn.Fmt("Tx:%X", tx.Hash()) } - -func EventStringNewBlock() string { return "NewBlock" } -func EventStringNewBlockHeader() string { return "NewBlockHeader" } -func EventStringNewRound() string { return "NewRound" } -func EventStringNewRoundStep() string { return "NewRoundStep" } -func EventStringTimeoutPropose() string { return "TimeoutPropose" } -func EventStringCompleteProposal() string { return "CompleteProposal" } -func EventStringPolka() string { return "Polka" } -func EventStringUnlock() string { return "Unlock" } -func EventStringLock() string { return "Lock" } -func EventStringRelock() string { return "Relock" } -func EventStringTimeoutWait() string { return "TimeoutWait" } -func EventStringVote() string { return "Vote" } - -func EventStringProposalHeartbeat() string { return "ProposalHeartbeat" } - -//---------------------------------------- +/////////////////////////////////////////////////////////////////////////////// +// ENCODING / DECODING 
+/////////////////////////////////////////////////////////////////////////////// var ( - EventDataNameNewBlock = "new_block" - EventDataNameNewBlockHeader = "new_block_header" - EventDataNameTx = "tx" - EventDataNameRoundState = "round_state" - EventDataNameVote = "vote" - - EventDataNameProposalHeartbeat = "proposer_heartbeat" + EventDataNameNewBlock = "new_block" + EventDataNameNewBlockHeader = "new_block_header" + EventDataNameTx = "tx" + EventDataNameRoundState = "round_state" + EventDataNameVote = "vote" + EventDataNameProposalHeartbeat = "proposal_heartbeat" ) -//---------------------------------------- - // implements events.EventData type TMEventDataInner interface { - events.EventData + // empty interface } type TMEventData struct { @@ -81,14 +78,12 @@ func (tmr TMEventData) Empty() bool { } const ( - EventDataTypeNewBlock = byte(0x01) - EventDataTypeFork = byte(0x02) - EventDataTypeTx = byte(0x03) - EventDataTypeNewBlockHeader = byte(0x04) - - EventDataTypeRoundState = byte(0x11) - EventDataTypeVote = byte(0x12) - + EventDataTypeNewBlock = byte(0x01) + EventDataTypeFork = byte(0x02) + EventDataTypeTx = byte(0x03) + EventDataTypeNewBlockHeader = byte(0x04) + EventDataTypeRoundState = byte(0x11) + EventDataTypeVote = byte(0x12) EventDataTypeProposalHeartbeat = byte(0x20) ) @@ -114,12 +109,7 @@ type EventDataNewBlockHeader struct { // All txs fire EventDataTx type EventDataTx struct { - Height int `json:"height"` - Tx Tx `json:"tx"` - Data data.Bytes `json:"data"` - Log string `json:"log"` - Code abci.CodeType `json:"code"` - Error string `json:"error"` // this is redundant information for now + TxResult } type EventDataProposalHeartbeat struct { @@ -128,7 +118,7 @@ type EventDataProposalHeartbeat struct { // NOTE: This goes into the replay WAL type EventDataRoundState struct { - Height int `json:"height"` + Height int64 `json:"height"` Round int `json:"round"` Step string `json:"step"` @@ -140,112 +130,51 @@ type EventDataVote struct { Vote *Vote } -func (_ 
EventDataNewBlock) AssertIsTMEventData() {} -func (_ EventDataNewBlockHeader) AssertIsTMEventData() {} -func (_ EventDataTx) AssertIsTMEventData() {} -func (_ EventDataRoundState) AssertIsTMEventData() {} -func (_ EventDataVote) AssertIsTMEventData() {} +/////////////////////////////////////////////////////////////////////////////// +// PUBSUB +/////////////////////////////////////////////////////////////////////////////// -func (_ EventDataProposalHeartbeat) AssertIsTMEventData() {} +const ( + // EventTypeKey is a reserved key, used to specify event type in tags. + EventTypeKey = "tm.event" + // TxHashKey is a reserved key, used to specify transaction's hash. + // see EventBus#PublishEventTx + TxHashKey = "tx.hash" + // TxHeightKey is a reserved key, used to specify transaction block's height. + // see EventBus#PublishEventTx + TxHeightKey = "tx.height" +) -//---------------------------------------- -// Wrappers for type safety +var ( + EventQueryBond = queryForEvent(EventBond) + EventQueryUnbond = queryForEvent(EventUnbond) + EventQueryRebond = queryForEvent(EventRebond) + EventQueryDupeout = queryForEvent(EventDupeout) + EventQueryFork = queryForEvent(EventFork) + EventQueryNewBlock = queryForEvent(EventNewBlock) + EventQueryNewBlockHeader = queryForEvent(EventNewBlockHeader) + EventQueryNewRound = queryForEvent(EventNewRound) + EventQueryNewRoundStep = queryForEvent(EventNewRoundStep) + EventQueryTimeoutPropose = queryForEvent(EventTimeoutPropose) + EventQueryCompleteProposal = queryForEvent(EventCompleteProposal) + EventQueryPolka = queryForEvent(EventPolka) + EventQueryUnlock = queryForEvent(EventUnlock) + EventQueryLock = queryForEvent(EventLock) + EventQueryRelock = queryForEvent(EventRelock) + EventQueryTimeoutWait = queryForEvent(EventTimeoutWait) + EventQueryVote = queryForEvent(EventVote) + EventQueryProposalHeartbeat = queryForEvent(EventProposalHeartbeat) + EventQueryTx = queryForEvent(EventTx) +) -type Fireable interface { - events.Fireable +func 
EventQueryTxFor(tx Tx) tmpubsub.Query { + return tmquery.MustParse(fmt.Sprintf("%s='%s' AND %s='%X'", EventTypeKey, EventTx, TxHashKey, tx.Hash())) } -type Eventable interface { - SetEventSwitch(EventSwitch) +func queryForEvent(eventType string) tmpubsub.Query { + return tmquery.MustParse(fmt.Sprintf("%s='%s'", EventTypeKey, eventType)) } -type EventSwitch interface { - events.EventSwitch -} - -type EventCache interface { - Fireable - Flush() -} - -func NewEventSwitch() EventSwitch { - return events.NewEventSwitch() -} - -func NewEventCache(evsw EventSwitch) EventCache { - return events.NewEventCache(evsw) -} - -// All events should be based on this FireEvent to ensure they are TMEventData -func fireEvent(fireable events.Fireable, event string, data TMEventData) { - if fireable != nil { - fireable.FireEvent(event, data) - } -} - -func AddListenerForEvent(evsw EventSwitch, id, event string, cb func(data TMEventData)) { - evsw.AddListenerForEvent(id, event, func(data events.EventData) { - cb(data.(TMEventData)) - }) - -} - -//--- block, tx, and vote events - -func FireEventNewBlock(fireable events.Fireable, block EventDataNewBlock) { - fireEvent(fireable, EventStringNewBlock(), TMEventData{block}) -} - -func FireEventNewBlockHeader(fireable events.Fireable, header EventDataNewBlockHeader) { - fireEvent(fireable, EventStringNewBlockHeader(), TMEventData{header}) -} - -func FireEventVote(fireable events.Fireable, vote EventDataVote) { - fireEvent(fireable, EventStringVote(), TMEventData{vote}) -} - -func FireEventTx(fireable events.Fireable, tx EventDataTx) { - fireEvent(fireable, EventStringTx(tx.Tx), TMEventData{tx}) -} - -//--- EventDataRoundState events - -func FireEventNewRoundStep(fireable events.Fireable, rs EventDataRoundState) { - fireEvent(fireable, EventStringNewRoundStep(), TMEventData{rs}) -} - -func FireEventTimeoutPropose(fireable events.Fireable, rs EventDataRoundState) { - fireEvent(fireable, EventStringTimeoutPropose(), TMEventData{rs}) -} - -func 
FireEventTimeoutWait(fireable events.Fireable, rs EventDataRoundState) { - fireEvent(fireable, EventStringTimeoutWait(), TMEventData{rs}) -} - -func FireEventNewRound(fireable events.Fireable, rs EventDataRoundState) { - fireEvent(fireable, EventStringNewRound(), TMEventData{rs}) -} - -func FireEventCompleteProposal(fireable events.Fireable, rs EventDataRoundState) { - fireEvent(fireable, EventStringCompleteProposal(), TMEventData{rs}) -} - -func FireEventPolka(fireable events.Fireable, rs EventDataRoundState) { - fireEvent(fireable, EventStringPolka(), TMEventData{rs}) -} - -func FireEventUnlock(fireable events.Fireable, rs EventDataRoundState) { - fireEvent(fireable, EventStringUnlock(), TMEventData{rs}) -} - -func FireEventRelock(fireable events.Fireable, rs EventDataRoundState) { - fireEvent(fireable, EventStringRelock(), TMEventData{rs}) -} - -func FireEventLock(fireable events.Fireable, rs EventDataRoundState) { - fireEvent(fireable, EventStringLock(), TMEventData{rs}) -} - -func FireEventProposalHeartbeat(fireable events.Fireable, rs EventDataProposalHeartbeat) { - fireEvent(fireable, EventStringProposalHeartbeat(), TMEventData{rs}) +type TxEventPublisher interface { + PublishEventTx(EventDataTx) error } diff --git a/types/heartbeat.go b/types/heartbeat.go index 64676ea6..da9b342b 100644 --- a/types/heartbeat.go +++ b/types/heartbeat.go @@ -18,7 +18,7 @@ import ( type Heartbeat struct { ValidatorAddress data.Bytes `json:"validator_address"` ValidatorIndex int `json:"validator_index"` - Height int `json:"height"` + Height int64 `json:"height"` Round int `json:"round"` Sequence int `json:"sequence"` Signature crypto.Signature `json:"signature"` diff --git a/types/heartbeat_test.go b/types/heartbeat_test.go index 8a096712..660ccd0f 100644 --- a/types/heartbeat_test.go +++ b/types/heartbeat_test.go @@ -40,17 +40,17 @@ func TestHeartbeatWriteSignBytes(t *testing.T) { hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} hb.WriteSignBytes("0xdeadbeef", buf, 
&n, &err) - require.Equal(t, string(buf.Bytes()), `{"chain_id":"0xdeadbeef","heartbeat":{"height":10,"round":1,"sequence":0,"validator_address":"","validator_index":1}}`) + require.Equal(t, buf.String(), `{"chain_id":"0xdeadbeef","heartbeat":{"height":10,"round":1,"sequence":0,"validator_address":"","validator_index":1}}`) buf.Reset() plainHb := &Heartbeat{} plainHb.WriteSignBytes("0xdeadbeef", buf, &n, &err) - require.Equal(t, string(buf.Bytes()), `{"chain_id":"0xdeadbeef","heartbeat":{"height":0,"round":0,"sequence":0,"validator_address":"","validator_index":0}}`) + require.Equal(t, buf.String(), `{"chain_id":"0xdeadbeef","heartbeat":{"height":0,"round":0,"sequence":0,"validator_address":"","validator_index":0}}`) require.Panics(t, func() { buf.Reset() var nilHb *Heartbeat nilHb.WriteSignBytes("0xdeadbeef", buf, &n, &err) - require.Equal(t, string(buf.Bytes()), "null") + require.Equal(t, buf.String(), "null") }) } diff --git a/types/nop_event_bus.go b/types/nop_event_bus.go new file mode 100644 index 00000000..06b70987 --- /dev/null +++ b/types/nop_event_bus.go @@ -0,0 +1,77 @@ +package types + +import ( + "context" + + tmpubsub "github.com/tendermint/tmlibs/pubsub" +) + +type NopEventBus struct{} + +func (NopEventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { + return nil +} + +func (NopEventBus) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { + return nil +} + +func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { + return nil +} + +//--- block, tx, and vote events + +func (NopEventBus) PublishEventNewBlock(block EventDataNewBlock) error { + return nil +} + +func (NopEventBus) PublishEventNewBlockHeader(header EventDataNewBlockHeader) error { + return nil +} + +func (NopEventBus) PublishEventVote(vote EventDataVote) error { + return nil +} + +func (NopEventBus) PublishEventTx(tx EventDataTx) error { + return nil +} + +//--- 
EventDataRoundState events + +func (NopEventBus) PublishEventNewRoundStep(rs EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventTimeoutPropose(rs EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventTimeoutWait(rs EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventNewRound(rs EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventCompleteProposal(rs EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventPolka(rs EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventUnlock(rs EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventRelock(rs EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventLock(rs EventDataRoundState) error { + return nil +} diff --git a/types/part_set.go b/types/part_set.go index e15d2cab..e8a0997c 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -34,7 +34,7 @@ func (part *Part) Hash() []byte { return part.hash } else { hasher := ripemd160.New() - hasher.Write(part.Bytes) // doesn't err + hasher.Write(part.Bytes) // nolint: errcheck, gas part.hash = hasher.Sum(nil) return part.hash } diff --git a/types/priv_validator.go b/types/priv_validator.go index 8834eb7c..5dfd521f 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -51,7 +51,7 @@ type PrivValidator interface { type PrivValidatorFS struct { Address data.Bytes `json:"address"` PubKey crypto.PubKey `json:"pub_key"` - LastHeight int `json:"last_height"` + LastHeight int64 `json:"last_height"` LastRound int `json:"last_round"` LastStep int8 `json:"last_step"` LastSignature crypto.Signature `json:"last_signature,omitempty"` // so we dont lose signatures @@ -222,7 +222,7 @@ func (privVal *PrivValidatorFS) SignProposal(chainID string, proposal *Proposal) // signBytesHRS signs the given signBytes if the height/round/step (HRS) // are greater than the latest state. 
If the HRS are equal, // it returns the privValidator.LastSignature. -func (privVal *PrivValidatorFS) signBytesHRS(height, round int, step int8, signBytes []byte) (crypto.Signature, error) { +func (privVal *PrivValidatorFS) signBytesHRS(height int64, round int, step int8, signBytes []byte) (crypto.Signature, error) { sig := crypto.Signature{} // If height regression, err diff --git a/types/priv_validator_test.go b/types/priv_validator_test.go index ac91de86..3b13ed90 100644 --- a/types/priv_validator_test.go +++ b/types/priv_validator_test.go @@ -20,7 +20,7 @@ func TestGenLoadValidator(t *testing.T) { _, tempFilePath := cmn.Tempfile("priv_validator_") privVal := GenPrivValidatorFS(tempFilePath) - height := 100 + height := int64(100) privVal.LastHeight = height privVal.Save() addr := privVal.GetAddress() @@ -34,7 +34,9 @@ func TestLoadOrGenValidator(t *testing.T) { assert := assert.New(t) _, tempFilePath := cmn.Tempfile("priv_validator_") - os.Remove(tempFilePath) + if err := os.Remove(tempFilePath); err != nil { + t.Error(err) + } privVal := LoadOrGenPrivValidatorFS(tempFilePath) addr := privVal.GetAddress() privVal = LoadOrGenPrivValidatorFS(tempFilePath) @@ -97,7 +99,7 @@ func TestSignVote(t *testing.T) { block1 := BlockID{[]byte{1, 2, 3}, PartSetHeader{}} block2 := BlockID{[]byte{3, 2, 1}, PartSetHeader{}} - height, round := 10, 1 + height, round := int64(10), 1 voteType := VoteTypePrevote // sign a vote for first time @@ -131,7 +133,7 @@ func TestSignProposal(t *testing.T) { block1 := PartSetHeader{5, []byte{1, 2, 3}} block2 := PartSetHeader{10, []byte{3, 2, 1}} - height, round := 10, 1 + height, round := int64(10), 1 // sign a proposal for first time proposal := newProposal(height, round, block1) @@ -156,7 +158,7 @@ func TestSignProposal(t *testing.T) { } } -func newVote(addr data.Bytes, idx, height, round int, typ byte, blockID BlockID) *Vote { +func newVote(addr data.Bytes, idx int, height int64, round int, typ byte, blockID BlockID) *Vote { return &Vote{ 
ValidatorAddress: addr, ValidatorIndex: idx, @@ -167,7 +169,7 @@ func newVote(addr data.Bytes, idx, height, round int, typ byte, blockID BlockID) } } -func newProposal(height, round int, partsHeader PartSetHeader) *Proposal { +func newProposal(height int64, round int, partsHeader PartSetHeader) *Proposal { return &Proposal{ Height: height, Round: round, diff --git a/types/proposal.go b/types/proposal.go index 8efa91b6..93e78896 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -20,7 +20,7 @@ var ( // to be considered valid. It may depend on votes from a previous round, // a so-called Proof-of-Lock (POL) round, as noted in the POLRound and POLBlockID. type Proposal struct { - Height int `json:"height"` + Height int64 `json:"height"` Round int `json:"round"` BlockPartsHeader PartSetHeader `json:"block_parts_header"` POLRound int `json:"pol_round"` // -1 if null. @@ -30,7 +30,7 @@ type Proposal struct { // NewProposal returns a new Proposal. // If there is no POLRound, polRound should be -1. 
-func NewProposal(height int, round int, blockPartsHeader PartSetHeader, polRound int, polBlockID BlockID) *Proposal { +func NewProposal(height int64, round int, blockPartsHeader PartSetHeader, polRound int, polBlockID BlockID) *Proposal { return &Proposal{ Height: height, Round: round, diff --git a/types/proposal_test.go b/types/proposal_test.go index d1c99184..352ba8de 100644 --- a/types/proposal_test.go +++ b/types/proposal_test.go @@ -30,7 +30,10 @@ func BenchmarkProposalWriteSignBytes(b *testing.B) { func BenchmarkProposalSign(b *testing.B) { privVal := GenPrivValidatorFS("") for i := 0; i < b.N; i++ { - privVal.Signer.Sign(SignBytes("test_chain_id", testProposal)) + _, err := privVal.Signer.Sign(SignBytes("test_chain_id", testProposal)) + if err != nil { + b.Error(err) + } } } diff --git a/types/protobuf.go b/types/protobuf.go index c8c9f843..c97b5387 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -13,9 +13,9 @@ type tm2pb struct{} func (tm2pb) Header(header *Header) *types.Header { return &types.Header{ ChainId: header.ChainID, - Height: uint64(header.Height), - Time: uint64(header.Time.Unix()), - NumTxs: uint64(header.NumTxs), + Height: header.Height, + Time: header.Time.Unix(), + NumTxs: int32(header.NumTxs), // XXX: overflow LastBlockId: TM2PB.BlockID(header.LastBlockID), LastCommitHash: header.LastCommitHash, DataHash: header.DataHash, @@ -32,7 +32,7 @@ func (tm2pb) BlockID(blockID BlockID) *types.BlockID { func (tm2pb) PartSetHeader(partSetHeader PartSetHeader) *types.PartSetHeader { return &types.PartSetHeader{ - Total: uint64(partSetHeader.Total), + Total: int32(partSetHeader.Total), // XXX: overflow Hash: partSetHeader.Hash, } } @@ -40,7 +40,7 @@ func (tm2pb) PartSetHeader(partSetHeader PartSetHeader) *types.PartSetHeader { func (tm2pb) Validator(val *Validator) *types.Validator { return &types.Validator{ PubKey: val.PubKey.Bytes(), - Power: uint64(val.VotingPower), + Power: val.VotingPower, } } diff --git a/types/services.go 
b/types/services.go index e34d846b..0e007554 100644 --- a/types/services.go +++ b/types/services.go @@ -25,10 +25,10 @@ type Mempool interface { Size() int CheckTx(Tx, func(*abci.Response)) error Reap(int) Txs - Update(height int, txs Txs) + Update(height int64, txs Txs) error Flush() - TxsAvailable() <-chan int + TxsAvailable() <-chan int64 EnableTxsAvailable() } @@ -42,9 +42,9 @@ func (m MockMempool) Unlock() {} func (m MockMempool) Size() int { return 0 } func (m MockMempool) CheckTx(tx Tx, cb func(*abci.Response)) error { return nil } func (m MockMempool) Reap(n int) Txs { return Txs{} } -func (m MockMempool) Update(height int, txs Txs) {} +func (m MockMempool) Update(height int64, txs Txs) error { return nil } func (m MockMempool) Flush() {} -func (m MockMempool) TxsAvailable() <-chan int { return make(chan int) } +func (m MockMempool) TxsAvailable() <-chan int64 { return make(chan int64) } func (m MockMempool) EnableTxsAvailable() {} //------------------------------------------------------ @@ -53,14 +53,14 @@ func (m MockMempool) EnableTxsAvailable() {} // BlockStoreRPC is the block store interface used by the RPC. // UNSTABLE type BlockStoreRPC interface { - Height() int + Height() int64 - LoadBlockMeta(height int) *BlockMeta - LoadBlock(height int) *Block - LoadBlockPart(height int, index int) *Part + LoadBlockMeta(height int64) *BlockMeta + LoadBlock(height int64) *Block + LoadBlockPart(height int64, index int) *Part - LoadBlockCommit(height int) *Commit - LoadSeenCommit(height int) *Commit + LoadBlockCommit(height int64) *Commit + LoadSeenCommit(height int64) *Commit } // BlockStore defines the BlockStore interface. diff --git a/types/tx.go b/types/tx.go index fbea8ff5..5761b83e 100644 --- a/types/tx.go +++ b/types/tx.go @@ -116,7 +116,7 @@ func (tp TxProof) Validate(dataHash []byte) error { // // One usage is indexing transaction results. 
type TxResult struct { - Height uint64 `json:"height"` + Height int64 `json:"height"` Index uint32 `json:"index"` Tx Tx `json:"tx"` Result abci.ResponseDeliverTx `json:"result"` diff --git a/types/validator.go b/types/validator.go index 7b167b27..c5d064e0 100644 --- a/types/validator.go +++ b/types/validator.go @@ -71,7 +71,7 @@ func (v *Validator) String() string { } // Hash computes the unique ID of a validator with a given voting power. -// It exludes the Accum value, which changes with every round. +// It excludes the Accum value, which changes with every round. func (v *Validator) Hash() []byte { return wire.BinaryRipemd160(struct { Address data.Bytes diff --git a/types/validator_set.go b/types/validator_set.go index 132957c1..134e4e06 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -53,7 +53,7 @@ func (valSet *ValidatorSet) IncrementAccum(times int) { // Add VotingPower * times to each validator and order into heap. validatorsHeap := cmn.NewHeap() for _, val := range valSet.Validators { - val.Accum += int64(val.VotingPower) * int64(times) // TODO: mind overflow + val.Accum += val.VotingPower * int64(times) // TODO: mind overflow validatorsHeap.Push(val, accumComparable{val}) } @@ -100,9 +100,10 @@ func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Valida } // GetByIndex returns the validator by index. 
-// It returns nil values if index >= len(ValidatorSet.Validators) +// It returns nil values if index < 0 or +// index >= len(ValidatorSet.Validators) func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) { - if index >= len(valSet.Validators) { + if index < 0 || index >= len(valSet.Validators) { return nil, nil } val = valSet.Validators[index] @@ -222,7 +223,7 @@ func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { } // Verify that +2/3 of the set had signed the given signBytes -func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int, commit *Commit) error { +func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error { if valSet.Size() != len(commit.Precommits) { return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", valSet.Size(), len(commit.Precommits)) } @@ -282,7 +283,7 @@ func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height // * 10% of the valset can't just declare themselves kings // * If the validator set is 3x old size, we need more proof to trust func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string, - blockID BlockID, height int, commit *Commit) error { + blockID BlockID, height int64, commit *Commit) error { if newSet.Size() != len(commit.Precommits) { return errors.Errorf("Invalid commit -- wrong set size: %v vs %v", newSet.Size(), len(commit.Precommits)) diff --git a/types/validator_set_test.go b/types/validator_set_test.go index a285adee..572b7b00 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/tendermint/go-crypto" - wire "github.com/tendermint/go-wire" + "github.com/tendermint/go-wire" cmn "github.com/tendermint/tmlibs/common" ) diff --git a/types/vote.go b/types/vote.go index 65841568..bb8679f4 100644 --- a/types/vote.go +++ b/types/vote.go @@ -17,6 +17,7 @@ var ( 
ErrVoteInvalidValidatorAddress = errors.New("Invalid validator address") ErrVoteInvalidSignature = errors.New("Invalid signature") ErrVoteInvalidBlockHash = errors.New("Invalid block hash") + ErrVoteNil = errors.New("Nil vote") ) type ErrVoteConflictingVotes struct { @@ -50,7 +51,7 @@ func IsVoteTypeValid(type_ byte) bool { type Vote struct { ValidatorAddress data.Bytes `json:"validator_address"` ValidatorIndex int `json:"validator_index"` - Height int `json:"height"` + Height int64 `json:"height"` Round int `json:"round"` Type byte `json:"type"` BlockID BlockID `json:"block_id"` // zero if vote is nil. diff --git a/types/vote_set.go b/types/vote_set.go index dcfb0088..941852a8 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -45,7 +45,7 @@ import ( */ type VoteSet struct { chainID string - height int + height int64 round int type_ byte @@ -60,7 +60,7 @@ type VoteSet struct { } // Constructs a new VoteSet struct used to accumulate votes for given height/round. -func NewVoteSet(chainID string, height int, round int, type_ byte, valSet *ValidatorSet) *VoteSet { +func NewVoteSet(chainID string, height int64, round int, type_ byte, valSet *ValidatorSet) *VoteSet { if height == 0 { cmn.PanicSanity("Cannot make VoteSet for height == 0, doesn't make sense.") } @@ -83,7 +83,7 @@ func (voteSet *VoteSet) ChainID() string { return voteSet.chainID } -func (voteSet *VoteSet) Height() int { +func (voteSet *VoteSet) Height() int64 { if voteSet == nil { return 0 } else { @@ -123,6 +123,7 @@ func (voteSet *VoteSet) Size() int { // Conflicting votes return added=*, err=ErrVoteConflictingVotes. // NOTE: vote should not be mutated after adding. 
// NOTE: VoteSet must not be nil +// NOTE: Vote must not be nil func (voteSet *VoteSet) AddVote(vote *Vote) (added bool, err error) { if voteSet == nil { cmn.PanicSanity("AddVote() on nil VoteSet") @@ -135,6 +136,9 @@ func (voteSet *VoteSet) AddVote(vote *Vote) (added bool, err error) { // NOTE: Validates as much as possible before attempting to verify the signature. func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { + if vote == nil { + return false, ErrVoteNil + } valIndex := vote.ValidatorIndex valAddr := vote.ValidatorAddress blockKey := vote.BlockID.Key() @@ -519,7 +523,7 @@ func (vs *blockVotes) getByIndex(index int) *Vote { // Common interface between *consensus.VoteSet and types.Commit type VoteSetReader interface { - Height() int + Height() int64 Round() int Type() byte Size() int diff --git a/types/vote_set_test.go b/types/vote_set_test.go index 5a757a00..b093c44f 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -4,13 +4,13 @@ import ( "bytes" "testing" - "github.com/tendermint/go-crypto" + crypto "github.com/tendermint/go-crypto" cmn "github.com/tendermint/tmlibs/common" tst "github.com/tendermint/tmlibs/test" ) // NOTE: privValidators are in order -func randVoteSet(height int, round int, type_ byte, numValidators int, votingPower int64) (*VoteSet, *ValidatorSet, []*PrivValidatorFS) { +func randVoteSet(height int64, round int, type_ byte, numValidators int, votingPower int64) (*VoteSet, *ValidatorSet, []*PrivValidatorFS) { valSet, privValidators := RandValidatorSet(numValidators, votingPower) return NewVoteSet("test_chain_id", height, round, type_, valSet), valSet, privValidators } @@ -24,7 +24,7 @@ func withValidator(vote *Vote, addr []byte, idx int) *Vote { } // Convenience: Return new vote with different height -func withHeight(vote *Vote, height int) *Vote { +func withHeight(vote *Vote, height int64) *Vote { vote = vote.Copy() vote.Height = height return vote @@ -69,7 +69,7 @@ func signAddVote(privVal 
*PrivValidatorFS, vote *Vote, voteSet *VoteSet) (bool, } func TestAddVote(t *testing.T) { - height, round := 1, 0 + height, round := int64(1), 0 voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1) val0 := privValidators[0] @@ -112,7 +112,7 @@ func TestAddVote(t *testing.T) { } func Test2_3Majority(t *testing.T) { - height, round := 1, 0 + height, round := int64(1), 0 voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1) voteProto := &Vote{ @@ -126,7 +126,10 @@ func Test2_3Majority(t *testing.T) { // 6 out of 10 voted for nil. for i := 0; i < 6; i++ { vote := withValidator(voteProto, privValidators[i].GetAddress(), i) - signAddVote(privValidators[i], vote, voteSet) + _, err := signAddVote(privValidators[i], vote, voteSet) + if err != nil { + t.Error(err) + } } blockID, ok := voteSet.TwoThirdsMajority() if ok || !blockID.IsZero() { @@ -136,7 +139,10 @@ func Test2_3Majority(t *testing.T) { // 7th validator voted for some blockhash { vote := withValidator(voteProto, privValidators[6].GetAddress(), 6) - signAddVote(privValidators[6], withBlockHash(vote, cmn.RandBytes(32)), voteSet) + _, err := signAddVote(privValidators[6], withBlockHash(vote, cmn.RandBytes(32)), voteSet) + if err != nil { + t.Error(err) + } blockID, ok = voteSet.TwoThirdsMajority() if ok || !blockID.IsZero() { t.Errorf("There should be no 2/3 majority") @@ -146,7 +152,10 @@ func Test2_3Majority(t *testing.T) { // 8th validator voted for nil. 
{ vote := withValidator(voteProto, privValidators[7].GetAddress(), 7) - signAddVote(privValidators[7], vote, voteSet) + _, err := signAddVote(privValidators[7], vote, voteSet) + if err != nil { + t.Error(err) + } blockID, ok = voteSet.TwoThirdsMajority() if !ok || !blockID.IsZero() { t.Errorf("There should be 2/3 majority for nil") @@ -155,7 +164,7 @@ func Test2_3Majority(t *testing.T) { } func Test2_3MajorityRedux(t *testing.T) { - height, round := 1, 0 + height, round := int64(1), 0 voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 100, 1) blockHash := crypto.CRandBytes(32) @@ -174,7 +183,10 @@ func Test2_3MajorityRedux(t *testing.T) { // 66 out of 100 voted for nil. for i := 0; i < 66; i++ { vote := withValidator(voteProto, privValidators[i].GetAddress(), i) - signAddVote(privValidators[i], vote, voteSet) + _, err := signAddVote(privValidators[i], vote, voteSet) + if err != nil { + t.Error(err) + } } blockID, ok := voteSet.TwoThirdsMajority() if ok || !blockID.IsZero() { @@ -184,7 +196,10 @@ func Test2_3MajorityRedux(t *testing.T) { // 67th validator voted for nil { vote := withValidator(voteProto, privValidators[66].GetAddress(), 66) - signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet) + _, err := signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet) + if err != nil { + t.Error(err) + } blockID, ok = voteSet.TwoThirdsMajority() if ok || !blockID.IsZero() { t.Errorf("There should be no 2/3 majority: last vote added was nil") @@ -195,7 +210,10 @@ func Test2_3MajorityRedux(t *testing.T) { { vote := withValidator(voteProto, privValidators[67].GetAddress(), 67) blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)} - signAddVote(privValidators[67], withBlockPartsHeader(vote, blockPartsHeader), voteSet) + _, err := signAddVote(privValidators[67], withBlockPartsHeader(vote, blockPartsHeader), voteSet) + if err != nil { + t.Error(err) + } blockID, ok = voteSet.TwoThirdsMajority() if ok || 
!blockID.IsZero() { t.Errorf("There should be no 2/3 majority: last vote added had different PartSetHeader Hash") @@ -206,7 +224,10 @@ func Test2_3MajorityRedux(t *testing.T) { { vote := withValidator(voteProto, privValidators[68].GetAddress(), 68) blockPartsHeader := PartSetHeader{blockPartsTotal + 1, blockPartsHeader.Hash} - signAddVote(privValidators[68], withBlockPartsHeader(vote, blockPartsHeader), voteSet) + _, err := signAddVote(privValidators[68], withBlockPartsHeader(vote, blockPartsHeader), voteSet) + if err != nil { + t.Error(err) + } blockID, ok = voteSet.TwoThirdsMajority() if ok || !blockID.IsZero() { t.Errorf("There should be no 2/3 majority: last vote added had different PartSetHeader Total") @@ -216,7 +237,10 @@ func Test2_3MajorityRedux(t *testing.T) { // 70th validator voted for different BlockHash { vote := withValidator(voteProto, privValidators[69].GetAddress(), 69) - signAddVote(privValidators[69], withBlockHash(vote, cmn.RandBytes(32)), voteSet) + _, err := signAddVote(privValidators[69], withBlockHash(vote, cmn.RandBytes(32)), voteSet) + if err != nil { + t.Error(err) + } blockID, ok = voteSet.TwoThirdsMajority() if ok || !blockID.IsZero() { t.Errorf("There should be no 2/3 majority: last vote added had different BlockHash") @@ -226,7 +250,10 @@ func Test2_3MajorityRedux(t *testing.T) { // 71st validator voted for the right BlockHash & BlockPartsHeader { vote := withValidator(voteProto, privValidators[70].GetAddress(), 70) - signAddVote(privValidators[70], vote, voteSet) + _, err := signAddVote(privValidators[70], vote, voteSet) + if err != nil { + t.Error(err) + } blockID, ok = voteSet.TwoThirdsMajority() if !ok || !blockID.Equals(BlockID{blockHash, blockPartsHeader}) { t.Errorf("There should be 2/3 majority") @@ -235,7 +262,7 @@ func Test2_3MajorityRedux(t *testing.T) { } func TestBadVotes(t *testing.T) { - height, round := 1, 0 + height, round := int64(1), 0 voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1) 
voteProto := &Vote{ @@ -294,7 +321,7 @@ func TestBadVotes(t *testing.T) { } func TestConflicts(t *testing.T) { - height, round := 1, 0 + height, round := int64(1), 0 voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 4, 1) blockHash1 := cmn.RandBytes(32) blockHash2 := cmn.RandBytes(32) @@ -423,7 +450,7 @@ func TestConflicts(t *testing.T) { } func TestMakeCommit(t *testing.T) { - height, round := 1, 0 + height, round := int64(1), 0 voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrecommit, 10, 1) blockHash, blockPartsHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)} @@ -439,7 +466,10 @@ func TestMakeCommit(t *testing.T) { // 6 out of 10 voted for some block. for i := 0; i < 6; i++ { vote := withValidator(voteProto, privValidators[i].GetAddress(), i) - signAddVote(privValidators[i], vote, voteSet) + _, err := signAddVote(privValidators[i], vote, voteSet) + if err != nil { + t.Error(err) + } } // MakeCommit should fail. @@ -450,13 +480,20 @@ func TestMakeCommit(t *testing.T) { vote := withValidator(voteProto, privValidators[6].GetAddress(), 6) vote = withBlockHash(vote, cmn.RandBytes(32)) vote = withBlockPartsHeader(vote, PartSetHeader{123, cmn.RandBytes(32)}) - signAddVote(privValidators[6], vote, voteSet) + + _, err := signAddVote(privValidators[6], vote, voteSet) + if err != nil { + t.Error(err) + } } // The 8th voted like everyone else. 
{ vote := withValidator(voteProto, privValidators[7].GetAddress(), 7) - signAddVote(privValidators[7], vote, voteSet) + _, err := signAddVote(privValidators[7], vote, voteSet) + if err != nil { + t.Error(err) + } } commit := voteSet.MakeCommit() diff --git a/version/version.go b/version/version.go index 637cc5de..54081b35 100644 --- a/version/version.go +++ b/version/version.go @@ -1,19 +1,20 @@ package version const Maj = "0" -const Min = "12" -const Fix = "1" +const Min = "13" +const Fix = "0" var ( - // The full version string - Version = "0.12.1" + // Version is the current version of Tendermint + // Must be a string because scripts like dist.sh read this file. + Version = "0.13.0" - // GitCommit is set with --ldflags "-X main.gitCommit=$(git rev-parse HEAD)" + // GitCommit is the current HEAD set using ldflags. GitCommit string ) func init() { if GitCommit != "" { - Version += "-" + GitCommit[:8] + Version += "-" + GitCommit } }