Merge pull request #927 from tendermint/release-v0.13.0

Release v0.13.0
This commit is contained in:
Ethan Buchman 2017-12-06 03:57:49 -05:00 committed by GitHub
commit a2b92c0745
206 changed files with 6332 additions and 2730 deletions

View File

@ -19,3 +19,8 @@ coverage:
comment:
layout: "header, diff"
behavior: default # update if exists else create new
ignore:
- "docs"
- "*.md"
- "*.rst"

View File

@ -8,10 +8,7 @@ end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
[Makefile]
indent_style = tab
[*.sh]
[*.{sh,Makefile}]
indent_style = tab
[*.proto]

View File

@ -27,6 +27,38 @@ BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety, or liveness
## 0.13.0 (December 6, 2017)
BREAKING CHANGES:
- abci: update to v0.8 using gogo/protobuf; includes tx tags, vote info in RequestBeginBlock, data.Bytes everywhere, use int64, etc.
- types: block heights are now `int64` everywhere
- types & node: EventSwitch and EventCache have been replaced by EventBus and EventBuffer; event types have been overhauled (see the sketch below this list)
- node: EventSwitch methods now refer to EventBus
- rpc/lib/types: RPCResponse is no longer a pointer; WSRPCConnection interface has been modified
- rpc/client: WaitForOneEvent takes an EventsClient instead of types.EventSwitch
- rpc/client: Add/RemoveListenerForEvent are now Subscribe/Unsubscribe
- rpc/core/types: ResultABCIQuery wraps an abci.ResponseQuery
- rpc: `/subscribe` and `/unsubscribe` take `query` arg instead of `event`
- rpc: `/status` returns the LatestBlockTime in human-readable form instead of in nanoseconds
- mempool: cached transactions return an error instead of an ABCI response with BadNonce
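A minimal sketch of the new event-subscription flow implied by these changes, mirroring the test code elsewhere in this commit (the subscriber name and channel capacity are illustrative, and wiring the bus into a running node is omitted):

```
package main

import (
	"context"
	"fmt"

	"github.com/tendermint/tendermint/types"
)

func main() {
	// EventBus replaces the old EventSwitch; subscriptions are now query-based.
	eventBus := types.NewEventBus()
	if err := eventBus.Start(); err != nil {
		panic(err)
	}
	defer eventBus.Stop()

	// Subscribe for new-block events using a query instead of an event string.
	ch := make(chan interface{}, 1)
	if err := eventBus.Subscribe(context.Background(), "example-subscriber", types.EventQueryNewBlock, ch); err != nil {
		panic(err)
	}
	fmt.Println("subscribed; NewBlock events will be delivered on ch")
}
```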
FEATURES:
- rpc: new `/unsubscribe_all` WebSocket RPC endpoint
- rpc: new `/tx_search` endpoint for filtering transactions by more complex queries
- p2p/trust: new trust metric for tracking peers. See ADR-006
- config: TxIndexConfig allows setting which DeliverTx tags to index
IMPROVEMENTS:
- New asynchronous events system using `tmlibs/pubsub`
- logging: Various small improvements
- consensus: Graceful shutdown when app crashes
- tests: Fix various non-deterministic errors
- p2p: more defensive programming
BUG FIXES:
- consensus: fix panic where prs.ProposalBlockParts is not initialized
- p2p: fix panic on bad channel
## 0.12.1 (November 27, 2017)
BUG FIXES:

View File

@ -8,9 +8,9 @@ Please make sure to use `gofmt` before every commit - the easiest way to do this
## Forking
Please note that Go requires code to live under absolute paths, which complicates forking.
While my fork lives at `https://github.com/ebuchman/tendermint`,
the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`.
Instead, we use `git remote` to add the fork as a new remote for the original repo,
`$GOPATH/src/github.com/tendermint/tendermint`, and do all the work there.
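A sketch of that workflow, reusing the example fork above (the remote name `fork` and the branch name are arbitrary):

```
cd $GOPATH/src/github.com/tendermint/tendermint
git remote add fork git@github.com:ebuchman/tendermint.git
# hack on a feature branch, then push it to the fork
git checkout -b my-feature
git push fork my-feature
```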
@ -38,11 +38,22 @@ We use [glide](https://github.com/masterminds/glide) to manage dependencies.
That said, the master branch of every Tendermint repository should just build with `go get`, which means they should be kept up-to-date with their dependencies so we can get away with telling people they can just `go get` our software.
Since some dependencies are not under our control, a third party may break our build, in which case we can fall back on `glide install`. Even for dependencies under our control, glide helps us keeps multiple repos in sync as they evolve. Anything with an executable, such as apps, tools, and the core, should use glide.
Run `bash scripts/glide/status.sh` to get a list of vendored dependencies that may not be up-to-date.
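For example, a sketch of installing glide and checking dependency status (the `get_vendor_deps` Makefile target automates the first two steps):

```
go get github.com/Masterminds/glide
glide install
bash scripts/glide/status.sh
```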
## Vagrant
If you are a [Vagrant](https://www.vagrantup.com/) user, all you have to do to get started hacking Tendermint is:
```
vagrant up
vagrant ssh
cd ~/go/src/github.com/tendermint/tendermint
make test
```
## Testing
All repos should be hooked up to circle.
If they have `.go` files in the root directory, they will be automatically tested by circle using `go test -v -race ./...`. If not, they will need a `circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and includes its continuous integration status using a badge in the `README.md`.
## Branching Model and Release

View File

@ -1,8 +1,8 @@
FROM alpine:3.6
# This is the release of tendermint to pull in.
ENV TM_VERSION 0.11.0
ENV TM_SHA256SUM 7e443bac4d42f12e7beaf9cee63b4a565dad8c58895291fdedde8057088b70c5
ENV TM_VERSION 0.12.0
ENV TM_SHA256SUM be17469e92f04fc2a3663f891da28edbaa6c37c4d2f746736571887f4790555a
# Tendermint will be looking for genesis file in /tendermint (unless you change
# `genesis_file` in config.toml). You can put your config.toml and private

View File

@ -1,6 +1,7 @@
# Supported tags and respective `Dockerfile` links
- `0.11.0`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/9177cc1f64ca88a4a0243c5d1773d10fba67e201/DOCKER/Dockerfile)
- `0.12.0`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/70d8afa6e952e24c573ece345560a5971bf2cc0e/DOCKER/Dockerfile)
- `0.11.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/9177cc1f64ca88a4a0243c5d1773d10fba67e201/DOCKER/Dockerfile)
- `0.10.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/e5342f4054ab784b2cd6150e14f01053d7c8deb2/DOCKER/Dockerfile)
- `0.9.1`, `0.9` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/809e0e8c5933604ba8b2d096803ada7c5ec4dfd3/DOCKER/Dockerfile)
- `0.9.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile)
@ -12,7 +13,7 @@
# Quick reference
* **Where to get help:**
[Chat on Rocket](https://cosmos.rocket.chat/)
https://tendermint.com/community
* **Where to file issues:**
https://github.com/tendermint/tendermint/issues

View File

@ -1 +0,0 @@
The installation guide has moved to the [docs directory](docs/guides/install-from-source.md) in order to easily be rendered by the website. Please update your links accordingly.

View File

@ -1,30 +1,32 @@
GOTOOLS = \
github.com/mitchellh/gox \
github.com/tcnksm/ghr \
github.com/Masterminds/glide \
github.com/alecthomas/gometalinter
PACKAGES=$(shell go list ./... | grep -v '/vendor/')
BUILD_TAGS?=tendermint
TMHOME = $${TMHOME:-$$HOME/.tendermint}
all: install test
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short HEAD`"
install: get_vendor_deps
@go install --ldflags '-extldflags "-static"' \
--ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse HEAD`" ./cmd/tendermint
all: get_vendor_deps install test
install:
CGO_ENABLED=0 go install $(BUILD_FLAGS) ./cmd/tendermint
build:
go build \
--ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse HEAD`" -o build/tendermint ./cmd/tendermint/
CGO_ENABLED=0 go build $(BUILD_FLAGS) -o build/tendermint ./cmd/tendermint/
build_race:
go build -race -o build/tendermint ./cmd/tendermint
CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -o build/tendermint ./cmd/tendermint
# dist builds binaries for all platforms and packages them for distribution
dist:
@BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'"
test:
@echo "--> Running linter"
@make metalinter_test
@echo "--> Running go test"
@go test $(PACKAGES)
@ -35,7 +37,7 @@ test_race:
test_integrations:
@bash ./test/test.sh
release:
test_release:
@go test -tags release $(PACKAGES)
test100:
@ -58,29 +60,57 @@ get_deps:
grep -v /vendor/ | sort | uniq | \
xargs go get -v -d
get_vendor_deps: ensure_tools
update_deps:
@echo "--> Updating dependencies"
@go get -d -u ./...
get_vendor_deps:
@hash glide 2>/dev/null || go get github.com/Masterminds/glide
@rm -rf vendor/
@echo "--> Running glide install"
@glide install
update_deps: tools
@echo "--> Updating dependencies"
@go get -d -u ./...
revision:
-echo `git rev-parse --verify HEAD` > $(TMHOME)/revision
-echo `git rev-parse --verify HEAD` >> $(TMHOME)/revision_history
update_tools:
@echo "--> Updating tools"
@go get -u $(GOTOOLS)
tools:
go get -u -v $(GOTOOLS)
ensure_tools:
go get $(GOTOOLS)
@echo "--> Installing tools"
@go get $(GOTOOLS)
@gometalinter --install
### Formatting, linting, and vetting
megacheck:
@for pkg in ${PACKAGES}; do megacheck "$$pkg"; done
metalinter:
@gometalinter --vendor --deadline=600s --enable-all --disable=lll ./...
metalinter_test:
@gometalinter --vendor --deadline=600s --disable-all \
--enable=deadcode \
--enable=misspell \
--enable=safesql \
./...
.PHONY: install build build_race dist test test_race test_integrations test100 draw_deps list_deps get_deps get_vendor_deps update_deps revision tools
# --enable=gas \
#--enable=maligned \
#--enable=dupl \
#--enable=errcheck \
#--enable=goconst \
#--enable=gocyclo \
#--enable=goimports \
#--enable=golint \ <== comments on anything exported
#--enable=gosimple \
#--enable=gotype \
#--enable=ineffassign \
#--enable=interfacer \
#--enable=megacheck \
#--enable=staticcheck \
#--enable=structcheck \
#--enable=unconvert \
#--enable=unparam \
#--enable=unused \
#--enable=varcheck \
#--enable=vet \
#--enable=vetshadow \
.PHONY: install build build_race dist test test_race test_integrations test100 draw_deps list_deps get_deps get_vendor_deps update_deps update_tools tools test_release

View File

@ -8,6 +8,7 @@ Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short.
[![API Reference](
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
)](https://godoc.org/github.com/tendermint/tendermint)
[![Go version](https://img.shields.io/badge/go-1.9.2-blue.svg)](https://github.com/moovweb/gvm)
[![Rocket.Chat](https://demo.rocket.chat/images/join-chat.svg)](https://cosmos.rocket.chat/)
[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
@ -23,7 +24,7 @@ _NOTE: This is alpha software. Please contact us if you intend to run it in prod
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.
For more information, from introduction to install to application development, [Read The Docs](http://tendermint.readthedocs.io/projects/tools/en/master).
For more information, from introduction to install to application development, [Read The Docs](https://tendermint.readthedocs.io/en/master/).
## Install
@ -33,13 +34,13 @@ To install from source, you should be able to:
`go get -u github.com/tendermint/tendermint/cmd/tendermint`
For more details (or if it fails), [read the docs](http://tendermint.readthedocs.io/projects/tools/en/master/install.html).
For more details (or if it fails), [read the docs](https://tendermint.readthedocs.io/en/master/install.html).
## Resources
### Tendermint Core
All resources involving the use of, building applications on, or developing for tendermint can be found at [Read The Docs](http://tendermint.readthedocs.io/projects/tools/en/master). Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs.
All resources involving the use of, building applications on, or developing for tendermint can be found at [Read The Docs](https://tendermint.readthedocs.io/en/master/). Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs.
### Sub-projects

Vagrantfile vendored
View File

@ -17,6 +17,7 @@ Vagrant.configure("2") do |config|
usermod -a -G docker vagrant
apt-get autoremove -y
apt-get install -y --no-install-recommends git
curl -O https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz
tar -xvf go1.9.linux-amd64.tar.gz
rm -rf /usr/local/go

View File

@ -2,11 +2,13 @@ package benchmarks
import (
"testing"
"time"
"github.com/tendermint/go-crypto"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/go-wire"
proto "github.com/tendermint/tendermint/benchmarks/proto"
"github.com/tendermint/tendermint/p2p"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
)
@ -26,7 +28,7 @@ func BenchmarkEncodeStatusWire(b *testing.B) {
PubKey: pubKey,
LatestBlockHash: []byte("SOMEBYTES"),
LatestBlockHeight: 123,
LatestBlockTime: 1234,
LatestBlockTime: time.Unix(0, 1234),
}
b.StartTimer()

View File

@ -18,12 +18,16 @@ func BenchmarkFileWrite(b *testing.B) {
b.StartTimer()
for i := 0; i < b.N; i++ {
file.Write([]byte(testString))
_, err := file.Write([]byte(testString))
if err != nil {
b.Error(err)
}
}
file.Close()
err = os.Remove("benchmark_file_write.out")
if err != nil {
if err := file.Close(); err != nil {
b.Error(err)
}
if err := os.Remove("benchmark_file_write.out"); err != nil {
b.Error(err)
}
}

View File

@ -24,9 +24,6 @@ import bytes "bytes"
import strings "strings"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import sort "sort"
import strconv "strconv"
import reflect "reflect"
import io "io"
@ -392,31 +389,6 @@ func (this *PubKeyEd25519) GoString() string {
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringTest(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func extensionToGoStringTest(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
if e == nil {
return "nil"
}
s := "map[int32]proto.Extension{"
keys := make([]int, 0, len(e))
for k := range e {
keys = append(keys, int(k))
}
sort.Ints(keys)
ss := []string{}
for _, k := range keys {
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
}
s += strings.Join(ss, ",") + "}"
return s
}
func (m *ResultStatus) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
@ -586,24 +558,6 @@ func (m *PubKeyEd25519) MarshalTo(data []byte) (int, error) {
return i, nil
}
func encodeFixed64Test(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Test(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintTest(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
@ -689,9 +643,6 @@ func sovTest(x uint64) (n int) {
}
return n
}
func sozTest(x uint64) (n int) {
return sovTest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *ResultStatus) String() string {
if this == nil {
return "nil"
@ -742,14 +693,6 @@ func (this *PubKeyEd25519) String() string {
}, "")
return s
}
func valueToStringTest(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *ResultStatus) Unmarshal(data []byte) error {
var hasFields [1]uint64
l := len(data)

View File

@ -12,7 +12,7 @@ import (
func main() {
wsc := rpcclient.NewWSClient("127.0.0.1:46657", "/websocket")
_, err := wsc.Start()
err := wsc.Start()
if err != nil {
cmn.Exit(err.Error())
}

View File

@ -52,22 +52,22 @@ type BlockPool struct {
mtx sync.Mutex
// block requests
requesters map[int]*bpRequester
height int // the lowest key in requesters.
requesters map[int64]*bpRequester
height int64 // the lowest key in requesters.
numPending int32 // number of requests pending assignment or block response
// peers
peers map[string]*bpPeer
maxPeerHeight int
maxPeerHeight int64
requestsCh chan<- BlockRequest
timeoutsCh chan<- string
}
func NewBlockPool(start int, requestsCh chan<- BlockRequest, timeoutsCh chan<- string) *BlockPool {
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, timeoutsCh chan<- string) *BlockPool {
bp := &BlockPool{
peers: make(map[string]*bpPeer),
requesters: make(map[int]*bpRequester),
requesters: make(map[int64]*bpRequester),
height: start,
numPending: 0,
@ -132,7 +132,7 @@ func (pool *BlockPool) removeTimedoutPeers() {
}
}
func (pool *BlockPool) GetStatus() (height int, numPending int32, lenRequesters int) {
func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@ -195,7 +195,7 @@ func (pool *BlockPool) PopRequest() {
// Invalidates the block at pool.height,
// Remove the peer and redo request from others.
func (pool *BlockPool) RedoRequest(height int) {
func (pool *BlockPool) RedoRequest(height int64) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@ -232,15 +232,15 @@ func (pool *BlockPool) AddBlock(peerID string, block *types.Block, blockSize int
}
}
// MaxPeerHeight returns the heighest height reported by a peer
func (pool *BlockPool) MaxPeerHeight() int {
// MaxPeerHeight returns the highest height reported by a peer.
func (pool *BlockPool) MaxPeerHeight() int64 {
pool.mtx.Lock()
defer pool.mtx.Unlock()
return pool.maxPeerHeight
}
// Sets the peer's alleged blockchain height.
func (pool *BlockPool) SetPeerHeight(peerID string, height int) {
func (pool *BlockPool) SetPeerHeight(peerID string, height int64) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@ -279,7 +279,7 @@ func (pool *BlockPool) removePeer(peerID string) {
// Pick an available peer with at least the given minHeight.
// If no peers are available, returns nil.
func (pool *BlockPool) pickIncrAvailablePeer(minHeight int) *bpPeer {
func (pool *BlockPool) pickIncrAvailablePeer(minHeight int64) *bpPeer {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@ -304,17 +304,24 @@ func (pool *BlockPool) makeNextRequester() {
pool.mtx.Lock()
defer pool.mtx.Unlock()
nextHeight := pool.height + len(pool.requesters)
nextHeight := pool.height + pool.requestersLen()
request := newBPRequester(pool, nextHeight)
// request.SetLogger(pool.Logger.With("height", nextHeight))
pool.requesters[nextHeight] = request
pool.numPending++
request.Start()
err := request.Start()
if err != nil {
request.Logger.Error("Error starting request", "err", err)
}
}
func (pool *BlockPool) sendRequest(height int, peerID string) {
func (pool *BlockPool) requestersLen() int64 {
return int64(len(pool.requesters))
}
func (pool *BlockPool) sendRequest(height int64, peerID string) {
if !pool.IsRunning() {
return
}
@ -334,7 +341,8 @@ func (pool *BlockPool) debug() string {
defer pool.mtx.Unlock()
str := ""
for h := pool.height; h < pool.height+len(pool.requesters); h++ {
nextHeight := pool.height + pool.requestersLen()
for h := pool.height; h < nextHeight; h++ {
if pool.requesters[h] == nil {
str += cmn.Fmt("H(%v):X ", h)
} else {
@ -352,7 +360,7 @@ type bpPeer struct {
id string
recvMonitor *flow.Monitor
height int
height int64
numPending int32
timeout *time.Timer
didTimeout bool
@ -360,7 +368,7 @@ type bpPeer struct {
logger log.Logger
}
func newBPPeer(pool *BlockPool, peerID string, height int) *bpPeer {
func newBPPeer(pool *BlockPool, peerID string, height int64) *bpPeer {
peer := &bpPeer{
pool: pool,
id: peerID,
@ -421,7 +429,7 @@ func (peer *bpPeer) onTimeout() {
type bpRequester struct {
cmn.BaseService
pool *BlockPool
height int
height int64
gotBlockCh chan struct{}
redoCh chan struct{}
@ -430,7 +438,7 @@ type bpRequester struct {
block *types.Block
}
func newBPRequester(pool *BlockPool, height int) *bpRequester {
func newBPRequester(pool *BlockPool, height int64) *bpRequester {
bpr := &bpRequester{
pool: pool,
height: height,
@ -542,6 +550,6 @@ OUTER_LOOP:
//-------------------------------------
type BlockRequest struct {
Height int
Height int64
PeerID string
}

View File

@ -16,27 +16,32 @@ func init() {
type testPeer struct {
id string
height int
height int64
}
func makePeers(numPeers int, minHeight, maxHeight int) map[string]testPeer {
func makePeers(numPeers int, minHeight, maxHeight int64) map[string]testPeer {
peers := make(map[string]testPeer, numPeers)
for i := 0; i < numPeers; i++ {
peerID := cmn.RandStr(12)
height := minHeight + rand.Intn(maxHeight-minHeight)
height := minHeight + rand.Int63n(maxHeight-minHeight)
peers[peerID] = testPeer{peerID, height}
}
return peers
}
func TestBasic(t *testing.T) {
start := 42
start := int64(42)
peers := makePeers(10, start+1, 1000)
timeoutsCh := make(chan string, 100)
requestsCh := make(chan BlockRequest, 100)
pool := NewBlockPool(start, requestsCh, timeoutsCh)
pool.SetLogger(log.TestingLogger())
pool.Start()
err := pool.Start()
if err != nil {
t.Error(err)
}
defer pool.Stop()
// Introduce each peer.
@ -82,13 +87,16 @@ func TestBasic(t *testing.T) {
}
func TestTimeout(t *testing.T) {
start := 42
start := int64(42)
peers := makePeers(10, start+1, 1000)
timeoutsCh := make(chan string, 100)
requestsCh := make(chan BlockRequest, 100)
pool := NewBlockPool(start, requestsCh, timeoutsCh)
pool.SetLogger(log.TestingLogger())
pool.Start()
err := pool.Start()
if err != nil {
t.Error(err)
}
defer pool.Stop()
for _, peer := range peers {

View File

@ -49,14 +49,11 @@ type BlockchainReactor struct {
requestsCh chan BlockRequest
timeoutsCh chan string
evsw types.EventSwitch
eventBus *types.EventBus
}
// NewBlockchainReactor returns new reactor instance.
func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConnConsensus, store *BlockStore, fastSync bool) *BlockchainReactor {
if state.LastBlockHeight == store.Height()-1 {
store.height-- // XXX HACK, make this better
}
if state.LastBlockHeight != store.Height() {
cmn.PanicSanity(cmn.Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()))
}
@ -88,9 +85,11 @@ func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
// OnStart implements cmn.Service.
func (bcR *BlockchainReactor) OnStart() error {
bcR.BaseReactor.OnStart()
if err := bcR.BaseReactor.OnStart(); err != nil {
return err
}
if bcR.fastSync {
_, err := bcR.pool.Start()
err := bcR.pool.Start()
if err != nil {
return err
}
@ -108,7 +107,7 @@ func (bcR *BlockchainReactor) OnStop() {
// GetChannels implements Reactor
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
&p2p.ChannelDescriptor{
{
ID: BlockchainChannel,
Priority: 10,
SendQueueCapacity: 1000,
@ -121,6 +120,8 @@ func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
if !peer.Send(BlockchainChannel, struct{ BlockchainMessage }{&bcStatusResponseMessage{bcR.store.Height()}}) {
// doing nothing, will try later in `poolRoutine`
}
// peer is added to the pool once we receive the first
// bcStatusResponseMessage from the peer and call pool.SetPeerHeight
}
// RemovePeer implements Reactor by removing peer from the pool.
@ -224,7 +225,7 @@ FOR_LOOP:
}
case <-statusUpdateTicker.C:
// ask for status updates
go bcR.BroadcastStatusRequest()
go bcR.BroadcastStatusRequest() // nolint: errcheck
case <-switchToConsensusTicker.C:
height, numPending, lenRequesters := bcR.pool.GetStatus()
outbound, inbound, _ := bcR.Switch.NumPeers()
@ -271,7 +272,7 @@ FOR_LOOP:
// NOTE: we could improve performance if we
// didn't make the app commit to disk every block
// ... but we would need a way to get the hash without it persisting
err := bcR.state.ApplyBlock(bcR.evsw, bcR.proxyAppConn, first, firstPartsHeader, types.MockMempool{})
err := bcR.state.ApplyBlock(bcR.eventBus, bcR.proxyAppConn, first, firstPartsHeader, types.MockMempool{})
if err != nil {
// TODO This is bad, are we zombie?
cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
@ -299,9 +300,9 @@ func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
return nil
}
// SetEventSwitch implements events.Eventable
func (bcR *BlockchainReactor) SetEventSwitch(evsw types.EventSwitch) {
bcR.evsw = evsw
// SetEventBus sets event bus.
func (bcR *BlockchainReactor) SetEventBus(b *types.EventBus) {
bcR.eventBus = b
}
//-----------------------------------------------------------------------------
@ -343,7 +344,7 @@ func DecodeMessage(bz []byte, maxSize int) (msgType byte, msg BlockchainMessage,
//-------------------------------------
type bcBlockRequestMessage struct {
Height int
Height int64
}
func (m *bcBlockRequestMessage) String() string {
@ -351,7 +352,7 @@ func (m *bcBlockRequestMessage) String() string {
}
type bcNoBlockResponseMessage struct {
Height int
Height int64
}
func (brm *bcNoBlockResponseMessage) String() string {
@ -372,7 +373,7 @@ func (m *bcBlockResponseMessage) String() string {
//-------------------------------------
type bcStatusRequestMessage struct {
Height int
Height int64
}
func (m *bcStatusRequestMessage) String() string {
@ -382,7 +383,7 @@ func (m *bcStatusRequestMessage) String() string {
//-------------------------------------
type bcStatusResponseMessage struct {
Height int
Height int64
}
func (m *bcStatusResponseMessage) String() string {

View File

@ -14,7 +14,7 @@ import (
"github.com/tendermint/tendermint/types"
)
func newBlockchainReactor(maxBlockHeight int) *BlockchainReactor {
func newBlockchainReactor(maxBlockHeight int64) *BlockchainReactor {
logger := log.TestingLogger()
config := cfg.ResetTestRoot("blockchain_reactor_test")
@ -34,7 +34,7 @@ func newBlockchainReactor(maxBlockHeight int) *BlockchainReactor {
bcReactor.Switch = p2p.NewSwitch(cfg.DefaultP2PConfig())
// Lastly: let's add some blocks in
for blockHeight := 1; blockHeight <= maxBlockHeight; blockHeight++ {
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
firstBlock := makeBlock(blockHeight, state)
secondBlock := makeBlock(blockHeight+1, state)
firstParts := firstBlock.MakePartSet(state.Params.BlockGossipParams.BlockPartSizeBytes)
@ -45,7 +45,7 @@ func newBlockchainReactor(maxBlockHeight int) *BlockchainReactor {
}
func TestNoBlockMessageResponse(t *testing.T) {
maxBlockHeight := 20
maxBlockHeight := int64(20)
bcr := newBlockchainReactor(maxBlockHeight)
bcr.Start()
@ -58,7 +58,7 @@ func TestNoBlockMessageResponse(t *testing.T) {
chID := byte(0x01)
tests := []struct {
height int
height int64
existent bool
}{
{maxBlockHeight + 2, false},
@ -93,19 +93,19 @@ func TestNoBlockMessageResponse(t *testing.T) {
//----------------------------------------------
// utility funcs
func makeTxs(blockNumber int) (txs []types.Tx) {
func makeTxs(height int64) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(blockNumber), byte(i)}))
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
}
return txs
}
func makeBlock(blockNumber int, state *sm.State) *types.Block {
func makeBlock(height int64, state *sm.State) *types.Block {
prevHash := state.LastBlockID.Hash
prevParts := types.PartSetHeader{}
valHash := state.Validators.Hash()
prevBlockID := types.BlockID{prevHash, prevParts}
block, _ := types.MakeBlock(blockNumber, "test_chain", makeTxs(blockNumber),
block, _ := types.MakeBlock(height, "test_chain", makeTxs(height),
new(types.Commit), prevBlockID, valHash, state.AppHash, state.Params.BlockGossipParams.BlockPartSizeBytes)
return block
}

View File

@ -9,7 +9,7 @@ import (
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
. "github.com/tendermint/tmlibs/common"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
)
@ -32,7 +32,7 @@ type BlockStore struct {
db dbm.DB
mtx sync.RWMutex
height int
height int64
}
func NewBlockStore(db dbm.DB) *BlockStore {
@ -44,7 +44,7 @@ func NewBlockStore(db dbm.DB) *BlockStore {
}
// Height() returns the last known contiguous block height.
func (bs *BlockStore) Height() int {
func (bs *BlockStore) Height() int64 {
bs.mtx.RLock()
defer bs.mtx.RUnlock()
return bs.height
@ -58,7 +58,7 @@ func (bs *BlockStore) GetReader(key []byte) io.Reader {
return bytes.NewReader(bytez)
}
func (bs *BlockStore) LoadBlock(height int) *types.Block {
func (bs *BlockStore) LoadBlock(height int64) *types.Block {
var n int
var err error
r := bs.GetReader(calcBlockMetaKey(height))
@ -67,7 +67,7 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block {
}
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
if err != nil {
PanicCrisis(Fmt("Error reading block meta: %v", err))
cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
}
bytez := []byte{}
for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ {
@ -76,12 +76,12 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block {
}
block := wire.ReadBinary(&types.Block{}, bytes.NewReader(bytez), 0, &n, &err).(*types.Block)
if err != nil {
PanicCrisis(Fmt("Error reading block: %v", err))
cmn.PanicCrisis(cmn.Fmt("Error reading block: %v", err))
}
return block
}
func (bs *BlockStore) LoadBlockPart(height int, index int) *types.Part {
func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
var n int
var err error
r := bs.GetReader(calcBlockPartKey(height, index))
@ -90,12 +90,12 @@ func (bs *BlockStore) LoadBlockPart(height int, index int) *types.Part {
}
part := wire.ReadBinary(&types.Part{}, r, 0, &n, &err).(*types.Part)
if err != nil {
PanicCrisis(Fmt("Error reading block part: %v", err))
cmn.PanicCrisis(cmn.Fmt("Error reading block part: %v", err))
}
return part
}
func (bs *BlockStore) LoadBlockMeta(height int) *types.BlockMeta {
func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
var n int
var err error
r := bs.GetReader(calcBlockMetaKey(height))
@ -104,14 +104,14 @@ func (bs *BlockStore) LoadBlockMeta(height int) *types.BlockMeta {
}
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
if err != nil {
PanicCrisis(Fmt("Error reading block meta: %v", err))
cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
}
return blockMeta
}
// The +2/3 and other Precommit-votes for block at `height`.
// This Commit comes from block.LastCommit for `height+1`.
func (bs *BlockStore) LoadBlockCommit(height int) *types.Commit {
func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
var n int
var err error
r := bs.GetReader(calcBlockCommitKey(height))
@ -120,13 +120,13 @@ func (bs *BlockStore) LoadBlockCommit(height int) *types.Commit {
}
commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
if err != nil {
PanicCrisis(Fmt("Error reading commit: %v", err))
cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err))
}
return commit
}
// NOTE: the Precommit-vote heights are for the block at `height`
func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit {
func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
var n int
var err error
r := bs.GetReader(calcSeenCommitKey(height))
@ -135,7 +135,7 @@ func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit {
}
commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
if err != nil {
PanicCrisis(Fmt("Error reading commit: %v", err))
cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err))
}
return commit
}
@ -148,10 +148,10 @@ func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit {
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
height := block.Height
if height != bs.Height()+1 {
PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
if !blockParts.IsComplete() {
PanicSanity(Fmt("BlockStore can only save complete block part sets"))
cmn.PanicSanity(cmn.Fmt("BlockStore can only save complete block part sets"))
}
// Save block meta
@ -185,9 +185,9 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
bs.db.SetSync(nil, nil)
}
func (bs *BlockStore) saveBlockPart(height int, index int, part *types.Part) {
func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
if height != bs.Height()+1 {
PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
partBytes := wire.BinaryBytes(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes)
@ -195,19 +195,19 @@ func (bs *BlockStore) saveBlockPart(height int, index int, part *types.Part) {
//-----------------------------------------------------------------------------
func calcBlockMetaKey(height int) []byte {
func calcBlockMetaKey(height int64) []byte {
return []byte(fmt.Sprintf("H:%v", height))
}
func calcBlockPartKey(height int, partIndex int) []byte {
func calcBlockPartKey(height int64, partIndex int) []byte {
return []byte(fmt.Sprintf("P:%v:%v", height, partIndex))
}
func calcBlockCommitKey(height int) []byte {
func calcBlockCommitKey(height int64) []byte {
return []byte(fmt.Sprintf("C:%v", height))
}
func calcSeenCommitKey(height int) []byte {
func calcSeenCommitKey(height int64) []byte {
return []byte(fmt.Sprintf("SC:%v", height))
}
@ -216,13 +216,13 @@ func calcSeenCommitKey(height int) []byte {
var blockStoreKey = []byte("blockStore")
type BlockStoreStateJSON struct {
Height int
Height int64
}
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
bytes, err := json.Marshal(bsj)
if err != nil {
PanicSanity(Fmt("Could not marshal state bytes: %v", err))
cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
}
db.SetSync(blockStoreKey, bytes)
}
@ -237,7 +237,7 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
bsj := BlockStoreStateJSON{}
err := json.Unmarshal(bytes, &bsj)
if err != nil {
PanicCrisis(Fmt("Could not unmarshal bytes: %X", bytes))
cmn.PanicCrisis(cmn.Fmt("Could not unmarshal bytes: %X", bytes))
}
return bsj
}

View File

@ -19,7 +19,10 @@ var GenValidatorCmd = &cobra.Command{
func genValidator(cmd *cobra.Command, args []string) {
privValidator := types.GenPrivValidatorFS("")
privValidatorJSONBytes, _ := json.MarshalIndent(privValidator, "", "\t")
privValidatorJSONBytes, err := json.MarshalIndent(privValidator, "", "\t")
if err != nil {
panic(err)
}
fmt.Printf(`%v
`, string(privValidatorJSONBytes))
}

View File

@ -28,12 +28,14 @@ func initFiles(cmd *cobra.Command, args []string) {
genDoc := types.GenesisDoc{
ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
}
genDoc.Validators = []types.GenesisValidator{types.GenesisValidator{
genDoc.Validators = []types.GenesisValidator{{
PubKey: privValidator.GetPubKey(),
Power: 10,
}}
genDoc.SaveAs(genFile)
if err := genDoc.SaveAs(genFile); err != nil {
panic(err)
}
}
logger.Info("Initialized tendermint", "genesis", config.GenesisFile(), "priv_validator", config.PrivValidatorFile())

View File

@ -25,10 +25,13 @@ var ResetPrivValidatorCmd = &cobra.Command{
}
// ResetAll removes the privValidator files.
// Exported so other CLI tools can use it
// Exported so other CLI tools can use it.
func ResetAll(dbDir, privValFile string, logger log.Logger) {
resetPrivValidatorFS(privValFile, logger)
os.RemoveAll(dbDir)
if err := os.RemoveAll(dbDir); err != nil {
logger.Error("Error removing directory", "err", err)
return
}
logger.Info("Removed all data", "dir", dbDir)
}

View File

@ -26,8 +26,12 @@ const (
// modify in the test cases.
// NOTE: it unsets all TM* env variables.
func isolate(cmds ...*cobra.Command) cli.Executable {
os.Unsetenv("TMHOME")
os.Unsetenv("TM_HOME")
if err := os.Unsetenv("TMHOME"); err != nil {
panic(err)
}
if err := os.Unsetenv("TM_HOME"); err != nil {
panic(err)
}
viper.Reset()
config = cfg.DefaultConfig()

View File

@ -49,7 +49,7 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
return fmt.Errorf("Failed to create node: %v", err)
}
if _, err := n.Start(); err != nil {
if err := n.Start(); err != nil {
return fmt.Errorf("Failed to start node: %v", err)
} else {
logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())

View File

@ -63,7 +63,9 @@ func testnetFiles(cmd *cobra.Command, args []string) {
// Write genesis file.
for i := 0; i < nValidators; i++ {
mach := cmn.Fmt("mach%d", i)
genDoc.SaveAs(path.Join(dataDir, mach, "genesis.json"))
if err := genDoc.SaveAs(path.Join(dataDir, mach, "genesis.json")); err != nil {
panic(err)
}
}
fmt.Println(cmn.Fmt("Successfully initialized %v node directories", nValidators))

View File

@ -37,5 +37,7 @@ func main() {
rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc))
cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv("$HOME/.tendermint"))
cmd.Execute()
if err := cmd.Execute(); err != nil {
panic(err)
}
}

View File

@ -16,6 +16,7 @@ type Config struct {
P2P *P2PConfig `mapstructure:"p2p"`
Mempool *MempoolConfig `mapstructure:"mempool"`
Consensus *ConsensusConfig `mapstructure:"consensus"`
TxIndex *TxIndexConfig `mapstructure:"tx_index"`
}
// DefaultConfig returns a default configuration for a Tendermint node
@ -26,6 +27,7 @@ func DefaultConfig() *Config {
P2P: DefaultP2PConfig(),
Mempool: DefaultMempoolConfig(),
Consensus: DefaultConsensusConfig(),
TxIndex: DefaultTxIndexConfig(),
}
}
@ -37,6 +39,7 @@ func TestConfig() *Config {
P2P: TestP2PConfig(),
Mempool: DefaultMempoolConfig(),
Consensus: TestConsensusConfig(),
TxIndex: DefaultTxIndexConfig(),
}
}
@ -93,9 +96,6 @@ type BaseConfig struct {
// so the app can decide if we should keep the connection or not
FilterPeers bool `mapstructure:"filter_peers"` // false
// What indexer to use for transactions
TxIndex string `mapstructure:"tx_index"`
// Database backend: leveldb | memdb
DBBackend string `mapstructure:"db_backend"`
@ -115,7 +115,6 @@ func DefaultBaseConfig() BaseConfig {
ProfListenAddress: "",
FastSync: true,
FilterPeers: false,
TxIndex: "kv",
DBBackend: "leveldb",
DBPath: "data",
}
@ -255,7 +254,7 @@ func TestP2PConfig() *P2PConfig {
return conf
}
// AddrBookFile returns the full path to the address bool
// AddrBookFile returns the full path to the address book
func (p *P2PConfig) AddrBookFile() string {
return rootify(p.AddrBook, p.RootDir)
}
@ -412,6 +411,41 @@ func (c *ConsensusConfig) SetWalFile(walFile string) {
c.walFile = walFile
}
//-----------------------------------------------------------------------------
// TxIndexConfig
// TxIndexConfig defines the configuration for the transaction
// indexer, including tags to index.
type TxIndexConfig struct {
// What indexer to use for transactions
//
// Options:
// 1) "null" (default)
// 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
Indexer string `mapstructure:"indexer"`
// Comma-separated list of tags to index (by default the only tag is tx hash)
//
// It's recommended to index only a subset of tags due to possible memory
// bloat. This, of course, depends on the indexer's DB and the volume of
// transactions.
IndexTags string `mapstructure:"index_tags"`
// When set to true, tells the indexer to index all tags. Note this may not be
// desirable (see the comment above). IndexTags has precedence over
// IndexAllTags (i.e. when both are given, IndexTags will be indexed).
IndexAllTags bool `mapstructure:"index_all_tags"`
}
// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
func DefaultTxIndexConfig() *TxIndexConfig {
return &TxIndexConfig{
Indexer: "kv",
IndexTags: "",
IndexAllTags: false,
}
}
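As a sketch of how an application might tune this (the tag name below is hypothetical; per the mapstructure tags above, the same settings live in `config.toml` under a `[tx_index]` section as `indexer`, `index_tags`, and `index_all_tags`):

```
package main

import (
	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	c := cfg.DefaultConfig() // TxIndex defaults to the "kv" indexer
	// "tx.hash" is a hypothetical tag; IndexTags is a comma-separated list.
	c.TxIndex.IndexTags = "tx.hash"
	c.TxIndex.IndexAllTags = false
	_ = c // pass c to the node constructor in real usage
}
```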
//-----------------------------------------------------------------------------
// Utils

View File

@ -12,8 +12,12 @@ import (
/****** these are for production settings ***********/
func EnsureRoot(rootDir string) {
cmn.EnsureDir(rootDir, 0700)
cmn.EnsureDir(rootDir+"/data", 0700)
if err := cmn.EnsureDir(rootDir, 0700); err != nil {
cmn.PanicSanity(err.Error())
}
if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil {
cmn.PanicSanity(err.Error())
}
configFilePath := path.Join(rootDir, "config.toml")
@ -53,21 +57,23 @@ func ResetTestRoot(testName string) *Config {
rootDir = filepath.Join(rootDir, testName)
// Remove ~/.tendermint_test_bak
if cmn.FileExists(rootDir + "_bak") {
err := os.RemoveAll(rootDir + "_bak")
if err != nil {
if err := os.RemoveAll(rootDir + "_bak"); err != nil {
cmn.PanicSanity(err.Error())
}
}
// Move ~/.tendermint_test to ~/.tendermint_test_bak
if cmn.FileExists(rootDir) {
err := os.Rename(rootDir, rootDir+"_bak")
if err != nil {
if err := os.Rename(rootDir, rootDir+"_bak"); err != nil {
cmn.PanicSanity(err.Error())
}
}
// Create new dir
cmn.EnsureDir(rootDir, 0700)
cmn.EnsureDir(rootDir+"/data", 0700)
if err := cmn.EnsureDir(rootDir, 0700); err != nil {
cmn.PanicSanity(err.Error())
}
if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil {
cmn.PanicSanity(err.Error())
}
configFilePath := path.Join(rootDir, "config.toml")
genesisFilePath := path.Join(rootDir, "genesis.json")

View File

@ -24,7 +24,7 @@ func TestEnsureRoot(t *testing.T) {
// setup temp dir for test
tmpDir, err := ioutil.TempDir("", "config-test")
require.Nil(err)
defer os.RemoveAll(tmpDir)
defer os.RemoveAll(tmpDir) // nolint: errcheck
// create root dir
EnsureRoot(tmpDir)

View File

@ -1,16 +1,17 @@
package consensus
import (
"context"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
crypto "github.com/tendermint/go-crypto"
data "github.com/tendermint/go-wire/data"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/events"
)
func init() {
@ -41,7 +42,43 @@ func TestByzantine(t *testing.T) {
switches[i].SetLogger(p2pLogger.With("validator", i))
}
eventChans := make([]chan interface{}, N)
reactors := make([]p2p.Reactor, N)
for i := 0; i < N; i++ {
if i == 0 {
css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator)
// make byzantine
css[i].decideProposal = func(j int) func(int64, int) {
return func(height int64, round int) {
byzantineDecideProposalFunc(t, height, round, css[j], switches[j])
}
}(i)
css[i].doPrevote = func(height int64, round int) {}
}
eventBus := types.NewEventBus()
eventBus.SetLogger(logger.With("module", "events", "validator", i))
err := eventBus.Start()
require.NoError(t, err)
defer eventBus.Stop()
eventChans[i] = make(chan interface{}, 1)
err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
require.NoError(t, err)
conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states
conR.SetLogger(logger.With("validator", i))
conR.SetEventBus(eventBus)
var conRI p2p.Reactor // nolint: gotype, gosimple
conRI = conR
if i == 0 {
conRI = NewByzantineReactor(conR)
}
reactors[i] = conRI
}
defer func() {
for _, r := range reactors {
if rr, ok := r.(*ByzantineReactor); ok {
@ -51,40 +88,6 @@ func TestByzantine(t *testing.T) {
}
}
}()
eventChans := make([]chan interface{}, N)
eventLogger := logger.With("module", "events")
for i := 0; i < N; i++ {
if i == 0 {
css[i].privValidator = NewByzantinePrivValidator(css[i].privValidator)
// make byzantine
css[i].decideProposal = func(j int) func(int, int) {
return func(height, round int) {
byzantineDecideProposalFunc(t, height, round, css[j], switches[j])
}
}(i)
css[i].doPrevote = func(height, round int) {}
}
eventSwitch := events.NewEventSwitch()
eventSwitch.SetLogger(eventLogger.With("validator", i))
_, err := eventSwitch.Start()
if err != nil {
t.Fatalf("Failed to start switch: %v", err)
}
eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1)
conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states
conR.SetLogger(logger.With("validator", i))
conR.SetEventSwitch(eventSwitch)
var conRI p2p.Reactor
conRI = conR
if i == 0 {
conRI = NewByzantineReactor(conR)
}
reactors[i] = conRI
}
p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
// ignore new switch s, we already made ours
@ -159,7 +162,7 @@ func TestByzantine(t *testing.T) {
//-------------------------------
// byzantine consensus functions
func byzantineDecideProposalFunc(t *testing.T, height, round int, cs *ConsensusState, sw *p2p.Switch) {
func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *ConsensusState, sw *p2p.Switch) {
// byzantine user should create two proposals and try to split the vote.
// Avoid sending on internalMsgQueue and running consensus state.
@ -167,13 +170,17 @@ func byzantineDecideProposalFunc(t *testing.T, height, round int, cs *ConsensusS
block1, blockParts1 := cs.createProposalBlock()
polRound, polBlockID := cs.Votes.POLInfo()
proposal1 := types.NewProposal(height, round, blockParts1.Header(), polRound, polBlockID)
cs.privValidator.SignProposal(cs.state.ChainID, proposal1) // byzantine doesnt err
if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal1); err != nil {
t.Error(err)
}
// Create a new proposal block from state/txs from the mempool.
block2, blockParts2 := cs.createProposalBlock()
polRound, polBlockID = cs.Votes.POLInfo()
proposal2 := types.NewProposal(height, round, blockParts2.Header(), polRound, polBlockID)
cs.privValidator.SignProposal(cs.state.ChainID, proposal2) // byzantine doesnt err
if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal2); err != nil {
t.Error(err)
}
block1Hash := block1.Hash()
block2Hash := block2.Hash()
@ -190,7 +197,7 @@ func byzantineDecideProposalFunc(t *testing.T, height, round int, cs *ConsensusS
}
}
func sendProposalAndParts(height, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
// proposal
msg := &ProposalMessage{Proposal: proposal}
peer.Send(DataChannel, struct{ ConsensusMessage }{msg})
@ -286,12 +293,12 @@ func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote
}
func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) (err error) {
proposal.Signature, err = privVal.Sign(types.SignBytes(chainID, proposal))
proposal.Signature, _ = privVal.Sign(types.SignBytes(chainID, proposal))
return nil
}
func (privVal *ByzantinePrivValidator) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) (err error) {
heartbeat.Signature, err = privVal.Sign(types.SignBytes(chainID, heartbeat))
heartbeat.Signature, _ = privVal.Sign(types.SignBytes(chainID, heartbeat))
return nil
}

View File

@ -1,35 +0,0 @@
package consensus
import (
"github.com/tendermint/tendermint/types"
)
// XXX: WARNING: these functions can halt the consensus as firing events is synchronous.
// Make sure to read off the channels, and in the case of subscribeToEventRespond, to write back on it
// NOTE: if chanCap=0, this blocks on the event being consumed
func subscribeToEvent(evsw types.EventSwitch, receiver, eventID string, chanCap int) chan interface{} {
// listen for event
ch := make(chan interface{}, chanCap)
types.AddListenerForEvent(evsw, receiver, eventID, func(data types.TMEventData) {
ch <- data
})
return ch
}
// NOTE: this blocks on receiving a response after the event is consumed
func subscribeToEventRespond(evsw types.EventSwitch, receiver, eventID string) chan interface{} {
// listen for event
ch := make(chan interface{})
types.AddListenerForEvent(evsw, receiver, eventID, func(data types.TMEventData) {
ch <- data
<-ch
})
return ch
}
func discardFromChan(ch chan interface{}, n int) {
for i := 0; i < n; i++ {
<-ch
}
}

View File

@ -2,6 +2,7 @@ package consensus
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
@ -30,6 +31,10 @@ import (
"github.com/go-kit/kit/log/term"
)
const (
testSubscriber = "test-client"
)
// genesis, chain_id, priv_val
var config *cfg.Config // NOTE: must be reset for each _test.go file
var ensureTimeout = time.Second * 2
@ -49,12 +54,12 @@ func ResetConfig(name string) *cfg.Config {
type validatorStub struct {
Index int // Validator index. NOTE: we don't assume validator set changes.
Height int
Height int64
Round int
types.PrivValidator
}
var testMinPower = 10
var testMinPower int64 = 10
func NewValidatorStub(privValidator types.PrivValidator, valIndex int) *validatorStub {
return &validatorStub{
@ -108,13 +113,13 @@ func incrementRound(vss ...*validatorStub) {
//-------------------------------------------------------------------------------
// Functions for transitioning the consensus state
func startTestRound(cs *ConsensusState, height, round int) {
func startTestRound(cs *ConsensusState, height int64, round int) {
cs.enterNewRound(height, round)
cs.startRoutines(0)
}
// Create proposal block from cs1 but sign it with vs
func decideProposal(cs1 *ConsensusState, vs *validatorStub, height, round int) (proposal *types.Proposal, block *types.Block) {
func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round int) (proposal *types.Proposal, block *types.Block) {
block, blockParts := cs1.createProposalBlock()
if block == nil { // on error
panic("error creating proposal block")
@ -208,11 +213,14 @@ func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lo
// genesis
func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} {
voteCh0 := subscribeToEvent(cs.evsw, "tester", types.EventStringVote(), 1)
voteCh0 := make(chan interface{})
err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote, voteCh0)
if err != nil {
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote))
}
voteCh := make(chan interface{})
go func() {
for {
v := <-voteCh0
for v := range voteCh0 {
vote := v.(types.TMEventData).Unwrap().(types.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
@ -231,8 +239,12 @@ func newConsensusState(state *sm.State, pv types.PrivValidator, app abci.Applica
}
func newConsensusStateWithConfig(thisConfig *cfg.Config, state *sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
// Get BlockStore
blockDB := dbm.NewMemDB()
return newConsensusStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB)
}
func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state *sm.State, pv types.PrivValidator, app abci.Application, blockDB dbm.DB) *ConsensusState {
// Get BlockStore
blockStore := bc.NewBlockStore(blockDB)
// one for mempool, one for consensus
@ -252,10 +264,10 @@ func newConsensusStateWithConfig(thisConfig *cfg.Config, state *sm.State, pv typ
cs.SetLogger(log.TestingLogger())
cs.SetPrivValidator(pv)
evsw := types.NewEventSwitch()
evsw.SetLogger(log.TestingLogger().With("module", "events"))
cs.SetEventSwitch(evsw)
evsw.Start()
eventBus := types.NewEventBus()
eventBus.SetLogger(log.TestingLogger().With("module", "events"))
eventBus.Start()
cs.SetEventBus(eventBus)
return cs
}
@ -267,13 +279,13 @@ func loadPrivValidator(config *cfg.Config) *types.PrivValidatorFS {
return privValidator
}
func fixedConsensusStateDummy() *ConsensusState {
func fixedConsensusStateDummy(config *cfg.Config, logger log.Logger) *ConsensusState {
stateDB := dbm.NewMemDB()
state, _ := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile())
state.SetLogger(log.TestingLogger().With("module", "state"))
state.SetLogger(logger.With("module", "state"))
privValidator := loadPrivValidator(config)
cs := newConsensusState(state, privValidator, dummy.NewDummyApplication())
cs.SetLogger(log.TestingLogger())
cs.SetLogger(logger)
return cs
}
@ -297,7 +309,7 @@ func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) {
//-------------------------------------------------------------------------------
func ensureNoNewStep(stepCh chan interface{}) {
func ensureNoNewStep(stepCh <-chan interface{}) {
timer := time.NewTimer(ensureTimeout)
select {
case <-timer.C:
@ -307,7 +319,7 @@ func ensureNoNewStep(stepCh chan interface{}) {
}
}
func ensureNewStep(stepCh chan interface{}) {
func ensureNewStep(stepCh <-chan interface{}) {
timer := time.NewTimer(ensureTimeout)
select {
case <-timer.C:
@ -360,12 +372,13 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState {
genDoc, privVals := randGenesisDoc(nValidators, false, int64(testMinPower))
genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
css := make([]*ConsensusState, nPeers)
logger := consensusLogger()
for i := 0; i < nPeers; i++ {
db := dbm.NewMemDB() // each state needs its own db
state, _ := sm.MakeGenesisState(db, genDoc)
state.SetLogger(log.TestingLogger().With("module", "state"))
state.SetLogger(logger.With("module", "state", "validator", i))
state.Save()
thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i))
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
@ -382,7 +395,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
app.InitChain(abci.RequestInitChain{Validators: vals})
css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app)
css[i].SetLogger(log.TestingLogger())
css[i].SetLogger(logger.With("validator", i))
css[i].SetTimeoutTicker(tickerFunc())
}
return css
@ -451,12 +464,12 @@ type mockTicker struct {
fired bool
}
func (m *mockTicker) Start() (bool, error) {
return true, nil
func (m *mockTicker) Start() error {
return nil
}
func (m *mockTicker) Stop() bool {
return true
func (m *mockTicker) Stop() error {
return nil
}
func (m *mockTicker) ScheduleTimeout(ti timeoutInfo) {

View File

@ -2,13 +2,17 @@ package consensus
import (
"encoding/binary"
"fmt"
"testing"
"time"
abci "github.com/tendermint/abci/types"
"github.com/tendermint/tendermint/types"
"github.com/stretchr/testify/assert"
"github.com/tendermint/abci/example/code"
abci "github.com/tendermint/abci/types"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tendermint/types"
)
func init() {
@ -22,16 +26,15 @@ func TestNoProgressUntilTxsAvailable(t *testing.T) {
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
cs.mempool.EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
startTestRound(cs, height, round)
ensureNewStep(newBlockCh) // first block gets committed
ensureNoNewStep(newBlockCh)
deliverTxsRange(cs, 0, 2)
deliverTxsRange(cs, 0, 1)
ensureNewStep(newBlockCh) // commit txs
ensureNewStep(newBlockCh) // commit updated app hash
ensureNoNewStep(newBlockCh)
}
func TestProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
@ -41,7 +44,7 @@ func TestProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
cs.mempool.EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
startTestRound(cs, height, round)
ensureNewStep(newBlockCh) // first block gets committed
@ -56,9 +59,9 @@ func TestProgressInHigherRound(t *testing.T) {
cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
cs.mempool.EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
newRoundCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewRound(), 1)
timeoutCh := subscribeToEvent(cs.evsw, "tester", types.EventStringTimeoutPropose(), 1)
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)
timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose)
cs.setProposal = func(proposal *types.Proposal) error {
if cs.Height == 2 && cs.Round == 0 {
// dont set the proposal in round 0 so we timeout and
@ -73,7 +76,7 @@ func TestProgressInHigherRound(t *testing.T) {
ensureNewStep(newRoundCh) // first round at first height
ensureNewStep(newBlockCh) // first block gets committed
ensureNewStep(newRoundCh) // first round at next height
deliverTxsRange(cs, 0, 2) // we deliver txs, but dont set a proposal so we get the next round
deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round
<-timeoutCh
ensureNewStep(newRoundCh) // wait for the next round
ensureNewStep(newBlockCh) // now we can commit the block
@ -92,11 +95,10 @@ func deliverTxsRange(cs *ConsensusState, start, end int) {
}
func TestTxConcurrentWithCommit(t *testing.T) {
state, privVals := randGenesisState(1, false, 10)
cs := newConsensusState(state, privVals[0], NewCounterApplication())
height, round := cs.Height, cs.Round
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
NTxs := 10000
go deliverTxsRange(cs, 0, NTxs)
@ -121,41 +123,43 @@ func TestRmBadTx(t *testing.T) {
// increment the counter by 1
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(0))
app.DeliverTx(txBytes)
app.Commit()
ch := make(chan struct{})
cbCh := make(chan struct{})
resDeliver := app.DeliverTx(txBytes)
assert.False(t, resDeliver.IsErr(), cmn.Fmt("expected no error. got %v", resDeliver))
resCommit := app.Commit()
assert.False(t, resCommit.IsErr(), cmn.Fmt("expected no error. got %v", resCommit))
emptyMempoolCh := make(chan struct{})
checkTxRespCh := make(chan struct{})
go func() {
// Try to send the tx through the mempool.
// CheckTx should not err, but the app should return a bad abci code
// and the tx should get removed from the pool
err := cs.mempool.CheckTx(txBytes, func(r *abci.Response) {
if r.GetCheckTx().Code != abci.CodeType_BadNonce {
if r.GetCheckTx().Code != code.CodeTypeBadNonce {
t.Fatalf("expected checktx to return bad nonce, got %v", r)
}
cbCh <- struct{}{}
checkTxRespCh <- struct{}{}
})
if err != nil {
t.Fatal("Error after CheckTx: %v", err)
t.Fatalf("Error after CheckTx: %v", err)
}
// check for the tx
for {
time.Sleep(time.Second)
txs := cs.mempool.Reap(1)
if len(txs) == 0 {
ch <- struct{}{}
return
emptyMempoolCh <- struct{}{}
}
time.Sleep(10 * time.Millisecond)
}
}()
// Wait until the tx returns
ticker := time.After(time.Second * 5)
select {
case <-cbCh:
case <-checkTxRespCh:
// success
case <-ticker:
t.Fatalf("Timed out waiting for tx to return")
@ -164,7 +168,7 @@ func TestRmBadTx(t *testing.T) {
// Wait until the tx is removed
ticker = time.After(time.Second * 5)
select {
case <-ch:
case <-emptyMempoolCh:
// success
case <-ticker:
t.Fatalf("Timed out waiting for tx to be removed")
@ -187,33 +191,41 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
return abci.ResponseInfo{Data: cmn.Fmt("txs:%v", app.txCount)}
}
func (app *CounterApplication) DeliverTx(tx []byte) abci.Result {
return runTx(tx, &app.txCount)
func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx {
txValue := txAsUint64(tx)
if txValue != uint64(app.txCount) {
return abci.ResponseDeliverTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
}
app.txCount += 1
return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
}
func (app *CounterApplication) CheckTx(tx []byte) abci.Result {
return runTx(tx, &app.mempoolTxCount)
func (app *CounterApplication) CheckTx(tx []byte) abci.ResponseCheckTx {
txValue := txAsUint64(tx)
if txValue != uint64(app.mempoolTxCount) {
return abci.ResponseCheckTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue)}
}
app.mempoolTxCount += 1
return abci.ResponseCheckTx{Code: code.CodeTypeOK}
}
func runTx(tx []byte, countPtr *int) abci.Result {
count := *countPtr
func txAsUint64(tx []byte) uint64 {
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(tx):], tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue != uint64(count) {
return abci.ErrBadNonce.AppendLog(cmn.Fmt("Invalid nonce. Expected %v, got %v", count, txValue))
}
*countPtr += 1
return abci.OK
return binary.BigEndian.Uint64(tx8)
}
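
txAsUint64 left-pads the raw transaction into an 8-byte buffer before decoding it big-endian, so a short tx is read as its numeric value. A self-contained check of that behavior (stdlib only):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// txAsUint64 mirrors the helper above: copy the tx into the low-order
// bytes of an 8-byte buffer, then decode the whole buffer big-endian.
func txAsUint64(tx []byte) uint64 {
	tx8 := make([]byte, 8)
	copy(tx8[len(tx8)-len(tx):], tx)
	return binary.BigEndian.Uint64(tx8)
}

func main() {
	fmt.Println(txAsUint64([]byte{0x01}))       // 1
	fmt.Println(txAsUint64([]byte{0x01, 0x00})) // 256
}
```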
func (app *CounterApplication) Commit() abci.Result {
func (app *CounterApplication) Commit() abci.ResponseCommit {
app.mempoolTxCount = app.txCount
if app.txCount == 0 {
return abci.OK
return abci.ResponseCommit{Code: code.CodeTypeOK}
} else {
hash := make([]byte, 8)
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
return abci.NewResultOK(hash, "")
return abci.ResponseCommit{Code: code.CodeTypeOK, Data: hash}
}
}

View File

@ -2,12 +2,14 @@ package consensus
import (
"bytes"
"errors"
"context"
"fmt"
"reflect"
"sync"
"time"
"github.com/pkg/errors"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
@ -34,10 +36,10 @@ type ConsensusReactor struct {
p2p.BaseReactor // BaseService + p2p.Switch
conS *ConsensusState
evsw types.EventSwitch
mtx sync.RWMutex
fastSync bool
eventBus *types.EventBus
}
// NewConsensusReactor returns a new ConsensusReactor with the given consensusState.
@ -53,18 +55,22 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *Consens
// OnStart implements BaseService.
func (conR *ConsensusReactor) OnStart() error {
conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync())
conR.BaseReactor.OnStart()
if err := conR.BaseReactor.OnStart(); err != nil {
return err
}
// callbacks for broadcasting new steps and votes to peers
// upon their respective events (ie. uses evsw)
conR.registerEventCallbacks()
err := conR.startBroadcastRoutine()
if err != nil {
return err
}
if !conR.FastSync() {
_, err := conR.conS.Start()
err := conR.conS.Start()
if err != nil {
return err
}
}
return nil
}
@ -91,31 +97,34 @@ func (conR *ConsensusReactor) SwitchToConsensus(state *sm.State, blocksSynced in
// don't bother with the WAL if we fast synced
conR.conS.doWALCatchup = false
}
conR.conS.Start()
err := conR.conS.Start()
if err != nil {
conR.Logger.Error("Error starting conS", "err", err)
}
}
// GetChannels implements Reactor
func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
// TODO optimize
return []*p2p.ChannelDescriptor{
&p2p.ChannelDescriptor{
{
ID: StateChannel,
Priority: 5,
SendQueueCapacity: 100,
},
&p2p.ChannelDescriptor{
{
ID: DataChannel, // maybe split between gossiping current block and catchup stuff
Priority: 10, // once we gossip the whole block there's nothing left to send until next height or round
SendQueueCapacity: 100,
RecvBufferCapacity: 50 * 4096,
},
&p2p.ChannelDescriptor{
{
ID: VoteChannel,
Priority: 5,
SendQueueCapacity: 100,
RecvBufferCapacity: 100 * 100,
},
&p2p.ChannelDescriptor{
{
ID: VoteSetBitsChannel,
Priority: 1,
SendQueueCapacity: 2,
@ -306,10 +315,10 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
}
}
// SetEventSwitch implements events.Eventable
func (conR *ConsensusReactor) SetEventSwitch(evsw types.EventSwitch) {
conR.evsw = evsw
conR.conS.SetEventSwitch(evsw)
// SetEventBus sets event bus.
func (conR *ConsensusReactor) SetEventBus(b *types.EventBus) {
conR.eventBus = b
conR.conS.SetEventBus(b)
}
// FastSync returns whether the consensus reactor is in fast-sync mode.
@ -321,24 +330,60 @@ func (conR *ConsensusReactor) FastSync() bool {
//--------------------------------------
// Listens for new steps and votes,
// broadcasting the result to peers
func (conR *ConsensusReactor) registerEventCallbacks() {
// startBroadcastRoutine subscribes to new round steps, votes and proposal
// heartbeats using the event bus and starts a goroutine to broadcast events
// to peers upon receiving them.
func (conR *ConsensusReactor) startBroadcastRoutine() error {
const subscriber = "consensus-reactor"
ctx := context.Background()
types.AddListenerForEvent(conR.evsw, "conR", types.EventStringNewRoundStep(), func(data types.TMEventData) {
rs := data.Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
conR.broadcastNewRoundStep(rs)
})
// new round steps
stepsCh := make(chan interface{})
err := conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, stepsCh)
if err != nil {
return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryNewRoundStep)
}
types.AddListenerForEvent(conR.evsw, "conR", types.EventStringVote(), func(data types.TMEventData) {
edv := data.Unwrap().(types.EventDataVote)
conR.broadcastHasVoteMessage(edv.Vote)
})
// votes
votesCh := make(chan interface{})
err = conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryVote, votesCh)
if err != nil {
return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryVote)
}
types.AddListenerForEvent(conR.evsw, "conR", types.EventStringProposalHeartbeat(), func(data types.TMEventData) {
heartbeat := data.Unwrap().(types.EventDataProposalHeartbeat)
conR.broadcastProposalHeartbeatMessage(heartbeat)
})
// proposal heartbeats
heartbeatsCh := make(chan interface{})
err = conR.eventBus.Subscribe(ctx, subscriber, types.EventQueryProposalHeartbeat, heartbeatsCh)
if err != nil {
return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, types.EventQueryProposalHeartbeat)
}
go func() {
for {
select {
case data, ok := <-stepsCh:
if ok { // a receive from a closed channel returns the zero value immediately
edrs := data.(types.TMEventData).Unwrap().(types.EventDataRoundState)
conR.broadcastNewRoundStep(edrs.RoundState.(*cstypes.RoundState))
}
case data, ok := <-votesCh:
if ok {
edv := data.(types.TMEventData).Unwrap().(types.EventDataVote)
conR.broadcastHasVoteMessage(edv.Vote)
}
case data, ok := <-heartbeatsCh:
if ok {
edph := data.(types.TMEventData).Unwrap().(types.EventDataProposalHeartbeat)
conR.broadcastProposalHeartbeatMessage(edph)
}
case <-conR.Quit:
conR.eventBus.UnsubscribeAll(ctx, subscriber)
return
}
}
}()
return nil
}
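
Every subscriber introduced by this commit follows the same shape: make a channel, Subscribe it to a query on the event bus, drain it in a goroutine, and unsubscribe on shutdown. The helper below is a sketch of that pattern, not part of this commit; it assumes only the EventBus calls exercised above plus the query type from `tmlibs/pubsub`.

```go
package consensus // sketch only; not part of this commit

import (
	"context"

	"github.com/pkg/errors"
	tmpubsub "github.com/tendermint/tmlibs/pubsub"

	"github.com/tendermint/tendermint/types"
)

// subscribeAndDrain is a hypothetical helper: subscribe a fresh channel to a
// query, drain it in a goroutine, and unsubscribe when quit closes.
func subscribeAndDrain(bus *types.EventBus, subscriber string, q tmpubsub.Query,
	handle func(data interface{}), quit <-chan struct{}) error {
	ctx := context.Background()
	ch := make(chan interface{})
	if err := bus.Subscribe(ctx, subscriber, q, ch); err != nil {
		return errors.Wrapf(err, "failed to subscribe %s to %s", subscriber, q)
	}
	go func() {
		for {
			select {
			case data, ok := <-ch:
				if ok { // a closed channel yields the zero value immediately
					handle(data)
				}
			case <-quit:
				bus.Unsubscribe(ctx, subscriber, q) // nolint: errcheck
				return
			}
		}
	}()
	return nil
}
```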
func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(heartbeat types.EventDataProposalHeartbeat) {
@ -350,7 +395,6 @@ func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(heartbeat types.
}
func (conR *ConsensusReactor) broadcastNewRoundStep(rs *cstypes.RoundState) {
nrsMsg, csMsg := makeRoundStepMessages(rs)
if nrsMsg != nil {
conR.Switch.Broadcast(StateChannel, struct{ ConsensusMessage }{nrsMsg})
@ -448,6 +492,18 @@ OUTER_LOOP:
// If the peer is on a previous height, help catch up.
if (0 < prs.Height) && (prs.Height < rs.Height) {
heightLogger := logger.With("height", prs.Height)
// if we never received the commit message from the peer, the block parts won't be initialized
if prs.ProposalBlockParts == nil {
blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
if blockMeta == nil {
cmn.PanicCrisis(cmn.Fmt("Failed to load block %d when blockStore is at %d",
prs.Height, conR.conS.blockStore.Height()))
}
ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader)
// continue the loop since prs is a copy and not affected by this initialization
continue OUTER_LOOP
}
conR.gossipDataForCatchup(heightLogger, rs, prs, ps, peer)
continue OUTER_LOOP
}
@ -527,9 +583,11 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
Round: prs.Round, // Not our height, so it doesn't matter.
Part: part,
}
logger.Debug("Sending block part for catchup", "round", prs.Round)
logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index)
if peer.Send(DataChannel, struct{ ConsensusMessage }{msg}) {
ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
} else {
logger.Debug("Sending block part for catchup failed")
}
return
} else {
@ -803,7 +861,7 @@ func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState {
// GetHeight returns an atomic snapshot of the PeerRoundState's height
// used by the mempool to ensure peers are caught up before broadcasting new txs
func (ps *PeerState) GetHeight() int {
func (ps *PeerState) GetHeight() int64 {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return ps.PeerRoundState.Height
@ -828,8 +886,21 @@ func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {
ps.ProposalPOL = nil // Nil until ProposalPOLMessage received.
}
// InitProposalBlockParts initializes the peer's proposal block parts header and bit array.
func (ps *PeerState) InitProposalBlockParts(partsHeader types.PartSetHeader) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if ps.ProposalBlockParts != nil {
return
}
ps.ProposalBlockPartsHeader = partsHeader
ps.ProposalBlockParts = cmn.NewBitArray(partsHeader.Total)
}
// SetHasProposalBlockPart sets the given block part index as known for the peer.
func (ps *PeerState) SetHasProposalBlockPart(height int, round int, index int) {
func (ps *PeerState) SetHasProposalBlockPart(height int64, round int, index int) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
@ -880,7 +951,7 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote
return nil, false
}
func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *cmn.BitArray {
func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.BitArray {
if !types.IsVoteTypeValid(type_) {
return nil
}
@ -927,7 +998,7 @@ func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *cmn.BitArra
}
// 'round': A round for which we have a +2/3 commit.
func (ps *PeerState) ensureCatchupCommitRound(height, round int, numValidators int) {
func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValidators int) {
if ps.Height != height {
return
}
@ -953,13 +1024,13 @@ func (ps *PeerState) ensureCatchupCommitRound(height, round int, numValidators i
// what votes this peer has received.
// NOTE: It's important to make sure that numValidators actually matches
// what the node sees as the number of validators for height.
func (ps *PeerState) EnsureVoteBitArrays(height int, numValidators int) {
func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
ps.ensureVoteBitArrays(height, numValidators)
}
func (ps *PeerState) ensureVoteBitArrays(height int, numValidators int) {
func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) {
if ps.Height == height {
if ps.Prevotes == nil {
ps.Prevotes = cmn.NewBitArray(numValidators)
@ -988,9 +1059,9 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex)
}
func (ps *PeerState) setHasVote(height int, round int, type_ byte, index int) {
logger := ps.logger.With("peerRound", ps.Round, "height", height, "round", round)
logger.Debug("setHasVote(LastCommit)", "lastCommit", ps.LastCommit, "index", index)
func (ps *PeerState) setHasVote(height int64, round int, type_ byte, index int) {
logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.Height, ps.Round), "H/R", cmn.Fmt("%d/%d", height, round))
logger.Debug("setHasVote", "type", type_, "index", index)
// NOTE: some may be nil BitArrays -> no side effects.
psVotes := ps.getVoteBitArray(height, round, type_)
@ -1182,7 +1253,7 @@ func DecodeMessage(bz []byte) (msgType byte, msg ConsensusMessage, err error) {
// NewRoundStepMessage is sent for every step taken in the ConsensusState.
// For every height/round/step transition
type NewRoundStepMessage struct {
Height int
Height int64
Round int
Step cstypes.RoundStepType
SecondsSinceStartTime int
@ -1199,7 +1270,7 @@ func (m *NewRoundStepMessage) String() string {
// CommitStepMessage is sent when a block is committed.
type CommitStepMessage struct {
Height int
Height int64
BlockPartsHeader types.PartSetHeader
BlockParts *cmn.BitArray
}
@ -1225,7 +1296,7 @@ func (m *ProposalMessage) String() string {
// ProposalPOLMessage is sent when a previous proposal is re-proposed.
type ProposalPOLMessage struct {
Height int
Height int64
ProposalPOLRound int
ProposalPOL *cmn.BitArray
}
@ -1239,7 +1310,7 @@ func (m *ProposalPOLMessage) String() string {
// BlockPartMessage is sent when gossiping a piece of the proposed block.
type BlockPartMessage struct {
Height int
Height int64
Round int
Part *types.Part
}
@ -1265,7 +1336,7 @@ func (m *VoteMessage) String() string {
// HasVoteMessage is sent to indicate that a particular vote has been received.
type HasVoteMessage struct {
Height int
Height int64
Round int
Type byte
Index int
@ -1280,7 +1351,7 @@ func (m *HasVoteMessage) String() string {
// VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes.
type VoteSetMaj23Message struct {
Height int
Height int64
Round int
Type byte
BlockID types.BlockID
@ -1295,7 +1366,7 @@ func (m *VoteSetMaj23Message) String() string {
// VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the BlockID.
type VoteSetBitsMessage struct {
Height int
Height int64
Round int
Type byte
BlockID types.BlockID

View File

@ -1,17 +1,21 @@
package consensus
import (
"context"
"fmt"
"os"
"runtime/pprof"
"sync"
"testing"
"time"
"github.com/tendermint/abci/example/dummy"
"github.com/tendermint/tmlibs/events"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
"github.com/stretchr/testify/require"
)
func init() {
@ -21,27 +25,30 @@ func init() {
//----------------------------------------------
// in-process testnets
func startConsensusNet(t *testing.T, css []*ConsensusState, N int, subscribeEventRespond bool) ([]*ConsensusReactor, []chan interface{}) {
func startConsensusNet(t *testing.T, css []*ConsensusState, N int) ([]*ConsensusReactor, []chan interface{}, []*types.EventBus) {
reactors := make([]*ConsensusReactor, N)
eventChans := make([]chan interface{}, N)
eventBuses := make([]*types.EventBus, N)
logger := consensusLogger()
for i := 0; i < N; i++ {
/*thisLogger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
if err != nil { t.Fatal(err)}*/
thisLogger := logger
reactors[i] = NewConsensusReactor(css[i], true) // so we dont start the consensus states
reactors[i].SetLogger(logger.With("validator", i))
reactors[i].conS.SetLogger(thisLogger.With("validator", i))
reactors[i].SetLogger(thisLogger.With("validator", i))
eventSwitch := events.NewEventSwitch()
eventSwitch.SetLogger(logger.With("module", "events", "validator", i))
_, err := eventSwitch.Start()
if err != nil {
t.Fatalf("Failed to start switch: %v", err)
}
eventBuses[i] = types.NewEventBus()
eventBuses[i].SetLogger(thisLogger.With("module", "events", "validator", i))
err := eventBuses[i].Start()
require.NoError(t, err)
reactors[i].SetEventSwitch(eventSwitch)
if subscribeEventRespond {
eventChans[i] = subscribeToEventRespond(eventSwitch, "tester", types.EventStringNewBlock())
} else {
eventChans[i] = subscribeToEvent(eventSwitch, "tester", types.EventStringNewBlock(), 1)
}
reactors[i].SetEventBus(eventBuses[i])
eventChans[i] = make(chan interface{}, 1)
err = eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i])
require.NoError(t, err)
}
// make connected switches and start all reactors
p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
@ -52,25 +59,29 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int, subscribeEven
// now that everyone is connected, start the state machines
// If we started the state machines before everyone was connected,
// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors
// TODO: is this still true with new pubsub?
for i := 0; i < N; i++ {
s := reactors[i].conS.GetState()
reactors[i].SwitchToConsensus(s, 0)
}
return reactors, eventChans
return reactors, eventChans, eventBuses
}
func stopConsensusNet(reactors []*ConsensusReactor) {
func stopConsensusNet(reactors []*ConsensusReactor, eventBuses []*types.EventBus) {
for _, r := range reactors {
r.Switch.Stop()
}
for _, b := range eventBuses {
b.Stop()
}
}
// Ensure a testnet makes blocks
func TestReactor(t *testing.T) {
N := 4
css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
reactors, eventChans := startConsensusNet(t, css, N, false)
defer stopConsensusNet(reactors)
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(reactors, eventBuses)
// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
@ -85,11 +96,14 @@ func TestReactorProposalHeartbeats(t *testing.T) {
func(c *cfg.Config) {
c.Consensus.CreateEmptyBlocks = false
})
reactors, eventChans := startConsensusNet(t, css, N, false)
defer stopConsensusNet(reactors)
reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
defer stopConsensusNet(reactors, eventBuses)
heartbeatChans := make([]chan interface{}, N)
var err error
for i := 0; i < N; i++ {
heartbeatChans[i] = subscribeToEvent(css[i].evsw, "tester", types.EventStringProposalHeartbeat(), 1)
heartbeatChans[i] = make(chan interface{}, 1)
err = eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryProposalHeartbeat, heartbeatChans[i])
require.NoError(t, err)
}
// wait till everyone sends a proposal heartbeat
timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) {
@ -98,7 +112,9 @@ func TestReactorProposalHeartbeats(t *testing.T) {
}, css)
// send a tx
css[3].mempool.CheckTx([]byte{1, 2, 3}, nil)
if err := css[3].mempool.CheckTx([]byte{1, 2, 3}, nil); err != nil {
// a CheckTx error is tolerated here; the tx may legitimately be rejected
//t.Fatal(err)
}
// wait till everyone makes the first new block
timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) {
@ -113,8 +129,8 @@ func TestReactorProposalHeartbeats(t *testing.T) {
func TestVotingPowerChange(t *testing.T) {
nVals := 4
css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentDummy)
reactors, eventChans := startConsensusNet(t, css, nVals, true)
defer stopConsensusNet(reactors)
reactors, eventChans, eventBuses := startConsensusNet(t, css, nVals)
defer stopConsensusNet(reactors, eventBuses)
// map of active validators
activeVals := make(map[string]struct{})
@ -125,7 +141,6 @@ func TestVotingPowerChange(t *testing.T) {
// wait till everyone makes block 1
timeoutWaitGroup(t, nVals, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
eventChans[j] <- struct{}{}
wg.Done()
}, css)
@ -174,8 +189,9 @@ func TestValidatorSetChanges(t *testing.T) {
nPeers := 7
nVals := 4
css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentDummy)
reactors, eventChans := startConsensusNet(t, css, nPeers, true)
defer stopConsensusNet(reactors)
reactors, eventChans, eventBuses := startConsensusNet(t, css, nPeers)
defer stopConsensusNet(reactors, eventBuses)
// map of active validators
activeVals := make(map[string]struct{})
@ -186,7 +202,6 @@ func TestValidatorSetChanges(t *testing.T) {
// wait till everyone makes block 1
timeoutWaitGroup(t, nPeers, func(wg *sync.WaitGroup, j int) {
<-eventChans[j]
eventChans[j] <- struct{}{}
wg.Done()
}, css)
@ -194,7 +209,7 @@ func TestValidatorSetChanges(t *testing.T) {
t.Log("---------------------------- Testing adding one validator")
newValidatorPubKey1 := css[nVals].privValidator.GetPubKey()
newValidatorTx1 := dummy.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), uint64(testMinPower))
newValidatorTx1 := dummy.MakeValSetChangeTx(newValidatorPubKey1.Bytes(), testMinPower)
// wait till everyone makes block 2
// ensure the commit includes all validators
@ -214,7 +229,7 @@ func TestValidatorSetChanges(t *testing.T) {
// wait till everyone makes block 5
// it includes the commit for block 4, which should have the updated validator set
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css)
//---------------------------------------------------------------------------
t.Log("---------------------------- Testing changing the voting power of one validator")
@ -226,7 +241,7 @@ func TestValidatorSetChanges(t *testing.T) {
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css)
if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Errorf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[nVals].GetRoundState().LastValidators.TotalVotingPower())
@ -236,17 +251,17 @@ func TestValidatorSetChanges(t *testing.T) {
t.Log("---------------------------- Testing adding two validators at once")
newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey()
newValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), uint64(testMinPower))
newValidatorTx2 := dummy.MakeValSetChangeTx(newValidatorPubKey2.Bytes(), testMinPower)
newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey()
newValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), uint64(testMinPower))
newValidatorTx3 := dummy.MakeValSetChangeTx(newValidatorPubKey3.Bytes(), testMinPower)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css)
//---------------------------------------------------------------------------
t.Log("---------------------------- Testing removing two validators at once")
@ -259,7 +274,7 @@ func TestValidatorSetChanges(t *testing.T) {
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
delete(activeVals, string(newValidatorPubKey2.Address()))
delete(activeVals, string(newValidatorPubKey3.Address()))
waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css)
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css)
}
// Check we can make blocks with skip_timeout_commit=false
@ -271,8 +286,8 @@ func TestReactorWithTimeoutCommit(t *testing.T) {
css[i].config.SkipTimeoutCommit = false
}
reactors, eventChans := startConsensusNet(t, css, N-1, false)
defer stopConsensusNet(reactors)
reactors, eventChans, eventBuses := startConsensusNet(t, css, N-1)
defer stopConsensusNet(reactors, eventBuses)
// wait till everyone makes the first new block
timeoutWaitGroup(t, N-1, func(wg *sync.WaitGroup, j int) {
@ -283,19 +298,50 @@ func TestReactorWithTimeoutCommit(t *testing.T) {
func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) {
timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) {
newBlockI := <-eventChans[j]
defer wg.Done()
newBlockI, ok := <-eventChans[j]
if !ok {
return
}
newBlock := newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block
t.Logf("[WARN] Got block height=%v validator=%v", newBlock.Height, j)
t.Logf("Got block height=%v validator=%v", newBlock.Height, j)
err := validateBlock(newBlock, activeVals)
if err != nil {
t.Fatal(err)
}
for _, tx := range txs {
css[j].mempool.CheckTx(tx, nil)
if err = css[j].mempool.CheckTx(tx, nil); err != nil {
t.Fatal(err)
}
}
}, css)
}
func waitForBlockWithUpdatedValsAndValidateIt(t *testing.T, n int, updatedVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState) {
timeoutWaitGroup(t, n, func(wg *sync.WaitGroup, j int) {
defer wg.Done()
var newBlock *types.Block
LOOP:
for {
newBlockI, ok := <-eventChans[j]
if !ok {
return
}
newBlock = newBlockI.(types.TMEventData).Unwrap().(types.EventDataNewBlock).Block
if newBlock.LastCommit.Size() == len(updatedVals) {
t.Logf("Block with new validators height=%v validator=%v", newBlock.Height, j)
break LOOP
} else {
t.Logf("Block with no new validators height=%v validator=%v. Skipping...", newBlock.Height, j)
}
}
eventChans[j] <- struct{}{}
wg.Done()
err := validateBlock(newBlock, updatedVals)
if err != nil {
t.Fatal(err)
}
}, css)
}
@ -326,15 +372,20 @@ func timeoutWaitGroup(t *testing.T, n int, f func(*sync.WaitGroup, int), css []*
close(done)
}()
// we're running many nodes in-process, possibly in a virtual machine,
// and spewing debug messages - making a block could take a while,
timeout := time.Second * 60
select {
case <-done:
case <-time.After(time.Second * 10):
case <-time.After(timeout):
for i, cs := range css {
fmt.Println("#################")
fmt.Println("Validator", i)
fmt.Println(cs.GetRoundState())
fmt.Println("")
t.Log("#################")
t.Log("Validator", i)
t.Log(cs.GetRoundState())
t.Log("")
}
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
panic("Timed out waiting for all validators to commit a block")
}
}
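
The structure of timeoutWaitGroup (run one function per node, close a done channel when the WaitGroup drains, select against a deadline) is a reusable idiom. Below is a self-contained version of the core pattern, stripped of the consensus-specific diagnostics; unlike the original, the wrapper calls Done itself rather than handing the WaitGroup to f.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// waitAllOrTimeout runs f(i) for i in [0,n) concurrently and returns
// false if the calls do not all finish within d.
func waitAllOrTimeout(n int, d time.Duration, f func(i int)) bool {
	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(i int) {
			defer wg.Done()
			f(i)
		}(i)
	}

	// close done once every goroutine has finished
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	select {
	case <-done:
		return true
	case <-time.After(d):
		return false
	}
}

func main() {
	ok := waitAllOrTimeout(4, time.Second, func(i int) {
		time.Sleep(10 * time.Millisecond)
	})
	fmt.Println("all finished:", ok) // all finished: true
}
```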

View File

@ -7,12 +7,12 @@ import (
"hash/crc32"
"io"
"reflect"
"strconv"
"strings"
//"strconv"
//"strings"
"time"
abci "github.com/tendermint/abci/types"
auto "github.com/tendermint/tmlibs/autofile"
//auto "github.com/tendermint/tmlibs/autofile"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
@ -90,8 +90,7 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan
// replay only those messages since the last block.
// timeoutRoutine should run concurrently to read off tickChan
func (cs *ConsensusState) catchupReplay(csHeight int) error {
func (cs *ConsensusState) catchupReplay(csHeight int64) error {
// set replayMode
cs.replayMode = true
defer func() { cs.replayMode = false }()
@ -99,16 +98,21 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
// Ensure that ENDHEIGHT for this height doesn't exist
// NOTE: This is just a sanity check. As far as we know things work fine without it,
// and Handshake could reuse ConsensusState if it weren't for this check (since we can crash after writing ENDHEIGHT).
gr, found, err := cs.wal.SearchForEndHeight(uint64(csHeight))
gr, found, err := cs.wal.SearchForEndHeight(csHeight)
if err != nil {
return err
}
if gr != nil {
gr.Close()
if err := gr.Close(); err != nil {
return err
}
}
if found {
return errors.New(cmn.Fmt("WAL should not contain #ENDHEIGHT %d.", csHeight))
return fmt.Errorf("WAL should not contain #ENDHEIGHT %d.", csHeight)
}
// Search for last height marker
gr, found, err = cs.wal.SearchForEndHeight(uint64(csHeight - 1))
gr, found, err = cs.wal.SearchForEndHeight(csHeight - 1)
if err == io.EOF {
cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", csHeight-1)
} else if err != nil {
@ -117,7 +121,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
if !found {
return errors.New(cmn.Fmt("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1))
}
defer gr.Close()
defer gr.Close() // nolint: errcheck
cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight)
@ -146,7 +150,8 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
// Parses marker lines of the form:
// #ENDHEIGHT: 12345
func makeHeightSearchFunc(height int) auto.SearchFunc {
/*
func makeHeightSearchFunc(height int64) auto.SearchFunc {
return func(line string) (int, error) {
line = strings.TrimRight(line, "\n")
parts := strings.Split(line, " ")
@ -165,7 +170,7 @@ func makeHeightSearchFunc(height int) auto.SearchFunc {
return -1, nil
}
}
}
}*/
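
The search function above is commented out in this commit (the WAL's own SearchForEndHeight covers the lookup), but the marker format is still useful when inspecting WAL files by hand. Below is a standalone parser for those lines, assuming the exact `#ENDHEIGHT: 12345` shape shown in the comment.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseEndHeight extracts the height from a "#ENDHEIGHT: 12345" marker
// line, returning ok=false for anything else.
func parseEndHeight(line string) (height int64, ok bool) {
	line = strings.TrimRight(line, "\n")
	parts := strings.Split(line, " ")
	if len(parts) != 2 || parts[0] != "#ENDHEIGHT:" {
		return 0, false
	}
	h, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return 0, false
	}
	return h, true
}

func main() {
	fmt.Println(parseEndHeight("#ENDHEIGHT: 12345\n")) // 12345 true
	fmt.Println(parseEndHeight("not a marker"))        // 0 false
}
```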
//----------------------------------------------
// Recover from failure during block processing
@ -200,7 +205,10 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
return errors.New(cmn.Fmt("Error calling Info: %v", err))
}
blockHeight := int(res.LastBlockHeight) // XXX: beware overflow
blockHeight := int64(res.LastBlockHeight)
if blockHeight < 0 {
return fmt.Errorf("Got a negative last block height (%d) from the app", blockHeight)
}
appHash := res.LastBlockAppHash
h.logger.Info("ABCI Handshake", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
@ -222,7 +230,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
// Replay all blocks since appBlockHeight and ensure the result matches the current state.
// Returns the final AppHash or an error
func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp proxy.AppConns) ([]byte, error) {
func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) {
storeBlockHeight := h.store.Height()
stateBlockHeight := h.state.LastBlockHeight
@ -231,7 +239,9 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
if appBlockHeight == 0 {
validators := types.TM2PB.Validators(h.state.Validators)
proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators})
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}); err != nil {
return nil, err
}
}
// First handle edge cases and constraints on the storeBlockHeight
@ -295,7 +305,7 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p
return nil, nil
}
func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int, mutateState bool) ([]byte, error) {
func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int64, mutateState bool) ([]byte, error) {
// App is further behind than it should be, so we need to replay blocks.
// We replay all blocks from appBlockHeight+1.
//
@ -331,14 +341,13 @@ func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, store
}
// ApplyBlock on the proxyApp with the last block.
func (h *Handshaker) replayBlock(height int, proxyApp proxy.AppConnConsensus) ([]byte, error) {
func (h *Handshaker) replayBlock(height int64, proxyApp proxy.AppConnConsensus) ([]byte, error) {
mempool := types.MockMempool{}
var eventCache types.Fireable // nil
block := h.store.LoadBlock(height)
meta := h.store.LoadBlockMeta(height)
if err := h.state.ApplyBlock(eventCache, proxyApp, block, meta.BlockID.PartsHeader, mempool); err != nil {
if err := h.state.ApplyBlock(types.NopEventBus{}, proxyApp, block, meta.BlockID.PartsHeader, mempool); err != nil {
return nil, err
}
@ -350,7 +359,6 @@ func (h *Handshaker) replayBlock(height int, proxyApp proxy.AppConnConsensus) ([
func (h *Handshaker) checkAppHash(appHash []byte) error {
if !bytes.Equal(h.state.AppHash, appHash) {
panic(errors.New(cmn.Fmt("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, h.state.AppHash)).Error())
return nil
}
return nil
}
@ -365,7 +373,10 @@ func newMockProxyApp(appHash []byte, abciResponses *sm.ABCIResponses) proxy.AppC
abciResponses: abciResponses,
})
cli, _ := clientCreator.NewABCIClient()
cli.Start()
err := cli.Start()
if err != nil {
panic(err)
}
return proxy.NewAppConnConsensus(cli)
}
@ -377,21 +388,17 @@ type mockProxyApp struct {
abciResponses *sm.ABCIResponses
}
func (mock *mockProxyApp) DeliverTx(tx []byte) abci.Result {
func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
r := mock.abciResponses.DeliverTx[mock.txCount]
mock.txCount += 1
return abci.Result{
r.Code,
r.Data,
r.Log,
}
return *r
}
func (mock *mockProxyApp) EndBlock(height uint64) abci.ResponseEndBlock {
func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
mock.txCount = 0
return mock.abciResponses.EndBlock
return *mock.abciResponses.EndBlock
}
func (mock *mockProxyApp) Commit() abci.Result {
return abci.NewResultOK(mock.appHash, "")
func (mock *mockProxyApp) Commit() abci.ResponseCommit {
return abci.ResponseCommit{Code: abci.CodeTypeOK, Data: mock.appHash}
}
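
mockProxyApp serves pre-recorded responses in order: DeliverTx walks the stored slice by txCount and EndBlock resets the counter for the next block. Below is a self-contained sketch of that record-and-replay shape using local stand-in types; the real version returns the abci response structs.

```go
package main

import "fmt"

// replayApp returns canned responses in the order they were recorded,
// mirroring how mockProxyApp serves stored ABCIResponses during replay.
type replayApp struct {
	txCount   int
	responses []string // stands in for recorded ResponseDeliverTx values
}

func (a *replayApp) DeliverTx(tx []byte) string {
	r := a.responses[a.txCount]
	a.txCount++
	return r
}

// EndBlock resets the cursor so the next block replays from the start
// of its own recorded responses.
func (a *replayApp) EndBlock() {
	a.txCount = 0
}

func main() {
	app := &replayApp{responses: []string{"ok:tx0", "ok:tx1"}}
	fmt.Println(app.DeliverTx(nil)) // ok:tx0
	fmt.Println(app.DeliverTx(nil)) // ok:tx1
	app.EndBlock()
}
```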

View File

@ -2,13 +2,15 @@ package consensus
import (
"bufio"
"errors"
"context"
"fmt"
"io"
"os"
"strconv"
"strings"
"github.com/pkg/errors"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/proxy"
@ -18,6 +20,11 @@ import (
dbm "github.com/tendermint/tmlibs/db"
)
const (
// event bus subscriber
subscriber = "replay-file"
)
//--------------------------------------------------------
// replay messages interactively or all at once
@ -42,16 +49,23 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
cs.startForReplay()
// ensure all new step events are regenerated as expected
newStepCh := subscribeToEvent(cs.evsw, "replay-test", types.EventStringNewRoundStep(), 1)
newStepCh := make(chan interface{}, 1)
ctx := context.Background()
err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh)
if err != nil {
return errors.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)
}
defer cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
// just open the file for reading, no need to use wal
fp, err := os.OpenFile(file, os.O_RDONLY, 0666)
fp, err := os.OpenFile(file, os.O_RDONLY, 0600)
if err != nil {
return err
}
pb := newPlayback(file, fp, cs, cs.state.Copy())
defer pb.fp.Close()
defer pb.fp.Close() // nolint: errcheck
var nextN int // apply N msgs in a row
var msg *TimedWALMessage
@ -106,16 +120,17 @@ func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState *sm.
// go back count steps by resetting the state and running (pb.count - count) steps
func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
pb.cs.Stop()
pb.cs.Wait()
newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.proxyAppConn, pb.cs.blockStore, pb.cs.mempool)
newCS.SetEventSwitch(pb.cs.evsw)
newCS.SetEventBus(pb.cs.eventBus)
newCS.startForReplay()
pb.fp.Close()
fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 0666)
if err := pb.fp.Close(); err != nil {
return err
}
fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 0600)
if err != nil {
return err
}
@ -196,10 +211,20 @@ func (pb *playback) replayConsoleLoop() int {
// NOTE: "back" is not supported in the state machine design,
// so we restart and replay up to
ctx := context.Background()
// ensure all new step events are regenerated as expected
newStepCh := subscribeToEvent(pb.cs.evsw, "replay-test", types.EventStringNewRoundStep(), 1)
newStepCh := make(chan interface{}, 1)
err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh)
if err != nil {
cmn.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep))
}
defer pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
if len(tokens) == 1 {
pb.replayReset(1, newStepCh)
if err := pb.replayReset(1, newStepCh); err != nil {
pb.cs.Logger.Error("Replay reset error", "err", err)
}
} else {
i, err := strconv.Atoi(tokens[1])
if err != nil {
@ -207,7 +232,9 @@ func (pb *playback) replayConsoleLoop() int {
} else if i > pb.count {
fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count)
} else {
pb.replayReset(i, newStepCh)
if err := pb.replayReset(i, newStepCh); err != nil {
pb.cs.Logger.Error("Replay reset error", "err", err)
}
}
}
@ -265,19 +292,18 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
// Create proxyAppConn connection (consensus, mempool, query)
clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
proxyApp := proxy.NewAppConns(clientCreator, NewHandshaker(state, blockStore))
_, err = proxyApp.Start()
err = proxyApp.Start()
if err != nil {
cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
}
// Make event bus
eventSwitch := types.NewEventSwitch()
if _, err := eventSwitch.Start(); err != nil {
cmn.Exit(cmn.Fmt("Failed to start event switch: %v", err))
eventBus := types.NewEventBus()
if err := eventBus.Start(); err != nil {
cmn.Exit(cmn.Fmt("Failed to start event bus: %v", err))
}
consensusState := NewConsensusState(csConfig, state.Copy(), proxyApp.Consensus(), blockStore, types.MockMempool{})
consensusState.SetEventSwitch(eventSwitch)
consensusState.SetEventBus(eventBus)
return consensusState
}

View File

@ -2,19 +2,24 @@ package consensus
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"runtime"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/tendermint/abci/example/dummy"
abci "github.com/tendermint/abci/types"
crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
auto "github.com/tendermint/tmlibs/autofile"
cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
@ -25,8 +30,10 @@ import (
"github.com/tendermint/tmlibs/log"
)
var consensusReplayConfig *cfg.Config
func init() {
config = ResetConfig("consensus_replay_test")
consensusReplayConfig = ResetConfig("consensus_replay_test")
}
// These tests ensure we can always recover from failure at any part of the consensus process.
@ -39,8 +46,7 @@ func init() {
// NOTE: Files in this dir are generated by running the `build.sh` therein.
// It's a simple way to generate wals for a single block, or multiple blocks, with random transactions,
// and different part sizes. The output is not deterministic, and the stepChanges may need to be adjusted
// after running it (eg. sometimes small_block2 will have 5 block parts, sometimes 6).
// and different part sizes. The output is not deterministic.
// It should only have to be re-run if there is some breaking change to the consensus data structures (eg. blocks, votes)
// or to the behaviour of the app (eg. computes app hash differently)
var data_dir = path.Join(cmn.GoPath(), "src/github.com/tendermint/tendermint/consensus", "test_data")
@ -52,230 +58,209 @@ var data_dir = path.Join(cmn.GoPath(), "src/github.com/tendermint/tendermint/con
// and which ones we need the wal for - then we'd also be able to only flush the
// wal writer when we need to, instead of with every message.
// the priv validator changes step at these lines for a block with 1 val and 1 part
var baseStepChanges = []int{3, 6, 8}
func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) {
logger := log.TestingLogger()
state, _ := sm.GetState(stateDB, consensusReplayConfig.GenesisFile())
state.SetLogger(logger.With("module", "state"))
privValidator := loadPrivValidator(consensusReplayConfig)
cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, dummy.NewDummyApplication(), blockDB)
cs.SetLogger(logger)
// test recovery from each line in each testCase
var testCases = []*testCase{
newTestCase("empty_block", baseStepChanges), // empty block (has 1 block part)
newTestCase("small_block1", baseStepChanges), // small block with txs in 1 block part
newTestCase("small_block2", []int{3, 12, 14}), // small block with txs across 6 smaller block parts
}
bytes, _ := ioutil.ReadFile(cs.config.WalFile())
// fmt.Printf("====== WAL: \n\r%s\n", bytes)
t.Logf("====== WAL: \n\r%s\n", bytes)
type testCase struct {
name string
log []byte //full cs wal
stepMap map[int]int8 // map lines of log to privval step
err := cs.Start()
require.NoError(t, err)
defer func() {
cs.Stop()
}()
proposeLine int
prevoteLine int
precommitLine int
}
func newTestCase(name string, stepChanges []int) *testCase {
if len(stepChanges) != 3 {
panic(cmn.Fmt("a full wal has 3 step changes! Got array %v", stepChanges))
}
return &testCase{
name: name,
log: readWAL(path.Join(data_dir, name+".cswal")),
stepMap: newMapFromChanges(stepChanges),
proposeLine: stepChanges[0],
prevoteLine: stepChanges[1],
precommitLine: stepChanges[2],
}
}
func newMapFromChanges(changes []int) map[int]int8 {
changes = append(changes, changes[2]+1) // so we add the last step change to the map
m := make(map[int]int8)
var count int
for changeNum, nextChange := range changes {
for ; count < nextChange; count++ {
m[count] = int8(changeNum)
}
}
return m
}
func readWAL(p string) []byte {
b, err := ioutil.ReadFile(p)
if err != nil {
panic(err)
}
return b
}
func writeWAL(walMsgs []byte) string {
walFile, err := ioutil.TempFile("", "wal")
if err != nil {
panic(fmt.Errorf("failed to create temp WAL file: %v", err))
}
_, err = walFile.Write(walMsgs)
if err != nil {
panic(fmt.Errorf("failed to write to temp WAL file: %v", err))
}
if err := walFile.Close(); err != nil {
panic(fmt.Errorf("failed to close temp WAL file: %v", err))
}
return walFile.Name()
}
func waitForBlock(newBlockCh chan interface{}, thisCase *testCase, i int) {
after := time.After(time.Second * 10)
// This is just a signal that we haven't halted; it's not something contained
// in the WAL itself. Assuming the consensus state is running, replay of any
// WAL, including the empty one, should eventually be followed by a new
// block, or else something is wrong.
newBlockCh := make(chan interface{}, 1)
err = cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, newBlockCh)
require.NoError(t, err)
select {
case <-newBlockCh:
case <-after:
panic(cmn.Fmt("Timed out waiting for new block for case '%s' line %d", thisCase.name, i))
case <-time.After(10 * time.Second):
t.Fatalf("Timed out waiting for new block (see trace above)")
}
}
func runReplayTest(t *testing.T, cs *ConsensusState, walFile string, newBlockCh chan interface{},
thisCase *testCase, i int) {
cs.config.SetWalFile(walFile)
started, err := cs.Start()
if err != nil {
t.Fatalf("Cannot start consensus: %v", err)
}
if !started {
t.Error("Consensus did not start")
}
// Wait to make a new block.
// This is just a signal that we haven't halted; its not something contained in the WAL itself.
// Assuming the consensus state is running, replay of any WAL, including the empty one,
// should eventually be followed by a new block, or else something is wrong
waitForBlock(newBlockCh, thisCase, i)
cs.evsw.Stop()
cs.Stop()
LOOP:
func sendTxs(cs *ConsensusState, ctx context.Context) {
i := 0
for {
select {
case <-newBlockCh:
case <-ctx.Done():
return
default:
break LOOP
}
}
cs.Wait()
}
func toPV(pv types.PrivValidator) *types.PrivValidatorFS {
return pv.(*types.PrivValidatorFS)
}
func setupReplayTest(t *testing.T, thisCase *testCase, nLines int, crashAfter bool) (*ConsensusState, chan interface{}, []byte, string) {
t.Log("-------------------------------------")
t.Logf("Starting replay test %v (of %d lines of WAL). Crash after = %v", thisCase.name, nLines, crashAfter)
lineStep := nLines
if crashAfter {
lineStep -= 1
}
split := bytes.Split(thisCase.log, walSeparator)
lastMsg := split[nLines]
// we write those lines up to (not including) one with the signature
b := bytes.Join(split[:nLines], walSeparator)
b = append(b, walSeparator...)
walFile := writeWAL(b)
cs := fixedConsensusStateDummy()
// set the last step according to when we crashed vs the wal
toPV(cs.privValidator).LastHeight = 1 // first block
toPV(cs.privValidator).LastStep = thisCase.stepMap[lineStep]
t.Logf("[WARN] setupReplayTest LastStep=%v", toPV(cs.privValidator).LastStep)
newBlockCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewBlock(), 1)
return cs, newBlockCh, lastMsg, walFile
}
func readTimedWALMessage(t *testing.T, rawMsg []byte) TimedWALMessage {
b := bytes.NewBuffer(rawMsg)
// because rawMsg does not contain a separator and WALDecoder#Decode expects it
_, err := b.Write(walSeparator)
if err != nil {
t.Fatal(err)
}
dec := NewWALDecoder(b)
msg, err := dec.Decode()
if err != nil {
t.Fatalf("Error reading json data: %v", err)
}
return *msg
}
//-----------------------------------------------
// Test the log at every iteration, and set the privVal last step
// as if the log was written after signing, before the crash
func TestWALCrashAfterWrite(t *testing.T) {
for _, thisCase := range testCases {
splitSize := bytes.Count(thisCase.log, walSeparator)
for i := 0; i < splitSize-1; i++ {
t.Run(fmt.Sprintf("%s:%d", thisCase.name, i), func(t *testing.T) {
cs, newBlockCh, _, walFile := setupReplayTest(t, thisCase, i+1, true)
cs.config.TimeoutPropose = 100
runReplayTest(t, cs, walFile, newBlockCh, thisCase, i+1)
// cleanup
os.Remove(walFile)
})
cs.mempool.CheckTx([]byte{byte(i)}, nil)
i++
}
}
}
//-----------------------------------------------
// Test the log as if we crashed after signing but before writing.
// This relies on privValidator.LastSignature being set
// TestWALCrash uses a crashing WAL to test that we can recover from any WAL failure.
func TestWALCrash(t *testing.T) {
testCases := []struct {
name string
initFn func(*ConsensusState, context.Context)
heightToStop int64
}{
{"empty block",
func(cs *ConsensusState, ctx context.Context) {},
1},
{"block with a smaller part size",
func(cs *ConsensusState, ctx context.Context) {
// XXX: is there a better way to change BlockPartSizeBytes?
params := cs.state.Params
params.BlockPartSizeBytes = 512
cs.state.Params = params
sendTxs(cs, ctx)
},
1},
{"many non-empty blocks",
sendTxs,
3},
}
func TestWALCrashBeforeWritePropose(t *testing.T) {
for _, thisCase := range testCases {
lineNum := thisCase.proposeLine
t.Run(fmt.Sprintf("%s:%d", thisCase.name, lineNum), func(t *testing.T) {
// setup replay test where last message is a proposal
cs, newBlockCh, proposalMsg, walFile := setupReplayTest(t, thisCase, lineNum, false)
cs.config.TimeoutPropose = 100
msg := readTimedWALMessage(t, proposalMsg)
proposal := msg.Msg.(msgInfo).Msg.(*ProposalMessage)
// Set LastSig
toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, proposal.Proposal)
toPV(cs.privValidator).LastSignature = proposal.Proposal.Signature
runReplayTest(t, cs, walFile, newBlockCh, thisCase, lineNum)
// cleanup
os.Remove(walFile)
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
crashWALandCheckLiveness(t, tc.initFn, tc.heightToStop)
})
}
}
func TestWALCrashBeforeWritePrevote(t *testing.T) {
for _, thisCase := range testCases {
testReplayCrashBeforeWriteVote(t, thisCase, thisCase.prevoteLine, types.EventStringCompleteProposal())
func crashWALandCheckLiveness(t *testing.T, initFn func(*ConsensusState, context.Context), heightToStop int64) {
walPanicked := make(chan error)
crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}
i := 1
LOOP:
for {
// fmt.Printf("====== LOOP %d\n", i)
t.Logf("====== LOOP %d\n", i)
// create consensus state from a clean slate
logger := log.NewNopLogger()
stateDB := dbm.NewMemDB()
state, _ := sm.MakeGenesisStateFromFile(stateDB, consensusReplayConfig.GenesisFile())
state.SetLogger(logger.With("module", "state"))
privValidator := loadPrivValidator(consensusReplayConfig)
blockDB := dbm.NewMemDB()
cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, dummy.NewDummyApplication(), blockDB)
cs.SetLogger(logger)
// start sending transactions
ctx, cancel := context.WithCancel(context.Background())
go initFn(cs, ctx)
// clean up WAL file from the previous iteration
walFile := cs.config.WalFile()
os.Remove(walFile)
// set crashing WAL
csWal, err := cs.OpenWAL(walFile)
require.NoError(t, err)
crashingWal.next = csWal
// reset the message counter
crashingWal.msgIndex = 1
cs.wal = crashingWal
// start consensus state
err = cs.Start()
require.NoError(t, err)
i++
select {
case err := <-walPanicked:
t.Logf("WAL panicked: %v", err)
// make sure we can make blocks after a crash
startNewConsensusStateAndWaitForBlock(t, cs.Height, blockDB, stateDB)
// stop consensus state and transactions sender (initFn)
cs.Stop()
cancel()
// if we reached the required height, exit
if _, ok := err.(ReachedHeightToStopError); ok {
break LOOP
}
case <-time.After(10 * time.Second):
t.Fatal("WAL did not panic for 10 seconds (check the log)")
}
}
}
func TestWALCrashBeforeWritePrecommit(t *testing.T) {
for _, thisCase := range testCases {
testReplayCrashBeforeWriteVote(t, thisCase, thisCase.precommitLine, types.EventStringPolka())
// crashingWAL is a WAL that simulates a crash during Save (both before and
// after the write). It remembers the message for which we last panicked
// (lastPanickedForMsgIndex), so we don't panic for it in subsequent iterations.
type crashingWAL struct {
next WAL
panicCh chan error
heightToStop int64
msgIndex int // current message index
lastPanickedForMsgIndex int // last message for which we panicked
}
// WALWriteError indicates a WAL crash.
type WALWriteError struct {
msg string
}
func (e WALWriteError) Error() string {
return e.msg
}
// ReachedHeightToStopError indicates we've reached the required consensus
// height and may exit.
type ReachedHeightToStopError struct {
height int64
}
func (e ReachedHeightToStopError) Error() string {
return fmt.Sprintf("reached height to stop %d", e.height)
}
// Save simulates a WAL crash by sending an error to the panicCh and then
// exiting the cs.receiveRoutine.
func (w *crashingWAL) Save(m WALMessage) {
if endMsg, ok := m.(EndHeightMessage); ok {
if endMsg.Height == w.heightToStop {
w.panicCh <- ReachedHeightToStopError{endMsg.Height}
runtime.Goexit()
} else {
w.next.Save(m)
}
return
}
if w.msgIndex > w.lastPanickedForMsgIndex {
w.lastPanickedForMsgIndex = w.msgIndex
_, file, line, _ := runtime.Caller(1)
w.panicCh <- WALWriteError{fmt.Sprintf("failed to write %T to WAL (fileline: %s:%d)", m, file, line)}
runtime.Goexit()
} else {
w.msgIndex++
w.next.Save(m)
}
}
func testReplayCrashBeforeWriteVote(t *testing.T, thisCase *testCase, lineNum int, eventString string) {
// setup replay test where last message is a vote
cs, newBlockCh, voteMsg, walFile := setupReplayTest(t, thisCase, lineNum, false)
types.AddListenerForEvent(cs.evsw, "tester", eventString, func(data types.TMEventData) {
msg := readTimedWALMessage(t, voteMsg)
vote := msg.Msg.(msgInfo).Msg.(*VoteMessage)
// Set LastSig
toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, vote.Vote)
toPV(cs.privValidator).LastSignature = vote.Vote.Signature
})
runReplayTest(t, cs, walFile, newBlockCh, thisCase, lineNum)
func (w *crashingWAL) Group() *auto.Group { return w.next.Group() }
func (w *crashingWAL) SearchForEndHeight(height int64) (gr *auto.GroupReader, found bool, err error) {
return w.next.SearchForEndHeight(height)
}
func (w *crashingWAL) Start() error { return w.next.Start() }
func (w *crashingWAL) Stop() error { return w.next.Stop() }
func (w *crashingWAL) Wait() { w.next.Wait() }
//------------------------------------------------------------------------------------------
// Handshake Tests
@ -320,6 +305,21 @@ func TestHandshakeReplayNone(t *testing.T) {
}
}
func writeWAL(walMsgs []byte) string {
walFile, err := ioutil.TempFile("", "wal")
if err != nil {
panic(fmt.Errorf("failed to create temp WAL file: %v", err))
}
_, err = walFile.Write(walMsgs)
if err != nil {
panic(fmt.Errorf("failed to write to temp WAL file: %v", err))
}
if err := walFile.Close(); err != nil {
panic(fmt.Errorf("failed to close temp WAL file: %v", err))
}
return walFile.Name()
}
// Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
config := ResetConfig("proxy_test_")
@ -339,7 +339,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
t.Fatal(err)
}
wal.SetLogger(log.TestingLogger())
if _, err := wal.Start(); err != nil {
if err := wal.Start(); err != nil {
t.Fatal(err)
}
chain, commits, err := makeBlockchainFromWAL(wal)
@ -368,7 +368,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
// now start the app using the handshake - it should sync
handshaker := NewHandshaker(state, store)
proxyApp := proxy.NewAppConns(clientCreator2, handshaker)
if _, err := proxyApp.Start(); err != nil {
if err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
@ -397,7 +397,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) {
func applyBlock(st *sm.State, blk *types.Block, proxyApp proxy.AppConns) {
testPartSize := st.Params.BlockPartSizeBytes
err := st.ApplyBlock(nil, proxyApp.Consensus(), blk, blk.MakePartSet(testPartSize).Header(), mempool)
err := st.ApplyBlock(types.NopEventBus{}, proxyApp.Consensus(), blk, blk.MakePartSet(testPartSize).Header(), mempool)
if err != nil {
panic(err)
}
@ -406,12 +406,14 @@ func applyBlock(st *sm.State, blk *types.Block, proxyApp proxy.AppConns) {
func buildAppStateFromChain(proxyApp proxy.AppConns,
state *sm.State, chain []*types.Block, nBlocks int, mode uint) {
// start a new app without handshake, play nBlocks blocks
if _, err := proxyApp.Start(); err != nil {
if err := proxyApp.Start(); err != nil {
panic(err)
}
validators := types.TM2PB.Validators(state.Validators)
proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators})
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}); err != nil {
panic(err)
}
defer proxyApp.Stop()
switch mode {
@ -439,13 +441,15 @@ func buildTMStateFromChain(config *cfg.Config, state *sm.State, chain []*types.B
// run the whole chain against this client to build up the tendermint state
clientCreator := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.DBDir(), "1")))
proxyApp := proxy.NewAppConns(clientCreator, nil) // sm.NewHandshaker(config, state, store, ReplayLastBlock))
if _, err := proxyApp.Start(); err != nil {
if err := proxyApp.Start(); err != nil {
panic(err)
}
defer proxyApp.Stop()
validators := types.TM2PB.Validators(state.Validators)
proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators})
if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}); err != nil {
panic(err)
}
var latestAppHash []byte
@ -477,7 +481,7 @@ func buildTMStateFromChain(config *cfg.Config, state *sm.State, chain []*types.B
//--------------------------
// utils for making blocks
func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) {
func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
// Search for height marker
gr, found, err := wal.SearchForEndHeight(0)
if err != nil {
@ -486,7 +490,7 @@ func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) {
if !found {
return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1))
}
defer gr.Close()
defer gr.Close() // nolint: errcheck
// log.Notice("Build a blockchain by reading from the WAL")
@ -586,21 +590,21 @@ func NewMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBl
return &mockBlockStore{config, params, nil, nil}
}
func (bs *mockBlockStore) Height() int { return len(bs.chain) }
func (bs *mockBlockStore) LoadBlock(height int) *types.Block { return bs.chain[height-1] }
func (bs *mockBlockStore) LoadBlockMeta(height int) *types.BlockMeta {
func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) }
func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] }
func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
block := bs.chain[height-1]
return &types.BlockMeta{
BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.params.BlockPartSizeBytes).Header()},
Header: block.Header,
}
}
func (bs *mockBlockStore) LoadBlockPart(height int, index int) *types.Part { return nil }
func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil }
func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
}
func (bs *mockBlockStore) LoadBlockCommit(height int) *types.Commit {
func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
return bs.commits[height-1]
}
func (bs *mockBlockStore) LoadSeenCommit(height int) *types.Commit {
func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit {
return bs.commits[height-1]
}

View File

@ -4,7 +4,6 @@ import (
"bytes"
"errors"
"fmt"
"path/filepath"
"reflect"
"runtime/debug"
"sync"
@ -55,7 +54,7 @@ type msgInfo struct {
// internally generated messages which may update the state
type timeoutInfo struct {
Duration time.Duration `json:"duration"`
Height int `json:"height"`
Height int64 `json:"height"`
Round int `json:"round"`
Step cstypes.RoundStepType `json:"step"`
}
@ -91,13 +90,13 @@ type ConsensusState struct {
internalMsgQueue chan msgInfo
timeoutTicker TimeoutTicker
// we use PubSub to trigger msg broadcasts in the reactor,
// we use eventBus to trigger msg broadcasts in the reactor,
// and to notify external subscribers, eg. through a websocket
evsw types.EventSwitch
eventBus *types.EventBus
// a Write-Ahead Log ensures we can recover from any kind of crash
// and helps us avoid signing conflicting votes
wal *WAL
wal WAL
replayMode bool // so we don't log signing errors during replay
doWALCatchup bool // determines if we even try to do the catchup
@ -105,8 +104,8 @@ type ConsensusState struct {
nSteps int
// some functions can be overwritten for testing
decideProposal func(height, round int)
doPrevote func(height, round int)
decideProposal func(height int64, round int)
doPrevote func(height int64, round int)
setProposal func(proposal *types.Proposal) error
// closed when we finish shutting down
@ -125,6 +124,7 @@ func NewConsensusState(config *cfg.ConsensusConfig, state *sm.State, proxyAppCon
timeoutTicker: NewTimeoutTicker(),
done: make(chan struct{}),
doWALCatchup: true,
wal: nilWAL{},
}
// set function defaults (may be overwritten before calling Start)
cs.decideProposal = cs.defaultDecideProposal
@ -148,9 +148,9 @@ func (cs *ConsensusState) SetLogger(l log.Logger) {
cs.timeoutTicker.SetLogger(l)
}
// SetEventSwitch implements events.Eventable
func (cs *ConsensusState) SetEventSwitch(evsw types.EventSwitch) {
cs.evsw = evsw
// SetEventBus sets event bus.
func (cs *ConsensusState) SetEventBus(b *types.EventBus) {
cs.eventBus = b
}
// String returns a string.
@ -179,7 +179,7 @@ func (cs *ConsensusState) getRoundState() *cstypes.RoundState {
}
// GetValidators returns a copy of the current validators.
func (cs *ConsensusState) GetValidators() (int, []*types.Validator) {
func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators
@ -200,7 +200,7 @@ func (cs *ConsensusState) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
}
// LoadCommit loads the commit for a given height.
func (cs *ConsensusState) LoadCommit(height int) *types.Commit {
func (cs *ConsensusState) LoadCommit(height int64) *types.Commit {
cs.mtx.Lock()
defer cs.mtx.Unlock()
if height == cs.blockStore.Height() {
@ -212,19 +212,27 @@ func (cs *ConsensusState) LoadCommit(height int) *types.Commit {
// OnStart implements cmn.Service.
// It loads the latest state via the WAL, and starts the timeout and receive routines.
func (cs *ConsensusState) OnStart() error {
walFile := cs.config.WalFile()
if err := cs.OpenWAL(walFile); err != nil {
cs.Logger.Error("Error loading ConsensusState wal", "err", err.Error())
return err
// we may set the WAL in testing before calling Start,
// so only OpenWAL if it's still the nilWAL
if _, ok := cs.wal.(nilWAL); ok {
walFile := cs.config.WalFile()
wal, err := cs.OpenWAL(walFile)
if err != nil {
cs.Logger.Error("Error loading ConsensusState wal", "err", err.Error())
return err
}
cs.wal = wal
}
// we need the timeoutRoutine for replay so
// we don't block on the tick chan.
// we don't block on the tick chan.
// NOTE: we will get a build up of garbage go routines
// firing on the tockChan until the receiveRoutine is started
// to deal with them (by that point, at most one will be valid)
cs.timeoutTicker.Start()
// firing on the tockChan until the receiveRoutine is started
// to deal with them (by that point, at most one will be valid)
err := cs.timeoutTicker.Start()
if err != nil {
return err
}
// we may have lost some votes if the process crashed
// reload from consensus log to catchup
@ -249,7 +257,11 @@ func (cs *ConsensusState) OnStart() error {
// timeoutRoutine: receive requests for timeouts on tickChan and fire timeouts on tockChan
// receiveRoutine: serializes processing of proposals, block parts, votes; coordinates state transitions
func (cs *ConsensusState) startRoutines(maxSteps int) {
cs.timeoutTicker.Start()
err := cs.timeoutTicker.Start()
if err != nil {
cs.Logger.Error("Error starting timeout ticker", "err", err)
return
}
go cs.receiveRoutine(maxSteps)
}
@ -260,7 +272,7 @@ func (cs *ConsensusState) OnStop() {
cs.timeoutTicker.Stop()
// Make BaseService.Wait() wait until cs.wal.Wait()
if cs.wal != nil && cs.IsRunning() {
if cs.IsRunning() {
cs.wal.Wait()
}
}
@ -273,25 +285,17 @@ func (cs *ConsensusState) Wait() {
}
// OpenWAL opens a file to log all consensus messages and timeouts for deterministic accountability
func (cs *ConsensusState) OpenWAL(walFile string) (err error) {
err = cmn.EnsureDir(filepath.Dir(walFile), 0700)
if err != nil {
cs.Logger.Error("Error ensuring ConsensusState wal dir", "err", err.Error())
return err
}
cs.mtx.Lock()
defer cs.mtx.Unlock()
func (cs *ConsensusState) OpenWAL(walFile string) (WAL, error) {
wal, err := NewWAL(walFile, cs.config.WalLight)
if err != nil {
return err
cs.Logger.Error("Failed to open WAL for consensus state", "wal", walFile, "err", err)
return nil, err
}
wal.SetLogger(cs.Logger.With("wal", walFile))
if _, err := wal.Start(); err != nil {
return err
if err := wal.Start(); err != nil {
return nil, err
}
cs.wal = wal
return nil
return wal, nil
}
//------------------------------------------------------------
@ -327,7 +331,7 @@ func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerKey string)
}
// AddProposalBlockPart inputs a part of the proposal block.
func (cs *ConsensusState) AddProposalBlockPart(height, round int, part *types.Part, peerKey string) error {
func (cs *ConsensusState) AddProposalBlockPart(height int64, round int, part *types.Part, peerKey string) error {
if peerKey == "" {
cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}
@ -341,18 +345,22 @@ func (cs *ConsensusState) AddProposalBlockPart(height, round int, part *types.Pa
// SetProposalAndBlock inputs the proposal and all block parts.
func (cs *ConsensusState) SetProposalAndBlock(proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerKey string) error {
cs.SetProposal(proposal, peerKey)
if err := cs.SetProposal(proposal, peerKey); err != nil {
return err
}
for i := 0; i < parts.Total(); i++ {
part := parts.GetPart(i)
cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerKey)
if err := cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerKey); err != nil {
return err
}
}
return nil // TODO errors
return nil
}
//------------------------------------------------------------
// internal functions for managing the state
func (cs *ConsensusState) updateHeight(height int) {
func (cs *ConsensusState) updateHeight(height int64) {
cs.Height = height
}
@ -364,12 +372,12 @@ func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType)
// enterNewRound(height, 0) at cs.StartTime.
func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) {
//cs.Logger.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime)
sleepDuration := rs.StartTime.Sub(time.Now())
sleepDuration := rs.StartTime.Sub(time.Now()) // nolint: gotype, gosimple
cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight)
}
// Attempt to schedule a timeout (by sending timeoutInfo on the tickChan)
func (cs *ConsensusState) scheduleTimeout(duration time.Duration, height, round int, step cstypes.RoundStepType) {
func (cs *ConsensusState) scheduleTimeout(duration time.Duration, height int64, round int, step cstypes.RoundStepType) {
cs.timeoutTicker.ScheduleTimeout(timeoutInfo{duration, height, round, step})
}
@ -480,9 +488,9 @@ func (cs *ConsensusState) newStep() {
rs := cs.RoundStateEvent()
cs.wal.Save(rs)
cs.nSteps += 1
// newStep is called by updateToStep in NewConsensusState before the evsw is set!
if cs.evsw != nil {
types.FireEventNewRoundStep(cs.evsw, rs)
// newStep is called by updateToStep in NewConsensusState before the eventBus is set!
if cs.eventBus != nil {
cs.eventBus.PublishEventNewRoundStep(rs)
}
}
@ -536,9 +544,7 @@ func (cs *ConsensusState) receiveRoutine(maxSteps int) {
// priv_val tracks LastSig
// close wal now that we're done writing to it
if cs.wal != nil {
cs.wal.Stop()
}
cs.wal.Stop()
close(cs.done)
return
@ -607,13 +613,13 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
case cstypes.RoundStepNewRound:
cs.enterPropose(ti.Height, 0)
case cstypes.RoundStepPropose:
types.FireEventTimeoutPropose(cs.evsw, cs.RoundStateEvent())
cs.eventBus.PublishEventTimeoutPropose(cs.RoundStateEvent())
cs.enterPrevote(ti.Height, ti.Round)
case cstypes.RoundStepPrevoteWait:
types.FireEventTimeoutWait(cs.evsw, cs.RoundStateEvent())
cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent())
cs.enterPrecommit(ti.Height, ti.Round)
case cstypes.RoundStepPrecommitWait:
types.FireEventTimeoutWait(cs.evsw, cs.RoundStateEvent())
cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent())
cs.enterNewRound(ti.Height, ti.Round+1)
default:
panic(cmn.Fmt("Invalid timeout step: %v", ti.Step))
@ -621,7 +627,7 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
}
func (cs *ConsensusState) handleTxsAvailable(height int) {
func (cs *ConsensusState) handleTxsAvailable(height int64) {
cs.mtx.Lock()
defer cs.mtx.Unlock()
// we only need to do this for round 0
@ -638,7 +644,7 @@ func (cs *ConsensusState) handleTxsAvailable(height int) {
// Enter: +2/3 precommits for nil at (height,round-1)
// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round)
// NOTE: cs.StartTime was already set for height.
func (cs *ConsensusState) enterNewRound(height int, round int) {
func (cs *ConsensusState) enterNewRound(height int64, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
cs.Logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
@ -673,7 +679,7 @@ func (cs *ConsensusState) enterNewRound(height int, round int) {
}
cs.Votes.SetRound(round + 1) // also track next round (round+1) to allow round-skipping
types.FireEventNewRound(cs.evsw, cs.RoundStateEvent())
cs.eventBus.PublishEventNewRound(cs.RoundStateEvent())
// Wait for txs to be available in the mempool
// before we enterPropose in round 0. If the last block changed the app hash,
@ -691,19 +697,16 @@ func (cs *ConsensusState) enterNewRound(height int, round int) {
// needProofBlock returns true on the first height (so the genesis app hash is signed right away)
// and where the last block (height-1) caused the app hash to change
func (cs *ConsensusState) needProofBlock(height int) bool {
func (cs *ConsensusState) needProofBlock(height int64) bool {
if height == 1 {
return true
}
lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1)
if !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash) {
return true
}
return false
return !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash)
}
func (cs *ConsensusState) proposalHeartbeat(height, round int) {
func (cs *ConsensusState) proposalHeartbeat(height int64, round int) {
counter := 0
addr := cs.privValidator.GetAddress()
valIndex, v := cs.Validators.GetByAddress(addr)
@ -726,8 +729,7 @@ func (cs *ConsensusState) proposalHeartbeat(height, round int) {
ValidatorIndex: valIndex,
}
cs.privValidator.SignHeartbeat(chainID, heartbeat)
heartbeatEvent := types.EventDataProposalHeartbeat{heartbeat}
types.FireEventProposalHeartbeat(cs.evsw, heartbeatEvent)
cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat})
counter += 1
time.Sleep(proposalHeartbeatIntervalSeconds * time.Second)
}
@ -736,7 +738,7 @@ func (cs *ConsensusState) proposalHeartbeat(height, round int) {
// Enter (CreateEmptyBlocks): from enterNewRound(height,round)
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
func (cs *ConsensusState) enterPropose(height int, round int) {
func (cs *ConsensusState) enterPropose(height int64, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
@ -783,7 +785,7 @@ func (cs *ConsensusState) isProposer() bool {
return bytes.Equal(cs.Validators.GetProposer().Address, cs.privValidator.GetAddress())
}
func (cs *ConsensusState) defaultDecideProposal(height, round int) {
func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
var block *types.Block
var blockParts *types.PartSet
@ -871,7 +873,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
// Enter: any +2/3 prevotes for future round.
// Prevote for LockedBlock if we're locked, or ProposalBlock if valid.
// Otherwise vote nil.
func (cs *ConsensusState) enterPrevote(height int, round int) {
func (cs *ConsensusState) enterPrevote(height int64, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
@ -885,7 +887,7 @@ func (cs *ConsensusState) enterPrevote(height int, round int) {
// fire event for how we got here
if cs.isProposalComplete() {
types.FireEventCompleteProposal(cs.evsw, cs.RoundStateEvent())
cs.eventBus.PublishEventCompleteProposal(cs.RoundStateEvent())
} else {
// we received +2/3 prevotes for a future round
// TODO: catchup event?
@ -900,7 +902,7 @@ func (cs *ConsensusState) enterPrevote(height int, round int) {
// (so we have more time to try and collect +2/3 prevotes for a single block)
}
func (cs *ConsensusState) defaultDoPrevote(height int, round int) {
func (cs *ConsensusState) defaultDoPrevote(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
// If a block is locked, prevote that.
if cs.LockedBlock != nil {
@ -933,7 +935,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int, round int) {
}
// Enter: any +2/3 prevotes at next round.
func (cs *ConsensusState) enterPrevoteWait(height int, round int) {
func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
@ -959,7 +961,7 @@ func (cs *ConsensusState) enterPrevoteWait(height int, round int) {
// Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round)
// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil,
// else, precommit nil otherwise.
func (cs *ConsensusState) enterPrecommit(height int, round int) {
func (cs *ConsensusState) enterPrecommit(height int64, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
@ -987,7 +989,7 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
}
// At this point +2/3 prevoted for a particular block or nil
types.FireEventPolka(cs.evsw, cs.RoundStateEvent())
cs.eventBus.PublishEventPolka(cs.RoundStateEvent())
// the latest POLRound should be this round
polRound, _ := cs.Votes.POLInfo()
@ -1004,7 +1006,7 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil
types.FireEventUnlock(cs.evsw, cs.RoundStateEvent())
cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
}
cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
return
@ -1016,7 +1018,7 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
if cs.LockedBlock.HashesTo(blockID.Hash) {
cs.Logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking")
cs.LockedRound = round
types.FireEventRelock(cs.evsw, cs.RoundStateEvent())
cs.eventBus.PublishEventRelock(cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader)
return
}
@ -1031,7 +1033,7 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
cs.LockedRound = round
cs.LockedBlock = cs.ProposalBlock
cs.LockedBlockParts = cs.ProposalBlockParts
types.FireEventLock(cs.evsw, cs.RoundStateEvent())
cs.eventBus.PublishEventLock(cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader)
return
}
@ -1047,12 +1049,12 @@ func (cs *ConsensusState) enterPrecommit(height int, round int) {
cs.ProposalBlock = nil
cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
}
types.FireEventUnlock(cs.evsw, cs.RoundStateEvent())
cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
}
// Enter: any +2/3 precommits for next round.
func (cs *ConsensusState) enterPrecommitWait(height int, round int) {
func (cs *ConsensusState) enterPrecommitWait(height int64, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) {
cs.Logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
return
@ -1074,7 +1076,7 @@ func (cs *ConsensusState) enterPrecommitWait(height int, round int) {
}
// Enter: +2/3 precommits for block
func (cs *ConsensusState) enterCommit(height int, commitRound int) {
func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
cs.Logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
return
@ -1120,7 +1122,7 @@ func (cs *ConsensusState) enterCommit(height int, commitRound int) {
}
// If we have the block AND +2/3 commits for it, finalize.
func (cs *ConsensusState) tryFinalizeCommit(height int) {
func (cs *ConsensusState) tryFinalizeCommit(height int64) {
if cs.Height != height {
cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
}
@ -1142,7 +1144,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int) {
}
// Increment height and goto cstypes.RoundStepNewHeight
func (cs *ConsensusState) finalizeCommit(height int) {
func (cs *ConsensusState) finalizeCommit(height int64) {
if cs.Height != height || cs.Step != cstypes.RoundStepCommit {
cs.Logger.Debug(cmn.Fmt("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step))
return
@ -1191,23 +1193,25 @@ func (cs *ConsensusState) finalizeCommit(height int) {
// WAL replay for blocks with an #ENDHEIGHT
// As is, ConsensusState should not be started again
// until we successfully call ApplyBlock (ie. here or in Handshake after restart)
if cs.wal != nil {
cs.wal.Save(EndHeightMessage{uint64(height)})
}
cs.wal.Save(EndHeightMessage{height})
fail.Fail() // XXX
// Create a copy of the state for staging
// and an event cache for txs
stateCopy := cs.state.Copy()
eventCache := types.NewEventCache(cs.evsw)
txEventBuffer := types.NewTxEventBuffer(cs.eventBus, block.NumTxs)
// Execute and commit the block, update and save the state, and update the mempool.
// All calls to the proxyAppConn come here.
// NOTE: the block.AppHash won't reflect these txs until the next block
err := stateCopy.ApplyBlock(eventCache, cs.proxyAppConn, block, blockParts.Header(), cs.mempool)
err := stateCopy.ApplyBlock(txEventBuffer, cs.proxyAppConn, block, blockParts.Header(), cs.mempool)
if err != nil {
cs.Logger.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "err", err)
err := cmn.Kill()
if err != nil {
cs.Logger.Error("Failed to kill this process - please do so manually", "err", err)
}
return
}
@ -1220,9 +1224,12 @@ func (cs *ConsensusState) finalizeCommit(height int) {
// * Fire before persisting state, in ApplyBlock
// * Fire on start up if we haven't written any new WAL msgs
// Both options mean we may fire more than once. Is that fine?
types.FireEventNewBlock(cs.evsw, types.EventDataNewBlock{block})
types.FireEventNewBlockHeader(cs.evsw, types.EventDataNewBlockHeader{block.Header})
eventCache.Flush()
cs.eventBus.PublishEventNewBlock(types.EventDataNewBlock{block})
cs.eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{block.Header})
err = txEventBuffer.Flush()
if err != nil {
cs.Logger.Error("Failed to flush event buffer", "err", err)
}
fail.Fail() // XXX
@ -1278,7 +1285,7 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
// NOTE: block is not necessarily valid.
// Asynchronously triggers either enterPrevote (before we time out of propose) or tryFinalizeCommit, once we have the full block.
func (cs *ConsensusState) addProposalBlockPart(height int, part *types.Part, verify bool) (added bool, err error) {
func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part, verify bool) (added bool, err error) {
// Blocks might be reused, so round mismatch is OK
if cs.Height != height {
return false, nil
@ -1357,7 +1364,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool,
added, err = cs.LastCommit.AddVote(vote)
if added {
cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort()))
types.FireEventVote(cs.evsw, types.EventDataVote{vote})
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
// if we can skip timeoutCommit and have all the votes now,
if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
@ -1375,7 +1382,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool,
height := cs.Height
added, err = cs.Votes.AddVote(vote, peerKey)
if added {
types.FireEventVote(cs.evsw, types.EventDataVote{vote})
cs.eventBus.PublishEventVote(types.EventDataVote{vote})
switch vote.Type {
case types.VoteTypePrevote:
@ -1393,7 +1400,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerKey string) (added bool,
cs.LockedRound = 0
cs.LockedBlock = nil
cs.LockedBlockParts = nil
types.FireEventUnlock(cs.evsw, cs.RoundStateEvent())
cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
}
}
if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() {
@ -1487,7 +1494,7 @@ func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.Part
//---------------------------------------------------------
func CompareHRS(h1, r1 int, s1 cstypes.RoundStepType, h2, r2 int, s2 cstypes.RoundStepType) int {
func CompareHRS(h1 int64, r1 int, s1 cstypes.RoundStepType, h2 int64, r2 int, s2 cstypes.RoundStepType) int {
if h1 < h2 {
return -1
} else if h1 > h2 {

View File

@ -2,6 +2,7 @@ package consensus
import (
"bytes"
"context"
"fmt"
"testing"
"time"
@ -9,6 +10,8 @@ import (
cstypes "github.com/tendermint/tendermint/consensus/types"
"github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
tmpubsub "github.com/tendermint/tmlibs/pubsub"
)
func init() {
@ -56,8 +59,8 @@ func TestProposerSelection0(t *testing.T) {
cs1, vss := randConsensusState(4)
height, round := cs1.Height, cs1.Round
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
startTestRound(cs1, height, round)
@ -89,7 +92,7 @@ func TestProposerSelection0(t *testing.T) {
func TestProposerSelection2(t *testing.T) {
cs1, vss := randConsensusState(4) // test needs more work for more than 3 validators
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
// this time we jump in at round 2
incrementRound(vss[1:]...)
@ -121,7 +124,7 @@ func TestEnterProposeNoPrivValidator(t *testing.T) {
height, round := cs.Height, cs.Round
// Listen for propose timeout event
timeoutCh := subscribeToEvent(cs.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose)
startTestRound(cs, height, round)
@ -146,8 +149,8 @@ func TestEnterProposeYesPrivValidator(t *testing.T) {
// Listen for propose timeout event
timeoutCh := subscribeToEvent(cs.evsw, "tester", types.EventStringTimeoutPropose(), 1)
proposalCh := subscribeToEvent(cs.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose)
proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
cs.enterNewRound(height, round)
cs.startRoutines(3)
@ -183,8 +186,8 @@ func TestBadProposal(t *testing.T) {
partSize := cs1.state.Params.BlockPartSizeBytes
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, vs2)
@ -206,7 +209,9 @@ func TestBadProposal(t *testing.T) {
}
// set the proposal block
cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer")
if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
// start the machine
startTestRound(cs1, height, round)
@ -238,9 +243,17 @@ func TestFullRound1(t *testing.T) {
cs, vss := randConsensusState(1)
height, round := cs.Height, cs.Round
voteCh := subscribeToEvent(cs.evsw, "tester", types.EventStringVote(), 0)
propCh := subscribeToEvent(cs.evsw, "tester", types.EventStringCompleteProposal(), 1)
newRoundCh := subscribeToEvent(cs.evsw, "tester", types.EventStringNewRound(), 1)
// NOTE: buffer capacity of 0 ensures we can validate prevote and last commit
// before consensus can move to the next height (and cause a race condition)
cs.eventBus.Stop()
eventBus := types.NewEventBusWithBufferCapacity(0)
eventBus.SetLogger(log.TestingLogger().With("module", "events"))
cs.SetEventBus(eventBus)
eventBus.Start()
voteCh := subscribe(cs.eventBus, types.EventQueryVote)
propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)
startTestRound(cs, height, round)
@ -251,8 +264,6 @@ func TestFullRound1(t *testing.T) {
propBlockHash := re.(types.TMEventData).Unwrap().(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash()
<-voteCh // wait for prevote
// NOTE: voteChan cap of 0 ensures we can complete this
// before consensus can move to the next height (and cause a race condition)
validatePrevote(t, cs, round, vss[0], propBlockHash)
<-voteCh // wait for precommit
@ -268,7 +279,7 @@ func TestFullRoundNil(t *testing.T) {
cs, vss := randConsensusState(1)
height, round := cs.Height, cs.Round
voteCh := subscribeToEvent(cs.evsw, "tester", types.EventStringVote(), 1)
voteCh := subscribe(cs.eventBus, types.EventQueryVote)
cs.enterPrevote(height, round)
cs.startRoutines(4)
@ -287,8 +298,8 @@ func TestFullRound2(t *testing.T) {
vs2 := vss[1]
height, round := cs1.Height, cs1.Round
voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlock(), 1)
voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)
// start round and wait for propose and prevote
startTestRound(cs1, height, round)
@ -330,11 +341,11 @@ func TestLockNoPOL(t *testing.T) {
partSize := cs1.state.Params.BlockPartSizeBytes
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
/*
Round1 (cs1, B) // B B // B B2
@ -469,7 +480,9 @@ func TestLockNoPOL(t *testing.T) {
// now we're on a new round and not the proposer
// so set the proposal block
cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), "")
if err := cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), ""); err != nil {
t.Fatal(err)
}
<-proposalCh
<-voteCh // prevote
@ -496,12 +509,12 @@ func TestLockPOLRelock(t *testing.T) {
partSize := cs1.state.Params.BlockPartSizeBytes
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlockHeader(), 1)
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader)
// everything done from perspective of cs1
@ -546,7 +559,9 @@ func TestLockPOLRelock(t *testing.T) {
<-timeoutWaitCh
//XXX: this isn't guaranteed to get there before the timeoutPropose ...
cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
<-newRoundCh
t.Log("### ONTO ROUND 1")
@ -609,11 +624,11 @@ func TestLockPOLUnlock(t *testing.T) {
partSize := cs1.state.Params.BlockPartSizeBytes
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
unlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringUnlock(), 1)
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// everything done from perspective of cs1
@ -658,7 +673,9 @@ func TestLockPOLUnlock(t *testing.T) {
lockedBlockHash := rs.LockedBlock.Hash()
//XXX: this isn't guaranteed to get there before the timeoutPropose ...
cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
<-newRoundCh
t.Log("#### ONTO ROUND 1")
@ -704,10 +721,10 @@ func TestLockPOLSafety1(t *testing.T) {
partSize := cs1.state.Params.BlockPartSizeBytes
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// start round and wait for propose and prevote
@ -745,7 +762,9 @@ func TestLockPOLSafety1(t *testing.T) {
incrementRound(vs2, vs3, vs4)
//XXX: this isn't guaranteed to get there before the timeoutPropose ...
cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
<-newRoundCh
t.Log("### ONTO ROUND 1")
@ -802,7 +821,7 @@ func TestLockPOLSafety1(t *testing.T) {
// we should prevote what we're locked on
validatePrevote(t, cs1, 2, vss[0], propBlockHash)
newStepCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRoundStep(), 1)
newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep)
// add prevotes from the earlier round
addVotes(cs1, prevotes...)
@ -825,11 +844,11 @@ func TestLockPOLSafety2(t *testing.T) {
partSize := cs1.state.Params.BlockPartSizeBytes
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
unlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringUnlock(), 1)
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// the block for R0: gets polkad but we miss it
@ -857,7 +876,9 @@ func TestLockPOLSafety2(t *testing.T) {
startTestRound(cs1, height, 1)
<-newRoundCh
cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer")
if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil {
t.Fatal(err)
}
<-proposalCh
<-voteCh // prevote
@ -882,7 +903,9 @@ func TestLockPOLSafety2(t *testing.T) {
if err := vs3.SignProposal(config.ChainID, newProp); err != nil {
t.Fatal(err)
}
cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer")
if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil {
t.Fatal(err)
}
// Add the pol votes
addVotes(cs1, prevotes...)
@ -919,9 +942,9 @@ func TestSlashingPrevotes(t *testing.T) {
vs2 := vss[1]
proposalCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringCompleteProposal() , 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringTimeoutWait() , 1)
newRoundCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringNewRound() , 1)
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// start round and wait for propose and prevote
@ -954,9 +977,9 @@ func TestSlashingPrecommits(t *testing.T) {
vs2 := vss[1]
proposalCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringCompleteProposal() , 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringTimeoutWait() , 1)
newRoundCh := subscribeToEvent(cs1.evsw,"tester",types.EventStringNewRound() , 1)
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// start round and wait for propose and prevote
@ -1000,10 +1023,10 @@ func TestHalt1(t *testing.T) {
partSize := cs1.state.Params.BlockPartSizeBytes
proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1)
timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1)
newRoundCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewRound(), 1)
newBlockCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringNewBlock(), 1)
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
// start round and wait for propose and prevote
@ -1057,3 +1080,20 @@ func TestHalt1(t *testing.T) {
panic("expected height to increment")
}
}
// subscribe subscribes the test client to the given query and returns a channel with cap = 1.
func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan interface{} {
out := make(chan interface{}, 1)
err := eventBus.Subscribe(context.Background(), testSubscriber, q, out)
if err != nil {
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q))
}
return out
}
// discardFromChan reads n values from the channel.
func discardFromChan(ch <-chan interface{}, n int) {
for i := 0; i < n; i++ {
<-ch
}
}

View File

@ -52,19 +52,19 @@ function reset(){
reset
function empty_block(){
echo "==> Starting tendermint..."
tendermint node --proxy_app=persistent_dummy &> /dev/null &
sleep 5
echo "==> Killing tendermint..."
killall tendermint
# function empty_block(){
# echo "==> Starting tendermint..."
# tendermint node --proxy_app=persistent_dummy &> /dev/null &
# sleep 5
# echo "==> Killing tendermint..."
# killall tendermint
echo "==> Copying WAL log..."
$cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_empty_block.cswal
mv consensus/test_data/new_empty_block.cswal consensus/test_data/empty_block.cswal
# echo "==> Copying WAL log..."
# $cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_empty_block.cswal
# mv consensus/test_data/new_empty_block.cswal consensus/test_data/empty_block.cswal
reset
}
# reset
# }
function many_blocks(){
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
@ -84,63 +84,63 @@ function many_blocks(){
}
function small_block1(){
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
echo "==> Starting tendermint..."
tendermint node --proxy_app=persistent_dummy &> /dev/null &
sleep 10
echo "==> Killing tendermint..."
kill -9 $PID
killall tendermint
# function small_block1(){
# bash scripts/txs/random.sh 1000 36657 &> /dev/null &
# PID=$!
# echo "==> Starting tendermint..."
# tendermint node --proxy_app=persistent_dummy &> /dev/null &
# sleep 10
# echo "==> Killing tendermint..."
# kill -9 $PID
# killall tendermint
echo "==> Copying WAL log..."
$cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_small_block1.cswal
mv consensus/test_data/new_small_block1.cswal consensus/test_data/small_block1.cswal
# echo "==> Copying WAL log..."
# $cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_small_block1.cswal
# mv consensus/test_data/new_small_block1.cswal consensus/test_data/small_block1.cswal
reset
}
# reset
# }
# block part size = 512
function small_block2(){
cat "$TMHOME/genesis.json" | jq '. + {consensus_params: {block_size_params: {max_bytes: 22020096}, block_gossip_params: {block_part_size_bytes: 512}}}' > "$TMHOME/new_genesis.json"
mv "$TMHOME/new_genesis.json" "$TMHOME/genesis.json"
bash scripts/txs/random.sh 1000 36657 &> /dev/null &
PID=$!
echo "==> Starting tendermint..."
tendermint node --proxy_app=persistent_dummy &> /dev/null &
sleep 5
echo "==> Killing tendermint..."
kill -9 $PID
killall tendermint
# # block part size = 512
# function small_block2(){
# cat "$TMHOME/genesis.json" | jq '. + {consensus_params: {block_size_params: {max_bytes: 22020096}, block_gossip_params: {block_part_size_bytes: 512}}}' > "$TMHOME/new_genesis.json"
# mv "$TMHOME/new_genesis.json" "$TMHOME/genesis.json"
# bash scripts/txs/random.sh 1000 36657 &> /dev/null &
# PID=$!
# echo "==> Starting tendermint..."
# tendermint node --proxy_app=persistent_dummy &> /dev/null &
# sleep 5
# echo "==> Killing tendermint..."
# kill -9 $PID
# killall tendermint
echo "==> Copying WAL log..."
$cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_small_block2.cswal
mv consensus/test_data/new_small_block2.cswal consensus/test_data/small_block2.cswal
# echo "==> Copying WAL log..."
# $cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_small_block2.cswal
# mv consensus/test_data/new_small_block2.cswal consensus/test_data/small_block2.cswal
reset
}
# reset
# }
case "$1" in
"small_block1")
small_block1
;;
"small_block2")
small_block2
;;
"empty_block")
empty_block
;;
# "small_block1")
# small_block1
# ;;
# "small_block2")
# small_block2
# ;;
# "empty_block")
# empty_block
# ;;
"many_blocks")
many_blocks
;;
*)
small_block1
small_block2
empty_block
# small_block1
# small_block2
# empty_block
many_blocks
esac

View File

@ -15,8 +15,8 @@ var (
// conditional on the height/round/step in the timeoutInfo.
// The timeoutInfo.Duration may be non-positive.
type TimeoutTicker interface {
Start() (bool, error)
Stop() bool
Start() error
Stop() error
Chan() <-chan timeoutInfo // on which to receive a timeout
ScheduleTimeout(ti timeoutInfo) // reset the timer

View File

@ -29,7 +29,7 @@ One for their LastCommit round, and another for the official commit round.
*/
type HeightVoteSet struct {
chainID string
height int
height int64
valSet *types.ValidatorSet
mtx sync.Mutex
@ -38,7 +38,7 @@ type HeightVoteSet struct {
peerCatchupRounds map[string][]int // keys: peer.Key; values: at most 2 rounds
}
func NewHeightVoteSet(chainID string, height int, valSet *types.ValidatorSet) *HeightVoteSet {
func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet {
hvs := &HeightVoteSet{
chainID: chainID,
}
@ -46,7 +46,7 @@ func NewHeightVoteSet(chainID string, height int, valSet *types.ValidatorSet) *H
return hvs
}
func (hvs *HeightVoteSet) Reset(height int, valSet *types.ValidatorSet) {
func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
@ -59,7 +59,7 @@ func (hvs *HeightVoteSet) Reset(height int, valSet *types.ValidatorSet) {
hvs.round = 0
}
func (hvs *HeightVoteSet) Height() int {
func (hvs *HeightVoteSet) Height() int64 {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
return hvs.height

View File

@ -47,7 +47,7 @@ func TestPeerCatchupRounds(t *testing.T) {
}
func makeVoteHR(t *testing.T, height, round int, privVals []*types.PrivValidatorFS, valIndex int) *types.Vote {
func makeVoteHR(t *testing.T, height int64, round int, privVals []*types.PrivValidatorFS, valIndex int) *types.Vote {
privVal := privVals[valIndex]
vote := &types.Vote{
ValidatorAddress: privVal.GetAddress(),

View File

@ -13,7 +13,7 @@ import (
// PeerRoundState contains the known state of a peer.
// NOTE: Read-only when returned by PeerState.GetRoundState().
type PeerRoundState struct {
Height int // Height peer is at
Height int64 // Height peer is at
Round int // Round peer is at, -1 if unknown.
Step RoundStepType // Step peer is at
StartTime time.Time // Estimated start of round 0 at this height

View File

@ -55,8 +55,10 @@ func (rs RoundStepType) String() string {
// It is Immutable when returned from ConsensusState.GetRoundState()
// TODO: Actually, only the top pointer is copied,
// so access to field pointers is still racey
// NOTE: Not thread safe. Should only be manipulated by functions downstream
// of the cs.receiveRoutine
type RoundState struct {
Height int // Height we are working on
Height int64 // Height we are working on
Round int
Step RoundStepType
StartTime time.Time
@ -76,11 +78,14 @@ type RoundState struct {
// RoundStateEvent returns the H/R/S of the RoundState as an event.
func (rs *RoundState) RoundStateEvent() types.EventDataRoundState {
// XXX: copy the RoundState
// if we want to avoid this, we may need synchronous events after all
rs_ := *rs
edrs := types.EventDataRoundState{
Height: rs.Height,
Round: rs.Round,
Step: rs.Step.String(),
RoundState: rs,
RoundState: &rs_,
}
return edrs
}

View File

@ -6,8 +6,11 @@ import (
"fmt"
"hash/crc32"
"io"
"path/filepath"
"time"
"github.com/pkg/errors"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
auto "github.com/tendermint/tmlibs/autofile"
@ -29,7 +32,7 @@ type TimedWALMessage struct {
// EndHeightMessage marks the end of the given height inside WAL.
// @internal used by scripts/cutWALUntil util.
type EndHeightMessage struct {
Height uint64 `json:"height"`
Height int64 `json:"height"`
}
type WALMessage interface{}
@ -45,11 +48,22 @@ var _ = wire.RegisterInterface(
//--------------------------------------------------------
// Simple write-ahead logger
// WAL is an interface for any write-ahead logger.
type WAL interface {
Save(WALMessage)
Group() *auto.Group
SearchForEndHeight(height int64) (gr *auto.GroupReader, found bool, err error)
Start() error
Stop() error
Wait()
}
// Write ahead logger writes msgs to disk before they are processed.
// Can be used for crash-recovery and deterministic replay
// TODO: currently the wal is overwritten during replay catchup
// give it a mode so it's either reading or appending - must read to end to start appending again
type WAL struct {
type baseWAL struct {
cmn.BaseService
group *auto.Group
@ -58,38 +72,47 @@ type WAL struct {
enc *WALEncoder
}
func NewWAL(walFile string, light bool) (*WAL, error) {
func NewWAL(walFile string, light bool) (*baseWAL, error) {
err := cmn.EnsureDir(filepath.Dir(walFile), 0700)
if err != nil {
return nil, errors.Wrap(err, "failed to ensure WAL directory is in place")
}
group, err := auto.OpenGroup(walFile)
if err != nil {
return nil, err
}
wal := &WAL{
wal := &baseWAL{
group: group,
light: light,
enc: NewWALEncoder(group),
}
wal.BaseService = *cmn.NewBaseService(nil, "WAL", wal)
wal.BaseService = *cmn.NewBaseService(nil, "baseWAL", wal)
return wal, nil
}
func (wal *WAL) OnStart() error {
func (wal *baseWAL) Group() *auto.Group {
return wal.group
}
func (wal *baseWAL) OnStart() error {
size, err := wal.group.Head.Size()
if err != nil {
return err
} else if size == 0 {
wal.Save(EndHeightMessage{0})
}
_, err = wal.group.Start()
err = wal.group.Start()
return err
}
func (wal *WAL) OnStop() {
func (wal *baseWAL) OnStop() {
wal.BaseService.OnStop()
wal.group.Stop()
}
// called in newStep and for each pass in receiveRoutine
func (wal *WAL) Save(msg WALMessage) {
func (wal *baseWAL) Save(msg WALMessage) {
if wal == nil {
return
}
@ -119,7 +142,7 @@ func (wal *WAL) Save(msg WALMessage) {
// Group reader will be nil if found equals false.
//
// CONTRACT: caller must close group reader.
func (wal *WAL) SearchForEndHeight(height uint64) (gr *auto.GroupReader, found bool, err error) {
func (wal *baseWAL) SearchForEndHeight(height int64) (gr *auto.GroupReader, found bool, err error) {
var msg *TimedWALMessage
// NOTE: starting from the last file in the group because we're usually
@ -151,7 +174,6 @@ func (wal *WAL) SearchForEndHeight(height uint64) (gr *auto.GroupReader, found b
}
}
}
gr.Close()
}
@ -250,7 +272,7 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
}
var nn int
var res *TimedWALMessage
var res *TimedWALMessage // nolint: gosimple
res = wire.ReadBinary(&TimedWALMessage{}, bytes.NewBuffer(data), int(length), &nn, &err).(*TimedWALMessage)
if err != nil {
return nil, fmt.Errorf("failed to decode data: %v", err)
@ -277,3 +299,14 @@ func readSeparator(r io.Reader) error {
}
return nil
}
type nilWAL struct{}
func (nilWAL) Save(m WALMessage) {}
func (nilWAL) Group() *auto.Group { return nil }
func (nilWAL) SearchForEndHeight(height int64) (gr *auto.GroupReader, found bool, err error) {
return nil, false, nil
}
func (nilWAL) Start() error { return nil }
func (nilWAL) Stop() error { return nil }
func (nilWAL) Wait() {}

View File

@ -2,10 +2,13 @@ package consensus
import (
"bytes"
"crypto/rand"
"path"
"sync"
"testing"
"time"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/consensus/types"
tmtypes "github.com/tendermint/tendermint/types"
cmn "github.com/tendermint/tmlibs/common"
@ -45,8 +48,8 @@ func TestSearchForEndHeight(t *testing.T) {
t.Fatal(err)
}
h := 3
gr, found, err := wal.SearchForEndHeight(uint64(h))
h := int64(3)
gr, found, err := wal.SearchForEndHeight(h)
assert.NoError(t, err, cmn.Fmt("expected not to err on height %d", h))
assert.True(t, found, cmn.Fmt("expected to find end height for %d", h))
assert.NotNil(t, gr, "expected group not to be nil")
@ -58,5 +61,67 @@ func TestSearchForEndHeight(t *testing.T) {
rs, ok := msg.Msg.(tmtypes.EventDataRoundState)
assert.True(t, ok, "expected message of type EventDataRoundState")
assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height"))
}
var initOnce sync.Once
func registerInterfacesOnce() {
initOnce.Do(func() {
var _ = wire.RegisterInterface(
struct{ WALMessage }{},
wire.ConcreteType{[]byte{}, 0x10},
)
})
}
func nBytes(n int) []byte {
buf := make([]byte, n)
n, _ = rand.Read(buf)
return buf[:n]
}
func benchmarkWalDecode(b *testing.B, n int) {
registerInterfacesOnce()
buf := new(bytes.Buffer)
enc := NewWALEncoder(buf)
data := nBytes(n)
enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second)})
encoded := buf.Bytes()
b.ResetTimer()
for i := 0; i < b.N; i++ {
buf.Reset()
buf.Write(encoded)
dec := NewWALDecoder(buf)
if _, err := dec.Decode(); err != nil {
b.Fatal(err)
}
}
b.ReportAllocs()
}
func BenchmarkWalDecode512B(b *testing.B) {
benchmarkWalDecode(b, 512)
}
func BenchmarkWalDecode10KB(b *testing.B) {
benchmarkWalDecode(b, 10*1024)
}
func BenchmarkWalDecode100KB(b *testing.B) {
benchmarkWalDecode(b, 100*1024)
}
func BenchmarkWalDecode1MB(b *testing.B) {
benchmarkWalDecode(b, 1024*1024)
}
func BenchmarkWalDecode10MB(b *testing.B) {
benchmarkWalDecode(b, 10*1024*1024)
}
func BenchmarkWalDecode100MB(b *testing.B) {
benchmarkWalDecode(b, 100*1024*1024)
}
func BenchmarkWalDecode1GB(b *testing.B) {
benchmarkWalDecode(b, 1024*1024*1024)
}

View File

@ -0,0 +1,238 @@
# ADR 006: Trust Metric Design
## Context
The proposed trust metric will allow Tendermint to maintain local trust rankings for peers it has directly interacted with, which can then be used to implement soft security controls. The calculations were obtained from the [TrustGuard](https://dl.acm.org/citation.cfm?id=1060808) project.
### Background
The Tendermint Core project developers would like to improve Tendermint security and reliability by keeping track of the level of trustworthiness peers have demonstrated within the peer-to-peer network. This way, undesirable outcomes from peers will not immediately result in them being dropped from the network (potentially causing drastic changes to take place). Instead, peers' behavior can be monitored with appropriate metrics, and a peer can be removed from the network once Tendermint Core is certain it is a threat. For example, when the PEXReactor requests peer network addresses from an already known peer and the returned network addresses are unreachable, this untrustworthy behavior should be tracked. Returning a few bad network addresses probably shouldn't cause a peer to be dropped, while excessive amounts of this behavior do qualify the peer for being dropped.
Trust metrics can be circumvented by malicious nodes through the use of strategic oscillation techniques, which adapt the malicious node's behavior pattern in order to maximize its goals. For instance, if the malicious node learns that the time interval of the Tendermint trust metric is *X* hours, then it could wait *X* hours in between malicious activities. We could try to combat this issue by increasing the interval length, yet this will make the system less adaptive to recent events.
Instead, keeping shorter intervals but retaining a history of interval values gives our metric the flexibility needed to keep the network stable, while also making it resilient against a strategic malicious node in the Tendermint peer-to-peer network. The metric can also access trust data over a rather long period of time without greatly increasing its history size: older history values are aggregated over a larger number of intervals, while recent intervals keep their full precision. This approach is referred to as fading memories, and closely resembles the way human beings remember their experiences. For example, with *m* = 4 history slots, letting each successive slot aggregate twice as many raw intervals as the previous one (1, 2, 4, 8) covers 2^4 - 1 = 15 intervals. The trade-off to using history data is that the interval values should be preserved in between executions of the node.
### References
M. Srivatsa, L. Xiong, and L. Liu, “TrustGuard: Countering Vulnerabilities in Reputation Management for Decentralized Overlay Networks,” in *Proceedings of the 14th International Conference on World Wide Web (WWW '05)*, pp. 422-431, May 2005.
## Decision
The proposed trust metric will allow a developer to inform the trust metric store of all good and bad events relevant to a peer's behavior, and at any time, the metric can be queried for a peer's current trust ranking.
The three subsections below will cover the process being considered for calculating the trust ranking, the concept of the trust metric store, and the interface for the trust metric.
### Proposed Process
The proposed trust metric will count good and bad events relevant to the object, and calculate the percent of counters that are good over an interval with a predefined duration. This is the procedure that will continue for the life of the trust metric. When the trust metric is queried for the current **trust value**, a resilient equation will be utilized to perform the calculation.
The equation being proposed resembles a Proportional-Integral-Derivative (PID) controller used in control systems. The proportional component allows us to be sensitive to the value of the most recent interval, the integral component allows us to incorporate trust values stored in the history data, and the derivative component allows us to give weight to sudden changes in a peer's behavior. We compute the trust value of a peer in interval *i* based on its current trust ranking, its trust ranking history prior to interval *i* (over the past *maxH* intervals), and its trust ranking fluctuation. We will break the equation up into its three components.
```math
(1) Proportional Value = a * R[i]
```
where *R*[*i*] denotes the raw trust value at time interval *i* (with *i* == 0 being the current time) and *a* is the weight applied to the contribution of the current reports. The next component of our equation uses a weighted sum over the last *maxH* intervals to calculate the history value for time *i*:
`H[i] = ` ![formula1](img/formula1.png "Weighted Sum Formula")
The weights can be chosen either optimistically or pessimistically. An optimistic weight creates larger weights for newer history data values, while the pessimistic weight creates larger weights for time intervals with lower scores. The default weights used during the calculation of the history value are optimistic and calculated as *Wk* = 0.8^*k*, for time interval *k*. With the history value available, we can now finish calculating the integral value:
```math
(2) Integral Value = b * H[i]
```
Where *H*[*i*] denotes the history value at time interval *i* and *b* is the weight applied to the contribution of past performance for the object being measured. The derivative component will be calculated as follows:
```math
D[i] = R[i] - H[i]
(3) Derivative Value = c(D[i]) * D[i]
```
Where the value of *c* is selected based on the *D*[*i*] value relative to zero. The default selection process makes *c* equal to 0 unless *D*[*i*] is a negative value, in which case *c* is equal to 1. The result is that the maximum penalty is applied when current behavior is lower than previously experienced behavior. If the current behavior is better than the previously experienced behavior, then the Derivative Value has no impact on the trust value. With the three components brought together, our trust value equation is calculated as follows:
```math
TrustValue[i] = a * R[i] + b * H[i] + c(D[i]) * D[i]
```
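To make the three components concrete, the following is a minimal, self-contained Go sketch of the calculation. It is an illustration only: the weights *a* = 0.4 and *b* = 0.6 are assumed for the example, and the function names and raw-value bookkeeping are hypothetical rather than the actual trust metric API.
```go
package main

import (
	"fmt"
	"math"
)

// rawValue is R[i]: the fraction of good events in one interval.
func rawValue(good, bad int) float64 {
	if good+bad == 0 {
		return 1 // no data yet; assume the best (illustrative choice)
	}
	return float64(good) / float64(good+bad)
}

// historyValue computes H[i] as a normalized weighted sum over past
// raw values (newest first), using the optimistic weights Wk = 0.8^k.
func historyValue(past []float64) float64 {
	var sum, norm float64
	for k, r := range past {
		w := math.Pow(0.8, float64(k))
		sum += w * r
		norm += w
	}
	if norm == 0 {
		return 0
	}
	return sum / norm
}

// trustValue combines the components: a*R[i] + b*H[i] + c(D[i])*D[i],
// where c is 1 only when D[i] < 0, so only drops in behavior are penalized.
func trustValue(r, h, a, b float64) float64 {
	d := r - h
	c := 0.0
	if d < 0 {
		c = 1.0
	}
	return a*r + b*h + c*d
}

func main() {
	past := []float64{0.9, 0.8, 1.0} // R values for intervals i-1, i-2, i-3
	r := rawValue(3, 7)              // a bad current interval: R[i] = 0.3
	h := historyValue(past)
	fmt.Printf("R=%.2f H=%.2f TrustValue=%.2f\n", r, h, trustValue(r, h, 0.4, 0.6))
}
```
Note how a sudden drop (R below H) triggers the derivative penalty, while an improvement leaves the trust value driven by the proportional and integral terms alone.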
As a performance optimization that keeps the amount of raw interval data being saved to a reasonable size *m*, while still allowing us to represent 2^*m* - 1 history intervals, we can employ the fading memories technique, which trades space and time complexity for the precision of the history data values by summarizing larger quantities of less recent values. While our equation above attempts to access up to *maxH* intervals (which can be 2^*m* - 1), we will map those requests down to *m* values using equation 4 below:
```math
(4) j = index, where index > 0
```
Where *j* is one of the *(0, 1, 2, …, m - 1)* indices used to access history interval data. Now we can access the raw intervals using the following calculations:
```math
R[0] = raw data for current time interval
```
`R[j] = ` ![formula2](img/formula2.png "Fading Memories Formula")
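The exact mapping is given by the fading memories formula above; as a hedged illustration of the idea, the following Go sketch assumes the common scheme in which bucket *j* aggregates 2^*j* intervals. The function name and the clamping behavior are assumptions, not the actual implementation.
```go
// bucketFor maps a requested history interval (index 1 = the most
// recent past interval) onto one of m fading-memory buckets, assuming
// the common scheme where bucket j aggregates 2^j intervals, so that
// m buckets cover 2^m - 1 intervals in total.
func bucketFor(index, m int) int {
	j := 0
	for index > 1 { // compute floor(log2(index))
		index >>= 1
		j++
	}
	if j > m-1 {
		j = m - 1 // clamp requests beyond the window to the oldest bucket
	}
	return j
}
```
With *m* = 8, for example, the metric can answer queries up to 255 intervals back while storing only 8 aggregated values: bucketFor(1, 8) == 0, bucketFor(3, 8) == 1, bucketFor(255, 8) == 7.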
### Trust Metric Store
Similar to the P2P subsystem AddrBook, the trust metric store will maintain information relevant to Tendermint peers. Additionally, the trust metric store will ensure that trust metrics will only be active for peers that a node is currently and directly engaged with.
Reactors will provide a peer key to the trust metric store in order to retrieve the associated trust metric. The trust metric can then record new positive and negative events experienced by the reactor, as well as provide the current trust score calculated by the metric.
When the node is shutting down, the trust metric store will save history data for trust metrics associated with all known peers. This saved information allows experiences with a peer to be preserved across node executions, which can span a tracking window of days or weeks. The trust history data is loaded automatically during OnStart.
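A minimal sketch of what this persistence could look like, assuming a JSON encoding and the `tmlibs` key/value `dbm.DB` interface; the `peerHistory` layout and the key scheme are illustrative, not the actual implementation.
```go
import (
	"encoding/json"

	dbm "github.com/tendermint/tmlibs/db"
)

// peerHistory is an assumed on-disk layout for one peer's interval data.
type peerHistory struct {
	NumIntervals int       `json:"num_intervals"`
	Values       []float64 `json:"values"` // aggregated history values
}

// saveHistories persists each peer's history under a namespaced key,
// e.g. during OnStop; loading during OnStart would do the reverse with db.Get.
func saveHistories(db dbm.DB, hist map[string]peerHistory) error {
	for key, h := range hist {
		raw, err := json.Marshal(h)
		if err != nil {
			return err
		}
		db.Set([]byte("trusthistory:"+key), raw)
	}
	return nil
}
```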
### Interface Detailed Design
Each trust metric allows for the recording of positive/negative events, querying the current trust value/score, and the stopping/pausing of tracking over time intervals. This can be seen below:
```go
// TrustMetric - keeps track of peer reliability
type TrustMetric struct {
// Private elements.
}
// Pause tells the metric to pause recording data over time intervals.
// All method calls that indicate events will unpause the metric
func (tm *TrustMetric) Pause() {}
// Stop tells the metric to stop recording data over time intervals
func (tm *TrustMetric) Stop() {}
// BadEvents indicates that one or more undesirable events took place
func (tm *TrustMetric) BadEvents(num int) {}
// GoodEvents indicates that one or more desirable events took place
func (tm *TrustMetric) GoodEvents(num int) {}
// TrustValue gets the dependable trust value; always between 0 and 1
func (tm *TrustMetric) TrustValue() float64 {}
// TrustScore gets a score, based on the trust value, that is always between 0 and 100
func (tm *TrustMetric) TrustScore() int {}
// NewMetric returns a trust metric with the default configuration
func NewMetric() *TrustMetric {}
//------------------------------------------------------------------------------------------------
// For example
tm := NewMetric()
tm.BadEvents(1)
score := tm.TrustScore()
tm.Stop()
```
Some of the trust metric parameters can be configured. The weight values should probably be left alone in most cases, while the time durations for the tracking window and individual time interval deserve consideration.
```go
// TrustMetricConfig - Configures the weight functions and time intervals for the metric
type TrustMetricConfig struct {
// Determines the percentage given to current behavior
ProportionalWeight float64
// Determines the percentage given to prior behavior
IntegralWeight float64
// The window of time that the trust metric will track events across.
// This can be set to cover many days without issue
TrackingWindow time.Duration
// Each interval should be short for adaptability.
// Less than 30 seconds is too sensitive,
// and greater than 5 minutes will make the metric numb
IntervalLength time.Duration
}
// DefaultConfig returns a config with values that have been tested and produce desirable results
func DefaultConfig() TrustMetricConfig {}
// NewMetricWithConfig returns a trust metric with a custom configuration
func NewMetricWithConfig(tmc TrustMetricConfig) *TrustMetric {}
//------------------------------------------------------------------------------------------------
// For example
config := TrustMetricConfig{
TrackingWindow: time.Minute * 60 * 24, // one day
IntervalLength: time.Minute * 2,
}
tm := NewMetricWithConfig(config)
tm.BadEvents(10)
tm.Pause()
tm.GoodEvents(1) // becomes active again
```
A trust metric store should be created with a DB that has persistent storage so it can save history data across node executions. All trust metrics instantiated by the store will be created with the provided TrustMetricConfig configuration.
When you attempt to fetch the trust metric for a peer, and an entry does not exist in the trust metric store, a new metric is automatically created and an entry is made in the store.
In addition to the fetching method, GetPeerTrustMetric, the trust metric store provides a method to call when a peer has disconnected from the node. This is so the metric can be paused (history data will not be saved) during periods when the node is not having direct experiences with the peer.
```go
// TrustMetricStore - Manages all trust metrics for peers
type TrustMetricStore struct {
cmn.BaseService
// Private elements
}
// OnStart implements Service
func (tms *TrustMetricStore) OnStart() error {}
// OnStop implements Service
func (tms *TrustMetricStore) OnStop() {}
// NewTrustMetricStore returns a store that saves data to the DB
// and uses the config when creating new trust metrics
func NewTrustMetricStore(db dbm.DB, tmc TrustMetricConfig) *TrustMetricStore {}
// Size returns the number of entries in the trust metric store
func (tms *TrustMetricStore) Size() int {}
// GetPeerTrustMetric returns a trust metric by peer key
func (tms *TrustMetricStore) GetPeerTrustMetric(key string) *TrustMetric {}
// PeerDisconnected pauses the trust metric associated with the peer identified by the key
func (tms *TrustMetricStore) PeerDisconnected(key string) {}
//------------------------------------------------------------------------------------------------
// For example
db := dbm.NewDB("trusthistory", "goleveldb", dirPathStr)
tms := NewTrustMetricStore(db, DefaultConfig())
tm := tms.GetPeerTrustMetric(key)
tm.BadEvents(1)
tms.PeerDisconnected(key)
```
## Status
Approved.
## Consequences
### Positive
- The trust metric will allow Tendermint to make non-binary security and reliability decisions
- Will help Tendermint implement deterrents that provide soft security controls, yet avoid disruption on the network
- Will provide useful profiling information when analyzing performance over time related to peer interaction
### Negative
- Requires saving the trust metric history data across node executions
### Neutral
- Keep in mind that, with this implementation, good events need to be recorded just as bad events are

Binary file not shown (added, size: 9.6 KiB)

Binary file not shown (added, size: 5.8 KiB)

View File

@ -106,7 +106,7 @@ ABCI Servers
+------------------------------------------------------------------+--------------------+--------------+
| `Spearmint <https://github.com/dennismckinnon/spearmint>`__ | Dennis Mckinnon | Javascript |
+------------------------------------------------------------------+--------------------+--------------+
| `py-tendermint <https://github.com/davebryson/py-tendermint>`__ | Dave Bryson | Python |
| `py-abci <https://github.com/davebryson/py-abci>`__ | Dave Bryson | Python |
+------------------------------------------------------------------+--------------------+--------------+
Deployment Tools

View File

@ -5,7 +5,7 @@ As a general purpose blockchain engine, Tendermint is agnostic to the
application you want to run. So, to run a complete blockchain that does
something useful, you must start two programs: one is Tendermint Core,
the other is your application, which can be written in any programming
language. Recall from `the intro to ABCI <introduction.rst#ABCI-Overview>`__ that
language. Recall from `the intro to ABCI <introduction.html#ABCI-Overview>`__ that
Tendermint Core handles all the p2p and consensus stuff, and just
forwards transactions to the application when they need to be validated,
or when they're ready to be committed to a block.

View File

@ -15,7 +15,7 @@ Install Go
^^^^^^^^^^
Make sure you have `installed Go <https://golang.org/doc/install>`__ and
set the ``GOPATH``.
set the ``GOPATH``. You should also put ``GOPATH/bin`` on your ``PATH``.
Get Source Code
^^^^^^^^^^^^^^^

View File

@ -98,7 +98,7 @@ This is to protect anyone from swapping votes between chains to fake (or
frame) a validator. Also note that this ``chainID`` is in the
``genesis.json`` from *Tendermint*, not the ``genesis.json`` from the
basecoin app (`that is a different
chainID... <https://github.com/tendermint/basecoin/issues/32>`__).
chainID... <https://github.com/cosmos/cosmos-sdk/issues/32>`__).
Once we have those votes, and we calculated the proper `sign
bytes <https://godoc.org/github.com/tendermint/tendermint/types#Vote.WriteSignBytes>`__
@ -136,7 +136,7 @@ Block Hash
The `block
hash <https://godoc.org/github.com/tendermint/tendermint/types#Block.Hash>`__
is the `Simple Tree hash <Merkle-Trees#simple-tree-with-dictionaries>`__
is the `Simple Tree hash <./merkle.html#simple-tree-with-dictionaries>`__
of the fields of the block ``Header`` encoded as a list of
``KVPair``\ s.
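For intuition only, here is a small, self-contained Go sketch of hashing a dictionary with a simple binary tree: hash each key/value pair as a leaf in sorted key order, then fold the leaves pairwise into a root. The string encoding and SHA-256 are assumptions for illustration and are not Tendermint's actual KVPair wire encoding or hash choice.
```go
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// leafHash hashes one key/value pair; a real implementation would use a
// length-prefixed KVPair encoding, this string form is illustrative only.
func leafHash(k, v string) []byte {
	h := sha256.Sum256([]byte(k + "=" + v))
	return h[:]
}

// simpleTreeRoot folds a list of hashes pairwise into a root hash,
// carrying odd elements up a level, as in a simple binary tree.
func simpleTreeRoot(hashes [][]byte) []byte {
	for len(hashes) > 1 {
		var next [][]byte
		for i := 0; i < len(hashes); i += 2 {
			if i+1 == len(hashes) {
				next = append(next, hashes[i])
				continue
			}
			h := sha256.Sum256(append(hashes[i], hashes[i+1]...))
			next = append(next, h[:])
		}
		hashes = next
	}
	return hashes[0]
}

func main() {
	header := map[string]string{"ChainID": "test-chain", "Height": "42"}
	keys := make([]string, 0, len(header))
	for k := range header {
		keys = append(keys, k)
	}
	sort.Strings(keys) // dictionaries are hashed in sorted key order
	var leaves [][]byte
	for _, k := range keys {
		leaves = append(leaves, leafHash(k, header[k]))
	}
	fmt.Printf("%x\n", simpleTreeRoot(leaves))
}
```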

View File

@ -6,9 +6,9 @@ For an overview of Merkle trees, see
There are two types of Merkle trees used in Tendermint.
- ```IAVL+ Tree`` <#iavl-tree>`__: An immutable self-balancing binary
- **IAVL+ Tree**: An immutable self-balancing binary
tree for persistent application state
- ```Simple Tree`` <#simple-tree>`__: A simple compact binary tree for
- **Simple Tree**: A simple compact binary tree for
a static list of items
IAVL+ Tree

glide.lock generated
View File

@ -1,8 +1,8 @@
hash: dce4a972f0e46b3c5e2b2b12913cde282eaaa7f5d7146def47fa509ceccbfe95
updated: 2017-11-28T04:03:55.53240986Z
hash: 09fc7f59ca6b718fe236368bb55f4801455295cfe455ea5865d544ee4dcfdc08
updated: 2017-12-06T03:31:34.476581624-05:00
imports:
- name: github.com/btcsuite/btcd
version: 8cea3866d0f7fb12d567a20744942c0d078c7d15
version: 2e60448ffcc6bf78332d1fe590260095f554dd78
subpackages:
- btcec
- name: github.com/ebuchman/fail-test
@ -28,7 +28,12 @@ imports:
- name: github.com/gogo/protobuf
version: 342cbe0a04158f6dcb03ca0079991a51a4248c02
subpackages:
- gogoproto
- jsonpb
- proto
- protoc-gen-gogo/descriptor
- sortkeys
- types
- name: github.com/golang/protobuf
version: 1e59b77b52bf8e4b449a57e6f79f21226d571845
subpackages:
@ -67,7 +72,7 @@ imports:
- name: github.com/pkg/errors
version: 645ef00459ed84a119197bfb8d8205042c6df63d
- name: github.com/rcrowley/go-metrics
version: 1f30fe9094a513ce4c700b9a54458bbb0c96996c
version: e181e095bae94582363434144c61a9653aff6e50
- name: github.com/spf13/afero
version: 8d919cbe7e2627e417f3e45c3c0e489a5b7e2536
subpackages:
@ -98,9 +103,10 @@ imports:
- leveldb/table
- leveldb/util
- name: github.com/tendermint/abci
version: 76ef8a0697c6179220a74c479b36c27a5b53008a
version: fca2b508c185b855af1446ec4afc19bdfc7b315d
subpackages:
- client
- example/code
- example/counter
- example/dummy
- server
@ -113,16 +119,17 @@ imports:
- name: github.com/tendermint/go-crypto
version: dd20358a264c772b4a83e477b0cfce4c88a7001d
- name: github.com/tendermint/go-wire
version: 2baffcb6b690057568bc90ef1d457efb150b979a
version: b6fc872b42d41158a60307db4da051dd6f179415
subpackages:
- data
- data/base58
- nowriter/tmlegacy
- name: github.com/tendermint/iavl
version: 594cc0c062a7174475f0ab654384038d77067917
subpackages:
- iavl
- name: github.com/tendermint/tmlibs
version: b854baa1fce7101c90b1d301b3359bb412f981c0
version: bfcc0217f120d3bee6730ba0789d2eb72fc2e889
subpackages:
- autofile
- cli
@ -130,13 +137,14 @@ imports:
- clist
- common
- db
- events
- flowrate
- log
- merkle
- pubsub
- pubsub/query
- test
- name: golang.org/x/crypto
version: 9f005a07e0d31d45e6656d241bb5c0f2efd4bc94
version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122
subpackages:
- curve25519
- nacl/box
@ -147,7 +155,7 @@ imports:
- ripemd160
- salsa20/salsa
- name: golang.org/x/net
version: 9dfe39835686865bff950a07b394c12a98ddc811
version: a8b9294777976932365dabb6640cf1468d95c70f
subpackages:
- context
- http2
@ -157,22 +165,22 @@ imports:
- lex/httplex
- trace
- name: golang.org/x/sys
version: b98136db334ff9cb24f28a68e3be3cb6608f7630
version: 8b4580aae2a0dd0c231a45d3ccb8434ff533b840
subpackages:
- unix
- name: golang.org/x/text
version: 88f656faf3f37f690df1a32515b479415e1a6769
version: 75cc3cad82b5f47d3fb229ddda8c5167da14f294
subpackages:
- secure/bidirule
- transform
- unicode/bidi
- unicode/norm
- name: google.golang.org/genproto
version: 891aceb7c239e72692819142dfca057bdcbfcb96
version: 7f0da29060c682909f650ad8ed4e515bd74fa12a
subpackages:
- googleapis/rpc/status
- name: google.golang.org/grpc
version: f7bf885db0b7479a537ec317c6e48ce53145f3db
version: 401e0e00e4bb830a10496d64cd95e068c5bf50de
subpackages:
- balancer
- codes

View File

@ -18,7 +18,7 @@ import:
- package: github.com/spf13/viper
version: v1.0.0
- package: github.com/tendermint/abci
version: ~0.7.0
version: ~v0.8.0
subpackages:
- client
- example/dummy
@ -26,7 +26,7 @@ import:
- package: github.com/tendermint/go-crypto
version: ~0.4.1
- package: github.com/tendermint/go-wire
version: ~0.7.1
version: ~0.7.2
subpackages:
- data
- package: github.com/tendermint/iavl
@ -34,7 +34,7 @@ import:
subpackages:
- iavl
- package: github.com/tendermint/tmlibs
version: ~0.4.1
version: ~0.5.0
subpackages:
- autofile
- cli
@ -45,6 +45,7 @@ import:
- flowrate
- log
- merkle
- pubsub
- package: golang.org/x/crypto
subpackages:
- nacl/box
@ -54,7 +55,7 @@ import:
subpackages:
- context
- package: google.golang.org/grpc
version: v1.7.0
version: v1.7.3
testImport:
- package: github.com/go-kit/kit
subpackages:

View File

@ -12,10 +12,11 @@ import (
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/certifiers"
certerr "github.com/tendermint/tendermint/certifiers/errors"
"github.com/tendermint/tendermint/lite"
liteErr "github.com/tendermint/tendermint/lite/errors"
)
// SignStatusClient combines a SignClient and StatusClient.
type SignStatusClient interface {
rpcclient.SignClient
rpcclient.StatusClient
@ -23,31 +24,36 @@ type SignStatusClient interface {
type provider struct {
node SignStatusClient
lastHeight int
lastHeight int64
}
// NewProvider can wrap any rpcclient to expose it as
// a read-only provider.
func NewProvider(node SignStatusClient) certifiers.Provider {
func NewProvider(node SignStatusClient) lite.Provider {
return &provider{node: node}
}
// NewProvider can connects to a tendermint json-rpc endpoint
// NewHTTPProvider can connect to a tendermint json-rpc endpoint
// at the given url, and uses that as a read-only provider.
func NewHTTPProvider(remote string) certifiers.Provider {
func NewHTTPProvider(remote string) lite.Provider {
return &provider{
node: rpcclient.NewHTTP(remote, "/websocket"),
}
}
// StatusClient returns the internal node as a StatusClient
func (p *provider) StatusClient() rpcclient.StatusClient {
return p.node
}
// StoreCommit is a noop, as clients can only read from the chain...
func (p *provider) StoreCommit(_ certifiers.FullCommit) error { return nil }
func (p *provider) StoreCommit(_ lite.FullCommit) error { return nil }
// GetHash gets the most recent validator and sees if it matches
//
// TODO: improve when the rpc interface supports more functionality
func (p *provider) GetByHash(hash []byte) (certifiers.FullCommit, error) {
var fc certifiers.FullCommit
func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) {
var fc lite.FullCommit
vals, err := p.node.Validators(nil)
// if we get no validators, or a different height, return an error
if err != nil {
@ -56,13 +62,13 @@ func (p *provider) GetByHash(hash []byte) (certifiers.FullCommit, error) {
p.updateHeight(vals.BlockHeight)
vhash := types.NewValidatorSet(vals.Validators).Hash()
if !bytes.Equal(hash, vhash) {
return fc, certerr.ErrCommitNotFound()
return fc, liteErr.ErrCommitNotFound()
}
return p.seedFromVals(vals)
}
// GetByHeight gets the validator set by height
func (p *provider) GetByHeight(h int) (fc certifiers.FullCommit, err error) {
func (p *provider) GetByHeight(h int64) (fc lite.FullCommit, err error) {
commit, err := p.node.Commit(&h)
if err != nil {
return fc, err
@ -70,7 +76,8 @@ func (p *provider) GetByHeight(h int) (fc certifiers.FullCommit, err error) {
return p.seedFromCommit(commit)
}
func (p *provider) LatestCommit() (fc certifiers.FullCommit, err error) {
// LatestCommit returns the newest commit stored.
func (p *provider) LatestCommit() (fc lite.FullCommit, err error) {
commit, err := p.GetLatestCommit()
if err != nil {
return fc, err
@ -89,24 +96,25 @@ func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) {
return p.node.Commit(&status.LatestBlockHeight)
}
func CommitFromResult(result *ctypes.ResultCommit) certifiers.Commit {
return (certifiers.Commit)(result.SignedHeader)
// CommitFromResult ...
func CommitFromResult(result *ctypes.ResultCommit) lite.Commit {
return (lite.Commit)(result.SignedHeader)
}
func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (certifiers.FullCommit, error) {
func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (lite.FullCommit, error) {
// now get the commits and build a full commit
commit, err := p.node.Commit(&vals.BlockHeight)
if err != nil {
return certifiers.FullCommit{}, err
return lite.FullCommit{}, err
}
fc := certifiers.NewFullCommit(
fc := lite.NewFullCommit(
CommitFromResult(commit),
types.NewValidatorSet(vals.Validators),
)
return fc, nil
}
func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc certifiers.FullCommit, err error) {
func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc lite.FullCommit, err error) {
fc.Commit = CommitFromResult(commit)
// now get the proper validators
@ -118,7 +126,7 @@ func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc certifiers.Fu
// make sure they match the commit (as we cannot enforce height)
vset := types.NewValidatorSet(vals.Validators)
if !bytes.Equal(vset.Hash(), commit.Header.ValidatorsHash) {
return fc, certerr.ErrValidatorsChanged()
return fc, liteErr.ErrValidatorsChanged()
}
p.updateHeight(commit.Header.Height)
@ -126,7 +134,7 @@ func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc certifiers.Fu
return fc, nil
}
func (p *provider) updateHeight(h int) {
func (p *provider) updateHeight(h int64) {
if h > p.lastHeight {
p.lastHeight = h
}

View File

@ -1,17 +1,15 @@
package client_test
package client
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/lite"
liteErr "github.com/tendermint/tendermint/lite/errors"
rpcclient "github.com/tendermint/tendermint/rpc/client"
rpctest "github.com/tendermint/tendermint/rpc/test"
"github.com/tendermint/tendermint/certifiers"
"github.com/tendermint/tendermint/certifiers/client"
certerr "github.com/tendermint/tendermint/certifiers/errors"
)
func TestProvider(t *testing.T) {
@ -20,11 +18,12 @@ func TestProvider(t *testing.T) {
cfg := rpctest.GetConfig()
rpcAddr := cfg.RPC.ListenAddress
chainID := cfg.ChainID
p := client.NewHTTPProvider(rpcAddr)
p := NewHTTPProvider(rpcAddr)
require.NotNil(t, p)
// let it produce some blocks
time.Sleep(500 * time.Millisecond)
err := rpcclient.WaitForHeight(p.(*provider).node, 6, nil)
require.Nil(err)
// let's get the highest block
seed, err := p.LatestCommit()
@ -36,7 +35,7 @@ func TestProvider(t *testing.T) {
// let's check this is valid somehow
assert.Nil(seed.ValidateBasic(chainID))
cert := certifiers.NewStatic(chainID, seed.Validators)
cert := lite.NewStatic(chainID, seed.Validators)
// historical queries now work :)
lower := sh - 5
@ -54,7 +53,7 @@ func TestProvider(t *testing.T) {
// get by hash fails without match
seed, err = p.GetByHash([]byte("foobar"))
assert.NotNil(err)
assert.True(certerr.IsCommitNotFoundErr(err))
assert.True(liteErr.IsCommitNotFoundErr(err))
// storing the seed silently ignored
err = p.StoreCommit(seed)

View File

@ -1,4 +1,4 @@
package certifiers
package lite
import (
"bytes"
@ -7,7 +7,7 @@ import (
"github.com/tendermint/tendermint/types"
certerr "github.com/tendermint/tendermint/certifiers/errors"
liteErr "github.com/tendermint/tendermint/lite/errors"
)
// Certifier checks the votes to make sure the block really is signed properly.
@ -33,6 +33,7 @@ type FullCommit struct {
Validators *types.ValidatorSet `json:"validator_set"`
}
// NewFullCommit returns a new FullCommit.
func NewFullCommit(commit Commit, vals *types.ValidatorSet) FullCommit {
return FullCommit{
Commit: commit,
@ -40,13 +41,15 @@ func NewFullCommit(commit Commit, vals *types.ValidatorSet) FullCommit {
}
}
func (c Commit) Height() int {
// Height returns the height of the header.
func (c Commit) Height() int64 {
if c.Header == nil {
return 0
}
return c.Header.Height
}
// ValidatorsHash returns the hash of the validator set.
func (c Commit) ValidatorsHash() []byte {
if c.Header == nil {
return nil
@ -75,7 +78,7 @@ func (c Commit) ValidateBasic(chainID string) error {
// make sure the header and commit match (height and hash)
if c.Commit.Height() != c.Header.Height {
return certerr.ErrHeightMismatch(c.Commit.Height(), c.Header.Height)
return liteErr.ErrHeightMismatch(c.Commit.Height(), c.Header.Height)
}
hhash := c.Header.Hash()
chash := c.Commit.BlockID.Hash

View File

@ -1,5 +1,5 @@
/*
Package certifiers allows you to securely validate headers
Package lite allows you to securely validate headers
without a full node.
This library pulls together all the crypto and algorithms,
@ -130,4 +130,4 @@ to manually verify the new validator set hash using off-chain
means (the same as getting the initial hash).
*/
package certifiers
package lite

View File

@ -1,9 +1,9 @@
package certifiers
package lite
import (
"github.com/tendermint/tendermint/types"
certerr "github.com/tendermint/tendermint/certifiers/errors"
liteErr "github.com/tendermint/tendermint/lite/errors"
)
var _ Certifier = &Dynamic{}
@ -19,33 +19,39 @@ var _ Certifier = &Dynamic{}
// going forward.
type Dynamic struct {
cert *Static
lastHeight int
lastHeight int64
}
func NewDynamic(chainID string, vals *types.ValidatorSet, height int) *Dynamic {
// NewDynamic returns a new dynamic certifier.
func NewDynamic(chainID string, vals *types.ValidatorSet, height int64) *Dynamic {
return &Dynamic{
cert: NewStatic(chainID, vals),
lastHeight: height,
}
}
// ChainID returns the chain id of this certifier.
func (c *Dynamic) ChainID() string {
return c.cert.ChainID()
}
// Validators returns the validators of this certifier.
func (c *Dynamic) Validators() *types.ValidatorSet {
return c.cert.vSet
}
// Hash returns the hash of this certifier.
func (c *Dynamic) Hash() []byte {
return c.cert.Hash()
}
func (c *Dynamic) LastHeight() int {
// LastHeight returns the last height of this certifier.
func (c *Dynamic) LastHeight() int64 {
return c.lastHeight
}
// Certify handles this with
// Certify will verify whether the commit is valid and will update the height if it is or return an
// error if it is not.
func (c *Dynamic) Certify(check Commit) error {
err := c.cert.Certify(check)
if err == nil {
@ -63,7 +69,7 @@ func (c *Dynamic) Update(fc FullCommit) error {
// ignore all checkpoints in the past -> only to the future
h := fc.Height()
if h <= c.lastHeight {
return certerr.ErrPastTime()
return liteErr.ErrPastTime()
}
// first, verify if the input is self-consistent....
@ -79,7 +85,7 @@ func (c *Dynamic) Update(fc FullCommit) error {
err = c.Validators().VerifyCommitAny(fc.Validators, c.ChainID(),
commit.BlockID, h, commit)
if err != nil {
return certerr.ErrTooMuchChange()
return liteErr.ErrTooMuchChange()
}
// looks good, we can update

View File

@ -1,4 +1,4 @@
package certifiers_test
package lite_test
import (
"testing"
@ -8,8 +8,8 @@ import (
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/certifiers"
"github.com/tendermint/tendermint/certifiers/errors"
"github.com/tendermint/tendermint/lite"
"github.com/tendermint/tendermint/lite/errors"
)
// TestDynamicCert just makes sure it still works like StaticCert
@ -18,17 +18,17 @@ func TestDynamicCert(t *testing.T) {
assert := assert.New(t)
// require := require.New(t)
keys := certifiers.GenValKeys(4)
keys := lite.GenValKeys(4)
// 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do!
vals := keys.ToValidators(20, 10)
// and a certifier based on our known set
chainID := "test-dyno"
cert := certifiers.NewDynamic(chainID, vals, 0)
cert := lite.NewDynamic(chainID, vals, 0)
cases := []struct {
keys certifiers.ValKeys
keys lite.ValKeys
vals *types.ValidatorSet
height int
height int64
first, last int // who actually signs
proper bool // true -> expect no error
changed bool // true -> expect validator change error
@ -65,12 +65,12 @@ func TestDynamicUpdate(t *testing.T) {
assert, require := assert.New(t), require.New(t)
chainID := "test-dyno-up"
keys := certifiers.GenValKeys(5)
keys := lite.GenValKeys(5)
vals := keys.ToValidators(20, 0)
cert := certifiers.NewDynamic(chainID, vals, 40)
cert := lite.NewDynamic(chainID, vals, 40)
// one valid block to give us a sense of time
h := 100
h := int64(100)
good := keys.GenCommit(chainID, h, nil, vals, []byte("foo"), 0, len(keys))
err := cert.Certify(good)
require.Nil(err, "%+v", err)
@ -81,9 +81,9 @@ func TestDynamicUpdate(t *testing.T) {
// we try to update with some blocks
cases := []struct {
keys certifiers.ValKeys
keys lite.ValKeys
vals *types.ValidatorSet
height int
height int64
first, last int // who actually signs
proper bool // true -> expect no error
changed bool // true -> expect too much change error

View File

@ -19,34 +19,39 @@ func IsCommitNotFoundErr(err error) bool {
return err != nil && (errors.Cause(err) == errCommitNotFound)
}
// ErrCommitNotFound indicates that the requested commit was not found.
func ErrCommitNotFound() error {
return errors.WithStack(errCommitNotFound)
}
// IsValidatorsChangedErr checks whether an error is due
// to a differing validator set
// to a differing validator set.
func IsValidatorsChangedErr(err error) bool {
return err != nil && (errors.Cause(err) == errValidatorsChanged)
}
// ErrValidatorsChanged indicates that the validator set was changed between two commits.
func ErrValidatorsChanged() error {
return errors.WithStack(errValidatorsChanged)
}
// IsTooMuchChangeErr checks whether an error is due to too much change
// between these validators sets
// between these validator sets.
func IsTooMuchChangeErr(err error) bool {
return err != nil && (errors.Cause(err) == errTooMuchChange)
}
// ErrTooMuchChange indicates that the underlying validator set was changed by >1/3.
func ErrTooMuchChange() error {
return errors.WithStack(errTooMuchChange)
}
// IsPastTimeErr ...
func IsPastTimeErr(err error) bool {
return err != nil && (errors.Cause(err) == errPastTime)
}
// ErrPastTime ...
func ErrPastTime() error {
return errors.WithStack(errPastTime)
}
@ -57,6 +62,7 @@ func IsNoPathFoundErr(err error) bool {
return err != nil && (errors.Cause(err) == errNoPathFound)
}
// ErrNoPathFound ...
func ErrNoPathFound() error {
return errors.WithStack(errNoPathFound)
}
@ -64,7 +70,7 @@ func ErrNoPathFound() error {
//--------------------------------------------
type errHeightMismatch struct {
h1, h2 int
h1, h2 int64
}
func (e errHeightMismatch) Error() string {
@ -81,6 +87,6 @@ func IsHeightMismatchErr(err error) bool {
}
// ErrHeightMismatch returns a mismatch error with stack-trace
func ErrHeightMismatch(h1, h2 int) error {
func ErrHeightMismatch(h1, h2 int64) error {
return errors.WithStack(errHeightMismatch{h1, h2})
}

View File

@ -8,8 +8,8 @@ import (
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/certifiers"
certerr "github.com/tendermint/tendermint/certifiers/errors"
"github.com/tendermint/tendermint/lite"
liteErr "github.com/tendermint/tendermint/lite/errors"
)
const (
@ -20,7 +20,7 @@ const (
)
// SaveFullCommit exports the seed in binary / go-wire style
func SaveFullCommit(fc certifiers.FullCommit, path string) error {
func SaveFullCommit(fc lite.FullCommit, path string) error {
f, err := os.Create(path)
if err != nil {
return errors.WithStack(err)
@ -33,7 +33,7 @@ func SaveFullCommit(fc certifiers.FullCommit, path string) error {
}
// SaveFullCommitJSON exports the seed in a json format
func SaveFullCommitJSON(fc certifiers.FullCommit, path string) error {
func SaveFullCommitJSON(fc lite.FullCommit, path string) error {
f, err := os.Create(path)
if err != nil {
return errors.WithStack(err)
@ -44,12 +44,13 @@ func SaveFullCommitJSON(fc certifiers.FullCommit, path string) error {
return errors.WithStack(err)
}
func LoadFullCommit(path string) (certifiers.FullCommit, error) {
var fc certifiers.FullCommit
// LoadFullCommit loads the full commit from the file system.
func LoadFullCommit(path string) (lite.FullCommit, error) {
var fc lite.FullCommit
f, err := os.Open(path)
if err != nil {
if os.IsNotExist(err) {
return fc, certerr.ErrCommitNotFound()
return fc, liteErr.ErrCommitNotFound()
}
return fc, errors.WithStack(err)
}
@ -60,12 +61,13 @@ func LoadFullCommit(path string) (certifiers.FullCommit, error) {
return fc, errors.WithStack(err)
}
func LoadFullCommitJSON(path string) (certifiers.FullCommit, error) {
var fc certifiers.FullCommit
// LoadFullCommitJSON loads the commit from the file system in JSON format.
func LoadFullCommitJSON(path string) (lite.FullCommit, error) {
var fc lite.FullCommit
f, err := os.Open(path)
if err != nil {
if os.IsNotExist(err) {
return fc, certerr.ErrCommitNotFound()
return fc, liteErr.ErrCommitNotFound()
}
return fc, errors.WithStack(err)
}

View File

@ -10,7 +10,7 @@ import (
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tendermint/certifiers"
"github.com/tendermint/tendermint/lite"
)
func tmpFile() string {
@ -24,10 +24,10 @@ func TestSerializeFullCommits(t *testing.T) {
// some constants
appHash := []byte("some crazy thing")
chainID := "ser-ial"
h := 25
h := int64(25)
// build a fc
keys := certifiers.GenValKeys(5)
keys := lite.GenValKeys(5)
vals := keys.ToValidators(10, 0)
fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, 5)

View File

@ -24,16 +24,17 @@ import (
"github.com/pkg/errors"
"github.com/tendermint/tendermint/certifiers"
certerr "github.com/tendermint/tendermint/certifiers/errors"
"github.com/tendermint/tendermint/lite"
liteErr "github.com/tendermint/tendermint/lite/errors"
)
// nolint
const (
Ext = ".tsd"
ValDir = "validators"
CheckDir = "checkpoints"
dirPerm = os.FileMode(0755)
filePerm = os.FileMode(0644)
//filePerm = os.FileMode(0644)
)
type provider struct {
@ -43,7 +44,7 @@ type provider struct {
// NewProvider creates the parent dir and subdirs
// for validators and checkpoints as needed
func NewProvider(dir string) certifiers.Provider {
func NewProvider(dir string) lite.Provider {
valDir := filepath.Join(dir, ValDir)
checkDir := filepath.Join(dir, CheckDir)
for _, d := range []string{valDir, checkDir} {
@ -59,12 +60,13 @@ func (p *provider) encodeHash(hash []byte) string {
return hex.EncodeToString(hash) + Ext
}
func (p *provider) encodeHeight(h int) string {
func (p *provider) encodeHeight(h int64) string {
// pad up to 10^12 for height...
return fmt.Sprintf("%012d%s", h, Ext)
}
func (p *provider) StoreCommit(fc certifiers.FullCommit) error {
// StoreCommit saves a full commit after it has been verified.
func (p *provider) StoreCommit(fc lite.FullCommit) error {
// make sure the fc is self-consistent before saving
err := fc.ValidateBasic(fc.Commit.Header.ChainID)
if err != nil {
@ -85,11 +87,12 @@ func (p *provider) StoreCommit(fc certifiers.FullCommit) error {
return nil
}
func (p *provider) GetByHeight(h int) (certifiers.FullCommit, error) {
// GetByHeight returns the closest commit with height <= h.
func (p *provider) GetByHeight(h int64) (lite.FullCommit, error) {
// first we look for exact match, then search...
path := filepath.Join(p.checkDir, p.encodeHeight(h))
fc, err := LoadFullCommit(path)
if certerr.IsCommitNotFoundErr(err) {
if liteErr.IsCommitNotFoundErr(err) {
path, err = p.searchForHeight(h)
if err == nil {
fc, err = LoadFullCommit(path)
@ -98,14 +101,15 @@ func (p *provider) GetByHeight(h int) (certifiers.FullCommit, error) {
return fc, err
}
func (p *provider) LatestCommit() (fc certifiers.FullCommit, err error) {
// LatestCommit returns the newest commit stored.
func (p *provider) LatestCommit() (fc lite.FullCommit, err error) {
// Note to future: please update by 2077 to avoid rollover
return p.GetByHeight(math.MaxInt32 - 1)
}
// search for height, looks for a file with highest height < h
// return certifiers.ErrCommitNotFound() if not there...
func (p *provider) searchForHeight(h int) (string, error) {
func (p *provider) searchForHeight(h int64) (string, error) {
d, err := os.Open(p.checkDir)
if err != nil {
return "", errors.WithStack(err)
@ -121,14 +125,15 @@ func (p *provider) searchForHeight(h int) (string, error) {
sort.Strings(files)
i := sort.SearchStrings(files, desired)
if i == 0 {
return "", certerr.ErrCommitNotFound()
return "", liteErr.ErrCommitNotFound()
}
found := files[i-1]
path := filepath.Join(p.checkDir, found)
return path, errors.WithStack(err)
}
func (p *provider) GetByHash(hash []byte) (certifiers.FullCommit, error) {
// GetByHash returns a commit exactly matching this validator hash.
func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) {
path := filepath.Join(p.valDir, p.encodeHash(hash))
return LoadFullCommit(path)
}

View File

@ -10,12 +10,12 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/certifiers"
certerr "github.com/tendermint/tendermint/certifiers/errors"
"github.com/tendermint/tendermint/certifiers/files"
"github.com/tendermint/tendermint/lite"
liteErr "github.com/tendermint/tendermint/lite/errors"
"github.com/tendermint/tendermint/lite/files"
)
func checkEqual(stored, loaded certifiers.FullCommit, chainID string) error {
func checkEqual(stored, loaded lite.FullCommit, chainID string) error {
err := loaded.ValidateBasic(chainID)
if err != nil {
return err
@ -36,28 +36,28 @@ func TestFileProvider(t *testing.T) {
chainID := "test-files"
appHash := []byte("some-data")
keys := certifiers.GenValKeys(5)
keys := lite.GenValKeys(5)
count := 10
// make a bunch of seeds...
seeds := make([]certifiers.FullCommit, count)
seeds := make([]lite.FullCommit, count)
for i := 0; i < count; i++ {
// two seeds for each validator, to check how we handle dups
// (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ...
vals := keys.ToValidators(10, int64(count/2))
h := 20 + 10*i
h := int64(20 + 10*i)
check := keys.GenCommit(chainID, h, nil, vals, appHash, 0, 5)
seeds[i] = certifiers.NewFullCommit(check, vals)
seeds[i] = lite.NewFullCommit(check, vals)
}
// check provider is empty
seed, err := p.GetByHeight(20)
require.NotNil(err)
assert.True(certerr.IsCommitNotFoundErr(err))
assert.True(liteErr.IsCommitNotFoundErr(err))
seed, err = p.GetByHash(seeds[3].ValidatorsHash())
require.NotNil(err)
assert.True(certerr.IsCommitNotFoundErr(err))
assert.True(liteErr.IsCommitNotFoundErr(err))
// now add them all to the provider
for _, s := range seeds {
@ -86,11 +86,11 @@ func TestFileProvider(t *testing.T) {
seed, err = p.GetByHeight(47)
if assert.Nil(err, "%+v", err) {
// we only step by 10, so 40 must be the one below this
assert.Equal(40, seed.Height())
assert.EqualValues(40, seed.Height())
}
// and proper error for too low
_, err = p.GetByHeight(5)
assert.NotNil(err)
assert.True(certerr.IsCommitNotFoundErr(err))
assert.True(liteErr.IsCommitNotFoundErr(err))
}

View File

@ -1,4 +1,4 @@
package certifiers
package lite
import (
"time"
@ -12,14 +12,14 @@ import (
//
// It lets us simulate signing with many keys, either ed25519 or secp256k1.
// The main use case is to create a set, and call GenCommit
// to get propely signed header for testing.
// to get properly signed header for testing.
//
// You can set different weights of validators each time you call
// ToValidators, and can optionally extend the validator set later
// with Extend or ExtendSecp
type ValKeys []crypto.PrivKey
// GenValKeys produces an array of private keys to generate commits
// GenValKeys produces an array of private keys to generate commits.
func GenValKeys(n int) ValKeys {
res := make(ValKeys, n)
for i := range res {
@ -28,7 +28,7 @@ func GenValKeys(n int) ValKeys {
return res
}
// Change replaces the key at index i
// Change replaces the key at index i.
func (v ValKeys) Change(i int) ValKeys {
res := make(ValKeys, len(v))
copy(res, v)
@ -36,13 +36,13 @@ func (v ValKeys) Change(i int) ValKeys {
return res
}
// Extend adds n more keys (to remove, just take a slice)
// Extend adds n more keys (to remove, just take a slice).
func (v ValKeys) Extend(n int) ValKeys {
extra := GenValKeys(n)
return append(v, extra...)
}
// GenSecpValKeys produces an array of secp256k1 private keys to generate commits
// GenSecpValKeys produces an array of secp256k1 private keys to generate commits.
func GenSecpValKeys(n int) ValKeys {
res := make(ValKeys, n)
for i := range res {
@ -51,7 +51,7 @@ func GenSecpValKeys(n int) ValKeys {
return res
}
// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice)
// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice).
func (v ValKeys) ExtendSecp(n int) ValKeys {
extra := GenSecpValKeys(n)
return append(v, extra...)
@ -60,7 +60,7 @@ func (v ValKeys) ExtendSecp(n int) ValKeys {
// ToValidators produces a list of validators from the set of keys
// The first key has weight `init` and it increases by `inc` every step
// so we can have all the same weight, or a simple linear distribution
// (should be enough for testing)
// (should be enough for testing).
func (v ValKeys) ToValidators(init, inc int64) *types.ValidatorSet {
res := make([]*types.Validator, len(v))
for i, k := range v {
@ -69,7 +69,7 @@ func (v ValKeys) ToValidators(init, inc int64) *types.ValidatorSet {
return types.NewValidatorSet(res)
}
// signHeader properly signs the header with all keys from first to last exclusive
// signHeader properly signs the header with all keys from first to last exclusive.
func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit {
votes := make([]*types.Vote, len(v))
@ -106,7 +106,9 @@ func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey
return vote
}
func genHeader(chainID string, height int, txs types.Txs,
// Silences warning that vals can also be merkle.Hashable
// nolint: interfacer
func genHeader(chainID string, height int64, txs types.Txs,
vals *types.ValidatorSet, appHash []byte) *types.Header {
return &types.Header{
@ -122,8 +124,8 @@ func genHeader(chainID string, height int, txs types.Txs,
}
}
// GenCommit calls genHeader and signHeader and combines them into a Commit
func (v ValKeys) GenCommit(chainID string, height int, txs types.Txs,
// GenCommit calls genHeader and signHeader and combines them into a Commit.
func (v ValKeys) GenCommit(chainID string, height int64, txs types.Txs,
vals *types.ValidatorSet, appHash []byte, first, last int) Commit {
header := genHeader(chainID, height, txs, vals, appHash)
@ -134,8 +136,8 @@ func (v ValKeys) GenCommit(chainID string, height int, txs types.Txs,
return check
}
// GenFullCommit calls genHeader and signHeader and combines them into a Commit
func (v ValKeys) GenFullCommit(chainID string, height int, txs types.Txs,
// GenFullCommit calls genHeader and signHeader and combines them into a Commit.
func (v ValKeys) GenFullCommit(chainID string, height int64, txs types.Txs,
vals *types.ValidatorSet, appHash []byte, first, last int) FullCommit {
header := genHeader(chainID, height, txs, vals, appHash)

View File

@ -1,11 +1,15 @@
package certifiers
package lite
import (
"github.com/tendermint/tendermint/types"
certerr "github.com/tendermint/tendermint/certifiers/errors"
liteErr "github.com/tendermint/tendermint/lite/errors"
)
// Inquiring wraps a dynamic certifier and implements an auto-update strategy. If a call to Certify
// fails due to a change in the validator set, Inquiring will try to find a previous FullCommit which
// it can use to safely update the validator set. It uses a source provider to obtain the needed
// FullCommits. It stores properly validated data on the local system.
type Inquiring struct {
cert *Dynamic
// These are only properly validated data, from local system
@ -14,8 +18,14 @@ type Inquiring struct {
Source Provider
}
// NewInquiring returns a new Inquiring object. It uses the trusted provider to store validated
// data and the source provider to obtain missing FullCommits.
//
// Example: The trusted provider should be a CacheProvider, MemProvider or files.Provider. The source
// provider should be a client.HTTPProvider.
func NewInquiring(chainID string, fc FullCommit, trusted Provider, source Provider) *Inquiring {
// store the data in trusted
// TODO: StoreCommit() can return an error and we need to handle this.
trusted.StoreCommit(fc)
return &Inquiring{
@ -25,15 +35,18 @@ func NewInquiring(chainID string, fc FullCommit, trusted Provider, source Provid
}
}
// ChainID returns the chain id.
func (c *Inquiring) ChainID() string {
return c.cert.ChainID()
}
// Validators returns the validator set.
func (c *Inquiring) Validators() *types.ValidatorSet {
return c.cert.cert.vSet
}
func (c *Inquiring) LastHeight() int {
// LastHeight returns the last height.
func (c *Inquiring) LastHeight() int64 {
return c.cert.lastHeight
}
@ -50,7 +63,7 @@ func (c *Inquiring) Certify(commit Commit) error {
}
err = c.cert.Certify(commit)
if !certerr.IsValidatorsChangedErr(err) {
if !liteErr.IsValidatorsChangedErr(err) {
return err
}
err = c.updateToHash(commit.Header.ValidatorsHash)
@ -64,11 +77,11 @@ func (c *Inquiring) Certify(commit Commit) error {
}
// store the new checkpoint
c.trusted.StoreCommit(
NewFullCommit(commit, c.Validators()))
return nil
return c.trusted.StoreCommit(NewFullCommit(commit, c.Validators()))
}
// Update will verify if this is a valid change and update
// the certifying validator set if safe to do so.
func (c *Inquiring) Update(fc FullCommit) error {
err := c.useClosestTrust(fc.Height())
if err != nil {
@ -77,12 +90,12 @@ func (c *Inquiring) Update(fc FullCommit) error {
err = c.cert.Update(fc)
if err == nil {
c.trusted.StoreCommit(fc)
err = c.trusted.StoreCommit(fc)
}
return err
}
func (c *Inquiring) useClosestTrust(h int) error {
func (c *Inquiring) useClosestTrust(h int64) error {
closest, err := c.trusted.GetByHeight(h)
if err != nil {
return err
@ -106,14 +119,14 @@ func (c *Inquiring) updateToHash(vhash []byte) error {
}
err = c.cert.Update(fc)
// handle IsTooMuchChangeErr by using divide and conquer
if certerr.IsTooMuchChangeErr(err) {
if liteErr.IsTooMuchChangeErr(err) {
err = c.updateToHeight(fc.Height())
}
return err
}
// updateToHeight will use divide-and-conquer to find a path to h
func (c *Inquiring) updateToHeight(h int) error {
func (c *Inquiring) updateToHeight(h int64) error {
// try to update to this height (with checks)
fc, err := c.Source.GetByHeight(h)
if err != nil {
@ -121,12 +134,12 @@ func (c *Inquiring) updateToHeight(h int) error {
}
start, end := c.LastHeight(), fc.Height()
if end <= start {
return certerr.ErrNoPathFound()
return liteErr.ErrNoPathFound()
}
err = c.Update(fc)
// we can handle IsTooMuchChangeErr specially
if !certerr.IsTooMuchChangeErr(err) {
if !liteErr.IsTooMuchChangeErr(err) {
return err
}

View File

@ -1,4 +1,5 @@
package certifiers_test
// nolint: vetshadow
package lite_test
import (
"fmt"
@ -7,34 +8,33 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/certifiers"
"github.com/tendermint/tendermint/lite"
)
func TestInquirerValidPath(t *testing.T) {
assert, require := assert.New(t), require.New(t)
trust := certifiers.NewMemStoreProvider()
source := certifiers.NewMemStoreProvider()
trust := lite.NewMemStoreProvider()
source := lite.NewMemStoreProvider()
// set up the validators to generate test blocks
var vote int64 = 10
keys := certifiers.GenValKeys(5)
vals := keys.ToValidators(vote, 0)
keys := lite.GenValKeys(5)
// construct a bunch of commits, each with one more height than the last
chainID := "inquiry-test"
count := 50
commits := make([]certifiers.FullCommit, count)
commits := make([]lite.FullCommit, count)
for i := 0; i < count; i++ {
// extend the keys by 1 each time
keys = keys.Extend(1)
vals = keys.ToValidators(vote, 0)
h := 20 + 10*i
vals := keys.ToValidators(vote, 0)
h := int64(20 + 10*i)
appHash := []byte(fmt.Sprintf("h=%d", h))
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, len(keys))
}
// initialize a certifier with the initial state
cert := certifiers.NewInquiring(chainID, commits[0], trust, source)
cert := lite.NewInquiring(chainID, commits[0], trust, source)
// this should fail validation....
commit := commits[count-1].Commit
@ -60,29 +60,28 @@ func TestInquirerValidPath(t *testing.T) {
func TestInquirerMinimalPath(t *testing.T) {
assert, require := assert.New(t), require.New(t)
trust := certifiers.NewMemStoreProvider()
source := certifiers.NewMemStoreProvider()
trust := lite.NewMemStoreProvider()
source := lite.NewMemStoreProvider()
// set up the validators to generate test blocks
var vote int64 = 10
keys := certifiers.GenValKeys(5)
vals := keys.ToValidators(vote, 0)
keys := lite.GenValKeys(5)
// construct a bunch of commits, each with one more height than the last
chainID := "minimal-path"
count := 12
commits := make([]certifiers.FullCommit, count)
commits := make([]lite.FullCommit, count)
for i := 0; i < count; i++ {
// extend the validators, so we are just below 2/3
keys = keys.Extend(len(keys)/2 - 1)
vals = keys.ToValidators(vote, 0)
h := 5 + 10*i
vals := keys.ToValidators(vote, 0)
h := int64(5 + 10*i)
appHash := []byte(fmt.Sprintf("h=%d", h))
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, len(keys))
}
// initialize a certifier with the initial state
cert := certifiers.NewInquiring(chainID, commits[0], trust, source)
cert := lite.NewInquiring(chainID, commits[0], trust, source)
// this should fail validation....
commit := commits[count-1].Commit
@ -108,29 +107,28 @@ func TestInquirerMinimalPath(t *testing.T) {
func TestInquirerVerifyHistorical(t *testing.T) {
assert, require := assert.New(t), require.New(t)
trust := certifiers.NewMemStoreProvider()
source := certifiers.NewMemStoreProvider()
trust := lite.NewMemStoreProvider()
source := lite.NewMemStoreProvider()
// set up the validators to generate test blocks
var vote int64 = 10
keys := certifiers.GenValKeys(5)
vals := keys.ToValidators(vote, 0)
keys := lite.GenValKeys(5)
// construct a bunch of commits, each with one more height than the last
chainID := "inquiry-test"
count := 10
commits := make([]certifiers.FullCommit, count)
commits := make([]lite.FullCommit, count)
for i := 0; i < count; i++ {
// extend the keys by 1 each time
keys = keys.Extend(1)
vals = keys.ToValidators(vote, 0)
h := 20 + 10*i
vals := keys.ToValidators(vote, 0)
h := int64(20 + 10*i)
appHash := []byte(fmt.Sprintf("h=%d", h))
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, len(keys))
}
// initialize a certifier with the initial state
cert := certifiers.NewInquiring(chainID, commits[0], trust, source)
cert := lite.NewInquiring(chainID, commits[0], trust, source)
// store a few commits as trust
for _, i := range []int{2, 5} {

View File

@ -1,10 +1,10 @@
package certifiers
package lite
import (
"encoding/hex"
"sort"
certerr "github.com/tendermint/tendermint/certifiers/errors"
liteErr "github.com/tendermint/tendermint/lite/errors"
)
type memStoreProvider struct {
@ -23,6 +23,7 @@ func (s fullCommits) Less(i, j int) bool {
return s[i].Height() < s[j].Height()
}
// NewMemStoreProvider returns a new in-memory provider.
func NewMemStoreProvider() Provider {
return &memStoreProvider{
byHeight: fullCommits{},
@ -34,6 +35,7 @@ func (m *memStoreProvider) encodeHash(hash []byte) string {
return hex.EncodeToString(hash)
}
// StoreCommit stores a FullCommit after verifying it.
func (m *memStoreProvider) StoreCommit(fc FullCommit) error {
// make sure the fc is self-consistent before saving
err := fc.ValidateBasic(fc.Commit.Header.ChainID)
@ -49,7 +51,8 @@ func (m *memStoreProvider) StoreCommit(fc FullCommit) error {
return nil
}
func (m *memStoreProvider) GetByHeight(h int) (FullCommit, error) {
// GetByHeight returns the FullCommit for height h or an error if the commit is not found.
func (m *memStoreProvider) GetByHeight(h int64) (FullCommit, error) {
// search from highest to lowest
for i := len(m.byHeight) - 1; i >= 0; i-- {
fc := m.byHeight[i]
@ -57,22 +60,24 @@ func (m *memStoreProvider) GetByHeight(h int) (FullCommit, error) {
return fc, nil
}
}
return FullCommit{}, certerr.ErrCommitNotFound()
return FullCommit{}, liteErr.ErrCommitNotFound()
}
// GetByHash returns the FullCommit for the hash or an error if the commit is not found.
func (m *memStoreProvider) GetByHash(hash []byte) (FullCommit, error) {
var err error
fc, ok := m.byHash[m.encodeHash(hash)]
if !ok {
err = certerr.ErrCommitNotFound()
err = liteErr.ErrCommitNotFound()
}
return fc, err
}
// LatestCommit returns the latest FullCommit or an error if no commits exist.
func (m *memStoreProvider) LatestCommit() (FullCommit, error) {
l := len(m.byHeight)
if l == 0 {
return FullCommit{}, certerr.ErrCommitNotFound()
return FullCommit{}, liteErr.ErrCommitNotFound()
}
return m.byHeight[l-1], nil
}

View File

@ -1,37 +1,37 @@
package certifiers_test
package lite_test
import (
"fmt"
"testing"
"github.com/tendermint/tendermint/certifiers"
"github.com/tendermint/tendermint/lite"
)
func BenchmarkGenCommit20(b *testing.B) {
keys := certifiers.GenValKeys(20)
keys := lite.GenValKeys(20)
benchmarkGenCommit(b, keys)
}
func BenchmarkGenCommit100(b *testing.B) {
keys := certifiers.GenValKeys(100)
keys := lite.GenValKeys(100)
benchmarkGenCommit(b, keys)
}
func BenchmarkGenCommitSec20(b *testing.B) {
keys := certifiers.GenSecpValKeys(20)
keys := lite.GenSecpValKeys(20)
benchmarkGenCommit(b, keys)
}
func BenchmarkGenCommitSec100(b *testing.B) {
keys := certifiers.GenSecpValKeys(100)
keys := lite.GenSecpValKeys(100)
benchmarkGenCommit(b, keys)
}
func benchmarkGenCommit(b *testing.B, keys certifiers.ValKeys) {
func benchmarkGenCommit(b *testing.B, keys lite.ValKeys) {
chainID := fmt.Sprintf("bench-%d", len(keys))
vals := keys.ToValidators(20, 10)
for i := 0; i < b.N; i++ {
h := 1 + i
h := int64(1 + i)
appHash := []byte(fmt.Sprintf("h=%d", h))
keys.GenCommit(chainID, h, nil, vals, appHash, 0, len(keys))
}
@ -39,7 +39,7 @@ func benchmarkGenCommit(b *testing.B, keys certifiers.ValKeys) {
// this benchmarks generating one key
func BenchmarkGenValKeys(b *testing.B) {
keys := certifiers.GenValKeys(20)
keys := lite.GenValKeys(20)
for i := 0; i < b.N; i++ {
keys = keys.Extend(1)
}
@ -47,7 +47,7 @@ func BenchmarkGenValKeys(b *testing.B) {
// this benchmarks generating one key
func BenchmarkGenSecpValKeys(b *testing.B) {
keys := certifiers.GenSecpValKeys(20)
keys := lite.GenSecpValKeys(20)
for i := 0; i < b.N; i++ {
keys = keys.Extend(1)
}
@ -63,7 +63,7 @@ func BenchmarkToValidators100(b *testing.B) {
// this benchmarks constructing the validator set (.PubKey() * nodes)
func benchmarkToValidators(b *testing.B, nodes int) {
keys := certifiers.GenValKeys(nodes)
keys := lite.GenValKeys(nodes)
for i := 1; i <= b.N; i++ {
keys.ToValidators(int64(2*i), int64(i))
}
@ -75,36 +75,36 @@ func BenchmarkToValidatorsSec100(b *testing.B) {
// this benchmarks constructing the validator set (.PubKey() * nodes)
func benchmarkToValidatorsSec(b *testing.B, nodes int) {
keys := certifiers.GenSecpValKeys(nodes)
keys := lite.GenSecpValKeys(nodes)
for i := 1; i <= b.N; i++ {
keys.ToValidators(int64(2*i), int64(i))
}
}
func BenchmarkCertifyCommit20(b *testing.B) {
keys := certifiers.GenValKeys(20)
keys := lite.GenValKeys(20)
benchmarkCertifyCommit(b, keys)
}
func BenchmarkCertifyCommit100(b *testing.B) {
keys := certifiers.GenValKeys(100)
keys := lite.GenValKeys(100)
benchmarkCertifyCommit(b, keys)
}
func BenchmarkCertifyCommitSec20(b *testing.B) {
keys := certifiers.GenSecpValKeys(20)
keys := lite.GenSecpValKeys(20)
benchmarkCertifyCommit(b, keys)
}
func BenchmarkCertifyCommitSec100(b *testing.B) {
keys := certifiers.GenSecpValKeys(100)
keys := lite.GenSecpValKeys(100)
benchmarkCertifyCommit(b, keys)
}
func benchmarkCertifyCommit(b *testing.B, keys certifiers.ValKeys) {
func benchmarkCertifyCommit(b *testing.B, keys lite.ValKeys) {
chainID := "bench-certify"
vals := keys.ToValidators(20, 10)
cert := certifiers.NewStatic(chainID, vals)
cert := lite.NewStatic(chainID, vals)
check := keys.GenCommit(chainID, 123, nil, vals, []byte("foo"), 0, len(keys))
for i := 0; i < b.N; i++ {
err := cert.Certify(check)

View File

@ -1,22 +1,18 @@
package certifiers
package lite
import (
certerr "github.com/tendermint/tendermint/certifiers/errors"
)
// Provider is used to get more validators by other means
// Provider is used to get more validators by other means.
//
// Examples: MemProvider, files.Provider, client.Provider....
// Examples: MemProvider, files.Provider, client.Provider, CacheProvider....
type Provider interface {
// StoreCommit saves a FullCommit after we have verified it,
// so we can query for it later. Important for updating our
// store of trusted commits
// store of trusted commits.
StoreCommit(fc FullCommit) error
// GetByHeight returns the closest commit with height <= h
GetByHeight(h int) (FullCommit, error)
// GetByHash returns a commit exactly matching this validator hash
// GetByHeight returns the closest commit with height <= h.
GetByHeight(h int64) (FullCommit, error)
// GetByHash returns a commit exactly matching this validator hash.
GetByHash(hash []byte) (FullCommit, error)
// LatestCommit returns the newest commit stored
// LatestCommit returns the newest commit stored.
LatestCommit() (FullCommit, error)
}
@ -28,6 +24,7 @@ type cacheProvider struct {
Providers []Provider
}
// NewCacheProvider returns a new provider which wraps multiple other providers.
func NewCacheProvider(providers ...Provider) Provider {
return cacheProvider{
Providers: providers,
@ -47,20 +44,18 @@ func (c cacheProvider) StoreCommit(fc FullCommit) (err error) {
return err
}
/*
GetByHeight should return the closest possible match from all providers.
The Cache is usually organized in order from cheapest call (memory)
to most expensive calls (disk/network). However, since GetByHeight returns
a FullCommit at h' <= h, if the memory has a seed at h-10, but the network would
give us the exact match, a naive "stop at first non-error" would hide
the actual desired results.
Thus, we query each provider in order until we find an exact match
or we finished querying them all. If at least one returned a non-error,
then this returns the best match (minimum h-h').
*/
func (c cacheProvider) GetByHeight(h int) (fc FullCommit, err error) {
// GetByHeight should return the closest possible match from all providers.
//
// The Cache is usually organized in order from cheapest call (memory)
// to most expensive calls (disk/network). However, since GetByHeight returns
// a FullCommit at h' <= h, if the memory has a seed at h-10, but the network would
// give us the exact match, a naive "stop at first non-error" would hide
// the actual desired results.
//
// Thus, we query each provider in order until we find an exact match
// or we have finished querying them all. If at least one returned a non-error,
// then this returns the best match (minimum h-h').
func (c cacheProvider) GetByHeight(h int64) (fc FullCommit, err error) {
for _, p := range c.Providers {
var tfc FullCommit
tfc, err = p.GetByHeight(h)
@ -80,6 +75,7 @@ func (c cacheProvider) GetByHeight(h int) (fc FullCommit, err error) {
return fc, err
}
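
To make the lookup order concrete, here is a minimal hypothetical sketch using the lite API from this diff (the chain id and heights are illustrative): a fast in-memory layer is consulted first, but the exact match from the slower layer still wins.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/lite"
)

func main() {
	// Providers are consulted in order: the fast layer first,
	// then the slower but more complete one.
	fast := lite.NewMemStoreProvider()
	slow := lite.NewMemStoreProvider()
	cp := lite.NewCacheProvider(fast, slow)

	keys := lite.GenValKeys(5)
	vals := keys.ToValidators(10, 5)

	// The fast layer only knows height 40; the slow layer holds
	// the exact match at height 47.
	fast.StoreCommit(keys.GenFullCommit("demo", 40, nil, vals, []byte("app"), 0, 5))
	slow.StoreCommit(keys.GenFullCommit("demo", 47, nil, vals, []byte("app"), 0, 5))

	// A naive "stop at first non-error" would return height 40;
	// the cache keeps querying and returns the exact match.
	fc, err := cp.GetByHeight(47)
	fmt.Println(fc.Height(), err) // 47 <nil>
}
```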
// GetByHash returns the FullCommit for the hash or an error if the commit is not found.
func (c cacheProvider) GetByHash(hash []byte) (fc FullCommit, err error) {
for _, p := range c.Providers {
fc, err = p.GetByHash(hash)
@ -90,6 +86,7 @@ func (c cacheProvider) GetByHash(hash []byte) (fc FullCommit, err error) {
return fc, err
}
// LatestCommit returns the latest FullCommit or an error if no commit exists.
func (c cacheProvider) LatestCommit() (fc FullCommit, err error) {
for _, p := range c.Providers {
var tfc FullCommit
@ -104,22 +101,3 @@ func (c cacheProvider) LatestCommit() (fc FullCommit, err error) {
}
return fc, err
}
// missingProvider doesn't store anything, always a miss
// Designed as a mock for testing
type missingProvider struct{}
func NewMissingProvider() Provider {
return missingProvider{}
}
func (missingProvider) StoreCommit(_ FullCommit) error { return nil }
func (missingProvider) GetByHeight(_ int) (FullCommit, error) {
return FullCommit{}, certerr.ErrCommitNotFound()
}
func (missingProvider) GetByHash(_ []byte) (FullCommit, error) {
return FullCommit{}, certerr.ErrCommitNotFound()
}
func (missingProvider) LatestCommit() (FullCommit, error) {
return FullCommit{}, certerr.ErrCommitNotFound()
}

View File

@ -1,4 +1,5 @@
package certifiers_test
// nolint: vetshadow
package lite_test
import (
"testing"
@ -6,48 +7,68 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/certifiers"
"github.com/tendermint/tendermint/certifiers/errors"
"github.com/tendermint/tendermint/lite"
liteErr "github.com/tendermint/tendermint/lite/errors"
)
// missingProvider doesn't store anything, always a miss
// Designed as a mock for testing
type missingProvider struct{}
// NewMissingProvider returns a provider which does not store anything and always misses.
func NewMissingProvider() lite.Provider {
return missingProvider{}
}
func (missingProvider) StoreCommit(lite.FullCommit) error { return nil }
func (missingProvider) GetByHeight(int64) (lite.FullCommit, error) {
return lite.FullCommit{}, liteErr.ErrCommitNotFound()
}
func (missingProvider) GetByHash([]byte) (lite.FullCommit, error) {
return lite.FullCommit{}, liteErr.ErrCommitNotFound()
}
func (missingProvider) LatestCommit() (lite.FullCommit, error) {
return lite.FullCommit{}, liteErr.ErrCommitNotFound()
}
func TestMemProvider(t *testing.T) {
p := certifiers.NewMemStoreProvider()
p := lite.NewMemStoreProvider()
checkProvider(t, p, "test-mem", "empty")
}
func TestCacheProvider(t *testing.T) {
p := certifiers.NewCacheProvider(
certifiers.NewMissingProvider(),
certifiers.NewMemStoreProvider(),
certifiers.NewMissingProvider(),
p := lite.NewCacheProvider(
NewMissingProvider(),
lite.NewMemStoreProvider(),
NewMissingProvider(),
)
checkProvider(t, p, "test-cache", "kjfhekfhkewhgit")
}
func checkProvider(t *testing.T, p certifiers.Provider, chainID, app string) {
func checkProvider(t *testing.T, p lite.Provider, chainID, app string) {
assert, require := assert.New(t), require.New(t)
appHash := []byte(app)
keys := certifiers.GenValKeys(5)
keys := lite.GenValKeys(5)
count := 10
// make a bunch of commits...
commits := make([]certifiers.FullCommit, count)
commits := make([]lite.FullCommit, count)
for i := 0; i < count; i++ {
// two commits for each validator, to check how we handle dups
// (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ...
vals := keys.ToValidators(10, int64(count/2))
h := 20 + 10*i
h := int64(20 + 10*i)
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, 5)
}
// check provider is empty
fc, err := p.GetByHeight(20)
require.NotNil(err)
assert.True(errors.IsCommitNotFoundErr(err))
assert.True(liteErr.IsCommitNotFoundErr(err))
fc, err = p.GetByHash(commits[3].ValidatorsHash())
require.NotNil(err)
assert.True(errors.IsCommitNotFoundErr(err))
assert.True(liteErr.IsCommitNotFoundErr(err))
// now add them all to the provider
for _, s := range commits {
@ -74,13 +95,13 @@ func checkProvider(t *testing.T, p certifiers.Provider, chainID, app string) {
fc, err = p.GetByHeight(47)
if assert.Nil(err) {
// we only step by 10, so 40 must be the one below this
assert.Equal(40, fc.Height())
assert.EqualValues(40, fc.Height())
}
}
// this will make a get height, and if it is good, set the data as well
func checkGetHeight(t *testing.T, p certifiers.Provider, ask, expect int) {
func checkGetHeight(t *testing.T, p lite.Provider, ask, expect int64) {
fc, err := p.GetByHeight(ask)
require.Nil(t, err, "%+v", err)
if assert.Equal(t, expect, fc.Height()) {
@ -95,19 +116,19 @@ func TestCacheGetsBestHeight(t *testing.T) {
// we will write data to the second level of the cache (p2),
// and see what gets cached, stored in
p := certifiers.NewMemStoreProvider()
p2 := certifiers.NewMemStoreProvider()
cp := certifiers.NewCacheProvider(p, p2)
p := lite.NewMemStoreProvider()
p2 := lite.NewMemStoreProvider()
cp := lite.NewCacheProvider(p, p2)
chainID := "cache-best-height"
appHash := []byte("01234567")
keys := certifiers.GenValKeys(5)
keys := lite.GenValKeys(5)
count := 10
// set a bunch of commits
for i := 0; i < count; i++ {
vals := keys.ToValidators(10, int64(count/2))
h := 10 * (i + 1)
h := int64(10 * (i + 1))
fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, 5)
err := p2.StoreCommit(fc)
require.NoError(err)

View File

@ -1,4 +1,4 @@
package certifiers
package lite
import (
"bytes"
@ -7,7 +7,7 @@ import (
"github.com/tendermint/tendermint/types"
certerr "github.com/tendermint/tendermint/certifiers/errors"
liteErr "github.com/tendermint/tendermint/lite/errors"
)
var _ Certifier = &Static{}
@ -25,6 +25,7 @@ type Static struct {
vhash []byte
}
// NewStatic returns a new certifier with a static validator set.
func NewStatic(chainID string, vals *types.ValidatorSet) *Static {
return &Static{
chainID: chainID,
@ -32,14 +33,17 @@ func NewStatic(chainID string, vals *types.ValidatorSet) *Static {
}
}
// ChainID returns the chain id.
func (c *Static) ChainID() string {
return c.chainID
}
// Validators returns the validator set.
func (c *Static) Validators() *types.ValidatorSet {
return c.vSet
}
// Hash returns the hash of the validator set.
func (c *Static) Hash() []byte {
if len(c.vhash) == 0 {
c.vhash = c.vSet.Hash()
@ -47,6 +51,7 @@ func (c *Static) Hash() []byte {
return c.vhash
}
// Certify makes sure that the commit is valid.
func (c *Static) Certify(commit Commit) error {
// do basic sanity checks
err := commit.ValidateBasic(c.chainID)
@ -56,7 +61,7 @@ func (c *Static) Certify(commit Commit) error {
// make sure it has the same validator set we have (static means static)
if !bytes.Equal(c.Hash(), commit.Header.ValidatorsHash) {
return certerr.ErrValidatorsChanged()
return liteErr.ErrValidatorsChanged()
}
// then make sure we have the proper signatures for this

View File

@ -1,4 +1,4 @@
package certifiers_test
package lite_test
import (
"testing"
@ -7,8 +7,8 @@ import (
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/certifiers"
errors "github.com/tendermint/tendermint/certifiers/errors"
"github.com/tendermint/tendermint/lite"
liteErr "github.com/tendermint/tendermint/lite/errors"
)
func TestStaticCert(t *testing.T) {
@ -16,17 +16,17 @@ func TestStaticCert(t *testing.T) {
assert := assert.New(t)
// require := require.New(t)
keys := certifiers.GenValKeys(4)
keys := lite.GenValKeys(4)
// 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do!
vals := keys.ToValidators(20, 10)
// and a certifier based on our known set
chainID := "test-static"
cert := certifiers.NewStatic(chainID, vals)
cert := lite.NewStatic(chainID, vals)
cases := []struct {
keys certifiers.ValKeys
keys lite.ValKeys
vals *types.ValidatorSet
height int
height int64
first, last int // who actually signs
proper bool // true -> expect no error
changed bool // true -> expect validator change error
@ -51,7 +51,7 @@ func TestStaticCert(t *testing.T) {
} else {
assert.NotNil(err)
if tc.changed {
assert.True(errors.IsValidatorsChangedErr(err), "%+v", err)
assert.True(liteErr.IsValidatorsChangedErr(err), "%+v", err)
}
}
}

View File

@ -3,6 +3,7 @@ package mempool
import (
"bytes"
"container/list"
"fmt"
"sync"
"sync/atomic"
"time"
@ -61,12 +62,12 @@ type Mempool struct {
proxyAppConn proxy.AppConnMempool
txs *clist.CList // concurrent linked-list of good txs
counter int64 // simple incrementing counter
height int // the last block Update()'d to
height int64 // the last block Update()'d to
rechecking int32 // for re-checking filtered txs on Update()
recheckCursor *clist.CElement // next expected response
recheckEnd *clist.CElement // re-checking stops here
notifiedTxsAvailable bool // true if fired on txsAvailable for this height
txsAvailable chan int // fires the next height once for each height, when the mempool is not empty
txsAvailable chan int64 // fires the next height once for each height, when the mempool is not empty
// Keep a cache of already-seen txs.
// This reduces the pressure on the proxyApp.
@ -80,7 +81,7 @@ type Mempool struct {
// NewMempool returns a new Mempool with the given configuration and connection to an application.
// TODO: Extract logger into arguments.
func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, height int) *Mempool {
func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, height int64) *Mempool {
mempool := &Mempool{
config: config,
proxyAppConn: proxyAppConn,
@ -102,7 +103,7 @@ func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, he
// ensuring it will trigger once every height when transactions are available.
// NOTE: not thread safe - should only be called once, on startup
func (mem *Mempool) EnableTxsAvailable() {
mem.txsAvailable = make(chan int, 1)
mem.txsAvailable = make(chan int64, 1)
}
// SetLogger sets the Logger.
@ -110,6 +111,26 @@ func (mem *Mempool) SetLogger(l log.Logger) {
mem.logger = l
}
// CloseWAL closes and discards the underlying WAL file.
// Any further writes will not be relayed to disk.
func (mem *Mempool) CloseWAL() bool {
if mem == nil {
return false
}
mem.proxyMtx.Lock()
defer mem.proxyMtx.Unlock()
if mem.wal == nil {
return false
}
if err := mem.wal.Close(); err != nil && mem.logger != nil {
mem.logger.Error("Mempool.CloseWAL", "err", err)
}
mem.wal = nil
return true
}
func (mem *Mempool) initWAL() {
walDir := mem.config.WalDir()
if walDir != "" {
@ -171,17 +192,7 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
// CACHE
if mem.cache.Exists(tx) {
if cb != nil {
cb(&abci.Response{
Value: &abci.Response_CheckTx{
&abci.ResponseCheckTx{
Code: abci.CodeType_BadNonce, // TODO or duplicate tx
Log: "Duplicate transaction (ignored)",
},
},
})
}
return nil // TODO: return an error (?)
return fmt.Errorf("Tx already exists in cache")
}
mem.cache.Push(tx)
// END CACHE
@ -189,8 +200,14 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
// WAL
if mem.wal != nil {
// TODO: Notify administrators when WAL fails
mem.wal.Write([]byte(tx))
mem.wal.Write([]byte("\n"))
_, err := mem.wal.Write([]byte(tx))
if err != nil {
mem.logger.Error("Error writing to WAL", "err", err)
}
_, err = mem.wal.Write([]byte("\n"))
if err != nil {
mem.logger.Error("Error writing to WAL", "err", err)
}
}
// END WAL
@ -219,11 +236,11 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) {
switch r := res.Value.(type) {
case *abci.Response_CheckTx:
tx := req.GetCheckTx().Tx
if r.CheckTx.Code == abci.CodeType_OK {
if r.CheckTx.Code == abci.CodeTypeOK {
mem.counter++
memTx := &mempoolTx{
counter: mem.counter,
height: int64(mem.height),
height: mem.height,
tx: tx,
}
mem.txs.PushBack(memTx)
@ -251,7 +268,7 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) {
cmn.PanicSanity(cmn.Fmt("Unexpected tx response from proxy during recheck\n"+
"Expected %X, got %X", r.CheckTx.Data, memTx.tx))
}
if r.CheckTx.Code == abci.CodeType_OK {
if r.CheckTx.Code == abci.CodeTypeOK {
// Good, nothing to do.
} else {
// Tx became invalidated due to newly committed block.
@ -284,7 +301,7 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) {
// TxsAvailable returns a channel which fires once for every height,
// and only when transactions are available in the mempool.
// NOTE: the returned channel may be nil if EnableTxsAvailable was not called.
func (mem *Mempool) TxsAvailable() <-chan int {
func (mem *Mempool) TxsAvailable() <-chan int64 {
return mem.txsAvailable
}
@ -331,10 +348,10 @@ func (mem *Mempool) collectTxs(maxTxs int) types.Txs {
// Update informs the mempool that the given txs were committed and can be discarded.
// NOTE: this should be called *after* block is committed by consensus.
// NOTE: unsafe; Lock/Unlock must be managed by caller
func (mem *Mempool) Update(height int, txs types.Txs) {
// TODO: check err ?
mem.proxyAppConn.FlushSync() // To flush async resCb calls e.g. from CheckTx
func (mem *Mempool) Update(height int64, txs types.Txs) error {
if err := mem.proxyAppConn.FlushSync(); err != nil { // To flush async resCb calls e.g. from CheckTx
return err
}
// First, create a lookup map of txns in new txs.
txsMap := make(map[string]struct{})
for _, tx := range txs {
@ -357,6 +374,7 @@ func (mem *Mempool) Update(height int, txs types.Txs) {
// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
}
return nil
}
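
Since `Update` now returns an error (propagated from `FlushSync`), call sites are expected to check it. A hypothetical call-site sketch, using the mempool's `Lock`/`Unlock` which, per the note above, the caller must manage:

```go
// updateMempool is a hypothetical helper: lock, update, and surface
// any FlushSync error to the caller.
func updateMempool(mem *Mempool, height int64, txs types.Txs) error {
	mem.Lock()
	defer mem.Unlock()
	return mem.Update(height, txs)
}
```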
func (mem *Mempool) filterTxs(blockTxsMap map[string]struct{}) []types.Tx {
@ -405,8 +423,8 @@ type mempoolTx struct {
}
// Height returns the height for this transaction
func (memTx *mempoolTx) Height() int {
return int(atomic.LoadInt64(&memTx.height))
func (memTx *mempoolTx) Height() int64 {
return atomic.LoadInt64(&memTx.height)
}
//--------------------------------------------------------------------------------

View File

@ -1,18 +1,27 @@
package mempool
import (
"crypto/md5"
"crypto/rand"
"encoding/binary"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/tendermint/abci/example/counter"
"github.com/tendermint/abci/example/dummy"
abci "github.com/tendermint/abci/types"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/log"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
"github.com/stretchr/testify/require"
)
func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
@ -20,13 +29,16 @@ func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
appConnMem, _ := cc.NewABCIClient()
appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
appConnMem.Start()
err := appConnMem.Start()
if err != nil {
panic(err)
}
mempool := NewMempool(config.Mempool, appConnMem, 0)
mempool.SetLogger(log.TestingLogger())
return mempool
}
func ensureNoFire(t *testing.T, ch <-chan int, timeoutMS int) {
func ensureNoFire(t *testing.T, ch <-chan int64, timeoutMS int) {
timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
select {
case <-ch:
@ -35,7 +47,7 @@ func ensureNoFire(t *testing.T, ch <-chan int, timeoutMS int) {
}
}
func ensureFire(t *testing.T, ch <-chan int, timeoutMS int) {
func ensureFire(t *testing.T, ch <-chan int64, timeoutMS int) {
timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
select {
case <-ch:
@ -49,10 +61,12 @@ func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs {
for i := 0; i < count; i++ {
txBytes := make([]byte, 20)
txs[i] = txBytes
rand.Read(txBytes)
err := mempool.CheckTx(txBytes, nil)
_, err := rand.Read(txBytes)
if err != nil {
t.Fatal("Error after CheckTx: %v", err)
t.Error(err)
}
if err := mempool.CheckTx(txBytes, nil); err != nil {
t.Fatalf("Error after CheckTx: %v", err)
}
}
return txs
@ -78,7 +92,9 @@ func TestTxsAvailable(t *testing.T) {
// it should fire once now for the new height
// since there are still txs left
committedTxs, txs := txs[:50], txs[50:]
mempool.Update(1, committedTxs)
if err := mempool.Update(1, committedTxs); err != nil {
t.Error(err)
}
ensureFire(t, mempool.TxsAvailable(), timeoutMS)
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
@ -88,7 +104,9 @@ func TestTxsAvailable(t *testing.T) {
// now call update with all the txs. it should not fire as there are no txs left
committedTxs = append(txs, moreTxs...)
mempool.Update(2, committedTxs)
if err := mempool.Update(2, committedTxs); err != nil {
t.Error(err)
}
ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
// send a bunch more txs, it should only fire once
@ -99,16 +117,16 @@ func TestTxsAvailable(t *testing.T) {
func TestSerialReap(t *testing.T) {
app := counter.NewCounterApplication(true)
app.SetOption("serial", "on")
app.SetOption(abci.RequestSetOption{"serial", "on"})
cc := proxy.NewLocalClientCreator(app)
mempool := newMempoolWithApp(cc)
appConnCon, _ := cc.NewABCIClient()
appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
if _, err := appConnCon.Start(); err != nil {
t.Fatalf("Error starting ABCI client: %v", err.Error())
}
err := appConnCon.Start()
require.Nil(t, err)
cacheMap := make(map[string]struct{})
deliverTxsRange := func(start, end int) {
// Deliver some txs.
for i := start; i < end; i++ {
@ -117,26 +135,23 @@ func TestSerialReap(t *testing.T) {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(i))
err := mempool.CheckTx(txBytes, nil)
if err != nil {
t.Fatal("Error after CheckTx: %v", err)
_, cached := cacheMap[string(txBytes)]
if cached {
require.NotNil(t, err, "expected error for cached tx")
} else {
require.Nil(t, err, "expected no err for uncached tx")
}
cacheMap[string(txBytes)] = struct{}{}
// This will fail because not serial (incrementing)
// However, error should still be nil.
// It just won't show up on Reap().
// Duplicates are cached and should return error
err = mempool.CheckTx(txBytes, nil)
if err != nil {
t.Fatal("Error after CheckTx: %v", err)
}
require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
}
}
reapCheck := func(exp int) {
txs := mempool.Reap(-1)
if len(txs) != exp {
t.Fatalf("Expected to reap %v txs but got %v", exp, len(txs))
}
require.Equal(t, len(txs), exp, cmn.Fmt("Expected to reap %v txs but got %v", exp, len(txs)))
}
updateRange := func(start, end int) {
@ -146,7 +161,9 @@ func TestSerialReap(t *testing.T) {
binary.BigEndian.PutUint64(txBytes, uint64(i))
txs = append(txs, txBytes)
}
mempool.Update(0, txs)
if err := mempool.Update(0, txs); err != nil {
t.Error(err)
}
}
commitRange := func(start, end int) {
@ -154,13 +171,19 @@ func TestSerialReap(t *testing.T) {
for i := start; i < end; i++ {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(i))
res := appConnCon.DeliverTxSync(txBytes)
if !res.IsOK() {
res, err := appConnCon.DeliverTxSync(txBytes)
if err != nil {
t.Errorf("Client error committing tx: %v", err)
}
if res.IsErr() {
t.Errorf("Error committing tx. Code:%v result:%X log:%v",
res.Code, res.Data, res.Log)
}
}
res := appConnCon.CommitSync()
res, err := appConnCon.CommitSync()
if err != nil {
t.Errorf("Client error committing: %v", err)
}
if len(res.Data) != 8 {
t.Errorf("Error committing. Hash:%X log:%v", res.Data, res.Log)
}
@ -200,3 +223,63 @@ func TestSerialReap(t *testing.T) {
// We should have 600 now.
reapCheck(600)
}
func TestMempoolCloseWAL(t *testing.T) {
// 1. Create the temporary directory for mempool and WAL testing.
rootDir, err := ioutil.TempDir("", "mempool-test")
require.Nil(t, err, "expecting successful tmpdir creation")
defer os.RemoveAll(rootDir)
// 2. Ensure that it doesn't contain any elements -- Sanity check
m1, err := filepath.Glob(filepath.Join(rootDir, "*"))
require.Nil(t, err, "successful globbing expected")
require.Equal(t, 0, len(m1), "no matches yet")
// 3. Create the mempool
wcfg := *(cfg.DefaultMempoolConfig())
wcfg.RootDir = rootDir
app := dummy.NewDummyApplication()
cc := proxy.NewLocalClientCreator(app)
appConnMem, _ := cc.NewABCIClient()
mempool := NewMempool(&wcfg, appConnMem, 10)
// 4. Ensure that the directory contains the WAL file
m2, err := filepath.Glob(filepath.Join(rootDir, "*"))
require.Nil(t, err, "successful globbing expected")
require.Equal(t, 1, len(m2), "expecting the wal match in")
// 5. Write some contents to the WAL
mempool.CheckTx(types.Tx([]byte("foo")), nil)
walFilepath := mempool.wal.Path
sum1 := checksumFile(walFilepath, t)
// 6. Sanity check to ensure that the written TX matches the expectation.
require.Equal(t, sum1, checksumIt([]byte("foo\n")), "foo with a newline should be written")
// 7. Invoke CloseWAL() and ensure it discards the
// WAL thus any other write won't go through.
require.True(t, mempool.CloseWAL(), "CloseWAL should CloseWAL")
mempool.CheckTx(types.Tx([]byte("bar")), nil)
sum2 := checksumFile(walFilepath, t)
require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded")
// 8. Second CloseWAL should do nothing
require.False(t, mempool.CloseWAL(), "CloseWAL should CloseWAL")
// 9. Sanity check to ensure that the WAL file still exists
m3, err := filepath.Glob(filepath.Join(rootDir, "*"))
require.Nil(t, err, "successful globbing expected")
require.Equal(t, 1, len(m3), "expecting the wal match in")
}
func checksumIt(data []byte) string {
h := md5.New()
h.Write(data)
return fmt.Sprintf("%x", h.Sum(nil))
}
func checksumFile(p string, t *testing.T) string {
data, err := ioutil.ReadFile(p)
require.Nil(t, err, "expecting successful read of %q", p)
return checksumIt(data)
}

View File

@ -28,7 +28,6 @@ type MempoolReactor struct {
p2p.BaseReactor
config *cfg.MempoolConfig
Mempool *Mempool
evsw types.EventSwitch
}
// NewMempoolReactor returns a new MempoolReactor with the given config and mempool.
@ -51,7 +50,7 @@ func (memR *MempoolReactor) SetLogger(l log.Logger) {
// It returns the list of channels for this reactor.
func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
&p2p.ChannelDescriptor{
{
ID: MempoolChannel,
Priority: 5,
},
@ -98,7 +97,7 @@ func (memR *MempoolReactor) BroadcastTx(tx types.Tx, cb func(*abci.Response)) er
// PeerState describes the state of a peer.
type PeerState interface {
GetHeight() int
GetHeight() int64
}
// Peer describes a peer.
@ -150,11 +149,6 @@ func (memR *MempoolReactor) broadcastTxRoutine(peer Peer) {
}
}
// SetEventSwitch implements events.Eventable.
func (memR *MempoolReactor) SetEventSwitch(evsw types.EventSwitch) {
memR.evsw = evsw
}
//-----------------------------------------------------------------------------
// Messages

View File

@ -81,7 +81,7 @@ func _waitForTxs(t *testing.T, wg *sync.WaitGroup, txs types.Txs, reactorIdx int
mempool := reactors[reactorIdx].Mempool
for mempool.Size() != len(txs) {
time.Sleep(time.Second)
time.Sleep(time.Millisecond * 100)
}
reapedTxs := mempool.Reap(len(txs))

View File

@ -2,6 +2,7 @@ package node
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
@ -21,6 +22,7 @@ import (
"github.com/tendermint/tendermint/consensus"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/trust"
"github.com/tendermint/tendermint/proxy"
rpccore "github.com/tendermint/tendermint/rpc/core"
grpccore "github.com/tendermint/tendermint/rpc/grpc"
@ -94,12 +96,13 @@ type Node struct {
privValidator types.PrivValidator // local node's validator key
// network
privKey crypto.PrivKeyEd25519 // local node's p2p key
sw *p2p.Switch // p2p connections
addrBook *p2p.AddrBook // known peers
privKey crypto.PrivKeyEd25519 // local node's p2p key
sw *p2p.Switch // p2p connections
addrBook *p2p.AddrBook // known peers
trustMetricStore *trust.TrustMetricStore // trust metrics for all peers
// services
evsw types.EventSwitch // pub/sub for services
eventBus *types.EventBus // pub/sub for services
blockStore *bc.BlockStore // store the blockchain to disk
bcReactor *bc.BlockchainReactor // for fast-syncing
mempoolReactor *mempl.MempoolReactor // for gossipping transactions
@ -108,6 +111,7 @@ type Node struct {
proxyApp proxy.AppConns // connection to the application
rpcListeners []net.Listener // rpc servers
txIndexer txindex.TxIndexer
indexerService *txindex.IndexerService
}
// NewNode returns a new, ready to go, Tendermint Node.
@ -162,7 +166,7 @@ func NewNode(config *cfg.Config,
handshaker.SetLogger(consensusLogger)
proxyApp := proxy.NewAppConns(clientCreator, handshaker)
proxyApp.SetLogger(logger.With("module", "proxy"))
if _, err := proxyApp.Start(); err != nil {
if err := proxyApp.Start(); err != nil {
return nil, fmt.Errorf("Error starting proxy app connections: %v", err)
}
@ -170,30 +174,9 @@ func NewNode(config *cfg.Config,
state = sm.LoadState(stateDB)
state.SetLogger(stateLogger)
// Transaction indexing
var txIndexer txindex.TxIndexer
switch config.TxIndex {
case "kv":
store, err := dbProvider(&DBContext{"tx_index", config})
if err != nil {
return nil, err
}
txIndexer = kv.NewTxIndex(store)
default:
txIndexer = &null.TxIndex{}
}
state.TxIndexer = txIndexer
// Generate node PrivKey
privKey := crypto.GenPrivKeyEd25519()
// Make event switch
eventSwitch := types.NewEventSwitch()
eventSwitch.SetLogger(logger.With("module", "types"))
if _, err := eventSwitch.Start(); err != nil {
return nil, fmt.Errorf("Failed to start switch: %v", err)
}
// Decide whether to fast-sync or not
// We don't fast-sync when the only validator is us.
fastSync := config.FastSync
@ -245,9 +228,19 @@ func NewNode(config *cfg.Config,
// Optionally, start the pex reactor
var addrBook *p2p.AddrBook
var trustMetricStore *trust.TrustMetricStore
if config.P2P.PexReactor {
addrBook = p2p.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
// Get the trust metric history data
trustHistoryDB, err := dbProvider(&DBContext{"trusthistory", config})
if err != nil {
return nil, err
}
trustMetricStore = trust.NewTrustMetricStore(trustHistoryDB, trust.DefaultConfig())
trustMetricStore.SetLogger(p2pLogger)
pexReactor := p2p.NewPEXReactor(addrBook)
pexReactor.SetLogger(p2pLogger)
sw.AddReactor("PEX", pexReactor)
@ -263,31 +256,54 @@ func NewNode(config *cfg.Config,
if err != nil {
return err
}
if resQuery.Code.IsOK() {
return nil
if resQuery.IsErr() {
return resQuery
}
return errors.New(resQuery.Code.String())
return nil
})
sw.SetPubKeyFilter(func(pubkey crypto.PubKeyEd25519) error {
resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/pubkey/%X", pubkey.Bytes())})
if err != nil {
return err
}
if resQuery.Code.IsOK() {
return nil
if resQuery.IsErr() {
return resQuery
}
return errors.New(resQuery.Code.String())
return nil
})
}
// add the event switch to all services
// they should all satisfy events.Eventable
SetEventSwitch(eventSwitch, bcReactor, mempoolReactor, consensusReactor)
eventBus := types.NewEventBus()
eventBus.SetLogger(logger.With("module", "events"))
// services which will be publishing and/or subscribing for messages (events)
bcReactor.SetEventBus(eventBus)
consensusReactor.SetEventBus(eventBus)
// Transaction indexing
var txIndexer txindex.TxIndexer
switch config.TxIndex.Indexer {
case "kv":
store, err := dbProvider(&DBContext{"tx_index", config})
if err != nil {
return nil, err
}
if config.TxIndex.IndexTags != "" {
txIndexer = kv.NewTxIndex(store, kv.IndexTags(strings.Split(config.TxIndex.IndexTags, ",")))
} else if config.TxIndex.IndexAllTags {
txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
} else {
txIndexer = kv.NewTxIndex(store)
}
default:
txIndexer = &null.TxIndex{}
}
indexerService := txindex.NewIndexerService(txIndexer, eventBus)
// run the profile server
profileHost := config.ProfListenAddress
if profileHost != "" {
go func() {
logger.Error("Profile server", "err", http.ListenAndServe(profileHost, nil))
}()
@ -298,11 +314,11 @@ func NewNode(config *cfg.Config,
genesisDoc: genDoc,
privValidator: privValidator,
privKey: privKey,
sw: sw,
addrBook: addrBook,
privKey: privKey,
sw: sw,
addrBook: addrBook,
trustMetricStore: trustMetricStore,
evsw: eventSwitch,
blockStore: blockStore,
bcReactor: bcReactor,
mempoolReactor: mempoolReactor,
@ -310,6 +326,8 @@ func NewNode(config *cfg.Config,
consensusReactor: consensusReactor,
proxyApp: proxyApp,
txIndexer: txIndexer,
indexerService: indexerService,
eventBus: eventBus,
}
node.BaseService = *cmn.NewBaseService(logger, "Node", node)
return node, nil
@ -317,6 +335,11 @@ func NewNode(config *cfg.Config,
// OnStart starts the Node. It implements cmn.Service.
func (n *Node) OnStart() error {
err := n.eventBus.Start()
if err != nil {
return err
}
// Run the RPC server first
// so we can eg. receive txs for the first block
if n.config.RPC.ListenAddress != "" {
@ -335,7 +358,7 @@ func (n *Node) OnStart() error {
// Start the switch
n.sw.SetNodeInfo(n.makeNodeInfo())
n.sw.SetNodePrivKey(n.privKey)
_, err := n.sw.Start()
err = n.sw.Start()
if err != nil {
return err
}
@ -349,6 +372,12 @@ func (n *Node) OnStart() error {
}
}
// start tx indexer
err = n.indexerService.Start()
if err != nil {
return err
}
return nil
}
@ -366,9 +395,13 @@ func (n *Node) OnStop() {
n.Logger.Error("Error closing listener", "listener", l, "err", err)
}
}
n.eventBus.Stop()
n.indexerService.Stop()
}
// RunForever waits for an interupt signal and stops the node.
// RunForever waits for an interrupt signal and stops the node.
func (n *Node) RunForever() {
// Sleep forever and then...
cmn.TrapSignal(func() {
@ -376,13 +409,6 @@ func (n *Node) RunForever() {
})
}
// SetEventSwitch adds the event switch to reactors, mempool, etc.
func SetEventSwitch(evsw types.EventSwitch, eventables ...types.Eventable) {
for _, e := range eventables {
e.SetEventSwitch(evsw)
}
}
// AddListener adds a listener to accept inbound peer connections.
// It should be called before starting the Node.
// The first listener is the primary listener (in NodeInfo)
@ -393,7 +419,6 @@ func (n *Node) AddListener(l p2p.Listener) {
// ConfigureRPC sets all variables in rpccore so they will serve
// rpc calls from this node
func (n *Node) ConfigureRPC() {
rpccore.SetEventSwitch(n.evsw)
rpccore.SetBlockStore(n.blockStore)
rpccore.SetConsensusState(n.consensusState)
rpccore.SetMempool(n.mempoolReactor.Mempool)
@ -404,6 +429,7 @@ func (n *Node) ConfigureRPC() {
rpccore.SetProxyAppQuery(n.proxyApp.Query())
rpccore.SetTxIndexer(n.txIndexer)
rpccore.SetConsensusReactor(n.consensusReactor)
rpccore.SetEventBus(n.eventBus)
rpccore.SetLogger(n.Logger.With("module", "rpc"))
}
@ -420,7 +446,13 @@ func (n *Node) startRPC() ([]net.Listener, error) {
for i, listenAddr := range listenAddrs {
mux := http.NewServeMux()
rpcLogger := n.Logger.With("module", "rpc-server")
wm := rpcserver.NewWebsocketManager(rpccore.Routes, n.evsw)
onDisconnect := rpcserver.OnDisconnect(func(remoteAddr string) {
err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
if err != nil {
rpcLogger.Error("Error unsubsribing from all on disconnect", "err", err)
}
})
wm := rpcserver.NewWebsocketManager(rpccore.Routes, onDisconnect)
wm.SetLogger(rpcLogger.With("protocol", "websocket"))
mux.HandleFunc("/websocket", wm.WebsocketHandler)
rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, rpcLogger)
@ -469,9 +501,9 @@ func (n *Node) MempoolReactor() *mempl.MempoolReactor {
return n.mempoolReactor
}
// EventSwitch returns the Node's EventSwitch.
func (n *Node) EventSwitch() types.EventSwitch {
return n.evsw
// EventBus returns the Node's EventBus.
func (n *Node) EventBus() *types.EventBus {
return n.eventBus
}
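
Downstream consumers now go through the bus. A hypothetical helper sketch; `Subscribe` and `UnsubscribeAll` are used with the same signatures they have elsewhere in this diff:

```go
package main

import (
	"context"
	"errors"
	"time"

	"github.com/tendermint/tendermint/node"
	"github.com/tendermint/tendermint/types"
)

// waitForBlock subscribes to new-block events on the node's EventBus
// and waits for the first one (or times out).
func waitForBlock(n *node.Node, timeout time.Duration) error {
	blockCh := make(chan interface{})
	subscriber := "demo-waiter" // illustrative subscriber id
	if err := n.EventBus().Subscribe(context.Background(), subscriber, types.EventQueryNewBlock, blockCh); err != nil {
		return err
	}
	defer n.EventBus().UnsubscribeAll(context.Background(), subscriber)

	select {
	case <-blockCh:
		return nil
	case <-time.After(timeout):
		return errors.New("timed out waiting for a block")
	}
}
```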
// PrivValidator returns the Node's PrivValidator.
@ -509,11 +541,8 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo {
},
}
// include git hash in the nodeInfo if available
// TODO: use ld-flags
/*if rev, err := cmn.ReadFile(n.config.GetString("revision_file")); err == nil {
nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("revision=%v", string(rev)))
}*/
rpcListenAddr := n.config.RPC.ListenAddress
nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("rpc_addr=%v", rpcListenAddr))
if !n.sw.IsListening() {
return nodeInfo
@ -522,13 +551,8 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo {
p2pListener := n.sw.Listeners()[0]
p2pHost := p2pListener.ExternalAddress().IP.String()
p2pPort := p2pListener.ExternalAddress().Port
rpcListenAddr := n.config.RPC.ListenAddress
// We assume that the rpcListener has the same ExternalAddress.
// This is probably true because both P2P and RPC listeners use UPnP,
// except of course if the rpc is only bound to localhost
nodeInfo.ListenAddr = cmn.Fmt("%v:%v", p2pHost, p2pPort)
nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("rpc_addr=%v", rpcListenAddr))
return nodeInfo
}

View File

@ -1,6 +1,7 @@
package node
import (
"context"
"testing"
"time"
@ -9,30 +10,39 @@ import (
"github.com/tendermint/tmlibs/log"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/types"
)
func TestNodeStartStop(t *testing.T) {
config := cfg.ResetTestRoot("node_node_test")
// Create & start node
// create & start node
n, err := DefaultNewNode(config, log.TestingLogger())
assert.NoError(t, err, "expected no err on DefaultNewNode")
n.Start()
err1 := n.Start()
if err1 != nil {
t.Error(err1)
}
t.Logf("Started node %v", n.sw.NodeInfo())
// Wait a bit to initialize
// TODO remove time.Sleep(), make asynchronous.
time.Sleep(time.Second * 2)
// wait for the node to produce a block
blockCh := make(chan interface{})
err = n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock, blockCh)
assert.NoError(t, err)
select {
case <-blockCh:
case <-time.After(5 * time.Second):
t.Fatal("timed out waiting for the node to produce a block")
}
ch := make(chan struct{}, 1)
// stop the node
go func() {
n.Stop()
ch <- struct{}{}
}()
ticker := time.NewTicker(time.Second * 5)
select {
case <-ch:
case <-ticker.C:
case <-n.Quit:
case <-time.After(5 * time.Second):
t.Fatal("timed out waiting for shutdown")
}
}

View File

@ -4,9 +4,9 @@
`tendermint/tendermint/p2p` provides an abstraction around peer-to-peer communication.<br/>
## Peer/MConnection/Channel
## MConnection
Each peer has one `MConnection` (multiplex connection) instance.
`MConnection` is a multiplex connection:
__multiplex__ *noun* a system or signal involving simultaneous transmission of
several messages along a single channel of communication.
@ -16,6 +16,43 @@ Each `MConnection` handles message transmission on multiple abstract communicati
The byte id and the relative priorities of each `Channel` are configured upon
initialization of the connection.
The `MConnection` supports three packet types: Ping, Pong, and Msg.
### Ping and Pong
The ping and pong messages consist of writing a single byte to the connection: 0x1 and 0x2, respectively.
When we haven't received any messages on an `MConnection` in a time `pingTimeout`, we send a ping message.
When a ping is received on the `MConnection`, a pong is sent in response.
If a pong is not received in sufficient time, the peer's score should be decremented (TODO).
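
A minimal sketch of that exchange, assuming the byte values above (the real constants and write path live in the connection code):

```go
// Hypothetical illustration only; the p2p package keeps its own constants.
const (
	packetTypePing = 0x01
	packetTypePong = 0x02
)

// sendPing writes the single ping byte after pingTimeout of silence.
func sendPing(w io.Writer) error {
	_, err := w.Write([]byte{packetTypePing})
	return err
}

// handleByte answers a ping with a pong; other packet types are
// handled by the msg path.
func handleByte(pktType byte, w io.Writer) error {
	if pktType == packetTypePing {
		_, err := w.Write([]byte{packetTypePong})
		return err
	}
	return nil
}
```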
### Msg
Messages in channels are chopped into smaller msgPackets for multiplexing.
```
type msgPacket struct {
ChannelID byte
EOF byte // 1 means message ends here.
Bytes []byte
}
```
The msgPacket is serialized using go-wire, and prefixed with a 0x3.
The received `Bytes` of a sequential set of packets are appended together
until a packet with `EOF=1` is received, at which point the complete serialized message
is returned for processing by the corresponding channel's `onReceive` function.
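
A rough sketch of that reassembly rule, using the `msgPacket` type above (the helper is hypothetical; the real logic lives in the channel's `recvMsgPacket`):

```go
// appendPacket accumulates payloads until a packet with EOF=1 arrives,
// at which point the completed message is returned for onReceive.
func appendPacket(buf []byte, pkt msgPacket) (complete, rest []byte) {
	buf = append(buf, pkt.Bytes...)
	if pkt.EOF == 1 {
		return buf, nil // message complete
	}
	return nil, buf // keep accumulating
}
```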
### Multiplexing
Messages are sent from a single `sendRoutine`, which loops over a select statement that results in the sending
of a ping, a pong, or a batch of data messages. The batch of data messages may include messages from multiple channels.
Message bytes are queued for sending in their respective channel, with each channel holding one unsent message at a time.
Messages are chosen for a batch one at a time from the channel with the lowest ratio of recently sent bytes to channel priority.
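
A hypothetical sketch of that choice (field names mirror this description, not the actual `Channel` struct):

```go
type chanState struct {
	pending      bool  // has an unsent message queued
	recentlySent int64 // exponential moving average of bytes sent
	priority     int
}

// pickChannel returns the index of the pending channel with the lowest
// recentlySent/priority ratio, or -1 if nothing is pending.
func pickChannel(chs []chanState) int {
	best := -1
	var bestRatio float32
	for i, ch := range chs {
		if !ch.pending {
			continue
		}
		ratio := float32(ch.recentlySent) / float32(ch.priority)
		if best == -1 || ratio < bestRatio {
			best, bestRatio = i, ratio
		}
	}
	return best
}
```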
## Sending Messages
There are two methods for sending messages:
```go
func (m MConnection) Send(chID byte, msg interface{}) bool {}
@ -31,6 +68,12 @@ queue is full.
`Send()` and `TrySend()` are also exposed for each `Peer`.
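
For example (hypothetical channel id and message; `TrySend` fails fast while `Send` blocks, with a timeout, until there is queue space):

```go
if !mconn.TrySend(chID, msg) {
	// queue was full; fall back to the blocking variant,
	// or simply drop the message if it is not critical.
	mconn.Send(chID, msg)
}
```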
## Peer
Each peer has one `MConnection` instance, and includes other information such as whether the connection
was outbound, whether the connection should be recreated if it closes, various identity information about the node,
and other higher level thread-safe data used by the reactors.
## Switch/Reactor
The `Switch` handles peer connections and exposes an API to receive incoming messages

View File

@ -7,6 +7,7 @@ package p2p
import (
"encoding/binary"
"encoding/json"
"fmt"
"math"
"math/rand"
"net"
@ -40,7 +41,7 @@ const (
// old buckets over which an address group will be spread.
oldBucketsPerGroup = 4
// new buckets over which an source address group will be spread.
// new buckets over which a source address group will be spread.
newBucketsPerGroup = 32
// buckets a frequently seen new address may end up in.
@ -79,18 +80,22 @@ const (
type AddrBook struct {
cmn.BaseService
mtx sync.Mutex
// immutable after creation
filePath string
routabilityStrict bool
rand *rand.Rand
key string
ourAddrs map[string]*NetAddress
addrLookup map[string]*knownAddress // new & old
addrNew []map[string]*knownAddress
addrOld []map[string]*knownAddress
wg sync.WaitGroup
nOld int
nNew int
// accessed concurrently
mtx sync.Mutex
rand *rand.Rand
ourAddrs map[string]*NetAddress
addrLookup map[string]*knownAddress // new & old
bucketsOld []map[string]*knownAddress
bucketsNew []map[string]*knownAddress
nOld int
nNew int
wg sync.WaitGroup
}
// NewAddrBook creates a new address book.
@ -112,23 +117,29 @@ func NewAddrBook(filePath string, routabilityStrict bool) *AddrBook {
func (a *AddrBook) init() {
a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits
// New addr buckets
a.addrNew = make([]map[string]*knownAddress, newBucketCount)
for i := range a.addrNew {
a.addrNew[i] = make(map[string]*knownAddress)
a.bucketsNew = make([]map[string]*knownAddress, newBucketCount)
for i := range a.bucketsNew {
a.bucketsNew[i] = make(map[string]*knownAddress)
}
// Old addr buckets
a.addrOld = make([]map[string]*knownAddress, oldBucketCount)
for i := range a.addrOld {
a.addrOld[i] = make(map[string]*knownAddress)
a.bucketsOld = make([]map[string]*knownAddress, oldBucketCount)
for i := range a.bucketsOld {
a.bucketsOld[i] = make(map[string]*knownAddress)
}
}
// OnStart implements Service.
func (a *AddrBook) OnStart() error {
a.BaseService.OnStart()
if err := a.BaseService.OnStart(); err != nil {
return err
}
a.loadFromFile(a.filePath)
// wg.Add to ensure that any invocation of .Wait()
// later on will wait for saveRoutine to terminate.
a.wg.Add(1)
go a.saveRoutine()
return nil
}
@ -141,6 +152,7 @@ func (a *AddrBook) Wait() {
a.wg.Wait()
}
// AddOurAddress adds another one of our addresses.
func (a *AddrBook) AddOurAddress(addr *NetAddress) {
a.mtx.Lock()
defer a.mtx.Unlock()
@ -148,6 +160,7 @@ func (a *AddrBook) AddOurAddress(addr *NetAddress) {
a.ourAddrs[addr.String()] = addr
}
// OurAddresses returns a list of our addresses.
func (a *AddrBook) OurAddresses() []*NetAddress {
addrs := []*NetAddress{}
for _, addr := range a.ourAddrs {
@ -156,18 +169,20 @@ func (a *AddrBook) OurAddresses() []*NetAddress {
return addrs
}
// AddAddress adds the given address as received from the given source.
// NOTE: addr must not be nil
func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) {
func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) error {
a.mtx.Lock()
defer a.mtx.Unlock()
a.Logger.Info("Add address to book", "addr", addr, "src", src)
a.addAddress(addr, src)
return a.addAddress(addr, src)
}
// NeedMoreAddrs returns true if the book does not have enough addresses.
func (a *AddrBook) NeedMoreAddrs() bool {
return a.Size() < needAddressThreshold
}
// Size returns the number of addresses in the book.
func (a *AddrBook) Size() int {
a.mtx.Lock()
defer a.mtx.Unlock()
@ -178,7 +193,12 @@ func (a *AddrBook) size() int {
return a.nNew + a.nOld
}
// Pick an address to connect to with new/old bias.
// PickAddress picks an address to connect to.
// The address is picked randomly from an old or new bucket according
// to the newBias argument, which must be in the range [0, 100] (or else it is truncated to that range)
// and determines how biased we are to pick an address from a new bucket.
// PickAddress returns nil if the AddrBook is empty or if we try to pick
// from an empty bucket.
func (a *AddrBook) PickAddress(newBias int) *NetAddress {
a.mtx.Lock()
defer a.mtx.Unlock()
@ -197,40 +217,34 @@ func (a *AddrBook) PickAddress(newBias int) *NetAddress {
oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias))
newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias)
if (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation {
// pick random Old bucket.
var bucket map[string]*knownAddress = nil
for len(bucket) == 0 {
bucket = a.addrOld[a.rand.Intn(len(a.addrOld))]
// pick a random peer from a random bucket
var bucket map[string]*knownAddress
pickFromOldBucket := (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation
if (pickFromOldBucket && a.nOld == 0) ||
(!pickFromOldBucket && a.nNew == 0) {
return nil
}
// loop until we pick a random non-empty bucket
for len(bucket) == 0 {
if pickFromOldBucket {
bucket = a.bucketsOld[a.rand.Intn(len(a.bucketsOld))]
} else {
bucket = a.bucketsNew[a.rand.Intn(len(a.bucketsNew))]
}
// pick a random ka from bucket.
randIndex := a.rand.Intn(len(bucket))
for _, ka := range bucket {
if randIndex == 0 {
return ka.Addr
}
randIndex--
}
// pick a random index and loop over the map to return that index
randIndex := a.rand.Intn(len(bucket))
for _, ka := range bucket {
if randIndex == 0 {
return ka.Addr
}
cmn.PanicSanity("Should not happen")
} else {
// pick random New bucket.
var bucket map[string]*knownAddress = nil
for len(bucket) == 0 {
bucket = a.addrNew[a.rand.Intn(len(a.addrNew))]
}
// pick a random ka from bucket.
randIndex := a.rand.Intn(len(bucket))
for _, ka := range bucket {
if randIndex == 0 {
return ka.Addr
}
randIndex--
}
cmn.PanicSanity("Should not happen")
randIndex--
}
return nil
}
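
To make the bias arithmetic above concrete: with `nOld = 100`, `nNew = 25` and `newBias = 50`, `oldCorrelation = sqrt(100) * (100-50) = 500` and `newCorrelation = sqrt(25) * 50 = 250`, so an old bucket is picked with probability 500/750 ≈ 2/3 even though the bias is nominally even; the square roots damp the influence of raw address counts.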
// MarkGood marks the peer as good and moves it into an "old" bucket.
// XXX: we never call this!
func (a *AddrBook) MarkGood(addr *NetAddress) {
a.mtx.Lock()
defer a.mtx.Unlock()
@ -244,6 +258,7 @@ func (a *AddrBook) MarkGood(addr *NetAddress) {
}
}
// MarkAttempt marks that an attempt was made to connect to the address.
func (a *AddrBook) MarkAttempt(addr *NetAddress) {
a.mtx.Lock()
defer a.mtx.Unlock()
@ -297,6 +312,7 @@ func (a *AddrBook) GetSelection() []*NetAddress {
// Fisher-Yates shuffle the array. We only need to do the first
// `numAddresses' since we are throwing the rest.
// XXX: What's the point of this if we already loop randomly through addrLookup ?
for i := 0; i < numAddresses; i++ {
// pick a number between current index and the end
j := rand.Intn(len(allAddr)-i) + i
@ -355,7 +371,7 @@ func (a *AddrBook) loadFromFile(filePath string) bool {
if err != nil {
cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err))
}
defer r.Close()
defer r.Close() // nolint: errcheck
aJSON := &addrBookJSON{}
dec := json.NewDecoder(r)
err = dec.Decode(aJSON)
@ -366,7 +382,7 @@ func (a *AddrBook) loadFromFile(filePath string) bool {
// Restore all the fields...
// Restore the key
a.key = aJSON.Key
// Restore .addrNew & .addrOld
// Restore .bucketsNew & .bucketsOld
for _, ka := range aJSON.Addrs {
for _, bucketIndex := range ka.Buckets {
bucket := a.getBucket(ka.BucketType, bucketIndex)
@ -391,28 +407,29 @@ func (a *AddrBook) Save() {
/* Private methods */
func (a *AddrBook) saveRoutine() {
dumpAddressTicker := time.NewTicker(dumpAddressInterval)
defer a.wg.Done()
saveFileTicker := time.NewTicker(dumpAddressInterval)
out:
for {
select {
case <-dumpAddressTicker.C:
case <-saveFileTicker.C:
a.saveToFile(a.filePath)
case <-a.Quit:
break out
}
}
dumpAddressTicker.Stop()
saveFileTicker.Stop()
a.saveToFile(a.filePath)
a.wg.Done()
a.Logger.Info("Address handler done")
}
func (a *AddrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
switch bucketType {
case bucketTypeNew:
return a.addrNew[bucketIdx]
return a.bucketsNew[bucketIdx]
case bucketTypeOld:
return a.addrOld[bucketIdx]
return a.bucketsOld[bucketIdx]
default:
cmn.PanicSanity("Should not happen")
return nil
@ -467,7 +484,7 @@ func (a *AddrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
}
addrStr := ka.Addr.String()
bucket := a.getBucket(bucketTypeNew, bucketIdx)
bucket := a.getBucket(bucketTypeOld, bucketIdx)
// Already exists?
if _, ok := bucket[addrStr]; ok {
@ -533,14 +550,13 @@ func (a *AddrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
return oldest
}
func (a *AddrBook) addAddress(addr, src *NetAddress) {
func (a *AddrBook) addAddress(addr, src *NetAddress) error {
if a.routabilityStrict && !addr.Routable() {
a.Logger.Error(cmn.Fmt("Cannot add non-routable address %v", addr))
return
return fmt.Errorf("Cannot add non-routable address %v", addr)
}
if _, ok := a.ourAddrs[addr.String()]; ok {
// Ignore our own listener address.
return
return fmt.Errorf("Cannot add ourselves with address %v", addr)
}
ka := a.addrLookup[addr.String()]
@ -548,16 +564,16 @@ func (a *AddrBook) addAddress(addr, src *NetAddress) {
if ka != nil {
// Already old.
if ka.isOld() {
return
return nil
}
// Already in max new buckets.
if len(ka.Buckets) == maxNewBucketsPerAddress {
return
return nil
}
// The more entries we have, the less likely we are to add more.
factor := int32(2 * len(ka.Buckets))
if a.rand.Int31n(factor) != 0 {
return
return nil
}
} else {
ka = newKnownAddress(addr, src)
@ -567,12 +583,13 @@ func (a *AddrBook) addAddress(addr, src *NetAddress) {
a.addToNewBucket(ka, bucket)
a.Logger.Info("Added new address", "address", addr, "total", a.size())
return nil
}
// Make space in the new buckets by expiring the really bad entries.
// If no bad entries are available we remove the oldest.
func (a *AddrBook) expireNew(bucketIdx int) {
for addrStr, ka := range a.addrNew[bucketIdx] {
for addrStr, ka := range a.bucketsNew[bucketIdx] {
// If an entry is bad, throw it away
if ka.isBad() {
a.Logger.Info(cmn.Fmt("expiring bad address %v", addrStr))
@ -674,8 +691,8 @@ func (a *AddrBook) calcOldBucket(addr *NetAddress) int {
}
// Return a string representing the network group of this address.
// This is the /16 for IPv6, the /32 (/36 for he.net) for IPv6, the string
// "local" for a local address and the string "unroutable for an unroutable
// This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
// "local" for a local address and the string "unroutable" for an unroutable
// address.
func (a *AddrBook) groupKey(na *NetAddress) string {
if a.routabilityStrict && na.Local() {
@ -801,8 +818,8 @@ func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
}
/*
An address is bad if the address in question has not been tried in the last
minute and meets one of the following criteria:
An address is bad if the address in question is a New address, has not been tried in the last
minute, and meets one of the following criteria:
1) It claims to be from the future
2) It hasn't been seen in over a month
@ -811,14 +828,23 @@ func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
All addresses that meet these criteria are assumed to be worthless and not
worth keeping hold of.
XXX: so a good peer needs us to call MarkGood before the conditions above are reached!
*/
func (ka *knownAddress) isBad() bool {
// Is Old --> good
if ka.BucketType == bucketTypeOld {
return false
}
// Has been attempted in the last minute --> good
if ka.LastAttempt.Before(time.Now().Add(-1 * time.Minute)) {
return false
}
// Over a month old?
// Too old?
// XXX: does this mean if we've kept a connection up for this long we'll disconnect?!
// and shouldn't it be .Before ?
if ka.LastAttempt.After(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
return true
}
@ -829,6 +855,7 @@ func (ka *knownAddress) isBad() bool {
}
// Hasn't succeeded in too long?
// XXX: does this mean if we've kept a connection up for this long we'll disconnect?!
if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
ka.Attempts >= maxFailures {
return true

View File

@ -23,6 +23,42 @@ func createTempFileName(prefix string) string {
return fname
}
func TestAddrBookPickAddress(t *testing.T) {
assert := assert.New(t)
fname := createTempFileName("addrbook_test")
// 0 addresses
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
assert.Zero(book.Size())
addr := book.PickAddress(50)
assert.Nil(addr, "expected no address")
randAddrs := randNetAddressPairs(t, 1)
addrSrc := randAddrs[0]
book.AddAddress(addrSrc.addr, addrSrc.src)
// pick an address when we only have new address
addr = book.PickAddress(0)
assert.NotNil(addr, "expected an address")
addr = book.PickAddress(50)
assert.NotNil(addr, "expected an address")
addr = book.PickAddress(100)
assert.NotNil(addr, "expected an address")
// pick an address when we only have old address
book.MarkGood(addrSrc.addr)
addr = book.PickAddress(0)
assert.NotNil(addr, "expected an address")
addr = book.PickAddress(50)
assert.NotNil(addr, "expected an address")
// in this case, nNew==0 but we biased 100% to new, so we return nil
addr = book.PickAddress(100)
assert.Nil(addr, "did not expect an address")
}
func TestAddrBookSaveLoad(t *testing.T) {
fname := createTempFileName("addrbook_test")
@ -76,6 +112,7 @@ func TestAddrBookLookup(t *testing.T) {
}
func TestAddrBookPromoteToOld(t *testing.T) {
assert := assert.New(t)
fname := createTempFileName("addrbook_test")
randAddrs := randNetAddressPairs(t, 100)
@ -106,6 +143,8 @@ func TestAddrBookPromoteToOld(t *testing.T) {
if len(selection) > book.Size() {
t.Errorf("selection could not be bigger than the book")
}
assert.Equal(book.Size(), 100, "expecting book size to be 100")
}
func TestAddrBookHandlesDuplicates(t *testing.T) {

15
p2p/conn_go110.go Normal file
View File

@ -0,0 +1,15 @@
// +build go1.10
package p2p
// Go1.10 has a proper net.Conn implementation that
// has the SetDeadline method implemented as per
// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706
// lest we run into problems like
// https://github.com/tendermint/tendermint/issues/851
import "net"
func netPipe() (net.Conn, net.Conn) {
return net.Pipe()
}

32
p2p/conn_notgo110.go Normal file
View File

@ -0,0 +1,32 @@
// +build !go1.10
package p2p
import (
"net"
"time"
)
// Only Go1.10 has a proper net.Conn implementation that
// has the SetDeadline method implemented as per
// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706
// lest we run into problems like
// https://github.com/tendermint/tendermint/issues/851
// so for go versions < Go1.10 use our custom net.Conn creator
// that doesn't return an `Unimplemented error` for net.Conn.
// Before https://github.com/tendermint/tendermint/commit/49faa79bdce5663894b3febbf4955fb1d172df04
// we hadn't cared about errors from SetDeadline, so we swallow them here anyway.
type pipe struct {
net.Conn
}
func (p *pipe) SetDeadline(t time.Time) error {
return nil
}
func netPipe() (net.Conn, net.Conn) {
p1, p2 := net.Pipe()
return &pipe{p1}, &pipe{p2}
}
var _ net.Conn = (*pipe)(nil)

View File

@ -11,10 +11,13 @@ import (
"time"
wire "github.com/tendermint/go-wire"
tmlegacy "github.com/tendermint/go-wire/nowriter/tmlegacy"
cmn "github.com/tendermint/tmlibs/common"
flow "github.com/tendermint/tmlibs/flowrate"
)
var legacy = tmlegacy.TMEncoderLegacy{}
const (
numBatchMsgPackets = 10
minReadBufferSize = 1024
@ -146,9 +149,8 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec
var channels = []*Channel{}
for _, desc := range chDescs {
descCopy := *desc // copy the desc else unsafe access across connections
channel := newChannel(mconn, &descCopy)
channelsIdx[channel.id] = channel
channel := newChannel(mconn, *desc)
channelsIdx[channel.desc.ID] = channel
channels = append(channels, channel)
}
mconn.channels = channels
@ -161,7 +163,9 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec
// OnStart implements BaseService
func (c *MConnection) OnStart() error {
c.BaseService.OnStart()
if err := c.BaseService.OnStart(); err != nil {
return err
}
c.quit = make(chan struct{})
c.flushTimer = cmn.NewThrottleTimer("flush", c.config.flushThrottle)
c.pingTimer = cmn.NewRepeatTimer("ping", pingTimeout)
@ -180,7 +184,7 @@ func (c *MConnection) OnStop() {
if c.quit != nil {
close(c.quit)
}
c.conn.Close()
c.conn.Close() // nolint: errcheck
// We can't close pong safely here because
// recvRoutine may write to it after we've stopped.
// Though it doesn't need to get closed at all,
@ -308,12 +312,12 @@ FOR_LOOP:
}
case <-c.pingTimer.Ch:
c.Logger.Debug("Send Ping")
wire.WriteByte(packetTypePing, c.bufWriter, &n, &err)
legacy.WriteOctet(packetTypePing, c.bufWriter, &n, &err)
c.sendMonitor.Update(int(n))
c.flush()
case <-c.pong:
c.Logger.Debug("Send Pong")
wire.WriteByte(packetTypePong, c.bufWriter, &n, &err)
legacy.WriteOctet(packetTypePong, c.bufWriter, &n, &err)
c.sendMonitor.Update(int(n))
c.flush()
case <-c.quit:
@ -372,7 +376,7 @@ func (c *MConnection) sendMsgPacket() bool {
continue
}
// Get ratio, and keep track of lowest ratio.
ratio := float32(channel.recentlySent) / float32(channel.priority)
ratio := float32(channel.recentlySent) / float32(channel.desc.Priority)
if ratio < leastRatio {
leastRatio = ratio
leastChannel = channel
@ -413,7 +417,7 @@ FOR_LOOP:
// Peek into bufReader for debugging
if numBytes := c.bufReader.Buffered(); numBytes > 0 {
log.Info("Peek connection buffer", "numBytes", numBytes, "bytes", log15.Lazy{func() []byte {
bytes, err := c.bufReader.Peek(MinInt(numBytes, 100))
bytes, err := c.bufReader.Peek(cmn.MinInt(numBytes, 100))
if err == nil {
return bytes
} else {
@ -459,8 +463,11 @@ FOR_LOOP:
}
channel, ok := c.channelsIdx[pkt.ChannelID]
if !ok || channel == nil {
cmn.PanicQ(cmn.Fmt("Unknown channel %X", pkt.ChannelID))
err := fmt.Errorf("Unknown channel %X", pkt.ChannelID)
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
c.stopForError(err)
}
msgBytes, err := channel.recvMsgPacket(pkt)
if err != nil {
if c.IsRunning() {
@ -475,7 +482,9 @@ FOR_LOOP:
c.onReceive(pkt.ChannelID, msgBytes)
}
default:
cmn.PanicSanity(cmn.Fmt("Unknown message type %X", pktType))
err := fmt.Errorf("Unknown message type %X", pktType)
c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err)
c.stopForError(err)
}
// TODO: shouldn't this go in the sendRoutine?
@ -511,10 +520,10 @@ func (c *MConnection) Status() ConnectionStatus {
status.Channels = make([]ChannelStatus, len(c.channels))
for i, channel := range c.channels {
status.Channels[i] = ChannelStatus{
ID: channel.id,
ID: channel.desc.ID,
SendQueueCapacity: cap(channel.sendQueue),
SendQueueSize: int(channel.sendQueueSize), // TODO use atomic
Priority: channel.priority,
Priority: channel.desc.Priority,
RecentlySent: channel.recentlySent,
}
}
@ -531,7 +540,7 @@ type ChannelDescriptor struct {
RecvMessageCapacity int
}
func (chDesc *ChannelDescriptor) FillDefaults() {
func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
if chDesc.SendQueueCapacity == 0 {
chDesc.SendQueueCapacity = defaultSendQueueCapacity
}
@ -541,36 +550,34 @@ func (chDesc *ChannelDescriptor) FillDefaults() {
if chDesc.RecvMessageCapacity == 0 {
chDesc.RecvMessageCapacity = defaultRecvMessageCapacity
}
filled = chDesc
return
}
// TODO: lowercase.
// NOTE: not goroutine-safe.
type Channel struct {
conn *MConnection
desc *ChannelDescriptor
id byte
desc ChannelDescriptor
sendQueue chan []byte
sendQueueSize int32 // atomic.
recving []byte
sending []byte
priority int
recentlySent int64 // exponential moving average
maxMsgPacketPayloadSize int
}
func newChannel(conn *MConnection, desc *ChannelDescriptor) *Channel {
desc.FillDefaults()
func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
desc = desc.FillDefaults()
if desc.Priority <= 0 {
cmn.PanicSanity("Channel default priority must be a postive integer")
cmn.PanicSanity("Channel default priority must be a positive integer")
}
return &Channel{
conn: conn,
desc: desc,
id: desc.ID,
sendQueue: make(chan []byte, desc.SendQueueCapacity),
recving: make([]byte, 0, desc.RecvBufferCapacity),
priority: desc.Priority,
maxMsgPacketPayloadSize: conn.config.maxMsgPacketPayloadSize,
}
}
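Passing ChannelDescriptor by value is the point of these changes: FillDefaults now returns a filled copy instead of mutating through a pointer, so two connections can no longer share, and race on, the same descriptor (the deleted descCopy workaround above existed for exactly that reason). A hypothetical caller:

// Hypothetical caller: the original desc is never mutated.
desc := ChannelDescriptor{ID: 0x01, Priority: 1}
filled := desc.FillDefaults() // copy with defaults applied
_ = filled.SendQueueCapacity  // defaulted on the copy; desc is unchanged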
@ -629,7 +636,7 @@ func (ch *Channel) isSendPending() bool {
// Not goroutine-safe
func (ch *Channel) nextMsgPacket() msgPacket {
packet := msgPacket{}
packet.ChannelID = byte(ch.id)
packet.ChannelID = byte(ch.desc.ID)
maxSize := ch.maxMsgPacketPayloadSize
packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))]
if len(ch.sending) <= maxSize {
@ -648,14 +655,18 @@ func (ch *Channel) nextMsgPacket() msgPacket {
func (ch *Channel) writeMsgPacketTo(w io.Writer) (n int, err error) {
packet := ch.nextMsgPacket()
// log.Debug("Write Msg Packet", "conn", ch.conn, "packet", packet)
wire.WriteByte(packetTypeMsg, w, &n, &err)
wire.WriteBinary(packet, w, &n, &err)
writeMsgPacketTo(packet, w, &n, &err)
if err == nil {
ch.recentlySent += int64(n)
}
return
}
func writeMsgPacketTo(packet msgPacket, w io.Writer, n *int, err *error) {
legacy.WriteOctet(packetTypeMsg, w, n, err)
wire.WriteBinary(packet, w, n, err)
}
// Handles incoming msgPackets. Returns a msg bytes if msg is complete.
// Not goroutine-safe
func (ch *Channel) recvMsgPacket(packet msgPacket) ([]byte, error) {
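Pulling the write logic out into the unexported writeMsgPacketTo helper is what lets the read-error tests below forge raw packets, well-formed or oversized, directly onto the underlying conn without going through a Channel.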

View File

@ -1,4 +1,4 @@
package p2p_test
package p2p
import (
"net"
@ -7,11 +7,11 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
p2p "github.com/tendermint/tendermint/p2p"
wire "github.com/tendermint/go-wire"
"github.com/tendermint/tmlibs/log"
)
func createMConnection(conn net.Conn) *p2p.MConnection {
func createTestMConnection(conn net.Conn) *MConnection {
onReceive := func(chID byte, msgBytes []byte) {
}
onError := func(r interface{}) {
@ -21,9 +21,9 @@ func createMConnection(conn net.Conn) *p2p.MConnection {
return c
}
func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msgBytes []byte), onError func(r interface{})) *p2p.MConnection {
chDescs := []*p2p.ChannelDescriptor{&p2p.ChannelDescriptor{ID: 0x01, Priority: 1, SendQueueCapacity: 1}}
c := p2p.NewMConnection(conn, chDescs, onReceive, onError)
func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msgBytes []byte), onError func(r interface{})) *MConnection {
chDescs := []*ChannelDescriptor{&ChannelDescriptor{ID: 0x01, Priority: 1, SendQueueCapacity: 1}}
c := NewMConnection(conn, chDescs, onReceive, onError)
c.SetLogger(log.TestingLogger())
return c
}
@ -31,12 +31,12 @@ func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msg
func TestMConnectionSend(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := net.Pipe()
defer server.Close()
defer client.Close()
server, client := netPipe()
defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck
mconn := createMConnection(client)
_, err := mconn.Start()
mconn := createTestMConnection(client)
err := mconn.Start()
require.Nil(err)
defer mconn.Stop()
@ -44,12 +44,18 @@ func TestMConnectionSend(t *testing.T) {
assert.True(mconn.Send(0x01, msg))
// Note: subsequent Send/TrySend calls could pass because we are reading from
// the send queue in a separate goroutine.
server.Read(make([]byte, len(msg)))
_, err = server.Read(make([]byte, len(msg)))
if err != nil {
t.Error(err)
}
assert.True(mconn.CanSend(0x01))
msg = "Spider-Man"
assert.True(mconn.TrySend(0x01, msg))
server.Read(make([]byte, len(msg)))
_, err = server.Read(make([]byte, len(msg)))
if err != nil {
t.Error(err)
}
assert.False(mconn.CanSend(0x05), "CanSend should return false because channel is unknown")
assert.False(mconn.Send(0x05, "Absorbing Man"), "Send should return false because channel is unknown")
@ -58,9 +64,9 @@ func TestMConnectionSend(t *testing.T) {
func TestMConnectionReceive(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := net.Pipe()
defer server.Close()
defer client.Close()
server, client := netPipe()
defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck
receivedCh := make(chan []byte)
errorsCh := make(chan interface{})
@ -71,12 +77,12 @@ func TestMConnectionReceive(t *testing.T) {
errorsCh <- r
}
mconn1 := createMConnectionWithCallbacks(client, onReceive, onError)
_, err := mconn1.Start()
err := mconn1.Start()
require.Nil(err)
defer mconn1.Stop()
mconn2 := createMConnection(server)
_, err = mconn2.Start()
mconn2 := createTestMConnection(server)
err = mconn2.Start()
require.Nil(err)
defer mconn2.Stop()
@ -96,12 +102,12 @@ func TestMConnectionReceive(t *testing.T) {
func TestMConnectionStatus(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := net.Pipe()
defer server.Close()
defer client.Close()
server, client := netPipe()
defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck
mconn := createMConnection(client)
_, err := mconn.Start()
mconn := createTestMConnection(client)
err := mconn.Start()
require.Nil(err)
defer mconn.Stop()
@ -113,9 +119,9 @@ func TestMConnectionStatus(t *testing.T) {
func TestMConnectionStopsAndReturnsError(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := net.Pipe()
defer server.Close()
defer client.Close()
server, client := netPipe()
defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck
receivedCh := make(chan []byte)
errorsCh := make(chan interface{})
@ -126,11 +132,13 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
errorsCh <- r
}
mconn := createMConnectionWithCallbacks(client, onReceive, onError)
_, err := mconn.Start()
err := mconn.Start()
require.Nil(err)
defer mconn.Stop()
client.Close()
if err := client.Close(); err != nil {
t.Error(err)
}
select {
case receivedBytes := <-receivedCh:
@ -142,3 +150,166 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
t.Fatal("Did not receive error in 500ms")
}
}
func newClientAndServerConnsForReadErrors(require *require.Assertions, chOnErr chan struct{}) (*MConnection, *MConnection) {
server, client := netPipe()
onReceive := func(chID byte, msgBytes []byte) {}
onError := func(r interface{}) {}
// create client conn with two channels
chDescs := []*ChannelDescriptor{
{ID: 0x01, Priority: 1, SendQueueCapacity: 1},
{ID: 0x02, Priority: 1, SendQueueCapacity: 1},
}
mconnClient := NewMConnection(client, chDescs, onReceive, onError)
mconnClient.SetLogger(log.TestingLogger().With("module", "client"))
err := mconnClient.Start()
require.Nil(err)
// create server conn with 1 channel
// it fires on chOnErr when there's an error
serverLogger := log.TestingLogger().With("module", "server")
onError = func(r interface{}) {
chOnErr <- struct{}{}
}
mconnServer := createMConnectionWithCallbacks(server, onReceive, onError)
mconnServer.SetLogger(serverLogger)
err = mconnServer.Start()
require.Nil(err)
return mconnClient, mconnServer
}
func expectSend(ch chan struct{}) bool {
after := time.After(time.Second * 5)
select {
case <-ch:
return true
case <-after:
return false
}
}
func TestMConnectionReadErrorBadEncoding(t *testing.T) {
assert, require := assert.New(t), require.New(t)
chOnErr := make(chan struct{})
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr)
defer mconnClient.Stop()
defer mconnServer.Stop()
client := mconnClient.conn
msg := "Ant-Man"
// send badly encoded msgPacket
var n int
var err error
wire.WriteByte(packetTypeMsg, client, &n, &err)
wire.WriteByteSlice([]byte(msg), client, &n, &err)
assert.True(expectSend(chOnErr), "badly encoded msgPacket")
}
func TestMConnectionReadErrorUnknownChannel(t *testing.T) {
assert, require := assert.New(t), require.New(t)
chOnErr := make(chan struct{})
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr)
defer mconnClient.Stop()
defer mconnServer.Stop()
msg := "Ant-Man"
// fail to send msg on channel unknown by client
assert.False(mconnClient.Send(0x03, msg))
// send msg on channel unknown by the server.
// should cause an error
assert.True(mconnClient.Send(0x02, msg))
assert.True(expectSend(chOnErr), "unknown channel")
}
func TestMConnectionReadErrorLongMessage(t *testing.T) {
assert, require := assert.New(t), require.New(t)
chOnErr := make(chan struct{})
chOnRcv := make(chan struct{})
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr)
defer mconnClient.Stop()
defer mconnServer.Stop()
mconnServer.onReceive = func(chID byte, msgBytes []byte) {
chOnRcv <- struct{}{}
}
client := mconnClient.conn
// send a msg that's just the right size
var n int
var err error
packet := msgPacket{
ChannelID: 0x01,
Bytes: make([]byte, mconnClient.config.maxMsgPacketTotalSize()-5),
EOF: 1,
}
writeMsgPacketTo(packet, client, &n, &err)
assert.True(expectSend(chOnRcv), "msg just right")
// send a msg that's too long
packet = msgPacket{
ChannelID: 0x01,
Bytes: make([]byte, mconnClient.config.maxMsgPacketTotalSize()-4),
EOF: 1,
}
writeMsgPacketTo(packet, client, &n, &err)
assert.True(expectSend(chOnErr), "msg too long")
}
func TestMConnectionReadErrorUnknownMsgType(t *testing.T) {
assert, require := assert.New(t), require.New(t)
chOnErr := make(chan struct{})
mconnClient, mconnServer := newClientAndServerConnsForReadErrors(require, chOnErr)
defer mconnClient.Stop()
defer mconnServer.Stop()
// send msg with unknown msg type
var n int
var err error
wire.WriteByte(0x04, mconnClient.conn, &n, &err)
assert.True(expectSend(chOnErr), "unknown msg type")
}
func TestMConnectionTrySend(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := netPipe()
defer server.Close() // nolint: errcheck
defer client.Close() // nolint: errcheck
mconn := createTestMConnection(client)
err := mconn.Start()
require.Nil(err)
defer mconn.Stop()
msg := "Semicolon-Woman"
resultCh := make(chan string, 2)
assert.True(mconn.TrySend(0x01, msg))
_, err = server.Read(make([]byte, len(msg)))
if err != nil {
t.Error(err)
}
assert.True(mconn.CanSend(0x01))
assert.True(mconn.TrySend(0x01, msg))
assert.False(mconn.CanSend(0x01))
go func() {
mconn.TrySend(0x01, msg)
resultCh <- "TrySend"
}()
go func() {
mconn.Send(0x01, msg)
resultCh <- "Send"
}()
assert.False(mconn.CanSend(0x01))
assert.False(mconn.TrySend(0x01, msg))
assert.Equal("TrySend", <-resultCh)
_, err = server.Read(make([]byte, len(msg)))
if err != nil {
t.Error(err)
}
assert.Equal("Send", <-resultCh) // Order constrained by parallel blocking above
}
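The final ordering assertion works because TrySend gives up immediately when the send queue is full, so its goroutine reports first, while Send blocks until the server's last Read drains the queue and only then reports.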

View File

@ -124,7 +124,7 @@ func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error {
func (fc *FuzzedConnection) randomDuration() time.Duration {
maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000)
return time.Millisecond * time.Duration(rand.Int()%maxDelayMillis)
return time.Millisecond * time.Duration(rand.Int()%maxDelayMillis) // nolint: gas
}
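Two caveats in randomDuration worth flagging: despite its name, maxDelayMillis actually holds microseconds (Nanoseconds()/1000), and the modulo panics if MaxDelay is shorter than a microsecond. A defensive hypothetical variant, not part of this diff:

// Hypothetical defensive variant: compute true milliseconds and
// guard the zero case to avoid a modulo-by-zero panic.
func randomDelay(maxDelay time.Duration) time.Duration {
	maxDelayMillis := int(maxDelay.Nanoseconds() / int64(time.Millisecond))
	if maxDelayMillis <= 0 {
		return 0
	}
	return time.Millisecond * time.Duration(rand.Int()%maxDelayMillis) // nolint: gas
}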
// implements the fuzz (delay, kill conn)
@ -143,7 +143,7 @@ func (fc *FuzzedConnection) fuzz() bool {
} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn {
// XXX: can't this fail because of machine precision?
// XXX: do we need an error?
fc.Close()
fc.Close() // nolint: errcheck, gas
return true
} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep {
time.Sleep(fc.randomDuration())

View File

@ -16,7 +16,7 @@ type Listener interface {
InternalAddress() *NetAddress
ExternalAddress() *NetAddress
String() string
Stop() bool
Stop() error
}
// Implements Listener
@ -100,19 +100,24 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log
connections: make(chan net.Conn, numBufferedConnections),
}
dl.BaseService = *cmn.NewBaseService(logger, "DefaultListener", dl)
dl.Start() // Started upon construction
err = dl.Start() // Started upon construction
if err != nil {
logger.Error("Error starting base service", "err", err)
}
return dl
}
func (l *DefaultListener) OnStart() error {
l.BaseService.OnStart()
if err := l.BaseService.OnStart(); err != nil {
return err
}
go l.listenRoutine()
return nil
}
func (l *DefaultListener) OnStop() {
l.BaseService.OnStop()
l.listener.Close()
l.listener.Close() // nolint: errcheck
}
// Accept connections and pass on the channel

View File

@ -25,7 +25,12 @@ func TestListener(t *testing.T) {
}
msg := []byte("hi!")
go connIn.Write(msg)
go func() {
_, err := connIn.Write(msg)
if err != nil {
t.Error(err)
}
}()
b := make([]byte, 32)
n, err := connOut.Read(b)
if err != nil {

Some files were not shown because too many files have changed in this diff.