Merge pull request #703 from tendermint/fix-linting

add metalinter to CI and include fixes
Commit c3632bc54a, authored by Zach on 2017-11-28 00:02:52 +00:00 and committed by GitHub.
81 changed files with 679 additions and 386 deletions

View File

@@ -2,6 +2,7 @@ GOTOOLS = \
 	github.com/mitchellh/gox \
 	github.com/tcnksm/ghr \
 	github.com/Masterminds/glide \
+	github.com/alecthomas/gometalinter
 PACKAGES=$(shell go list ./... | grep -v '/vendor/')
 BUILD_TAGS?=tendermint
@@ -25,6 +26,8 @@ dist:
 	@BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'"
 test:
+	@echo "--> Running linter"
+	@make metalinter_test
 	@echo "--> Running go test"
 	@go test $(PACKAGES)
@@ -76,11 +79,40 @@ tools:
 ensure_tools:
 	go get $(GOTOOLS)
+	@gometalinter --install
 ### Formatting, linting, and vetting
-megacheck:
-	@for pkg in ${PACKAGES}; do megacheck "$$pkg"; done
+metalinter:
+	@gometalinter --vendor --deadline=600s --enable-all --disable=lll ./...
+metalinter_test:
+	@gometalinter --vendor --deadline=600s --disable-all \
+		--enable=deadcode \
+		--enable=gas \
+		--enable=misspell \
+		--enable=safesql \
+		./...
+	#--enable=maligned \
+	#--enable=dupl \
+	#--enable=errcheck \
+	#--enable=goconst \
+	#--enable=gocyclo \
+	#--enable=goimports \
+	#--enable=golint \ <== comments on anything exported
+	#--enable=gosimple \
+	#--enable=gotype \
+	#--enable=ineffassign \
+	#--enable=interfacer \
+	#--enable=megacheck \
+	#--enable=staticcheck \
+	#--enable=structcheck \
+	#--enable=unconvert \
+	#--enable=unparam \
+	#--enable=unused \
+	#--enable=varcheck \
+	#--enable=vet \
+	#--enable=vetshadow \
 .PHONY: install build build_race dist test test_race test_integrations test100 draw_deps list_deps get_deps get_vendor_deps update_deps revision tools
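Note: metalinter_test enables only four checks (deadcode, gas, misspell, safesql), while the heavier ones stay on the commented-out list; even so, most of the Go changes below are errcheck-style fixes applied by hand. As a rough illustration of that pattern, here is a hypothetical before/after (package and function names invented, not from this diff):

package example

import "os"

// Before: the kind of code error-checking linters flag — the return
// values of Write and Close are silently discarded.
func writeUnchecked(path string, data []byte) {
	f, _ := os.Create(path)
	f.Write(data) // error ignored
	f.Close()     // error ignored
}

// After: every error is handled or propagated, the pattern applied
// throughout the rest of this diff.
func writeChecked(path string, data []byte) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close() // nolint: errcheck
		return err
	}
	return f.Close()
}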

View File

@@ -4,9 +4,9 @@ import (
 	"testing"
 	"github.com/tendermint/go-crypto"
-	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/go-wire"
 	proto "github.com/tendermint/tendermint/benchmarks/proto"
+	"github.com/tendermint/tendermint/p2p"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 )

View File

@@ -18,12 +18,16 @@ func BenchmarkFileWrite(b *testing.B) {
 	b.StartTimer()
 	for i := 0; i < b.N; i++ {
-		file.Write([]byte(testString))
+		_, err := file.Write([]byte(testString))
+		if err != nil {
+			b.Error(err)
+		}
 	}
-	file.Close()
-	err = os.Remove("benchmark_file_write.out")
-	if err != nil {
+	if err := file.Close(); err != nil {
+		b.Error(err)
+	}
+	if err := os.Remove("benchmark_file_write.out"); err != nil {
 		b.Error(err)
 	}
 }
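For reference, the same convention in a self-contained benchmark (a sketch assuming only the standard library; ioutil.TempFile stands in for the fixed filename used above):

package example

import (
	"io/ioutil"
	"os"
	"testing"
)

// BenchmarkCheckedWrite mirrors the change above: I/O failures are
// reported via b.Error rather than discarded, so a failing disk
// cannot silently skew the measurement.
func BenchmarkCheckedWrite(b *testing.B) {
	f, err := ioutil.TempFile("", "bench")
	if err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := f.Write([]byte("payload")); err != nil {
			b.Error(err)
		}
	}
	b.StopTimer()
	if err := f.Close(); err != nil {
		b.Error(err)
	}
	if err := os.Remove(f.Name()); err != nil {
		b.Error(err)
	}
}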

View File

@@ -24,9 +24,6 @@ import bytes "bytes"
 import strings "strings"
 import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import strconv "strconv"
-import reflect "reflect"
 import io "io"
@@ -392,31 +389,6 @@ func (this *PubKeyEd25519) GoString() string {
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
-func valueToGoStringTest(v interface{}, typ string) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
-	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
-}
-func extensionToGoStringTest(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
-	if e == nil {
-		return "nil"
-	}
-	s := "map[int32]proto.Extension{"
-	keys := make([]int, 0, len(e))
-	for k := range e {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-	ss := []string{}
-	for _, k := range keys {
-		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
-	}
-	s += strings.Join(ss, ",") + "}"
-	return s
-}
 func (m *ResultStatus) Marshal() (data []byte, err error) {
 	size := m.Size()
 	data = make([]byte, size)
@@ -586,24 +558,6 @@ func (m *PubKeyEd25519) MarshalTo(data []byte) (int, error) {
 	return i, nil
 }
-func encodeFixed64Test(data []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
-	return offset + 8
-}
-func encodeFixed32Test(data []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	return offset + 4
-}
 func encodeVarintTest(data []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
 		data[offset] = uint8(v&0x7f | 0x80)
@@ -689,9 +643,6 @@ func sovTest(x uint64) (n int) {
 	}
 	return n
 }
-func sozTest(x uint64) (n int) {
-	return sovTest(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
 func (this *ResultStatus) String() string {
 	if this == nil {
 		return "nil"
@@ -742,14 +693,6 @@ func (this *PubKeyEd25519) String() string {
 	}, "")
 	return s
 }
-func valueToStringTest(v interface{}) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
-	}
-	pv := reflect.Indirect(rv).Interface()
-	return fmt.Sprintf("*%v", pv)
-}
 func (m *ResultStatus) Unmarshal(data []byte) error {
 	var hasFields [1]uint64
 	l := len(data)

View File

@@ -232,7 +232,7 @@ func (pool *BlockPool) AddBlock(peerID string, block *types.Block, blockSize int
 	}
 }
-// MaxPeerHeight returns the heighest height reported by a peer
+// MaxPeerHeight returns the highest height reported by a peer.
 func (pool *BlockPool) MaxPeerHeight() int {
 	pool.mtx.Lock()
 	defer pool.mtx.Unlock()
@@ -311,7 +311,10 @@ func (pool *BlockPool) makeNextRequester() {
 	pool.requesters[nextHeight] = request
 	pool.numPending++
-	request.Start()
+	_, err := request.Start()
+	if err != nil {
+		request.Logger.Error("Error starting request", "err", err)
+	}
 }
 func (pool *BlockPool) sendRequest(height int, peerID string) {
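The `_, err := request.Start()` shape reflects the tmlibs Service API of this era, where Start returns (bool, error): the bool only reports whether the call actually transitioned the service from stopped to started. A minimal sketch of the convention (interface and names are illustrative, not the real tmlibs definitions):

package example

import "log"

// service models the two-valued Start used across this diff.
type service interface {
	Start() (started bool, err error)
}

// startAndLog discards the "already started" flag with _ and surfaces
// only genuine errors — the pattern adopted in makeNextRequester.
func startAndLog(s service) {
	_, err := s.Start()
	if err != nil {
		log.Printf("Error starting service: %v", err)
	}
}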

View File

@@ -36,7 +36,12 @@ func TestBasic(t *testing.T) {
 	requestsCh := make(chan BlockRequest, 100)
 	pool := NewBlockPool(start, requestsCh, timeoutsCh)
 	pool.SetLogger(log.TestingLogger())
-	pool.Start()
+
+	_, err := pool.Start()
+	if err != nil {
+		t.Error(err)
+	}
 	defer pool.Stop()
 	// Introduce each peer.
@@ -88,7 +93,10 @@ func TestTimeout(t *testing.T) {
 	requestsCh := make(chan BlockRequest, 100)
 	pool := NewBlockPool(start, requestsCh, timeoutsCh)
 	pool.SetLogger(log.TestingLogger())
-	pool.Start()
+	_, err := pool.Start()
+	if err != nil {
+		t.Error(err)
+	}
 	defer pool.Stop()
 	for _, peer := range peers {

View File

@@ -88,7 +88,9 @@ func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
 // OnStart implements cmn.Service.
 func (bcR *BlockchainReactor) OnStart() error {
-	bcR.BaseReactor.OnStart()
+	if err := bcR.BaseReactor.OnStart(); err != nil {
+		return err
+	}
 	if bcR.fastSync {
 		_, err := bcR.pool.Start()
 		if err != nil {
@@ -108,7 +110,7 @@ func (bcR *BlockchainReactor) OnStop() {
 // GetChannels implements Reactor
 func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
 	return []*p2p.ChannelDescriptor{
-		&p2p.ChannelDescriptor{
+		{
 			ID:                BlockchainChannel,
 			Priority:          10,
 			SendQueueCapacity: 1000,
@@ -226,7 +228,7 @@ FOR_LOOP:
 		}
 		case <-statusUpdateTicker.C:
 			// ask for status updates
-			go bcR.BroadcastStatusRequest()
+			go bcR.BroadcastStatusRequest() // nolint: errcheck
 		case <-switchToConsensusTicker.C:
 			height, numPending, lenRequesters := bcR.pool.GetStatus()
 			outbound, inbound, _ := bcR.Switch.NumPeers()
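The `// nolint: errcheck` comment above is gometalinter's per-line suppression: it silences the named linter for that one statement instead of disabling it project-wide. A minimal sketch (hypothetical function):

package example

import "net"

// closeQuietly: when nothing useful can be done with the error (here,
// best-effort cleanup), the finding is suppressed on the offending
// line only, keeping errcheck active everywhere else.
func closeQuietly(c net.Conn) {
	c.Close() // nolint: errcheck
}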

View File

@@ -7,9 +7,9 @@ import (
 	"io"
 	"sync"
-	wire "github.com/tendermint/go-wire"
+	"github.com/tendermint/go-wire"
 	"github.com/tendermint/tendermint/types"
-	. "github.com/tendermint/tmlibs/common"
+	cmn "github.com/tendermint/tmlibs/common"
 	dbm "github.com/tendermint/tmlibs/db"
 )
@@ -67,7 +67,7 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block {
 	}
 	blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
 	if err != nil {
-		PanicCrisis(Fmt("Error reading block meta: %v", err))
+		cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
 	}
 	bytez := []byte{}
 	for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ {
@@ -76,7 +76,7 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block {
 	}
 	block := wire.ReadBinary(&types.Block{}, bytes.NewReader(bytez), 0, &n, &err).(*types.Block)
 	if err != nil {
-		PanicCrisis(Fmt("Error reading block: %v", err))
+		cmn.PanicCrisis(cmn.Fmt("Error reading block: %v", err))
 	}
 	return block
 }
@@ -90,7 +90,7 @@ func (bs *BlockStore) LoadBlockPart(height int, index int) *types.Part {
 	}
 	part := wire.ReadBinary(&types.Part{}, r, 0, &n, &err).(*types.Part)
 	if err != nil {
-		PanicCrisis(Fmt("Error reading block part: %v", err))
+		cmn.PanicCrisis(cmn.Fmt("Error reading block part: %v", err))
 	}
 	return part
 }
@@ -104,7 +104,7 @@ func (bs *BlockStore) LoadBlockMeta(height int) *types.BlockMeta {
 	}
 	blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
 	if err != nil {
-		PanicCrisis(Fmt("Error reading block meta: %v", err))
+		cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
 	}
 	return blockMeta
 }
@@ -120,7 +120,7 @@ func (bs *BlockStore) LoadBlockCommit(height int) *types.Commit {
 	}
 	commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
 	if err != nil {
-		PanicCrisis(Fmt("Error reading commit: %v", err))
+		cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err))
 	}
 	return commit
 }
@@ -135,7 +135,7 @@ func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit {
 	}
 	commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
 	if err != nil {
-		PanicCrisis(Fmt("Error reading commit: %v", err))
+		cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err))
 	}
 	return commit
 }
@@ -148,10 +148,10 @@ func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit {
 func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
 	height := block.Height
 	if height != bs.Height()+1 {
-		PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
+		cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
 	}
 	if !blockParts.IsComplete() {
-		PanicSanity(Fmt("BlockStore can only save complete block part sets"))
+		cmn.PanicSanity(cmn.Fmt("BlockStore can only save complete block part sets"))
 	}
 	// Save block meta
@@ -187,7 +187,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
 func (bs *BlockStore) saveBlockPart(height int, index int, part *types.Part) {
 	if height != bs.Height()+1 {
-		PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
+		cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
 	}
 	partBytes := wire.BinaryBytes(part)
 	bs.db.Set(calcBlockPartKey(height, index), partBytes)
@@ -222,7 +222,7 @@ type BlockStoreStateJSON struct {
 func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
 	bytes, err := json.Marshal(bsj)
 	if err != nil {
-		PanicSanity(Fmt("Could not marshal state bytes: %v", err))
+		cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
 	}
 	db.SetSync(blockStoreKey, bytes)
 }
@@ -237,7 +237,7 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
 	bsj := BlockStoreStateJSON{}
 	err := json.Unmarshal(bytes, &bsj)
 	if err != nil {
-		PanicCrisis(Fmt("Could not unmarshal bytes: %X", bytes))
+		cmn.PanicCrisis(cmn.Fmt("Could not unmarshal bytes: %X", bytes))
 	}
 	return bsj
 }
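The import change in this file replaces a dot-import of tmlibs/common with the cmn alias, so every PanicCrisis/Fmt call site names its package. A sketch of the difference (the cmn functions are the ones already used in this file):

package example

import cmn "github.com/tendermint/tmlibs/common"

// With the dot-import (`import . "github.com/tendermint/tmlibs/common"`),
// PanicCrisis and Fmt entered the file's namespace unqualified, hiding
// their origin and tripping linters. With the alias, the origin of each
// identifier is explicit at the call site:
func mustRead(err error) {
	if err != nil {
		cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
	}
}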

View File

@@ -19,7 +19,10 @@ var GenValidatorCmd = &cobra.Command{
 func genValidator(cmd *cobra.Command, args []string) {
 	privValidator := types.GenPrivValidatorFS("")
-	privValidatorJSONBytes, _ := json.MarshalIndent(privValidator, "", "\t")
+	privValidatorJSONBytes, err := json.MarshalIndent(privValidator, "", "\t")
+	if err != nil {
+		panic(err)
+	}
 	fmt.Printf(`%v
 `, string(privValidatorJSONBytes))
 }

View File

@@ -28,12 +28,14 @@ func initFiles(cmd *cobra.Command, args []string) {
 		genDoc := types.GenesisDoc{
 			ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
 		}
-		genDoc.Validators = []types.GenesisValidator{types.GenesisValidator{
+		genDoc.Validators = []types.GenesisValidator{{
 			PubKey: privValidator.GetPubKey(),
 			Power:  10,
 		}}
-		genDoc.SaveAs(genFile)
+		if err := genDoc.SaveAs(genFile); err != nil {
+			panic(err)
+		}
 	}
 	logger.Info("Initialized tendermint", "genesis", config.GenesisFile(), "priv_validator", config.PrivValidatorFile())

View File

@@ -25,10 +25,13 @@ var ResetPrivValidatorCmd = &cobra.Command{
 }
 // ResetAll removes the privValidator files.
-// Exported so other CLI tools can use it
+// Exported so other CLI tools can use it.
 func ResetAll(dbDir, privValFile string, logger log.Logger) {
 	resetPrivValidatorFS(privValFile, logger)
-	os.RemoveAll(dbDir)
+	if err := os.RemoveAll(dbDir); err != nil {
+		logger.Error("Error removing directory", "err", err)
+		return
+	}
 	logger.Info("Removed all data", "dir", dbDir)
 }

View File

@@ -26,8 +26,12 @@ const (
 // modify in the test cases.
 // NOTE: it unsets all TM* env variables.
 func isolate(cmds ...*cobra.Command) cli.Executable {
-	os.Unsetenv("TMHOME")
-	os.Unsetenv("TM_HOME")
+	if err := os.Unsetenv("TMHOME"); err != nil {
+		panic(err)
+	}
+	if err := os.Unsetenv("TM_HOME"); err != nil {
+		panic(err)
+	}
 	viper.Reset()
 	config = cfg.DefaultConfig()

View File

@@ -63,7 +63,9 @@ func testnetFiles(cmd *cobra.Command, args []string) {
 	// Write genesis file.
 	for i := 0; i < nValidators; i++ {
 		mach := cmn.Fmt("mach%d", i)
-		genDoc.SaveAs(path.Join(dataDir, mach, "genesis.json"))
+		if err := genDoc.SaveAs(path.Join(dataDir, mach, "genesis.json")); err != nil {
+			panic(err)
+		}
 	}
 	fmt.Println(cmn.Fmt("Successfully initialized %v node directories", nValidators))

View File

@@ -37,5 +37,7 @@ func main() {
 	rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc))
 	cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv("$HOME/.tendermint"))
-	cmd.Execute()
+	if err := cmd.Execute(); err != nil {
+		panic(err)
+	}
 }

View File

@@ -12,8 +12,12 @@ import (
 /****** these are for production settings ***********/
 func EnsureRoot(rootDir string) {
-	cmn.EnsureDir(rootDir, 0700)
-	cmn.EnsureDir(rootDir+"/data", 0700)
+	if err := cmn.EnsureDir(rootDir, 0700); err != nil {
+		cmn.PanicSanity(err.Error())
+	}
+	if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil {
+		cmn.PanicSanity(err.Error())
+	}
 	configFilePath := path.Join(rootDir, "config.toml")
@@ -53,21 +57,23 @@ func ResetTestRoot(testName string) *Config {
 	rootDir = filepath.Join(rootDir, testName)
 	// Remove ~/.tendermint_test_bak
 	if cmn.FileExists(rootDir + "_bak") {
-		err := os.RemoveAll(rootDir + "_bak")
-		if err != nil {
+		if err := os.RemoveAll(rootDir + "_bak"); err != nil {
 			cmn.PanicSanity(err.Error())
 		}
 	}
 	// Move ~/.tendermint_test to ~/.tendermint_test_bak
 	if cmn.FileExists(rootDir) {
-		err := os.Rename(rootDir, rootDir+"_bak")
-		if err != nil {
+		if err := os.Rename(rootDir, rootDir+"_bak"); err != nil {
 			cmn.PanicSanity(err.Error())
 		}
 	}
 	// Create new dir
-	cmn.EnsureDir(rootDir, 0700)
-	cmn.EnsureDir(rootDir+"/data", 0700)
+	if err := cmn.EnsureDir(rootDir, 0700); err != nil {
+		cmn.PanicSanity(err.Error())
+	}
+	if err := cmn.EnsureDir(rootDir+"/data", 0700); err != nil {
+		cmn.PanicSanity(err.Error())
+	}
 	configFilePath := path.Join(rootDir, "config.toml")
 	genesisFilePath := path.Join(rootDir, "genesis.json")

View File

@@ -24,7 +24,7 @@ func TestEnsureRoot(t *testing.T) {
 	// setup temp dir for test
 	tmpDir, err := ioutil.TempDir("", "config-test")
 	require.Nil(err)
-	defer os.RemoveAll(tmpDir)
+	defer os.RemoveAll(tmpDir) // nolint: errcheck
 	// create root dir
 	EnsureRoot(tmpDir)

View File

@@ -70,7 +70,7 @@ func TestByzantine(t *testing.T) {
 		conR.SetLogger(logger.With("validator", i))
 		conR.SetEventBus(eventBus)
-		var conRI p2p.Reactor
+		var conRI p2p.Reactor // nolint: gotype, gosimple
 		conRI = conR
 		if i == 0 {
@@ -170,13 +170,17 @@ func byzantineDecideProposalFunc(t *testing.T, height, round int, cs *ConsensusS
 	block1, blockParts1 := cs.createProposalBlock()
 	polRound, polBlockID := cs.Votes.POLInfo()
 	proposal1 := types.NewProposal(height, round, blockParts1.Header(), polRound, polBlockID)
-	cs.privValidator.SignProposal(cs.state.ChainID, proposal1) // byzantine doesnt err
+	if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal1); err != nil {
+		t.Error(err)
+	}
 	// Create a new proposal block from state/txs from the mempool.
 	block2, blockParts2 := cs.createProposalBlock()
 	polRound, polBlockID = cs.Votes.POLInfo()
 	proposal2 := types.NewProposal(height, round, blockParts2.Header(), polRound, polBlockID)
-	cs.privValidator.SignProposal(cs.state.ChainID, proposal2) // byzantine doesnt err
+	if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal2); err != nil {
+		t.Error(err)
+	}
 	block1Hash := block1.Hash()
 	block2Hash := block2.Hash()
@@ -289,12 +293,12 @@ func (privVal *ByzantinePrivValidator) SignVote(chainID string, vote *types.Vote
 }
 func (privVal *ByzantinePrivValidator) SignProposal(chainID string, proposal *types.Proposal) (err error) {
-	proposal.Signature, err = privVal.Sign(types.SignBytes(chainID, proposal))
+	proposal.Signature, _ = privVal.Sign(types.SignBytes(chainID, proposal))
 	return nil
 }
 func (privVal *ByzantinePrivValidator) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) (err error) {
-	heartbeat.Signature, err = privVal.Sign(types.SignBytes(chainID, heartbeat))
+	heartbeat.Signature, _ = privVal.Sign(types.SignBytes(chainID, heartbeat))
 	return nil
 }

View File

@@ -268,7 +268,6 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state *sm.
 	eventBus.SetLogger(log.TestingLogger().With("module", "events"))
 	eventBus.Start()
 	cs.SetEventBus(eventBus)
-
 	return cs
 }

View File

@@ -5,6 +5,8 @@ import (
 	"testing"
 	"time"
+	"github.com/stretchr/testify/assert"
+
 	abci "github.com/tendermint/abci/types"
 	"github.com/tendermint/tendermint/types"
 	cmn "github.com/tendermint/tmlibs/common"
@@ -118,8 +120,12 @@ func TestRmBadTx(t *testing.T) {
 	// increment the counter by 1
 	txBytes := make([]byte, 8)
 	binary.BigEndian.PutUint64(txBytes, uint64(0))
-	app.DeliverTx(txBytes)
-	app.Commit()
+	resDeliver := app.DeliverTx(txBytes)
+	assert.False(t, resDeliver.IsErr(), cmn.Fmt("expected no error. got %v", resDeliver))
+
+	resCommit := app.Commit()
+	assert.False(t, resCommit.IsErr(), cmn.Fmt("expected no error. got %v", resCommit))
 	emptyMempoolCh := make(chan struct{})
 	checkTxRespCh := make(chan struct{})

View File

@@ -55,7 +55,9 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *Consens
 // OnStart implements BaseService.
 func (conR *ConsensusReactor) OnStart() error {
 	conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync())
-	conR.BaseReactor.OnStart()
+	if err := conR.BaseReactor.OnStart(); err != nil {
+		return err
+	}
 	err := conR.startBroadcastRoutine()
 	if err != nil {
@@ -95,31 +97,34 @@ func (conR *ConsensusReactor) SwitchToConsensus(state *sm.State, blocksSynced in
 		// dont bother with the WAL if we fast synced
 		conR.conS.doWALCatchup = false
 	}
-	conR.conS.Start()
+	_, err := conR.conS.Start()
+	if err != nil {
+		conR.Logger.Error("Error starting conS", "err", err)
+	}
 }
 // GetChannels implements Reactor
 func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
 	// TODO optimize
 	return []*p2p.ChannelDescriptor{
-		&p2p.ChannelDescriptor{
+		{
 			ID:                StateChannel,
 			Priority:          5,
 			SendQueueCapacity: 100,
 		},
-		&p2p.ChannelDescriptor{
+		{
 			ID:                 DataChannel, // maybe split between gossiping current block and catchup stuff
 			Priority:           10,          // once we gossip the whole block there's nothing left to send until next height or round
 			SendQueueCapacity:  100,
 			RecvBufferCapacity: 50 * 4096,
 		},
-		&p2p.ChannelDescriptor{
+		{
 			ID:                 VoteChannel,
 			Priority:           5,
 			SendQueueCapacity:  100,
 			RecvBufferCapacity: 100 * 100,
 		},
-		&p2p.ChannelDescriptor{
+		{
 			ID:                VoteSetBitsChannel,
 			Priority:          1,
 			SendQueueCapacity: 2,

View File

@@ -112,7 +112,9 @@ func TestReactorProposalHeartbeats(t *testing.T) {
 	}, css)
 	// send a tx
-	css[3].mempool.CheckTx([]byte{1, 2, 3}, nil)
+	if err := css[3].mempool.CheckTx([]byte{1, 2, 3}, nil); err != nil {
+		//t.Fatal(err)
+	}
 	// wait till everyone makes the first new block
 	timeoutWaitGroup(t, N, func(wg *sync.WaitGroup, j int) {

View File

@@ -7,12 +7,12 @@ import (
 	"hash/crc32"
 	"io"
 	"reflect"
-	"strconv"
-	"strings"
+	//"strconv"
+	//"strings"
 	"time"
 	abci "github.com/tendermint/abci/types"
-	auto "github.com/tendermint/tmlibs/autofile"
+	//auto "github.com/tendermint/tmlibs/autofile"
 	cmn "github.com/tendermint/tmlibs/common"
 	"github.com/tendermint/tmlibs/log"
@@ -99,8 +99,13 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
 	// NOTE: This is just a sanity check. As far as we know things work fine without it,
 	// and Handshake could reuse ConsensusState if it weren't for this check (since we can crash after writing ENDHEIGHT).
 	gr, found, err := cs.wal.SearchForEndHeight(uint64(csHeight))
+	if err != nil {
+		return err
+	}
 	if gr != nil {
-		gr.Close()
+		if err := gr.Close(); err != nil {
+			return err
+		}
 	}
 	if found {
 		return fmt.Errorf("WAL should not contain #ENDHEIGHT %d.", csHeight)
@@ -116,7 +121,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
 	if !found {
 		return errors.New(cmn.Fmt("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1))
 	}
-	defer gr.Close()
+	defer gr.Close() // nolint: errcheck
 	cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight)
@@ -145,6 +150,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
 // Parses marker lines of the form:
 // #ENDHEIGHT: 12345
+/*
 func makeHeightSearchFunc(height int) auto.SearchFunc {
 	return func(line string) (int, error) {
 		line = strings.TrimRight(line, "\n")
@@ -164,7 +170,7 @@ func makeHeightSearchFunc(height int) auto.SearchFunc {
 		return -1, nil
 	}
 }
-}
+}*/
 //----------------------------------------------
 // Recover from failure during block processing
@@ -230,7 +236,9 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int, proxyApp p
 	// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
 	if appBlockHeight == 0 {
 		validators := types.TM2PB.Validators(h.state.Validators)
-		proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators})
+		if err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}); err != nil {
+			return nil, err
+		}
 	}
 	// First handle edge cases and constraints on the storeBlockHeight
@@ -363,7 +371,10 @@ func newMockProxyApp(appHash []byte, abciResponses *sm.ABCIResponses) proxy.AppC
 		abciResponses: abciResponses,
 	})
 	cli, _ := clientCreator.NewABCIClient()
-	cli.Start()
+	_, err := cli.Start()
+	if err != nil {
+		panic(err)
+	}
 	return proxy.NewAppConnConsensus(cli)
 }

View File

@@ -59,13 +59,13 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
 	defer cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
 	// just open the file for reading, no need to use wal
-	fp, err := os.OpenFile(file, os.O_RDONLY, 0666)
+	fp, err := os.OpenFile(file, os.O_RDONLY, 0600)
 	if err != nil {
 		return err
 	}
 	pb := newPlayback(file, fp, cs, cs.state.Copy())
-	defer pb.fp.Close()
+	defer pb.fp.Close() // nolint: errcheck
 	var nextN int // apply N msgs in a row
 	var msg *TimedWALMessage
@@ -127,8 +127,10 @@ func (pb *playback) replayReset(count int, newStepCh chan interface{}) error {
 	newCS.SetEventBus(pb.cs.eventBus)
 	newCS.startForReplay()
-	pb.fp.Close()
-	fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 0666)
+	if err := pb.fp.Close(); err != nil {
+		return err
+	}
+	fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 0600)
 	if err != nil {
 		return err
 	}
@@ -220,7 +222,9 @@ func (pb *playback) replayConsoleLoop() int {
 			defer pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep)
 			if len(tokens) == 1 {
-				pb.replayReset(1, newStepCh)
+				if err := pb.replayReset(1, newStepCh); err != nil {
+					pb.cs.Logger.Error("Replay reset error", "err", err)
+				}
 			} else {
 				i, err := strconv.Atoi(tokens[1])
 				if err != nil {
@@ -228,7 +232,9 @@ func (pb *playback) replayConsoleLoop() int {
 				} else if i > pb.count {
 					fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count)
 				} else {
-					pb.replayReset(i, newStepCh)
+					if err := pb.replayReset(i, newStepCh); err != nil {
+						pb.cs.Logger.Error("Replay reset error", "err", err)
+					}
 				}
 			}

View File

@@ -411,7 +411,9 @@ func buildAppStateFromChain(proxyApp proxy.AppConns,
 	}
 	validators := types.TM2PB.Validators(state.Validators)
-	proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators})
+	if err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}); err != nil {
+		panic(err)
+	}
 	defer proxyApp.Stop()
 	switch mode {
@@ -445,7 +447,9 @@ func buildTMStateFromChain(config *cfg.Config, state *sm.State, chain []*types.B
 	defer proxyApp.Stop()
 	validators := types.TM2PB.Validators(state.Validators)
-	proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators})
+	if err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}); err != nil {
+		panic(err)
+	}
 	var latestAppHash []byte
@@ -486,7 +490,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
 	if !found {
 		return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1))
 	}
-	defer gr.Close()
+	defer gr.Close() // nolint: errcheck
 	// log.Notice("Build a blockchain by reading from the WAL")

View File

@@ -225,11 +225,14 @@ func (cs *ConsensusState) OnStart() error {
 	}
 	// we need the timeoutRoutine for replay so
 	// we don't block on the tick chan.
 	// NOTE: we will get a build up of garbage go routines
 	// firing on the tockChan until the receiveRoutine is started
 	// to deal with them (by that point, at most one will be valid)
-	cs.timeoutTicker.Start()
+	_, err := cs.timeoutTicker.Start()
+	if err != nil {
+		return err
+	}
 	// we may have lost some votes if the process crashed
 	// reload from consensus log to catchup
@@ -254,7 +257,11 @@
 // timeoutRoutine: receive requests for timeouts on tickChan and fire timeouts on tockChan
 // receiveRoutine: serializes processing of proposoals, block parts, votes; coordinates state transitions
 func (cs *ConsensusState) startRoutines(maxSteps int) {
-	cs.timeoutTicker.Start()
+	_, err := cs.timeoutTicker.Start()
+	if err != nil {
+		cs.Logger.Error("Error starting timeout ticker", "err", err)
+		return
+	}
 	go cs.receiveRoutine(maxSteps)
 }
@@ -338,12 +345,16 @@ func (cs *ConsensusState) AddProposalBlockPart(height, round int, part *types.Pa
 // SetProposalAndBlock inputs the proposal and all block parts.
 func (cs *ConsensusState) SetProposalAndBlock(proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerKey string) error {
-	cs.SetProposal(proposal, peerKey)
+	if err := cs.SetProposal(proposal, peerKey); err != nil {
+		return err
+	}
 	for i := 0; i < parts.Total(); i++ {
 		part := parts.GetPart(i)
-		cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerKey)
+		if err := cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerKey); err != nil {
+			return err
+		}
 	}
-	return nil // TODO errors
+	return nil
 }
 //------------------------------------------------------------
@@ -361,7 +372,7 @@ func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType)
 // enterNewRound(height, 0) at cs.StartTime.
 func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) {
 	//cs.Logger.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime)
-	sleepDuration := rs.StartTime.Sub(time.Now())
+	sleepDuration := rs.StartTime.Sub(time.Now()) // nolint: gotype, gosimple
 	cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight)
 }
@@ -692,10 +703,7 @@ func (cs *ConsensusState) needProofBlock(height int) bool {
 	}
 	lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1)
-	if !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash) {
-		return true
-	}
-	return false
+	return !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash)
 }

 func (cs *ConsensusState) proposalHeartbeat(height, round int) {
View File

@@ -209,7 +209,9 @@ func TestBadProposal(t *testing.T) {
 	}
 	// set the proposal block
-	cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer")
+	if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
+		t.Fatal(err)
+	}
 	// start the machine
 	startTestRound(cs1, height, round)
@@ -478,7 +480,9 @@ func TestLockNoPOL(t *testing.T) {
 	// now we're on a new round and not the proposer
 	// so set the proposal block
-	cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), "")
+	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), ""); err != nil {
+		t.Fatal(err)
+	}
 	<-proposalCh
 	<-voteCh // prevote
@@ -555,7 +559,9 @@ func TestLockPOLRelock(t *testing.T) {
 	<-timeoutWaitCh
 	//XXX: this isnt guaranteed to get there before the timeoutPropose ...
-	cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
+	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
+		t.Fatal(err)
+	}
 	<-newRoundCh
 	t.Log("### ONTO ROUND 1")
@@ -667,7 +673,9 @@ func TestLockPOLUnlock(t *testing.T) {
 	lockedBlockHash := rs.LockedBlock.Hash()
 	//XXX: this isnt guaranteed to get there before the timeoutPropose ...
-	cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
+	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
+		t.Fatal(err)
+	}
 	<-newRoundCh
 	t.Log("#### ONTO ROUND 1")
@@ -754,7 +762,9 @@ func TestLockPOLSafety1(t *testing.T) {
 	incrementRound(vs2, vs3, vs4)
 	//XXX: this isnt guaranteed to get there before the timeoutPropose ...
-	cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer")
+	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
+		t.Fatal(err)
+	}
 	<-newRoundCh
 	t.Log("### ONTO ROUND 1")
@@ -866,7 +876,9 @@ func TestLockPOLSafety2(t *testing.T) {
 	startTestRound(cs1, height, 1)
 	<-newRoundCh
-	cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer")
+	if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil {
+		t.Fatal(err)
+	}
 	<-proposalCh
 	<-voteCh // prevote
@@ -891,7 +903,9 @@ func TestLockPOLSafety2(t *testing.T) {
 	if err := vs3.SignProposal(config.ChainID, newProp); err != nil {
 		t.Fatal(err)
 	}
-	cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer")
+	if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil {
+		t.Fatal(err)
+	}
 	// Add the pol votes
 	addVotes(cs1, prevotes...)

View File

@@ -174,7 +174,6 @@ func (wal *baseWAL) SearchForEndHeight(height uint64) (gr *auto.GroupReader, fou
 			}
 		}
 	}
-
 	gr.Close()
 }
@@ -273,7 +272,7 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
 	}
 	var nn int
-	var res *TimedWALMessage
+	var res *TimedWALMessage // nolint: gosimple
 	res = wire.ReadBinary(&TimedWALMessage{}, bytes.NewBuffer(data), int(length), &nn, &err).(*TimedWALMessage)
 	if err != nil {
 		return nil, fmt.Errorf("failed to decode data: %v", err)

View File

@@ -34,7 +34,7 @@ const (
 	ValDir   = "validators"
 	CheckDir = "checkpoints"
 	dirPerm  = os.FileMode(0755)
-	filePerm = os.FileMode(0644)
+	//filePerm = os.FileMode(0644)
 )
 type provider struct {

View File

@@ -189,8 +189,14 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) {
 	// WAL
 	if mem.wal != nil {
 		// TODO: Notify administrators when WAL fails
-		mem.wal.Write([]byte(tx))
-		mem.wal.Write([]byte("\n"))
+		_, err := mem.wal.Write([]byte(tx))
+		if err != nil {
+			mem.logger.Error("Error writing to WAL", "err", err)
+		}
+		_, err = mem.wal.Write([]byte("\n"))
+		if err != nil {
+			mem.logger.Error("Error writing to WAL", "err", err)
+		}
 	}
 	// END WAL
@@ -331,10 +337,10 @@ func (mem *Mempool) collectTxs(maxTxs int) types.Txs {
 // Update informs the mempool that the given txs were committed and can be discarded.
 // NOTE: this should be called *after* block is committed by consensus.
 // NOTE: unsafe; Lock/Unlock must be managed by caller
-func (mem *Mempool) Update(height int, txs types.Txs) {
-	// TODO: check err ?
-	mem.proxyAppConn.FlushSync() // To flush async resCb calls e.g. from CheckTx
+func (mem *Mempool) Update(height int, txs types.Txs) error {
+	if err := mem.proxyAppConn.FlushSync(); err != nil { // To flush async resCb calls e.g. from CheckTx
+		return err
+	}
 	// First, create a lookup map of txns in new txs.
 	txsMap := make(map[string]struct{})
 	for _, tx := range txs {
@@ -357,6 +363,7 @@ func (mem *Mempool) Update(height int, txs types.Txs) {
 		// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
 		// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
 	}
+	return nil
 }
 func (mem *Mempool) filterTxs(blockTxsMap map[string]struct{}) []types.Tx {
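Changing Update to return FlushSync's error is the one API-visible change in this file; every caller now has to make a decision, as the test updates below show. A sketch of the ripple effect (the interface here is a hypothetical narrowing for illustration, not the real Mempool type):

package example

// updater is a hypothetical slice of the Mempool API after this change.
type updater interface {
	Update(height int, txs [][]byte) error
}

// afterCommit shows the new call shape: the error from flushing the
// proxy app connection is propagated instead of dropped inside Update.
func afterCommit(mem updater, height int, txs [][]byte) error {
	if err := mem.Update(height, txs); err != nil {
		return err
	}
	return nil
}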

View File

@@ -20,7 +20,10 @@ func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
 	appConnMem, _ := cc.NewABCIClient()
 	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
-	appConnMem.Start()
+	_, err := appConnMem.Start()
+	if err != nil {
+		panic(err)
+	}
 	mempool := NewMempool(config.Mempool, appConnMem, 0)
 	mempool.SetLogger(log.TestingLogger())
 	return mempool
@@ -49,9 +52,11 @@ func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs {
 	for i := 0; i < count; i++ {
 		txBytes := make([]byte, 20)
 		txs[i] = txBytes
-		rand.Read(txBytes)
-		err := mempool.CheckTx(txBytes, nil)
+		_, err := rand.Read(txBytes)
 		if err != nil {
+			t.Error(err)
+		}
+		if err := mempool.CheckTx(txBytes, nil); err != nil {
 			t.Fatal("Error after CheckTx: %v", err)
 		}
 	}
@@ -78,7 +83,9 @@ func TestTxsAvailable(t *testing.T) {
 	// it should fire once now for the new height
 	// since there are still txs left
 	committedTxs, txs := txs[:50], txs[50:]
-	mempool.Update(1, committedTxs)
+	if err := mempool.Update(1, committedTxs); err != nil {
+		t.Error(err)
+	}
 	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
 	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
@@ -88,7 +95,9 @@ func TestTxsAvailable(t *testing.T) {
 	// now call update with all the txs. it should not fire as there are no txs left
 	committedTxs = append(txs, moreTxs...)
-	mempool.Update(2, committedTxs)
+	if err := mempool.Update(2, committedTxs); err != nil {
+		t.Error(err)
+	}
 	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
 	// send a bunch more txs, it should only fire once
@@ -146,7 +155,9 @@ func TestSerialReap(t *testing.T) {
 			binary.BigEndian.PutUint64(txBytes, uint64(i))
 			txs = append(txs, txBytes)
 		}
-		mempool.Update(0, txs)
+		if err := mempool.Update(0, txs); err != nil {
+			t.Error(err)
+		}
 	}
 	commitRange := func(start, end int) {

View File

@@ -50,7 +50,7 @@ func (memR *MempoolReactor) SetLogger(l log.Logger) {
 // It returns the list of channels for this reactor.
 func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor {
 	return []*p2p.ChannelDescriptor{
-		&p2p.ChannelDescriptor{
+		{
 			ID:       MempoolChannel,
 			Priority: 5,
 		},
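The `&p2p.ChannelDescriptor{` → `{` edits here and in the blockchain/consensus reactors rely on Go's composite-literal elision: inside a []*T literal, each element may be written as a bare {...} and the `&T` is implied. In isolation (types invented for the example):

package example

type ChannelDescriptor struct {
	ID       byte
	Priority int
}

// Verbose form: the element type is repeated for every entry.
var verbose = []*ChannelDescriptor{
	&ChannelDescriptor{ID: 0x30, Priority: 5},
}

// Elided form, as adopted in this diff: each bare {...} is implicitly
// &ChannelDescriptor{...}.
var elided = []*ChannelDescriptor{
	{ID: 0x30, Priority: 5},
}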

View File

@@ -384,7 +384,7 @@ func (n *Node) OnStop() {
 	n.eventBus.Stop()
 }
-// RunForever waits for an interupt signal and stops the node.
+// RunForever waits for an interrupt signal and stops the node.
 func (n *Node) RunForever() {
 	// Sleep forever and then...
 	cmn.TrapSignal(func() {
@@ -430,7 +430,10 @@ func (n *Node) startRPC() ([]net.Listener, error) {
 	mux := http.NewServeMux()
 	rpcLogger := n.Logger.With("module", "rpc-server")
 	onDisconnect := rpcserver.OnDisconnect(func(remoteAddr string) {
-		n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
+		err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr)
+		if err != nil {
+			rpcLogger.Error("Error unsubsribing from all on disconnect", "err", err)
+		}
 	})
 	wm := rpcserver.NewWebsocketManager(rpccore.Routes, onDisconnect)
 	wm.SetLogger(rpcLogger.With("protocol", "websocket"))

View File

@@ -19,7 +19,10 @@ func TestNodeStartStop(t *testing.T) {
 	// create & start node
 	n, err := DefaultNewNode(config, log.TestingLogger())
 	assert.NoError(t, err, "expected no err on DefaultNewNode")
-	n.Start()
+	_, err1 := n.Start()
+	if err1 != nil {
+		t.Error(err1)
+	}
 	t.Logf("Started node %v", n.sw.NodeInfo())
 	// wait for the node to produce a block

View File

@@ -130,7 +130,9 @@ func (a *AddrBook) init() {
 // OnStart implements Service.
 func (a *AddrBook) OnStart() error {
-	a.BaseService.OnStart()
+	if err := a.BaseService.OnStart(); err != nil {
+		return err
+	}
 	a.loadFromFile(a.filePath)
 	// wg.Add to ensure that any invocation of .Wait()
@@ -369,7 +371,7 @@ func (a *AddrBook) loadFromFile(filePath string) bool {
 	if err != nil {
 		cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err))
 	}
-	defer r.Close()
+	defer r.Close() // nolint: errcheck
 	aJSON := &addrBookJSON{}
 	dec := json.NewDecoder(r)
 	err = dec.Decode(aJSON)

View File

@@ -163,7 +163,9 @@ func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onRec
 // OnStart implements BaseService
 func (c *MConnection) OnStart() error {
-	c.BaseService.OnStart()
+	if err := c.BaseService.OnStart(); err != nil {
+		return err
+	}
 	c.quit = make(chan struct{})
 	c.flushTimer = cmn.NewThrottleTimer("flush", c.config.flushThrottle)
 	c.pingTimer = cmn.NewRepeatTimer("ping", pingTimeout)
@@ -182,7 +184,7 @@ func (c *MConnection) OnStop() {
 	if c.quit != nil {
 		close(c.quit)
 	}
-	c.conn.Close()
+	c.conn.Close() // nolint: errcheck
 	// We can't close pong safely here because
 	// recvRoutine may write to it after we've stopped.
 	// Though it doesn't need to get closed at all,
@@ -569,7 +571,7 @@ type Channel struct {
 func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
 	desc = desc.FillDefaults()
 	if desc.Priority <= 0 {
-		cmn.PanicSanity("Channel default priority must be a postive integer")
+		cmn.PanicSanity("Channel default priority must be a positive integer")
 	}
 	return &Channel{
 		conn: conn,

View File

@@ -32,8 +32,8 @@ func TestMConnectionSend(t *testing.T) {
 	assert, require := assert.New(t), require.New(t)
 	server, client := netPipe()
-	defer server.Close()
-	defer client.Close()
+	defer server.Close() // nolint: errcheck
+	defer client.Close() // nolint: errcheck
 	mconn := createTestMConnection(client)
 	_, err := mconn.Start()
@@ -44,12 +44,18 @@ func TestMConnectionSend(t *testing.T) {
 	assert.True(mconn.Send(0x01, msg))
 	// Note: subsequent Send/TrySend calls could pass because we are reading from
 	// the send queue in a separate goroutine.
-	server.Read(make([]byte, len(msg)))
+	_, err = server.Read(make([]byte, len(msg)))
+	if err != nil {
+		t.Error(err)
+	}
 	assert.True(mconn.CanSend(0x01))
 	msg = "Spider-Man"
 	assert.True(mconn.TrySend(0x01, msg))
-	server.Read(make([]byte, len(msg)))
+	_, err = server.Read(make([]byte, len(msg)))
+	if err != nil {
+		t.Error(err)
+	}
 	assert.False(mconn.CanSend(0x05), "CanSend should return false because channel is unknown")
 	assert.False(mconn.Send(0x05, "Absorbing Man"), "Send should return false because channel is unknown")
@@ -59,8 +65,8 @@ func TestMConnectionReceive(t *testing.T) {
 	assert, require := assert.New(t), require.New(t)
 	server, client := netPipe()
-	defer server.Close()
-	defer client.Close()
+	defer server.Close() // nolint: errcheck
+	defer client.Close() // nolint: errcheck
 	receivedCh := make(chan []byte)
 	errorsCh := make(chan interface{})
@@ -97,8 +103,8 @@ func TestMConnectionStatus(t *testing.T) {
 	assert, require := assert.New(t), require.New(t)
 	server, client := netPipe()
-	defer server.Close()
-	defer client.Close()
+	defer server.Close() // nolint: errcheck
+	defer client.Close() // nolint: errcheck
 	mconn := createTestMConnection(client)
 	_, err := mconn.Start()
@@ -114,8 +120,8 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
 	assert, require := assert.New(t), require.New(t)
 	server, client := netPipe()
-	defer server.Close()
-	defer client.Close()
+	defer server.Close() // nolint: errcheck
+	defer client.Close() // nolint: errcheck
 	receivedCh := make(chan []byte)
 	errorsCh := make(chan interface{})
@@ -130,7 +136,9 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
 	require.Nil(err)
 	defer mconn.Stop()
-	client.Close()
+	if err := client.Close(); err != nil {
+		t.Error(err)
+	}
 	select {
 	case receivedBytes := <-receivedCh:

View File

@@ -124,7 +124,7 @@ func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error {
 func (fc *FuzzedConnection) randomDuration() time.Duration {
 	maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000)
-	return time.Millisecond * time.Duration(rand.Int()%maxDelayMillis)
+	return time.Millisecond * time.Duration(rand.Int()%maxDelayMillis) // nolint: gas
 }
 // implements the fuzz (delay, kill conn)
@@ -143,7 +143,7 @@ func (fc *FuzzedConnection) fuzz() bool {
 	} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn {
 		// XXX: can't this fail because machine precision?
 		// XXX: do we need an error?
-		fc.Close()
+		fc.Close() // nolint: errcheck, gas
 		return true
 	} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep {
 		time.Sleep(fc.randomDuration())

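The // nolint: gas and // nolint: errcheck annotations work because gometalinter honors per-line nolint comments that name the linters to silence. A rough illustration, under the assumption that the delay is not security-sensitive (randomDelay is a hypothetical stand-in, not the code above):

package demo

import (
    "math/rand"
    "time"
)

// randomDelay deliberately uses math/rand rather than crypto/rand:
// the delay is not security-sensitive, so the gas warning is
// suppressed on that one line only.
func randomDelay(max time.Duration) time.Duration {
    millis := int(max.Nanoseconds() / 1000000)
    if millis <= 0 {
        return 0
    }
    return time.Millisecond * time.Duration(rand.Int()%millis) // nolint: gas
}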
View File

@@ -100,19 +100,24 @@ func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log
 connections: make(chan net.Conn, numBufferedConnections),
 }
 dl.BaseService = *cmn.NewBaseService(logger, "DefaultListener", dl)
-dl.Start() // Started upon construction
+_, err = dl.Start() // Started upon construction
+if err != nil {
+logger.Error("Error starting base service", "err", err)
+}
 return dl
 }
 func (l *DefaultListener) OnStart() error {
-l.BaseService.OnStart()
+if err := l.BaseService.OnStart(); err != nil {
+return err
+}
 go l.listenRoutine()
 return nil
 }
 func (l *DefaultListener) OnStop() {
 l.BaseService.OnStop()
-l.listener.Close()
+l.listener.Close() // nolint: errcheck
 }
 // Accept connections and pass on the channel

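This OnStart shape, propagating the embedded BaseService's error before doing any of the type's own startup work, repeats below in peer, PEXReactor, Switch, and TrustMetricStore. A stripped-down sketch of the pattern with hypothetical types (BaseService here is a stand-in, not tmlibs' actual implementation):

package demo

import "errors"

// BaseService is a simplified stand-in for tmlibs' common.BaseService.
type BaseService struct{ started bool }

func (bs *BaseService) OnStart() error {
    if bs.started {
        return errors.New("already started")
    }
    bs.started = true
    return nil
}

type Listener struct {
    BaseService
}

// OnStart chains to the embedded implementation first and only
// starts its own goroutines if that succeeded.
func (l *Listener) OnStart() error {
    if err := l.BaseService.OnStart(); err != nil {
        return err
    }
    go func() { /* accept loop would run here */ }()
    return nil
}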
View File

@@ -25,7 +25,12 @@ func TestListener(t *testing.T) {
 }
 msg := []byte("hi!")
-go connIn.Write(msg)
+go func() {
+_, err := connIn.Write(msg)
+if err != nil {
+t.Error(err)
+}
+}()
 b := make([]byte, 32)
 n, err := connOut.Read(b)
 if err != nil {

View File

@@ -88,7 +88,9 @@ func newOutboundPeer(addr *NetAddress, reactorsByCh map[byte]Reactor, chDescs []
 peer, err := newPeerFromConnAndConfig(conn, true, reactorsByCh, chDescs, onPeerError, ourNodePrivKey, config)
 if err != nil {
-conn.Close()
+if err := conn.Close(); err != nil {
+return nil, err
+}
 return nil, err
 }
 return peer, nil
@@ -146,7 +148,7 @@ func (p *peer) SetLogger(l log.Logger) {
 // CloseConn should be used when the peer was created, but never started.
 func (p *peer) CloseConn() {
-p.conn.Close()
+p.conn.Close() // nolint: errcheck
 }
 // makePersistent marks the peer as persistent.
@@ -230,7 +232,9 @@ func (p *peer) PubKey() crypto.PubKeyEd25519 {
 // OnStart implements BaseService.
 func (p *peer) OnStart() error {
-p.BaseService.OnStart()
+if err := p.BaseService.OnStart(); err != nil {
+return err
+}
 _, err := p.mconn.Start()
 return err
 }

View File

@@ -28,7 +28,9 @@ func TestPeerSetAddRemoveOne(t *testing.T) {
 var peerList []Peer
 for i := 0; i < 5; i++ {
 p := randPeer()
-peerSet.Add(p)
+if err := peerSet.Add(p); err != nil {
+t.Error(err)
+}
 peerList = append(peerList, p)
 }
@@ -48,7 +50,9 @@ func TestPeerSetAddRemoveOne(t *testing.T) {
 // 2. Next we are testing removing the peer at the end
 // a) Replenish the peerSet
 for _, peer := range peerList {
-peerSet.Add(peer)
+if err := peerSet.Add(peer); err != nil {
+t.Error(err)
+}
 }
 // b) In reverse, remove each element

View File

@@ -23,7 +23,8 @@ func TestPeerBasic(t *testing.T) {
 p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), DefaultPeerConfig())
 require.Nil(err)
-p.Start()
+_, err = p.Start()
+require.Nil(err)
 defer p.Stop()
 assert.True(p.IsRunning())
@@ -49,7 +50,8 @@ func TestPeerWithoutAuthEnc(t *testing.T) {
 p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config)
 require.Nil(err)
-p.Start()
+_, err = p.Start()
+require.Nil(err)
 defer p.Stop()
 assert.True(p.IsRunning())
@@ -69,7 +71,9 @@ func TestPeerSend(t *testing.T) {
 p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config)
 require.Nil(err)
-p.Start()
+_, err = p.Start()
+require.Nil(err)
 defer p.Stop()
 assert.True(p.CanSend(0x01))
@@ -78,7 +82,7 @@ func TestPeerSend(t *testing.T) {
 func createOutboundPeerAndPerformHandshake(addr *NetAddress, config *PeerConfig) (*peer, error) {
 chDescs := []*ChannelDescriptor{
-&ChannelDescriptor{ID: 0x01, Priority: 1},
+{ID: 0x01, Priority: 1},
 }
 reactorsByCh := map[byte]Reactor{0x01: NewTestReactor(chDescs, true)}
 pk := crypto.GenPrivKeyEd25519()
@@ -148,7 +152,9 @@ func (p *remotePeer) accept(l net.Listener) {
 }
 select {
 case <-p.quit:
-conn.Close()
+if err := conn.Close(); err != nil {
+golog.Fatal(err)
+}
 return
 default:
 }

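The `&ChannelDescriptor{...}` to `{...}` rewrite above leans on Go's composite-literal elision: inside a literal of type []*ChannelDescriptor, a bare {...} element already means &ChannelDescriptor{...}. Both forms below compile to the same value (a standalone sketch, not the project's actual type):

package demo

type ChannelDescriptor struct {
    ID       byte
    Priority int
}

// verbose and concise are identical; the second form is what the
// linted code now uses.
var verbose = []*ChannelDescriptor{
    &ChannelDescriptor{ID: 0x01, Priority: 1},
}

var concise = []*ChannelDescriptor{
    {ID: 0x01, Priority: 1},
}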
View File

@@ -66,8 +66,13 @@ func NewPEXReactor(b *AddrBook) *PEXReactor {
 // OnStart implements BaseService
 func (r *PEXReactor) OnStart() error {
-r.BaseReactor.OnStart()
-r.book.Start()
+if err := r.BaseReactor.OnStart(); err != nil {
+return err
+}
+_, err := r.book.Start()
+if err != nil {
+return err
+}
 go r.ensurePeersRoutine()
 go r.flushMsgCountByPeer()
 return nil
@@ -82,7 +87,7 @@ func (r *PEXReactor) OnStop() {
 // GetChannels implements Reactor
 func (r *PEXReactor) GetChannels() []*ChannelDescriptor {
 return []*ChannelDescriptor{
-&ChannelDescriptor{
+{
 ID: PexChannel,
 Priority: 1,
 SendQueueCapacity: 10,
@@ -278,7 +283,7 @@ func (r *PEXReactor) ensurePeers() {
 // If we need more addresses, pick a random peer and ask for more.
 if r.book.NeedMoreAddrs() {
 if peers := r.Switch.Peers().List(); len(peers) > 0 {
-i := rand.Int() % len(peers)
+i := rand.Int() % len(peers) // nolint: gas
 peer := peers[i]
 r.Logger.Info("No addresses to dial. Sending pexRequest to random peer", "peer", peer)
 r.RequestPEX(peer)

View File

@@ -20,7 +20,7 @@ func TestPEXReactorBasic(t *testing.T) {
 dir, err := ioutil.TempDir("", "pex_reactor")
 require.Nil(err)
-defer os.RemoveAll(dir)
+defer os.RemoveAll(dir) // nolint: errcheck
 book := NewAddrBook(dir+"addrbook.json", true)
 book.SetLogger(log.TestingLogger())
@@ -36,7 +36,7 @@ func TestPEXReactorAddRemovePeer(t *testing.T) {
 dir, err := ioutil.TempDir("", "pex_reactor")
 require.Nil(err)
-defer os.RemoveAll(dir)
+defer os.RemoveAll(dir) // nolint: errcheck
 book := NewAddrBook(dir+"addrbook.json", true)
 book.SetLogger(log.TestingLogger())
@@ -69,7 +69,7 @@ func TestPEXReactorRunning(t *testing.T) {
 dir, err := ioutil.TempDir("", "pex_reactor")
 require.Nil(err)
-defer os.RemoveAll(dir)
+defer os.RemoveAll(dir) // nolint: errcheck
 book := NewAddrBook(dir+"addrbook.json", false)
 book.SetLogger(log.TestingLogger())
@@ -139,7 +139,7 @@ func TestPEXReactorReceive(t *testing.T) {
 dir, err := ioutil.TempDir("", "pex_reactor")
 require.Nil(err)
-defer os.RemoveAll(dir)
+defer os.RemoveAll(dir) // nolint: errcheck
 book := NewAddrBook(dir+"addrbook.json", false)
 book.SetLogger(log.TestingLogger())
@@ -164,7 +164,7 @@ func TestPEXReactorAbuseFromPeer(t *testing.T) {
 dir, err := ioutil.TempDir("", "pex_reactor")
 require.Nil(err)
-defer os.RemoveAll(dir)
+defer os.RemoveAll(dir) // nolint: errcheck
 book := NewAddrBook(dir+"addrbook.json", true)
 book.SetLogger(log.TestingLogger())

View File

@@ -302,7 +302,7 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKeyEd25519, signa
 // sha256
 func hash32(input []byte) (res *[32]byte) {
 hasher := sha256.New()
-hasher.Write(input) // does not error
+hasher.Write(input) // nolint: errcheck, gas
 resSlice := hasher.Sum(nil)
 res = new([32]byte)
 copy(res[:], resSlice)
@@ -312,7 +312,7 @@ func hash32(input []byte) (res *[32]byte) {
 // We only fill in the first 20 bytes with ripemd160
 func hash24(input []byte) (res *[24]byte) {
 hasher := ripemd160.New()
-hasher.Write(input) // does not error
+hasher.Write(input) // nolint: errcheck, gas
 resSlice := hasher.Sum(nil)
 res = new([24]byte)
 copy(res[:], resSlice)

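Replacing the old "does not error" comments with nolint directives keeps the same rationale in machine-readable form: hash.Hash.Write is documented in the standard library to never return an error, so dropping it is safe. A minimal illustration (digest is a hypothetical helper):

package demo

import "crypto/sha256"

// digest ignores Write's error return on purpose: the hash.Hash
// contract guarantees Write never returns an error.
func digest(b []byte) []byte {
    h := sha256.New()
    h.Write(b) // nolint: errcheck, gas
    return h.Sum(nil)
}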
View File

@@ -70,8 +70,12 @@ func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection
 func TestSecretConnectionHandshake(t *testing.T) {
 fooSecConn, barSecConn := makeSecretConnPair(t)
-fooSecConn.Close()
-barSecConn.Close()
+if err := fooSecConn.Close(); err != nil {
+t.Error(err)
+}
+if err := barSecConn.Close(); err != nil {
+t.Error(err)
+}
 }
 func TestSecretConnectionReadWrite(t *testing.T) {
@@ -110,7 +114,9 @@ func TestSecretConnectionReadWrite(t *testing.T) {
 return
 }
 }
-nodeConn.PipeWriter.Close()
+if err := nodeConn.PipeWriter.Close(); err != nil {
+t.Error(err)
+}
 },
 func() {
 // Node reads
@@ -125,7 +131,9 @@ func TestSecretConnectionReadWrite(t *testing.T) {
 }
 *nodeReads = append(*nodeReads, string(readBuffer[:n]))
 }
-nodeConn.PipeReader.Close()
+if err := nodeConn.PipeReader.Close(); err != nil {
+t.Error(err)
+}
 })
 }
 }
@@ -197,6 +205,8 @@ func BenchmarkSecretConnection(b *testing.B) {
 }
 b.StopTimer()
-fooSecConn.Close()
+if err := fooSecConn.Close(); err != nil {
+b.Error(err)
+}
 //barSecConn.Close() race condition
 }

View File

@@ -174,7 +174,9 @@ func (sw *Switch) SetNodePrivKey(nodePrivKey crypto.PrivKeyEd25519) {
 // OnStart implements BaseService. It starts all the reactors, peers, and listeners.
 func (sw *Switch) OnStart() error {
-sw.BaseService.OnStart()
+if err := sw.BaseService.OnStart(); err != nil {
+return err
+}
 // Start reactors
 for _, reactor := range sw.reactors {
 _, err := reactor.Start()
@@ -287,7 +289,12 @@ func (sw *Switch) SetPubKeyFilter(f func(crypto.PubKeyEd25519) error) {
 }
 func (sw *Switch) startInitPeer(peer *peer) {
-peer.Start() // spawn send/recv routines
+_, err := peer.Start() // spawn send/recv routines
+if err != nil {
+// Should never happen
+sw.Logger.Error("Error starting peer", "peer", peer, "err", err)
+}
 for _, reactor := range sw.reactors {
 reactor.AddPeer(peer)
 }
@@ -511,7 +518,7 @@ func MakeConnectedSwitches(cfg *cfg.P2PConfig, n int, initSwitch func(int, *Swit
 }
 // Connect2Switches will connect switches i and j via net.Pipe().
-// Blocks until a conection is established.
+// Blocks until a connection is established.
 // NOTE: caller ensures i and j are within bounds.
 func Connect2Switches(switches []*Switch, i, j int) {
 switchI := switches[i]
@@ -568,7 +575,9 @@ func makeSwitch(cfg *cfg.P2PConfig, i int, network, version string, initSwitch f
 func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
 peer, err := newInboundPeer(conn, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, sw.peerConfig)
 if err != nil {
-conn.Close()
+if err := conn.Close(); err != nil {
+sw.Logger.Error("Error closing connection", "err", err)
+}
 return err
 }
 peer.SetLogger(sw.Logger.With("peer", conn.RemoteAddr()))
@@ -583,7 +592,9 @@ func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
 func (sw *Switch) addPeerWithConnectionAndConfig(conn net.Conn, config *PeerConfig) error {
 peer, err := newInboundPeer(conn, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodePrivKey, config)
 if err != nil {
-conn.Close()
+if err := conn.Close(); err != nil {
+sw.Logger.Error("Error closing connection", "err", err)
+}
 return err
 }
 peer.SetLogger(sw.Logger.With("peer", conn.RemoteAddr()))

View File

@@ -10,11 +10,12 @@ import (
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 crypto "github.com/tendermint/go-crypto"
 wire "github.com/tendermint/go-wire"
-"github.com/tendermint/tmlibs/log"
 cfg "github.com/tendermint/tendermint/config"
+"github.com/tendermint/tmlibs/log"
 )
 var (
@@ -100,12 +101,12 @@ func makeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switc
 func initSwitchFunc(i int, sw *Switch) *Switch {
 // Make two reactors of two channels each
 sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{
-&ChannelDescriptor{ID: byte(0x00), Priority: 10},
-&ChannelDescriptor{ID: byte(0x01), Priority: 10},
+{ID: byte(0x00), Priority: 10},
+{ID: byte(0x01), Priority: 10},
 }, true))
 sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{
-&ChannelDescriptor{ID: byte(0x02), Priority: 10},
-&ChannelDescriptor{ID: byte(0x03), Priority: 10},
+{ID: byte(0x02), Priority: 10},
+{ID: byte(0x03), Priority: 10},
 }, true))
 return sw
 }
@@ -171,10 +172,12 @@ func TestConnAddrFilter(t *testing.T) {
 // connect to good peer
 go func() {
-s1.addPeerWithConnection(c1)
+err := s1.addPeerWithConnection(c1)
+assert.NotNil(t, err, "expected err")
 }()
 go func() {
-s2.addPeerWithConnection(c2)
+err := s2.addPeerWithConnection(c2)
+assert.NotNil(t, err, "expected err")
 }()
 assertNoPeersAfterTimeout(t, s1, 400*time.Millisecond)
@@ -206,10 +209,12 @@ func TestConnPubKeyFilter(t *testing.T) {
 // connect to good peer
 go func() {
-s1.addPeerWithConnection(c1)
+err := s1.addPeerWithConnection(c1)
+assert.NotNil(t, err, "expected error")
 }()
 go func() {
-s2.addPeerWithConnection(c2)
+err := s2.addPeerWithConnection(c2)
+assert.NotNil(t, err, "expected error")
 }()
 assertNoPeersAfterTimeout(t, s1, 400*time.Millisecond)
@@ -220,7 +225,10 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
 assert, require := assert.New(t), require.New(t)
 sw := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
-sw.Start()
+_, err := sw.Start()
+if err != nil {
+t.Error(err)
+}
 defer sw.Stop()
 // simulate remote peer
@@ -244,7 +252,10 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
 assert, require := assert.New(t), require.New(t)
 sw := makeSwitch(config, 1, "testing", "123.123.123", initSwitchFunc)
-sw.Start()
+_, err := sw.Start()
+if err != nil {
+t.Error(err)
+}
 defer sw.Stop()
 // simulate remote peer
@@ -295,12 +306,12 @@ func BenchmarkSwitches(b *testing.B) {
 s1, s2 := makeSwitchPair(b, func(i int, sw *Switch) *Switch {
 // Make bar reactors of bar channels each
 sw.AddReactor("foo", NewTestReactor([]*ChannelDescriptor{
-&ChannelDescriptor{ID: byte(0x00), Priority: 10},
-&ChannelDescriptor{ID: byte(0x01), Priority: 10},
+{ID: byte(0x00), Priority: 10},
+{ID: byte(0x01), Priority: 10},
 }, false))
 sw.AddReactor("bar", NewTestReactor([]*ChannelDescriptor{
-&ChannelDescriptor{ID: byte(0x02), Priority: 10},
-&ChannelDescriptor{ID: byte(0x03), Priority: 10},
+{ID: byte(0x02), Priority: 10},
+{ID: byte(0x03), Priority: 10},
 }, false))
 return sw
 })

View File

@@ -47,7 +47,9 @@ func NewTrustMetricStore(db dbm.DB, tmc TrustMetricConfig) *TrustMetricStore {
 // OnStart implements Service
 func (tms *TrustMetricStore) OnStart() error {
-tms.BaseService.OnStart()
+if err := tms.BaseService.OnStart(); err != nil {
+return err
+}
 tms.mtx.Lock()
 defer tms.mtx.Unlock()

View File

@@ -55,12 +55,12 @@ func (info *NodeInfo) CompatibleWith(other *NodeInfo) error {
 }
 func (info *NodeInfo) ListenHost() string {
-host, _, _ := net.SplitHostPort(info.ListenAddr)
+host, _, _ := net.SplitHostPort(info.ListenAddr) // nolint: errcheck, gas
 return host
 }
 func (info *NodeInfo) ListenPort() int {
-_, port, _ := net.SplitHostPort(info.ListenAddr)
+_, port, _ := net.SplitHostPort(info.ListenAddr) // nolint: errcheck, gas
 port_i, err := strconv.Atoi(port)
 if err != nil {
 return -1

View File

@@ -97,11 +97,12 @@ func Probe(logger log.Logger) (caps UPNPCapabilities, err error) {
 // Deferred cleanup
 defer func() {
-err = nat.DeletePortMapping("tcp", intPort, extPort)
-if err != nil {
+if err := nat.DeletePortMapping("tcp", intPort, extPort); err != nil {
 logger.Error(cmn.Fmt("Port mapping delete error: %v", err))
 }
-listener.Close()
+if err := listener.Close(); err != nil {
+logger.Error(cmn.Fmt("Listener closing error: %v", err))
+}
 }()
 supportsHairpin := testHairpin(listener, fmt.Sprintf("%v:%v", ext, extPort), logger)

View File

@@ -40,11 +40,10 @@ func Discover() (nat NAT, err error) {
 return
 }
 socket := conn.(*net.UDPConn)
-defer socket.Close()
-err = socket.SetDeadline(time.Now().Add(3 * time.Second))
-if err != nil {
-return
+defer socket.Close() // nolint: errcheck
+if err := socket.SetDeadline(time.Now().Add(3 * time.Second)); err != nil {
+return nil, err
 }
 st := "InternetGatewayDevice:1"
@@ -64,6 +63,9 @@ func Discover() (nat NAT, err error) {
 }
 var n int
 _, _, err = socket.ReadFromUDP(answerBytes)
+if err != nil {
+return
+}
 for {
 n, _, err = socket.ReadFromUDP(answerBytes)
 if err != nil {
@@ -198,7 +200,8 @@ func getServiceURL(rootURL string) (url, urnDomain string, err error) {
 if err != nil {
 return
 }
-defer r.Body.Close()
+defer r.Body.Close() // nolint: errcheck
 if r.StatusCode >= 400 {
 err = errors.New(string(r.StatusCode))
 return
@@ -296,15 +299,21 @@ func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) {
 var response *http.Response
 response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain)
 if response != nil {
-defer response.Body.Close()
+defer response.Body.Close() // nolint: errcheck
 }
 if err != nil {
 return
 }
 var envelope Envelope
 data, err := ioutil.ReadAll(response.Body)
+if err != nil {
+return
+}
 reader := bytes.NewReader(data)
-xml.NewDecoder(reader).Decode(&envelope)
+err = xml.NewDecoder(reader).Decode(&envelope)
+if err != nil {
+return
+}
 info = statusInfo{envelope.Soap.ExternalIP.IPAddress}
@@ -339,7 +348,7 @@ func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int
 var response *http.Response
 response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain)
 if response != nil {
-defer response.Body.Close()
+defer response.Body.Close() // nolint: errcheck
 }
 if err != nil {
 return
@@ -365,7 +374,7 @@ func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort
 var response *http.Response
 response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain)
 if response != nil {
-defer response.Body.Close()
+defer response.Body.Close() // nolint: errcheck
 }
 if err != nil {
 return

View File

@@ -7,9 +7,9 @@ import (
 // doubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes.
 func doubleSha256(b []byte) []byte {
 hasher := sha256.New()
-hasher.Write(b)
+hasher.Write(b) // nolint: errcheck, gas
 sum := hasher.Sum(nil)
 hasher.Reset()
-hasher.Write(sum)
+hasher.Write(sum) // nolint: errcheck, gas
 return hasher.Sum(nil)
 }

View File

@@ -72,7 +72,9 @@ func TestEcho(t *testing.T) {
 for i := 0; i < 1000; i++ {
 proxy.EchoAsync(cmn.Fmt("echo-%v", i))
 }
-proxy.FlushSync()
+if err := proxy.FlushSync(); err != nil {
+t.Error(err)
+}
 }
 func BenchmarkEcho(b *testing.B) {
@@ -106,7 +108,9 @@ func BenchmarkEcho(b *testing.B) {
 for i := 0; i < b.N; i++ {
 proxy.EchoAsync(echoString)
 }
-proxy.FlushSync()
+if err := proxy.FlushSync(); err != nil {
+b.Error(err)
+}
 b.StopTimer()
 // info := proxy.InfoSync(types.RequestInfo{""})

View File

@@ -49,7 +49,7 @@ func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error
 c := a.App.CheckTx(tx)
 // and this gets written in a background thread...
 if c.IsOK() {
-go func() { a.App.DeliverTx(tx) }()
+go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
 }
 return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
 }
@@ -58,7 +58,7 @@ func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
 c := a.App.CheckTx(tx)
 // and this gets written in a background thread...
 if c.IsOK() {
-go func() { a.App.DeliverTx(tx) }()
+go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
 }
 return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
 }

View File

@@ -79,6 +79,8 @@ func TestABCIMock(t *testing.T) {
 func TestABCIRecorder(t *testing.T) {
 assert, require := assert.New(t), require.New(t)
+// This mock returns errors on everything but Query
 m := mock.ABCIMock{
 Info: mock.Call{Response: abci.ResponseInfo{
 Data: "data",
@@ -92,8 +94,11 @@ func TestABCIRecorder(t *testing.T) {
 require.Equal(0, len(r.Calls))
-r.ABCIInfo()
-r.ABCIQueryWithOptions("path", data.Bytes("data"), client.ABCIQueryOptions{Trusted: false})
+_, err := r.ABCIInfo()
+assert.Nil(err, "expected no err on info")
+_, err = r.ABCIQueryWithOptions("path", data.Bytes("data"), client.ABCIQueryOptions{Trusted: false})
+assert.NotNil(err, "expected error on query")
 require.Equal(2, len(r.Calls))
 info := r.Calls[0]
@@ -118,11 +123,14 @@ func TestABCIRecorder(t *testing.T) {
 assert.EqualValues("data", qa.Data)
 assert.False(qa.Trusted)
-// now add some broadcasts
+// now add some broadcasts (should all err)
 txs := []types.Tx{{1}, {2}, {3}}
-r.BroadcastTxCommit(txs[0])
-r.BroadcastTxSync(txs[1])
-r.BroadcastTxAsync(txs[2])
+_, err = r.BroadcastTxCommit(txs[0])
+assert.NotNil(err, "expected err on broadcast")
+_, err = r.BroadcastTxSync(txs[1])
+assert.NotNil(err, "expected err on broadcast")
+_, err = r.BroadcastTxAsync(txs[2])
+assert.NotNil(err, "expected err on broadcast")
 require.Equal(5, len(r.Calls))

View File

@@ -140,7 +140,9 @@ func TestAppCalls(t *testing.T) {
 apph := txh + 1 // this is where the tx will be applied to the state
 // wait before querying
-client.WaitForHeight(c, apph, nil)
+if err := client.WaitForHeight(c, apph, nil); err != nil {
+t.Error(err)
+}
 qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: true})
 if assert.Nil(err) && assert.True(qres.Code.IsOK()) {
 // assert.Equal(k, data.GetKey()) // only returned for proofs

View File

@@ -29,7 +29,9 @@ func UnsafeStartCPUProfiler(filename string) (*ctypes.ResultUnsafeProfile, error
 func UnsafeStopCPUProfiler() (*ctypes.ResultUnsafeProfile, error) {
 pprof.StopCPUProfile()
-profFile.Close()
+if err := profFile.Close(); err != nil {
+return nil, err
+}
 return &ctypes.ResultUnsafeProfile{}, nil
 }
@@ -38,8 +40,12 @@ func UnsafeWriteHeapProfile(filename string) (*ctypes.ResultUnsafeProfile, error
 if err != nil {
 return nil, err
 }
-pprof.WriteHeapProfile(memProfFile)
-memProfFile.Close()
+if err := pprof.WriteHeapProfile(memProfFile); err != nil {
+return nil, err
+}
+if err := memProfFile.Close(); err != nil {
+return nil, err
+}
 return &ctypes.ResultUnsafeProfile{}, nil
 }

View File

@@ -25,7 +25,7 @@ func StartGRPCServer(protoAddr string) (net.Listener, error) {
 grpcServer := grpc.NewServer()
 RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{})
-go grpcServer.Serve(ln)
+go grpcServer.Serve(ln) // nolint: errcheck
 return ln, nil
 }

View File

@@ -93,7 +93,8 @@ func (c *JSONRPCClient) Call(method string, params map[string]interface{}, resul
 if err != nil {
 return nil, err
 }
-defer httpResponse.Body.Close()
+defer httpResponse.Body.Close() // nolint: errcheck
 responseBytes, err := ioutil.ReadAll(httpResponse.Body)
 if err != nil {
 return nil, err
@@ -128,7 +129,8 @@ func (c *URIClient) Call(method string, params map[string]interface{}, result in
 if err != nil {
 return nil, err
 }
-defer resp.Body.Close()
+defer resp.Body.Close() // nolint: errcheck
 responseBytes, err := ioutil.ReadAll(resp.Body)
 if err != nil {
 return nil, err

View File

@@ -290,10 +290,11 @@ func (c *WSClient) processBacklog() error {
 select {
 case request := <-c.backlog:
 if c.writeWait > 0 {
-c.conn.SetWriteDeadline(time.Now().Add(c.writeWait))
+if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil {
+c.Logger.Error("failed to set write deadline", "err", err)
+}
 }
-err := c.conn.WriteJSON(request)
-if err != nil {
+if err := c.conn.WriteJSON(request); err != nil {
 c.Logger.Error("failed to resend request", "err", err)
 c.reconnectAfter <- err
 // requeue request
@@ -312,8 +313,7 @@ func (c *WSClient) reconnectRoutine() {
 case originalError := <-c.reconnectAfter:
 // wait until writeRoutine and readRoutine finish
 c.wg.Wait()
-err := c.reconnect()
-if err != nil {
+if err := c.reconnect(); err != nil {
 c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError)
 c.Stop()
 return
@@ -352,7 +352,10 @@ func (c *WSClient) writeRoutine() {
 defer func() {
 ticker.Stop()
-c.conn.Close()
+if err := c.conn.Close(); err != nil {
+// ignore error; it will trigger in tests
+// likely because it's closing an already closed connection
+}
 c.wg.Done()
 }()
@@ -360,10 +363,11 @@ func (c *WSClient) writeRoutine() {
 select {
 case request := <-c.send:
 if c.writeWait > 0 {
-c.conn.SetWriteDeadline(time.Now().Add(c.writeWait))
+if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil {
+c.Logger.Error("failed to set write deadline", "err", err)
+}
 }
-err := c.conn.WriteJSON(request)
-if err != nil {
+if err := c.conn.WriteJSON(request); err != nil {
 c.Logger.Error("failed to send request", "err", err)
 c.reconnectAfter <- err
 // add request to the backlog, so we don't lose it
@@ -372,10 +376,11 @@ func (c *WSClient) writeRoutine() {
 }
 case <-ticker.C:
 if c.writeWait > 0 {
-c.conn.SetWriteDeadline(time.Now().Add(c.writeWait))
+if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil {
+c.Logger.Error("failed to set write deadline", "err", err)
+}
 }
-err := c.conn.WriteMessage(websocket.PingMessage, []byte{})
-if err != nil {
+if err := c.conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil {
 c.Logger.Error("failed to write ping", "err", err)
 c.reconnectAfter <- err
 return
@@ -387,7 +392,9 @@ func (c *WSClient) writeRoutine() {
 case <-c.readRoutineQuit:
 return
 case <-c.Quit:
-c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
+if err := c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")); err != nil {
+c.Logger.Error("failed to write message", "err", err)
+}
 return
 }
 }
@@ -397,7 +404,10 @@ func (c *WSClient) writeRoutine() {
 // executing all reads from this goroutine.
 func (c *WSClient) readRoutine() {
 defer func() {
-c.conn.Close()
+if err := c.conn.Close(); err != nil {
+// ignore error; it will trigger in tests
+// likely because it's closing an already closed connection
+}
 c.wg.Done()
 }()
@@ -415,7 +425,9 @@ func (c *WSClient) readRoutine() {
 for {
 // reset deadline for every message type (control or data)
 if c.readWait > 0 {
-c.conn.SetReadDeadline(time.Now().Add(c.readWait))
+if err := c.conn.SetReadDeadline(time.Now().Add(c.readWait)); err != nil {
+c.Logger.Error("failed to set read deadline", "err", err)
+}
 }
 _, data, err := c.conn.ReadMessage()
 if err != nil {

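Most rewrites in this file use Go's if-with-initializer form, which keeps err scoped to the single check and avoids shadowing or leaking into the surrounding error handling. A tiny standalone illustration (setDeadline is a hypothetical helper):

package demo

import (
    "log"
    "net"
    "time"
)

// setDeadline shows the scoped-error idiom: err exists only inside
// the if statement, so it cannot collide with outer error variables.
func setDeadline(conn net.Conn, d time.Duration) {
    if err := conn.SetWriteDeadline(time.Now().Add(d)); err != nil {
        log.Println("failed to set write deadline:", err)
    }
}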
View File

@@ -34,7 +34,7 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 if err != nil {
 panic(err)
 }
-defer conn.Close()
+defer conn.Close() // nolint: errcheck
 for {
 messageType, _, err := conn.ReadMessage()
 if err != nil {
@@ -43,7 +43,9 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 h.mtx.RLock()
 if h.closeConnAfterRead {
-conn.Close()
+if err := conn.Close(); err != nil {
+panic(err)
+}
 }
 h.mtx.RUnlock()
@@ -102,7 +104,9 @@ func TestWSClientReconnectsAfterWriteFailure(t *testing.T) {
 go callWgDoneOnResult(t, c, &wg)
 // hacky way to abort the connection before write
-c.conn.Close()
+if err := c.conn.Close(); err != nil {
+t.Error(err)
+}
 // results in WS write error, the client should resend on reconnect
 call(t, "a", c)
@@ -135,14 +139,18 @@ func TestWSClientReconnectFailure(t *testing.T) {
 }()
 // hacky way to abort the connection before write
-c.conn.Close()
+if err := c.conn.Close(); err != nil {
+t.Error(err)
+}
 s.Close()
 // results in WS write error
 // provide timeout to avoid blocking
 ctx, cancel := context.WithTimeout(context.Background(), wsCallTimeout)
 defer cancel()
-c.Call(ctx, "a", make(map[string]interface{}))
+if err := c.Call(ctx, "a", make(map[string]interface{})); err != nil {
+t.Error(err)
+}
 // expect to reconnect almost immediately
 time.Sleep(10 * time.Millisecond)

View File

@@ -216,19 +216,17 @@ func echoViaWS(cl *client.WSClient, val string) (string, error) {
 return "", err
 }
-select {
-case msg := <-cl.ResponsesCh:
-if msg.Error != nil {
-return "", err
-}
-result := new(ResultEcho)
-err = json.Unmarshal(msg.Result, result)
-if err != nil {
-return "", nil
-}
-return result.Value, nil
+msg := <-cl.ResponsesCh
+if msg.Error != nil {
+return "", err
 }
+result := new(ResultEcho)
+err = json.Unmarshal(msg.Result, result)
+if err != nil {
+return "", nil
+}
+return result.Value, nil
 }
 func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) {
@@ -240,19 +238,17 @@ func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) {
 return []byte{}, err
 }
-select {
-case msg := <-cl.ResponsesCh:
-if msg.Error != nil {
-return []byte{}, msg.Error
-}
-result := new(ResultEchoBytes)
-err = json.Unmarshal(msg.Result, result)
-if err != nil {
-return []byte{}, nil
-}
-return result.Value, nil
+msg := <-cl.ResponsesCh
+if msg.Error != nil {
+return []byte{}, msg.Error
 }
+result := new(ResultEchoBytes)
+err = json.Unmarshal(msg.Result, result)
+if err != nil {
+return []byte{}, nil
+}
+return result.Value, nil
 }
 func testWithWSClient(t *testing.T, cl *client.WSClient) {
@@ -322,17 +318,15 @@ func TestWSNewWSRPCFunc(t *testing.T) {
 err = cl.Call(context.Background(), "echo_ws", params)
 require.Nil(t, err)
-select {
-case msg := <-cl.ResponsesCh:
-if msg.Error != nil {
-t.Fatal(err)
-}
-result := new(ResultEcho)
-err = json.Unmarshal(msg.Result, result)
-require.Nil(t, err)
-got := result.Value
-assert.Equal(t, got, val)
+msg := <-cl.ResponsesCh
+if msg.Error != nil {
+t.Fatal(err)
 }
+result := new(ResultEcho)
+err = json.Unmarshal(msg.Result, result)
+require.Nil(t, err)
+got := result.Value
+assert.Equal(t, got, val)
 }
 func TestWSHandlesArrayParams(t *testing.T) {
@@ -347,17 +341,15 @@ func TestWSHandlesArrayParams(t *testing.T) {
 err = cl.CallWithArrayParams(context.Background(), "echo_ws", params)
 require.Nil(t, err)
-select {
-case msg := <-cl.ResponsesCh:
-if msg.Error != nil {
-t.Fatalf("%+v", err)
-}
-result := new(ResultEcho)
-err = json.Unmarshal(msg.Result, result)
-require.Nil(t, err)
-got := result.Value
-assert.Equal(t, got, val)
+msg := <-cl.ResponsesCh
+if msg.Error != nil {
+t.Fatalf("%+v", err)
 }
+result := new(ResultEcho)
+err = json.Unmarshal(msg.Result, result)
+require.Nil(t, err)
+got := result.Value
+assert.Equal(t, got, val)
 }
 // TestWSClientPingPong checks that a client & server exchange pings

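The select blocks removed above each had a single receive case and no default, which behaves exactly like a plain channel receive, so the rewrite flattens them to `msg := <-cl.ResponsesCh`. The two forms are equivalent, as a sketch:

package demo

// recvVerbose blocks until a value arrives, via a one-case select.
func recvVerbose(ch chan string) string {
    select {
    case msg := <-ch:
        return msg
    }
}

// recvPlain is semantically identical and is what the rewrite prefers.
func recvPlain(ch chan string) string {
    return <-ch
}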
View File

@@ -99,7 +99,11 @@ func funcReturnTypes(f interface{}) []reflect.Type {
 // jsonrpc calls grab the given method's function info and runs reflect.Call
 func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc {
 return func(w http.ResponseWriter, r *http.Request) {
-b, _ := ioutil.ReadAll(r.Body)
+b, err := ioutil.ReadAll(r.Body)
+if err != nil {
+WriteRPCResponseHTTP(w, types.RPCInvalidRequestError("", errors.Wrap(err, "Error reading request body")))
+return
+}
 // if its an empty request (like from a browser),
 // just display a list of functions
 if len(b) == 0 {
@@ -108,7 +112,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han
 }
 var request types.RPCRequest
-err := json.Unmarshal(b, &request)
+err = json.Unmarshal(b, &request)
 if err != nil {
 WriteRPCResponseHTTP(w, types.RPCParseError("", errors.Wrap(err, "Error unmarshalling request")))
 return
@@ -529,7 +533,7 @@ func (wsc *wsConnection) readRoutine() {
 wsc.WriteRPCResponse(types.RPCInternalError("unknown", err))
 go wsc.readRoutine()
 } else {
-wsc.baseConn.Close()
+wsc.baseConn.Close() // nolint: errcheck
 }
 }()
@@ -543,7 +547,9 @@ func (wsc *wsConnection) readRoutine() {
 return
 default:
 // reset deadline for every type of message (control or data)
-wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait))
+if err := wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait)); err != nil {
+wsc.Logger.Error("failed to set read deadline", "err", err)
+}
 var in []byte
 _, in, err := wsc.baseConn.ReadMessage()
 if err != nil {
@@ -615,7 +621,9 @@ func (wsc *wsConnection) writeRoutine() {
 pingTicker := time.NewTicker(wsc.pingPeriod)
 defer func() {
 pingTicker.Stop()
-wsc.baseConn.Close()
+if err := wsc.baseConn.Close(); err != nil {
+wsc.Logger.Error("Error closing connection", "err", err)
+}
 }()
 // https://github.com/gorilla/websocket/issues/97
@@ -662,7 +670,9 @@ func (wsc *wsConnection) writeRoutine() {
 // All writes to the websocket must (re)set the write deadline.
 // If some writes don't set it while others do, they may timeout incorrectly (https://github.com/tendermint/tendermint/issues/553)
 func (wsc *wsConnection) writeMessageWithDeadline(msgType int, msg []byte) error {
-wsc.baseConn.SetWriteDeadline(time.Now().Add(wsc.writeWait))
+if err := wsc.baseConn.SetWriteDeadline(time.Now().Add(wsc.writeWait)); err != nil {
+return err
+}
 return wsc.baseConn.WriteMessage(msgType, msg)
 }
@@ -713,7 +723,10 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ
 con := NewWSConnection(wsConn, wm.funcMap, wm.wsConnOptions...)
 con.SetLogger(wm.logger.With("remote", wsConn.RemoteAddr()))
 wm.logger.Info("New websocket connection", "remote", con.remoteAddr)
-con.Start() // Blocking
+_, err = con.Start() // Blocking
+if err != nil {
+wm.logger.Error("Error starting connection", "err", err)
+}
 }
 // rpc.websocket
@@ -770,5 +783,5 @@ func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[st
 buf.WriteString("</body></html>")
 w.Header().Set("Content-Type", "text/html")
 w.WriteHeader(200)
-w.Write(buf.Bytes())
+w.Write(buf.Bytes()) // nolint: errcheck
 }

View File

@@ -56,7 +56,7 @@ func WriteRPCResponseHTTPError(w http.ResponseWriter, httpCode int, res types.RP
 w.Header().Set("Content-Type", "application/json")
 w.WriteHeader(httpCode)
-w.Write(jsonBytes)
+w.Write(jsonBytes) // nolint: errcheck, gas
 }
 func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) {
@@ -66,7 +66,7 @@ func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) {
 }
 w.Header().Set("Content-Type", "application/json")
 w.WriteHeader(200)
-w.Write(jsonBytes)
+w.Write(jsonBytes) // nolint: errcheck, gas
 }
 //-----------------------------------------------------------------------------

View File

@@ -150,7 +150,7 @@ func TestParseRPC(t *testing.T) {
 {`{"name": "john", "height": 22}`, 22, "john", false},
 // defaults
 {`{"name": "solo", "unused": "stuff"}`, 0, "solo", false},
-// should fail - wrong types/lenght
+// should fail - wrong types/length
 {`["flew", 7]`, 0, "", true},
 {`[7,"flew",100]`, 0, "", true},
 {`{"name": -12, "height": "fred"}`, 0, "", true},

View File

@@ -92,7 +92,10 @@ func GetGRPCClient() core_grpc.BroadcastAPIClient {
 // StartTendermint starts a test tendermint server in a go routine and returns when it is initialized
 func StartTendermint(app abci.Application) *nm.Node {
 node := NewTendermint(app)
-node.Start()
+_, err := node.Start()
+if err != nil {
+panic(err)
+}
 // wait for rpc
 waitForRPC()

View File

@@ -41,10 +41,18 @@ func main() {
 panic(fmt.Errorf("failed to marshal msg: %v", err))
 }
-os.Stdout.Write(json)
-os.Stdout.Write([]byte("\n"))
-if end, ok := msg.Msg.(cs.EndHeightMessage); ok {
-os.Stdout.Write([]byte(fmt.Sprintf("ENDHEIGHT %d\n", end.Height)))
+_, err = os.Stdout.Write(json)
+if err == nil {
+_, err = os.Stdout.Write([]byte("\n"))
+}
+if err == nil {
+if end, ok := msg.Msg.(cs.EndHeightMessage); ok {
+_, err = os.Stdout.Write([]byte(fmt.Sprintf("ENDHEIGHT %d\n", end.Height))) // nolint: errcheck, gas
+}
+}
+if err != nil {
+fmt.Println("Failed to write message", err)
+os.Exit(1)
 }
 }
 }

View File

@@ -160,6 +160,7 @@ func updateValidators(validators *types.ValidatorSet, changedValidators []*abci.
 // return a bit array of validators that signed the last commit
 // NOTE: assumes commits have already been authenticated
+/* function is currently unused
 func commitBitArrayFromBlock(block *types.Block) *cmn.BitArray {
 signed := cmn.NewBitArray(len(block.LastCommit.Precommits))
 for i, precommit := range block.LastCommit.Precommits {
@@ -169,6 +170,7 @@ func commitBitArrayFromBlock(block *types.Block) *cmn.BitArray {
 }
 return signed
 }
+*/
 //-----------------------------------------------------
 // Validate block
@@ -271,9 +273,7 @@ func (s *State) CommitStateUpdateMempool(proxyAppConn proxy.AppConnConsensus, bl
 s.AppHash = res.Data
 // Update mempool.
-mempool.Update(block.Height, block.Txs)
-return nil
+return mempool.Update(block.Height, block.Txs)
 }
 func (s *State) indexTxs(abciResponses *ABCIResponses) {
@@ -282,14 +282,18 @@ func (s *State) indexTxs(abciResponses *ABCIResponses) {
 batch := txindex.NewBatch(len(abciResponses.DeliverTx))
 for i, d := range abciResponses.DeliverTx {
 tx := abciResponses.txs[i]
-batch.Add(types.TxResult{
+if err := batch.Add(types.TxResult{
 Height: uint64(abciResponses.Height),
 Index: uint32(i),
 Tx: tx,
 Result: *d,
-})
+}); err != nil {
+s.logger.Error("Error with batch.Add", "err", err)
+}
 }
-s.TxIndexer.AddBatch(batch)
+if err := s.TxIndexer.AddBatch(batch); err != nil {
+s.logger.Error("Error adding batch", "err", err)
+}
 }

View File

@@ -59,7 +59,7 @@ func state() *State {
 s, _ := MakeGenesisState(dbm.NewMemDB(), &types.GenesisDoc{
 ChainID: chainID,
 Validators: []types.GenesisValidator{
-types.GenesisValidator{privKey.PubKey(), 10000, "test"},
+{privKey.PubKey(), 10000, "test"},
 },
 AppHash: nil,
 })

View File

@@ -11,7 +11,7 @@ type TxIndexer interface {
 // AddBatch analyzes, indexes or stores a batch of transactions.
 // NOTE: We do not specify Index method for analyzing a single transaction
-// here because it bears heavy perfomance loses. Almost all advanced indexers
+// here because it bears heavy performance losses. Almost all advanced indexers
 // support batching.
 AddBatch(b *Batch) error

View File

@@ -21,7 +21,9 @@ func TestTxIndex(t *testing.T) {
 hash := tx.Hash()
 batch := txindex.NewBatch(1)
-batch.Add(*txResult)
+if err := batch.Add(*txResult); err != nil {
+t.Error(err)
+}
 err := indexer.AddBatch(batch)
 require.Nil(t, err)
@@ -38,14 +40,16 @@ func benchmarkTxIndex(txsCount int, b *testing.B) {
 if err != nil {
 b.Fatal(err)
 }
-defer os.RemoveAll(dir)
+defer os.RemoveAll(dir) // nolint: errcheck
 store := db.NewDB("tx_index", "leveldb", dir)
 indexer := &TxIndex{store: store}
 batch := txindex.NewBatch(txsCount)
 for i := 0; i < txsCount; i++ {
-batch.Add(*txResult)
+if err := batch.Add(*txResult); err != nil {
+b.Fatal(err)
+}
 txResult.Index += 1
 }

View File

@@ -6,6 +6,10 @@ pwd
 BRANCH=$(git rev-parse --abbrev-ref HEAD)
 echo "Current branch: $BRANCH"
+# run the linter
+# TODO: drop the `_test` once we're ballin' enough
+make metalinter_test
+
 # run the go unit tests with coverage
 bash test/test_cover.sh

View File

@@ -40,17 +40,17 @@ func TestHeartbeatWriteSignBytes(t *testing.T) {
 hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1}
 hb.WriteSignBytes("0xdeadbeef", buf, &n, &err)
-require.Equal(t, string(buf.Bytes()), `{"chain_id":"0xdeadbeef","heartbeat":{"height":10,"round":1,"sequence":0,"validator_address":"","validator_index":1}}`)
+require.Equal(t, buf.String(), `{"chain_id":"0xdeadbeef","heartbeat":{"height":10,"round":1,"sequence":0,"validator_address":"","validator_index":1}}`)
 buf.Reset()
 plainHb := &Heartbeat{}
 plainHb.WriteSignBytes("0xdeadbeef", buf, &n, &err)
-require.Equal(t, string(buf.Bytes()), `{"chain_id":"0xdeadbeef","heartbeat":{"height":0,"round":0,"sequence":0,"validator_address":"","validator_index":0}}`)
+require.Equal(t, buf.String(), `{"chain_id":"0xdeadbeef","heartbeat":{"height":0,"round":0,"sequence":0,"validator_address":"","validator_index":0}}`)
 require.Panics(t, func() {
 buf.Reset()
 var nilHb *Heartbeat
 nilHb.WriteSignBytes("0xdeadbeef", buf, &n, &err)
-require.Equal(t, string(buf.Bytes()), "null")
+require.Equal(t, buf.String(), "null")
 })
 }

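string(buf.Bytes()) and buf.String() yield the same result; the latter is the accessor bytes.Buffer provides for exactly this, which is why the tests switch to it. A trivial sketch:

package demo

import "bytes"

// render returns the same string twice: once via the conversion the
// old tests used, once via the idiomatic accessor.
func render() (string, string) {
    var buf bytes.Buffer
    buf.WriteString(`{"height":10}`)
    return string(buf.Bytes()), buf.String()
}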
View File

@@ -34,7 +34,7 @@ func (part *Part) Hash() []byte {
 return part.hash
 } else {
 hasher := ripemd160.New()
-hasher.Write(part.Bytes) // doesn't err
+hasher.Write(part.Bytes) // nolint: errcheck, gas
 part.hash = hasher.Sum(nil)
 return part.hash
 }

View File

@@ -34,7 +34,9 @@ func TestLoadOrGenValidator(t *testing.T) {
 assert := assert.New(t)
 _, tempFilePath := cmn.Tempfile("priv_validator_")
-os.Remove(tempFilePath)
+if err := os.Remove(tempFilePath); err != nil {
+t.Error(err)
+}
 privVal := LoadOrGenPrivValidatorFS(tempFilePath)
 addr := privVal.GetAddress()
 privVal = LoadOrGenPrivValidatorFS(tempFilePath)

View File

@@ -30,7 +30,10 @@ func BenchmarkProposalWriteSignBytes(b *testing.B) {
 func BenchmarkProposalSign(b *testing.B) {
 privVal := GenPrivValidatorFS("")
 for i := 0; i < b.N; i++ {
-privVal.Signer.Sign(SignBytes("test_chain_id", testProposal))
+_, err := privVal.Signer.Sign(SignBytes("test_chain_id", testProposal))
+if err != nil {
+b.Error(err)
+}
 }
 }

View File

@@ -25,7 +25,7 @@ type Mempool interface {
 Size() int
 CheckTx(Tx, func(*abci.Response)) error
 Reap(int) Txs
-Update(height int, txs Txs)
+Update(height int, txs Txs) error
 Flush()
 TxsAvailable() <-chan int
@@ -42,7 +42,7 @@ func (m MockMempool) Unlock() {}
 func (m MockMempool) Size() int { return 0 }
 func (m MockMempool) CheckTx(tx Tx, cb func(*abci.Response)) error { return nil }
 func (m MockMempool) Reap(n int) Txs { return Txs{} }
-func (m MockMempool) Update(height int, txs Txs) {}
+func (m MockMempool) Update(height int, txs Txs) error { return nil }
 func (m MockMempool) Flush() {}
 func (m MockMempool) TxsAvailable() <-chan int { return make(chan int) }
 func (m MockMempool) EnableTxsAvailable() {}

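Adding an error return to Update is a breaking interface change: every implementation, mocks included, must adopt the new signature or stop satisfying the interface at compile time, which is why MockMempool changes in the same hunk. A trimmed stand-in sketch (not the project's full interface):

package demo

type Tx []byte
type Txs []Tx

// Mempool is a reduced stand-in for the interface above.
type Mempool interface {
    Update(height int, txs Txs) error
}

type MockMempool struct{}

// Update must match the new signature exactly.
func (m MockMempool) Update(height int, txs Txs) error { return nil }

// Compile-time check that the mock still satisfies the interface.
var _ Mempool = MockMempool{}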
View File

@@ -71,7 +71,7 @@ func (v *Validator) String() string {
 }
 // Hash computes the unique ID of a validator with a given voting power.
-// It exludes the Accum value, which changes with every round.
+// It excludes the Accum value, which changes with every round.
 func (v *Validator) Hash() []byte {
 return wire.BinaryRipemd160(struct {
 Address data.Bytes

View File

@@ -6,7 +6,7 @@ import (
 	"testing"
 	"github.com/tendermint/go-crypto"
-	wire "github.com/tendermint/go-wire"
+	"github.com/tendermint/go-wire"
 	cmn "github.com/tendermint/tmlibs/common"
 )

View File

@@ -126,7 +126,10 @@ func Test2_3Majority(t *testing.T) {
 	// 6 out of 10 voted for nil.
 	for i := 0; i < 6; i++ {
 		vote := withValidator(voteProto, privValidators[i].GetAddress(), i)
-		signAddVote(privValidators[i], vote, voteSet)
+		_, err := signAddVote(privValidators[i], vote, voteSet)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 	blockID, ok := voteSet.TwoThirdsMajority()
 	if ok || !blockID.IsZero() {
@@ -136,7 +139,10 @@ func Test2_3Majority(t *testing.T) {
 	// 7th validator voted for some blockhash
 	{
 		vote := withValidator(voteProto, privValidators[6].GetAddress(), 6)
-		signAddVote(privValidators[6], withBlockHash(vote, cmn.RandBytes(32)), voteSet)
+		_, err := signAddVote(privValidators[6], withBlockHash(vote, cmn.RandBytes(32)), voteSet)
+		if err != nil {
+			t.Error(err)
+		}
 		blockID, ok = voteSet.TwoThirdsMajority()
 		if ok || !blockID.IsZero() {
 			t.Errorf("There should be no 2/3 majority")
@@ -146,7 +152,10 @@ func Test2_3Majority(t *testing.T) {
 	// 8th validator voted for nil.
 	{
 		vote := withValidator(voteProto, privValidators[7].GetAddress(), 7)
-		signAddVote(privValidators[7], vote, voteSet)
+		_, err := signAddVote(privValidators[7], vote, voteSet)
+		if err != nil {
+			t.Error(err)
+		}
 		blockID, ok = voteSet.TwoThirdsMajority()
 		if !ok || !blockID.IsZero() {
 			t.Errorf("There should be 2/3 majority for nil")
@@ -174,7 +183,10 @@ func Test2_3MajorityRedux(t *testing.T) {
 	// 66 out of 100 voted for nil.
 	for i := 0; i < 66; i++ {
 		vote := withValidator(voteProto, privValidators[i].GetAddress(), i)
-		signAddVote(privValidators[i], vote, voteSet)
+		_, err := signAddVote(privValidators[i], vote, voteSet)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 	blockID, ok := voteSet.TwoThirdsMajority()
 	if ok || !blockID.IsZero() {
@@ -184,7 +196,10 @@ func Test2_3MajorityRedux(t *testing.T) {
 	// 67th validator voted for nil
 	{
 		vote := withValidator(voteProto, privValidators[66].GetAddress(), 66)
-		signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet)
+		_, err := signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet)
+		if err != nil {
+			t.Error(err)
+		}
 		blockID, ok = voteSet.TwoThirdsMajority()
 		if ok || !blockID.IsZero() {
 			t.Errorf("There should be no 2/3 majority: last vote added was nil")
@@ -195,7 +210,10 @@ func Test2_3MajorityRedux(t *testing.T) {
 	{
 		vote := withValidator(voteProto, privValidators[67].GetAddress(), 67)
 		blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)}
-		signAddVote(privValidators[67], withBlockPartsHeader(vote, blockPartsHeader), voteSet)
+		_, err := signAddVote(privValidators[67], withBlockPartsHeader(vote, blockPartsHeader), voteSet)
+		if err != nil {
+			t.Error(err)
+		}
 		blockID, ok = voteSet.TwoThirdsMajority()
 		if ok || !blockID.IsZero() {
 			t.Errorf("There should be no 2/3 majority: last vote added had different PartSetHeader Hash")
@@ -206,7 +224,10 @@ func Test2_3MajorityRedux(t *testing.T) {
 	{
 		vote := withValidator(voteProto, privValidators[68].GetAddress(), 68)
 		blockPartsHeader := PartSetHeader{blockPartsTotal + 1, blockPartsHeader.Hash}
-		signAddVote(privValidators[68], withBlockPartsHeader(vote, blockPartsHeader), voteSet)
+		_, err := signAddVote(privValidators[68], withBlockPartsHeader(vote, blockPartsHeader), voteSet)
+		if err != nil {
+			t.Error(err)
+		}
 		blockID, ok = voteSet.TwoThirdsMajority()
 		if ok || !blockID.IsZero() {
 			t.Errorf("There should be no 2/3 majority: last vote added had different PartSetHeader Total")
@@ -216,7 +237,10 @@ func Test2_3MajorityRedux(t *testing.T) {
 	// 70th validator voted for different BlockHash
 	{
 		vote := withValidator(voteProto, privValidators[69].GetAddress(), 69)
-		signAddVote(privValidators[69], withBlockHash(vote, cmn.RandBytes(32)), voteSet)
+		_, err := signAddVote(privValidators[69], withBlockHash(vote, cmn.RandBytes(32)), voteSet)
+		if err != nil {
+			t.Error(err)
+		}
 		blockID, ok = voteSet.TwoThirdsMajority()
 		if ok || !blockID.IsZero() {
 			t.Errorf("There should be no 2/3 majority: last vote added had different BlockHash")
@@ -226,7 +250,10 @@ func Test2_3MajorityRedux(t *testing.T) {
 	// 71st validator voted for the right BlockHash & BlockPartsHeader
 	{
 		vote := withValidator(voteProto, privValidators[70].GetAddress(), 70)
-		signAddVote(privValidators[70], vote, voteSet)
+		_, err := signAddVote(privValidators[70], vote, voteSet)
+		if err != nil {
+			t.Error(err)
+		}
 		blockID, ok = voteSet.TwoThirdsMajority()
 		if !ok || !blockID.Equals(BlockID{blockHash, blockPartsHeader}) {
 			t.Errorf("There should be 2/3 majority")
@@ -439,7 +466,10 @@ func TestMakeCommit(t *testing.T) {
 	// 6 out of 10 voted for some block.
 	for i := 0; i < 6; i++ {
 		vote := withValidator(voteProto, privValidators[i].GetAddress(), i)
-		signAddVote(privValidators[i], vote, voteSet)
+		_, err := signAddVote(privValidators[i], vote, voteSet)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 	// MakeCommit should fail.
@@ -450,13 +480,20 @@ func TestMakeCommit(t *testing.T) {
 		vote := withValidator(voteProto, privValidators[6].GetAddress(), 6)
 		vote = withBlockHash(vote, cmn.RandBytes(32))
 		vote = withBlockPartsHeader(vote, PartSetHeader{123, cmn.RandBytes(32)})
-		signAddVote(privValidators[6], vote, voteSet)
+
+		_, err := signAddVote(privValidators[6], vote, voteSet)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 	// The 8th voted like everyone else.
 	{
 		vote := withValidator(voteProto, privValidators[7].GetAddress(), 7)
-		signAddVote(privValidators[7], vote, voteSet)
+		_, err := signAddVote(privValidators[7], vote, voteSet)
+		if err != nil {
+			t.Error(err)
+		}
 	}
 	commit := voteSet.MakeCommit()
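Editor's note: the same four-line error check now trails every signAddVote call in this file. If the pattern keeps spreading, a tiny wrapper next to the existing signAddVote test helper would collapse it to one line per vote. This is a hypothetical sketch, not part of the commit; it assumes signAddVote returns (bool, error) as the diff indicates, and the parameter types should match the helper's actual signature:

// mustSignAddVote reports any sign/add error in place and returns
// whether the vote was added, so callers skip the err-check block.
func mustSignAddVote(t *testing.T, privVal *PrivValidatorFS, vote *Vote, voteSet *VoteSet) bool {
	added, err := signAddVote(privVal, vote, voteSet)
	if err != nil {
		t.Error(err)
	}
	return added
}

Call sites would then shrink to mustSignAddVote(t, privValidators[6], vote, voteSet).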