errcheck; sort some stuff out

Zach Ramsay 2017-10-03 18:49:20 -04:00 committed by Ethan Buchman
parent 563faa98de
commit d7cb291fb2
26 changed files with 61 additions and 154 deletions

View File

@@ -89,7 +89,6 @@ metalinter_test: ensure_tools
	gometalinter --vendor --deadline=600s --disable-all \
	--enable=deadcode \
	--enable=gas \
-	--enable=goimports \
	--enable=gosimple \
	--enable=gotype \
	--enable=ineffassign \
@@ -104,6 +103,7 @@ metalinter_test: ensure_tools
	#--enable=errcheck \
	#--enable=goconst \
	#--enable=gocyclo \
+	#--enable=goimports \
	#--enable=golint \ <== comments on anything exported
	#--enable=interfacer \
	#--enable=megacheck \

View File

@@ -1,4 +1,4 @@
-package benchmarks // nolint (goimports)
+package benchmarks // nolint: goimports
import (
"testing"

View File

@@ -228,7 +228,7 @@ FOR_LOOP:
}
case <-statusUpdateTicker.C:
// ask for status updates
-go bcR.BroadcastStatusRequest() // nolint (errcheck)
+go bcR.BroadcastStatusRequest() // nolint: errcheck
case <-switchToConsensusTicker.C:
height, numPending, lenRequesters := bcR.pool.GetStatus()
outbound, inbound, _ := bcR.Switch.NumPeers()

View File

@@ -9,7 +9,7 @@ import (
"github.com/tendermint/go-wire"
"github.com/tendermint/tendermint/types"
-. "github.com/tendermint/tmlibs/common"
+cmn "github.com/tendermint/tmlibs/common"
dbm "github.com/tendermint/tmlibs/db"
)
@@ -67,7 +67,7 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block {
}
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
if err != nil {
-PanicCrisis(Fmt("Error reading block meta: %v", err))
+cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
}
bytez := []byte{}
for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ {
@@ -76,7 +76,7 @@ func (bs *BlockStore) LoadBlock(height int) *types.Block {
}
block := wire.ReadBinary(&types.Block{}, bytes.NewReader(bytez), 0, &n, &err).(*types.Block)
if err != nil {
-PanicCrisis(Fmt("Error reading block: %v", err))
+cmn.PanicCrisis(cmn.Fmt("Error reading block: %v", err))
}
return block
}
@@ -90,7 +90,7 @@ func (bs *BlockStore) LoadBlockPart(height int, index int) *types.Part {
}
part := wire.ReadBinary(&types.Part{}, r, 0, &n, &err).(*types.Part)
if err != nil {
-PanicCrisis(Fmt("Error reading block part: %v", err))
+cmn.PanicCrisis(cmn.Fmt("Error reading block part: %v", err))
}
return part
}
@@ -104,7 +104,7 @@ func (bs *BlockStore) LoadBlockMeta(height int) *types.BlockMeta {
}
blockMeta := wire.ReadBinary(&types.BlockMeta{}, r, 0, &n, &err).(*types.BlockMeta)
if err != nil {
-PanicCrisis(Fmt("Error reading block meta: %v", err))
+cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
}
return blockMeta
}
@@ -120,7 +120,7 @@ func (bs *BlockStore) LoadBlockCommit(height int) *types.Commit {
}
commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
if err != nil {
-PanicCrisis(Fmt("Error reading commit: %v", err))
+cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err))
}
return commit
}
@@ -135,7 +135,7 @@ func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit {
}
commit := wire.ReadBinary(&types.Commit{}, r, 0, &n, &err).(*types.Commit)
if err != nil {
-PanicCrisis(Fmt("Error reading commit: %v", err))
+cmn.PanicCrisis(cmn.Fmt("Error reading commit: %v", err))
}
return commit
}
@@ -148,10 +148,10 @@ func (bs *BlockStore) LoadSeenCommit(height int) *types.Commit {
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
height := block.Height
if height != bs.Height()+1 {
-PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
+cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
if !blockParts.IsComplete() {
-PanicSanity(Fmt("BlockStore can only save complete block part sets"))
+cmn.PanicSanity(cmn.Fmt("BlockStore can only save complete block part sets"))
}
// Save block meta
@@ -187,7 +187,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
func (bs *BlockStore) saveBlockPart(height int, index int, part *types.Part) {
if height != bs.Height()+1 {
-PanicSanity(Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
+cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
}
partBytes := wire.BinaryBytes(part)
bs.db.Set(calcBlockPartKey(height, index), partBytes)
@@ -222,7 +222,7 @@ type BlockStoreStateJSON struct {
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
bytes, err := json.Marshal(bsj)
if err != nil {
-PanicSanity(Fmt("Could not marshal state bytes: %v", err))
+cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
}
db.SetSync(blockStoreKey, bytes)
}
@@ -237,7 +237,7 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON {
bsj := BlockStoreStateJSON{}
err := json.Unmarshal(bytes, &bsj)
if err != nil {
-PanicCrisis(Fmt("Could not unmarshal bytes: %X", bytes))
+cmn.PanicCrisis(cmn.Fmt("Could not unmarshal bytes: %X", bytes))
}
return bsj
}
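The dot import removed above pulled PanicCrisis, PanicSanity, and Fmt into the file's namespace unqualified; the `cmn` alias keeps every call site explicit. A minimal standalone sketch of the aliased form, using only identifiers that appear in the hunks above (the surrounding main is illustrative):

```go
package main

import (
	cmn "github.com/tendermint/tmlibs/common"
)

func main() {
	var err error
	// With the alias, helper calls are qualified instead of appearing
	// as bare PanicCrisis/Fmt from a dot import.
	if err != nil {
		cmn.PanicCrisis(cmn.Fmt("Error reading block meta: %v", err))
	}
}
```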

View File

@@ -25,10 +25,13 @@ var ResetPrivValidatorCmd = &cobra.Command{
}
// ResetAll removes the privValidator files.
-// Exported so other CLI tools can use it
+// Exported so other CLI tools can use it.
func ResetAll(dbDir, privValFile string, logger log.Logger) {
resetPrivValidatorFS(privValFile, logger)
-os.RemoveAll(dbDir)
+if err := os.RemoveAll(dbDir); err != nil {
+logger.Error("Error removing directory", "err", err)
+return
+}
logger.Info("Removed all data", "dir", dbDir)
}
@@ -44,16 +47,6 @@ func resetPrivValidator(cmd *cobra.Command, args []string) {
resetPrivValidatorFS(config.PrivValidatorFile(), logger)
}
-// Exported so other CLI tools can use it
-func ResetAll(dbDir, privValFile string, logger log.Logger) {
-resetPrivValidatorLocal(privValFile, logger)
-if err := os.RemoveAll(dbDir); err != nil {
-logger.Error("Error removing directory", "err", err)
-return
-}
-logger.Info("Removed all data", "dir", dbDir)
-}
func resetPrivValidatorFS(privValFile string, logger log.Logger) {
// Get PrivValidator
if _, err := os.Stat(privValFile); err == nil {

View File

@@ -24,10 +24,7 @@ func TestEnsureRoot(t *testing.T) {
// setup temp dir for test
tmpDir, err := ioutil.TempDir("", "config-test")
require.Nil(err)
-defer func() {
-err := os.RemoveAll(tmpDir)
-require.Nil(err)
-}()
+defer os.RemoveAll(tmpDir) // nolint: errcheck
// create root dir
EnsureRoot(tmpDir)

View File

@@ -70,7 +70,7 @@ func TestByzantine(t *testing.T) {
conR.SetLogger(logger.With("validator", i))
conR.SetEventBus(eventBus)
-var conRI p2p.Reactor // nolint (gotype)
+var conRI p2p.Reactor // nolint: gotype
conRI = conR
if i == 0 {

View File

@@ -115,12 +115,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error {
} else if err != nil {
return err
} else {
-defer func() {
-if err := gr.Close(); err != nil {
-cs.Logger.Error("Error closing wal Search", "err", err)
-return
-}
-}()
+defer gr.Close() // nolint: errcheck
}
if !found {
return errors.New(cmn.Fmt("Cannot replay height %d. WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1))

View File

@@ -65,11 +65,7 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error {
}
pb := newPlayback(file, fp, cs, cs.state.Copy())
-defer func() {
-if err := pb.fp.Close(); err != nil {
-cs.Logger.Error("Error closing new playback", "err", err)
-}
-}()
+defer pb.fp.Close() // nolint: errcheck
var nextN int // apply N msgs in a row
var msg *TimedWALMessage

View File

@@ -490,12 +490,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
if !found {
return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1))
}
-defer func() {
-if err := gr.Close(); err != nil {
-wal.Logger.Error("Error closing wal Search", "err", err)
-return
-}
-}()
+defer gr.Close()
// log.Notice("Build a blockchain by reading from the WAL")

View File

@@ -372,7 +372,7 @@ func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType)
// enterNewRound(height, 0) at cs.StartTime.
func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) {
//cs.Logger.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime)
-sleepDuration := rs.StartTime.Sub(time.Now()) // nolint (gotype)
+sleepDuration := rs.StartTime.Sub(time.Now()) // nolint: gotype
cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight)
}

View File

@@ -1,4 +1,4 @@
-package node // nolint (goimports)
+package node
import (
"time"

View File

@@ -32,16 +32,8 @@ func TestMConnectionSend(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := netPipe()
-defer func() {
-if err := server.Close(); err != nil {
-t.Error(err)
-}
-}()
-defer func() {
-if err := client.Close(); err != nil {
-t.Error(err)
-}
-}()
+defer server.Close() // nolint: errcheck
+defer client.Close() // nolint: errcheck
mconn := createTestMConnection(client)
_, err := mconn.Start()
@@ -73,16 +65,8 @@ func TestMConnectionReceive(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := netPipe()
-defer func() {
-if err := server.Close(); err != nil {
-t.Error(err)
-}
-}()
-defer func() {
-if err := client.Close(); err != nil {
-t.Error(err)
-}
-}()
+defer server.Close() // nolint: errcheck
+defer client.Close() // nolint: errcheck
receivedCh := make(chan []byte)
errorsCh := make(chan interface{})
@@ -119,16 +103,8 @@ func TestMConnectionStatus(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := netPipe()
-defer func() {
-if err := server.Close(); err != nil {
-t.Error(err)
-}
-}()
-defer func() {
-if err := client.Close(); err != nil {
-t.Error(err)
-}
-}()
+defer server.Close() // nolint: errcheck
+defer client.Close() // nolint: errcheck
mconn := createTestMConnection(client)
_, err := mconn.Start()
@@ -144,16 +120,8 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
assert, require := assert.New(t), require.New(t)
server, client := netPipe()
-defer func() {
-if err := server.Close(); err != nil {
-t.Error(err)
-}
-}()
-defer func() {
-if err := client.Close(); err != nil {
-t.Error(err)
-}
-}()
+defer server.Close() // nolint: errcheck
+defer client.Close() // nolint: errcheck
receivedCh := make(chan []byte)
errorsCh := make(chan interface{})
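The pattern in these test hunks — dropping the wrapping closure and discarding Close errors directly — looks like this in a self-contained form. A sketch assuming nothing beyond the standard library (package name, test name, and the ping payload are made up); net.Pipe stands in for the repo's netPipe helper:

```go
package conn // hypothetical package for illustration

import (
	"net"
	"testing"
)

func TestPipeTeardown(t *testing.T) {
	server, client := net.Pipe()
	// Compact teardown: Close errors are intentionally dropped and
	// errcheck is silenced inline instead of via a wrapping closure.
	defer server.Close() // nolint: errcheck
	defer client.Close() // nolint: errcheck

	// net.Pipe is synchronous, so read in the background before writing.
	go func() {
		buf := make([]byte, 4)
		server.Read(buf) // nolint: errcheck
	}()
	if _, err := client.Write([]byte("ping")); err != nil {
		t.Fatal(err)
	}
}
```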

View File

@@ -143,7 +143,7 @@ func (fc *FuzzedConnection) fuzz() bool {
} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn {
// XXX: can't this fail because machine precision?
// XXX: do we need an error?
-fc.Close() // nolint (errcheck)
+fc.Close() // nolint: errcheck
return true
} else if r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep {
time.Sleep(fc.randomDuration())

View File

@@ -20,11 +20,7 @@ func TestPEXReactorBasic(t *testing.T) {
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
-defer func() {
-if err := os.RemoveAll(dir); err != nil {
-t.Error(err)
-}
-}()
+defer os.RemoveAll(dir) // nolint: errcheck
book := NewAddrBook(dir+"addrbook.json", true)
book.SetLogger(log.TestingLogger())
@@ -40,11 +36,7 @@ func TestPEXReactorAddRemovePeer(t *testing.T) {
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
-defer func() {
-if err := os.RemoveAll(dir); err != nil {
-t.Error(err)
-}
-}()
+defer os.RemoveAll(dir) // nolint: errcheck
book := NewAddrBook(dir+"addrbook.json", true)
book.SetLogger(log.TestingLogger())
@@ -77,11 +69,7 @@ func TestPEXReactorRunning(t *testing.T) {
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
-defer func() {
-if err := os.RemoveAll(dir); err != nil {
-t.Error(err)
-}
-}()
+defer os.RemoveAll(dir) // nolint: errcheck
book := NewAddrBook(dir+"addrbook.json", false)
book.SetLogger(log.TestingLogger())
@@ -151,11 +139,7 @@ func TestPEXReactorReceive(t *testing.T) {
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
-defer func() {
-if err := os.RemoveAll(dir); err != nil {
-t.Error(err)
-}
-}()
+defer os.RemoveAll(dir) // nolint: errcheck
book := NewAddrBook(dir+"addrbook.json", false)
book.SetLogger(log.TestingLogger())
@@ -180,11 +164,7 @@ func TestPEXReactorAbuseFromPeer(t *testing.T) {
dir, err := ioutil.TempDir("", "pex_reactor")
require.Nil(err)
-defer func() {
-if err := os.RemoveAll(dir); err != nil {
-t.Error(err)
-}
-}()
+defer os.RemoveAll(dir) // nolint: errcheck
book := NewAddrBook(dir+"addrbook.json", true)
book.SetLogger(log.TestingLogger())

View File

@@ -40,7 +40,7 @@ func Discover() (nat NAT, err error) {
return
}
socket := conn.(*net.UDPConn)
-defer socket.Close() // nolint (errcheck)
+defer socket.Close() // nolint: errcheck
if err := socket.SetDeadline(time.Now().Add(3 * time.Second)); err != nil {
return nil, err
@@ -197,7 +197,7 @@ func getServiceURL(rootURL string) (url, urnDomain string, err error) {
if err != nil {
return
}
-defer r.Body.Close() // nolint (errcheck)
+defer r.Body.Close() // nolint: errcheck
if r.StatusCode >= 400 {
err = errors.New(string(r.StatusCode))
@@ -296,7 +296,7 @@ func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) {
var response *http.Response
response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain)
if response != nil {
-defer response.Body.Close() // nolint (errcheck)
+defer response.Body.Close() // nolint: errcheck
}
if err != nil {
return
@@ -345,7 +345,7 @@ func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int
var response *http.Response
response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain)
if response != nil {
-defer response.Body.Close() // nolint (errcheck)
+defer response.Body.Close() // nolint: errcheck
}
if err != nil {
return
@@ -371,7 +371,7 @@ func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort
var response *http.Response
response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain)
if response != nil {
-defer response.Body.Close() // nolint (errcheck)
+defer response.Body.Close() // nolint: errcheck
}
if err != nil {
return

View File

@@ -7,15 +7,9 @@ import (
// doubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes.
func doubleSha256(b []byte) []byte {
hasher := sha256.New()
-_, err := hasher.Write(b)
-if err != nil {
-panic(err)
-}
+_, _ = hasher.Write(b) // error ignored
sum := hasher.Sum(nil)
hasher.Reset()
-_, err = hasher.Write(sum)
-if err != nil {
-panic(err)
-}
+_, _ = hasher.Write(sum) // error ignored
return hasher.Sum(nil)
}

View File

@@ -49,7 +49,7 @@ func (a ABCIApp) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error
c := a.App.CheckTx(tx)
// and this gets written in a background thread...
if c.IsOK() {
-go func() { a.App.DeliverTx(tx) }() // nolint (errcheck)
+go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
}
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
}
@@ -58,7 +58,7 @@ func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
c := a.App.CheckTx(tx)
// and this gets written in a background thread...
if c.IsOK() {
-go func() { a.App.DeliverTx(tx) }() // nolint (errcheck)
+go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
}
return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
}

View File

@@ -25,7 +25,7 @@ func StartGRPCServer(protoAddr string) (net.Listener, error) {
grpcServer := grpc.NewServer()
RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{})
-go grpcServer.Serve(ln) // nolint (errcheck)
+go grpcServer.Serve(ln) // nolint: errcheck
return ln, nil
}

View File

@@ -93,7 +93,7 @@ func (c *JSONRPCClient) Call(method string, params map[string]interface{}, resul
if err != nil {
return nil, err
}
-defer httpResponse.Body.Close() // nolint (errcheck)
+defer httpResponse.Body.Close() // nolint: errcheck
responseBytes, err := ioutil.ReadAll(httpResponse.Body)
if err != nil {
@@ -129,7 +129,7 @@ func (c *URIClient) Call(method string, params map[string]interface{}, result in
if err != nil {
return nil, err
}
-defer resp.Body.Close() // nolint (errcheck)
+defer resp.Body.Close() // nolint: errcheck
responseBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
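Both Call methods now use the same idiom for HTTP responses: defer the body close and drop its error. A standard-library-only sketch of that shape (function name and URL are placeholders, not part of the client above):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// get fetches a URL and reads the body; the Close error is deliberately
// discarded via a plain defer plus a nolint directive.
func get(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close() // nolint: errcheck
	return ioutil.ReadAll(resp.Body)
}

func main() {
	body, err := get("https://example.com/")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(len(body), "bytes")
}
```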

View File

@@ -354,7 +354,7 @@ func (c *WSClient) writeRoutine() {
ticker.Stop()
if err := c.conn.Close(); err != nil {
// ignore error; it will trigger in tests
-// likely because it's closing and already closed connection
+// likely because it's closing an already closed connection
}
c.wg.Done()
}()
@@ -406,7 +406,7 @@ func (c *WSClient) readRoutine() {
defer func() {
if err := c.conn.Close(); err != nil {
// ignore error; it will trigger in tests
-// likely because it's closing and already closed connection
+// likely because it's closing an already closed connection
}
c.wg.Done()
}()

View File

@@ -34,11 +34,7 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if err != nil {
panic(err)
}
-defer func() {
-if err := conn.Close(); err != nil {
-panic(err)
-}
-}()
+defer conn.Close() // nolint: errcheck
for {
messageType, _, err := conn.ReadMessage()
if err != nil {

View File

@@ -782,8 +782,5 @@ func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[st
buf.WriteString("</body></html>")
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(200)
-_, err := w.Write(buf.Bytes())
-if err != nil {
-// ignore error
-}
+_, _ = w.Write(buf.Bytes()) // error ignored
}
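On the server side the equivalent idiom is to write headers and status first, then discard the body write's error explicitly with blank identifiers so errcheck stays quiet without a directive. A minimal handler sketch (handler name, route, address, and body are illustrative):

```go
package main

import "net/http"

// endpoints mirrors the shape of writeListOfEndpoints above.
func endpoints(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html")
	w.WriteHeader(200)
	_, _ = w.Write([]byte("<html><body>available endpoints</body></html>")) // error ignored
}

func main() {
	http.HandleFunc("/", endpoints)
	_ = http.ListenAndServe("localhost:8080", nil) // error ignored; sketch only
}
```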

View File

@@ -56,7 +56,7 @@ func WriteRPCResponseHTTPError(w http.ResponseWriter, httpCode int, res types.RP
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(httpCode)
-_, _ = w.Write(jsonBytes) // ignoring error
+_, _ = w.Write(jsonBytes) // error ignored
}
func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) {
@@ -66,7 +66,7 @@ func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) {
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(200)
-_, _ = w.Write(jsonBytes) // ignoring error
+_, _ = w.Write(jsonBytes) // error ignored
}
//-----------------------------------------------------------------------------

View File

@@ -40,11 +40,7 @@ func benchmarkTxIndex(txsCount int, b *testing.B) {
if err != nil {
b.Fatal(err)
}
-defer func() {
-if err := os.RemoveAll(dir); err != nil {
-b.Fatal(err)
-}
-}()
+defer os.RemoveAll(dir) // nolint: errcheck
store := db.NewDB("tx_index", "leveldb", dir)
indexer := &TxIndex{store: store}

View File

@@ -34,7 +34,7 @@ func (part *Part) Hash() []byte {
return part.hash
} else {
hasher := ripemd160.New()
-_, _ = hasher.Write(part.Bytes) // ignoring error
+_, _ = hasher.Write(part.Bytes) // error ignored
part.hash = hasher.Sum(nil)
return part.hash
}