Merge pull request #14933 from egonelbre/megacheck_eth

eth: fix megacheck warnings
Péter Szilágyi 2017-08-08 09:59:52 +03:00 committed by GitHub
commit 43437806fb
7 changed files with 17 additions and 40 deletions

eth/api.go

@@ -465,26 +465,6 @@ func (api *PrivateDebugAPI) traceBlock(block *types.Block, logConfig *vm.LogConf
 	return true, structLogger.StructLogs(), nil
 }
 
-// callmsg is the message type used for call transitions.
-type callmsg struct {
-	addr          common.Address
-	to            *common.Address
-	gas, gasPrice *big.Int
-	value         *big.Int
-	data          []byte
-}
-
-// accessor boilerplate to implement core.Message
-func (m callmsg) From() (common.Address, error)         { return m.addr, nil }
-func (m callmsg) FromFrontier() (common.Address, error) { return m.addr, nil }
-func (m callmsg) Nonce() uint64                         { return 0 }
-func (m callmsg) CheckNonce() bool                      { return false }
-func (m callmsg) To() *common.Address                   { return m.to }
-func (m callmsg) GasPrice() *big.Int                    { return m.gasPrice }
-func (m callmsg) Gas() *big.Int                         { return m.gas }
-func (m callmsg) Value() *big.Int                       { return m.value }
-func (m callmsg) Data() []byte                          { return m.data }
-
 // formatError formats a Go error into either an empty string or the data content
 // of the error itself.
 func formatError(err error) string {
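Note: the entire callmsg type and its core.Message accessor boilerplate are deleted because nothing in the package references them anymore, which is exactly the kind of dead code megacheck's unused checker reports. A minimal sketch of the situation, in a hypothetical throwaway package:

```go
// demo.go — a hypothetical stand-alone package, used only to illustrate the check.
package demo

// callmsg is declared but never referenced anywhere in the package, so running
// megacheck (or its successor, staticcheck) over the package reports the type
// as unused dead code; deleting it is the fix.
type callmsg struct {
	data []byte
}
```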

eth/downloader/downloader_test.go

@@ -403,8 +403,7 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
 	dl.lock.Lock()
 	defer dl.lock.Unlock()
 
-	var err error
-	err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl, id, delay})
+	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl, id, delay})
 	if err == nil {
 		// Assign the owned hashes, headers and blocks to the peer (deep copy)
 		dl.peerHashes[id] = make([]common.Hash, len(hashes))
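Note: megacheck flags a variable declaration that is immediately overwritten by an assignment (staticcheck's S1021, "should merge variable declaration with assignment"), which is what the two-lines-into-one change above resolves. A self-contained sketch of the same simplification, with doWork as a hypothetical stand-in for the RegisterPeer call:

```go
package main

import "errors"

// doWork stands in for downloader.RegisterPeer in the diff above.
func doWork() error { return errors.New("boom") }

func main() {
	// megacheck flags the two-step form:
	//   var err error
	//   err = doWork()
	// and suggests merging the declaration with the assignment (S1021):
	var err = doWork()
	if err != nil {
		println(err.Error())
	}
}
```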
@@ -1381,7 +1380,7 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("peer-half", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1398,7 +1397,7 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("peer-full", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1454,7 +1453,7 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("fork A", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1474,7 +1473,7 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("fork B", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1535,7 +1534,7 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("faulty", nil, mode); err == nil {
-			t.Fatalf("succeeded faulty synchronisation")
+			panic("succeeded faulty synchronisation")
 		}
 	}()
 	<-starting
@@ -1552,7 +1551,7 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("valid", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1613,7 +1612,7 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("attack", nil, mode); err == nil {
-			t.Fatalf("succeeded attacker synchronisation")
+			panic("succeeded attacker synchronisation")
 		}
 	}()
 	<-starting
@@ -1630,7 +1629,7 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("valid", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
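Note on the repeated t.Fatalf → panic changes above: t.Fatalf calls t.FailNow, which in turn calls runtime.Goexit, and that is only valid on the goroutine running the test function; from any other goroutine it silently terminates just that goroutine and the failure may never be reported (staticcheck's SA2002 covers this). Panicking instead at least fails the test loudly. An alternative pattern — not what this PR does, since these goroutines must keep running alongside the <-starting/progress handshakes — is to ship the error back to the test goroutine over a channel. A minimal sketch, with doSync as a hypothetical stand-in for the tester.sync calls:

```go
package demo

import "testing"

// doSync stands in for the tester.sync calls in the diff above.
func doSync() error { return nil }

func TestGoroutineFailure(t *testing.T) {
	errc := make(chan error, 1)
	go func() {
		// Send the result back instead of calling t.Fatalf here:
		// t.Fatalf runs t.FailNow, which calls runtime.Goexit and is
		// only valid on the goroutine running the test function.
		errc <- doSync()
	}()
	if err := <-errc; err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}
```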

eth/filters/api.go

@@ -54,7 +54,6 @@ type PublicFilterAPI struct {
 	backend   Backend
 	useMipMap bool
 	mux       *event.TypeMux
-	quit      chan struct{}
 	chainDb   ethdb.Database
 	events    *EventSystem
 	filtersMu sync.Mutex

eth/filters/filter.go

@@ -20,7 +20,6 @@ import (
 	"context"
 	"math"
 	"math/big"
-	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
@@ -42,8 +41,6 @@ type Filter struct {
 	backend   Backend
 	useMipMap bool
 
-	created time.Time
-
 	db         ethdb.Database
 	begin, end int64
 	addresses  []common.Address

eth/filters/filter_system.go

@@ -74,7 +74,6 @@ type subscription struct {
 // subscription which match the subscription criteria.
 type EventSystem struct {
 	mux       *event.TypeMux
-	sub       *event.TypeMuxSubscription
 	backend   Backend
 	lightMode bool
 	lastHead  *types.Header
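Note: the three eth/filters hunks above are all the same class of fix — struct fields that nothing ever reads (quit on PublicFilterAPI, created on Filter, sub on EventSystem) are dropped, and the "time" import goes with the created field that was its last user. They are field-level instances of the same unused-code diagnostic as the callmsg removal earlier.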

eth/filters/filter_system_test.go

@@ -18,6 +18,7 @@ package filters
 
 import (
 	"context"
+	"fmt"
 	"math/big"
 	"reflect"
 	"testing"
@@ -439,15 +440,15 @@ func TestPendingLogsSubscription(t *testing.T) {
 		}
 
 		if len(fetched) != len(tt.expected) {
-			t.Fatalf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
+			panic(fmt.Sprintf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)))
 		}
 
 		for l := range fetched {
 			if fetched[l].Removed {
-				t.Errorf("expected log not to be removed for log %d in case %d", l, i)
+				panic(fmt.Sprintf("expected log not to be removed for log %d in case %d", l, i))
 			}
 			if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
-				t.Errorf("invalid log on index %d for case %d", l, i)
+				panic(fmt.Sprintf("invalid log on index %d for case %d", l, i))
 			}
 		}
 	}()
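Note: this is the same conversion applied inside the test's own goroutine, with "fmt" newly imported for the fmt.Sprintf calls. Strictly, only the FailNow-based helpers like t.Fatalf are illegal off the test goroutine; t.Errorf is documented as safe to call concurrently, so turning the two Errorf calls into panics as well is presumably for uniform failure handling within the goroutine rather than a hard requirement of the checker.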

eth/sync.go

@@ -138,7 +138,9 @@ func (pm *ProtocolManager) syncer() {
 	defer pm.downloader.Terminate()
 
 	// Wait for different events to fire synchronisation operations
-	forceSync := time.Tick(forceSyncCycle)
+	forceSync := time.NewTicker(forceSyncCycle)
+	defer forceSync.Stop()
+
 	for {
 		select {
 		case <-pm.newPeerCh:
@@ -148,7 +150,7 @@
 			}
 			go pm.synchronise(pm.peers.BestPeer())
 
-		case <-forceSync:
+		case <-forceSync.C:
 			// Force a sync even if not enough peers are present
 			go pm.synchronise(pm.peers.BestPeer())
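Note: time.Tick returns only a channel, so the ticker it creates can never be stopped, and megacheck reports it as a leak (staticcheck's SA1015); it is tolerable only in endless, program-lifetime loops. syncer does run for the protocol manager's lifetime, but the time.NewTicker form with a deferred Stop is just as simple, keeps the checker quiet, and releases the ticker if the function ever returns. The pattern in isolation, as a runnable sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// time.Tick(d) would leak its ticker: there is no handle to stop it.
	// NewTicker hands back the ticker itself so we can release it.
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop() // releases the ticker's resources on return

	deadline := time.After(350 * time.Millisecond)
	for {
		select {
		case t := <-ticker.C: // ticks are delivered on the ticker's C channel
			fmt.Println("tick at", t.Format(time.StampMilli))
		case <-deadline:
			return
		}
	}
}
```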