eth, eth/downloader: Moved block processing & graceful shutdown

The downloader is no longer responsible for processing blocks. The
eth-protocol handler now takes care of this instead.

Added graceful shutdown during block processing. Closes #846
obscuren committed on 2015-05-01 00:23:51 +02:00
commit 016f152b36 (parent 8595198c1b)
6 changed files with 347 additions and 291 deletions
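
In outline, the change works like the sketch below (condensed from the diffs that follow, not a verbatim excerpt): the downloader now only fetches hashes and blocks into its queue, while the eth protocol handler periodically drains that queue and feeds the chain manager, checking a quit flag between batches so a shutdown request can interrupt processing.

	// handler side, driven by a ticker (see eth/sync.go below)
	blocks := pm.downloader.TakeBlocks() // contiguous blocks whose first parent is known
	if len(blocks) > 0 {
		defer pm.downloader.Done() // lets the queue reset once the batch is handled
		for len(blocks) != 0 && !pm.quit {
			max := int(math.Min(float64(len(blocks)), float64(blockProcAmount)))
			if _, err := pm.chainman.InsertChain(blocks[:max]); err != nil {
				return err
			}
			blocks = blocks[max:]
		}
	}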

eth/backend.go

@@ -219,7 +219,7 @@ func New(config *Config) (*Ethereum, error) {
 	}

 	eth.chainManager = core.NewChainManager(blockDb, stateDb, eth.EventMux())
-	eth.downloader = downloader.New(eth.chainManager.HasBlock, eth.chainManager.InsertChain)
+	eth.downloader = downloader.New(eth.chainManager.HasBlock, eth.chainManager.GetBlock)
 	eth.pow = ethash.New(eth.chainManager)
 	eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State, eth.chainManager.GasLimit)
 	eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.txPool, eth.chainManager, eth.EventMux())
@@ -454,8 +454,8 @@ func (self *Ethereum) SuggestPeer(nodeURL string) error {
 func (s *Ethereum) Stop() {
 	s.txSub.Unsubscribe() // quits txBroadcastLoop

-	s.chainManager.Stop()
 	s.protocolManager.Stop()
+	s.chainManager.Stop()
 	s.txPool.Stop()
 	s.eventMux.Stop()

 	if s.whisper != nil {
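
The reordering in Stop is the backend half of the graceful shutdown: the protocol manager may be mid-way through inserting downloaded blocks, so it must be stopped (and waited on) before the chain manager it writes to goes away. A condensed sketch of the resulting teardown order, with the whisper handling and remaining fields omitted:

	func (s *Ethereum) Stop() {
		s.txSub.Unsubscribe()    // quits txBroadcastLoop
		s.protocolManager.Stop() // blocks until in-flight block processing finishes
		s.chainManager.Stop()    // safe now: nothing is writing to the chain any more
		s.txPool.Stop()
		s.eventMux.Stop()
	}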

eth/downloader/downloader.go

@@ -3,14 +3,11 @@ package downloader
 import (
 	"errors"
 	"fmt"
-	"math"
-	"math/big"
 	"sync"
 	"sync/atomic"
 	"time"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
@@ -27,16 +24,21 @@ var (
 	minDesiredPeerCount = 5                // Amount of peers desired to start syncing
 	blockTtl            = 20 * time.Second // The amount of time it takes for a block request to time out

 	errLowTd            = errors.New("peer's TD is too low")
 	errBusy             = errors.New("busy")
 	errUnknownPeer      = errors.New("peer's unknown or unhealthy")
 	ErrBadPeer          = errors.New("action from bad peer ignored")
-	errTimeout          = errors.New("timeout")
-	errEmptyHashSet     = errors.New("empty hash set by peer")
-	errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
+	errNoPeers             = errors.New("no peers to keep download active")
+	errPendingQueue        = errors.New("pending items in queue")
+	errTimeout             = errors.New("timeout")
+	errEmptyHashSet        = errors.New("empty hash set by peer")
+	errPeersUnavailable    = errors.New("no peers available or all peers tried for block download process")
+	errAlreadyInPool       = errors.New("hash already in pool")
+	errBlockNumberOverflow = errors.New("received block which overflows")
 )

 type hashCheckFn func(common.Hash) bool
+type getBlockFn func(common.Hash) *types.Block
 type chainInsertFn func(types.Blocks) (int, error)
 type hashIterFn func() (common.Hash, error)
@@ -58,13 +60,12 @@ type Downloader struct {
 	activePeer string

 	// Callbacks
 	hasBlock hashCheckFn
-	insertChain chainInsertFn
+	getBlock getBlockFn

 	// Status
 	fetchingHashes    int32
 	downloadingBlocks int32
-	processingBlocks  int32

 	// Channels
 	newPeerCh chan *peer
@@ -72,15 +73,15 @@ type Downloader struct {
 	blockCh chan blockPack
 }

-func New(hasBlock hashCheckFn, insertChain chainInsertFn) *Downloader {
+func New(hasBlock hashCheckFn, getBlock getBlockFn) *Downloader {
 	downloader := &Downloader{
 		queue:     newqueue(),
 		peers:     make(peers),
 		hasBlock:  hasBlock,
-		insertChain: insertChain,
+		getBlock:  getBlock,
 		newPeerCh: make(chan *peer, 1),
 		hashCh:    make(chan []common.Hash, 1),
 		blockCh:   make(chan blockPack, 1),
 	}

 	return downloader
@@ -126,6 +127,12 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {
 		return errBusy
 	}

+	// When a synchronisation attempt is made while the queue still
+	// contains items we abort the sync attempt
+	if d.queue.size() > 0 {
+		return errPendingQueue
+	}
+
 	// Fetch the peer using the id or throw an error if the peer couldn't be found
 	p := d.peers[id]
 	if p == nil {
@@ -138,30 +145,87 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {
 		return err
 	}

-	return d.process(p)
+	return nil
 }

-func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) error {
+// Done lets the downloader know that whatever previous hashes were taken
+// are processed. If the block count reaches zero and done is called
+// we reset the queue for the next batch of incoming hashes and blocks.
+func (d *Downloader) Done() {
+	d.queue.mu.Lock()
+	defer d.queue.mu.Unlock()
+
+	if len(d.queue.blocks) == 0 {
+		d.queue.resetNoTS()
+	}
+}
+
+// TakeBlocks takes blocks from the queue and yields them to the caller;
+// it's possible it yields no blocks.
+func (d *Downloader) TakeBlocks() types.Blocks {
+	d.queue.mu.Lock()
+	defer d.queue.mu.Unlock()
+
+	var blocks types.Blocks
+	if len(d.queue.blocks) > 0 {
+		// Make sure the parent hash is known
+		if d.queue.blocks[0] != nil && !d.hasBlock(d.queue.blocks[0].ParentHash()) {
+			return nil
+		}
+
+		for _, block := range d.queue.blocks {
+			if block == nil {
+				break
+			}
+			blocks = append(blocks, block)
+		}
+		d.queue.blockOffset += len(blocks)
+		// delete the blocks from the slice and let them be garbage collected
+		// without this slice trick the blocks would stay in memory until nil
+		// would be assigned to d.queue.blocks
+		copy(d.queue.blocks, d.queue.blocks[len(blocks):])
+		for k, n := len(d.queue.blocks)-len(blocks), len(d.queue.blocks); k < n; k++ {
+			d.queue.blocks[k] = nil
+		}
+		d.queue.blocks = d.queue.blocks[:len(d.queue.blocks)-len(blocks)]
+		//d.queue.blocks = d.queue.blocks[len(blocks):]
+		if len(d.queue.blocks) == 0 {
+			d.queue.blocks = nil
+		}
+	}
+
+	return blocks
+}
+
+func (d *Downloader) Has(hash common.Hash) bool {
+	return d.queue.has(hash)
+}
+
+func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) (err error) {
 	d.activePeer = p.id
+	defer func() {
+		// reset on error
+		if err != nil {
+			d.queue.reset()
+		}
+	}()

 	glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id)
 	// Start the fetcher. This will block the update entirely
 	// interrupts need to be sent to the appropriate channels
 	// respectively.
-	if err := d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
-		// handle error
-		glog.V(logger.Debug).Infoln("Error fetching hashes:", err)
-		// XXX Reset
+	if err = d.startFetchingHashes(p, hash, ignoreInitial); err != nil {
 		return err
 	}

 	// Start fetching blocks in parallel. The strategy is simple:
 	// take any available peers, reserve a chunk for each peer available,
 	// let the peer deliver the chunk and periodically check if a peer
-	// has timed out. When done downloading, process blocks.
-	if err := d.startFetchingBlocks(p); err != nil {
-		glog.V(logger.Debug).Infoln("Error downloading blocks:", err)
-		// XXX reset
+	// has timed out.
+	if err = d.startFetchingBlocks(p); err != nil {
 		return err
 	}
@@ -175,6 +239,10 @@ func (d *Downloader) startFetchingHashes(p *peer, hash common.Hash, ignoreInitial bool) error {
 	atomic.StoreInt32(&d.fetchingHashes, 1)
 	defer atomic.StoreInt32(&d.fetchingHashes, 0)

+	if d.queue.has(hash) {
+		return errAlreadyInPool
+	}
+
 	glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", hash.Bytes()[:4], p.id)

 	start := time.Now()
@@ -196,10 +264,13 @@ out:
 		case hashes := <-d.hashCh:
 			failureResponseTimer.Reset(hashTtl)

-			var done bool // determines whether we're done fetching hashes (i.e. common hash found)
+			var (
+				done bool        // determines whether we're done fetching hashes (i.e. common hash found)
+				hash common.Hash // current and common hash
+			)
 			hashSet := set.New()
-			for _, hash := range hashes {
-				if d.hasBlock(hash) {
+			for _, hash = range hashes {
+				if d.hasBlock(hash) || d.queue.has(hash) {
 					glog.V(logger.Debug).Infof("Found common hash %x\n", hash[:4])

 					done = true
@@ -220,6 +291,14 @@ out:
 				// Get the next set of hashes
 				p.getHashes(hashes[len(hashes)-1])
 			} else { // we're done
+				// The offset of the queue is determined by the highest known block
+				var offset int
+				if block := d.getBlock(hash); block != nil {
+					offset = int(block.NumberU64() + 1)
+				}
+				// allocate proper size for the queue
+				d.queue.alloc(offset, d.queue.hashPool.Size())
+
 				break out
 			}
 		case <-failureResponseTimer.C:
@@ -257,11 +336,27 @@ out:
 			// If the peer was previously banned and failed to deliver its pack
 			// in a reasonable time frame, ignore its message.
 			if d.peers[blockPack.peerId] != nil {
+				err := d.queue.deliver(blockPack.peerId, blockPack.blocks)
+				if err != nil {
+					glog.V(logger.Debug).Infof("deliver failed for peer %s: %v\n", blockPack.peerId, err)
+					// FIXME d.UnregisterPeer(blockPack.peerId)
+					break
+				}
+
+				if glog.V(logger.Debug) {
+					glog.Infof("adding %d blocks from: %s\n", len(blockPack.blocks), blockPack.peerId)
+				}
 				d.peers[blockPack.peerId].promote()
-				d.queue.deliver(blockPack.peerId, blockPack.blocks)
 				d.peers.setState(blockPack.peerId, idleState)
 			}
 		case <-ticker.C:
+			// after removing bad peers make sure we actually have sufficient peers left to keep downloading
+			if len(d.peers) == 0 {
+				d.queue.reset()
+
+				return errNoPeers
+			}
+
 			// If there are unrequested hashes left start fetching
 			// from the available peers.
 			if d.queue.hashPool.Size() > 0 {
@@ -310,7 +405,7 @@ out:
 				if time.Since(chunk.itime) > blockTtl {
 					badPeers = append(badPeers, pid)
 					// remove peer as good peer from peer list
-					//d.UnregisterPeer(pid)
+					// FIXME d.UnregisterPeer(pid)
 				}
 			}
 			d.queue.mu.Unlock()
@@ -364,114 +459,6 @@ func (d *Downloader) AddHashes(id string, hashes []common.Hash) error {
 	return nil
 }

-// Add an (unrequested) block to the downloader. This is usually done through the
-// NewBlockMsg by the protocol handler.
-// Adding blocks is done synchronously. If there are missing blocks, blocks will be
-// fetched first. If the downloader is busy or if some other process failed an error
-// will be returned.
-func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) error {
-	hash := block.Hash()
-
-	if d.hasBlock(hash) {
-		return fmt.Errorf("known block %x", hash.Bytes()[:4])
-	}
-
-	peer := d.peers.getPeer(id)
-	// if the peer is in our healthy list of peers; update the td
-	// and add the block. Otherwise just ignore it
-	if peer == nil {
-		glog.V(logger.Detail).Infof("Ignored block from bad peer %s\n", id)
-		return ErrBadPeer
-	}
-
-	peer.mu.Lock()
-	peer.recentHash = block.Hash()
-	peer.mu.Unlock()
-	peer.promote()
-
-	glog.V(logger.Detail).Infoln("Inserting new block from:", id)
-	d.queue.addBlock(id, block)
-
-	// if not busy, go ahead and process
-	if d.isBusy() {
-		return errBusy
-	}
-
-	// Check if the parent of the received block is known.
-	// If the parent is unknown, request it; otherwise, process.
-	phash := block.ParentHash()
-	if !d.hasBlock(phash) {
-		glog.V(logger.Detail).Infof("Missing parent %x, requires fetching\n", phash.Bytes()[:4])
-
-		// Get the missing hashes from the peer (synchronously)
-		err := d.getFromPeer(peer, peer.recentHash, true)
-		if err != nil {
-			return err
-		}
-	}
-
-	return d.process(peer)
-}
-
-func (d *Downloader) process(peer *peer) error {
-	atomic.StoreInt32(&d.processingBlocks, 1)
-	defer atomic.StoreInt32(&d.processingBlocks, 0)
-
-	// XXX this will move when optimised
-	// Sort the blocks by number. This bit needs much improvement. Right now
-	// it assumes full honesty from peers (i.e. it's not checked whether the blocks
-	// link). We should at least check which blocks match. This code could move
-	// to a separate goroutine where it periodically checks for linked pieces.
-	types.BlockBy(types.Number).Sort(d.queue.blocks)
-	if len(d.queue.blocks) == 0 {
-		return nil
-	}
-
-	var blocks = d.queue.blocks
-
-	glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].Number(), blocks[len(blocks)-1].Number())
-
-	// Loop until we're out of blocks
-	for len(blocks) != 0 {
-		max := int(math.Min(float64(len(blocks)), 256))
-		// TODO check for parent error. When there's a parent error we should stop
-		// processing and start requesting the `block.hash` so that its parent and
-		// grandparents can be requested and queued.
-		var i int
-		i, err := d.insertChain(blocks[:max])
-		if err != nil && core.IsParentErr(err) {
-			// Ignore the missing blocks. Handler should take care of anything that's missing.
-			glog.V(logger.Debug).Infof("Ignored block with missing parent (%d)\n", i)
-			blocks = blocks[i+1:]
-			continue
-		} else if err != nil {
-			// immediately unregister the false peer but do not disconnect
-			d.UnregisterPeer(d.activePeer)
-			// Reset chain completely. This needs much, much improvement.
-			// instead: check all blocks leading down to this false block and remove it
-			d.queue.blocks = nil
-
-			return ErrBadPeer
-		}
-
-		// delete the blocks from the slice and let them be garbage collected
-		// without this slice trick the blocks would stay in memory until nil
-		// would be assigned to d.queue.blocks
-		copy(blocks, blocks[max:])
-		for k, n := len(blocks)-max, len(blocks); k < n; k++ {
-			blocks[k] = nil
-		}
-		blocks = blocks[:len(blocks)-max]
-	}
-
-	if len(blocks) == 0 {
-		d.queue.blocks = nil
-	} else {
-		d.queue.blocks = blocks
-	}
-
-	return nil
-}
-
 func (d *Downloader) isFetchingHashes() bool {
 	return atomic.LoadInt32(&d.fetchingHashes) == 1
 }
@@ -480,12 +467,8 @@ func (d *Downloader) isDownloadingBlocks() bool {
 	return atomic.LoadInt32(&d.downloadingBlocks) == 1
 }

-func (d *Downloader) isProcessing() bool {
-	return atomic.LoadInt32(&d.processingBlocks) == 1
-}
-
 func (d *Downloader) isBusy() bool {
-	return d.isFetchingHashes() || d.isDownloadingBlocks() || d.isProcessing()
+	return d.isFetchingHashes() || d.isDownloadingBlocks()
 }

 func (d *Downloader) IsBusy() bool {
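
The copy-and-nil idiom in TakeBlocks (and in the removed process function) deserves a note: a plain reslice (blocks = blocks[n:]) keeps the consumed *types.Block pointers reachable through the backing array, so instead the remainder is copied to the front and the tail cleared before shrinking. A standalone sketch of the idiom:

	// consume drops the first n blocks of q in place, clearing the tail so
	// the consumed blocks can be garbage collected; a plain reslice would
	// keep them alive through the backing array.
	func consume(q []*types.Block, n int) []*types.Block {
		copy(q, q[n:])
		for k := len(q) - n; k < len(q); k++ {
			q[k] = nil
		}
		return q[:len(q)-n]
	}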

eth/downloader/downloader_test.go

@@ -8,8 +8,6 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/logger"
-	"github.com/ethereum/go-ethereum/logger/glog"
 )

 var knownHash = common.Hash{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
@@ -28,7 +26,7 @@ func createHashes(start, amount int) (hashes []common.Hash) {
 func createBlocksFromHashes(hashes []common.Hash) map[common.Hash]*types.Block {
 	blocks := make(map[common.Hash]*types.Block)
 	for i, hash := range hashes {
-		header := &types.Header{Number: big.NewInt(int64(i))}
+		header := &types.Header{Number: big.NewInt(int64(len(hashes) - i))}
 		blocks[hash] = types.NewBlockWithHeader(header)
 		blocks[hash].HeaderHash = hash
 	}
@@ -43,13 +41,11 @@ type downloadTester struct {
 	t      *testing.T
 	pcount int
 	done   chan bool
-
-	insertedBlocks int
 }

 func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types.Block) *downloadTester {
 	tester := &downloadTester{t: t, hashes: hashes, blocks: blocks, done: make(chan bool)}
-	downloader := New(tester.hasBlock, tester.insertChain)
+	downloader := New(tester.hasBlock, tester.getBlock)
 	tester.downloader = downloader

 	return tester
@@ -62,10 +58,8 @@ func (dl *downloadTester) hasBlock(hash common.Hash) bool {
 	return false
 }

-func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) {
-	dl.insertedBlocks += len(blocks)
-
-	return 0, nil
+func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
+	return dl.blocks[knownHash]
 }

 func (dl *downloadTester) getHashes(hash common.Hash) error {
@@ -102,9 +96,6 @@ func (dl *downloadTester) badBlocksPeer(id string, td *big.Int, hash common.Hash) {
 }

 func TestDownload(t *testing.T) {
-	glog.SetV(logger.Detail)
-	glog.SetToStderr(true)
-
 	minDesiredPeerCount = 4
 	blockTtl = 1 * time.Second
@@ -123,15 +114,13 @@ func TestDownload(t *testing.T) {
 		t.Error("download error", err)
 	}

-	if tester.insertedBlocks != targetBlocks {
-		t.Error("expected", targetBlocks, "have", tester.insertedBlocks)
+	inqueue := len(tester.downloader.queue.blocks)
+	if inqueue != targetBlocks {
+		t.Error("expected", targetBlocks, "have", inqueue)
 	}
 }

 func TestMissing(t *testing.T) {
-	glog.SetV(logger.Detail)
-	glog.SetToStderr(true)
-
 	targetBlocks := 1000
 	hashes := createHashes(0, 1000)
 	extraHashes := createHashes(1001, 1003)
@@ -148,7 +137,33 @@ func TestMissing(t *testing.T) {
 		t.Error("download error", err)
 	}

-	if tester.insertedBlocks != targetBlocks {
-		t.Error("expected", targetBlocks, "have", tester.insertedBlocks)
+	inqueue := len(tester.downloader.queue.blocks)
+	if inqueue != targetBlocks {
+		t.Error("expected", targetBlocks, "have", inqueue)
+	}
+}
+
+func TestTaking(t *testing.T) {
+	minDesiredPeerCount = 4
+	blockTtl = 1 * time.Second
+
+	targetBlocks := 1000
+	hashes := createHashes(0, targetBlocks)
+	blocks := createBlocksFromHashes(hashes)
+	tester := newTester(t, hashes, blocks)
+
+	tester.newPeer("peer1", big.NewInt(10000), hashes[0])
+	tester.newPeer("peer2", big.NewInt(0), common.Hash{})
+	tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
+	tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})

+	err := tester.downloader.Synchronise("peer1", hashes[0])
+	if err != nil {
+		t.Error("download error", err)
+	}
+
+	bs1 := tester.downloader.TakeBlocks()
+	if len(bs1) != 1000 {
+		t.Error("expected to take 1000, got", len(bs1))
 	}
 }
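
A note on the numbering change in createBlocksFromHashes: the tests sync toward hashes[0], i.e. the hash list runs head-first, and the queue now stores each block at a slot derived from its number, so the fabricated numbers must decrease from the head toward the common hash rather than simply count up. A tiny illustration of the mapping (using the helper's own variables):

	// with 1000 hashes: hashes[0] gets number 1000 (the head),
	// hashes[999] gets number 1, matching the queue's
	// number-minus-offset indexing in queue.go
	header := &types.Header{Number: big.NewInt(int64(len(hashes) - i))}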

eth/downloader/queue.go

@@ -18,7 +18,9 @@ type queue struct {
 	mu       sync.Mutex
 	fetching map[string]*chunk
-	blocks   []*types.Block
+
+	blockOffset int
+	blocks      []*types.Block
 }

 func newqueue() *queue {
@@ -34,6 +36,10 @@ func (c *queue) reset() {
 	c.mu.Lock()
 	defer c.mu.Unlock()

+	c.resetNoTS()
+}
+
+func (c *queue) resetNoTS() {
+	c.blockOffset = 0
 	c.hashPool.Clear()
 	c.fetchPool.Clear()
 	c.blockHashes.Clear()
@@ -41,6 +47,10 @@ func (c *queue) reset() {
 	c.fetching = make(map[string]*chunk)
 }

+func (c *queue) size() int {
+	return c.hashPool.Size() + c.blockHashes.Size() + c.fetchPool.Size()
+}
+
 // reserve a `max` set of hashes for `p` peer.
 func (c *queue) get(p *peer, max int) *chunk {
 	c.mu.Lock()
@@ -89,7 +99,7 @@ func (c *queue) get(p *peer, max int) *chunk {
 }

 func (c *queue) has(hash common.Hash) bool {
-	return c.hashPool.Has(hash) || c.fetchPool.Has(hash)
+	return c.hashPool.Has(hash) || c.fetchPool.Has(hash) || c.blockHashes.Has(hash)
 }

 func (c *queue) addBlock(id string, block *types.Block) {
@@ -103,8 +113,24 @@ func (c *queue) addBlock(id string, block *types.Block) {
 	}
 }

+func (c *queue) getBlock(hash common.Hash) *types.Block {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if !c.blockHashes.Has(hash) {
+		return nil
+	}
+
+	for _, block := range c.blocks {
+		if block.Hash() == hash {
+			return block
+		}
+	}
+	return nil
+}
+
 // deliver delivers a chunk to the queue that was requested of the peer
-func (c *queue) deliver(id string, blocks []*types.Block) {
+func (c *queue) deliver(id string, blocks []*types.Block) error {
 	c.mu.Lock()
 	defer c.mu.Unlock()
@@ -124,11 +150,35 @@ func (c *queue) deliver(id string, blocks []*types.Block) {
 		// merge block hashes
 		c.blockHashes.Merge(blockHashes)
 		// Add the blocks
-		c.blocks = append(c.blocks, blocks...)
+		for _, block := range blocks {
+			// See (1) for future limitation
+			n := int(block.NumberU64()) - c.blockOffset
+			if n > len(c.blocks) || n < 0 {
+				return errBlockNumberOverflow
+			}
+			c.blocks[n] = block
+		}
 		// Add back whatever couldn't be delivered
 		c.hashPool.Merge(chunk.hashes)
 		c.fetchPool.Separate(chunk.hashes)
 	}

+	return nil
+}
+
+func (c *queue) alloc(offset, size int) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.blockOffset < offset {
+		c.blockOffset = offset
+	}
+
+	// (1) XXX at some point we could limit allocation to memory and use the disk
+	// to store future blocks.
+	if len(c.blocks) < size {
+		c.blocks = append(c.blocks, make([]*types.Block, size)...)
+	}
 }

 // puts puts sets of hashes on to the queue for fetching
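
The offset scheme introduced here is the core of the new queue: alloc records the number of the first expected block (blockOffset) and sizes the slice, and deliver then writes each incoming block into the slot given by its number minus that offset. Blocks can therefore arrive from several peers out of order and still land in chain order. A worked example, under assumed values:

	// Suppose the last known block is #41, so the downloader called
	// alloc(42, 3) after fetching the hashes of blocks #42..#44.
	n := int(block.NumberU64()) - c.blockOffset // block #43 -> 43 - 42 = 1
	c.blocks[n] = block                         // slots: #42->0, #43->1, #44->2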

eth/handler.go

@@ -1,39 +1,5 @@
 package eth

-// XXX Fair warning, most of the code is re-used from the old protocol. Please be aware that most of this will actually change
-// The idea is that most of the calls within the protocol will become synchronous.
-// Block downloading and block processing will be completely separate processes
-/*
-# Possible scenarios
-
-// Syncing scenario
-// Use the best peer to synchronise
-blocks, err := pm.downloader.Synchronise()
-if err != nil {
-	// handle
-	break
-}
-pm.chainman.InsertChain(blocks)
-
-// Receiving block with known parent
-if parent_exist {
-	if err := pm.chainman.InsertChain(block); err != nil {
-		// handle
-		break
-	}
-	pm.BroadcastBlock(block)
-}
-
-// Receiving block with unknown parent
-blocks, err := pm.downloader.SynchroniseWithPeer(peer)
-if err != nil {
-	// handle
-	break
-}
-pm.chainman.InsertChain(blocks)
-
-*/
 import (
 	"fmt"
 	"math"
@@ -54,7 +20,9 @@ import (
 const (
 	peerCountTimeout    = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
-	minDesiredPeerCount = 5                // Amount of peers desired to start syncing
+	blockProcTimer      = 500 * time.Millisecond
+	minDesiredPeerCount = 5   // Amount of peers desired to start syncing
+	blockProcAmount     = 256
 )

 func errResp(code errCode, format string, v ...interface{}) error {
@@ -91,6 +59,10 @@ type ProtocolManager struct {
 	newPeerCh chan *peer
 	quitSync  chan struct{}
+	// wait group is used for graceful shutdowns during downloading
+	// and processing
+	wg   sync.WaitGroup
+	quit bool
 }

 // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
@@ -129,65 +101,6 @@ func (pm *ProtocolManager) removePeer(peer *peer) {
 	delete(pm.peers, peer.id)
 }

-func (pm *ProtocolManager) syncHandler() {
-	// itimer is used to determine when to start ignoring `minDesiredPeerCount`
-	itimer := time.NewTimer(peerCountTimeout)
-out:
-	for {
-		select {
-		case <-pm.newPeerCh:
-			// Meet the `minDesiredPeerCount` before we select our best peer
-			if len(pm.peers) < minDesiredPeerCount {
-				break
-			}
-
-			// Find the best peer
-			peer := getBestPeer(pm.peers)
-			if peer == nil {
-				glog.V(logger.Debug).Infoln("Sync attempt cancelled. No peers available")
-			}
-
-			itimer.Stop()
-			go pm.synchronise(peer)
-		case <-itimer.C:
-			// The timer will make sure that the downloader keeps an active state
-			// in which it attempts to always check the network for highest td peers
-			// Either select the peer or restart the timer if no peers could
-			// be selected.
-			if peer := getBestPeer(pm.peers); peer != nil {
-				go pm.synchronise(peer)
-			} else {
-				itimer.Reset(5 * time.Second)
-			}
-		case <-pm.quitSync:
-			break out
-		}
-	}
-}
-
-func (pm *ProtocolManager) synchronise(peer *peer) {
-	// Make sure the peer's TD is higher than our own. If not drop.
-	if peer.td.Cmp(pm.chainman.Td()) <= 0 {
-		return
-	}
-	// Check downloader if it's busy so it doesn't show the sync message
-	// for every attempt
-	if pm.downloader.IsBusy() {
-		return
-	}
-
-	glog.V(logger.Info).Infof("Synchronisation attempt using %s TD=%v\n", peer.id, peer.td)
-	// Get the hashes from the peer (synchronously)
-	err := pm.downloader.Synchronise(peer.id, peer.recentHash)
-	if err != nil && err == downloader.ErrBadPeer {
-		glog.V(logger.Debug).Infoln("removed peer from peer set due to bad action")
-		pm.removePeer(peer)
-	} else if err != nil {
-		// handle error
-		glog.V(logger.Debug).Infoln("error downloading:", err)
-	}
-}
-
 func (pm *ProtocolManager) Start() {
 	// broadcast transactions
 	pm.txSub = pm.eventMux.Subscribe(core.TxPreEvent{})
@@ -197,18 +110,26 @@ func (pm *ProtocolManager) Start() {
 	pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
 	go pm.minedBroadcastLoop()

-	// sync handler
-	go pm.syncHandler()
+	go pm.update()
 }

 func (pm *ProtocolManager) Stop() {
+	// Showing a log message. During download / process this could actually
+	// take between 5 to 10 seconds and therefore feedback is required.
+	glog.V(logger.Info).Infoln("Stopping ethereum protocol handler...")
+
+	pm.quit = true
 	pm.txSub.Unsubscribe()         // quits txBroadcastLoop
 	pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
 	close(pm.quitSync)             // quits the sync handler
+
+	// Wait for any process action
+	pm.wg.Wait()
+
+	glog.V(logger.Info).Infoln("Ethereum protocol handler stopped")
 }

 func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
 	td, current, genesis := pm.chainman.Status()
 	return newPeer(pv, nv, genesis, current, td, p, rw)
@@ -359,6 +280,9 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
 		// Add the block hash as a known hash to the peer. This will later be used to determine
 		// who should receive this.
 		p.blockHashes.Add(hash)
+		// update the peer info
+		p.recentHash = hash
+		p.td = request.TD

 		_, chainHead, _ := self.chainman.Status()
@@ -383,7 +307,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
 		// Attempt to insert the newly received block by checking if the parent exists.
 		// if the parent exists we process the block and propagate it to our peers,
-		// if the parent does not exist we delegate to the downloader.
+		// otherwise we synchronise with the peer.
 		if self.chainman.HasBlock(request.Block.ParentHash()) {
 			if _, err := self.chainman.InsertChain(types.Blocks{request.Block}); err != nil {
 				glog.V(logger.Error).Infoln("removed peer (", p.id, ") due to block error")
@@ -400,24 +324,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
 			}
 			self.BroadcastBlock(hash, request.Block)
 		} else {
-			// adding blocks is synchronous
-			go func() {
-				err := self.downloader.AddBlock(p.id, request.Block, request.TD)
-				if err != nil && err == downloader.ErrBadPeer {
-					glog.V(logger.Error).Infoln("removed peer (", p.id, ") with err:", err)
-					self.removePeer(p)
-					return
-				} else if err != nil {
-					glog.V(logger.Detail).Infoln("downloader err:", err)
-					return
-				}
-				if err := self.verifyTd(p, request); err != nil {
-					glog.V(logger.Error).Infoln(err)
-					return
-				}
-				self.BroadcastBlock(hash, request.Block)
-			}()
+			go self.synchronise(p)
 		}
 	default:
 		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
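
The graceful shutdown itself is the sync.WaitGroup plus quit-flag pair: each processing pass registers with the wait group and checks pm.quit between batches, while Stop raises the flag and blocks on wg.Wait() until the in-flight pass returns. Reduced to its moving parts (a sketch, not the verbatim code; haveWork stands in for the real loop condition):

	func (pm *ProtocolManager) processBlocks() error {
		pm.wg.Add(1)
		defer pm.wg.Done()
		for haveWork() && !pm.quit {
			// insert the next batch of blocks
		}
		return nil
	}

	func (pm *ProtocolManager) Stop() {
		pm.quit = true
		close(pm.quitSync)
		pm.wg.Wait() // returns once the current batch loop observes quit
	}

Worth noting as a design choice: quit is a plain bool read and written from different goroutines, which is tolerable as a best-effort shutdown hint but would not pass the race detector; a close-only channel would be the stricter alternative.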

eth/sync.go (new file, 101 lines)

@@ -0,0 +1,101 @@
+package eth
+
+import (
+	"math"
+	"time"
+
+	"github.com/ethereum/go-ethereum/eth/downloader"
+	"github.com/ethereum/go-ethereum/logger"
+	"github.com/ethereum/go-ethereum/logger/glog"
+)
+
+// Sync contains all synchronisation code for the eth protocol
+
+func (pm *ProtocolManager) update() {
+	// itimer is used to determine when to start ignoring `minDesiredPeerCount`
+	itimer := time.NewTimer(peerCountTimeout)
+	// btimer is used for picking up blocks from the downloader
+	btimer := time.NewTicker(blockProcTimer)
+out:
+	for {
+		select {
+		case <-pm.newPeerCh:
+			// Meet the `minDesiredPeerCount` before we select our best peer
+			if len(pm.peers) < minDesiredPeerCount {
+				break
+			}
+
+			// Find the best peer
+			peer := getBestPeer(pm.peers)
+			if peer == nil {
+				glog.V(logger.Debug).Infoln("Sync attempt cancelled. No peers available")
+			}
+
+			itimer.Stop()
+			go pm.synchronise(peer)
+		case <-itimer.C:
+			// The timer will make sure that the downloader keeps an active state
+			// in which it attempts to always check the network for highest td peers
+			// Either select the peer or restart the timer if no peers could
+			// be selected.
+			if peer := getBestPeer(pm.peers); peer != nil {
+				go pm.synchronise(peer)
+			} else {
+				itimer.Reset(5 * time.Second)
+			}
+		case <-btimer.C:
+			go pm.processBlocks()
+		case <-pm.quitSync:
+			break out
+		}
+	}
+}
+
+// processBlocks will attempt to reconstruct a chain by checking whether the first
+// item has a known parent. The first block in the chain may be unknown during
+// downloading. When the downloader isn't downloading, blocks with an unknown
+// parent are dropped until it has either depleted the list or found a known parent.
+func (pm *ProtocolManager) processBlocks() error {
+	pm.wg.Add(1)
+	defer pm.wg.Done()
+
+	blocks := pm.downloader.TakeBlocks()
+	if len(blocks) == 0 {
+		return nil
+	}
+	defer pm.downloader.Done()
+
+	glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].Number(), blocks[len(blocks)-1].Number())
+
+	for len(blocks) != 0 && !pm.quit {
+		max := int(math.Min(float64(len(blocks)), float64(blockProcAmount)))
+		_, err := pm.chainman.InsertChain(blocks[:max])
+		if err != nil {
+			return err
+		}
+		blocks = blocks[max:]
+	}
+	return nil
+}
+
+func (pm *ProtocolManager) synchronise(peer *peer) {
+	// Make sure the peer's TD is higher than our own. If not drop.
+	if peer.td.Cmp(pm.chainman.Td()) <= 0 {
+		return
+	}
+	// Check downloader if it's busy so it doesn't show the sync message
+	// for every attempt
+	if pm.downloader.IsBusy() {
+		return
+	}
+
+	// Get the hashes from the peer (synchronously)
+	err := pm.downloader.Synchronise(peer.id, peer.recentHash)
+	if err != nil && err == downloader.ErrBadPeer {
+		glog.V(logger.Debug).Infoln("removed peer from peer set due to bad action")
+		pm.removePeer(peer)
+	} else if err != nil {
+		// handle error
+		glog.V(logger.Detail).Infoln("error downloading:", err)
+	}
+}
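
A side note on the batching in processBlocks: the math.Min round-trip through float64 is just an integer minimum, capping each InsertChain call at blockProcAmount (256) blocks so the !pm.quit check gets a chance to fire between batches. An equivalent, float-free formulation of the loop body:

	max := len(blocks)
	if max > blockProcAmount {
		max = blockProcAmount // insert at most 256 blocks per pass
	}
	if _, err := pm.chainman.InsertChain(blocks[:max]); err != nil {
		return err
	}
	blocks = blocks[max:]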