From 97d2954e227049a089652d91e6fb0ea1c8115cc6 Mon Sep 17 00:00:00 2001 From: obscuren Date: Mon, 13 Apr 2015 17:22:32 +0200 Subject: [PATCH 01/26] eth: added downloader for syncing up the chain --- eth/backend.go | 6 ++- eth/protocol.go | 105 +++++++++++++++++++++++++++++++----------------- 2 files changed, 73 insertions(+), 38 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index c7a5b233f..a71d5721e 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -15,6 +15,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/logger" @@ -130,6 +131,7 @@ type Ethereum struct { accountManager *accounts.Manager whisper *whisper.Whisper pow *ethash.Ethash + downloader *downloader.Downloader net *p2p.Server eventMux *event.TypeMux @@ -194,6 +196,7 @@ func New(config *Config) (*Ethereum, error) { } eth.chainManager = core.NewChainManager(blockDb, stateDb, eth.EventMux()) + eth.downloader = downloader.New(eth.chainManager.HasBlock, eth.chainManager.InsertChain, eth.chainManager.Td) eth.pow = ethash.New(eth.chainManager) eth.txPool = core.NewTxPool(eth.EventMux(), eth.chainManager.State) eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.txPool, eth.chainManager, eth.EventMux()) @@ -212,7 +215,7 @@ func New(config *Config) (*Ethereum, error) { return nil, err } - ethProto := EthProtocol(config.ProtocolVersion, config.NetworkId, eth.txPool, eth.chainManager, eth.blockPool) + ethProto := EthProtocol(config.ProtocolVersion, config.NetworkId, eth.txPool, eth.chainManager, eth.blockPool, eth.downloader) protocols := []p2p.Protocol{ethProto} if config.Shh { protocols = append(protocols, eth.whisper.Protocol()) @@ -349,6 +352,7 @@ func (s *Ethereum) ClientVersion() string { return s.clientVersio func (s *Ethereum) EthVersion() int { return s.ethVersionId } func (s *Ethereum) NetVersion() int { return s.netVersionId } func (s *Ethereum) ShhVersion() int { return s.shhVersionId } +func (s *Ethereum) Downloader() *downloader.Downloader { return s.downloader } // Start the ethereum func (s *Ethereum) Start() error { diff --git a/eth/protocol.go b/eth/protocol.go index 878038f74..b15868898 100644 --- a/eth/protocol.go +++ b/eth/protocol.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/errs" + "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/p2p" @@ -18,8 +19,8 @@ const ( NetworkId = 0 ProtocolLength = uint64(8) ProtocolMaxMsgSize = 10 * 1024 * 1024 - maxHashes = 256 - maxBlocks = 64 + maxHashes = 512 + maxBlocks = 128 ) // eth protocol message codes @@ -64,6 +65,7 @@ type ethProtocol struct { txPool txPool chainManager chainManager blockPool blockPool + downloader *downloader.Downloader peer *p2p.Peer id string rw p2p.MsgReadWriter @@ -114,25 +116,26 @@ type statusMsgData struct { // main entrypoint, wrappers starting a server running the eth protocol // use this constructor to attach the protocol ("class") to server caps // the Dev p2p layer then runs the protocol instance on each peer -func EthProtocol(protocolVersion, networkId int, txPool txPool, chainManager chainManager, blockPool blockPool) p2p.Protocol { +func 
EthProtocol(protocolVersion, networkId int, txPool txPool, chainManager chainManager, blockPool blockPool, downloader *downloader.Downloader) p2p.Protocol { return p2p.Protocol{ Name: "eth", Version: uint(protocolVersion), Length: ProtocolLength, Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error { - return runEthProtocol(protocolVersion, networkId, txPool, chainManager, blockPool, peer, rw) + return runEthProtocol(protocolVersion, networkId, txPool, chainManager, blockPool, downloader, peer, rw) }, } } // the main loop that handles incoming messages // note RemovePeer in the post-disconnect hook -func runEthProtocol(protocolVersion, networkId int, txPool txPool, chainManager chainManager, blockPool blockPool, peer *p2p.Peer, rw p2p.MsgReadWriter) (err error) { +func runEthProtocol(protocolVersion, networkId int, txPool txPool, chainManager chainManager, blockPool blockPool, downloader *downloader.Downloader, peer *p2p.Peer, rw p2p.MsgReadWriter) (err error) { id := peer.ID() self := ðProtocol{ txPool: txPool, chainManager: chainManager, blockPool: blockPool, + downloader: downloader, rw: rw, peer: peer, protocolVersion: protocolVersion, @@ -211,24 +214,33 @@ func (self *ethProtocol) handle() error { case BlockHashesMsg: msgStream := rlp.NewStream(msg.Payload) - if _, err := msgStream.List(); err != nil { - return err - } - var i int - iter := func() (hash common.Hash, ok bool) { - err := msgStream.Decode(&hash) - if err == rlp.EOL { - return common.Hash{}, false - } else if err != nil { - self.protoError(ErrDecode, "msg %v: after %v hashes : %v", msg, i, err) - return common.Hash{}, false + var hashes []common.Hash + if err := msgStream.Decode(&hashes); err != nil { + break + } + self.downloader.HashCh <- hashes + + /* + if _, err := msgStream.List(); err != nil { + return err } - i++ - return hash, true - } - self.blockPool.AddBlockHashes(iter, self.id) + var i int + iter := func() (hash common.Hash, err error) { + err = msgStream.Decode(&hash) + if err == rlp.EOL { + return common.Hash{}, err + } else if err != nil { + return common.Hash{}, fmt.Errorf("Fetching hashes err (%d): %v", i, err) + } + + i++ + return hash, nil + } + self.downloader.HashCh <- iter + //self.blockPool.AddBlockHashes(iter, self.id) + */ case GetBlocksMsg: msgStream := rlp.NewStream(msg.Payload) @@ -260,23 +272,34 @@ func (self *ethProtocol) handle() error { case BlocksMsg: msgStream := rlp.NewStream(msg.Payload) - if _, err := msgStream.List(); err != nil { - return err + + var blocks []*types.Block + if err := msgStream.Decode(&blocks); err != nil { + glog.V(logger.Detail).Infoln("Decode error", err) + fmt.Println("decode error", err) + blocks = nil } - for { - var block types.Block - if err := msgStream.Decode(&block); err != nil { - if err == rlp.EOL { - break - } else { - return self.protoError(ErrDecode, "msg %v: %v", msg, err) + self.downloader.DeliverChunk(self.id, blocks) + /* + msgStream := rlp.NewStream(msg.Payload) + if _, err := msgStream.List(); err != nil { + return err + } + for { + var block types.Block + if err := msgStream.Decode(&block); err != nil { + if err == rlp.EOL { + break + } else { + return self.protoError(ErrDecode, "msg %v: %v", msg, err) + } } + if err := block.ValidateFields(); err != nil { + return self.protoError(ErrDecode, "block validation %v: %v", msg, err) + } + self.blockPool.AddBlock(&block, self.id) } - if err := block.ValidateFields(); err != nil { - return self.protoError(ErrDecode, "block validation %v: %v", msg, err) - } - self.blockPool.AddBlock(&block, self.id) - 
} + */ case NewBlockMsg: var request newBlockMsgData @@ -296,6 +319,8 @@ func (self *ethProtocol) handle() error { BlockPrevHash: request.Block.ParentHash().Hex(), RemoteId: self.peer.ID().String(), }) + + self.downloader.AddBlock(self.id, request.Block, request.TD) // to simplify backend interface adding a new block // uses AddPeer followed by AddBlock only if peer is the best peer // (or selected as new best peer) @@ -345,10 +370,16 @@ func (self *ethProtocol) handleStatus() error { return self.protoError(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, self.protocolVersion) } - _, suspended := self.blockPool.AddPeer(status.TD, status.CurrentBlock, self.id, self.requestBlockHashes, self.requestBlocks, self.protoErrorDisconnect) - if suspended { - return self.protoError(ErrSuspendedPeer, "") + err = self.downloader.RegisterPeer(self.id, status.TD, status.CurrentBlock, self.requestBlockHashes, self.requestBlocks) + if err != nil { + return self.protoError(ErrSuspendedPeer, "something") } + /* + _, suspended := self.blockPool.AddPeer(status.TD, status.CurrentBlock, self.id, self.requestBlockHashes, self.requestBlocks, self.protoErrorDisconnect) + if suspended { + return self.protoError(ErrSuspendedPeer, "") + } + */ self.peer.Debugf("Peer is [eth] capable (%d/%d). TD=%v H=%x\n", status.ProtocolVersion, status.NetworkId, status.TD, status.CurrentBlock[:4]) From c4678ffd77a18a9d03c888fdf242c9e5915b9f5f Mon Sep 17 00:00:00 2001 From: obscuren Date: Thu, 16 Apr 2015 00:14:31 +0200 Subject: [PATCH 02/26] downloader: updated downloader and fixed issues with catch up Properly ignore blocks coming from peers not in our peer list (blocked) and do never request anything from bad peers. Added some checks to account for blocks known when requesting hashes (missing parents). --- eth/downloader/downloader.go | 70 ++++++++++++++++++++++++------------ 1 file changed, 48 insertions(+), 22 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 83e6b8d32..1707e1395 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -54,8 +54,9 @@ type blockPack struct { } type syncPack struct { - peer *peer - hash common.Hash + peer *peer + hash common.Hash + ignoreInitial bool } func New(hasBlock hashCheckFn, insertChain chainInsertFn, currentTd currentTdFn) *Downloader { @@ -104,11 +105,13 @@ func (d *Downloader) UnregisterPeer(id string) { func (d *Downloader) peerHandler() { // itimer is used to determine when to start ignoring `minDesiredPeerCount` - itimer := time.NewTicker(5 * time.Second) + //itimer := time.NewTicker(5 * time.Second) + itimer := time.NewTimer(5 * time.Second) out: for { select { case <-d.newPeerCh: + itimer.Stop() // Meet the `minDesiredPeerCount` before we select our best peer if len(d.peers) < minDesiredPeerCount { break @@ -137,7 +140,7 @@ func (d *Downloader) selectPeer(p *peer) { } glog.V(logger.Detail).Infoln("New peer with highest TD =", p.td) - d.syncCh <- syncPack{p, p.recentHash} + d.syncCh <- syncPack{p, p.recentHash, false} } } @@ -147,11 +150,11 @@ out: select { case sync := <-d.syncCh: selectedPeer := sync.peer - glog.V(logger.Detail).Infoln("Synchronising with network using:", selectedPeer.id) + glog.V(logger.Detail).Infoln("Synchronising with the network using:", selectedPeer.id) // Start the fetcher. This will block the update entirely // interupts need to be send to the appropriate channels // respectively. 
- if err := d.startFetchingHashes(selectedPeer, sync.hash); err != nil { + if err := d.startFetchingHashes(selectedPeer, sync.hash, sync.ignoreInitial); err != nil { // handle error glog.V(logger.Debug).Infoln("Error fetching hashes:", err) // XXX Reset @@ -178,11 +181,18 @@ out: } // XXX Make synchronous -func (d *Downloader) startFetchingHashes(p *peer, hash common.Hash) error { - glog.V(logger.Debug).Infoln("Downloading hashes") +func (d *Downloader) startFetchingHashes(p *peer, hash common.Hash, ignoreInitial bool) error { + glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", hash.Bytes()[:4], p.id) start := time.Now() + // We ignore the initial hash in some cases (e.g. we received a block without it's parent) + // In such circumstances we don't need to download the block so don't add it to the queue. + if !ignoreInitial { + // Add the hash to the queue first + d.queue.hashPool.Add(hash) + } + // Get the first batch of hashes p.getHashes(hash) atomic.StoreInt32(&d.fetchingHashes, 1) @@ -195,7 +205,7 @@ out: hashSet := set.New() for _, hash := range hashes { if d.hasBlock(hash) { - glog.V(logger.Debug).Infof("Found common hash %x\n", hash) + glog.V(logger.Debug).Infof("Found common hash %x\n", hash[:4]) done = true break @@ -207,7 +217,7 @@ out: // Add hashes to the chunk set // Check if we're done fetching - if !done { + if !done && len(hashes) > 0 { //fmt.Println("re-fetch. current =", d.queue.hashPool.Size()) // Get the next set of hashes p.getHashes(hashes[len(hashes)-1]) @@ -218,7 +228,7 @@ out: } } } - glog.V(logger.Detail).Infoln("Download hashes: done. Took", time.Since(start)) + glog.V(logger.Detail).Infof("Downloaded hashes (%d). Took %v\n", d.queue.hashPool.Size(), time.Since(start)) return nil } @@ -242,6 +252,10 @@ out: // from the available peers. if d.queue.hashPool.Size() > 0 { availablePeers := d.peers.get(idleState) + if len(availablePeers) == 0 { + glog.V(logger.Detail).Infoln("No peers available out of", len(d.peers)) + } + for _, peer := range availablePeers { // Get a possible chunk. If nil is returned no chunk // could be returned due to no hashes available. @@ -317,21 +331,33 @@ func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) { return } + peer := d.peers.getPeer(id) + // if the peer is in our healthy list of peers; update the td + // and add the block. Otherwise just ignore it + if peer == nil { + glog.V(logger.Detail).Infof("Ignored block from bad peer %s\n", id) + return + } + + peer.mu.Lock() + peer.td = td + peer.recentHash = block.Hash() + peer.mu.Unlock() + glog.V(logger.Detail).Infoln("Inserting new block from:", id) d.queue.addBlock(id, block, td) - // if the peer is in our healthy list of peers; update the td - // here is a good chance to add the peer back to the list - if peer := d.peers.getPeer(id); peer != nil { - peer.mu.Lock() - peer.td = td - peer.recentHash = block.Hash() - peer.mu.Unlock() - } - // if neither go ahead to process if !(d.isFetchingHashes() || d.isDownloadingBlocks()) { - d.process() + // Check if the parent of the received block is known. + // If the block is not know, request it otherwise, request. + phash := block.ParentHash() + if !d.hasBlock(phash) { + glog.V(logger.Detail).Infof("Missing parent %x, requires fetching\n", phash.Bytes()[:4]) + d.syncCh <- syncPack{peer, peer.recentHash, true} + } else { + d.process() + } } } @@ -369,7 +395,7 @@ func (d *Downloader) process() error { // TODO change this. 
This shite for i, block := range blocks[:max] { if !d.hasBlock(block.ParentHash()) { - d.syncCh <- syncPack{d.peers.bestPeer(), block.Hash()} + d.syncCh <- syncPack{d.peers.bestPeer(), block.Hash(), true} // remove processed blocks blocks = blocks[i:] From 73eb8e8c20b784e3a43f826e51a81326619d98ef Mon Sep 17 00:00:00 2001 From: obscuren Date: Thu, 16 Apr 2015 01:28:37 +0200 Subject: [PATCH 03/26] eth: basic implementation of the downloader --- eth/protocol.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/eth/protocol.go b/eth/protocol.go index a85d15a0c..66f3cbac8 100644 --- a/eth/protocol.go +++ b/eth/protocol.go @@ -396,6 +396,31 @@ func (self *ethProtocol) requestBlocks(hashes []common.Hash) error { return p2p.Send(self.rw, GetBlocksMsg, hashes) } +/* +func (self *ethProtocol) newRespBlockCh() BlockPack { + self.blockRespCh = make(chan blockResp) + return self.blockRespCh +} + +func (self *ethProtocol) RequestBlocks(hashes *set.Set) <-chan []*types.Block { + out := make(chan []*types.Block) + go func() { + done: + for { + select { + case blockResp := <-self.newRespBlockCh(): + if len(blockResp.blocks) { + } + case <-time.After(5 * time.Second): + } + } + + close(out) + }() + return out +} +*/ + func (self *ethProtocol) protoError(code int, format string, params ...interface{}) (err *errs.Error) { err = self.errors.New(code, format, params...) //err.Log(self.peer.Logger) From c2f410214c99ee3636cfb670e84e5f05d179a1ef Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 01:11:09 +0200 Subject: [PATCH 04/26] eth: began split up of peers and protocol manager --- eth/backend.go | 13 +- eth/handler.go | 224 ++++++++++++++++++++++++++++ eth/peer.go | 137 +++++++++++++++++ eth/protocol.go | 383 +----------------------------------------------- 4 files changed, 369 insertions(+), 388 deletions(-) create mode 100644 eth/handler.go create mode 100644 eth/peer.go diff --git a/eth/backend.go b/eth/backend.go index 3d5c4ba09..d34a2d26b 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum/ethash" "github.com/ethereum/go-ethereum/accounts" - "github.com/ethereum/go-ethereum/blockpool" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" @@ -131,7 +130,6 @@ type Ethereum struct { blockProcessor *core.BlockProcessor txPool *core.TxPool chainManager *core.ChainManager - blockPool *blockpool.BlockPool accountManager *accounts.Manager whisper *whisper.Whisper pow *ethash.Ethash @@ -219,17 +217,12 @@ func New(config *Config) (*Ethereum, error) { eth.shhVersionId = int(eth.whisper.Version()) eth.miner = miner.New(eth, eth.pow, config.MinerThreads) - hasBlock := eth.chainManager.HasBlock - insertChain := eth.chainManager.InsertChain - td := eth.chainManager.Td() - eth.blockPool = blockpool.New(hasBlock, insertChain, eth.pow.Verify, eth.EventMux(), td) - netprv, err := config.nodeKey() if err != nil { return nil, err } - ethProto := EthProtocol(config.ProtocolVersion, config.NetworkId, eth.txPool, eth.chainManager, eth.blockPool, eth.downloader) + ethProto := EthProtocol(config.ProtocolVersion, config.NetworkId, eth.txPool, eth.chainManager, eth.downloader) protocols := []p2p.Protocol{ethProto} if config.Shh { protocols = append(protocols, eth.whisper.Protocol()) @@ -352,7 +345,6 @@ func (s *Ethereum) AccountManager() *accounts.Manager { return s.accountManag func (s *Ethereum) ChainManager() *core.ChainManager { return s.chainManager } func (s *Ethereum) 
BlockProcessor() *core.BlockProcessor { return s.blockProcessor } func (s *Ethereum) TxPool() *core.TxPool { return s.txPool } -func (s *Ethereum) BlockPool() *blockpool.BlockPool { return s.blockPool } func (s *Ethereum) Whisper() *whisper.Whisper { return s.whisper } func (s *Ethereum) EventMux() *event.TypeMux { return s.eventMux } func (s *Ethereum) BlockDb() common.Database { return s.blockDb } @@ -384,7 +376,6 @@ func (s *Ethereum) Start() error { // Start services s.txPool.Start() - s.blockPool.Start() if s.whisper != nil { s.whisper.Start() @@ -410,7 +401,6 @@ func (s *Ethereum) StartForTest() { // Start services s.txPool.Start() - s.blockPool.Start() } func (self *Ethereum) SuggestPeer(nodeURL string) error { @@ -433,7 +423,6 @@ func (s *Ethereum) Stop() { s.txPool.Stop() s.eventMux.Stop() - s.blockPool.Stop() if s.whisper != nil { s.whisper.Stop() } diff --git a/eth/handler.go b/eth/handler.go new file mode 100644 index 000000000..b3890d365 --- /dev/null +++ b/eth/handler.go @@ -0,0 +1,224 @@ +package eth + +import ( + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/rlp" +) + +func errResp(code errCode, format string, v ...interface{}) error { + return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...)) +} + +// main entrypoint, wrappers starting a server running the eth protocol +// use this constructor to attach the protocol ("class") to server caps +// the Dev p2p layer then runs the protocol instance on each peer +func EthProtocol(protocolVersion, networkId int, txPool txPool, chainManager chainManager, downloader *downloader.Downloader) p2p.Protocol { + protocol := newProtocolManager(txPool, chainManager, downloader) + + return p2p.Protocol{ + Name: "eth", + Version: uint(protocolVersion), + Length: ProtocolLength, + Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { + //return runEthProtocol(protocolVersion, networkId, txPool, chainManager, downloader, p, rw) + peer := protocol.newPeer(protocolVersion, networkId, p, rw) + err := protocol.handle(peer) + glog.V(logger.Detail).Infof("[%s]: %v\n", peer.id, err) + + return err + }, + } +} + +type hashFetcherFn func(common.Hash) error +type blockFetcherFn func([]common.Hash) error + +// extProt is an interface which is passed around so we can expose GetHashes and GetBlock without exposing it to the rest of the protocol +// extProt is passed around to peers which require to GetHashes and GetBlocks +type extProt struct { + getHashes hashFetcherFn + getBlocks blockFetcherFn +} + +func (ep extProt) GetHashes(hash common.Hash) error { return ep.getHashes(hash) } +func (ep extProt) GetBlock(hashes []common.Hash) error { return ep.getBlocks(hashes) } + +type EthProtocolManager struct { + protVer, netId int + txpool txPool + chainman chainManager + downloader *downloader.Downloader + + pmu sync.Mutex + peers map[string]*peer +} + +func newProtocolManager(txpool txPool, chainman chainManager, downloader *downloader.Downloader) *EthProtocolManager { + return &EthProtocolManager{ + txpool: txpool, + chainman: chainman, + downloader: downloader, + peers: make(map[string]*peer), + } +} + +func (pm *EthProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { + pm.pmu.Lock() + defer pm.pmu.Unlock() + + td, current, genesis := pm.chainman.Status() 
+ + peer := newPeer(pv, nv, genesis, current, td, p, rw) + pm.peers[peer.id] = peer + + return peer +} + +func (pm *EthProtocolManager) handle(p *peer) error { + if err := p.handleStatus(); err != nil { + return err + } + + pm.downloader.RegisterPeer(p.id, p.td, p.currentHash, p.requestHashes, p.requestBlocks) + defer pm.downloader.UnregisterPeer(p.id) + + // propagate existing transactions. new transactions appearing + // after this will be sent via broadcasts. + if err := p.sendTransactions(pm.txpool.GetTransactions()); err != nil { + return err + } + + // main loop. handle incoming messages. + for { + if err := pm.handleMsg(p); err != nil { + return err + } + } + + return nil +} + +func (self *EthProtocolManager) handleMsg(p *peer) error { + msg, err := p.rw.ReadMsg() + if err != nil { + return err + } + if msg.Size > ProtocolMaxMsgSize { + return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) + } + // make sure that the payload has been fully consumed + defer msg.Discard() + + switch msg.Code { + case GetTxMsg: // ignore + case StatusMsg: + return errResp(ErrExtraStatusMsg, "uncontrolled status message") + + case TxMsg: + // TODO: rework using lazy RLP stream + var txs []*types.Transaction + if err := msg.Decode(&txs); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + for i, tx := range txs { + if tx == nil { + return errResp(ErrDecode, "transaction %d is nil", i) + } + jsonlogger.LogJson(&logger.EthTxReceived{ + TxHash: tx.Hash().Hex(), + RemoteId: p.ID().String(), + }) + } + self.txpool.AddTransactions(txs) + + case GetBlockHashesMsg: + var request getBlockHashesMsgData + if err := msg.Decode(&request); err != nil { + return errResp(ErrDecode, "->msg %v: %v", msg, err) + } + + if request.Amount > maxHashes { + request.Amount = maxHashes + } + hashes := self.chainman.GetBlockHashesFromHash(request.Hash, request.Amount) + return p.sendBlockHashes(hashes) + case BlockHashesMsg: + msgStream := rlp.NewStream(msg.Payload) + + var hashes []common.Hash + if err := msgStream.Decode(&hashes); err != nil { + break + } + self.downloader.HashCh <- hashes + + case GetBlocksMsg: + msgStream := rlp.NewStream(msg.Payload) + if _, err := msgStream.List(); err != nil { + return err + } + + var blocks []*types.Block + var i int + for { + i++ + var hash common.Hash + err := msgStream.Decode(&hash) + if err == rlp.EOL { + break + } else if err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + + block := self.chainman.GetBlock(hash) + if block != nil { + blocks = append(blocks, block) + } + if i == maxBlocks { + break + } + } + return p.sendBlocks(blocks) + case BlocksMsg: + msgStream := rlp.NewStream(msg.Payload) + + var blocks []*types.Block + if err := msgStream.Decode(&blocks); err != nil { + glog.V(logger.Detail).Infoln("Decode error", err) + fmt.Println("decode error", err) + blocks = nil + } + self.downloader.DeliverChunk(p.id, blocks) + + case NewBlockMsg: + var request newBlockMsgData + if err := msg.Decode(&request); err != nil { + return errResp(ErrDecode, "%v: %v", msg, err) + } + if err := request.Block.ValidateFields(); err != nil { + return errResp(ErrDecode, "block validation %v: %v", msg, err) + } + hash := request.Block.Hash() + _, chainHead, _ := self.chainman.Status() + + jsonlogger.LogJson(&logger.EthChainReceivedNewBlock{ + BlockHash: hash.Hex(), + BlockNumber: request.Block.Number(), // this surely must be zero + ChainHeadHash: chainHead.Hex(), + BlockPrevHash: request.Block.ParentHash().Hex(), + RemoteId: p.ID().String(), + }) 
+ self.downloader.AddBlock(p.id, request.Block, request.TD) + + default: + return errResp(ErrInvalidMsgCode, "%v", msg.Code) + } + return nil +} diff --git a/eth/peer.go b/eth/peer.go new file mode 100644 index 000000000..db7fea7a7 --- /dev/null +++ b/eth/peer.go @@ -0,0 +1,137 @@ +package eth + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/p2p" + "gopkg.in/fatih/set.v0" +) + +type statusMsgData struct { + ProtocolVersion uint32 + NetworkId uint32 + TD *big.Int + CurrentBlock common.Hash + GenesisBlock common.Hash +} + +type getBlockHashesMsgData struct { + Hash common.Hash + Amount uint64 +} + +type peer struct { + *p2p.Peer + + rw p2p.MsgReadWriter + + protv, netid int + + currentHash common.Hash + id string + td *big.Int + + genesis, ourHash common.Hash + ourTd *big.Int + + txHashes *set.Set + blockHashes *set.Set +} + +func newPeer(protv, netid int, genesis, currentHash common.Hash, td *big.Int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { + id := p.ID() + + return &peer{ + Peer: p, + rw: rw, + genesis: genesis, + ourHash: currentHash, + ourTd: td, + protv: protv, + netid: netid, + id: fmt.Sprintf("%x", id[:8]), + txHashes: set.New(), + blockHashes: set.New(), + } +} + +// sendTransactions sends transactions to the peer and includes the hashes +// in it's tx hash set for future reference. The tx hash will allow the +// manager to check whether the peer has already received this particular +// transaction +func (p *peer) sendTransactions(txs types.Transactions) error { + for _, tx := range txs { + p.txHashes.Add(tx.Hash()) + } + + return p2p.Send(p.rw, TxMsg, txs) +} + +func (p *peer) sendBlockHashes(hashes []common.Hash) error { + return p2p.Send(p.rw, BlockHashesMsg, hashes) +} + +func (p *peer) sendBlocks(blocks []*types.Block) error { + return p2p.Send(p.rw, BlocksMsg, blocks) +} + +func (p *peer) requestHashes(from common.Hash) error { + p.Debugf("fetching hashes (%d) %x...\n", maxHashes, from[0:4]) + return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, maxHashes}) +} + +func (p *peer) requestBlocks(hashes []common.Hash) error { + p.Debugf("fetching %v blocks", len(hashes)) + return p2p.Send(p.rw, GetBlocksMsg, hashes) +} + +func (p *peer) handleStatus() error { + errc := make(chan error, 1) + go func() { + errc <- p2p.Send(p.rw, StatusMsg, &statusMsgData{ + ProtocolVersion: uint32(p.protv), + NetworkId: uint32(p.netid), + TD: p.ourTd, + CurrentBlock: p.ourHash, + GenesisBlock: p.genesis, + }) + }() + + // read and handle remote status + msg, err := p.rw.ReadMsg() + if err != nil { + return err + } + if msg.Code != StatusMsg { + return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg) + } + if msg.Size > ProtocolMaxMsgSize { + return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) + } + + var status statusMsgData + if err := msg.Decode(&status); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + + if status.GenesisBlock != p.genesis { + return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", status.GenesisBlock, p.genesis) + } + + if int(status.NetworkId) != p.netid { + return errResp(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, p.netid) + } + + if int(status.ProtocolVersion) != p.protv { + return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.protv) + } + // Set the total difficulty of the peer + p.td = status.TD + // set the best hash of the peer + 
p.currentHash = status.CurrentBlock + + return <-errc +} diff --git a/eth/protocol.go b/eth/protocol.go index 66f3cbac8..48f37b59c 100644 --- a/eth/protocol.go +++ b/eth/protocol.go @@ -1,17 +1,10 @@ package eth import ( - "fmt" "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/errs" - "github.com/ethereum/go-ethereum/eth/downloader" - "github.com/ethereum/go-ethereum/logger" - "github.com/ethereum/go-ethereum/logger/glog" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/rlp" ) const ( @@ -35,6 +28,8 @@ const ( NewBlockMsg ) +type errCode int + const ( ErrMsgTooLarge = iota ErrDecode @@ -47,6 +42,11 @@ const ( ErrSuspendedPeer ) +func (e errCode) String() string { + return errorToString[int(e)] +} + +// XXX change once legacy code is out var errorToString = map[int]string{ ErrMsgTooLarge: "Message too long", ErrDecode: "Invalid message", @@ -59,21 +59,6 @@ var errorToString = map[int]string{ ErrSuspendedPeer: "Suspended peer", } -// ethProtocol represents the ethereum wire protocol -// instance is running on each peer -type ethProtocol struct { - txPool txPool - chainManager chainManager - blockPool blockPool - downloader *downloader.Downloader - peer *p2p.Peer - id string - rw p2p.MsgReadWriter - errors *errs.Errors - protocolVersion int - networkId int -} - // backend is the interface the ethereum protocol backend should implement // used as an argument to EthProtocol type txPool interface { @@ -87,362 +72,8 @@ type chainManager interface { Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) } -type blockPool interface { - AddBlockHashes(next func() (common.Hash, bool), peerId string) - AddBlock(block *types.Block, peerId string) - AddPeer(td *big.Int, currentBlock common.Hash, peerId string, requestHashes func(common.Hash) error, requestBlocks func([]common.Hash) error, peerError func(*errs.Error)) (best bool, suspended bool) - RemovePeer(peerId string) -} - // message structs used for RLP serialization type newBlockMsgData struct { Block *types.Block TD *big.Int } - -type getBlockHashesMsgData struct { - Hash common.Hash - Amount uint64 -} - -type statusMsgData struct { - ProtocolVersion uint32 - NetworkId uint32 - TD *big.Int - CurrentBlock common.Hash - GenesisBlock common.Hash -} - -// main entrypoint, wrappers starting a server running the eth protocol -// use this constructor to attach the protocol ("class") to server caps -// the Dev p2p layer then runs the protocol instance on each peer -func EthProtocol(protocolVersion, networkId int, txPool txPool, chainManager chainManager, blockPool blockPool, downloader *downloader.Downloader) p2p.Protocol { - return p2p.Protocol{ - Name: "eth", - Version: uint(protocolVersion), - Length: ProtocolLength, - Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error { - return runEthProtocol(protocolVersion, networkId, txPool, chainManager, blockPool, downloader, peer, rw) - }, - } -} - -// the main loop that handles incoming messages -// note RemovePeer in the post-disconnect hook -func runEthProtocol(protocolVersion, networkId int, txPool txPool, chainManager chainManager, blockPool blockPool, downloader *downloader.Downloader, peer *p2p.Peer, rw p2p.MsgReadWriter) (err error) { - id := peer.ID() - self := ðProtocol{ - txPool: txPool, - chainManager: chainManager, - blockPool: blockPool, - downloader: downloader, - rw: rw, - peer: peer, - protocolVersion: protocolVersion, - networkId: networkId, - errors: 
&errs.Errors{ - Package: "ETH", - Errors: errorToString, - }, - id: fmt.Sprintf("%x", id[:8]), - } - - // handshake. - if err := self.handleStatus(); err != nil { - return err - } - defer self.blockPool.RemovePeer(self.id) - - // propagate existing transactions. new transactions appearing - // after this will be sent via broadcasts. - if err := p2p.Send(rw, TxMsg, txPool.GetTransactions()); err != nil { - return err - } - - // main loop. handle incoming messages. - for { - if err := self.handle(); err != nil { - return err - } - } -} - -func (self *ethProtocol) handle() error { - msg, err := self.rw.ReadMsg() - if err != nil { - return err - } - if msg.Size > ProtocolMaxMsgSize { - return self.protoError(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) - } - // make sure that the payload has been fully consumed - defer msg.Discard() - - switch msg.Code { - case GetTxMsg: // ignore - case StatusMsg: - return self.protoError(ErrExtraStatusMsg, "") - - case TxMsg: - // TODO: rework using lazy RLP stream - var txs []*types.Transaction - if err := msg.Decode(&txs); err != nil { - return self.protoError(ErrDecode, "msg %v: %v", msg, err) - } - for i, tx := range txs { - if tx == nil { - return self.protoError(ErrDecode, "transaction %d is nil", i) - } - jsonlogger.LogJson(&logger.EthTxReceived{ - TxHash: tx.Hash().Hex(), - RemoteId: self.peer.ID().String(), - }) - } - self.txPool.AddTransactions(txs) - - case GetBlockHashesMsg: - var request getBlockHashesMsgData - if err := msg.Decode(&request); err != nil { - return self.protoError(ErrDecode, "->msg %v: %v", msg, err) - } - - if request.Amount > maxHashes { - request.Amount = maxHashes - } - hashes := self.chainManager.GetBlockHashesFromHash(request.Hash, request.Amount) - return p2p.Send(self.rw, BlockHashesMsg, hashes) - - case BlockHashesMsg: - msgStream := rlp.NewStream(msg.Payload) - - var hashes []common.Hash - if err := msgStream.Decode(&hashes); err != nil { - break - } - self.downloader.HashCh <- hashes - - /* - if _, err := msgStream.List(); err != nil { - return err - } - - var i int - iter := func() (hash common.Hash, err error) { - err = msgStream.Decode(&hash) - if err == rlp.EOL { - return common.Hash{}, err - } else if err != nil { - return common.Hash{}, fmt.Errorf("Fetching hashes err (%d): %v", i, err) - } - - i++ - return hash, nil - } - self.downloader.HashCh <- iter - //self.blockPool.AddBlockHashes(iter, self.id) - */ - - case GetBlocksMsg: - msgStream := rlp.NewStream(msg.Payload) - if _, err := msgStream.List(); err != nil { - return err - } - - var blocks []*types.Block - var i int - for { - i++ - var hash common.Hash - err := msgStream.Decode(&hash) - if err == rlp.EOL { - break - } else if err != nil { - return self.protoError(ErrDecode, "msg %v: %v", msg, err) - } - - block := self.chainManager.GetBlock(hash) - if block != nil { - blocks = append(blocks, block) - } - if i == maxBlocks { - break - } - } - return p2p.Send(self.rw, BlocksMsg, blocks) - - case BlocksMsg: - msgStream := rlp.NewStream(msg.Payload) - - var blocks []*types.Block - if err := msgStream.Decode(&blocks); err != nil { - glog.V(logger.Detail).Infoln("Decode error", err) - fmt.Println("decode error", err) - blocks = nil - } - self.downloader.DeliverChunk(self.id, blocks) - /* - msgStream := rlp.NewStream(msg.Payload) - if _, err := msgStream.List(); err != nil { - return err - } - for { - var block types.Block - if err := msgStream.Decode(&block); err != nil { - if err == rlp.EOL { - break - } else { - return self.protoError(ErrDecode, 
"msg %v: %v", msg, err) - } - } - if err := block.ValidateFields(); err != nil { - return self.protoError(ErrDecode, "block validation %v: %v", msg, err) - } - self.blockPool.AddBlock(&block, self.id) - } - */ - - case NewBlockMsg: - var request newBlockMsgData - if err := msg.Decode(&request); err != nil { - return self.protoError(ErrDecode, "%v: %v", msg, err) - } - if err := request.Block.ValidateFields(); err != nil { - return self.protoError(ErrDecode, "block validation %v: %v", msg, err) - } - hash := request.Block.Hash() - _, chainHead, _ := self.chainManager.Status() - - jsonlogger.LogJson(&logger.EthChainReceivedNewBlock{ - BlockHash: hash.Hex(), - BlockNumber: request.Block.Number(), // this surely must be zero - ChainHeadHash: chainHead.Hex(), - BlockPrevHash: request.Block.ParentHash().Hex(), - RemoteId: self.peer.ID().String(), - }) - - self.downloader.AddBlock(self.id, request.Block, request.TD) - // to simplify backend interface adding a new block - // uses AddPeer followed by AddBlock only if peer is the best peer - // (or selected as new best peer) - if _, suspended := self.blockPool.AddPeer(request.TD, hash, self.id, self.requestBlockHashes, self.requestBlocks, self.protoErrorDisconnect); !suspended { - self.blockPool.AddBlock(request.Block, self.id) - } - - default: - return self.protoError(ErrInvalidMsgCode, "%v", msg.Code) - } - return nil -} - -func (self *ethProtocol) handleStatus() error { - if err := self.sendStatus(); err != nil { - return err - } - - // read and handle remote status - msg, err := self.rw.ReadMsg() - if err != nil { - return err - } - if msg.Code != StatusMsg { - return self.protoError(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg) - } - if msg.Size > ProtocolMaxMsgSize { - return self.protoError(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) - } - - var status statusMsgData - if err := msg.Decode(&status); err != nil { - return self.protoError(ErrDecode, "msg %v: %v", msg, err) - } - - _, _, genesisBlock := self.chainManager.Status() - - if status.GenesisBlock != genesisBlock { - return self.protoError(ErrGenesisBlockMismatch, "%x (!= %x)", status.GenesisBlock, genesisBlock) - } - - if int(status.NetworkId) != self.networkId { - return self.protoError(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, self.networkId) - } - - if int(status.ProtocolVersion) != self.protocolVersion { - return self.protoError(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, self.protocolVersion) - } - - err = self.downloader.RegisterPeer(self.id, status.TD, status.CurrentBlock, self.requestBlockHashes, self.requestBlocks) - if err != nil { - return self.protoError(ErrSuspendedPeer, "something") - } - /* - _, suspended := self.blockPool.AddPeer(status.TD, status.CurrentBlock, self.id, self.requestBlockHashes, self.requestBlocks, self.protoErrorDisconnect) - if suspended { - return self.protoError(ErrSuspendedPeer, "") - } - */ - - self.peer.Debugf("Peer is [eth] capable (%d/%d). 
TD=%v H=%x\n", status.ProtocolVersion, status.NetworkId, status.TD, status.CurrentBlock[:4]) - - return nil -} - -func (self *ethProtocol) requestBlockHashes(from common.Hash) error { - self.peer.Debugf("fetching hashes (%d) %x...\n", maxHashes, from[0:4]) - return p2p.Send(self.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, maxHashes}) -} - -func (self *ethProtocol) requestBlocks(hashes []common.Hash) error { - self.peer.Debugf("fetching %v blocks", len(hashes)) - return p2p.Send(self.rw, GetBlocksMsg, hashes) -} - -/* -func (self *ethProtocol) newRespBlockCh() BlockPack { - self.blockRespCh = make(chan blockResp) - return self.blockRespCh -} - -func (self *ethProtocol) RequestBlocks(hashes *set.Set) <-chan []*types.Block { - out := make(chan []*types.Block) - go func() { - done: - for { - select { - case blockResp := <-self.newRespBlockCh(): - if len(blockResp.blocks) { - } - case <-time.After(5 * time.Second): - } - } - - close(out) - }() - return out -} -*/ - -func (self *ethProtocol) protoError(code int, format string, params ...interface{}) (err *errs.Error) { - err = self.errors.New(code, format, params...) - //err.Log(self.peer.Logger) - err.Log(glog.V(logger.Info)) - return -} - -func (self *ethProtocol) sendStatus() error { - td, currentBlock, genesisBlock := self.chainManager.Status() - return p2p.Send(self.rw, StatusMsg, &statusMsgData{ - ProtocolVersion: uint32(self.protocolVersion), - NetworkId: uint32(self.networkId), - TD: td, - CurrentBlock: currentBlock, - GenesisBlock: genesisBlock, - }) -} - -func (self *ethProtocol) protoErrorDisconnect(err *errs.Error) { - err.Log(glog.V(logger.Info)) - if err.Fatal() { - self.peer.Disconnect(p2p.DiscSubprotocolError) - } - -} From cc436c4b28c95f825499d67c92a18de5d27e90c2 Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 02:21:07 +0200 Subject: [PATCH 05/26] eth: additional cleanups to the subprotocol, improved block propagation * Improved block propagation by sending blocks only to peers to which, as far as we know, the peer does not know about. * Made sub protocol its own manager * SubProtocol now contains the p2p.Protocol which is used instead of a function-returning-protocol thing. 
--- eth/backend.go | 41 +++++++------- eth/handler.go | 149 +++++++++++++++++++++++++++++++++++++------------ eth/peer.go | 6 ++ 3 files changed, 140 insertions(+), 56 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index d34a2d26b..923cdfa5d 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -127,19 +127,20 @@ type Ethereum struct { //*** SERVICES *** // State manager for processing new blocks and managing the over all states - blockProcessor *core.BlockProcessor - txPool *core.TxPool - chainManager *core.ChainManager - accountManager *accounts.Manager - whisper *whisper.Whisper - pow *ethash.Ethash - downloader *downloader.Downloader + blockProcessor *core.BlockProcessor + txPool *core.TxPool + chainManager *core.ChainManager + accountManager *accounts.Manager + whisper *whisper.Whisper + pow *ethash.Ethash + protocolManager *ProtocolManager + downloader *downloader.Downloader net *p2p.Server eventMux *event.TypeMux txSub event.Subscription - blockSub event.Subscription - miner *miner.Miner + //blockSub event.Subscription + miner *miner.Miner // logger logger.LogSystem @@ -216,14 +217,14 @@ func New(config *Config) (*Ethereum, error) { eth.whisper = whisper.New() eth.shhVersionId = int(eth.whisper.Version()) eth.miner = miner.New(eth, eth.pow, config.MinerThreads) + eth.protocolManager = NewProtocolManager(config.ProtocolVersion, config.NetworkId, eth.txPool, eth.chainManager, eth.downloader) netprv, err := config.nodeKey() if err != nil { return nil, err } - ethProto := EthProtocol(config.ProtocolVersion, config.NetworkId, eth.txPool, eth.chainManager, eth.downloader) - protocols := []p2p.Protocol{ethProto} + protocols := []p2p.Protocol{eth.protocolManager.SubProtocol} if config.Shh { protocols = append(protocols, eth.whisper.Protocol()) } @@ -386,7 +387,7 @@ func (s *Ethereum) Start() error { go s.txBroadcastLoop() // broadcast mined blocks - s.blockSub = s.eventMux.Subscribe(core.ChainHeadEvent{}) + //s.blockSub = s.eventMux.Subscribe(core.ChainHeadEvent{}) go s.blockBroadcastLoop() glog.V(logger.Info).Infoln("Server started") @@ -418,8 +419,8 @@ func (s *Ethereum) Stop() { defer s.stateDb.Close() defer s.extraDb.Close() - s.txSub.Unsubscribe() // quits txBroadcastLoop - s.blockSub.Unsubscribe() // quits blockBroadcastLoop + s.txSub.Unsubscribe() // quits txBroadcastLoop + //s.blockSub.Unsubscribe() // quits blockBroadcastLoop s.txPool.Stop() s.eventMux.Stop() @@ -463,12 +464,14 @@ func (self *Ethereum) syncAccounts(tx *types.Transaction) { func (self *Ethereum) blockBroadcastLoop() { // automatically stops if unsubscribe - for obj := range self.blockSub.Chan() { - switch ev := obj.(type) { - case core.ChainHeadEvent: - self.net.BroadcastLimited("eth", NewBlockMsg, math.Sqrt, []interface{}{ev.Block, ev.Block.Td}) + /* + for obj := range self.blockSub.Chan() { + switch ev := obj.(type) { + case core.ChainHeadEvent: + self.net.BroadcastLimited("eth", NewBlockMsg, math.Sqrt, []interface{}{ev.Block, ev.Block.Td}) + } } - } + */ } func saveProtocolVersion(db common.Database, protov int) { diff --git a/eth/handler.go b/eth/handler.go index b3890d365..858ae2958 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -1,10 +1,46 @@ package eth +// XXX Fair warning, most of the code is re-used from the old protocol. Please be aware that most of this will actually change +// The idea is that most of the calls within the protocol will become synchronous. 
+// Block downloading and block processing will be complete seperate processes +/* +# Possible scenarios + +// Synching scenario +// Use the best peer to synchronise +blocks, err := pm.downloader.Synchronise() +if err != nil { + // handle + break +} +pm.chainman.InsertChain(blocks) + +// Receiving block with known parent +if parent_exist { + if err := pm.chainman.InsertChain(block); err != nil { + // handle + break + } + pm.BroadcastBlock(block) +} + +// Receiving block with unknown parent +blocks, err := pm.downloader.SynchroniseWithPeer(peer) +if err != nil { + // handle + break +} +pm.chainman.InsertChain(blocks) + +*/ + import ( "fmt" + "math" "sync" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/logger" @@ -17,27 +53,6 @@ func errResp(code errCode, format string, v ...interface{}) error { return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...)) } -// main entrypoint, wrappers starting a server running the eth protocol -// use this constructor to attach the protocol ("class") to server caps -// the Dev p2p layer then runs the protocol instance on each peer -func EthProtocol(protocolVersion, networkId int, txPool txPool, chainManager chainManager, downloader *downloader.Downloader) p2p.Protocol { - protocol := newProtocolManager(txPool, chainManager, downloader) - - return p2p.Protocol{ - Name: "eth", - Version: uint(protocolVersion), - Length: ProtocolLength, - Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { - //return runEthProtocol(protocolVersion, networkId, txPool, chainManager, downloader, p, rw) - peer := protocol.newPeer(protocolVersion, networkId, p, rw) - err := protocol.handle(peer) - glog.V(logger.Detail).Infof("[%s]: %v\n", peer.id, err) - - return err - }, - } -} - type hashFetcherFn func(common.Hash) error type blockFetcherFn func([]common.Hash) error @@ -51,44 +66,66 @@ type extProt struct { func (ep extProt) GetHashes(hash common.Hash) error { return ep.getHashes(hash) } func (ep extProt) GetBlock(hashes []common.Hash) error { return ep.getBlocks(hashes) } -type EthProtocolManager struct { +type ProtocolManager struct { protVer, netId int txpool txPool - chainman chainManager + chainman *core.ChainManager downloader *downloader.Downloader pmu sync.Mutex peers map[string]*peer + + SubProtocol p2p.Protocol } -func newProtocolManager(txpool txPool, chainman chainManager, downloader *downloader.Downloader) *EthProtocolManager { - return &EthProtocolManager{ +// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable +// with the ethereum network. 
+func NewProtocolManager(protocolVersion, networkId int, txpool txPool, chainman *core.ChainManager, downloader *downloader.Downloader) *ProtocolManager { + manager := &ProtocolManager{ txpool: txpool, chainman: chainman, downloader: downloader, peers: make(map[string]*peer), } + + manager.SubProtocol = p2p.Protocol{ + Name: "eth", + Version: uint(protocolVersion), + Length: ProtocolLength, + Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { + peer := manager.newPeer(protocolVersion, networkId, p, rw) + err := manager.handle(peer) + glog.V(logger.Detail).Infof("[%s]: %v\n", peer.id, err) + + return err + }, + } + + return manager } -func (pm *EthProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { - pm.pmu.Lock() - defer pm.pmu.Unlock() +func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { td, current, genesis := pm.chainman.Status() - peer := newPeer(pv, nv, genesis, current, td, p, rw) - pm.peers[peer.id] = peer - - return peer + return newPeer(pv, nv, genesis, current, td, p, rw) } -func (pm *EthProtocolManager) handle(p *peer) error { +func (pm *ProtocolManager) handle(p *peer) error { if err := p.handleStatus(); err != nil { return err } + pm.pmu.Lock() + pm.peers[p.id] = p + pm.pmu.Unlock() pm.downloader.RegisterPeer(p.id, p.td, p.currentHash, p.requestHashes, p.requestBlocks) - defer pm.downloader.UnregisterPeer(p.id) + defer func() { + pm.pmu.Lock() + defer pm.pmu.Unlock() + delete(pm.peers, p.id) + pm.downloader.UnregisterPeer(p.id) + }() // propagate existing transactions. new transactions appearing // after this will be sent via broadcasts. @@ -106,7 +143,7 @@ func (pm *EthProtocolManager) handle(p *peer) error { return nil } -func (self *EthProtocolManager) handleMsg(p *peer) error { +func (self *ProtocolManager) handleMsg(p *peer) error { msg, err := p.rw.ReadMsg() if err != nil { return err @@ -192,7 +229,6 @@ func (self *EthProtocolManager) handleMsg(p *peer) error { var blocks []*types.Block if err := msgStream.Decode(&blocks); err != nil { glog.V(logger.Detail).Infoln("Decode error", err) - fmt.Println("decode error", err) blocks = nil } self.downloader.DeliverChunk(p.id, blocks) @@ -206,6 +242,10 @@ func (self *EthProtocolManager) handleMsg(p *peer) error { return errResp(ErrDecode, "block validation %v: %v", msg, err) } hash := request.Block.Hash() + // Add the block hash as a known hash to the peer. This will later be used to detirmine + // who should receive this. + p.blockHashes.Add(hash) + _, chainHead, _ := self.chainman.Status() jsonlogger.LogJson(&logger.EthChainReceivedNewBlock{ @@ -215,10 +255,45 @@ func (self *EthProtocolManager) handleMsg(p *peer) error { BlockPrevHash: request.Block.ParentHash().Hex(), RemoteId: p.ID().String(), }) - self.downloader.AddBlock(p.id, request.Block, request.TD) + // Attempt to insert the newly received by checking if the parent exists. + // if the parent exists we process the block and propagate to our peers + // if the parent does not exists we delegate to the downloader. 
+ // NOTE we can reduce chatter by dropping blocks with Td < currentTd + if self.chainman.HasBlock(request.Block.ParentHash()) { + if err := self.chainman.InsertChain(types.Blocks{request.Block}); err != nil { + // handle error + return nil + } + self.BroadcastBlock(hash, request.Block) + } else { + self.downloader.AddBlock(p.id, request.Block, request.TD) + } default: return errResp(ErrInvalidMsgCode, "%v", msg.Code) } return nil } + +// BroadcastBlock will propagate the block to its connected peers. It will sort +// out which peers do not contain the block in their block set and will do a +// sqrt(peers) to determine the amount of peers we broadcast to. +func (pm *ProtocolManager) BroadcastBlock(hash common.Hash, block *types.Block) { + pm.pmu.Lock() + defer pm.pmu.Unlock() + + // Find peers who don't know anything about the given hash. Peers that + // don't know about the hash will be a candidate for the broadcast loop + var peers []*peer + for _, peer := range pm.peers { + if !peer.blockHashes.Has(hash) { + peers = append(peers, peer) + } + } + // Broadcast block to peer set + peers = peers[:int(math.Sqrt(float64(len(peers))))] + for _, peer := range peers { + peer.sendNewBlock(block) + } + glog.V(logger.Detail).Infoln("broadcast block to", len(peers), "peers") +} diff --git a/eth/peer.go b/eth/peer.go index db7fea7a7..8cedbd85a 100644 --- a/eth/peer.go +++ b/eth/peer.go @@ -78,6 +78,12 @@ func (p *peer) sendBlocks(blocks []*types.Block) error { return p2p.Send(p.rw, BlocksMsg, blocks) } +func (p *peer) sendNewBlock(block *types.Block) error { + p.blockHashes.Add(block.Hash()) + + return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, block.Td}) +} + func (p *peer) requestHashes(from common.Hash) error { p.Debugf("fetching hashes (%d) %x...\n", maxHashes, from[0:4]) return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, maxHashes}) From ecc74d76ccf8fe4ca8c32120697d64845c475169 Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 02:24:24 +0200 Subject: [PATCH 06/26] eth: drop blocks that are known --- eth/handler.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/eth/handler.go b/eth/handler.go index 858ae2958..065116fd0 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -256,6 +256,12 @@ func (self *ProtocolManager) handleMsg(p *peer) error { RemoteId: p.ID().String(), }) + // Make sure the block isn't already known. If this is the case simply drop + // the message and move on. + if self.chainman.HasBlock(hash) { + break + } + // Attempt to insert the newly received by checking if the parent exists. // if the parent exists we process the block and propagate to our peers // if the parent does not exists we delegate to the downloader. 
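
Taken together, this patch and the previous one leave the NewBlockMsg path with a three-way decision: drop blocks that are already known, insert and re-broadcast blocks whose parent is present, and hand everything else to the downloader. A minimal sketch of that flow, assuming simplified string hashes and a map-backed chain stub (chain and handleNewBlock are illustrative names, not identifiers from the diff):

package main

import "fmt"

// chain is a minimal stand-in for core.ChainManager: it only answers
// whether a block hash is already part of the local chain.
type chain struct{ blocks map[string]bool }

func (c *chain) hasBlock(hash string) bool { return c.blocks[hash] }

// handleNewBlock mirrors the branching in the NewBlockMsg handler:
// known blocks are dropped, blocks with a known parent are inserted and
// re-broadcast, and everything else is delegated to the downloader.
func handleNewBlock(c *chain, hash, parent string) string {
	switch {
	case c.hasBlock(hash):
		return "drop: block already known"
	case c.hasBlock(parent):
		return "insert into chain, then broadcast to peers"
	default:
		return "delegate to downloader (missing parent)"
	}
}

func main() {
	c := &chain{blocks: map[string]bool{"p": true, "k": true}}
	fmt.Println(handleNewBlock(c, "k", "p")) // already known
	fmt.Println(handleNewBlock(c, "n", "p")) // parent known
	fmt.Println(handleNewBlock(c, "n", "x")) // unknown parent
}
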
From 12e8d9c4dd03e02c507e7174c5a5288e2292a674 Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 02:27:37 +0200 Subject: [PATCH 07/26] eth: listen for mined blocks and propagate using the protocol manager --- eth/backend.go | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index 923cdfa5d..07552ad5a 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -136,11 +136,11 @@ type Ethereum struct { protocolManager *ProtocolManager downloader *downloader.Downloader - net *p2p.Server - eventMux *event.TypeMux - txSub event.Subscription - //blockSub event.Subscription - miner *miner.Miner + net *p2p.Server + eventMux *event.TypeMux + txSub event.Subscription + minedBlockSub event.Subscription + miner *miner.Miner // logger logger.LogSystem @@ -387,8 +387,8 @@ func (s *Ethereum) Start() error { go s.txBroadcastLoop() // broadcast mined blocks - //s.blockSub = s.eventMux.Subscribe(core.ChainHeadEvent{}) - go s.blockBroadcastLoop() + s.minedBlockSub = s.eventMux.Subscribe(core.NewMinedBlockEvent{}) + go s.minedBroadcastLoop() glog.V(logger.Info).Infoln("Server started") return nil @@ -419,8 +419,8 @@ func (s *Ethereum) Stop() { defer s.stateDb.Close() defer s.extraDb.Close() - s.txSub.Unsubscribe() // quits txBroadcastLoop - //s.blockSub.Unsubscribe() // quits blockBroadcastLoop + s.txSub.Unsubscribe() // quits txBroadcastLoop + s.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop s.txPool.Stop() s.eventMux.Stop() @@ -462,16 +462,14 @@ func (self *Ethereum) syncAccounts(tx *types.Transaction) { } } -func (self *Ethereum) blockBroadcastLoop() { +func (self *Ethereum) minedBroadcastLoop() { // automatically stops if unsubscribe - /* - for obj := range self.blockSub.Chan() { - switch ev := obj.(type) { - case core.ChainHeadEvent: - self.net.BroadcastLimited("eth", NewBlockMsg, math.Sqrt, []interface{}{ev.Block, ev.Block.Td}) - } + for obj := range self.minedBlockSub.Chan() { + switch ev := obj.(type) { + case core.NewMinedBlockEvent: + self.protocolManager.BroadcastBlock(ev.Block) } - */ + } } func saveProtocolVersion(db common.Database, protov int) { From f1ae3dc4aa8f706d758c4ee40fe6b3968ee18324 Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 02:27:50 +0200 Subject: [PATCH 08/26] geth: bump version number --- cmd/geth/main.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index e18b92a2e..c0953d75e 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -31,6 +31,8 @@ import ( "strconv" "time" + "path" + "github.com/codegangsta/cli" "github.com/ethereum/ethash" "github.com/ethereum/go-ethereum/accounts" @@ -42,13 +44,12 @@ import ( "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/logger" "github.com/peterh/liner" - "path" ) import _ "net/http/pprof" const ( ClientIdentifier = "Geth" - Version = "0.9.9" + Version = "0.9.10" ) var app = utils.NewApp(Version, "the go-ethereum command line interface") From a6c0a75f9a897f2f2003b696840467ad170d6a1b Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 02:38:13 +0200 Subject: [PATCH 09/26] eth: fixed proper BroadcastBlock for mined blocks --- eth/backend.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/backend.go b/eth/backend.go index 07552ad5a..7799326fe 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -467,7 +467,7 @@ func (self *Ethereum) minedBroadcastLoop() { for obj := range self.minedBlockSub.Chan() { switch ev := obj.(type) 
{ case core.NewMinedBlockEvent: - self.protocolManager.BroadcastBlock(ev.Block) + self.protocolManager.BroadcastBlock(ev.Block.Hash(), ev.Block) } } } From 8f873b762b54a033e891df03175a26cbfb582c43 Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 03:15:26 +0200 Subject: [PATCH 10/26] downloader: all handlers check for isBusy --- eth/downloader/downloader.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 5f9d9ed74..c5b951344 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -116,7 +116,7 @@ func (d *Downloader) UnregisterPeer(id string) { // checks fail an error will be returned. This method is synchronous func (d *Downloader) SynchroniseWithPeer(id string) (types.Blocks, error) { // Check if we're busy - if d.isFetchingHashes() || d.isDownloadingBlocks() || d.isProcessing() { + if d.isBusy() { return nil, errBusy } @@ -213,7 +213,7 @@ func (d *Downloader) selectPeer(p *peer) { // Make sure it's doing neither. Once done we can restart the // downloading process if the TD is higher. For now just get on // with whatever is going on. This prevents unecessary switching. - if !(d.isFetchingHashes() || d.isDownloadingBlocks() || d.isProcessing()) { + if !d.isBusy() { // selected peer must be better than our own // XXX we also check the peer's recent hash to make sure we // don't have it. Some peers report (i think) incorrect TD. @@ -340,10 +340,6 @@ out: // from the available peers. if d.queue.hashPool.Size() > 0 { availablePeers := d.peers.get(idleState) - if len(availablePeers) == 0 { - glog.V(logger.Detail).Infoln("No peers available out of", len(d.peers)) - } - for _, peer := range availablePeers { // Get a possible chunk. If nil is returned no chunk // could be returned due to no hashes available. @@ -440,7 +436,7 @@ func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) { d.queue.addBlock(id, block, td) // if neither go ahead to process - if !(d.isFetchingHashes() || d.isDownloadingBlocks()) { + if !d.isBusy() { // Check if the parent of the received block is known. // If the block is not know, request it otherwise, request. 
phash := block.ParentHash() @@ -519,3 +515,7 @@ func (d *Downloader) isDownloadingBlocks() bool { func (d *Downloader) isProcessing() bool { return atomic.LoadInt32(&d.processingBlocks) == 1 } + +func (d *Downloader) isBusy() bool { + return d.isFetchingHashes() || d.isDownloadingBlocks() || d.isProcessing() +} From 1bc2d83b6f5d0b8ddcd1b1501a5b9554526289ea Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 14:24:44 +0200 Subject: [PATCH 11/26] core: improved uncle validation error message --- core/block_processor.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/core/block_processor.go b/core/block_processor.go index d5a29b258..e3c284979 100644 --- a/core/block_processor.go +++ b/core/block_processor.go @@ -323,7 +323,7 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty } uncles.Add(block.Hash()) - for _, uncle := range block.Uncles() { + for i, uncle := range block.Uncles() { if uncles.Has(uncle.Hash()) { // Error not unique return UncleError("Uncle not unique") @@ -340,9 +340,8 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty } if err := sm.ValidateHeader(uncle, ancestorHeaders[uncle.ParentHash]); err != nil { - return ValidationError(fmt.Sprintf("%v", err)) + return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, uncle.Hash().Bytes()[:4], err)) } - } return nil From eef4776b5bb9d77a0dab0d9ac8da61fdbc72129f Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 14:25:22 +0200 Subject: [PATCH 12/26] eth: ignore NewBlockMsg with lower td --- eth/handler.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/eth/handler.go b/eth/handler.go index 065116fd0..bb12e1904 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -257,8 +257,9 @@ func (self *ProtocolManager) handleMsg(p *peer) error { }) // Make sure the block isn't already known. If this is the case simply drop - // the message and move on. - if self.chainman.HasBlock(hash) { + // the message and move on. If the TD is < currentTd; drop it as well. If this + // chain at some point becomes canonical, the downloader will fetch it. 
+ if self.chainman.HasBlock(hash) && self.chainman.Td().Cmp(request.TD) > 0 { break } From 8244825bbf9ca7342c052508f50a56b16c979a1e Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 15:14:12 +0200 Subject: [PATCH 13/26] downloader: reset the queue if a peer response with an empty hash set --- eth/downloader/downloader.go | 39 +++++++++++++++++++++++------------- eth/downloader/queue.go | 11 ++++++++++ eth/handler.go | 2 +- 3 files changed, 37 insertions(+), 15 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index c5b951344..c71cfa684 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -17,8 +17,9 @@ import ( ) const ( - maxBlockFetch = 256 // Amount of max blocks to be fetched per chunk - minDesiredPeerCount = 3 // Amount of peers desired to start syncing + maxBlockFetch = 256 // Amount of max blocks to be fetched per chunk + minDesiredPeerCount = 3 // Amount of peers desired to start syncing + blockTtl = 15 * time.Second // The amount of time it takes for a request to time out ) var ( @@ -96,7 +97,7 @@ func (d *Downloader) RegisterPeer(id string, td *big.Int, hash common.Hash, getH // add peer to our peer set d.peers[id] = peer // broadcast new peer - //d.newPeerCh <- peer + d.newPeerCh <- peer return nil } @@ -265,6 +266,9 @@ out: // XXX Make synchronous func (d *Downloader) startFetchingHashes(p *peer, hash common.Hash, ignoreInitial bool) error { + atomic.StoreInt32(&d.fetchingHashes, 1) + defer atomic.StoreInt32(&d.fetchingHashes, 0) + glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", hash.Bytes()[:4], p.id) start := time.Now() @@ -275,10 +279,8 @@ func (d *Downloader) startFetchingHashes(p *peer, hash common.Hash, ignoreInitia // Add the hash to the queue first d.queue.hashPool.Add(hash) } - // Get the first batch of hashes p.getHashes(hash) - atomic.StoreInt32(&d.fetchingHashes, 1) out: for { @@ -299,14 +301,16 @@ out: d.queue.put(hashSet) // Add hashes to the chunk set - // Check if we're done fetching - if !done && len(hashes) > 0 { + if len(hashes) == 0 { // Make sure the peer actually gave you something valid + glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", p.id) + d.queue.reset() + + break out + } else if !done { // Check if we're done fetching //fmt.Println("re-fetch. current =", d.queue.hashPool.Size()) // Get the next set of hashes p.getHashes(hashes[len(hashes)-1]) - atomic.StoreInt32(&d.fetchingHashes, 1) - } else { - atomic.StoreInt32(&d.fetchingHashes, 0) + } else { // we're done break out } } @@ -319,6 +323,7 @@ out: func (d *Downloader) startFetchingBlocks(p *peer) error { glog.V(logger.Detail).Infoln("Downloading", d.queue.hashPool.Size(), "blocks") atomic.StoreInt32(&d.downloadingBlocks, 1) + defer atomic.StoreInt32(&d.downloadingBlocks, 0) start := time.Now() @@ -364,8 +369,6 @@ out: // When there are no more queue and no more `fetching`. We can // safely assume we're done. Another part of the process will check // for parent errors and will re-request anything that's missing - atomic.StoreInt32(&d.downloadingBlocks, 0) - // Break out so that we can process with processing blocks break out } else { // Check for bad peers. 
Bad peers may indicate a peer not responding @@ -376,7 +379,7 @@ out: d.queue.mu.Lock() var badPeers []string for pid, chunk := range d.queue.fetching { - if time.Since(chunk.itime) > 5*time.Second { + if time.Since(chunk.itime) > blockTtl { badPeers = append(badPeers, pid) // remove peer as good peer from peer list d.UnregisterPeer(pid) } } d.queue.mu.Unlock() @@ -466,8 +469,11 @@ func (d *Downloader) process() error { // to a seperate goroutine where it periodically checks for linked pieces. types.BlockBy(types.Number).Sort(d.queue.blocks) blocks := d.queue.blocks + if len(blocks) == 0 { + return nil + } - glog.V(logger.Debug).Infoln("Inserting chain with", len(blocks), "blocks") + glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].Number(), blocks[len(blocks)-1].Number()) var err error // Loop untill we're out of blocks @@ -491,6 +497,11 @@ func (d *Downloader) process() error { } } break + } else if err != nil { + // Reset chain completely. This needs much, much improvement. + // instead: check all blocks leading down to this false block and remove it + blocks = nil + break } blocks = blocks[max:] } diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 4d1aa4e93..df3bf7087 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -31,6 +31,17 @@ func newqueue() *queue { } } +func (c *queue) reset() { + c.mu.Lock() + defer c.mu.Unlock() + + c.hashPool.Clear() + c.fetchPool.Clear() + c.blockHashes.Clear() + c.blocks = nil + c.fetching = make(map[string]*chunk) +} + // reserve a `max` set of hashes for `p` peer. func (c *queue) get(p *peer, max int) *chunk { c.mu.Lock() diff --git a/eth/handler.go b/eth/handler.go index bb12e1904..f3fad68b7 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -95,7 +95,7 @@ func NewProtocolManager(protocolVersion, networkId int, txpool txPool, chainman Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { peer := manager.newPeer(protocolVersion, networkId, p, rw) err := manager.handle(peer) - glog.V(logger.Detail).Infof("[%s]: %v\n", peer.id, err) + //glog.V(logger.Detail).Infof("[%s]: %v\n", peer.id, err) return err }, From 60613b57d1956275bb475a53b5085c4ead4ceb2c Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 17:35:03 +0200 Subject: [PATCH 14/26] downloader: make sure that hashes are only accepted from the active peer --- eth/downloader/downloader.go | 63 +++++++++++++++++++++++++----------- eth/handler.go | 11 +++++-- 2 files changed, 54 insertions(+), 20 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index c71cfa684..41484e927 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -2,6 +2,7 @@ package downloader import ( "errors" + "fmt" "math" "math/big" "sync" @@ -18,8 +19,10 @@ import ( const ( maxBlockFetch = 256 // Amount of max blocks to be fetched per chunk - minDesiredPeerCount = 3 // Amount of peers desired to start syncing - blockTtl = 15 * time.Second // The amount of time it takes for a request to time out + minDesiredPeerCount = 5 // Amount of peers desired to start syncing + peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount + blockTtl = 15 * time.Second // The amount of time it takes for a block request to time out + hashTtl = 20 * time.Second // The amount of time it takes for a hash request to time out ) var ( @@ -34,9 +37,10 @@ type hashCheckFn func(common.Hash) bool type hashIterFn func() (common.Hash, error) type currentTdFn func() *big.Int type Downloader struct { - mu
sync.RWMutex - queue *queue - peers peers + mu sync.RWMutex + queue *queue + peers peers + activePeer string // Callbacks hasBlock hashCheckFn @@ -51,7 +55,7 @@ type Downloader struct { // Channels newPeerCh chan *peer syncCh chan syncPack - HashCh chan []common.Hash + hashCh chan []common.Hash blockCh chan blockPack quit chan struct{} } @@ -76,7 +80,7 @@ func New(hasBlock hashCheckFn, insertChain chainInsertFn, currentTd currentTdFn) currentTd: currentTd, newPeerCh: make(chan *peer, 1), syncCh: make(chan syncPack, 1), - HashCh: make(chan []common.Hash, 1), + hashCh: make(chan []common.Hash, 1), blockCh: make(chan blockPack, 1), quit: make(chan struct{}), } @@ -181,8 +185,7 @@ func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) func (d *Downloader) peerHandler() { // itimer is used to determine when to start ignoring `minDesiredPeerCount` - //itimer := time.NewTicker(5 * time.Second) - itimer := time.NewTimer(5 * time.Second) + itimer := time.NewTimer(peerCountTimeout) out: for { select { @@ -233,12 +236,16 @@ out: for { select { case sync := <-d.syncCh: - selectedPeer := sync.peer - glog.V(logger.Detail).Infoln("Synchronising with the network using:", selectedPeer.id) + start := time.Now() + + var peer *peer = sync.peer + + d.activePeer = peer.id + glog.V(logger.Detail).Infoln("Synchronising with the network using:", peer.id) // Start the fetcher. This will block the update entirely // interupts need to be send to the appropriate channels // respectively. - if err := d.startFetchingHashes(selectedPeer, sync.hash, sync.ignoreInitial); err != nil { + if err := d.startFetchingHashes(peer, sync.hash, sync.ignoreInitial); err != nil { // handle error glog.V(logger.Debug).Infoln("Error fetching hashes:", err) // XXX Reset @@ -249,13 +256,13 @@ out: // take any available peers, seserve a chunk for each peer available, // let the peer deliver the chunkn and periodically check if a peer // has timedout. When done downloading, process blocks. - if err := d.startFetchingBlocks(selectedPeer); err != nil { + if err := d.startFetchingBlocks(peer); err != nil { glog.V(logger.Debug).Infoln("Error downloading blocks:", err) // XXX reset break } - glog.V(logger.Detail).Infoln("Sync completed") + glog.V(logger.Detail).Infoln("Network sync completed in", time.Since(start)) d.process() case <-d.quit: @@ -282,10 +289,12 @@ func (d *Downloader) startFetchingHashes(p *peer, hash common.Hash, ignoreInitia // Get the first batch of hashes p.getHashes(hash) + failureResponse := time.NewTimer(hashTtl) + out: for { select { - case hashes := <-d.HashCh: + case hashes := <-d.hashCh: var done bool // determines whether we're done fetching hashes (i.e. common hash found) hashSet := set.New() for _, hash := range hashes { @@ -313,15 +322,20 @@ out: } else { // we're done break out } + case <-failureResponse.C: + glog.V(logger.Debug).Infof("Peer (%s) didn't respond in time for hash request\n", p.id) + d.queue.reset() + + break out } } - glog.V(logger.Detail).Infof("Downloaded hashes (%d). 
Took %v\n", d.queue.hashPool.Size(), time.Since(start)) + glog.V(logger.Detail).Infof("Downloaded hashes (%d) in %v\n", d.queue.hashPool.Size(), time.Since(start)) return nil } func (d *Downloader) startFetchingBlocks(p *peer) error { - glog.V(logger.Detail).Infoln("Downloading", d.queue.hashPool.Size(), "blocks") + glog.V(logger.Detail).Infoln("Downloading", d.queue.hashPool.Size(), "block(s)") atomic.StoreInt32(&d.downloadingBlocks, 1) defer atomic.StoreInt32(&d.downloadingBlocks, 0) @@ -407,7 +421,20 @@ out: } } - glog.V(logger.Detail).Infoln("Download blocks: done. Took", time.Since(start)) + glog.V(logger.Detail).Infoln("Downloaded block(s) in", time.Since(start)) + + return nil +} + +func (d *Downloader) AddHashes(id string, hashes []common.Hash) error { + // make sure that the hashes that are being added are actually from the peer + // that's the current active peer. hashes that have been received from other + // peers are dropped and ignored. + if d.activePeer != id { + return fmt.Errorf("received hashes from %s while active peer is %s", id, d.activePeer) + } + + d.hashCh <- hashes return nil } diff --git a/eth/handler.go b/eth/handler.go index f3fad68b7..3aa9815f1 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -194,7 +194,10 @@ func (self *ProtocolManager) handleMsg(p *peer) error { if err := msgStream.Decode(&hashes); err != nil { break } - self.downloader.HashCh <- hashes + err := self.downloader.AddHashes(p.id, hashes) + if err != nil { + glog.V(logger.Debug).Infoln(err) + } case GetBlocksMsg: msgStream := rlp.NewStream(msg.Payload) @@ -259,7 +262,11 @@ func (self *ProtocolManager) handleMsg(p *peer) error { // Make sure the block isn't already known. If this is the case simply drop // the message and move on. If the TD is < currentTd; drop it as well. If this // chain at some point becomes canonical, the downloader will fetch it. 
- if self.chainman.HasBlock(hash) && self.chainman.Td().Cmp(request.TD) > 0 { + if self.chainman.HasBlock(hash) { + break + } + if self.chainman.Td().Cmp(request.TD) > 0 { + glog.V(logger.Debug).Infoln("dropped block", request.Block.Number(), "due to low TD", request.TD) break } From c2c24b3bb419a8ffffb58ec25788b951bef779f9 Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 18:54:57 +0200 Subject: [PATCH 15/26] downloader: improved downloading and synchronisation * Downloader's peers keeps track of peer's previously requested hashes so that we don't have to re-request * Changed `AddBlock` to be fully synchronous --- eth/downloader/downloader.go | 146 +++++++++------------------------- eth/downloader/peer.go | 15 +++- eth/downloader/queue.go | 3 + eth/downloader/synchronous.go | 77 ++++++++++++++++++ 4 files changed, 130 insertions(+), 111 deletions(-) create mode 100644 eth/downloader/synchronous.go diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 41484e927..810031c79 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -26,9 +26,12 @@ const ( ) var ( - errLowTd = errors.New("peer's TD is too low") - errBusy = errors.New("busy") - errUnknownPeer = errors.New("peer's unknown or unhealthy") + errLowTd = errors.New("peer's TD is too low") + errBusy = errors.New("busy") + errUnknownPeer = errors.New("peer's unknown or unhealthy") + errBadPeer = errors.New("action from bad peer ignored") + errTimeout = errors.New("timeout") + errEmptyHashSet = errors.New("empty hash set by peer") ) type hashCheckFn func(common.Hash) bool @@ -116,73 +119,6 @@ func (d *Downloader) UnregisterPeer(id string) { delete(d.peers, id) } -// SynchroniseWithPeer will select the peer and use it for synchronising. If an empty string is given -// it will use the best peer possible and synchronise if it's TD is higher than our own. If any of the -// checks fail an error will be returned. This method is synchronous -func (d *Downloader) SynchroniseWithPeer(id string) (types.Blocks, error) { - // Check if we're busy - if d.isBusy() { - return nil, errBusy - } - - // Attempt to select a peer. This can either be nothing, which returns, best peer - // or selected peer. If no peer could be found an error will be returned - var p *peer - if len(id) == 0 { - p = d.peers[id] - if p == nil { - return nil, errUnknownPeer - } - } else { - p = d.peers.bestPeer() - } - - // Make sure our td is lower than the peer's td - if p.td.Cmp(d.currentTd()) <= 0 || d.hasBlock(p.recentHash) { - return nil, errLowTd - } - - // Get the hash from the peer and initiate the downloading progress. - err := d.getFromPeer(p, p.recentHash, false) - if err != nil { - return nil, err - } - - return d.queue.blocks, nil -} - -// Synchronise will synchronise using the best peer. -func (d *Downloader) Synchronise() (types.Blocks, error) { - return d.SynchroniseWithPeer("") -} - -func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) error { - glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id) - // Start the fetcher. This will block the update entirely - // interupts need to be send to the appropriate channels - // respectively. - if err := d.startFetchingHashes(p, hash, ignoreInitial); err != nil { - // handle error - glog.V(logger.Debug).Infoln("Error fetching hashes:", err) - // XXX Reset - return err - } - - // Start fetching blocks in paralel. 
The strategy is simple - // take any available peers, seserve a chunk for each peer available, - // let the peer deliver the chunkn and periodically check if a peer - // has timedout. When done downloading, process blocks. - if err := d.startFetchingBlocks(p); err != nil { - glog.V(logger.Debug).Infoln("Error downloading blocks:", err) - // XXX reset - return err - } - - glog.V(logger.Detail).Infoln("Sync completed") - - return nil -} - func (d *Downloader) peerHandler() { // itimer is used to determine when to start ignoring `minDesiredPeerCount` itimer := time.NewTimer(peerCountTimeout) @@ -236,34 +172,14 @@ out: for { select { case sync := <-d.syncCh: - start := time.Now() - - var peer *peer = sync.peer - - d.activePeer = peer.id - glog.V(logger.Detail).Infoln("Synchronising with the network using:", peer.id) - // Start the fetcher. This will block the update entirely - // interupts need to be send to the appropriate channels - // respectively. - if err := d.startFetchingHashes(peer, sync.hash, sync.ignoreInitial); err != nil { - // handle error - glog.V(logger.Debug).Infoln("Error fetching hashes:", err) - // XXX Reset + var peer *peer = sync.peer + d.activePeer = peer.id + + err := d.getFromPeer(peer, sync.hash, sync.ignoreInitial) + if err != nil { break } - // Start fetching blocks in paralel. The strategy is simple - // take any available peers, seserve a chunk for each peer available, - // let the peer deliver the chunkn and periodically check if a peer - // has timedout. When done downloading, process blocks. - if err := d.startFetchingBlocks(peer); err != nil { - glog.V(logger.Debug).Infoln("Error downloading blocks:", err) - // XXX reset - break - } - - glog.V(logger.Detail).Infoln("Network sync completed in", time.Since(start)) - d.process() case <-d.quit: break out @@ -314,9 +230,8 @@ out: glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", p.id) d.queue.reset() - break out + return errEmptyHashSet } else if !done { // Check if we're done fetching - //fmt.Println("re-fetch. current =", d.queue.hashPool.Size()) // Get the next set of hashes p.getHashes(hashes[len(hashes)-1]) } else { // we're done @@ -324,9 +239,12 @@ out: } case <-failureResponse.C: glog.V(logger.Debug).Infof("Peer (%s) didn't respond in time for hash request\n", p.id) + // TODO instead of resetting the queue select a new peer from which we can start downloading hashes. + // 1. check for peer's best hash to be included in the current hash set; + // 2. resume from last point (hashes[len(hashes)-1]) using the newly selected peer. d.queue.reset() - break out + return errTimeout } } glog.V(logger.Detail).Infof("Downloaded hashes (%d) in %v\n", d.queue.hashPool.Size(), time.Since(start)) @@ -367,7 +285,6 @@ out: continue } - //fmt.Println("fetching for", peer.id) // XXX make fetch blocking. // Fetch the chunk and check for error. If the peer was somehow // already fetching a chunk due to a bug, it will be returned to @@ -417,7 +334,6 @@ out: } } - //fmt.Println(d.queue.hashPool.Size(), len(d.queue.fetching)) } } @@ -441,11 +357,14 @@ func (d *Downloader) AddHashes(id string, hashes []common.Hash) error { // Add an (unrequested) block to the downloader. This is usually done through the // NewBlockMsg by the protocol handler. -func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) { +// Adding blocks is done synchronously. If there are missing blocks, blocks will be +// fetched first. If the downloader is busy or if some other process failed, an error +// will be returned.
+func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) error { hash := block.Hash() if d.hasBlock(hash) { - return + return fmt.Errorf("known block %x", hash.Bytes()[:4]) } peer := d.peers.getPeer(id) @@ -453,7 +372,7 @@ func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) { // and add the block. Otherwise just ignore it if peer == nil { glog.V(logger.Detail).Infof("Ignored block from bad peer %s\n", id) - return + return errBadPeer } peer.mu.Lock() @@ -466,17 +385,24 @@ func (d *Downloader) AddBlock(id string, block *types.Block, td *big.Int) { d.queue.addBlock(id, block, td) // if neither go ahead to process - if !d.isBusy() { - // Check if the parent of the received block is known. - // If the block is not know, request it otherwise, request. - phash := block.ParentHash() - if !d.hasBlock(phash) { - glog.V(logger.Detail).Infof("Missing parent %x, requires fetching\n", phash.Bytes()[:4]) - d.syncCh <- syncPack{peer, peer.recentHash, true} - } else { - d.process() + if d.isBusy() { + return errBusy + } + + // Check if the parent of the received block is known. + // If the block is not know, request it otherwise, request. + phash := block.ParentHash() + if !d.hasBlock(phash) { + glog.V(logger.Detail).Infof("Missing parent %x, requires fetching\n", phash.Bytes()[:4]) + + // Get the missing hashes from the peer (synchronously) + err := d.getFromPeer(peer, peer.recentHash, true) + if err != nil { + return err } } + + return d.process() } // Deliver a chunk to the downloader. This is usually done through the BlocksMsg by diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 4cd306a05..5d5208e8e 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/ethereum/go-ethereum/common" + "gopkg.in/fatih/set.v0" ) const ( @@ -64,13 +65,23 @@ type peer struct { td *big.Int recentHash common.Hash + requested *set.Set + getHashes hashFetcherFn getBlocks blockFetcherFn } // create a new peer func newPeer(id string, td *big.Int, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) *peer { - return &peer{id: id, td: td, recentHash: hash, getHashes: getHashes, getBlocks: getBlocks, state: idleState} + return &peer{ + id: id, + td: td, + recentHash: hash, + getHashes: getHashes, + getBlocks: getBlocks, + state: idleState, + requested: set.New(), + } } // fetch a chunk using the peer @@ -82,6 +93,8 @@ func (p *peer) fetch(chunk *chunk) error { return errors.New("peer already fetching chunk") } + p.requested.Merge(chunk.hashes) + // set working state p.state = workingState // convert the set to a fetchable slice diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index df3bf7087..5745bf1f8 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -65,6 +65,9 @@ func (c *queue) get(p *peer, max int) *chunk { return true }) + // remove hashes that have previously been fetched + hashes.Separate(p.requested) + // remove the fetchable hashes from hash pool c.hashPool.Separate(hashes) c.fetchPool.Merge(hashes) diff --git a/eth/downloader/synchronous.go b/eth/downloader/synchronous.go new file mode 100644 index 000000000..0511533cf --- /dev/null +++ b/eth/downloader/synchronous.go @@ -0,0 +1,77 @@ +package downloader + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" +) + +// THIS IS PENDING AND TO DO CHANGES FOR MAKING THE DOWNLOADER 
SYNCHRONOUS + +// SynchroniseWithPeer will select the peer and use it for synchronising. If an empty string is given +// it will use the best peer possible and synchronise if its TD is higher than our own. If any of the +// checks fail an error will be returned. This method is synchronous +func (d *Downloader) SynchroniseWithPeer(id string) (types.Blocks, error) { + // Check if we're busy + if d.isBusy() { + return nil, errBusy + } + + // Attempt to select a peer. This can either be nothing, which returns the best peer, + // or a selected peer. If no peer could be found an error will be returned + var p *peer + if len(id) == 0 { + p = d.peers[id] + if p == nil { + return nil, errUnknownPeer + } + } else { + p = d.peers.bestPeer() + } + + // Make sure our td is lower than the peer's td + if p.td.Cmp(d.currentTd()) <= 0 || d.hasBlock(p.recentHash) { + return nil, errLowTd + } + + // Get the hash from the peer and initiate the downloading process. + err := d.getFromPeer(p, p.recentHash, false) + if err != nil { + return nil, err + } + + return d.queue.blocks, nil +} + +// Synchronise will synchronise using the best peer. +func (d *Downloader) Synchronise() (types.Blocks, error) { + return d.SynchroniseWithPeer("") +} + +func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) error { + glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id) + // Start the fetcher. This will block the update entirely + // interupts need to be send to the appropriate channels + // respectively. + if err := d.startFetchingHashes(p, hash, ignoreInitial); err != nil { + // handle error + glog.V(logger.Debug).Infoln("Error fetching hashes:", err) + // XXX Reset + return err + } + + // Start fetching blocks in parallel. The strategy is simple: + // take any available peers, reserve a chunk for each peer available, + // let the peer deliver the chunk and periodically check if a peer + // has timed out. When done downloading, process blocks. + if err := d.startFetchingBlocks(p); err != nil { + glog.V(logger.Debug).Infoln("Error downloading blocks:", err) + // XXX reset + return err + } + + glog.V(logger.Detail).Infoln("Sync completed") + + return nil +} From 0d536734fe10e62dce86db1a6128b383ef66921d Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 18:55:50 +0200 Subject: [PATCH 16/26] eth: adapted to new synchronous api of downloader's AddBlock --- eth/downloader/downloader.go | 2 -- eth/downloader/synchronous.go | 2 ++ eth/handler.go | 14 ++++++++++++-- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 810031c79..6dce40b04 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -173,8 +173,6 @@ out: select { case sync := <-d.syncCh: var peer *peer = sync.peer - d.activePeer = peer.id - err := d.getFromPeer(peer, sync.hash, sync.ignoreInitial) if err != nil { break } diff --git a/eth/downloader/synchronous.go b/eth/downloader/synchronous.go index 0511533cf..7bb49d24e 100644 --- a/eth/downloader/synchronous.go +++ b/eth/downloader/synchronous.go @@ -50,6 +50,8 @@ func (d *Downloader) Synchronise() (types.Blocks, error) { } func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) error { + d.activePeer = p.id + glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id) // Start the fetcher.
This will block the update entirely // interupts need to be send to the appropriate channels diff --git a/eth/handler.go b/eth/handler.go index 3aa9815f1..749809175 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -265,10 +265,12 @@ func (self *ProtocolManager) handleMsg(p *peer) error { if self.chainman.HasBlock(hash) { break } - if self.chainman.Td().Cmp(request.TD) > 0 { + /* XXX unsure about this + if self.chainman.Td().Cmp(request.TD) > 0 && new(big.Int).Add(request.Block.Number(), big.NewInt(7)).Cmp(self.chainman.CurrentBlock().Number()) < 0 { glog.V(logger.Debug).Infoln("dropped block", request.Block.Number(), "due to low TD", request.TD) break } + */ // Attempt to insert the newly received by checking if the parent exists. // if the parent exists we process the block and propagate to our peers @@ -281,7 +283,15 @@ func (self *ProtocolManager) handleMsg(p *peer) error { } self.BroadcastBlock(hash, request.Block) } else { - self.downloader.AddBlock(p.id, request.Block, request.TD) + // adding blocks is synchronous + go func() { + err := self.downloader.AddBlock(p.id, request.Block, request.TD) + if err != nil { + glog.V(logger.Detail).Infoln("downloader err:", err) + return + } + self.BroadcastBlock(hash, request.Block) + }() } default: return errResp(ErrInvalidMsgCode, "%v", msg.Code) From 78e37e98e77b467e6950922da4ea99ff146ab21f Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 19:14:25 +0200 Subject: [PATCH 17/26] downloader: fixed a race condition for download status --- eth/downloader/downloader.go | 1 - 1 file changed, 1 deletion(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 6dce40b04..290e3b474 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -293,7 +293,6 @@ out: d.queue.put(chunk.hashes) } } - atomic.StoreInt32(&d.downloadingBlocks, 1) } else if len(d.queue.fetching) == 0 { // When there are no more queue and no more `fetching`. We can // safely assume we're done. Another part of the process will check From 7c5d50f627b223a8b0217f6ca684b4c7d1d877ef Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 19:29:30 +0200 Subject: [PATCH 18/26] downloader: throw an error if there are no peers available for download If all peers have been tried during the block download process and some hashes are unfetchable (available peers > 0 and fetching == 0) throw an error so the process can be aborted. 
--- eth/downloader/downloader.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 290e3b474..2b5dbe952 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -26,12 +26,13 @@ const ( ) var ( - errLowTd = errors.New("peer's TD is too low") - errBusy = errors.New("busy") - errUnknownPeer = errors.New("peer's unknown or unhealthy") - errBadPeer = errors.New("action from bad peer ignored") - errTimeout = errors.New("timeout") - errEmptyHashSet = errors.New("empty hash set by peer") + errLowTd = errors.New("peer's TD is too low") + errBusy = errors.New("busy") + errUnknownPeer = errors.New("peer's unknown or unhealthy") + errBadPeer = errors.New("action from bad peer ignored") + errTimeout = errors.New("timeout") + errEmptyHashSet = errors.New("empty hash set by peer") + errPeersUnavailable = errors.New("no peers available or all peers tried for block download process") ) type hashCheckFn func(common.Hash) bool @@ -293,6 +294,15 @@ out: d.queue.put(chunk.hashes) } } + + // make sure that we have peers available for fetching. If all peers have been tried + // and all failed throw an error + if len(availablePeers) > 0 && d.queue.fetchPool.Size() == 0 { + d.queue.reset() + + return errPeersUnavailable + } + } else if len(d.queue.fetching) == 0 { // When there are no more queue and no more `fetching`. We can // safely assume we're done. Another part of the process will check From 6830ddb659270b59b5a310fdc0e581b09fae5326 Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 20:25:55 +0200 Subject: [PATCH 19/26] downloader: free up peers from work when the downloader resets --- eth/downloader/downloader.go | 4 +++- eth/downloader/peer.go | 10 ++++++++++ eth/downloader/queue.go | 13 +++++++++---- 3 files changed, 22 insertions(+), 5 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 2b5dbe952..2f98a1414 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -297,8 +297,9 @@ out: // make sure that we have peers available for fetching. If all peers have been tried // and all failed throw an error - if len(availablePeers) > 0 && d.queue.fetchPool.Size() == 0 { + if len(d.queue.fetching) == 0 { d.queue.reset() + d.peers.reset() return errPeersUnavailable } @@ -337,6 +338,7 @@ out: d.queue.deliver(pid, nil) if peer := d.peers[pid]; peer != nil { peer.demote() + peer.reset() } } diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 5d5208e8e..ec2a61550 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -20,6 +20,12 @@ type blockFetcherFn func([]common.Hash) error // XXX make threadsafe!!!! 
type peers map[string]*peer +func (p peers) reset() { + for _, peer := range p { + p.reset() + } +} + func (p peers) get(state int) []*peer { var peers []*peer for _, peer := range p { @@ -128,3 +134,7 @@ func (p *peer) demote() { p.rep = 0 } } + +func (p *peer) reset() { + p.state = idleState +} diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 5745bf1f8..ce3aa9850 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -60,13 +60,18 @@ func (c *queue) get(p *peer, max int) *chunk { return false } - hashes.Add(v) - i++ + // Skip any hashes that have previously been requested from the peer + if !p.requested.Has(v) { + hashes.Add(v) + i++ + } return true }) - // remove hashes that have previously been fetched - hashes.Separate(p.requested) + // if no hashes can be requested return a nil chunk + if hashes.Size() == 0 { + return nil + } // remove the fetchable hashes from hash pool c.hashPool.Separate(hashes) From c6c22301faaa47e18efcc6d6127b4d415b9ded21 Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 20:26:17 +0200 Subject: [PATCH 20/26] miner: changed listener --- miner/worker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/miner/worker.go b/miner/worker.go index daabd3db5..007213a29 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -158,7 +158,7 @@ out: select { case event := <-events.Chan(): switch ev := event.(type) { - case core.ChainHeadEvent: + case core.NewMinedBlockEvent: self.commitNewWork() case core.ChainSideEvent: self.uncleMu.Lock() From a1d97ea4dbb5b4462b898f32b1c55e925a465227 Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 20:35:49 +0200 Subject: [PATCH 21/26] typo --- eth/downloader/peer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index ec2a61550..88ede16f9 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -22,7 +22,7 @@ type peers map[string]*peer func (p peers) reset() { for _, peer := range p { - p.reset() + peer.reset() } } From c8cc523d4d073f3405057554b5a0881118eba6f1 Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 20:51:05 +0200 Subject: [PATCH 22/26] miner: reverted back to old event --- miner/worker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/miner/worker.go b/miner/worker.go index 007213a29..daabd3db5 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -158,7 +158,7 @@ out: select { case event := <-events.Chan(): switch ev := event.(type) { - case core.NewMinedBlockEvent: + case core.ChainHeadEvent: self.commitNewWork() case core.ChainSideEvent: self.uncleMu.Lock() From 03b4cf74a2d2ce2f1ff39f0354b9577425e6524e Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 23:53:30 +0200 Subject: [PATCH 23/26] geth: added identity flag which allows to set a custom node name --- cmd/geth/main.go | 1 + cmd/utils/flags.go | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index c0953d75e..dab167bbb 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -218,6 +218,7 @@ JavaScript API. 
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso }, } app.Flags = []cli.Flag{ + utils.IdentityFlag, utils.UnlockedAccountFlag, utils.PasswordFileFlag, utils.BootnodesFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 8141fae82..a1d9eedda 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -89,6 +89,10 @@ var ( Usage: "Blockchain version", Value: core.BlockChainVersion, } + IdentityFlag = cli.StringFlag{ + Name: "identity", + Usage: "node name", + } // miner settings MinerThreadsFlag = cli.IntFlag{ @@ -242,6 +246,11 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config { // Set the log dir glog.SetLogDir(ctx.GlobalString(LogFileFlag.Name)) + customName := ctx.GlobalString(IdentityFlag.Name) + if len(customName) > 0 { + clientID += "/" + customName + } + return ð.Config{ Name: common.MakeName(clientID, version), DataDir: ctx.GlobalString(DataDirFlag.Name), From 50e096e627c8c07b4dda3a7221dda5f32dc5c5cb Mon Sep 17 00:00:00 2001 From: obscuren Date: Sat, 18 Apr 2015 23:56:08 +0200 Subject: [PATCH 24/26] downloader: don't remove peers. keep them around --- eth/downloader/downloader.go | 5 +++-- eth/handler.go | 11 +++++++++++ eth/protocol_test.go | 18 +++--------------- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 2f98a1414..8f955b483 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -176,6 +176,7 @@ out: var peer *peer = sync.peer err := d.getFromPeer(peer, sync.hash, sync.ignoreInitial) if err != nil { + glog.V(logger.Detail).Infoln(err) break } @@ -301,7 +302,7 @@ out: d.queue.reset() d.peers.reset() - return errPeersUnavailable + return fmt.Errorf("%v avaialable = %d. total = %d", errPeersUnavailable, len(availablePeers), len(d.peers)) } } else if len(d.queue.fetching) == 0 { @@ -321,7 +322,7 @@ out: if time.Since(chunk.itime) > blockTtl { badPeers = append(badPeers, pid) // remove peer as good peer from peer list - d.UnregisterPeer(pid) + //d.UnregisterPeer(pid) } } d.queue.mu.Unlock() diff --git a/eth/handler.go b/eth/handler.go index 749809175..effe25ae3 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -185,7 +185,16 @@ func (self *ProtocolManager) handleMsg(p *peer) error { if request.Amount > maxHashes { request.Amount = maxHashes } + hashes := self.chainman.GetBlockHashesFromHash(request.Hash, request.Amount) + + if glog.V(logger.Debug) { + if len(hashes) == 0 { + glog.Infof("invalid block hash %x", request.Hash.Bytes()[:4]) + } + } + + // returns either requested hashes or nothing (i.e. 
not found) return p.sendBlockHashes(hashes) case BlockHashesMsg: msgStream := rlp.NewStream(msg.Payload) @@ -282,6 +291,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error { return nil } self.BroadcastBlock(hash, request.Block) + fmt.Println(request.Block.Hash().Hex(), "our calculated TD =", request.Block.Td, "their TD =", request.TD) } else { // adding blocks is synchronous go func() { @@ -291,6 +301,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error { return } self.BroadcastBlock(hash, request.Block) + fmt.Println(request.Block.Hash().Hex(), "our calculated TD =", request.Block.Td, "their TD =", request.TD) }() } default: diff --git a/eth/protocol_test.go b/eth/protocol_test.go index 7c724f7a7..d44f66b89 100644 --- a/eth/protocol_test.go +++ b/eth/protocol_test.go @@ -1,20 +1,7 @@ package eth -import ( - "log" - "math/big" - "os" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/errs" - ethlogger "github.com/ethereum/go-ethereum/logger" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/discover" -) +/* +TODO All of these tests need to be re-written var logsys = ethlogger.NewStdLogSystem(os.Stdout, log.LstdFlags, ethlogger.LogLevel(ethlogger.DebugDetailLevel)) @@ -398,3 +385,4 @@ func TestTransactionsMsg(t *testing.T) { eth.checkError(ErrDecode, delay) } +*/ From 434dea3caf03515e69858c947ba5e0aad6b9c67a Mon Sep 17 00:00:00 2001 From: obscuren Date: Sun, 19 Apr 2015 00:03:26 +0200 Subject: [PATCH 25/26] eth: removed debug messages to stdout --- eth/handler.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/eth/handler.go b/eth/handler.go index effe25ae3..1dbc62cce 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -291,7 +291,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error { return nil } self.BroadcastBlock(hash, request.Block) - fmt.Println(request.Block.Hash().Hex(), "our calculated TD =", request.Block.Td, "their TD =", request.TD) + //fmt.Println(request.Block.Hash().Hex(), "our calculated TD =", request.Block.Td, "their TD =", request.TD) } else { // adding blocks is synchronous go func() { @@ -301,7 +301,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error { return } self.BroadcastBlock(hash, request.Block) - fmt.Println(request.Block.Hash().Hex(), "our calculated TD =", request.Block.Td, "their TD =", request.TD) + //fmt.Println(request.Block.Hash().Hex(), "our calculated TD =", request.Block.Td, "their TD =", request.TD) }() } default: From 164b878854b58aed833eb704579343099854735f Mon Sep 17 00:00:00 2001 From: obscuren Date: Sun, 19 Apr 2015 00:08:57 +0200 Subject: [PATCH 26/26] cleanup --- core/chain_manager.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/core/chain_manager.go b/core/chain_manager.go index 8371a129d..4f1e1e68a 100644 --- a/core/chain_manager.go +++ b/core/chain_manager.go @@ -330,14 +330,13 @@ func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) ( } // XXX Could be optimised by using a different database which only holds hashes (i.e., linked list) for i := uint64(0); i < max; i++ { - parentHash := block.Header().ParentHash - block = self.GetBlock(parentHash) + block = self.GetBlock(block.ParentHash()) if block == nil { break } chain = append(chain, block.Hash()) - if block.Header().Number.Cmp(common.Big0) <= 0 { + if block.Number().Cmp(common.Big0) <= 0 { break } }
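The final hunk above walks the chain backwards through parent hashes, collecting up to max ancestor hashes and stopping once the genesis block is reached. A small stand-alone Go sketch of that traversal follows; the map-based index and the names used here are illustrative stand-ins, not the go-ethereum API.

package main

import "fmt"

// block is a toy stand-in for types.Block: a number, its own hash and the
// hash of its parent.
type block struct {
	number     uint64
	hash       string
	parentHash string
}

// getBlockHashesFromHash mirrors the shape of the cleaned-up loop: start at
// hash, repeatedly hop to the parent and append each ancestor hash until max
// hashes are collected or the genesis block (number 0) is reached.
func getBlockHashesFromHash(index map[string]*block, hash string, max uint64) []string {
	var chain []string
	b, ok := index[hash]
	if !ok {
		return chain
	}
	for i := uint64(0); i < max; i++ {
		b = index[b.parentHash]
		if b == nil {
			break
		}
		chain = append(chain, b.hash)
		if b.number == 0 {
			break
		}
	}
	return chain
}

func main() {
	// genesis <- a <- b <- c
	index := map[string]*block{
		"genesis": {number: 0, hash: "genesis", parentHash: ""},
		"a":       {number: 1, hash: "a", parentHash: "genesis"},
		"b":       {number: 2, hash: "b", parentHash: "a"},
		"c":       {number: 3, hash: "c", parentHash: "b"},
	}
	fmt.Println(getBlockHashesFromHash(index, "c", 16)) // [b a genesis]
}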