Merge pull request #1216 from karalabe/fix-eth-dataraces

Fix various data races in eth and core
Jeffrey Wilcke 2015-06-09 06:53:49 -07:00
commit 365576620a
5 changed files with 85 additions and 38 deletions

View File

@@ -56,10 +56,7 @@ func CalcTD(block, parent *types.Block) *big.Int {
 	if parent == nil {
 		return block.Difficulty()
 	}
-	td := new(big.Int).Add(parent.Td, block.Header().Difficulty)
-	return td
+	return new(big.Int).Add(parent.Td, block.Header().Difficulty)
 }

 func CalcGasLimit(parent *types.Block) *big.Int {
@@ -178,7 +175,7 @@ func (self *ChainManager) Td() *big.Int {
 	self.mu.RLock()
 	defer self.mu.RUnlock()

-	return self.td
+	return new(big.Int).Set(self.td)
 }

 func (self *ChainManager) GasLimit() *big.Int {
@@ -204,7 +201,7 @@ func (self *ChainManager) Status() (td *big.Int, currentBlock common.Hash, genes
 	self.mu.RLock()
 	defer self.mu.RUnlock()

-	return self.td, self.currentBlock.Hash(), self.genesisBlock.Hash()
+	return new(big.Int).Set(self.td), self.currentBlock.Hash(), self.genesisBlock.Hash()
 }

 func (self *ChainManager) SetProcessor(proc types.BlockProcessor) {
@@ -382,8 +379,8 @@ func (self *ChainManager) ExportN(w io.Writer, first uint64, last uint64) error
 func (bc *ChainManager) insert(block *types.Block) {
 	key := append(blockNumPre, block.Number().Bytes()...)
 	bc.blockDb.Put(key, block.Hash().Bytes())

 	bc.blockDb.Put([]byte("LastBlock"), block.Hash().Bytes())
 	bc.currentBlock = block
 	bc.lastBlockHash = block.Hash()
 }
@@ -488,8 +485,7 @@ func (self *ChainManager) GetAncestors(block *types.Block, length int) (blocks [
 }

 func (bc *ChainManager) setTotalDifficulty(td *big.Int) {
-	//bc.blockDb.Put([]byte("LTD"), td.Bytes())
-	bc.td = td
+	bc.td = new(big.Int).Set(td)
 }

 func (self *ChainManager) CalcTotalDiff(block *types.Block) (*big.Int, error) {
@@ -544,6 +540,9 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
 	self.wg.Add(1)
 	defer self.wg.Done()

+	self.mu.Lock()
+	defer self.mu.Unlock()
+
 	self.chainmu.Lock()
 	defer self.chainmu.Unlock()
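The thread running through these chain manager hunks is that td is a mutable *big.Int shared across goroutines: returning the internal pointer from Td() and Status() let callers read the value while setTotalDifficulty rewrote it in place. Below is a minimal sketch of the same copy-in/copy-out discipline on an invented tracker type (not code from this PR), showing where the copies sit relative to the lock. Because both the getter and the setter copy, no caller ever holds a pointer into the guarded state, so the lock's scope covers every access.

package main

import (
    "fmt"
    "math/big"
    "sync"
)

// tracker is a stand-in for any struct sharing a mutable *big.Int, the way
// ChainManager shares td; the name and shape are made up for this example.
type tracker struct {
    mu sync.RWMutex
    td *big.Int
}

// Td hands out a snapshot taken under the read lock. Returning t.td itself
// would leak the internal pointer past the lock: a caller could then read
// the big.Int while SetTd rewrites it, which is the race the diff removes.
func (t *tracker) Td() *big.Int {
    t.mu.RLock()
    defer t.mu.RUnlock()
    return new(big.Int).Set(t.td)
}

// SetTd copies the argument instead of retaining the caller's pointer,
// mirroring setTotalDifficulty above.
func (t *tracker) SetTd(td *big.Int) {
    t.mu.Lock()
    defer t.mu.Unlock()
    t.td = new(big.Int).Set(td)
}

func main() {
    t := &tracker{td: big.NewInt(0)}

    var wg sync.WaitGroup
    for i := 1; i <= 8; i++ {
        wg.Add(1)
        go func(n int64) {
            defer wg.Done()
            t.SetTd(big.NewInt(n)) // concurrent writers
            _ = t.Td()             // and readers, with no shared big.Int
        }(int64(i))
    }
    wg.Wait()
    fmt.Println("final td:", t.Td())
}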

View File

@@ -157,7 +157,7 @@ func (pm *ProtocolManager) handle(p *peer) error {
 	}
 	defer pm.removePeer(p.id)

-	if err := pm.downloader.RegisterPeer(p.id, p.recentHash, p.requestHashes, p.requestBlocks); err != nil {
+	if err := pm.downloader.RegisterPeer(p.id, p.Head(), p.requestHashes, p.requestBlocks); err != nil {
 		return err
 	}
 	// propagate existing transactions. new transactions appearing
@@ -303,7 +303,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
 		// Mark the hashes as present at the remote node
 		for _, hash := range hashes {
 			p.blockHashes.Add(hash)
-			p.recentHash = hash
+			p.SetHead(hash)
 		}
 		// Schedule all the unknown hashes for retrieval
 		unknown := make([]common.Hash, 0, len(hashes))
@@ -354,9 +354,9 @@ func (pm *ProtocolManager) importBlock(p *peer, block *types.Block, td *big.Int)
 	// Mark the block as present at the remote node (don't duplicate already held data)
 	p.blockHashes.Add(hash)
-	p.recentHash = hash
+	p.SetHead(hash)
 	if td != nil {
-		p.td = td
+		p.SetTd(td)
 	}
 	// Log the block's arrival
 	_, chainHead, _ := pm.chainman.Status()
@@ -369,7 +369,7 @@ func (pm *ProtocolManager) importBlock(p *peer, block *types.Block, td *big.Int)
 	})
 	// If the block's already known or its difficulty is lower than ours, drop
 	if pm.chainman.HasBlock(hash) {
-		p.td = pm.chainman.GetBlock(hash).Td // update the peer's TD to the real value
+		p.SetTd(pm.chainman.GetBlock(hash).Td) // update the peer's TD to the real value
 		return nil
 	}
 	if td != nil && pm.chainman.Td().Cmp(td) > 0 && new(big.Int).Add(block.Number(), big.NewInt(7)).Cmp(pm.chainman.CurrentBlock().Number()) < 0 {

View File

@@ -40,9 +40,11 @@ type peer struct {
 	protv, netid int

-	recentHash common.Hash
 	id string

+	head common.Hash
 	td   *big.Int
+	lock sync.RWMutex

 	genesis, ourHash common.Hash
 	ourTd            *big.Int
@@ -51,14 +53,14 @@ type peer struct {
 	blockHashes *set.Set
 }

-func newPeer(protv, netid int, genesis, recentHash common.Hash, td *big.Int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
+func newPeer(protv, netid int, genesis, head common.Hash, td *big.Int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
 	id := p.ID()

 	return &peer{
 		Peer:    p,
 		rw:      rw,
 		genesis: genesis,
-		ourHash: recentHash,
+		ourHash: head,
 		ourTd:   td,
 		protv:   protv,
 		netid:   netid,
@@ -68,6 +70,39 @@ func newPeer(protv, netid int, genesis, recentHash common.Hash, td *big.Int, p *
 	}
 }

+// Head retrieves a copy of the current head (most recent) hash of the peer.
+func (p *peer) Head() (hash common.Hash) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	copy(hash[:], p.head[:])
+	return hash
+}
+
+// SetHead updates the head (most recent) hash of the peer.
+func (p *peer) SetHead(hash common.Hash) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	copy(p.head[:], hash[:])
+}
+
+// Td retrieves the current total difficulty of a peer.
+func (p *peer) Td() *big.Int {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	return new(big.Int).Set(p.td)
+}
+
+// SetTd updates the current total difficulty of a peer.
+func (p *peer) SetTd(td *big.Int) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	p.td.Set(td)
+}
+
 // sendTransactions sends transactions to the peer and includes the hashes
 // in it's tx hash set for future reference. The tx hash will allow the
 // manager to check whether the peer has already received this particular
@@ -160,7 +195,7 @@ func (p *peer) handleStatus() error {
 	// Set the total difficulty of the peer
 	p.td = status.TD

 	// set the best hash of the peer
-	p.recentHash = status.CurrentBlock
+	p.head = status.CurrentBlock

 	return <-errc
 }
@@ -256,11 +291,14 @@ func (ps *peerSet) BestPeer() *peer {
 	ps.lock.RLock()
 	defer ps.lock.RUnlock()

-	var best *peer
+	var (
+		bestPeer *peer
+		bestTd   *big.Int
+	)
 	for _, p := range ps.peers {
-		if best == nil || p.td.Cmp(best.td) > 0 {
-			best = p
+		if td := p.Td(); bestPeer == nil || td.Cmp(bestTd) > 0 {
+			bestPeer, bestTd = p, td
 		}
 	}
-	return best
+	return bestPeer
 }
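The peer changes apply the same recipe per connection: head and td move behind a sync.RWMutex with copying accessors, and the rewritten BestPeer compares snapshots from Td() instead of reaching into p.td while another goroutine updates it. The snippet below is an illustrative exercise, not test code from the PR, on a simplified peer type with [32]byte standing in for common.Hash; it hammers the accessors from two goroutines and stays quiet under the race detector.

package main

import (
    "math/big"
    "sync"
)

// peer is a stripped-down stand-in for the eth peer type in the diff above.
type peer struct {
    lock sync.RWMutex
    head [32]byte
    td   *big.Int
}

func (p *peer) Head() [32]byte {
    p.lock.RLock()
    defer p.lock.RUnlock()
    return p.head // arrays are values, so this already hands back a copy
}

func (p *peer) SetHead(hash [32]byte) {
    p.lock.Lock()
    defer p.lock.Unlock()
    p.head = hash
}

func (p *peer) Td() *big.Int {
    p.lock.RLock()
    defer p.lock.RUnlock()
    return new(big.Int).Set(p.td) // snapshot, never the internal pointer
}

func (p *peer) SetTd(td *big.Int) {
    p.lock.Lock()
    defer p.lock.Unlock()
    p.td.Set(td)
}

// bestPeer mirrors the rewritten BestPeer: it only ever compares snapshots.
func bestPeer(peers []*peer) *peer {
    var (
        best   *peer
        bestTd *big.Int
    )
    for _, p := range peers {
        if td := p.Td(); best == nil || td.Cmp(bestTd) > 0 {
            best, bestTd = p, td
        }
    }
    return best
}

func main() {
    peers := []*peer{{td: big.NewInt(1)}, {td: big.NewInt(2)}}

    var wg sync.WaitGroup
    wg.Add(2)
    go func() { // writer: keeps bumping one peer's state
        defer wg.Done()
        for i := int64(0); i < 1000; i++ {
            peers[0].SetTd(big.NewInt(i))
            peers[0].SetHead([32]byte{byte(i)})
        }
    }()
    go func() { // reader: repeatedly picks the best peer
        defer wg.Done()
        for i := 0; i < 1000; i++ {
            _ = bestPeer(peers)
            _ = peers[0].Head()
        }
    }()
    wg.Wait() // go run -race stays clean; drop the locks and it does not
}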

View File

@@ -109,19 +109,27 @@ func (pm *ProtocolManager) fetcher() {
 			// If any explicit fetches were replied to, import them
 			if count := len(explicit); count > 0 {
 				glog.V(logger.Debug).Infof("Importing %d explicitly fetched blocks", count)
-				go func() {
+				// Create a closure with the retrieved blocks and origin peers
+				peers := make([]*peer, 0, count)
+				blocks := make([]*types.Block, 0, count)
 				for _, block := range explicit {
 					hash := block.Hash()
-					// Make sure there's still something pending to import
 					if announce := pending[hash]; announce != nil {
+						peers = append(peers, announce.peer)
+						blocks = append(blocks, block)
 						delete(pending, hash)
-						if err := pm.importBlock(announce.peer, block, nil); err != nil {
+					}
+				}
+				// Run the importer on a new thread
+				go func() {
+					for i := 0; i < len(blocks); i++ {
+						if err := pm.importBlock(peers[i], blocks[i], nil); err != nil {
 							glog.V(logger.Detail).Infof("Failed to import explicitly fetched block: %v", err)
 							return
 						}
 					}
-				}
 				}()
 			}
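The fetcher change is a different kind of fix: the old goroutine closed over the pending map and read and deleted from it concurrently with the fetcher loop that owns it. The rewrite snapshots everything the import needs, blocks and their origin peers, into fresh slices while still on the owning goroutine, and only then spawns the worker over those slices. Here is a generic sketch of that copy-out-then-go pattern, with made-up job and handler names rather than the eth types.

package main

import (
    "fmt"
    "sync"
)

type job struct{ id int }

func main() {
    // pending is owned by this goroutine; nothing else may touch it directly.
    pending := map[int]*job{1: {1}, 2: {2}, 3: {3}}

    var wg sync.WaitGroup

    // Racy shape (roughly what the old fetcher did): the spawned goroutine
    // reads and deletes from pending while the owner keeps mutating it.
    //
    //   go func() {
    //       for id, j := range pending { // data race on pending
    //           delete(pending, id)
    //           handle(j)
    //       }
    //   }()

    // Fixed shape: snapshot under the owner, then hand the goroutine only
    // the private slice it needs.
    batch := make([]*job, 0, len(pending))
    for id, j := range pending {
        batch = append(batch, j)
        delete(pending, id)
    }
    wg.Add(1)
    go func() {
        defer wg.Done()
        for _, j := range batch {
            fmt.Println("handled job", j.id) // stands in for pm.importBlock
        }
    }()

    // The owner can keep using pending here without any synchronisation.
    pending[4] = &job{4}

    wg.Wait()
}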
@@ -208,20 +216,21 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
 		return
 	}
 	// Make sure the peer's TD is higher than our own. If not drop.
-	if peer.td.Cmp(pm.chainman.Td()) <= 0 {
+	if peer.Td().Cmp(pm.chainman.Td()) <= 0 {
 		return
 	}
 	// FIXME if we have the hash in our chain and the TD of the peer is
 	// much higher than ours, something is wrong with us or the peer.

 	// Check if the hash is on our own chain
-	if pm.chainman.HasBlock(peer.recentHash) {
+	head := peer.Head()
+	if pm.chainman.HasBlock(head) {
 		glog.V(logger.Debug).Infoln("Synchronisation canceled: head already known")
 		return
 	}
 	// Get the hashes from the peer (synchronously)
-	glog.V(logger.Detail).Infof("Attempting synchronisation: %v, 0x%x", peer.id, peer.recentHash)
-	err := pm.downloader.Synchronise(peer.id, peer.recentHash)
+	glog.V(logger.Detail).Infof("Attempting synchronisation: %v, 0x%x", peer.id, head)
+	err := pm.downloader.Synchronise(peer.id, head)
 	switch err {
 	case nil:
 		glog.V(logger.Detail).Infof("Synchronisation completed")

View File

@@ -102,6 +102,7 @@ func (t *rlpx) doProtoHandshake(our *protoHandshake) (their *protoHandshake, err
 	werr := make(chan error, 1)
 	go func() { werr <- Send(t.rw, handshakeMsg, our) }()
 	if their, err = readProtocolHandshake(t.rw, our); err != nil {
+		<-werr // make sure the write terminates too
 		return nil, err
 	}
 	if err := <-werr; err != nil {
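The last hunk is a goroutine-lifetime fix rather than a locking one: the handshake sends and reads concurrently, and on a read error the function used to return while Send was presumably still writing to the same transport, leaving a stray writer behind once the caller tears the connection down. Draining werr on the error path makes the function wait for its own writer before giving the connection back. Below is a minimal sketch of that shape; the connection and message types are invented for the example and are not the p2p API.

package main

import (
    "errors"
    "fmt"
    "time"
)

// send and read stand in for Send and readProtocolHandshake; both touch the
// same underlying connection, so neither may outlive the handshake call.
func send(conn chan string, msg string) error {
    time.Sleep(10 * time.Millisecond) // pretend the write takes a while
    conn <- msg
    return nil
}

func read(conn chan string) (string, error) {
    return "", errors.New("remote hung up") // simulate the failing read
}

func handshake(conn chan string) (string, error) {
    werr := make(chan error, 1)
    go func() { werr <- send(conn, "hello") }()

    their, err := read(conn)
    if err != nil {
        <-werr // wait for our own writer before giving up the connection
        return "", err
    }
    if err := <-werr; err != nil {
        return "", err
    }
    return their, nil
}

func main() {
    conn := make(chan string, 1) // toy stand-in for a network connection
    if _, err := handshake(conn); err != nil {
        fmt.Println("handshake failed:", err)
    }
}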