Merge pull request #825 from obscuren/develop

core: chain fork fix
Jeffrey Wilcke committed 2015-04-29 04:47:26 -07:00
commit 764e81bf12
6 changed files with 117 additions and 35 deletions

cmd/geth/main.go

@ -47,7 +47,7 @@ import _ "net/http/pprof"
const (
ClientIdentifier = "Geth"
Version = "0.9.12"
Version = "0.9.13"
)
var (

core/chain_manager.go

@ -74,8 +74,9 @@ type ChainManager struct {
eventMux *event.TypeMux
genesisBlock *types.Block
// Last known total difficulty
mu sync.RWMutex
tsmu sync.RWMutex
mu sync.RWMutex
tsmu sync.RWMutex
td *big.Int
currentBlock *types.Block
lastBlockHash common.Hash
@ -92,15 +93,14 @@ type ChainManager struct {
func NewChainManager(blockDb, stateDb common.Database, mux *event.TypeMux) *ChainManager {
bc := &ChainManager{
blockDb: blockDb,
stateDb: stateDb,
genesisBlock: GenesisBlock(stateDb),
eventMux: mux,
quit: make(chan struct{}),
cache: NewBlockCache(blockCacheLimit),
currentGasLimit: new(big.Int),
blockDb: blockDb,
stateDb: stateDb,
genesisBlock: GenesisBlock(stateDb),
eventMux: mux,
quit: make(chan struct{}),
cache: NewBlockCache(blockCacheLimit),
}
bc.setLastBlock()
bc.setLastState()
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for _, hash := range badHashes {
@ -145,7 +145,7 @@ func (bc *ChainManager) SetHead(head *types.Block) {
bc.transState = statedb.Copy()
bc.setTotalDifficulty(head.Td)
bc.insert(head)
bc.setLastBlock()
bc.setLastState()
}
func (self *ChainManager) Td() *big.Int {
@ -212,7 +212,7 @@ func (self *ChainManager) setTransState(statedb *state.StateDB) {
self.transState = statedb
}
func (bc *ChainManager) setLastBlock() {
func (bc *ChainManager) setLastState() {
data, _ := bc.blockDb.Get([]byte("LastBlock"))
if len(data) != 0 {
block := bc.GetBlock(common.BytesToHash(data))
@ -224,6 +224,7 @@ func (bc *ChainManager) setLastBlock() {
} else {
bc.Reset()
}
bc.currentGasLimit = CalcGasLimit(bc.currentBlock)
if glog.V(logger.Info) {
glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td)
@ -319,6 +320,7 @@ func (bc *ChainManager) ResetWithGenesisBlock(gb *types.Block) {
bc.insert(bc.genesisBlock)
bc.currentBlock = bc.genesisBlock
bc.makeCache()
bc.td = gb.Difficulty()
}
// Export writes the active chain to the given writer.
@ -346,8 +348,6 @@ func (self *ChainManager) Export(w io.Writer) error {
func (bc *ChainManager) insert(block *types.Block) {
key := append(blockNumPre, block.Number().Bytes()...)
bc.blockDb.Put(key, block.Hash().Bytes())
// Push block to cache
bc.cache.Push(block)
bc.blockDb.Put([]byte("LastBlock"), block.Hash().Bytes())
bc.currentBlock = block
@ -358,6 +358,8 @@ func (bc *ChainManager) write(block *types.Block) {
enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
key := append(blockHashPre, block.Hash().Bytes()...)
bc.blockDb.Put(key, enc)
// Push block to cache
bc.cache.Push(block)
}
// Accessors
@ -552,16 +554,17 @@ func (self *ChainManager) InsertChain(chain types.Blocks) error {
// Compare the TD of the last known block in the canonical chain to make sure it's greater.
// At this point it's possible that a different chain (fork) becomes the new canonical chain.
if block.Td.Cmp(self.td) > 0 {
//if block.Header().Number.Cmp(new(big.Int).Add(cblock.Header().Number, common.Big1)) < 0 {
if block.Number().Cmp(cblock.Number()) <= 0 {
// Check for chain forks. If H(block.num - 1) != block.parent, we're on a fork and need to do some merging
if previous := self.getBlockByNumber(block.NumberU64() - 1); previous.Hash() != block.ParentHash() {
chash := cblock.Hash()
hash := block.Hash()
if glog.V(logger.Info) {
glog.Infof("Split detected. New head #%v (%x) TD=%v, was #%v (%x) TD=%v\n", block.Header().Number, hash[:4], block.Td, cblock.Header().Number, chash[:4], self.td)
}
// during split we merge two different chains and create the new canonical chain
self.merge(self.getBlockByNumber(block.NumberU64()), block)
self.merge(previous, block)
queue[i] = ChainSplitEvent{block, logs}
queueEvent.splitCount++
@ -587,16 +590,19 @@ func (self *ChainManager) InsertChain(chain types.Blocks) error {
glog.Infof("inserted block #%d (%d TXs %d UNCs) (%x...)\n", block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4])
}
} else {
if glog.V(logger.Detail) {
glog.Infof("inserted forked block #%d (%d TXs %d UNCs) (%x...)\n", block.Number(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4])
}
queue[i] = ChainSideEvent{block, logs}
queueEvent.sideCount++
}
self.futureBlocks.Delete(block.Hash())
}
self.mu.Unlock()
stats.processed++
self.futureBlocks.Delete(block.Hash())
}
if (stats.queued > 0 || stats.processed > 0) && bool(glog.V(logger.Info)) {
@ -610,33 +616,38 @@ func (self *ChainManager) InsertChain(chain types.Blocks) error {
return nil
}
// merge takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
// diff takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
// to be part of the new canonical chain.
func (self *ChainManager) merge(oldBlock, newBlock *types.Block) {
func (self *ChainManager) diff(oldBlock, newBlock *types.Block) types.Blocks {
glog.V(logger.Debug).Infof("Applying diff to %x & %x\n", oldBlock.Hash().Bytes()[:4], newBlock.Hash().Bytes()[:4])
var oldChain, newChain types.Blocks
// First find the split (common ancestor) so we can perform an adequate merge
var newChain types.Blocks
// first find common number
for newBlock = newBlock; newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) {
newChain = append(newChain, newBlock)
}
glog.V(logger.Debug).Infoln("Found common number", newBlock.Number())
for {
oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
if oldBlock.Hash() == newBlock.Hash() {
break
}
oldChain = append(oldChain, oldBlock)
newChain = append(newChain, newBlock)
oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash())
}
return newChain
}
// merge merges two different chains into the new canonical chain
func (self *ChainManager) merge(oldBlock, newBlock *types.Block) {
newChain := self.diff(oldBlock, newBlock)
// insert blocks
for _, block := range newChain {
self.insert(block)
}
if glog.V(logger.Detail) {
for i, oldBlock := range oldChain {
glog.Infof("- %.10v = %x\n", oldBlock.Number(), oldBlock.Hash())
glog.Infof("+ %.10v = %x\n", newChain[i].Number(), newChain[i].Hash())
}
}
}
func (self *ChainManager) update() {

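The heart of this change is the fork handling above: when an incoming block carries a higher total difficulty but the canonical block stored at block.Number()-1 is not its parent, the chain has split, and the new diff/merge pair walks the competing branch back to the common ancestor and re-inserts it as canonical. The following is a minimal standalone sketch of that walk-back, using a hypothetical simplified Block type and an in-memory map standing in for ChainManager.GetBlock; it is an illustration of the shape of diff(), not the real implementation.

```go
package main

import "fmt"

// Block is a hypothetical, simplified stand-in for types.Block; only the
// fields needed to illustrate ChainManager.diff are modelled here.
type Block struct {
	Hash   string
	Parent string
	Number uint64
}

// chainDiff mirrors the shape of the new ChainManager.diff: it returns the
// blocks of the new branch, newest first, back to (but not including) the
// common ancestor shared with the old branch.
func chainDiff(byHash map[string]*Block, oldBlock, newBlock *Block) []*Block {
	var newChain []*Block

	// Walk the new branch back until it is at the same height as oldBlock.
	// In InsertChain, oldBlock is the canonical block at newBlock.Number-1.
	for ; newBlock.Number != oldBlock.Number; newBlock = byHash[newBlock.Parent] {
		newChain = append(newChain, newBlock)
	}
	// Step both branches back in lockstep until their hashes meet: that is
	// the common ancestor, and everything collected so far is the re-org.
	for {
		if oldBlock.Hash == newBlock.Hash {
			break
		}
		newChain = append(newChain, newBlock)
		oldBlock, newBlock = byHash[oldBlock.Parent], byHash[newBlock.Parent]
	}
	return newChain
}

func main() {
	// g <- a1 <- a2        old canonical chain
	// g <- b1 <- b2 <- b3  heavier fork that wins
	blocks := map[string]*Block{
		"g":  &Block{Hash: "g", Number: 0},
		"a1": &Block{Hash: "a1", Parent: "g", Number: 1},
		"a2": &Block{Hash: "a2", Parent: "a1", Number: 2},
		"b1": &Block{Hash: "b1", Parent: "g", Number: 1},
		"b2": &Block{Hash: "b2", Parent: "b1", Number: 2},
		"b3": &Block{Hash: "b3", Parent: "b2", Number: 3},
	}
	for _, b := range chainDiff(blocks, blocks["a2"], blocks["b3"]) {
		fmt.Println(b.Hash) // b3, b2, b1: merge() would insert these as canonical
	}
}
```

As in the real merge(), the blocks come back newest first and are inserted one by one, which rewrites the block-number index and the "LastBlock" pointer onto the winning branch.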
core/chain_manager_test.go

@ -9,6 +9,7 @@ import (
"strconv"
"testing"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
@ -56,12 +57,14 @@ func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big
}
// Compare difficulties
f(tdpre, td)
// Loop over parents making sure reconstruction is done properly
}
func printChain(bc *ChainManager) {
for i := bc.CurrentBlock().Number().Uint64(); i > 0; i-- {
b := bc.GetBlockByNumber(uint64(i))
fmt.Printf("\t%x\n", b.Hash())
fmt.Printf("\t%x %v\n", b.Hash(), b.Difficulty())
}
}
@ -344,3 +347,50 @@ func TestGetAncestors(t *testing.T) {
ancestors := chainMan.GetAncestors(chain[len(chain)-1], 4)
fmt.Println(ancestors)
}
type bproc struct{}
func (bproc) Process(*types.Block) (state.Logs, error) { return nil, nil }
func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block {
var chain []*types.Block
for i, difficulty := range d {
header := &types.Header{Number: big.NewInt(int64(i + 1)), Difficulty: big.NewInt(int64(difficulty))}
block := types.NewBlockWithHeader(header)
copy(block.HeaderHash[:2], []byte{byte(i + 1), seed})
if i == 0 {
block.ParentHeaderHash = genesis.Hash()
} else {
copy(block.ParentHeaderHash[:2], []byte{byte(i), seed})
}
chain = append(chain, block)
}
return chain
}
func TestReorg(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
var eventMux event.TypeMux
genesis := GenesisBlock(db)
bc := &ChainManager{blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: &eventMux}
bc.cache = NewBlockCache(100)
bc.futureBlocks = NewBlockCache(100)
bc.processor = bproc{}
bc.ResetWithGenesisBlock(genesis)
bc.txState = state.ManageState(bc.State())
chain1 := makeChainWithDiff(genesis, []int{1, 2, 4}, 10)
chain2 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11)
bc.InsertChain(chain1)
bc.InsertChain(chain2)
prev := bc.CurrentBlock()
for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
if prev.ParentHash() != block.Hash() {
t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash())
}
}
}

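The reorg in TestReorg is decided by total difficulty, not chain length: chain1 accumulates 1+2+4 = 7 on top of the genesis TD, while chain2 accumulates 1+2+3+4 = 10, so inserting chain2 after chain1 makes it the heavier branch and exercises the merge path above. The helper below is hypothetical, not part of the test file; it just makes that comparison explicit.

```go
package main

import (
	"fmt"
	"math/big"
)

// totalDifficulty sums the per-block difficulties passed to makeChainWithDiff.
// The branch whose sum (added on top of the genesis TD) is larger is the one
// InsertChain adopts as canonical.
func totalDifficulty(diffs []int) *big.Int {
	td := new(big.Int)
	for _, d := range diffs {
		td.Add(td, big.NewInt(int64(d)))
	}
	return td
}

func main() {
	fmt.Println(totalDifficulty([]int{1, 2, 4}))    // 7  (chain1)
	fmt.Println(totalDifficulty([]int{1, 2, 3, 4})) // 10 (chain2, wins the reorg)
}
```

The test's final loop then walks the new head backwards and checks that every block's ParentHash matches the hash of the block below it, i.e. that the re-inserted fork forms an unbroken canonical chain.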
core/types/block.go

@ -351,7 +351,7 @@ func (self *Block) Copy() *Block {
}
func (self *Block) String() string {
return fmt.Sprintf(`Block(#%v): Size: %v TD: %v {
str := fmt.Sprintf(`Block(#%v): Size: %v TD: %v {
MinerHash: %x
%v
Transactions:
@ -360,6 +360,16 @@ Uncles:
%v
}
`, self.Number(), self.Size(), self.Td, self.header.HashNoNonce(), self.header, self.transactions, self.uncles)
if (self.HeaderHash != common.Hash{}) {
str += fmt.Sprintf("\nFake hash = %x", self.HeaderHash)
}
if (self.ParentHeaderHash != common.Hash{}) {
str += fmt.Sprintf("\nFake parent hash = %x", self.ParentHeaderHash)
}
return str
}
func (self *Header) String() string {

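The HeaderHash and ParentHeaderHash fields printed here are the override hashes that makeChainWithDiff fills in above, so test chains can be linked together without real, valid headers. The accessor that honours those overrides is not part of this diff, so the sketch below only illustrates the pattern the "Fake hash" output suggests, using simplified stand-in types.

```go
package main

import "fmt"

// Hash32 stands in for common.Hash in this sketch.
type Hash32 [32]byte

// block is a simplified stand-in for types.Block, modelling only the fields
// relevant to the fake-hash override that the String() change prints.
type block struct {
	headerHash Hash32 // like Block.HeaderHash: test override, zero when unset
	realHash   Hash32 // what the header would actually hash to
}

// Hash prefers the injected override, which is what makeChainWithDiff relies
// on in the test above. The real accessor lives in core/types and is not part
// of this diff; treat this as an illustration of the pattern only.
func (b *block) Hash() Hash32 {
	if b.headerHash != (Hash32{}) {
		return b.headerHash
	}
	return b.realHash
}

func main() {
	var b block
	b.realHash[0] = 0xaa
	fmt.Printf("%x\n", b.Hash()) // aa00...00: no override set

	b.headerHash[0] = 0x01 // inject a fake hash, like copy(block.HeaderHash[:2], ...)
	fmt.Printf("%x\n", b.Hash()) // 0100...00: the override wins
}
```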
eth/downloader/downloader.go

@ -436,6 +436,8 @@ func (d *Downloader) process(peer *peer) error {
if err != nil && core.IsParentErr(err) {
glog.V(logger.Debug).Infoln("Aborting process due to missing parent.")
// XXX this needs a lot of attention
blocks = nil
break
} else if err != nil {
// immediately unregister the false peer but do not disconnect
@ -472,3 +474,7 @@ func (d *Downloader) isProcessing() bool {
func (d *Downloader) isBusy() bool {
return d.isFetchingHashes() || d.isDownloadingBlocks() || d.isProcessing()
}
func (d *Downloader) IsBusy() bool {
return d.isBusy()
}

eth/handler.go

@ -163,6 +163,11 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
if peer.td.Cmp(pm.chainman.Td()) <= 0 {
return
}
// Check downloader if it's busy so it doesn't show the sync message
// for every attempt
if pm.downloader.IsBusy() {
return
}
glog.V(logger.Info).Infof("Synchronisation attempt using %s TD=%v\n", peer.id, peer.td)
// Get the hashes from the peer (synchronously)
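Taken together, the last two files gate a synchronisation attempt twice before any work or logging happens: the peer must advertise a higher TD than the local chain, and the downloader must not already be fetching hashes, downloading blocks, or processing. Below is a condensed sketch of that guard order with hypothetical simplified types standing in for the eth peer, core.ChainManager and downloader.Downloader; it mirrors the flow, not the real structs.

```go
package main

import (
	"fmt"
	"math/big"
)

// Simplified stand-ins, used only to illustrate the guard order.
type peer struct {
	id string
	td *big.Int
}

type chain struct{ td *big.Int }

func (c *chain) Td() *big.Int { return c.td }

type downloader struct{ fetching, downloading, processing bool }

// IsBusy mirrors the newly exported Downloader.IsBusy: any in-flight stage
// means a fresh synchronisation attempt would be redundant.
func (d *downloader) IsBusy() bool { return d.fetching || d.downloading || d.processing }

func synchronise(p *peer, c *chain, d *downloader) {
	// Guard 1: nothing to gain from a peer that is not ahead of us.
	if p.td.Cmp(c.Td()) <= 0 {
		return
	}
	// Guard 2: don't restart (or log) a sync while one is already running.
	if d.IsBusy() {
		return
	}
	fmt.Printf("Synchronisation attempt using %s TD=%v\n", p.id, p.td)
	// ... hand the peer to the downloader here ...
}

func main() {
	d := &downloader{processing: true}
	synchronise(&peer{id: "peer1", td: big.NewInt(200)}, &chain{td: big.NewInt(100)}, d)
	// no output: downloader busy, message suppressed

	d.processing = false
	synchronise(&peer{id: "peer1", td: big.NewInt(200)}, &chain{td: big.NewInt(100)}, d)
	// prints a single sync message
}
```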