From 2b339cbbd8bb475d2195d54a71dcced700003430 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 31 Aug 2015 20:21:02 +0300 Subject: [PATCH 1/2] core, eth: split the db blocks into headers and bodies --- core/chain_manager.go | 282 +++++++++++++++++++++---------------- core/chain_manager_test.go | 5 +- core/chain_util.go | 258 +++++++++++++++++++++++++-------- core/genesis.go | 2 +- core/types/block.go | 4 + eth/backend.go | 108 ++++++-------- eth/handler.go | 43 +++--- eth/peer.go | 6 + eth/protocol.go | 16 +++ 9 files changed, 464 insertions(+), 260 deletions(-) diff --git a/core/chain_manager.go b/core/chain_manager.go index c8127951e..745b270f7 100644 --- a/core/chain_manager.go +++ b/core/chain_manager.go @@ -48,6 +48,8 @@ var ( ) const ( + headerCacheLimit = 256 + bodyCacheLimit = 256 blockCacheLimit = 256 maxFutureBlocks = 256 maxTimeFutureBlocks = 30 @@ -71,7 +73,10 @@ type ChainManager struct { lastBlockHash common.Hash currentGasLimit *big.Int - cache *lru.Cache // cache is the LRU caching + headerCache *lru.Cache // Cache for the most recent block headers + bodyCache *lru.Cache // Cache for the most recent block bodies + bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format + blockCache *lru.Cache // Cache for the most recent entire blocks futureBlocks *lru.Cache // future blocks are blocks added for later processing quit chan struct{} @@ -84,13 +89,22 @@ type ChainManager struct { } func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) { - cache, _ := lru.New(blockCacheLimit) + headerCache, _ := lru.New(headerCacheLimit) + bodyCache, _ := lru.New(bodyCacheLimit) + bodyRLPCache, _ := lru.New(bodyCacheLimit) + blockCache, _ := lru.New(blockCacheLimit) + futureBlocks, _ := lru.New(maxFutureBlocks) + bc := &ChainManager{ - chainDb: chainDb, - eventMux: mux, - quit: make(chan struct{}), - cache: cache, - pow: pow, + chainDb: chainDb, + eventMux: mux, + quit: make(chan struct{}), + headerCache: headerCache, + bodyCache: bodyCache, + bodyRLPCache: bodyRLPCache, + blockCache: blockCache, + futureBlocks: futureBlocks, + pow: pow, } bc.genesisBlock = bc.GetBlockByNumber(0) @@ -105,11 +119,9 @@ func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) ( } glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block") } - if err := bc.setLastState(); err != nil { return nil, err } - // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain for hash, _ := range BadHashes { if block := bc.GetBlock(hash); block != nil { @@ -123,14 +135,8 @@ func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) ( glog.V(logger.Error).Infoln("Chain reorg was successfull. 
Resuming normal operation") } } - // Take ownership of this particular state - - bc.futureBlocks, _ = lru.New(maxFutureBlocks) - bc.makeCache() - go bc.update() - return bc, nil } @@ -139,13 +145,15 @@ func (bc *ChainManager) SetHead(head *types.Block) { defer bc.mu.Unlock() for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) { - bc.removeBlock(block) + DeleteBlock(bc.chainDb, block.Hash()) } + bc.headerCache.Purge() + bc.bodyCache.Purge() + bc.bodyRLPCache.Purge() + bc.blockCache.Purge() + bc.futureBlocks.Purge() - bc.cache, _ = lru.New(blockCacheLimit) bc.currentBlock = head - bc.makeCache() - bc.setTotalDifficulty(head.Td) bc.insert(head) bc.setLastState() @@ -199,11 +207,9 @@ func (bc *ChainManager) recover() bool { if len(data) != 0 { block := bc.GetBlock(common.BytesToHash(data)) if block != nil { - err := bc.chainDb.Put([]byte("LastBlock"), block.Hash().Bytes()) - if err != nil { - glog.Fatalln("db write err:", err) + if err := WriteHead(bc.chainDb, block); err != nil { + glog.Fatalf("failed to write database head: %v", err) } - bc.currentBlock = block bc.lastBlockHash = block.Hash() return true @@ -213,14 +219,14 @@ func (bc *ChainManager) recover() bool { } func (bc *ChainManager) setLastState() error { - data, _ := bc.chainDb.Get([]byte("LastBlock")) - if len(data) != 0 { - block := bc.GetBlock(common.BytesToHash(data)) + head := GetHeadHash(bc.chainDb) + if head != (common.Hash{}) { + block := bc.GetBlock(head) if block != nil { bc.currentBlock = block bc.lastBlockHash = block.Hash() } else { - glog.Infof("LastBlock (%x) not found. Recovering...\n", data) + glog.Infof("LastBlock (%x) not found. Recovering...\n", head) if bc.recover() { glog.Infof("Recover successful") } else { @@ -240,63 +246,37 @@ func (bc *ChainManager) setLastState() error { return nil } -func (bc *ChainManager) makeCache() { - bc.cache, _ = lru.New(blockCacheLimit) - // load in last `blockCacheLimit` - 1 blocks. Last block is the current. - bc.cache.Add(bc.genesisBlock.Hash(), bc.genesisBlock) - for _, block := range bc.GetBlocksFromHash(bc.currentBlock.Hash(), blockCacheLimit) { - bc.cache.Add(block.Hash(), block) - } -} - +// Reset purges the entire blockchain, restoring it to its genesis state. func (bc *ChainManager) Reset() { + bc.ResetWithGenesisBlock(bc.genesisBlock) +} + +// ResetWithGenesisBlock purges the entire blockchain, restoring it to the +// specified genesis state. 
+func (bc *ChainManager) ResetWithGenesisBlock(genesis *types.Block) { bc.mu.Lock() defer bc.mu.Unlock() + // Dump the entire block chain and purge the caches for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) { - bc.removeBlock(block) + DeleteBlock(bc.chainDb, block.Hash()) } + bc.headerCache.Purge() + bc.bodyCache.Purge() + bc.bodyRLPCache.Purge() + bc.blockCache.Purge() + bc.futureBlocks.Purge() - bc.cache, _ = lru.New(blockCacheLimit) + // Prepare the genesis block and reinitialize the chain + bc.genesisBlock = genesis + bc.genesisBlock.Td = genesis.Difficulty() - // Prepare the genesis block - err := WriteBlock(bc.chainDb, bc.genesisBlock) - if err != nil { - glog.Fatalln("db err:", err) + if err := WriteBlock(bc.chainDb, bc.genesisBlock); err != nil { + glog.Fatalf("failed to write genesis block: %v", err) } - bc.insert(bc.genesisBlock) bc.currentBlock = bc.genesisBlock - bc.makeCache() - - bc.setTotalDifficulty(common.Big("0")) -} - -func (bc *ChainManager) removeBlock(block *types.Block) { - bc.chainDb.Delete(append(blockHashPre, block.Hash().Bytes()...)) -} - -func (bc *ChainManager) ResetWithGenesisBlock(gb *types.Block) { - bc.mu.Lock() - defer bc.mu.Unlock() - - for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) { - bc.removeBlock(block) - } - - // Prepare the genesis block - gb.Td = gb.Difficulty() - bc.genesisBlock = gb - - err := WriteBlock(bc.chainDb, bc.genesisBlock) - if err != nil { - glog.Fatalln("db err:", err) - } - - bc.insert(bc.genesisBlock) - bc.currentBlock = bc.genesisBlock - bc.makeCache() - bc.td = gb.Difficulty() + bc.setTotalDifficulty(genesis.Difficulty()) } // Export writes the active chain to the given writer. @@ -359,61 +339,130 @@ func (bc *ChainManager) Genesis() *types.Block { return bc.genesisBlock } -// Block fetching methods +// HasHeader checks if a block header is present in the database or not, caching +// it if present. +func (bc *ChainManager) HasHeader(hash common.Hash) bool { + return bc.GetHeader(hash) != nil +} + +// GetHeader retrieves a block header from the database by hash, caching it if +// found. +func (self *ChainManager) GetHeader(hash common.Hash) *types.Header { + // Short circuit if the header's already in the cache, retrieve otherwise + if header, ok := self.headerCache.Get(hash); ok { + return header.(*types.Header) + } + header := GetHeaderByHash(self.chainDb, hash) + if header == nil { + return nil + } + // Cache the found header for next time and return + self.headerCache.Add(header.Hash(), header) + return header +} + +// GetHeaderByNumber retrieves a block header from the database by number, +// caching it (associated with its hash) if found. +func (self *ChainManager) GetHeaderByNumber(number uint64) *types.Header { + hash := GetHashByNumber(self.chainDb, number) + if hash == (common.Hash{}) { + return nil + } + return self.GetHeader(hash) +} + +// GetBody retrieves a block body (transactions, uncles and total difficulty) +// from the database by hash, caching it if found. The resion for the peculiar +// pointer-to-slice return type is to differentiate between empty and inexistent +// bodies. 
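A hypothetical caller-side sketch of the pointer-to-slice contract described above and implemented by the GetBody method that follows: nil pointers mean the body is not in the database at all, while non-nil pointers to empty slices mean a known but empty body (the second commit later simplifies this to a *types.Body return). The helper name and usage are illustrative, not part of the patch:

package example // illustrative only

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

// bodyStatus distinguishes "missing" from "present but empty" using the
// pointer-to-slice return values of ChainManager.GetBody.
func bodyStatus(cm *core.ChainManager, hash common.Hash) string {
	txs, uncles := cm.GetBody(hash)
	switch {
	case txs == nil || uncles == nil:
		return "body not stored"
	case len(*txs) == 0 && len(*uncles) == 0:
		return "body stored, but empty"
	default:
		return "body stored with content"
	}
}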
+func (self *ChainManager) GetBody(hash common.Hash) (*[]*types.Transaction, *[]*types.Header) { + // Short circuit if the body's already in the cache, retrieve otherwise + if cached, ok := self.bodyCache.Get(hash); ok { + body := cached.(*storageBody) + return &body.Transactions, &body.Uncles + } + transactions, uncles, td := GetBodyByHash(self.chainDb, hash) + if td == nil { + return nil, nil + } + // Cache the found body for next time and return + self.bodyCache.Add(hash, &storageBody{ + Transactions: transactions, + Uncles: uncles, + }) + return &transactions, &uncles +} + +// GetBodyRLP retrieves a block body in RLP encoding from the database by hash, +// caching it if found. +func (self *ChainManager) GetBodyRLP(hash common.Hash) []byte { + // Short circuit if the body's already in the cache, retrieve otherwise + if cached, ok := self.bodyRLPCache.Get(hash); ok { + return cached.([]byte) + } + body, td := GetBodyRLPByHash(self.chainDb, hash) + if td == nil { + return nil + } + // Cache the found body for next time and return + self.bodyRLPCache.Add(hash, body) + return body +} + +// HasBlock checks if a block is fully present in the database or not, caching +// it if present. func (bc *ChainManager) HasBlock(hash common.Hash) bool { - if bc.cache.Contains(hash) { - return true - } - - data, _ := bc.chainDb.Get(append(blockHashPre, hash[:]...)) - return len(data) != 0 -} - -func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) (chain []common.Hash) { - block := self.GetBlock(hash) - if block == nil { - return - } - // XXX Could be optimised by using a different database which only holds hashes (i.e., linked list) - for i := uint64(0); i < max; i++ { - block = self.GetBlock(block.ParentHash()) - if block == nil { - break - } - - chain = append(chain, block.Hash()) - if block.Number().Cmp(common.Big0) <= 0 { - break - } - } - - return + return bc.GetBlock(hash) != nil } +// GetBlock retrieves a block from the database by hash, caching it if found. func (self *ChainManager) GetBlock(hash common.Hash) *types.Block { - if block, ok := self.cache.Get(hash); ok { + // Short circuit if the block's already in the cache, retrieve otherwise + if block, ok := self.blockCache.Get(hash); ok { return block.(*types.Block) } - block := GetBlockByHash(self.chainDb, hash) if block == nil { return nil } - - // Add the block to the cache - self.cache.Add(hash, (*types.Block)(block)) - - return (*types.Block)(block) + // Cache the found block for next time and return + self.blockCache.Add(block.Hash(), block) + return block } -func (self *ChainManager) GetBlockByNumber(num uint64) *types.Block { - self.mu.RLock() - defer self.mu.RUnlock() - - return self.getBlockByNumber(num) - +// GetBlockByNumber retrieves a block from the database by number, caching it +// (associated with its hash) if found. +func (self *ChainManager) GetBlockByNumber(number uint64) *types.Block { + hash := GetHashByNumber(self.chainDb, number) + if hash == (common.Hash{}) { + return nil + } + return self.GetBlock(hash) } +// GetBlockHashesFromHash retrieves a number of block hashes starting at a given +// hash, fetching towards the genesis block. 
+func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { + // Get the origin header from which to fetch + header := self.GetHeader(hash) + if header == nil { + return nil + } + // Iterate the headers until enough is collected or the genesis reached + chain := make([]common.Hash, 0, max) + for i := uint64(0); i < max; i++ { + if header = self.GetHeader(header.ParentHash); header == nil { + break + } + chain = append(chain, header.Hash()) + if header.Number.Cmp(common.Big0) <= 0 { + break + } + } + return chain +} + +// [deprecated by eth/62] // GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { for i := 0; i < n; i++ { @@ -427,11 +476,6 @@ func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []* return } -// non blocking version -func (self *ChainManager) getBlockByNumber(num uint64) *types.Block { - return GetBlockByNumber(self.chainDb, num) -} - func (self *ChainManager) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) { for i := 0; block != nil && i < length; i++ { uncles = append(uncles, block.Uncles()...) diff --git a/core/chain_manager_test.go b/core/chain_manager_test.go index 002dcbe44..97e7cacdc 100644 --- a/core/chain_manager_test.go +++ b/core/chain_manager_test.go @@ -388,7 +388,10 @@ func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block func chm(genesis *types.Block, db common.Database) *ChainManager { var eventMux event.TypeMux bc := &ChainManager{chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}} - bc.cache, _ = lru.New(100) + bc.headerCache, _ = lru.New(100) + bc.bodyCache, _ = lru.New(100) + bc.bodyRLPCache, _ = lru.New(100) + bc.blockCache, _ = lru.New(100) bc.futureBlocks, _ = lru.New(100) bc.processor = bproc{} bc.ResetWithGenesisBlock(genesis) diff --git a/core/chain_util.go b/core/chain_util.go index 84b462ce3..c12bdda75 100644 --- a/core/chain_util.go +++ b/core/chain_util.go @@ -19,7 +19,6 @@ package core import ( "bytes" "math/big" - "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -30,9 +29,14 @@ import ( ) var ( - blockHashPre = []byte("block-hash-") + headKey = []byte("LastBlock") + + headerHashPre = []byte("header-hash-") + bodyHashPre = []byte("body-hash-") blockNumPre = []byte("block-num-") ExpDiffPeriod = big.NewInt(100000) + + blockHashPre = []byte("block-hash-") // [deprecated by eth/63] ) // CalcDifficulty is the difficulty adjustment algorithm. It returns @@ -112,8 +116,204 @@ func CalcGasLimit(parent *types.Block) *big.Int { return gl } -// GetBlockByHash returns the block corresponding to the hash or nil if not found +// storageBody is the block body encoding used for the database. +type storageBody struct { + Transactions []*types.Transaction + Uncles []*types.Header +} + +// GetHashByNumber retrieves a hash assigned to a canonical block number. +func GetHashByNumber(db common.Database, number uint64) common.Hash { + data, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...)) + if len(data) == 0 { + return common.Hash{} + } + return common.BytesToHash(data) +} + +// GetHeadHash retrieves the hash of the current canonical head block. 
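For reference, the raw database keys implied by the prefixes declared above in this first commit: header and body live under separate hash-keyed entries, "block-num-" maps canonical numbers to hashes, and a single "LastBlock" entry tracks the head. This is an editorial sketch, not code from the patch:

package example // illustrative only

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

// v1Keys reconstructs the key layout used by this commit's chain_util helpers.
func v1Keys(hash common.Hash, number uint64) (headerKey, bodyKey, canonKey, headKey []byte) {
	headerKey = append([]byte("header-hash-"), hash.Bytes()...)                   // header RLP
	bodyKey = append([]byte("body-hash-"), hash.Bytes()...)                       // TD RLP followed by body RLP
	canonKey = append([]byte("block-num-"), big.NewInt(int64(number)).Bytes()...) // canonical number -> hash
	headKey = []byte("LastBlock")                                                 // hash of the current head block
	return
}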
+func GetHeadHash(db common.Database) common.Hash { + data, _ := db.Get(headKey) + if len(data) == 0 { + return common.Hash{} + } + return common.BytesToHash(data) +} + +// GetHeaderRLPByHash retrieves a block header in its raw RLP database encoding, +// or nil if the header's not found. +func GetHeaderRLPByHash(db common.Database, hash common.Hash) []byte { + data, _ := db.Get(append(headerHashPre, hash[:]...)) + return data +} + +// GetHeaderByHash retrieves the block header corresponding to the hash, nil if +// none found. +func GetHeaderByHash(db common.Database, hash common.Hash) *types.Header { + data := GetHeaderRLPByHash(db, hash) + if len(data) == 0 { + return nil + } + header := new(types.Header) + if err := rlp.Decode(bytes.NewReader(data), header); err != nil { + glog.V(logger.Error).Infof("invalid block header RLP for hash %x: %v", hash, err) + return nil + } + return header +} + +// GetBodyRLPByHash retrieves the block body (transactions and uncles) in RLP +// encoding, and the associated total difficulty. +func GetBodyRLPByHash(db common.Database, hash common.Hash) ([]byte, *big.Int) { + combo, _ := db.Get(append(bodyHashPre, hash[:]...)) + if len(combo) == 0 { + return nil, nil + } + buffer := bytes.NewBuffer(combo) + + td := new(big.Int) + if err := rlp.Decode(buffer, td); err != nil { + glog.V(logger.Error).Infof("invalid block td RLP for hash %x: %v", hash, err) + return nil, nil + } + return buffer.Bytes(), td +} + +// GetBodyByHash retrieves the block body (transactons, uncles, total difficulty) +// corresponding to the hash, nils if none found. +func GetBodyByHash(db common.Database, hash common.Hash) ([]*types.Transaction, []*types.Header, *big.Int) { + data, td := GetBodyRLPByHash(db, hash) + if len(data) == 0 || td == nil { + return nil, nil, nil + } + body := new(storageBody) + if err := rlp.Decode(bytes.NewReader(data), body); err != nil { + glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err) + return nil, nil, nil + } + return body.Transactions, body.Uncles, td +} + +// GetBlockByHash retrieves an entire block corresponding to the hash, assembling +// it back from the stored header and body. func GetBlockByHash(db common.Database, hash common.Hash) *types.Block { + // Retrieve the block header and body contents + header := GetHeaderByHash(db, hash) + if header == nil { + return nil + } + transactions, uncles, td := GetBodyByHash(db, hash) + if td == nil { + return nil + } + // Reassemble the block and return + block := types.NewBlockWithHeader(header).WithBody(transactions, uncles) + block.Td = td + + return block +} + +// GetBlockByNumber returns the canonical block by number or nil if not found. +func GetBlockByNumber(db common.Database, number uint64) *types.Block { + key, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...)) + if len(key) == 0 { + return nil + } + return GetBlockByHash(db, common.BytesToHash(key)) +} + +// WriteCanonNumber stores the canonical hash for the given block number. +func WriteCanonNumber(db common.Database, hash common.Hash, number uint64) error { + key := append(blockNumPre, big.NewInt(int64(number)).Bytes()...) + if err := db.Put(key, hash.Bytes()); err != nil { + glog.Fatalf("failed to store number to hash mapping into database: %v", err) + return err + } + return nil +} + +// WriteHead updates the head block of the chain database. 
+func WriteHead(db common.Database, block *types.Block) error { + if err := WriteCanonNumber(db, block.Hash(), block.NumberU64()); err != nil { + glog.Fatalf("failed to store canonical number into database: %v", err) + return err + } + if err := db.Put(headKey, block.Hash().Bytes()); err != nil { + glog.Fatalf("failed to store last block into database: %v", err) + return err + } + return nil +} + +// WriteHeader serializes a block header into the database. +func WriteHeader(db common.Database, header *types.Header) error { + data, err := rlp.EncodeToBytes(header) + if err != nil { + return err + } + key := append(headerHashPre, header.Hash().Bytes()...) + if err := db.Put(key, data); err != nil { + glog.Fatalf("failed to store header into database: %v", err) + return err + } + glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, header.Hash().Bytes()[:4]) + return nil +} + +// WriteBody serializes the body of a block into the database. +func WriteBody(db common.Database, block *types.Block) error { + body, err := rlp.EncodeToBytes(&storageBody{block.Transactions(), block.Uncles()}) + if err != nil { + return err + } + td, err := rlp.EncodeToBytes(block.Td) + if err != nil { + return err + } + key := append(bodyHashPre, block.Hash().Bytes()...) + if err := db.Put(key, append(td, body...)); err != nil { + glog.Fatalf("failed to store block body into database: %v", err) + return err + } + glog.V(logger.Debug).Infof("stored block body #%v [%x…]", block.Number, block.Hash().Bytes()[:4]) + return nil +} + +// WriteBlock serializes a block into the database, header and body separately. +func WriteBlock(db common.Database, block *types.Block) error { + // Store the body first to retain database consistency + if err := WriteBody(db, block); err != nil { + return err + } + // Store the header too, signaling full block ownership + if err := WriteHeader(db, block.Header()); err != nil { + return err + } + return nil +} + +// DeleteHeader removes all block header data associated with a hash. +func DeleteHeader(db common.Database, hash common.Hash) { + db.Delete(append(headerHashPre, hash.Bytes()...)) +} + +// DeleteBody removes all block body data associated with a hash. +func DeleteBody(db common.Database, hash common.Hash) { + db.Delete(append(bodyHashPre, hash.Bytes()...)) +} + +// DeleteBlock removes all block data associated with a hash. +func DeleteBlock(db common.Database, hash common.Hash) { + DeleteHeader(db, hash) + DeleteBody(db, hash) +} + +// [deprecated by eth/63] +// GetBlockByHashOld returns the old combined block corresponding to the hash +// or nil if not found. This method is only used by the upgrade mechanism to +// access the old combined block representation. It will be dropped after the +// network transitions to eth/63. 
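WriteBody above stores the value as the total difficulty's RLP immediately followed by the body's RLP, and GetBodyRLPByHash recovers the two by letting rlp.Decode consume exactly one value from the buffer. A small round-trip sketch of that layout, with illustrative helper names:

package example // illustrative only

import (
	"bytes"
	"math/big"

	"github.com/ethereum/go-ethereum/rlp"
)

// joinTdAndBody mimics WriteBody's value layout: RLP(td) followed by RLP(body).
func joinTdAndBody(td *big.Int, bodyRLP []byte) ([]byte, error) {
	tdRLP, err := rlp.EncodeToBytes(td)
	if err != nil {
		return nil, err
	}
	return append(tdRLP, bodyRLP...), nil
}

// splitTdAndBody mimics GetBodyRLPByHash: decoding the TD consumes only the
// first RLP value, leaving the untouched body RLP in the buffer.
func splitTdAndBody(combo []byte) (*big.Int, []byte, error) {
	buf := bytes.NewBuffer(combo)
	td := new(big.Int)
	if err := rlp.Decode(buf, td); err != nil {
		return nil, nil, err
	}
	return td, buf.Bytes(), nil
}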
+func GetBlockByHashOld(db common.Database, hash common.Hash) *types.Block { data, _ := db.Get(append(blockHashPre, hash[:]...)) if len(data) == 0 { return nil @@ -125,55 +325,3 @@ func GetBlockByHash(db common.Database, hash common.Hash) *types.Block { } return (*types.Block)(&block) } - -// GetBlockByHash returns the canonical block by number or nil if not found -func GetBlockByNumber(db common.Database, number uint64) *types.Block { - key, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...)) - if len(key) == 0 { - return nil - } - - return GetBlockByHash(db, common.BytesToHash(key)) -} - -// WriteCanonNumber writes the canonical hash for the given block -func WriteCanonNumber(db common.Database, block *types.Block) error { - key := append(blockNumPre, block.Number().Bytes()...) - err := db.Put(key, block.Hash().Bytes()) - if err != nil { - return err - } - return nil -} - -// WriteHead force writes the current head -func WriteHead(db common.Database, block *types.Block) error { - err := WriteCanonNumber(db, block) - if err != nil { - return err - } - err = db.Put([]byte("LastBlock"), block.Hash().Bytes()) - if err != nil { - return err - } - return nil -} - -// WriteBlock writes a block to the database -func WriteBlock(db common.Database, block *types.Block) error { - tstart := time.Now() - - enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block)) - key := append(blockHashPre, block.Hash().Bytes()...) - err := db.Put(key, enc) - if err != nil { - glog.Fatal("db write fail:", err) - return err - } - - if glog.V(logger.Debug) { - glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart)) - } - - return nil -} diff --git a/core/genesis.go b/core/genesis.go index 7d4e03c99..6fbc671b0 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -86,7 +86,7 @@ func WriteGenesisBlock(chainDb common.Database, reader io.Reader) (*types.Block, if block := GetBlockByHash(chainDb, block.Hash()); block != nil { glog.V(logger.Info).Infoln("Genesis block already in chain. Writing canonical number") - err := WriteCanonNumber(chainDb, block) + err := WriteCanonNumber(chainDb, block.Hash(), block.NumberU64()) if err != nil { return nil, err } diff --git a/core/types/block.go b/core/types/block.go index fd81db04c..558b46e01 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -135,6 +135,7 @@ type Block struct { ReceivedAt time.Time } +// [deprecated by eth/63] // StorageBlock defines the RLP encoding of a Block stored in the // state database. The StorageBlock encoding contains fields that // would otherwise need to be recomputed. @@ -147,6 +148,7 @@ type extblock struct { Uncles []*Header } +// [deprecated by eth/63] // "storage" block encoding. used for database. 
type storageblock struct { Header *Header @@ -268,6 +270,7 @@ func (b *Block) EncodeRLP(w io.Writer) error { }) } +// [deprecated by eth/63] func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error { var sb storageblock if err := s.Decode(&sb); err != nil { @@ -277,6 +280,7 @@ func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error { return nil } +// [deprecated by eth/63] func (b *StorageBlock) EncodeRLP(w io.Writer) error { return rlp.Encode(w, storageblock{ Header: b.header, diff --git a/eth/backend.go b/eth/backend.go index 639aaaaec..59f2ab01a 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -18,6 +18,7 @@ package eth import ( + "bytes" "crypto/ecdsa" "encoding/json" "fmt" @@ -269,11 +270,7 @@ func New(config *Config) (*Ethereum, error) { newdb = func(path string) (common.Database, error) { return ethdb.NewLDBDatabase(path, config.DatabaseCache) } } - // attempt to merge database together, upgrading from an old version - if err := mergeDatabases(config.DataDir, newdb); err != nil { - return nil, err - } - + // Open the chain database and perform any upgrades needed chainDb, err := newdb(filepath.Join(config.DataDir, "chaindata")) if err != nil { return nil, fmt.Errorf("blockchain db err: %v", err) @@ -281,6 +278,10 @@ func New(config *Config) (*Ethereum, error) { if db, ok := chainDb.(*ethdb.LDBDatabase); ok { db.Meter("eth/db/chaindata/") } + if err := upgradeChainDatabase(chainDb); err != nil { + return nil, err + } + dappDb, err := newdb(filepath.Join(config.DataDir, "dapp")) if err != nil { return nil, fmt.Errorf("dapp db err: %v", err) @@ -721,74 +722,55 @@ func saveBlockchainVersion(db common.Database, bcVersion int) { } } -// mergeDatabases when required merge old database layout to one single database -func mergeDatabases(datadir string, newdb func(path string) (common.Database, error)) error { - // Check if already upgraded - data := filepath.Join(datadir, "chaindata") - if _, err := os.Stat(data); !os.IsNotExist(err) { +// upgradeChainDatabase ensures that the chain database stores block split into +// separate header and body entries. +func upgradeChainDatabase(db common.Database) error { + // Short circuit if the head block is stored already as separate header and body + data, err := db.Get([]byte("LastBlock")) + if err != nil { return nil } - // make sure it's not just a clean path - chainPath := filepath.Join(datadir, "blockchain") - if _, err := os.Stat(chainPath); os.IsNotExist(err) { + head := common.BytesToHash(data) + + if block := core.GetBlockByHashOld(db, head); block == nil { return nil } - glog.Infoln("Database upgrade required. Upgrading...") + // At least some of the database is still the old format, upgrade (skip the head block!) + glog.V(logger.Info).Info("Old database detected, upgrading...") - database, err := newdb(data) - if err != nil { - return fmt.Errorf("creating data db err: %v", err) - } - defer database.Close() + if db, ok := db.(*ethdb.LDBDatabase); ok { + blockPrefix := []byte("block-hash-") + for it := db.NewIterator(); it.Next(); { + // Skip anything other than a combined block + if !bytes.HasPrefix(it.Key(), blockPrefix) { + continue + } + // Skip the head block (merge last to signal upgrade completion) + if bytes.HasSuffix(it.Key(), head.Bytes()) { + continue + } + // Load the block, split and serialize (order!) 
+ block := core.GetBlockByHashOld(db, common.BytesToHash(bytes.TrimPrefix(it.Key(), blockPrefix))) - // Migrate blocks - chainDb, err := newdb(chainPath) - if err != nil { - return fmt.Errorf("state db err: %v", err) - } - defer chainDb.Close() - - if chain, ok := chainDb.(*ethdb.LDBDatabase); ok { - glog.Infoln("Merging blockchain database...") - it := chain.NewIterator() - for it.Next() { - database.Put(it.Key(), it.Value()) + if err := core.WriteBody(db, block); err != nil { + return err + } + if err := core.WriteHeader(db, block.Header()); err != nil { + return err + } + if err := db.Delete(it.Key()); err != nil { + return err + } } - it.Release() - } + // Lastly, upgrade the head block, disabling the upgrade mechanism + current := core.GetBlockByHashOld(db, head) - // Migrate state - stateDb, err := newdb(filepath.Join(datadir, "state")) - if err != nil { - return fmt.Errorf("state db err: %v", err) - } - defer stateDb.Close() - - if state, ok := stateDb.(*ethdb.LDBDatabase); ok { - glog.Infoln("Merging state database...") - it := state.NewIterator() - for it.Next() { - database.Put(it.Key(), it.Value()) + if err := core.WriteBody(db, current); err != nil { + return err } - it.Release() - } - - // Migrate transaction / receipts - extraDb, err := newdb(filepath.Join(datadir, "extra")) - if err != nil { - return fmt.Errorf("state db err: %v", err) - } - defer extraDb.Close() - - if extra, ok := extraDb.(*ethdb.LDBDatabase); ok { - glog.Infoln("Merging transaction database...") - - it := extra.NewIterator() - for it.Next() { - database.Put(it.Key(), it.Value()) + if err := core.WriteHeader(db, current.Header()); err != nil { + return err } - it.Release() } - return nil } diff --git a/eth/handler.go b/eth/handler.go index f22afecb7..95f4e8ce2 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -345,33 +345,33 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { if err := msg.Decode(&query); err != nil { return errResp(ErrDecode, "%v: %v", msg, err) } - // Gather blocks until the fetch or network limits is reached + // Gather headers until the fetch or network limits is reached var ( bytes common.StorageSize headers []*types.Header unknown bool ) for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch { - // Retrieve the next block satisfying the query - var origin *types.Block + // Retrieve the next header satisfying the query + var origin *types.Header if query.Origin.Hash != (common.Hash{}) { - origin = pm.chainman.GetBlock(query.Origin.Hash) + origin = pm.chainman.GetHeader(query.Origin.Hash) } else { - origin = pm.chainman.GetBlockByNumber(query.Origin.Number) + origin = pm.chainman.GetHeaderByNumber(query.Origin.Number) } if origin == nil { break } - headers = append(headers, origin.Header()) - bytes += origin.Size() + headers = append(headers, origin) + bytes += 500 // Approximate, should be good enough estimate - // Advance to the next block of the query + // Advance to the next header of the query switch { case query.Origin.Hash != (common.Hash{}) && query.Reverse: // Hash based traversal towards the genesis block for i := 0; i < int(query.Skip)+1; i++ { - if block := pm.chainman.GetBlock(query.Origin.Hash); block != nil { - query.Origin.Hash = block.ParentHash() + if header := pm.chainman.GetHeader(query.Origin.Hash); header != nil { + query.Origin.Hash = header.ParentHash } else { unknown = true break @@ -379,9 +379,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } case query.Origin.Hash != 
(common.Hash{}) && !query.Reverse: // Hash based traversal towards the leaf block - if block := pm.chainman.GetBlockByNumber(origin.NumberU64() + query.Skip + 1); block != nil { - if pm.chainman.GetBlockHashesFromHash(block.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash { - query.Origin.Hash = block.Hash() + if header := pm.chainman.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil { + if pm.chainman.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash { + query.Origin.Hash = header.Hash() } else { unknown = true } @@ -452,23 +452,24 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { // Gather blocks until the fetch or network limits is reached var ( hash common.Hash - bytes common.StorageSize - bodies []*blockBody + bytes int + bodies []*blockBodyRLP ) for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch { - //Retrieve the hash of the next block + // Retrieve the hash of the next block if err := msgStream.Decode(&hash); err == rlp.EOL { break } else if err != nil { return errResp(ErrDecode, "msg %v: %v", msg, err) } - // Retrieve the requested block, stopping if enough was found - if block := pm.chainman.GetBlock(hash); block != nil { - bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()}) - bytes += block.Size() + // Retrieve the requested block body, stopping if enough was found + if data := pm.chainman.GetBodyRLP(hash); len(data) != 0 { + body := blockBodyRLP(data) + bodies = append(bodies, &body) + bytes += len(body) } } - return p.SendBlockBodies(bodies) + return p.SendBlockBodiesRLP(bodies) case p.version >= eth63 && msg.Code == GetNodeDataMsg: // Decode the retrieval message diff --git a/eth/peer.go b/eth/peer.go index 8d7c48885..f1ddd9726 100644 --- a/eth/peer.go +++ b/eth/peer.go @@ -184,6 +184,12 @@ func (p *peer) SendBlockBodies(bodies []*blockBody) error { return p2p.Send(p.rw, BlockBodiesMsg, blockBodiesData(bodies)) } +// SendBlockBodiesRLP sends a batch of block contents to the remote peer from +// an already RLP encoded format. +func (p *peer) SendBlockBodiesRLP(bodies []*blockBodyRLP) error { + return p2p.Send(p.rw, BlockBodiesMsg, blockBodiesRLPData(bodies)) +} + // SendNodeData sends a batch of arbitrary internal data, corresponding to the // hashes requested. func (p *peer) SendNodeData(data [][]byte) error { diff --git a/eth/protocol.go b/eth/protocol.go index 49f096a3b..24007bbb5 100644 --- a/eth/protocol.go +++ b/eth/protocol.go @@ -213,6 +213,22 @@ type blockBody struct { // blockBodiesData is the network packet for block content distribution. type blockBodiesData []*blockBody +// blockBodyRLP represents the RLP encoded data content of a single block. +type blockBodyRLP []byte + +// EncodeRLP is a specialized encoder for a block body to pass the already +// encoded body RLPs from the database on, without double encoding. +func (b *blockBodyRLP) EncodeRLP(w io.Writer) error { + if _, err := w.Write([]byte(*b)); err != nil { + return err + } + return nil +} + +// blockBodiesRLPData is the network packet for block content distribution +// based on original RLP formatting (i.e. skip the db-decode/proto-encode). +type blockBodiesRLPData []*blockBodyRLP + // nodeDataData is the network response packet for a node data retrieval. 
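The blockBodyRLP type above exists purely so that bodies fetched from the database in RLP form can be spliced into the response without a decode/re-encode round trip; its EncodeRLP writes the stored bytes verbatim. The second commit drops this type in favour of rlp.RawValue, which serves the same purpose. A brief, illustrative sketch of the equivalent RawValue-based encoding:

package example // illustrative only

import "github.com/ethereum/go-ethereum/rlp"

// encodeRawBodies splices already-encoded body RLP into an outer list without
// re-encoding it, the same trick blockBodyRLP.EncodeRLP plays above.
func encodeRawBodies(bodies [][]byte) ([]byte, error) {
	raws := make([]rlp.RawValue, len(bodies))
	for i, b := range bodies {
		raws[i] = rlp.RawValue(b)
	}
	return rlp.EncodeToBytes(raws)
}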
type nodeDataData []struct { Value []byte From cdc2662c4098d68a7b450b9b9ff2688acbffcee4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 7 Sep 2015 20:43:01 +0300 Subject: [PATCH 2/2] core: split out TD from database and all internals --- cmd/geth/main.go | 7 +- core/chain_makers.go | 1 - core/chain_manager.go | 150 ++++++++++-------- core/chain_manager_test.go | 14 +- core/chain_util.go | 222 ++++++++++++++------------- core/chain_util_test.go | 243 ++++++++++++++++++++++++++++++ core/genesis.go | 20 +-- core/types/block.go | 35 +++-- eth/backend.go | 18 ++- eth/downloader/downloader.go | 11 +- eth/downloader/downloader_test.go | 44 ++++-- eth/handler.go | 19 +-- eth/peer.go | 5 +- eth/protocol.go | 16 -- miner/worker.go | 4 +- rpc/api/eth.go | 72 +++++---- rpc/api/parsing.go | 4 +- tests/block_test_util.go | 5 +- xeth/xeth.go | 4 + 19 files changed, 595 insertions(+), 299 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index f72f69791..ba753a493 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -528,17 +528,16 @@ func blockRecovery(ctx *cli.Context) { var block *types.Block if arg[0] == '#' { - block = core.GetBlockByNumber(blockDb, common.String2Big(arg[1:]).Uint64()) + block = core.GetBlock(blockDb, core.GetCanonicalHash(blockDb, common.String2Big(arg[1:]).Uint64())) } else { - block = core.GetBlockByHash(blockDb, common.HexToHash(arg)) + block = core.GetBlock(blockDb, common.HexToHash(arg)) } if block == nil { glog.Fatalln("block not found. Recovery failed") } - err = core.WriteHead(blockDb, block) - if err != nil { + if err = core.WriteHeadBlockHash(blockDb, block.Hash()); err != nil { glog.Fatalln("block write err", err) } glog.Infof("Recovery succesful. New HEAD %x\n", block.Hash()) diff --git a/core/chain_makers.go b/core/chain_makers.go index b009e0c28..f89218f82 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -158,7 +158,6 @@ func GenerateChain(parent *types.Block, db common.Database, n int, gen func(int, for i := 0; i < n; i++ { header := makeHeader(parent, statedb) block := genblock(i, header) - block.Td = CalcTD(block, parent) blocks[i] = block parent = block } diff --git a/core/chain_manager.go b/core/chain_manager.go index 745b270f7..407945f8e 100644 --- a/core/chain_manager.go +++ b/core/chain_manager.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/pow" + "github.com/ethereum/go-ethereum/rlp" "github.com/hashicorp/golang-lru" ) @@ -48,8 +49,9 @@ var ( ) const ( - headerCacheLimit = 256 + headerCacheLimit = 512 bodyCacheLimit = 256 + tdCacheLimit = 1024 blockCacheLimit = 256 maxFutureBlocks = 256 maxTimeFutureBlocks = 30 @@ -70,12 +72,12 @@ type ChainManager struct { checkpoint int // checkpoint counts towards the new checkpoint td *big.Int currentBlock *types.Block - lastBlockHash common.Hash currentGasLimit *big.Int headerCache *lru.Cache // Cache for the most recent block headers bodyCache *lru.Cache // Cache for the most recent block bodies bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format + tdCache *lru.Cache // Cache for the most recent block total difficulties blockCache *lru.Cache // Cache for the most recent entire blocks futureBlocks *lru.Cache // future blocks are blocks added for later processing @@ -92,6 +94,7 @@ func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) ( headerCache, _ := lru.New(headerCacheLimit) bodyCache, _ := 
lru.New(bodyCacheLimit) bodyRLPCache, _ := lru.New(bodyCacheLimit) + tdCache, _ := lru.New(tdCacheLimit) blockCache, _ := lru.New(blockCacheLimit) futureBlocks, _ := lru.New(maxFutureBlocks) @@ -102,6 +105,7 @@ func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) ( headerCache: headerCache, bodyCache: bodyCache, bodyRLPCache: bodyRLPCache, + tdCache: tdCache, blockCache: blockCache, futureBlocks: futureBlocks, pow: pow, @@ -154,7 +158,7 @@ func (bc *ChainManager) SetHead(head *types.Block) { bc.futureBlocks.Purge() bc.currentBlock = head - bc.setTotalDifficulty(head.Td) + bc.setTotalDifficulty(bc.GetTd(head.Hash())) bc.insert(head) bc.setLastState() } @@ -177,7 +181,7 @@ func (self *ChainManager) LastBlockHash() common.Hash { self.mu.RLock() defer self.mu.RUnlock() - return self.lastBlockHash + return self.currentBlock.Hash() } func (self *ChainManager) CurrentBlock() *types.Block { @@ -207,11 +211,13 @@ func (bc *ChainManager) recover() bool { if len(data) != 0 { block := bc.GetBlock(common.BytesToHash(data)) if block != nil { - if err := WriteHead(bc.chainDb, block); err != nil { - glog.Fatalf("failed to write database head: %v", err) + if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil { + glog.Fatalf("failed to write database head number: %v", err) + } + if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil { + glog.Fatalf("failed to write database head hash: %v", err) } bc.currentBlock = block - bc.lastBlockHash = block.Hash() return true } } @@ -219,12 +225,11 @@ func (bc *ChainManager) recover() bool { } func (bc *ChainManager) setLastState() error { - head := GetHeadHash(bc.chainDb) + head := GetHeadBlockHash(bc.chainDb) if head != (common.Hash{}) { block := bc.GetBlock(head) if block != nil { bc.currentBlock = block - bc.lastBlockHash = block.Hash() } else { glog.Infof("LastBlock (%x) not found. Recovering...\n", head) if bc.recover() { @@ -236,7 +241,7 @@ func (bc *ChainManager) setLastState() error { } else { bc.Reset() } - bc.td = bc.currentBlock.Td + bc.td = bc.GetTd(bc.currentBlock.Hash()) bc.currentGasLimit = CalcGasLimit(bc.currentBlock) if glog.V(logger.Info) { @@ -268,10 +273,10 @@ func (bc *ChainManager) ResetWithGenesisBlock(genesis *types.Block) { bc.futureBlocks.Purge() // Prepare the genesis block and reinitialize the chain - bc.genesisBlock = genesis - bc.genesisBlock.Td = genesis.Difficulty() - - if err := WriteBlock(bc.chainDb, bc.genesisBlock); err != nil { + if err := WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil { + glog.Fatalf("failed to write genesis block TD: %v", err) + } + if err := WriteBlock(bc.chainDb, genesis); err != nil { glog.Fatalf("failed to write genesis block: %v", err) } bc.insert(bc.genesisBlock) @@ -315,23 +320,23 @@ func (self *ChainManager) ExportN(w io.Writer, first uint64, last uint64) error // insert injects a block into the current chain block chain. Note, this function // assumes that the `mu` mutex is held! 
func (bc *ChainManager) insert(block *types.Block) { - err := WriteHead(bc.chainDb, block) - if err != nil { - glog.Fatal("db write fail:", err) + // Add the block to the canonical chain number scheme and mark as the head + if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil { + glog.Fatalf("failed to insert block number: %v", err) } - + if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil { + glog.Fatalf("failed to insert block number: %v", err) + } + // Add a new restore point if we reached some limit bc.checkpoint++ if bc.checkpoint > checkpointLimit { - err = bc.chainDb.Put([]byte("checkpoint"), block.Hash().Bytes()) - if err != nil { - glog.Fatal("db write fail:", err) + if err := bc.chainDb.Put([]byte("checkpoint"), block.Hash().Bytes()); err != nil { + glog.Fatalf("failed to create checkpoint: %v", err) } - bc.checkpoint = 0 } - + // Update the internal internal state with the head block bc.currentBlock = block - bc.lastBlockHash = block.Hash() } // Accessors @@ -352,7 +357,7 @@ func (self *ChainManager) GetHeader(hash common.Hash) *types.Header { if header, ok := self.headerCache.Get(hash); ok { return header.(*types.Header) } - header := GetHeaderByHash(self.chainDb, hash) + header := GetHeader(self.chainDb, hash) if header == nil { return nil } @@ -364,44 +369,39 @@ func (self *ChainManager) GetHeader(hash common.Hash) *types.Header { // GetHeaderByNumber retrieves a block header from the database by number, // caching it (associated with its hash) if found. func (self *ChainManager) GetHeaderByNumber(number uint64) *types.Header { - hash := GetHashByNumber(self.chainDb, number) + hash := GetCanonicalHash(self.chainDb, number) if hash == (common.Hash{}) { return nil } return self.GetHeader(hash) } -// GetBody retrieves a block body (transactions, uncles and total difficulty) -// from the database by hash, caching it if found. The resion for the peculiar -// pointer-to-slice return type is to differentiate between empty and inexistent -// bodies. -func (self *ChainManager) GetBody(hash common.Hash) (*[]*types.Transaction, *[]*types.Header) { +// GetBody retrieves a block body (transactions and uncles) from the database by +// hash, caching it if found. +func (self *ChainManager) GetBody(hash common.Hash) *types.Body { // Short circuit if the body's already in the cache, retrieve otherwise if cached, ok := self.bodyCache.Get(hash); ok { - body := cached.(*storageBody) - return &body.Transactions, &body.Uncles + body := cached.(*types.Body) + return body } - transactions, uncles, td := GetBodyByHash(self.chainDb, hash) - if td == nil { - return nil, nil + body := GetBody(self.chainDb, hash) + if body == nil { + return nil } // Cache the found body for next time and return - self.bodyCache.Add(hash, &storageBody{ - Transactions: transactions, - Uncles: uncles, - }) - return &transactions, &uncles + self.bodyCache.Add(hash, body) + return body } // GetBodyRLP retrieves a block body in RLP encoding from the database by hash, // caching it if found. 
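With the second commit, GetBody above returns a *types.Body instead of the pointer-to-slice pair, so the earlier caller-side sketch collapses to a single nil check. Another illustrative sketch, not part of the patch, assuming Body exposes Transactions and Uncles fields as used by WriteBlock later in this commit:

package example // illustrative only

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
)

// bodyItemCount returns -1 if the body is not stored, otherwise the number of
// transactions and uncles it contains.
func bodyItemCount(cm *core.ChainManager, hash common.Hash) int {
	body := cm.GetBody(hash)
	if body == nil {
		return -1 // not in the database
	}
	return len(body.Transactions) + len(body.Uncles)
}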
-func (self *ChainManager) GetBodyRLP(hash common.Hash) []byte { +func (self *ChainManager) GetBodyRLP(hash common.Hash) rlp.RawValue { // Short circuit if the body's already in the cache, retrieve otherwise if cached, ok := self.bodyRLPCache.Get(hash); ok { - return cached.([]byte) + return cached.(rlp.RawValue) } - body, td := GetBodyRLPByHash(self.chainDb, hash) - if td == nil { + body := GetBodyRLP(self.chainDb, hash) + if len(body) == 0 { return nil } // Cache the found body for next time and return @@ -409,6 +409,22 @@ func (self *ChainManager) GetBodyRLP(hash common.Hash) []byte { return body } +// GetTd retrieves a block's total difficulty in the canonical chain from the +// database by hash, caching it if found. +func (self *ChainManager) GetTd(hash common.Hash) *big.Int { + // Short circuit if the td's already in the cache, retrieve otherwise + if cached, ok := self.tdCache.Get(hash); ok { + return cached.(*big.Int) + } + td := GetTd(self.chainDb, hash) + if td == nil { + return nil + } + // Cache the found body for next time and return + self.tdCache.Add(hash, td) + return td +} + // HasBlock checks if a block is fully present in the database or not, caching // it if present. func (bc *ChainManager) HasBlock(hash common.Hash) bool { @@ -421,7 +437,7 @@ func (self *ChainManager) GetBlock(hash common.Hash) *types.Block { if block, ok := self.blockCache.Get(hash); ok { return block.(*types.Block) } - block := GetBlockByHash(self.chainDb, hash) + block := GetBlock(self.chainDb, hash) if block == nil { return nil } @@ -433,7 +449,7 @@ func (self *ChainManager) GetBlock(hash common.Hash) *types.Block { // GetBlockByNumber retrieves a block from the database by number, caching it // (associated with its hash) if found. func (self *ChainManager) GetBlockByNumber(number uint64) *types.Block { - hash := GetHashByNumber(self.chainDb, number) + hash := GetCanonicalHash(self.chainDb, number) if hash == (common.Hash{}) { return nil } @@ -455,7 +471,7 @@ func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) [ break } chain = append(chain, header.Hash()) - if header.Number.Cmp(common.Big0) <= 0 { + if header.Number.Cmp(common.Big0) == 0 { break } } @@ -531,15 +547,25 @@ const ( SideStatTy ) -// WriteBlock writes the block to the chain (or pending queue) -func (self *ChainManager) WriteBlock(block *types.Block, queued bool) (status writeStatus, err error) { +// WriteBlock writes the block to the chain. +func (self *ChainManager) WriteBlock(block *types.Block) (status writeStatus, err error) { self.wg.Add(1) defer self.wg.Done() + // Calculate the total difficulty of the block + ptd := self.GetTd(block.ParentHash()) + if ptd == nil { + return NonStatTy, ParentError(block.ParentHash()) + } + td := new(big.Int).Add(block.Difficulty(), ptd) + + self.mu.RLock() cblock := self.currentBlock + self.mu.RUnlock() + // Compare the TD of the last known block in the canonical chain to make sure it's greater. // At this point it's possible that a different chain (fork) becomes the new canonical chain. 
- if block.Td.Cmp(self.Td()) > 0 { + if td.Cmp(self.Td()) > 0 { // chain fork if block.ParentHash() != cblock.Hash() { // during split we merge two different chains and create the new canonical chain @@ -547,12 +573,10 @@ func (self *ChainManager) WriteBlock(block *types.Block, queued bool) (status wr if err != nil { return NonStatTy, err } - status = SplitStatTy } - self.mu.Lock() - self.setTotalDifficulty(block.Td) + self.setTotalDifficulty(td) self.insert(block) self.mu.Unlock() @@ -561,9 +585,11 @@ func (self *ChainManager) WriteBlock(block *types.Block, queued bool) (status wr status = SideStatTy } - err = WriteBlock(self.chainDb, block) - if err != nil { - glog.Fatalln("db err:", err) + if err := WriteTd(self.chainDb, block.Hash(), td); err != nil { + glog.Fatalf("failed to write block total difficulty: %v", err) + } + if err := WriteBlock(self.chainDb, block); err != nil { + glog.Fatalf("filed to write block contents: %v", err) } // Delete from future blocks self.futureBlocks.Remove(block.Hash()) @@ -622,11 +648,6 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { blockErr(block, err) return i, err } - - // Setting block.Td regardless of error (known for example) prevents errors down the line - // in the protocol handler - block.Td = new(big.Int).Set(CalcTD(block, self.GetBlock(block.ParentHash()))) - // Call in to the block processor and check for errors. It's likely that if one block fails // all others will fail too (unless a known block is returned). logs, receipts, err := self.processor.Process(block) @@ -666,7 +687,7 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { txcount += len(block.Transactions()) // write the block to the chain and get the status - status, err := self.WriteBlock(block, true) + status, err := self.WriteBlock(block) if err != nil { return i, err } @@ -799,12 +820,11 @@ out: case ChainEvent: // We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long // and in most cases isn't even necessary. 
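Since total difficulty is no longer carried on the block itself, WriteBlock above derives it on the fly from the parent's stored value. A trivial, illustrative sketch of that rule and of how it accumulates along a chain:

package example // illustrative only

import "math/big"

// childTd mirrors the computation in WriteBlock: a block's total difficulty is
// its own difficulty added to the stored total difficulty of its parent.
func childTd(parentTd, difficulty *big.Int) *big.Int {
	return new(big.Int).Add(difficulty, parentTd)
}

// chainTd folds childTd over a run of block difficulties starting at genesis.
func chainTd(genesisDifficulty *big.Int, difficulties []*big.Int) *big.Int {
	td := new(big.Int).Set(genesisDifficulty)
	for _, d := range difficulties {
		td = childTd(td, d)
	}
	return td
}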
- if self.lastBlockHash == event.Hash { + if self.currentBlock.Hash() == event.Hash { self.currentGasLimit = CalcGasLimit(event.Block) self.eventMux.Post(ChainHeadEvent{event.Block}) } } - self.eventMux.Post(event) } } diff --git a/core/chain_manager_test.go b/core/chain_manager_test.go index 97e7cacdc..a20480de8 100644 --- a/core/chain_manager_test.go +++ b/core/chain_manager_test.go @@ -77,6 +77,7 @@ func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash() bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash() if bi1 != bi2 { + fmt.Printf("%+v\n%+v\n\n", bi1, bi2) t.Fatal("chains do not have the same hash at height", i) } bman2.bc.SetProcessor(bman2) @@ -110,7 +111,6 @@ func printChain(bc *ChainManager) { // process blocks against a chain func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) { - td := new(big.Int) for _, block := range chainB { _, _, err := bman.bc.processor.Process(block) if err != nil { @@ -119,17 +119,12 @@ func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) { } return nil, err } - parent := bman.bc.GetBlock(block.ParentHash()) - block.Td = CalcTD(block, parent) - td = block.Td - bman.bc.mu.Lock() - { - WriteBlock(bman.bc.chainDb, block) - } + WriteTd(bman.bc.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), bman.bc.GetTd(block.ParentHash()))) + WriteBlock(bman.bc.chainDb, block) bman.bc.mu.Unlock() } - return td, nil + return bman.bc.GetTd(chainB[len(chainB)-1].Hash()), nil } func loadChain(fn string, t *testing.T) (types.Blocks, error) { @@ -391,6 +386,7 @@ func chm(genesis *types.Block, db common.Database) *ChainManager { bc.headerCache, _ = lru.New(100) bc.bodyCache, _ = lru.New(100) bc.bodyRLPCache, _ = lru.New(100) + bc.tdCache, _ = lru.New(100) bc.blockCache, _ = lru.New(100) bc.futureBlocks, _ = lru.New(100) bc.processor = bproc{} diff --git a/core/chain_util.go b/core/chain_util.go index c12bdda75..0e3fa31f9 100644 --- a/core/chain_util.go +++ b/core/chain_util.go @@ -29,14 +29,18 @@ import ( ) var ( - headKey = []byte("LastBlock") + headHeaderKey = []byte("LastHeader") + headBlockKey = []byte("LastBlock") + + blockPrefix = []byte("block-") + blockNumPrefix = []byte("block-num-") + + headerSuffix = []byte("-header") + bodySuffix = []byte("-body") + tdSuffix = []byte("-td") - headerHashPre = []byte("header-hash-") - bodyHashPre = []byte("body-hash-") - blockNumPre = []byte("block-num-") ExpDiffPeriod = big.NewInt(100000) - - blockHashPre = []byte("block-hash-") // [deprecated by eth/63] + blockHashPre = []byte("block-hash-") // [deprecated by eth/63] ) // CalcDifficulty is the difficulty adjustment algorithm. It returns @@ -73,16 +77,6 @@ func CalcDifficulty(time, parentTime uint64, parentNumber, parentDiff *big.Int) return diff } -// CalcTD computes the total difficulty of block. -func CalcTD(block, parent *types.Block) *big.Int { - if parent == nil { - return block.Difficulty() - } - d := block.Difficulty() - d.Add(d, parent.Td) - return d -} - // CalcGasLimit computes the gas limit of the next block after parent. // The result may be modified by the caller. // This is miner strategy, not consensus protocol. @@ -116,41 +110,48 @@ func CalcGasLimit(parent *types.Block) *big.Int { return gl } -// storageBody is the block body encoding used for the database. -type storageBody struct { - Transactions []*types.Transaction - Uncles []*types.Header -} - -// GetHashByNumber retrieves a hash assigned to a canonical block number. 
-func GetHashByNumber(db common.Database, number uint64) common.Hash { - data, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...)) +// GetCanonicalHash retrieves a hash assigned to a canonical block number. +func GetCanonicalHash(db common.Database, number uint64) common.Hash { + data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)) if len(data) == 0 { return common.Hash{} } return common.BytesToHash(data) } -// GetHeadHash retrieves the hash of the current canonical head block. -func GetHeadHash(db common.Database) common.Hash { - data, _ := db.Get(headKey) +// GetHeadHeaderHash retrieves the hash of the current canonical head block's +// header. The difference between this and GetHeadBlockHash is that whereas the +// last block hash is only updated upon a full block import, the last header +// hash is updated already at header import, allowing head tracking for the +// fast synchronization mechanism. +func GetHeadHeaderHash(db common.Database) common.Hash { + data, _ := db.Get(headHeaderKey) if len(data) == 0 { return common.Hash{} } return common.BytesToHash(data) } -// GetHeaderRLPByHash retrieves a block header in its raw RLP database encoding, -// or nil if the header's not found. -func GetHeaderRLPByHash(db common.Database, hash common.Hash) []byte { - data, _ := db.Get(append(headerHashPre, hash[:]...)) +// GetHeadBlockHash retrieves the hash of the current canonical head block. +func GetHeadBlockHash(db common.Database) common.Hash { + data, _ := db.Get(headBlockKey) + if len(data) == 0 { + return common.Hash{} + } + return common.BytesToHash(data) +} + +// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil +// if the header's not found. +func GetHeaderRLP(db common.Database, hash common.Hash) rlp.RawValue { + data, _ := db.Get(append(append(blockPrefix, hash[:]...), headerSuffix...)) return data } -// GetHeaderByHash retrieves the block header corresponding to the hash, nil if -// none found. -func GetHeaderByHash(db common.Database, hash common.Hash) *types.Header { - data := GetHeaderRLPByHash(db, hash) +// GetHeader retrieves the block header corresponding to the hash, nil if none +// found. +func GetHeader(db common.Database, hash common.Hash) *types.Header { + data := GetHeaderRLP(db, hash) if len(data) == 0 { return nil } @@ -162,69 +163,61 @@ func GetHeaderByHash(db common.Database, hash common.Hash) *types.Header { return header } -// GetBodyRLPByHash retrieves the block body (transactions and uncles) in RLP -// encoding, and the associated total difficulty. -func GetBodyRLPByHash(db common.Database, hash common.Hash) ([]byte, *big.Int) { - combo, _ := db.Get(append(bodyHashPre, hash[:]...)) - if len(combo) == 0 { - return nil, nil - } - buffer := bytes.NewBuffer(combo) - - td := new(big.Int) - if err := rlp.Decode(buffer, td); err != nil { - glog.V(logger.Error).Infof("invalid block td RLP for hash %x: %v", hash, err) - return nil, nil - } - return buffer.Bytes(), td +// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding. +func GetBodyRLP(db common.Database, hash common.Hash) rlp.RawValue { + data, _ := db.Get(append(append(blockPrefix, hash[:]...), bodySuffix...)) + return data } -// GetBodyByHash retrieves the block body (transactons, uncles, total difficulty) -// corresponding to the hash, nils if none found. 
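The second commit re-keys everything around the block hash, storing the header, body and total difficulty under a shared "block-" prefix with per-component suffixes, while "block-num-" keeps the canonical number-to-hash index and "LastHeader"/"LastBlock" track the heads. An editorial sketch of the resulting keys, not part of the patch:

package example // illustrative only

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

// v2Keys reconstructs the key layout introduced by the second commit.
func v2Keys(hash common.Hash, number uint64) (headerKey, bodyKey, tdKey, canonKey []byte) {
	headerKey = append(append([]byte("block-"), hash.Bytes()...), []byte("-header")...)
	bodyKey = append(append([]byte("block-"), hash.Bytes()...), []byte("-body")...)
	tdKey = append(append([]byte("block-"), hash.Bytes()...), []byte("-td")...)
	canonKey = append([]byte("block-num-"), big.NewInt(int64(number)).Bytes()...)
	return
}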
-func GetBodyByHash(db common.Database, hash common.Hash) ([]*types.Transaction, []*types.Header, *big.Int) { - data, td := GetBodyRLPByHash(db, hash) - if len(data) == 0 || td == nil { - return nil, nil, nil +// GetBody retrieves the block body (transactons, uncles) corresponding to the +// hash, nil if none found. +func GetBody(db common.Database, hash common.Hash) *types.Body { + data := GetBodyRLP(db, hash) + if len(data) == 0 { + return nil } - body := new(storageBody) + body := new(types.Body) if err := rlp.Decode(bytes.NewReader(data), body); err != nil { glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err) - return nil, nil, nil + return nil } - return body.Transactions, body.Uncles, td + return body } -// GetBlockByHash retrieves an entire block corresponding to the hash, assembling -// it back from the stored header and body. -func GetBlockByHash(db common.Database, hash common.Hash) *types.Block { +// GetTd retrieves a block's total difficulty corresponding to the hash, nil if +// none found. +func GetTd(db common.Database, hash common.Hash) *big.Int { + data, _ := db.Get(append(append(blockPrefix, hash.Bytes()...), tdSuffix...)) + if len(data) == 0 { + return nil + } + td := new(big.Int) + if err := rlp.Decode(bytes.NewReader(data), td); err != nil { + glog.V(logger.Error).Infof("invalid block total difficulty RLP for hash %x: %v", hash, err) + return nil + } + return td +} + +// GetBlock retrieves an entire block corresponding to the hash, assembling it +// back from the stored header and body. +func GetBlock(db common.Database, hash common.Hash) *types.Block { // Retrieve the block header and body contents - header := GetHeaderByHash(db, hash) + header := GetHeader(db, hash) if header == nil { return nil } - transactions, uncles, td := GetBodyByHash(db, hash) - if td == nil { + body := GetBody(db, hash) + if body == nil { return nil } // Reassemble the block and return - block := types.NewBlockWithHeader(header).WithBody(transactions, uncles) - block.Td = td - - return block + return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles) } -// GetBlockByNumber returns the canonical block by number or nil if not found. -func GetBlockByNumber(db common.Database, number uint64) *types.Block { - key, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...)) - if len(key) == 0 { - return nil - } - return GetBlockByHash(db, common.BytesToHash(key)) -} - -// WriteCanonNumber stores the canonical hash for the given block number. -func WriteCanonNumber(db common.Database, hash common.Hash, number uint64) error { - key := append(blockNumPre, big.NewInt(int64(number)).Bytes()...) +// WriteCanonicalHash stores the canonical hash for the given block number. +func WriteCanonicalHash(db common.Database, hash common.Hash, number uint64) error { + key := append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...) if err := db.Put(key, hash.Bytes()); err != nil { glog.Fatalf("failed to store number to hash mapping into database: %v", err) return err @@ -232,14 +225,19 @@ func WriteCanonNumber(db common.Database, hash common.Hash, number uint64) error return nil } -// WriteHead updates the head block of the chain database. -func WriteHead(db common.Database, block *types.Block) error { - if err := WriteCanonNumber(db, block.Hash(), block.NumberU64()); err != nil { - glog.Fatalf("failed to store canonical number into database: %v", err) +// WriteHeadHeaderHash stores the head header's hash. 
+func WriteHeadHeaderHash(db common.Database, hash common.Hash) error { + if err := db.Put(headHeaderKey, hash.Bytes()); err != nil { + glog.Fatalf("failed to store last header's hash into database: %v", err) return err } - if err := db.Put(headKey, block.Hash().Bytes()); err != nil { - glog.Fatalf("failed to store last block into database: %v", err) + return nil +} + +// WriteHeadBlockHash stores the head block's hash. +func WriteHeadBlockHash(db common.Database, hash common.Hash) error { + if err := db.Put(headBlockKey, hash.Bytes()); err != nil { + glog.Fatalf("failed to store last block's hash into database: %v", err) return err } return nil @@ -251,7 +249,7 @@ func WriteHeader(db common.Database, header *types.Header) error { if err != nil { return err } - key := append(headerHashPre, header.Hash().Bytes()...) + key := append(append(blockPrefix, header.Hash().Bytes()...), headerSuffix...) if err := db.Put(key, data); err != nil { glog.Fatalf("failed to store header into database: %v", err) return err @@ -261,28 +259,39 @@ func WriteHeader(db common.Database, header *types.Header) error { } // WriteBody serializes the body of a block into the database. -func WriteBody(db common.Database, block *types.Block) error { - body, err := rlp.EncodeToBytes(&storageBody{block.Transactions(), block.Uncles()}) +func WriteBody(db common.Database, hash common.Hash, body *types.Body) error { + data, err := rlp.EncodeToBytes(body) if err != nil { return err } - td, err := rlp.EncodeToBytes(block.Td) - if err != nil { - return err - } - key := append(bodyHashPre, block.Hash().Bytes()...) - if err := db.Put(key, append(td, body...)); err != nil { + key := append(append(blockPrefix, hash.Bytes()...), bodySuffix...) + if err := db.Put(key, data); err != nil { glog.Fatalf("failed to store block body into database: %v", err) return err } - glog.V(logger.Debug).Infof("stored block body #%v [%x…]", block.Number, block.Hash().Bytes()[:4]) + glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4]) + return nil +} + +// WriteTd serializes the total difficulty of a block into the database. +func WriteTd(db common.Database, hash common.Hash, td *big.Int) error { + data, err := rlp.EncodeToBytes(td) + if err != nil { + return err + } + key := append(append(blockPrefix, hash.Bytes()...), tdSuffix...) + if err := db.Put(key, data); err != nil { + glog.Fatalf("failed to store block total difficulty into database: %v", err) + return err + } + glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td) return nil } // WriteBlock serializes a block into the database, header and body separately. func WriteBlock(db common.Database, block *types.Block) error { // Store the body first to retain database consistency - if err := WriteBody(db, block); err != nil { + if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil { return err } // Store the header too, signaling full block ownership @@ -292,20 +301,31 @@ func WriteBlock(db common.Database, block *types.Block) error { return nil } +// DeleteCanonicalHash removes the number to hash canonical mapping. +func DeleteCanonicalHash(db common.Database, number uint64) { + db.Delete(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)) +} + // DeleteHeader removes all block header data associated with a hash. 
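Putting the pieces together, the following self-contained sketch (again not part of the patch) exercises the split schema end to end: WriteBlock stores the body before the header so that a readable header implies a complete block, while total difficulty and the canonical/head pointers are separate writes, mirroring the sequence used by WriteGenesisBlock later in this patch. The in-memory database and header values are made up for the example.

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/ethdb"
    )

    func main() {
        db, _ := ethdb.NewMemDatabase()
        block := types.NewBlockWithHeader(&types.Header{
            Number:     big.NewInt(1),
            Difficulty: big.NewInt(131072),
            Extra:      []byte("demo block"),
        })
        // WriteBlock stores the body before the header, so a readable header
        // implies the rest of the block data is already in place.
        if err := core.WriteBlock(db, block); err != nil {
            panic(err)
        }
        // Total difficulty and the canonical/head pointers are separate writes now.
        if err := core.WriteTd(db, block.Hash(), block.Difficulty()); err != nil {
            panic(err)
        }
        if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
            panic(err)
        }
        if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
            panic(err)
        }
        // Read everything back through the split-schema accessors.
        if stored := core.GetBlock(db, block.Hash()); stored != nil {
            fmt.Printf("block:     %x\n", stored.Hash())
        }
        fmt.Println("td:       ", core.GetTd(db, block.Hash()))
        fmt.Printf("canonical: %x\n", core.GetCanonicalHash(db, block.NumberU64()))
        fmt.Printf("head:      %x\n", core.GetHeadBlockHash(db))
    }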
func DeleteHeader(db common.Database, hash common.Hash) { - db.Delete(append(headerHashPre, hash.Bytes()...)) + db.Delete(append(append(blockPrefix, hash.Bytes()...), headerSuffix...)) } // DeleteBody removes all block body data associated with a hash. func DeleteBody(db common.Database, hash common.Hash) { - db.Delete(append(bodyHashPre, hash.Bytes()...)) + db.Delete(append(append(blockPrefix, hash.Bytes()...), bodySuffix...)) +} + +// DeleteTd removes all block total difficulty data associated with a hash. +func DeleteTd(db common.Database, hash common.Hash) { + db.Delete(append(append(blockPrefix, hash.Bytes()...), tdSuffix...)) } // DeleteBlock removes all block data associated with a hash. func DeleteBlock(db common.Database, hash common.Hash) { DeleteHeader(db, hash) DeleteBody(db, hash) + DeleteTd(db, hash) } // [deprecated by eth/63] diff --git a/core/chain_util_test.go b/core/chain_util_test.go index 4bbe81194..3f0446715 100644 --- a/core/chain_util_test.go +++ b/core/chain_util_test.go @@ -23,6 +23,10 @@ import ( "testing" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto/sha3" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/rlp" ) type diffTest struct { @@ -75,3 +79,242 @@ func TestDifficulty(t *testing.T) { } } } + +// Tests block header storage and retrieval operations. +func TestHeaderStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test header to move around the database and make sure it's really new + header := &types.Header{Extra: []byte("test header")} + if entry := GetHeader(db, header.Hash()); entry != nil { + t.Fatalf("Non existent header returned: %v", entry) + } + // Write and verify the header in the database + if err := WriteHeader(db, header); err != nil { + t.Fatalf("Failed to write header into database: %v", err) + } + if entry := GetHeader(db, header.Hash()); entry == nil { + t.Fatalf("Stored header not found") + } else if entry.Hash() != header.Hash() { + t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header) + } + if entry := GetHeaderRLP(db, header.Hash()); entry == nil { + t.Fatalf("Stored header RLP not found") + } else { + hasher := sha3.NewKeccak256() + hasher.Write(entry) + + if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() { + t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header) + } + } + // Delete the header and verify the execution + DeleteHeader(db, header.Hash()) + if entry := GetHeader(db, header.Hash()); entry != nil { + t.Fatalf("Deleted header returned: %v", entry) + } +} + +// Tests block body storage and retrieval operations. 
+func TestBodyStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test body to move around the database and make sure it's really new + body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}} + + hasher := sha3.NewKeccak256() + rlp.Encode(hasher, body) + hash := common.BytesToHash(hasher.Sum(nil)) + + if entry := GetBody(db, hash); entry != nil { + t.Fatalf("Non existent body returned: %v", entry) + } + // Write and verify the body in the database + if err := WriteBody(db, hash, body); err != nil { + t.Fatalf("Failed to write body into database: %v", err) + } + if entry := GetBody(db, hash); entry == nil { + t.Fatalf("Stored body not found") + } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { + t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body) + } + if entry := GetBodyRLP(db, hash); entry == nil { + t.Fatalf("Stored body RLP not found") + } else { + hasher := sha3.NewKeccak256() + hasher.Write(entry) + + if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash { + t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body) + } + } + // Delete the body and verify the execution + DeleteBody(db, hash) + if entry := GetBody(db, hash); entry != nil { + t.Fatalf("Deleted body returned: %v", entry) + } +} + +// Tests block storage and retrieval operations. +func TestBlockStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test block to move around the database and make sure it's really new + block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")}) + if entry := GetBlock(db, block.Hash()); entry != nil { + t.Fatalf("Non existent block returned: %v", entry) + } + if entry := GetHeader(db, block.Hash()); entry != nil { + t.Fatalf("Non existent header returned: %v", entry) + } + if entry := GetBody(db, block.Hash()); entry != nil { + t.Fatalf("Non existent body returned: %v", entry) + } + // Write and verify the block in the database + if err := WriteBlock(db, block); err != nil { + t.Fatalf("Failed to write block into database: %v", err) + } + if entry := GetBlock(db, block.Hash()); entry == nil { + t.Fatalf("Stored block not found") + } else if entry.Hash() != block.Hash() { + t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block) + } + if entry := GetHeader(db, block.Hash()); entry == nil { + t.Fatalf("Stored header not found") + } else if entry.Hash() != block.Header().Hash() { + t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header()) + } + if entry := GetBody(db, block.Hash()); entry == nil { + t.Fatalf("Stored body not found") + } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { + t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, &types.Body{block.Transactions(), block.Uncles()}) + } + // Delete the block and verify the execution + DeleteBlock(db, block.Hash()) + if entry := GetBlock(db, block.Hash()); entry != nil { + t.Fatalf("Deleted block returned: %v", entry) + } + if entry := GetHeader(db, block.Hash()); entry != nil { + t.Fatalf("Deleted header returned: %v", entry) + } + if entry := GetBody(db, block.Hash()); entry != nil { + t.Fatalf("Deleted body returned: %v", entry) + } +} + +// Tests that partial block contents don't get reassembled into 
full blocks. +func TestPartialBlockStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + block := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block")}) + + // Store a header and check that it's not recognized as a block + if err := WriteHeader(db, block.Header()); err != nil { + t.Fatalf("Failed to write header into database: %v", err) + } + if entry := GetBlock(db, block.Hash()); entry != nil { + t.Fatalf("Non existent block returned: %v", entry) + } + DeleteHeader(db, block.Hash()) + + // Store a body and check that it's not recognized as a block + if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil { + t.Fatalf("Failed to write body into database: %v", err) + } + if entry := GetBlock(db, block.Hash()); entry != nil { + t.Fatalf("Non existent block returned: %v", entry) + } + DeleteBody(db, block.Hash()) + + // Store a header and a body separately and check reassembly + if err := WriteHeader(db, block.Header()); err != nil { + t.Fatalf("Failed to write header into database: %v", err) + } + if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil { + t.Fatalf("Failed to write body into database: %v", err) + } + if entry := GetBlock(db, block.Hash()); entry == nil { + t.Fatalf("Stored block not found") + } else if entry.Hash() != block.Hash() { + t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block) + } +} + +// Tests block total difficulty storage and retrieval operations. +func TestTdStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test TD to move around the database and make sure it's really new + hash, td := common.Hash{}, big.NewInt(314) + if entry := GetTd(db, hash); entry != nil { + t.Fatalf("Non existent TD returned: %v", entry) + } + // Write and verify the TD in the database + if err := WriteTd(db, hash, td); err != nil { + t.Fatalf("Failed to write TD into database: %v", err) + } + if entry := GetTd(db, hash); entry == nil { + t.Fatalf("Stored TD not found") + } else if entry.Cmp(td) != 0 { + t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td) + } + // Delete the TD and verify the execution + DeleteTd(db, hash) + if entry := GetTd(db, hash); entry != nil { + t.Fatalf("Deleted TD returned: %v", entry) + } +} + +// Tests that canonical numbers can be mapped to hashes and retrieved. +func TestCanonicalMappingStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test canonical number and assinged hash to move around + hash, number := common.Hash{0: 0xff}, uint64(314) + if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) { + t.Fatalf("Non existent canonical mapping returned: %v", entry) + } + // Write and verify the TD in the database + if err := WriteCanonicalHash(db, hash, number); err != nil { + t.Fatalf("Failed to write canonical mapping into database: %v", err) + } + if entry := GetCanonicalHash(db, number); entry == (common.Hash{}) { + t.Fatalf("Stored canonical mapping not found") + } else if entry != hash { + t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash) + } + // Delete the TD and verify the execution + DeleteCanonicalHash(db, number) + if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) { + t.Fatalf("Deleted canonical mapping returned: %v", entry) + } +} + +// Tests that head headers and head blocks can be assigned, individually. 
+func TestHeadStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")}) + blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")}) + + // Check that no head entries are in a pristine database + if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) { + t.Fatalf("Non head header entry returned: %v", entry) + } + if entry := GetHeadBlockHash(db); entry != (common.Hash{}) { + t.Fatalf("Non head block entry returned: %v", entry) + } + // Assign separate entries for the head header and block + if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil { + t.Fatalf("Failed to write head header hash: %v", err) + } + if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil { + t.Fatalf("Failed to write head block hash: %v", err) + } + // Check that both heads are present, and different (i.e. two heads maintained) + if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() { + t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash()) + } + if entry := GetHeadBlockHash(db); entry != blockFull.Hash() { + t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash()) + } +} diff --git a/core/genesis.go b/core/genesis.go index 6fbc671b0..3a8f0af0c 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -82,28 +82,29 @@ func WriteGenesisBlock(chainDb common.Database, reader io.Reader) (*types.Block, Coinbase: common.HexToAddress(genesis.Coinbase), Root: statedb.Root(), }, nil, nil, nil) - block.Td = difficulty - if block := GetBlockByHash(chainDb, block.Hash()); block != nil { + if block := GetBlock(chainDb, block.Hash()); block != nil { glog.V(logger.Info).Infoln("Genesis block already in chain. Writing canonical number") - err := WriteCanonNumber(chainDb, block.Hash(), block.NumberU64()) + err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) if err != nil { return nil, err } return block, nil } - statedb.Sync() - err = WriteBlock(chainDb, block) - if err != nil { + if err := WriteTd(chainDb, block.Hash(), difficulty); err != nil { return nil, err } - err = WriteHead(chainDb, block) - if err != nil { + if err := WriteBlock(chainDb, block); err != nil { + return nil, err + } + if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil { + return nil, err + } + if err := WriteHeadBlockHash(chainDb, block.Hash()); err != nil { return nil, err } - return block, nil } @@ -120,7 +121,6 @@ func GenesisBlockForTesting(db common.Database, addr common.Address, balance *bi GasLimit: params.GenesisGasLimit, Root: statedb.Root(), }, nil, nil, nil) - block.Td = params.GenesisDifficulty return block } diff --git a/core/types/block.go b/core/types/block.go index 558b46e01..7a84045a6 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -117,6 +117,13 @@ func rlpHash(x interface{}) (h common.Hash) { return h } +// Body is a simple (mutable, non-safe) data container for storing and moving +// a block's data contents (transactions and uncles) together. +type Body struct { + Transactions []*Transaction + Uncles []*Header +} + type Block struct { header *Header uncles []*Header @@ -129,12 +136,19 @@ type Block struct { // Td is used by package core to store the total difficulty // of the chain up to and including the block. - Td *big.Int + td *big.Int // ReceivedAt is used by package eth to track block propagation time. 
ReceivedAt time.Time } +// DeprecatedTd is an old relic for extracting the TD of a block. It is in the +// code solely to facilitate upgrading the database from the old format to the +// new, after which it should be deleted. Do not use! +func (b *Block) DeprecatedTd() *big.Int { + return b.td +} + // [deprecated by eth/63] // StorageBlock defines the RLP encoding of a Block stored in the // state database. The StorageBlock encoding contains fields that @@ -170,7 +184,7 @@ var ( // are ignored and set to values derived from the given txs, uncles // and receipts. func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt) *Block { - b := &Block{header: copyHeader(header), Td: new(big.Int)} + b := &Block{header: copyHeader(header), td: new(big.Int)} // TODO: panic if len(txs) != len(receipts) if len(txs) == 0 { @@ -276,20 +290,10 @@ func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error { if err := s.Decode(&sb); err != nil { return err } - b.header, b.uncles, b.transactions, b.Td = sb.Header, sb.Uncles, sb.Txs, sb.TD + b.header, b.uncles, b.transactions, b.td = sb.Header, sb.Uncles, sb.Txs, sb.TD return nil } -// [deprecated by eth/63] -func (b *StorageBlock) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, storageblock{ - Header: b.header, - Txs: b.transactions, - Uncles: b.uncles, - TD: b.Td, - }) -} - // TODO: copies func (b *Block) Uncles() []*Header { return b.uncles } func (b *Block) Transactions() Transactions { return b.transactions } @@ -360,7 +364,6 @@ func (b *Block) WithMiningResult(nonce uint64, mixDigest common.Hash) *Block { transactions: b.transactions, receipts: b.receipts, uncles: b.uncles, - Td: b.Td, } } @@ -390,7 +393,7 @@ func (b *Block) Hash() common.Hash { } func (b *Block) String() string { - str := fmt.Sprintf(`Block(#%v): Size: %v TD: %v { + str := fmt.Sprintf(`Block(#%v): Size: %v { MinerHash: %x %v Transactions: @@ -398,7 +401,7 @@ Transactions: Uncles: %v } -`, b.Number(), b.Size(), b.Td, b.header.HashNoNonce(), b.header, b.transactions, b.uncles) +`, b.Number(), b.Size(), b.header.HashNoNonce(), b.header, b.transactions, b.uncles) return str } diff --git a/eth/backend.go b/eth/backend.go index 59f2ab01a..deb6d3d0f 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -316,9 +316,13 @@ func New(config *Config) (*Ethereum, error) { if err != nil { return nil, err } - case config.GenesisBlock != nil: // This is for testing only. + } + // This is for testing only. + if config.GenesisBlock != nil { + core.WriteTd(chainDb, config.GenesisBlock.Hash(), config.GenesisBlock.Difficulty()) core.WriteBlock(chainDb, config.GenesisBlock) - core.WriteHead(chainDb, config.GenesisBlock) + core.WriteCanonicalHash(chainDb, config.GenesisBlock.Hash(), config.GenesisBlock.NumberU64()) + core.WriteHeadBlockHash(chainDb, config.GenesisBlock.Hash()) } if !config.SkipBcVersionCheck { @@ -752,7 +756,10 @@ func upgradeChainDatabase(db common.Database) error { // Load the block, split and serialize (order!) 
block := core.GetBlockByHashOld(db, common.BytesToHash(bytes.TrimPrefix(it.Key(), blockPrefix))) - if err := core.WriteBody(db, block); err != nil { + if err := core.WriteTd(db, block.Hash(), block.DeprecatedTd()); err != nil { + return err + } + if err := core.WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil { return err } if err := core.WriteHeader(db, block.Header()); err != nil { @@ -765,7 +772,10 @@ func upgradeChainDatabase(db common.Database) error { // Lastly, upgrade the head block, disabling the upgrade mechanism current := core.GetBlockByHashOld(db, head) - if err := core.WriteBody(db, current); err != nil { + if err := core.WriteTd(db, current.Hash(), current.DeprecatedTd()); err != nil { + return err + } + if err := core.WriteBody(db, current.Hash(), &types.Body{current.Transactions(), current.Uncles()}); err != nil { return err } if err := core.WriteHeader(db, current.Header()); err != nil { diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 73f95bf64..d28985b3e 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -87,6 +87,9 @@ type blockRetrievalFn func(common.Hash) *types.Block // headRetrievalFn is a callback type for retrieving the head block from the local chain. type headRetrievalFn func() *types.Block +// tdRetrievalFn is a callback type for retrieving the total difficulty of a local block. +type tdRetrievalFn func(common.Hash) *big.Int + // chainInsertFn is a callback type to insert a batch of blocks into the local chain. type chainInsertFn func(types.Blocks) (int, error) @@ -136,6 +139,7 @@ type Downloader struct { hasBlock hashCheckFn // Checks if a block is present in the chain getBlock blockRetrievalFn // Retrieves a block from the chain headBlock headRetrievalFn // Retrieves the head block from the chain + getTd tdRetrievalFn // Retrieves the TD of a block from the chain insertChain chainInsertFn // Injects a batch of blocks into the chain dropPeer peerDropFn // Drops a peer for misbehaving @@ -168,7 +172,7 @@ type Block struct { } // New creates a new downloader to fetch hashes and blocks from remote peers. 
-func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, headBlock headRetrievalFn, insertChain chainInsertFn, dropPeer peerDropFn) *Downloader { +func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, headBlock headRetrievalFn, getTd tdRetrievalFn, insertChain chainInsertFn, dropPeer peerDropFn) *Downloader { return &Downloader{ mux: mux, queue: newQueue(), @@ -176,6 +180,7 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, he hasBlock: hasBlock, getBlock: getBlock, headBlock: headBlock, + getTd: getTd, insertChain: insertChain, dropPeer: dropPeer, newPeerCh: make(chan *peer, 1), @@ -582,7 +587,7 @@ func (d *Downloader) fetchHashes61(p *peer, td *big.Int, from uint64) error { // L: Sync begins, and finds common ancestor at 11 // L: Request new hashes up from 11 (R's TD was higher, it must have something) // R: Nothing to give - if !gotHashes && td.Cmp(d.headBlock().Td) > 0 { + if !gotHashes && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 { return errStallingPeer } return nil @@ -958,7 +963,7 @@ func (d *Downloader) fetchHeaders(p *peer, td *big.Int, from uint64) error { // L: Sync begins, and finds common ancestor at 11 // L: Request new headers up from 11 (R's TD was higher, it must have something) // R: Nothing to give - if !gotHeaders && td.Cmp(d.headBlock().Td) > 0 { + if !gotHeaders && td.Cmp(d.getTd(d.headBlock().Hash())) > 0 { return errStallingPeer } return nil diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 8d009b671..dbcf93607 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -93,21 +93,25 @@ func makeChainFork(n, f int, parent *types.Block) (h1, h2 []common.Hash, b1, b2 type downloadTester struct { downloader *Downloader - ownHashes []common.Hash // Hash chain belonging to the tester - ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester - peerHashes map[string][]common.Hash // Hash chain belonging to different test peers - peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers + ownHashes []common.Hash // Hash chain belonging to the tester + ownBlocks map[common.Hash]*types.Block // Blocks belonging to the tester + ownChainTd map[common.Hash]*big.Int // Total difficulties of the blocks in the local chain + peerHashes map[string][]common.Hash // Hash chain belonging to different test peers + peerBlocks map[string]map[common.Hash]*types.Block // Blocks belonging to different test peers + peerChainTds map[string]map[common.Hash]*big.Int // Total difficulties of the blocks in the peer chains } // newTester creates a new downloader test mocker. 
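To make the downloader change concrete, here is a minimal, self-contained illustration of the tdRetrievalFn contract; the database contents and difficulties are invented for the sketch. Total difficulty is now looked up from the chain through a callback rather than read off the head block's removed Td field.

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/ethdb"
    )

    func main() {
        db, _ := ethdb.NewMemDatabase()
        head := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(11), Extra: []byte("local head")})
        core.WriteTd(db, head.Hash(), big.NewInt(131072))

        // The downloader receives a func(common.Hash) *big.Int and consults it for
        // the head block's total difficulty, instead of reading the removed Td field.
        getTd := func(hash common.Hash) *big.Int { return core.GetTd(db, hash) }

        // A peer that advertised a higher TD than our head but delivered nothing is
        // flagged as stalling (see the checks in fetchHashes61/fetchHeaders).
        peerTd := big.NewInt(262144)
        fmt.Println("peer claims a heavier chain:", peerTd.Cmp(getTd(head.Hash())) > 0)
    }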
func newTester() *downloadTester { tester := &downloadTester{ - ownHashes: []common.Hash{genesis.Hash()}, - ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis}, - peerHashes: make(map[string][]common.Hash), - peerBlocks: make(map[string]map[common.Hash]*types.Block), + ownHashes: []common.Hash{genesis.Hash()}, + ownBlocks: map[common.Hash]*types.Block{genesis.Hash(): genesis}, + ownChainTd: map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()}, + peerHashes: make(map[string][]common.Hash), + peerBlocks: make(map[string]map[common.Hash]*types.Block), + peerChainTds: make(map[string]map[common.Hash]*big.Int), } - tester.downloader = New(new(event.TypeMux), tester.hasBlock, tester.getBlock, tester.headBlock, tester.insertChain, tester.dropPeer) + tester.downloader = New(new(event.TypeMux), tester.hasBlock, tester.getBlock, tester.headBlock, tester.getTd, tester.insertChain, tester.dropPeer) return tester } @@ -119,8 +123,8 @@ func (dl *downloadTester) sync(id string, td *big.Int) error { // If no particular TD was requested, load from the peer's blockchain if td == nil { td = big.NewInt(1) - if block, ok := dl.peerBlocks[id][hash]; ok { - td = block.Td + if diff, ok := dl.peerChainTds[id][hash]; ok { + td = diff } } err := dl.downloader.synchronise(id, hash, td) @@ -152,6 +156,11 @@ func (dl *downloadTester) headBlock() *types.Block { return dl.getBlock(dl.ownHashes[len(dl.ownHashes)-1]) } +// getTd retrieves the block's total difficulty from the canonical chain. +func (dl *downloadTester) getTd(hash common.Hash) *big.Int { + return dl.ownChainTd[hash] +} + // insertChain injects a new batch of blocks into the simulated chain. func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) { for i, block := range blocks { @@ -160,6 +169,7 @@ func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) { } dl.ownHashes = append(dl.ownHashes, block.Hash()) dl.ownBlocks[block.Hash()] = block + dl.ownChainTd[block.Hash()] = dl.ownChainTd[block.ParentHash()] } return len(blocks), nil } @@ -180,9 +190,16 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha // Assign the owned hashes and blocks to the peer (deep copy) dl.peerHashes[id] = make([]common.Hash, len(hashes)) copy(dl.peerHashes[id], hashes) + dl.peerBlocks[id] = make(map[common.Hash]*types.Block) - for hash, block := range blocks { - dl.peerBlocks[id][hash] = block + dl.peerChainTds[id] = make(map[common.Hash]*big.Int) + for _, hash := range hashes { + if block, ok := blocks[hash]; ok { + dl.peerBlocks[id][hash] = block + if parent, ok := dl.peerBlocks[id][block.ParentHash()]; ok { + dl.peerChainTds[id][hash] = new(big.Int).Add(block.Difficulty(), dl.peerChainTds[id][parent.Hash()]) + } + } } } return err @@ -192,6 +209,7 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha func (dl *downloadTester) dropPeer(id string) { delete(dl.peerHashes, id) delete(dl.peerBlocks, id) + delete(dl.peerChainTds, id) dl.downloader.UnregisterPeer(id) } diff --git a/eth/handler.go b/eth/handler.go index 95f4e8ce2..4aef69043 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -36,8 +36,10 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) -// This is the target maximum size of returned blocks, headers or node data. -const softResponseLimit = 2 * 1024 * 1024 +const ( + softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data. 
+ estHeaderRlpSize = 500 // Approximate size of an RLP encoded block header +) func errResp(code errCode, format string, v ...interface{}) error { return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...)) @@ -113,7 +115,7 @@ func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow po } } // Construct the different synchronisation mechanisms - manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.CurrentBlock, manager.chainman.InsertChain, manager.removePeer) + manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.CurrentBlock, manager.chainman.GetTd, manager.chainman.InsertChain, manager.removePeer) validator := func(block *types.Block, parent *types.Block) error { return core.ValidateHeader(pow, block.Header(), parent, true, false) @@ -363,7 +365,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { break } headers = append(headers, origin) - bytes += 500 // Approximate, should be good enough estimate + bytes += estHeaderRlpSize // Advance to the next header of the query switch { @@ -453,7 +455,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { var ( hash common.Hash bytes int - bodies []*blockBodyRLP + bodies []rlp.RawValue ) for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch { // Retrieve the hash of the next block @@ -464,9 +466,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } // Retrieve the requested block body, stopping if enough was found if data := pm.chainman.GetBodyRLP(hash); len(data) != 0 { - body := blockBodyRLP(data) - bodies = append(bodies, &body) - bytes += len(body) + bodies = append(bodies, data) + bytes += len(data) } } return p.SendBlockBodiesRLP(bodies) @@ -644,7 +645,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) { // Calculate the TD of the block (it's not imported yet, so block.Td is not valid) var td *big.Int if parent := pm.chainman.GetBlock(block.ParentHash()); parent != nil { - td = new(big.Int).Add(parent.Td, block.Difficulty()) + td = new(big.Int).Add(block.Difficulty(), pm.chainman.GetTd(block.ParentHash())) } else { glog.V(logger.Error).Infof("propagating dangling block #%d [%x]", block.NumberU64(), hash[:4]) return diff --git a/eth/peer.go b/eth/peer.go index f1ddd9726..603b49b88 100644 --- a/eth/peer.go +++ b/eth/peer.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/rlp" "gopkg.in/fatih/set.v0" ) @@ -186,8 +187,8 @@ func (p *peer) SendBlockBodies(bodies []*blockBody) error { // SendBlockBodiesRLP sends a batch of block contents to the remote peer from // an already RLP encoded format. -func (p *peer) SendBlockBodiesRLP(bodies []*blockBodyRLP) error { - return p2p.Send(p.rw, BlockBodiesMsg, blockBodiesRLPData(bodies)) +func (p *peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error { + return p2p.Send(p.rw, BlockBodiesMsg, bodies) } // SendNodeData sends a batch of arbitrary internal data, corresponding to the diff --git a/eth/protocol.go b/eth/protocol.go index 24007bbb5..49f096a3b 100644 --- a/eth/protocol.go +++ b/eth/protocol.go @@ -213,22 +213,6 @@ type blockBody struct { // blockBodiesData is the network packet for block content distribution. type blockBodiesData []*blockBody -// blockBodyRLP represents the RLP encoded data content of a single block. 
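The switch from the custom blockBodyRLP wrapper to rlp.RawValue relies on the encoder emitting a RawValue verbatim, so database-encoded bodies reach the wire without a decode/re-encode round trip. A standalone sketch of that passthrough property, with made-up payloads, is below.

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ethereum/go-ethereum/rlp"
    )

    func main() {
        // Pre-encode a payload once, standing in for a body already stored as RLP.
        body, _ := rlp.EncodeToBytes([]string{"tx1", "tx2"})

        // rlp.RawValue is written out verbatim by the encoder, so a batch of raw
        // bodies can be framed for the network without re-encoding each element.
        batch, _ := rlp.EncodeToBytes([]rlp.RawValue{body, body})

        fmt.Println("raw body embedded verbatim:", bytes.Contains(batch, body))
    }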
-type blockBodyRLP []byte - -// EncodeRLP is a specialized encoder for a block body to pass the already -// encoded body RLPs from the database on, without double encoding. -func (b *blockBodyRLP) EncodeRLP(w io.Writer) error { - if _, err := w.Write([]byte(*b)); err != nil { - return err - } - return nil -} - -// blockBodiesRLPData is the network packet for block content distribution -// based on original RLP formatting (i.e. skip the db-decode/proto-encode). -type blockBodiesRLPData []*blockBodyRLP - // nodeDataData is the network response packet for a node data retrieval. type nodeDataData []struct { Value []byte diff --git a/miner/worker.go b/miner/worker.go index 16a16931d..2f43b110f 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -283,7 +283,7 @@ func (self *worker) wait() { continue } - stat, err := self.chain.WriteBlock(block, false) + stat, err := self.chain.WriteBlock(block) if err != nil { glog.V(logger.Error).Infoln("error writing block to chain", err) continue @@ -533,14 +533,12 @@ func (self *worker) commitNewWork() { // create the new block whose nonce will be mined. work.Block = types.NewBlock(header, work.txs, uncles, work.receipts) - work.Block.Td = new(big.Int).Set(core.CalcTD(work.Block, self.chain.GetBlock(work.Block.ParentHash()))) // We only care about logging if we're actually mining. if atomic.LoadInt32(&self.mining) == 1 { glog.V(logger.Info).Infof("commit new work on block %v with %d txs & %d uncles. Took %v\n", work.Block.Number(), work.tcount, len(uncles), time.Since(tstart)) self.logLocalMinedBlocks(work, previous) } - self.push(work) } diff --git a/rpc/api/eth.go b/rpc/api/eth.go index ba87e86c6..a93e41157 100644 --- a/rpc/api/eth.go +++ b/rpc/api/eth.go @@ -204,7 +204,8 @@ func (self *ethApi) GetBlockTransactionCountByHash(req *shared.Request) (interfa return nil, shared.NewDecodeParamError(err.Error()) } - block := NewBlockRes(self.xeth.EthBlockByHash(args.Hash), false) + raw := self.xeth.EthBlockByHash(args.Hash) + block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), false) if block == nil { return nil, nil } else { @@ -218,7 +219,8 @@ func (self *ethApi) GetBlockTransactionCountByNumber(req *shared.Request) (inter return nil, shared.NewDecodeParamError(err.Error()) } - block := NewBlockRes(self.xeth.EthBlockByNumber(args.BlockNumber), false) + raw := self.xeth.EthBlockByNumber(args.BlockNumber) + block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), false) if block == nil { return nil, nil } else { @@ -232,12 +234,12 @@ func (self *ethApi) GetUncleCountByBlockHash(req *shared.Request) (interface{}, return nil, shared.NewDecodeParamError(err.Error()) } - block := self.xeth.EthBlockByHash(args.Hash) - br := NewBlockRes(block, false) - if br == nil { + raw := self.xeth.EthBlockByHash(args.Hash) + block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), false) + if block == nil { return nil, nil } - return newHexNum(big.NewInt(int64(len(br.Uncles))).Bytes()), nil + return newHexNum(big.NewInt(int64(len(block.Uncles))).Bytes()), nil } func (self *ethApi) GetUncleCountByBlockNumber(req *shared.Request) (interface{}, error) { @@ -246,12 +248,12 @@ func (self *ethApi) GetUncleCountByBlockNumber(req *shared.Request) (interface{} return nil, shared.NewDecodeParamError(err.Error()) } - block := self.xeth.EthBlockByNumber(args.BlockNumber) - br := NewBlockRes(block, false) - if br == nil { + raw := self.xeth.EthBlockByNumber(args.BlockNumber) + block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), false) + if block == nil { return nil, nil } - return 
newHexNum(big.NewInt(int64(len(br.Uncles))).Bytes()), nil + return newHexNum(big.NewInt(int64(len(block.Uncles))).Bytes()), nil } func (self *ethApi) GetData(req *shared.Request) (interface{}, error) { @@ -362,7 +364,7 @@ func (self *ethApi) GetBlockByHash(req *shared.Request) (interface{}, error) { } block := self.xeth.EthBlockByHash(args.BlockHash) - return NewBlockRes(block, args.IncludeTxs), nil + return NewBlockRes(block, self.xeth.Td(block.Hash()), args.IncludeTxs), nil } func (self *ethApi) GetBlockByNumber(req *shared.Request) (interface{}, error) { @@ -372,8 +374,7 @@ func (self *ethApi) GetBlockByNumber(req *shared.Request) (interface{}, error) { } block := self.xeth.EthBlockByNumber(args.BlockNumber) - br := NewBlockRes(block, args.IncludeTxs) - return br, nil + return NewBlockRes(block, self.xeth.Td(block.Hash()), args.IncludeTxs), nil } func (self *ethApi) GetTransactionByHash(req *shared.Request) (interface{}, error) { @@ -402,16 +403,15 @@ func (self *ethApi) GetTransactionByBlockHashAndIndex(req *shared.Request) (inte return nil, shared.NewDecodeParamError(err.Error()) } - block := self.xeth.EthBlockByHash(args.Hash) - br := NewBlockRes(block, true) - if br == nil { + raw := self.xeth.EthBlockByHash(args.Hash) + block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), true) + if block == nil { return nil, nil } - - if args.Index >= int64(len(br.Transactions)) || args.Index < 0 { + if args.Index >= int64(len(block.Transactions)) || args.Index < 0 { return nil, nil } else { - return br.Transactions[args.Index], nil + return block.Transactions[args.Index], nil } } @@ -421,17 +421,16 @@ func (self *ethApi) GetTransactionByBlockNumberAndIndex(req *shared.Request) (in return nil, shared.NewDecodeParamError(err.Error()) } - block := self.xeth.EthBlockByNumber(args.BlockNumber) - v := NewBlockRes(block, true) - if v == nil { + raw := self.xeth.EthBlockByNumber(args.BlockNumber) + block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), true) + if block == nil { return nil, nil } - - if args.Index >= int64(len(v.Transactions)) || args.Index < 0 { + if args.Index >= int64(len(block.Transactions)) || args.Index < 0 { // return NewValidationError("Index", "does not exist") return nil, nil } - return v.Transactions[args.Index], nil + return block.Transactions[args.Index], nil } func (self *ethApi) GetUncleByBlockHashAndIndex(req *shared.Request) (interface{}, error) { @@ -440,17 +439,16 @@ func (self *ethApi) GetUncleByBlockHashAndIndex(req *shared.Request) (interface{ return nil, shared.NewDecodeParamError(err.Error()) } - br := NewBlockRes(self.xeth.EthBlockByHash(args.Hash), false) - if br == nil { + raw := self.xeth.EthBlockByHash(args.Hash) + block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), false) + if block == nil { return nil, nil } - - if args.Index >= int64(len(br.Uncles)) || args.Index < 0 { + if args.Index >= int64(len(block.Uncles)) || args.Index < 0 { // return NewValidationError("Index", "does not exist") return nil, nil } - - return br.Uncles[args.Index], nil + return block.Uncles[args.Index], nil } func (self *ethApi) GetUncleByBlockNumberAndIndex(req *shared.Request) (interface{}, error) { @@ -459,17 +457,15 @@ func (self *ethApi) GetUncleByBlockNumberAndIndex(req *shared.Request) (interfac return nil, shared.NewDecodeParamError(err.Error()) } - block := self.xeth.EthBlockByNumber(args.BlockNumber) - v := NewBlockRes(block, true) - - if v == nil { + raw := self.xeth.EthBlockByNumber(args.BlockNumber) + block := NewBlockRes(raw, self.xeth.Td(raw.Hash()), true) + if block == nil 
{ return nil, nil } - - if args.Index >= int64(len(v.Uncles)) || args.Index < 0 { + if args.Index >= int64(len(block.Uncles)) || args.Index < 0 { return nil, nil } else { - return v.Uncles[args.Index], nil + return block.Uncles[args.Index], nil } } diff --git a/rpc/api/parsing.go b/rpc/api/parsing.go index 5858bc136..cdfaa0ed1 100644 --- a/rpc/api/parsing.go +++ b/rpc/api/parsing.go @@ -281,7 +281,7 @@ func (b *BlockRes) MarshalJSON() ([]byte, error) { } } -func NewBlockRes(block *types.Block, fullTx bool) *BlockRes { +func NewBlockRes(block *types.Block, td *big.Int, fullTx bool) *BlockRes { if block == nil { return nil } @@ -299,7 +299,7 @@ func NewBlockRes(block *types.Block, fullTx bool) *BlockRes { res.ReceiptRoot = newHexData(block.ReceiptHash()) res.Miner = newHexData(block.Coinbase()) res.Difficulty = newHexNum(block.Difficulty()) - res.TotalDifficulty = newHexNum(block.Td) + res.TotalDifficulty = newHexNum(td) res.Size = newHexNum(block.Size().Int64()) res.ExtraData = newHexData(block.Extra()) res.GasLimit = newHexNum(block.GasLimit()) diff --git a/tests/block_test_util.go b/tests/block_test_util.go index 2090afce7..30488951d 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -440,9 +440,8 @@ func convertBlockTest(in *btJSON) (out *BlockTest, err error) { func mustConvertGenesis(testGenesis btHeader) *types.Block { hdr := mustConvertHeader(testGenesis) hdr.Number = big.NewInt(0) - b := types.NewBlockWithHeader(hdr) - b.Td = new(big.Int) - return b + + return types.NewBlockWithHeader(hdr) } func mustConvertHeader(in btHeader) *types.Header { diff --git a/xeth/xeth.go b/xeth/xeth.go index 8bd45998f..00b70da6c 100644 --- a/xeth/xeth.go +++ b/xeth/xeth.go @@ -355,6 +355,10 @@ func (self *XEth) EthBlockByNumber(num int64) *types.Block { return self.getBlockByHeight(num) } +func (self *XEth) Td(hash common.Hash) *big.Int { + return self.backend.ChainManager().GetTd(hash) +} + func (self *XEth) CurrentBlock() *types.Block { return self.backend.ChainManager().CurrentBlock() }
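Since blocks no longer carry their total difficulty, every consumer above (the miner, block propagation, the RPC layer via xeth.Td) looks it up from the chain instead. A final self-contained sketch of that pattern, with invented blocks and difficulties: the child's TD is the parent's stored TD plus the child's own difficulty, persisted with WriteTd and read back with GetTd, mirroring the computation BroadcastBlock performs.

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/types"
        "github.com/ethereum/go-ethereum/ethdb"
    )

    func main() {
        db, _ := ethdb.NewMemDatabase()

        parent := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(131072), Extra: []byte("parent")})
        child := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(2), Difficulty: big.NewInt(131136), ParentHash: parent.Hash(), Extra: []byte("child")})

        core.WriteTd(db, parent.Hash(), parent.Difficulty())

        // With block.Td gone, the chain total difficulty is derived on demand:
        // the parent's stored TD plus the new block's own difficulty, then persisted.
        childTd := new(big.Int).Add(child.Difficulty(), core.GetTd(db, parent.Hash()))
        core.WriteTd(db, child.Hash(), childTd)

        fmt.Println("child TD:", core.GetTd(db, child.Hash()))
    }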