author    Péter Szilágyi <peterke@gmail.com>  2015-09-01 01:21:02 +0800
committer Péter Szilágyi <peterke@gmail.com>  2015-09-11 22:42:25 +0800
commit    2b339cbbd8bb475d2195d54a71dcced700003430 (patch)
tree      7520c67d5bb5c66fe2430394a72f83d052bbe2b7 /core
parent    4e075e401354b4ee068cf78b1f283763fe927245 (diff)
core, eth: split the db blocks into headers and bodies
Diffstat (limited to 'core')
-rw-r--r--  core/chain_manager.go        262
-rw-r--r--  core/chain_manager_test.go     5
-rw-r--r--  core/chain_util.go           210
-rw-r--r--  core/genesis.go                2
-rw-r--r--  core/types/block.go            4
5 files changed, 341 insertions(+), 142 deletions(-)
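For orientation before the diff itself: the commit replaces the single "block-hash-" record per block with separate header and body records, keeps a number-to-hash index, and stores the head hash under an explicit key. The following is a minimal, illustrative sketch of the resulting key layout, not part of the diff; the prefix names are taken from core/chain_util.go below, while the placeholder hash and number are made up for the example.

package main

import (
	"fmt"
	"math/big"
)

// Database key prefixes used by this commit, as declared in core/chain_util.go below.
var (
	headKey       = []byte("LastBlock")    // value: hash of the current head block
	headerHashPre = []byte("header-hash-") // + hash   -> RLP(header)
	bodyHashPre   = []byte("body-hash-")   // + hash   -> RLP(td) followed by RLP(storageBody)
	blockNumPre   = []byte("block-num-")   // + number -> canonical block hash
)

func main() {
	hash := dummyHash(0x2b)         // placeholder 32-byte block hash
	number := big.NewInt(1150000)   // placeholder block number

	fmt.Printf("header key: %q\n", append(headerHashPre, hash...))
	fmt.Printf("body key:   %q\n", append(bodyHashPre, hash...))
	fmt.Printf("canon key:  %q\n", append(blockNumPre, number.Bytes()...))
	fmt.Printf("head key:   %q\n", headKey)
}

// dummyHash builds a 32-byte hash filled with b, purely for the demo.
func dummyHash(b byte) []byte {
	out := make([]byte, 32)
	for i := range out {
		out[i] = b
	}
	return out
}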
diff --git a/core/chain_manager.go b/core/chain_manager.go
index c8127951e..745b270f7 100644
--- a/core/chain_manager.go
+++ b/core/chain_manager.go
@@ -48,6 +48,8 @@ var (
)
const (
+ headerCacheLimit = 256
+ bodyCacheLimit = 256
blockCacheLimit = 256
maxFutureBlocks = 256
maxTimeFutureBlocks = 30
@@ -71,7 +73,10 @@ type ChainManager struct {
lastBlockHash common.Hash
currentGasLimit *big.Int
- cache *lru.Cache // cache is the LRU caching
+ headerCache *lru.Cache // Cache for the most recent block headers
+ bodyCache *lru.Cache // Cache for the most recent block bodies
+ bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
+ blockCache *lru.Cache // Cache for the most recent entire blocks
futureBlocks *lru.Cache // future blocks are blocks added for later processing
quit chan struct{}
@@ -84,13 +89,22 @@ type ChainManager struct {
}
func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) {
- cache, _ := lru.New(blockCacheLimit)
+ headerCache, _ := lru.New(headerCacheLimit)
+ bodyCache, _ := lru.New(bodyCacheLimit)
+ bodyRLPCache, _ := lru.New(bodyCacheLimit)
+ blockCache, _ := lru.New(blockCacheLimit)
+ futureBlocks, _ := lru.New(maxFutureBlocks)
+
bc := &ChainManager{
- chainDb: chainDb,
- eventMux: mux,
- quit: make(chan struct{}),
- cache: cache,
- pow: pow,
+ chainDb: chainDb,
+ eventMux: mux,
+ quit: make(chan struct{}),
+ headerCache: headerCache,
+ bodyCache: bodyCache,
+ bodyRLPCache: bodyRLPCache,
+ blockCache: blockCache,
+ futureBlocks: futureBlocks,
+ pow: pow,
}
bc.genesisBlock = bc.GetBlockByNumber(0)
@@ -105,11 +119,9 @@ func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (
}
glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block")
}
-
if err := bc.setLastState(); err != nil {
return nil, err
}
-
// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
for hash, _ := range BadHashes {
if block := bc.GetBlock(hash); block != nil {
@@ -123,14 +135,8 @@ func NewChainManager(chainDb common.Database, pow pow.PoW, mux *event.TypeMux) (
glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation")
}
}
-
// Take ownership of this particular state
-
- bc.futureBlocks, _ = lru.New(maxFutureBlocks)
- bc.makeCache()
-
go bc.update()
-
return bc, nil
}
@@ -139,13 +145,15 @@ func (bc *ChainManager) SetHead(head *types.Block) {
defer bc.mu.Unlock()
for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) {
- bc.removeBlock(block)
+ DeleteBlock(bc.chainDb, block.Hash())
}
+ bc.headerCache.Purge()
+ bc.bodyCache.Purge()
+ bc.bodyRLPCache.Purge()
+ bc.blockCache.Purge()
+ bc.futureBlocks.Purge()
- bc.cache, _ = lru.New(blockCacheLimit)
bc.currentBlock = head
- bc.makeCache()
-
bc.setTotalDifficulty(head.Td)
bc.insert(head)
bc.setLastState()
@@ -199,11 +207,9 @@ func (bc *ChainManager) recover() bool {
if len(data) != 0 {
block := bc.GetBlock(common.BytesToHash(data))
if block != nil {
- err := bc.chainDb.Put([]byte("LastBlock"), block.Hash().Bytes())
- if err != nil {
- glog.Fatalln("db write err:", err)
+ if err := WriteHead(bc.chainDb, block); err != nil {
+ glog.Fatalf("failed to write database head: %v", err)
}
-
bc.currentBlock = block
bc.lastBlockHash = block.Hash()
return true
@@ -213,14 +219,14 @@ func (bc *ChainManager) recover() bool {
}
func (bc *ChainManager) setLastState() error {
- data, _ := bc.chainDb.Get([]byte("LastBlock"))
- if len(data) != 0 {
- block := bc.GetBlock(common.BytesToHash(data))
+ head := GetHeadHash(bc.chainDb)
+ if head != (common.Hash{}) {
+ block := bc.GetBlock(head)
if block != nil {
bc.currentBlock = block
bc.lastBlockHash = block.Hash()
} else {
- glog.Infof("LastBlock (%x) not found. Recovering...\n", data)
+ glog.Infof("LastBlock (%x) not found. Recovering...\n", head)
if bc.recover() {
glog.Infof("Recover successful")
} else {
@@ -240,63 +246,37 @@ func (bc *ChainManager) setLastState() error {
return nil
}
-func (bc *ChainManager) makeCache() {
- bc.cache, _ = lru.New(blockCacheLimit)
- // load in last `blockCacheLimit` - 1 blocks. Last block is the current.
- bc.cache.Add(bc.genesisBlock.Hash(), bc.genesisBlock)
- for _, block := range bc.GetBlocksFromHash(bc.currentBlock.Hash(), blockCacheLimit) {
- bc.cache.Add(block.Hash(), block)
- }
-}
-
+// Reset purges the entire blockchain, restoring it to its genesis state.
func (bc *ChainManager) Reset() {
- bc.mu.Lock()
- defer bc.mu.Unlock()
-
- for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
- bc.removeBlock(block)
- }
-
- bc.cache, _ = lru.New(blockCacheLimit)
-
- // Prepare the genesis block
- err := WriteBlock(bc.chainDb, bc.genesisBlock)
- if err != nil {
- glog.Fatalln("db err:", err)
- }
-
- bc.insert(bc.genesisBlock)
- bc.currentBlock = bc.genesisBlock
- bc.makeCache()
-
- bc.setTotalDifficulty(common.Big("0"))
+ bc.ResetWithGenesisBlock(bc.genesisBlock)
}
-func (bc *ChainManager) removeBlock(block *types.Block) {
- bc.chainDb.Delete(append(blockHashPre, block.Hash().Bytes()...))
-}
-
-func (bc *ChainManager) ResetWithGenesisBlock(gb *types.Block) {
+// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
+// specified genesis state.
+func (bc *ChainManager) ResetWithGenesisBlock(genesis *types.Block) {
bc.mu.Lock()
defer bc.mu.Unlock()
+ // Dump the entire block chain and purge the caches
for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) {
- bc.removeBlock(block)
+ DeleteBlock(bc.chainDb, block.Hash())
}
+ bc.headerCache.Purge()
+ bc.bodyCache.Purge()
+ bc.bodyRLPCache.Purge()
+ bc.blockCache.Purge()
+ bc.futureBlocks.Purge()
- // Prepare the genesis block
- gb.Td = gb.Difficulty()
- bc.genesisBlock = gb
+ // Prepare the genesis block and reinitialize the chain
+ bc.genesisBlock = genesis
+ bc.genesisBlock.Td = genesis.Difficulty()
- err := WriteBlock(bc.chainDb, bc.genesisBlock)
- if err != nil {
- glog.Fatalln("db err:", err)
+ if err := WriteBlock(bc.chainDb, bc.genesisBlock); err != nil {
+ glog.Fatalf("failed to write genesis block: %v", err)
}
-
bc.insert(bc.genesisBlock)
bc.currentBlock = bc.genesisBlock
- bc.makeCache()
- bc.td = gb.Difficulty()
+ bc.setTotalDifficulty(genesis.Difficulty())
}
// Export writes the active chain to the given writer.
@@ -359,61 +339,130 @@ func (bc *ChainManager) Genesis() *types.Block {
return bc.genesisBlock
}
-// Block fetching methods
-func (bc *ChainManager) HasBlock(hash common.Hash) bool {
- if bc.cache.Contains(hash) {
- return true
+// HasHeader checks if a block header is present in the database or not, caching
+// it if present.
+func (bc *ChainManager) HasHeader(hash common.Hash) bool {
+ return bc.GetHeader(hash) != nil
+}
+
+// GetHeader retrieves a block header from the database by hash, caching it if
+// found.
+func (self *ChainManager) GetHeader(hash common.Hash) *types.Header {
+ // Short circuit if the header's already in the cache, retrieve otherwise
+ if header, ok := self.headerCache.Get(hash); ok {
+ return header.(*types.Header)
+ }
+ header := GetHeaderByHash(self.chainDb, hash)
+ if header == nil {
+ return nil
}
+ // Cache the found header for next time and return
+ self.headerCache.Add(header.Hash(), header)
+ return header
+}
- data, _ := bc.chainDb.Get(append(blockHashPre, hash[:]...))
- return len(data) != 0
+// GetHeaderByNumber retrieves a block header from the database by number,
+// caching it (associated with its hash) if found.
+func (self *ChainManager) GetHeaderByNumber(number uint64) *types.Header {
+ hash := GetHashByNumber(self.chainDb, number)
+ if hash == (common.Hash{}) {
+ return nil
+ }
+ return self.GetHeader(hash)
}
-func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) (chain []common.Hash) {
- block := self.GetBlock(hash)
- if block == nil {
- return
+// GetBody retrieves a block body (transactions, uncles and total difficulty)
+// from the database by hash, caching it if found. The reason for the peculiar
+// pointer-to-slice return type is to differentiate between empty and nonexistent
+// bodies.
+func (self *ChainManager) GetBody(hash common.Hash) (*[]*types.Transaction, *[]*types.Header) {
+ // Short circuit if the body's already in the cache, retrieve otherwise
+ if cached, ok := self.bodyCache.Get(hash); ok {
+ body := cached.(*storageBody)
+ return &body.Transactions, &body.Uncles
}
- // XXX Could be optimised by using a different database which only holds hashes (i.e., linked list)
- for i := uint64(0); i < max; i++ {
- block = self.GetBlock(block.ParentHash())
- if block == nil {
- break
- }
+ transactions, uncles, td := GetBodyByHash(self.chainDb, hash)
+ if td == nil {
+ return nil, nil
+ }
+ // Cache the found body for next time and return
+ self.bodyCache.Add(hash, &storageBody{
+ Transactions: transactions,
+ Uncles: uncles,
+ })
+ return &transactions, &uncles
+}
- chain = append(chain, block.Hash())
- if block.Number().Cmp(common.Big0) <= 0 {
- break
- }
+// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
+// caching it if found.
+func (self *ChainManager) GetBodyRLP(hash common.Hash) []byte {
+ // Short circuit if the body's already in the cache, retrieve otherwise
+ if cached, ok := self.bodyRLPCache.Get(hash); ok {
+ return cached.([]byte)
}
+ body, td := GetBodyRLPByHash(self.chainDb, hash)
+ if td == nil {
+ return nil
+ }
+ // Cache the found body for next time and return
+ self.bodyRLPCache.Add(hash, body)
+ return body
+}
- return
+// HasBlock checks if a block is fully present in the database or not, caching
+// it if present.
+func (bc *ChainManager) HasBlock(hash common.Hash) bool {
+ return bc.GetBlock(hash) != nil
}
+// GetBlock retrieves a block from the database by hash, caching it if found.
func (self *ChainManager) GetBlock(hash common.Hash) *types.Block {
- if block, ok := self.cache.Get(hash); ok {
+ // Short circuit if the block's already in the cache, retrieve otherwise
+ if block, ok := self.blockCache.Get(hash); ok {
return block.(*types.Block)
}
-
block := GetBlockByHash(self.chainDb, hash)
if block == nil {
return nil
}
-
- // Add the block to the cache
- self.cache.Add(hash, (*types.Block)(block))
-
- return (*types.Block)(block)
+ // Cache the found block for next time and return
+ self.blockCache.Add(block.Hash(), block)
+ return block
}
-func (self *ChainManager) GetBlockByNumber(num uint64) *types.Block {
- self.mu.RLock()
- defer self.mu.RUnlock()
-
- return self.getBlockByNumber(num)
+// GetBlockByNumber retrieves a block from the database by number, caching it
+// (associated with its hash) if found.
+func (self *ChainManager) GetBlockByNumber(number uint64) *types.Block {
+ hash := GetHashByNumber(self.chainDb, number)
+ if hash == (common.Hash{}) {
+ return nil
+ }
+ return self.GetBlock(hash)
+}
+// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
+// hash, fetching towards the genesis block.
+func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
+ // Get the origin header from which to fetch
+ header := self.GetHeader(hash)
+ if header == nil {
+ return nil
+ }
+ // Iterate the headers until enough is collected or the genesis reached
+ chain := make([]common.Hash, 0, max)
+ for i := uint64(0); i < max; i++ {
+ if header = self.GetHeader(header.ParentHash); header == nil {
+ break
+ }
+ chain = append(chain, header.Hash())
+ if header.Number.Cmp(common.Big0) <= 0 {
+ break
+ }
+ }
+ return chain
}
+// [deprecated by eth/62]
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
for i := 0; i < n; i++ {
@@ -427,11 +476,6 @@ func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*
return
}
-// non blocking version
-func (self *ChainManager) getBlockByNumber(num uint64) *types.Block {
- return GetBlockByNumber(self.chainDb, num)
-}
-
func (self *ChainManager) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) {
for i := 0; block != nil && i < length; i++ {
uncles = append(uncles, block.Uncles()...)
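Each of the new ChainManager accessors above (GetHeader, GetBody, GetBodyRLP, GetBlock) follows the same cache-then-database pattern: check a small LRU, fall back to the chain_util helpers, and cache whatever is found. A stand-in sketch of that flow follows, not taken from the diff; Hash, Header and getHeaderFromDB are hypothetical stand-ins for common.Hash, types.Header and GetHeaderByHash, while the LRU is the same hashicorp/golang-lru package the diff uses.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

type Hash [32]byte
type Header struct{ Number uint64 }

var headerCache, _ = lru.New(256) // headerCacheLimit in the diff

// getHeaderFromDB stands in for GetHeaderByHash(db, hash); assumed for illustration.
func getHeaderFromDB(hash Hash) *Header { return &Header{Number: 1} }

// getHeader mirrors ChainManager.GetHeader: cache hit, else database, else nil.
func getHeader(hash Hash) *Header {
	if cached, ok := headerCache.Get(hash); ok {
		return cached.(*Header)
	}
	header := getHeaderFromDB(hash)
	if header == nil {
		return nil
	}
	headerCache.Add(hash, header)
	return header
}

func main() {
	fmt.Println(getHeader(Hash{1}).Number) // first call hits the database stand-in
	fmt.Println(getHeader(Hash{1}).Number) // second call is served from the cache
}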
diff --git a/core/chain_manager_test.go b/core/chain_manager_test.go
index 002dcbe44..97e7cacdc 100644
--- a/core/chain_manager_test.go
+++ b/core/chain_manager_test.go
@@ -388,7 +388,10 @@ func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block
func chm(genesis *types.Block, db common.Database) *ChainManager {
var eventMux event.TypeMux
bc := &ChainManager{chainDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}}
- bc.cache, _ = lru.New(100)
+ bc.headerCache, _ = lru.New(100)
+ bc.bodyCache, _ = lru.New(100)
+ bc.bodyRLPCache, _ = lru.New(100)
+ bc.blockCache, _ = lru.New(100)
bc.futureBlocks, _ = lru.New(100)
bc.processor = bproc{}
bc.ResetWithGenesisBlock(genesis)
diff --git a/core/chain_util.go b/core/chain_util.go
index 84b462ce3..c12bdda75 100644
--- a/core/chain_util.go
+++ b/core/chain_util.go
@@ -19,7 +19,6 @@ package core
import (
"bytes"
"math/big"
- "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@@ -30,9 +29,14 @@ import (
)
var (
- blockHashPre = []byte("block-hash-")
+ headKey = []byte("LastBlock")
+
+ headerHashPre = []byte("header-hash-")
+ bodyHashPre = []byte("body-hash-")
blockNumPre = []byte("block-num-")
ExpDiffPeriod = big.NewInt(100000)
+
+ blockHashPre = []byte("block-hash-") // [deprecated by eth/63]
)
// CalcDifficulty is the difficulty adjustment algorithm. It returns
@@ -112,68 +116,212 @@ func CalcGasLimit(parent *types.Block) *big.Int {
return gl
}
-// GetBlockByHash returns the block corresponding to the hash or nil if not found
-func GetBlockByHash(db common.Database, hash common.Hash) *types.Block {
- data, _ := db.Get(append(blockHashPre, hash[:]...))
+// storageBody is the block body encoding used for the database.
+type storageBody struct {
+ Transactions []*types.Transaction
+ Uncles []*types.Header
+}
+
+// GetHashByNumber retrieves a hash assigned to a canonical block number.
+func GetHashByNumber(db common.Database, number uint64) common.Hash {
+ data, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...))
+ if len(data) == 0 {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
+// GetHeadHash retrieves the hash of the current canonical head block.
+func GetHeadHash(db common.Database) common.Hash {
+ data, _ := db.Get(headKey)
+ if len(data) == 0 {
+ return common.Hash{}
+ }
+ return common.BytesToHash(data)
+}
+
+// GetHeaderRLPByHash retrieves a block header in its raw RLP database encoding,
+// or nil if the header's not found.
+func GetHeaderRLPByHash(db common.Database, hash common.Hash) []byte {
+ data, _ := db.Get(append(headerHashPre, hash[:]...))
+ return data
+}
+
+// GetHeaderByHash retrieves the block header corresponding to the hash, nil if
+// none found.
+func GetHeaderByHash(db common.Database, hash common.Hash) *types.Header {
+ data := GetHeaderRLPByHash(db, hash)
if len(data) == 0 {
return nil
}
- var block types.StorageBlock
- if err := rlp.Decode(bytes.NewReader(data), &block); err != nil {
- glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err)
+ header := new(types.Header)
+ if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
+ glog.V(logger.Error).Infof("invalid block header RLP for hash %x: %v", hash, err)
return nil
}
- return (*types.Block)(&block)
+ return header
}
-// GetBlockByHash returns the canonical block by number or nil if not found
+// GetBodyRLPByHash retrieves the block body (transactions and uncles) in RLP
+// encoding, and the associated total difficulty.
+func GetBodyRLPByHash(db common.Database, hash common.Hash) ([]byte, *big.Int) {
+ combo, _ := db.Get(append(bodyHashPre, hash[:]...))
+ if len(combo) == 0 {
+ return nil, nil
+ }
+ buffer := bytes.NewBuffer(combo)
+
+ td := new(big.Int)
+ if err := rlp.Decode(buffer, td); err != nil {
+ glog.V(logger.Error).Infof("invalid block td RLP for hash %x: %v", hash, err)
+ return nil, nil
+ }
+ return buffer.Bytes(), td
+}
+
+// GetBodyByHash retrieves the block body (transactions, uncles, total difficulty)
+// corresponding to the hash, nils if none found.
+func GetBodyByHash(db common.Database, hash common.Hash) ([]*types.Transaction, []*types.Header, *big.Int) {
+ data, td := GetBodyRLPByHash(db, hash)
+ if len(data) == 0 || td == nil {
+ return nil, nil, nil
+ }
+ body := new(storageBody)
+ if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
+ glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err)
+ return nil, nil, nil
+ }
+ return body.Transactions, body.Uncles, td
+}
+
+// GetBlockByHash retrieves an entire block corresponding to the hash, assembling
+// it back from the stored header and body.
+func GetBlockByHash(db common.Database, hash common.Hash) *types.Block {
+ // Retrieve the block header and body contents
+ header := GetHeaderByHash(db, hash)
+ if header == nil {
+ return nil
+ }
+ transactions, uncles, td := GetBodyByHash(db, hash)
+ if td == nil {
+ return nil
+ }
+ // Reassemble the block and return
+ block := types.NewBlockWithHeader(header).WithBody(transactions, uncles)
+ block.Td = td
+
+ return block
+}
+
+// GetBlockByNumber returns the canonical block by number or nil if not found.
func GetBlockByNumber(db common.Database, number uint64) *types.Block {
key, _ := db.Get(append(blockNumPre, big.NewInt(int64(number)).Bytes()...))
if len(key) == 0 {
return nil
}
-
return GetBlockByHash(db, common.BytesToHash(key))
}
-// WriteCanonNumber writes the canonical hash for the given block
-func WriteCanonNumber(db common.Database, block *types.Block) error {
- key := append(blockNumPre, block.Number().Bytes()...)
- err := db.Put(key, block.Hash().Bytes())
- if err != nil {
+// WriteCanonNumber stores the canonical hash for the given block number.
+func WriteCanonNumber(db common.Database, hash common.Hash, number uint64) error {
+ key := append(blockNumPre, big.NewInt(int64(number)).Bytes()...)
+ if err := db.Put(key, hash.Bytes()); err != nil {
+ glog.Fatalf("failed to store number to hash mapping into database: %v", err)
return err
}
return nil
}
-// WriteHead force writes the current head
+// WriteHead updates the head block of the chain database.
func WriteHead(db common.Database, block *types.Block) error {
- err := WriteCanonNumber(db, block)
- if err != nil {
+ if err := WriteCanonNumber(db, block.Hash(), block.NumberU64()); err != nil {
+ glog.Fatalf("failed to store canonical number into database: %v", err)
return err
}
- err = db.Put([]byte("LastBlock"), block.Hash().Bytes())
- if err != nil {
+ if err := db.Put(headKey, block.Hash().Bytes()); err != nil {
+ glog.Fatalf("failed to store last block into database: %v", err)
return err
}
return nil
}
-// WriteBlock writes a block to the database
-func WriteBlock(db common.Database, block *types.Block) error {
- tstart := time.Now()
-
- enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
- key := append(blockHashPre, block.Hash().Bytes()...)
- err := db.Put(key, enc)
+// WriteHeader serializes a block header into the database.
+func WriteHeader(db common.Database, header *types.Header) error {
+ data, err := rlp.EncodeToBytes(header)
if err != nil {
- glog.Fatal("db write fail:", err)
return err
}
+ key := append(headerHashPre, header.Hash().Bytes()...)
+ if err := db.Put(key, data); err != nil {
+ glog.Fatalf("failed to store header into database: %v", err)
+ return err
+ }
+ glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, header.Hash().Bytes()[:4])
+ return nil
+}
- if glog.V(logger.Debug) {
- glog.Infof("wrote block #%v %s. Took %v\n", block.Number(), common.PP(block.Hash().Bytes()), time.Since(tstart))
+// WriteBody serializes the body of a block into the database.
+func WriteBody(db common.Database, block *types.Block) error {
+ body, err := rlp.EncodeToBytes(&storageBody{block.Transactions(), block.Uncles()})
+ if err != nil {
+ return err
+ }
+ td, err := rlp.EncodeToBytes(block.Td)
+ if err != nil {
+ return err
}
+ key := append(bodyHashPre, block.Hash().Bytes()...)
+ if err := db.Put(key, append(td, body...)); err != nil {
+ glog.Fatalf("failed to store block body into database: %v", err)
+ return err
+ }
+ glog.V(logger.Debug).Infof("stored block body #%v [%x…]", block.Number, block.Hash().Bytes()[:4])
+ return nil
+}
+// WriteBlock serializes a block into the database, header and body separately.
+func WriteBlock(db common.Database, block *types.Block) error {
+ // Store the body first to retain database consistency
+ if err := WriteBody(db, block); err != nil {
+ return err
+ }
+ // Store the header too, signaling full block ownership
+ if err := WriteHeader(db, block.Header()); err != nil {
+ return err
+ }
return nil
}
+
+// DeleteHeader removes all block header data associated with a hash.
+func DeleteHeader(db common.Database, hash common.Hash) {
+ db.Delete(append(headerHashPre, hash.Bytes()...))
+}
+
+// DeleteBody removes all block body data associated with a hash.
+func DeleteBody(db common.Database, hash common.Hash) {
+ db.Delete(append(bodyHashPre, hash.Bytes()...))
+}
+
+// DeleteBlock removes all block data associated with a hash.
+func DeleteBlock(db common.Database, hash common.Hash) {
+ DeleteHeader(db, hash)
+ DeleteBody(db, hash)
+}
+
+// [deprecated by eth/63]
+// GetBlockByHashOld returns the old combined block corresponding to the hash
+// or nil if not found. This method is only used by the upgrade mechanism to
+// access the old combined block representation. It will be dropped after the
+// network transitions to eth/63.
+func GetBlockByHashOld(db common.Database, hash common.Hash) *types.Block {
+ data, _ := db.Get(append(blockHashPre, hash[:]...))
+ if len(data) == 0 {
+ return nil
+ }
+ var block types.StorageBlock
+ if err := rlp.Decode(bytes.NewReader(data), &block); err != nil {
+ glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err)
+ return nil
+ }
+ return (*types.Block)(&block)
+}
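The non-obvious part of the new chain_util.go layout is the combined body record: WriteBody stores the total difficulty RLP immediately followed by the body RLP under one key, and GetBodyRLPByHash peels the td back off the front with a bytes.Buffer. Below is a self-contained sketch of that round trip, mirroring the diff; it uses the real rlp package, but the storageBody fields are stand-ins (the actual struct holds []*types.Transaction and []*types.Header).

package main

import (
	"bytes"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/rlp"
)

// storageBody mirrors the struct defined in chain_util.go, with stand-in
// field types so the sketch stays self-contained.
type storageBody struct {
	Transactions []uint // really []*types.Transaction
	Uncles       []uint // really []*types.Header
}

func main() {
	// Encode as WriteBody does: td RLP first, body RLP appended, one database value.
	td, _ := rlp.EncodeToBytes(big.NewInt(131072))
	body, _ := rlp.EncodeToBytes(&storageBody{Transactions: []uint{1, 2}, Uncles: nil})
	combo := append(td, body...)

	// Decode as GetBodyRLPByHash / GetBodyByHash do: read the td off the front,
	// then treat whatever remains in the buffer as the body RLP.
	buffer := bytes.NewBuffer(combo)
	gotTd := new(big.Int)
	if err := rlp.Decode(buffer, gotTd); err != nil {
		panic(err)
	}
	gotBody := new(storageBody)
	if err := rlp.Decode(bytes.NewReader(buffer.Bytes()), gotBody); err != nil {
		panic(err)
	}
	fmt.Println("td:", gotTd, "txs:", gotBody.Transactions)
}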
diff --git a/core/genesis.go b/core/genesis.go
index 7d4e03c99..6fbc671b0 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -86,7 +86,7 @@ func WriteGenesisBlock(chainDb common.Database, reader io.Reader) (*types.Block,
if block := GetBlockByHash(chainDb, block.Hash()); block != nil {
glog.V(logger.Info).Infoln("Genesis block already in chain. Writing canonical number")
- err := WriteCanonNumber(chainDb, block)
+ err := WriteCanonNumber(chainDb, block.Hash(), block.NumberU64())
if err != nil {
return nil, err
}
diff --git a/core/types/block.go b/core/types/block.go
index fd81db04c..558b46e01 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -135,6 +135,7 @@ type Block struct {
ReceivedAt time.Time
}
+// [deprecated by eth/63]
// StorageBlock defines the RLP encoding of a Block stored in the
// state database. The StorageBlock encoding contains fields that
// would otherwise need to be recomputed.
@@ -147,6 +148,7 @@ type extblock struct {
Uncles []*Header
}
+// [deprecated by eth/63]
// "storage" block encoding. used for database.
type storageblock struct {
Header *Header
@@ -268,6 +270,7 @@ func (b *Block) EncodeRLP(w io.Writer) error {
})
}
+// [deprecated by eth/63]
func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
var sb storageblock
if err := s.Decode(&sb); err != nil {
@@ -277,6 +280,7 @@ func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error {
return nil
}
+// [deprecated by eth/63]
func (b *StorageBlock) EncodeRLP(w io.Writer) error {
return rlp.Encode(w, storageblock{
Header: b.header,