From e86e0ecdc8a977db2ff5df60dca3cad8355ace6d Mon Sep 17 00:00:00 2001 From: Péter Szilágyi Date: Thu, 22 Oct 2015 15:43:21 +0300 Subject: core, eth, miner, xeth: clean up tx/receipt db accessors --- core/block_validator_test.go | 2 +- core/blockchain.go | 12 +- core/blockchain_test.go | 8 +- core/chain_util.go | 414 ----------------------------- core/chain_util_test.go | 451 -------------------------------- core/database_util.go | 584 +++++++++++++++++++++++++++++++++++++++++ core/database_util_test.go | 609 +++++++++++++++++++++++++++++++++++++++++++ core/genesis.go | 2 +- core/transaction_util.go | 171 ------------ eth/backend_test.go | 4 +- eth/filters/filter_test.go | 8 +- miner/worker.go | 6 +- xeth/xeth.go | 41 +-- 13 files changed, 1218 insertions(+), 1094 deletions(-) delete mode 100644 core/chain_util.go delete mode 100644 core/chain_util_test.go create mode 100644 core/database_util.go create mode 100644 core/database_util_test.go delete mode 100644 core/transaction_util.go diff --git a/core/block_validator_test.go b/core/block_validator_test.go index a0694f067..70953d76d 100644 --- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -81,7 +81,7 @@ func TestPutReceipt(t *testing.T) { Index: 0, }} - PutReceipts(db, types.Receipts{receipt}) + WriteReceipts(db, types.Receipts{receipt}) receipt = GetReceipt(db, common.Hash{}) if receipt == nil { t.Error("expected to get 1 receipt, got none.") diff --git a/core/blockchain.go b/core/blockchain.go index b6b00ca04..5e1fc9424 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -972,7 +972,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain glog.Fatal(errs[index]) return } - if err := PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil { + if err := WriteBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil { errs[index] = fmt.Errorf("failed to write block receipts: %v", err) atomic.AddInt32(&failed, 1) glog.Fatal(errs[index]) @@ -1182,7 +1182,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) { // coalesce logs for later processing coalescedLogs = append(coalescedLogs, logs...) 
- if err := PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil { + if err := WriteBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil { return i, err } @@ -1201,11 +1201,11 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) { events = append(events, ChainEvent{block, block.Hash(), logs}) // This puts transactions in a extra db for rpc - if err := PutTransactions(self.chainDb, block, block.Transactions()); err != nil { + if err := WriteTransactions(self.chainDb, block); err != nil { return i, err } // store the receipts - if err := PutReceipts(self.chainDb, receipts); err != nil { + if err := WriteReceipts(self.chainDb, receipts); err != nil { return i, err } // Write map map bloom filters @@ -1294,12 +1294,12 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error { // insert the block in the canonical way, re-writing history self.insert(block) // write canonical receipts and transactions - if err := PutTransactions(self.chainDb, block, block.Transactions()); err != nil { + if err := WriteTransactions(self.chainDb, block); err != nil { return err } receipts := GetBlockReceipts(self.chainDb, block.Hash()) // write receipts - if err := PutReceipts(self.chainDb, receipts); err != nil { + if err := WriteReceipts(self.chainDb, receipts); err != nil { return err } // Write map map bloom filters diff --git a/core/blockchain_test.go b/core/blockchain_test.go index e5ed66377..f18b5d084 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -937,8 +937,8 @@ func TestChainTxReorgs(t *testing.T) { // removed tx for i, tx := range (types.Transactions{pastDrop, freshDrop}) { - if GetTransaction(db, tx.Hash()) != nil { - t.Errorf("drop %d: tx found while shouldn't have been", i) + if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil { + t.Errorf("drop %d: tx %v found while shouldn't have been", i, txn) } if GetReceipt(db, tx.Hash()) != nil { t.Errorf("drop %d: receipt found while shouldn't have been", i) @@ -946,7 +946,7 @@ func TestChainTxReorgs(t *testing.T) { } // added tx for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) { - if GetTransaction(db, tx.Hash()) == nil { + if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn == nil { t.Errorf("add %d: expected tx to be found", i) } if GetReceipt(db, tx.Hash()) == nil { @@ -955,7 +955,7 @@ func TestChainTxReorgs(t *testing.T) { } // shared tx for i, tx := range (types.Transactions{postponed, swapped}) { - if GetTransaction(db, tx.Hash()) == nil { + if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn == nil { t.Errorf("share %d: expected tx to be found", i) } if GetReceipt(db, tx.Hash()) == nil { diff --git a/core/chain_util.go b/core/chain_util.go deleted file mode 100644 index ddff381a1..000000000 --- a/core/chain_util.go +++ /dev/null @@ -1,414 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "bytes" - "encoding/binary" - "fmt" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/logger" - "github.com/ethereum/go-ethereum/logger/glog" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" -) - -var ( - headHeaderKey = []byte("LastHeader") - headBlockKey = []byte("LastBlock") - headFastKey = []byte("LastFast") - - blockPrefix = []byte("block-") - blockNumPrefix = []byte("block-num-") - - headerSuffix = []byte("-header") - bodySuffix = []byte("-body") - tdSuffix = []byte("-td") - - ExpDiffPeriod = big.NewInt(100000) - blockHashPre = []byte("block-hash-") // [deprecated by eth/63] - - mipmapPre = []byte("mipmap-log-bloom-") - MIPMapLevels = []uint64{1000000, 500000, 100000, 50000, 1000} -) - -// CalcDifficulty is the difficulty adjustment algorithm. It returns -// the difficulty that a new block b should have when created at time -// given the parent block's time and difficulty. -func CalcDifficulty(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int { - diff := new(big.Int) - adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor) - bigTime := new(big.Int) - bigParentTime := new(big.Int) - - bigTime.SetUint64(time) - bigParentTime.SetUint64(parentTime) - - if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 { - diff.Add(parentDiff, adjust) - } else { - diff.Sub(parentDiff, adjust) - } - if diff.Cmp(params.MinimumDifficulty) < 0 { - diff = params.MinimumDifficulty - } - - periodCount := new(big.Int).Add(parentNumber, common.Big1) - periodCount.Div(periodCount, ExpDiffPeriod) - if periodCount.Cmp(common.Big1) > 0 { - // diff = diff + 2^(periodCount - 2) - expDiff := periodCount.Sub(periodCount, common.Big2) - expDiff.Exp(common.Big2, expDiff, nil) - diff.Add(diff, expDiff) - diff = common.BigMax(diff, params.MinimumDifficulty) - } - - return diff -} - -// CalcGasLimit computes the gas limit of the next block after parent. -// The result may be modified by the caller. -// This is miner strategy, not consensus protocol. -func CalcGasLimit(parent *types.Block) *big.Int { - // contrib = (parentGasUsed * 3 / 2) / 1024 - contrib := new(big.Int).Mul(parent.GasUsed(), big.NewInt(3)) - contrib = contrib.Div(contrib, big.NewInt(2)) - contrib = contrib.Div(contrib, params.GasLimitBoundDivisor) - - // decay = parentGasLimit / 1024 -1 - decay := new(big.Int).Div(parent.GasLimit(), params.GasLimitBoundDivisor) - decay.Sub(decay, big.NewInt(1)) - - /* - strategy: gasLimit of block-to-mine is set based on parent's - gasUsed value. if parentGasUsed > parentGasLimit * (2/3) then we - increase it, otherwise lower it (or leave it unchanged if it's right - at that usage) the amount increased/decreased depends on how far away - from parentGasLimit * (2/3) parentGasUsed is. 
- */ - gl := new(big.Int).Sub(parent.GasLimit(), decay) - gl = gl.Add(gl, contrib) - gl.Set(common.BigMax(gl, params.MinGasLimit)) - - // however, if we're now below the target (GenesisGasLimit) we increase the - // limit as much as we can (parentGasLimit / 1024 -1) - if gl.Cmp(params.GenesisGasLimit) < 0 { - gl.Add(parent.GasLimit(), decay) - gl.Set(common.BigMin(gl, params.GenesisGasLimit)) - } - return gl -} - -// GetCanonicalHash retrieves a hash assigned to a canonical block number. -func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash { - data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)) - if len(data) == 0 { - return common.Hash{} - } - return common.BytesToHash(data) -} - -// GetHeadHeaderHash retrieves the hash of the current canonical head block's -// header. The difference between this and GetHeadBlockHash is that whereas the -// last block hash is only updated upon a full block import, the last header -// hash is updated already at header import, allowing head tracking for the -// light synchronization mechanism. -func GetHeadHeaderHash(db ethdb.Database) common.Hash { - data, _ := db.Get(headHeaderKey) - if len(data) == 0 { - return common.Hash{} - } - return common.BytesToHash(data) -} - -// GetHeadBlockHash retrieves the hash of the current canonical head block. -func GetHeadBlockHash(db ethdb.Database) common.Hash { - data, _ := db.Get(headBlockKey) - if len(data) == 0 { - return common.Hash{} - } - return common.BytesToHash(data) -} - -// GetHeadFastBlockHash retrieves the hash of the current canonical head block during -// fast synchronization. The difference between this and GetHeadBlockHash is that -// whereas the last block hash is only updated upon a full block import, the last -// fast hash is updated when importing pre-processed blocks. -func GetHeadFastBlockHash(db ethdb.Database) common.Hash { - data, _ := db.Get(headFastKey) - if len(data) == 0 { - return common.Hash{} - } - return common.BytesToHash(data) -} - -// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil -// if the header's not found. -func GetHeaderRLP(db ethdb.Database, hash common.Hash) rlp.RawValue { - data, _ := db.Get(append(append(blockPrefix, hash[:]...), headerSuffix...)) - return data -} - -// GetHeader retrieves the block header corresponding to the hash, nil if none -// found. -func GetHeader(db ethdb.Database, hash common.Hash) *types.Header { - data := GetHeaderRLP(db, hash) - if len(data) == 0 { - return nil - } - header := new(types.Header) - if err := rlp.Decode(bytes.NewReader(data), header); err != nil { - glog.V(logger.Error).Infof("invalid block header RLP for hash %x: %v", hash, err) - return nil - } - return header -} - -// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding. -func GetBodyRLP(db ethdb.Database, hash common.Hash) rlp.RawValue { - data, _ := db.Get(append(append(blockPrefix, hash[:]...), bodySuffix...)) - return data -} - -// GetBody retrieves the block body (transactons, uncles) corresponding to the -// hash, nil if none found. -func GetBody(db ethdb.Database, hash common.Hash) *types.Body { - data := GetBodyRLP(db, hash) - if len(data) == 0 { - return nil - } - body := new(types.Body) - if err := rlp.Decode(bytes.NewReader(data), body); err != nil { - glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err) - return nil - } - return body -} - -// GetTd retrieves a block's total difficulty corresponding to the hash, nil if -// none found. 
-func GetTd(db ethdb.Database, hash common.Hash) *big.Int { - data, _ := db.Get(append(append(blockPrefix, hash.Bytes()...), tdSuffix...)) - if len(data) == 0 { - return nil - } - td := new(big.Int) - if err := rlp.Decode(bytes.NewReader(data), td); err != nil { - glog.V(logger.Error).Infof("invalid block total difficulty RLP for hash %x: %v", hash, err) - return nil - } - return td -} - -// GetBlock retrieves an entire block corresponding to the hash, assembling it -// back from the stored header and body. -func GetBlock(db ethdb.Database, hash common.Hash) *types.Block { - // Retrieve the block header and body contents - header := GetHeader(db, hash) - if header == nil { - return nil - } - body := GetBody(db, hash) - if body == nil { - return nil - } - // Reassemble the block and return - return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles) -} - -// WriteCanonicalHash stores the canonical hash for the given block number. -func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error { - key := append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...) - if err := db.Put(key, hash.Bytes()); err != nil { - glog.Fatalf("failed to store number to hash mapping into database: %v", err) - return err - } - return nil -} - -// WriteHeadHeaderHash stores the head header's hash. -func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error { - if err := db.Put(headHeaderKey, hash.Bytes()); err != nil { - glog.Fatalf("failed to store last header's hash into database: %v", err) - return err - } - return nil -} - -// WriteHeadBlockHash stores the head block's hash. -func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error { - if err := db.Put(headBlockKey, hash.Bytes()); err != nil { - glog.Fatalf("failed to store last block's hash into database: %v", err) - return err - } - return nil -} - -// WriteHeadFastBlockHash stores the fast head block's hash. -func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error { - if err := db.Put(headFastKey, hash.Bytes()); err != nil { - glog.Fatalf("failed to store last fast block's hash into database: %v", err) - return err - } - return nil -} - -// WriteHeader serializes a block header into the database. -func WriteHeader(db ethdb.Database, header *types.Header) error { - data, err := rlp.EncodeToBytes(header) - if err != nil { - return err - } - key := append(append(blockPrefix, header.Hash().Bytes()...), headerSuffix...) - if err := db.Put(key, data); err != nil { - glog.Fatalf("failed to store header into database: %v", err) - return err - } - glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, header.Hash().Bytes()[:4]) - return nil -} - -// WriteBody serializes the body of a block into the database. -func WriteBody(db ethdb.Database, hash common.Hash, body *types.Body) error { - data, err := rlp.EncodeToBytes(body) - if err != nil { - return err - } - key := append(append(blockPrefix, hash.Bytes()...), bodySuffix...) - if err := db.Put(key, data); err != nil { - glog.Fatalf("failed to store block body into database: %v", err) - return err - } - glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4]) - return nil -} - -// WriteTd serializes the total difficulty of a block into the database. -func WriteTd(db ethdb.Database, hash common.Hash, td *big.Int) error { - data, err := rlp.EncodeToBytes(td) - if err != nil { - return err - } - key := append(append(blockPrefix, hash.Bytes()...), tdSuffix...) 
- if err := db.Put(key, data); err != nil { - glog.Fatalf("failed to store block total difficulty into database: %v", err) - return err - } - glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td) - return nil -} - -// WriteBlock serializes a block into the database, header and body separately. -func WriteBlock(db ethdb.Database, block *types.Block) error { - // Store the body first to retain database consistency - if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil { - return err - } - // Store the header too, signaling full block ownership - if err := WriteHeader(db, block.Header()); err != nil { - return err - } - return nil -} - -// DeleteCanonicalHash removes the number to hash canonical mapping. -func DeleteCanonicalHash(db ethdb.Database, number uint64) { - db.Delete(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)) -} - -// DeleteHeader removes all block header data associated with a hash. -func DeleteHeader(db ethdb.Database, hash common.Hash) { - db.Delete(append(append(blockPrefix, hash.Bytes()...), headerSuffix...)) -} - -// DeleteBody removes all block body data associated with a hash. -func DeleteBody(db ethdb.Database, hash common.Hash) { - db.Delete(append(append(blockPrefix, hash.Bytes()...), bodySuffix...)) -} - -// DeleteTd removes all block total difficulty data associated with a hash. -func DeleteTd(db ethdb.Database, hash common.Hash) { - db.Delete(append(append(blockPrefix, hash.Bytes()...), tdSuffix...)) -} - -// DeleteBlock removes all block data associated with a hash. -func DeleteBlock(db ethdb.Database, hash common.Hash) { - DeleteHeader(db, hash) - DeleteBody(db, hash) - DeleteTd(db, hash) -} - -// [deprecated by eth/63] -// GetBlockByHashOld returns the old combined block corresponding to the hash -// or nil if not found. This method is only used by the upgrade mechanism to -// access the old combined block representation. It will be dropped after the -// network transitions to eth/63. -func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block { - data, _ := db.Get(append(blockHashPre, hash[:]...)) - if len(data) == 0 { - return nil - } - var block types.StorageBlock - if err := rlp.Decode(bytes.NewReader(data), &block); err != nil { - glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err) - return nil - } - return (*types.Block)(&block) -} - -// returns a formatted MIP mapped key by adding prefix, canonical number and level -// -// ex. fn(98, 1000) = (prefix || 1000 || 0) -func mipmapKey(num, level uint64) []byte { - lkey := make([]byte, 8) - binary.BigEndian.PutUint64(lkey, level) - key := new(big.Int).SetUint64(num / level * level) - - return append(mipmapPre, append(lkey, key.Bytes()...)...) -} - -// WriteMapmapBloom writes each address included in the receipts' logs to the -// MIP bloom bin. -func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error { - batch := db.NewBatch() - for _, level := range MIPMapLevels { - key := mipmapKey(number, level) - bloomDat, _ := db.Get(key) - bloom := types.BytesToBloom(bloomDat) - for _, receipt := range receipts { - for _, log := range receipt.Logs { - bloom.Add(log.Address.Big()) - } - } - batch.Put(key, bloom.Bytes()) - } - if err := batch.Write(); err != nil { - return fmt.Errorf("mipmap write fail for: %d: %v", number, err) - } - return nil -} - -// GetMipmapBloom returns a bloom filter using the number and level as input -// parameters. 
For available levels see MIPMapLevels. -func GetMipmapBloom(db ethdb.Database, number, level uint64) types.Bloom { - bloomDat, _ := db.Get(mipmapKey(number, level)) - return types.BytesToBloom(bloomDat) -} diff --git a/core/chain_util_test.go b/core/chain_util_test.go deleted file mode 100644 index 0bbcbbe53..000000000 --- a/core/chain_util_test.go +++ /dev/null @@ -1,451 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "encoding/json" - "io/ioutil" - "math/big" - "os" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/crypto/sha3" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/rlp" -) - -type diffTest struct { - ParentTimestamp uint64 - ParentDifficulty *big.Int - CurrentTimestamp uint64 - CurrentBlocknumber *big.Int - CurrentDifficulty *big.Int -} - -func (d *diffTest) UnmarshalJSON(b []byte) (err error) { - var ext struct { - ParentTimestamp string - ParentDifficulty string - CurrentTimestamp string - CurrentBlocknumber string - CurrentDifficulty string - } - if err := json.Unmarshal(b, &ext); err != nil { - return err - } - - d.ParentTimestamp = common.String2Big(ext.ParentTimestamp).Uint64() - d.ParentDifficulty = common.String2Big(ext.ParentDifficulty) - d.CurrentTimestamp = common.String2Big(ext.CurrentTimestamp).Uint64() - d.CurrentBlocknumber = common.String2Big(ext.CurrentBlocknumber) - d.CurrentDifficulty = common.String2Big(ext.CurrentDifficulty) - - return nil -} - -func TestDifficulty(t *testing.T) { - file, err := os.Open("../tests/files/BasicTests/difficulty.json") - if err != nil { - t.Fatal(err) - } - defer file.Close() - - tests := make(map[string]diffTest) - err = json.NewDecoder(file).Decode(&tests) - if err != nil { - t.Fatal(err) - } - - for name, test := range tests { - number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1)) - diff := CalcDifficulty(test.CurrentTimestamp, test.ParentTimestamp, number, test.ParentDifficulty) - if diff.Cmp(test.CurrentDifficulty) != 0 { - t.Error(name, "failed. Expected", test.CurrentDifficulty, "and calculated", diff) - } - } -} - -// Tests block header storage and retrieval operations. 
-func TestHeaderStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - // Create a test header to move around the database and make sure it's really new - header := &types.Header{Extra: []byte("test header")} - if entry := GetHeader(db, header.Hash()); entry != nil { - t.Fatalf("Non existent header returned: %v", entry) - } - // Write and verify the header in the database - if err := WriteHeader(db, header); err != nil { - t.Fatalf("Failed to write header into database: %v", err) - } - if entry := GetHeader(db, header.Hash()); entry == nil { - t.Fatalf("Stored header not found") - } else if entry.Hash() != header.Hash() { - t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header) - } - if entry := GetHeaderRLP(db, header.Hash()); entry == nil { - t.Fatalf("Stored header RLP not found") - } else { - hasher := sha3.NewKeccak256() - hasher.Write(entry) - - if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() { - t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header) - } - } - // Delete the header and verify the execution - DeleteHeader(db, header.Hash()) - if entry := GetHeader(db, header.Hash()); entry != nil { - t.Fatalf("Deleted header returned: %v", entry) - } -} - -// Tests block body storage and retrieval operations. -func TestBodyStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - // Create a test body to move around the database and make sure it's really new - body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}} - - hasher := sha3.NewKeccak256() - rlp.Encode(hasher, body) - hash := common.BytesToHash(hasher.Sum(nil)) - - if entry := GetBody(db, hash); entry != nil { - t.Fatalf("Non existent body returned: %v", entry) - } - // Write and verify the body in the database - if err := WriteBody(db, hash, body); err != nil { - t.Fatalf("Failed to write body into database: %v", err) - } - if entry := GetBody(db, hash); entry == nil { - t.Fatalf("Stored body not found") - } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { - t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body) - } - if entry := GetBodyRLP(db, hash); entry == nil { - t.Fatalf("Stored body RLP not found") - } else { - hasher := sha3.NewKeccak256() - hasher.Write(entry) - - if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash { - t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body) - } - } - // Delete the body and verify the execution - DeleteBody(db, hash) - if entry := GetBody(db, hash); entry != nil { - t.Fatalf("Deleted body returned: %v", entry) - } -} - -// Tests block storage and retrieval operations. 
-func TestBlockStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - // Create a test block to move around the database and make sure it's really new - block := types.NewBlockWithHeader(&types.Header{ - Extra: []byte("test block"), - UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, - }) - if entry := GetBlock(db, block.Hash()); entry != nil { - t.Fatalf("Non existent block returned: %v", entry) - } - if entry := GetHeader(db, block.Hash()); entry != nil { - t.Fatalf("Non existent header returned: %v", entry) - } - if entry := GetBody(db, block.Hash()); entry != nil { - t.Fatalf("Non existent body returned: %v", entry) - } - // Write and verify the block in the database - if err := WriteBlock(db, block); err != nil { - t.Fatalf("Failed to write block into database: %v", err) - } - if entry := GetBlock(db, block.Hash()); entry == nil { - t.Fatalf("Stored block not found") - } else if entry.Hash() != block.Hash() { - t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block) - } - if entry := GetHeader(db, block.Hash()); entry == nil { - t.Fatalf("Stored header not found") - } else if entry.Hash() != block.Header().Hash() { - t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header()) - } - if entry := GetBody(db, block.Hash()); entry == nil { - t.Fatalf("Stored body not found") - } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { - t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, &types.Body{block.Transactions(), block.Uncles()}) - } - // Delete the block and verify the execution - DeleteBlock(db, block.Hash()) - if entry := GetBlock(db, block.Hash()); entry != nil { - t.Fatalf("Deleted block returned: %v", entry) - } - if entry := GetHeader(db, block.Hash()); entry != nil { - t.Fatalf("Deleted header returned: %v", entry) - } - if entry := GetBody(db, block.Hash()); entry != nil { - t.Fatalf("Deleted body returned: %v", entry) - } -} - -// Tests that partial block contents don't get reassembled into full blocks. 
-func TestPartialBlockStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - block := types.NewBlockWithHeader(&types.Header{ - Extra: []byte("test block"), - UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyRootHash, - ReceiptHash: types.EmptyRootHash, - }) - // Store a header and check that it's not recognized as a block - if err := WriteHeader(db, block.Header()); err != nil { - t.Fatalf("Failed to write header into database: %v", err) - } - if entry := GetBlock(db, block.Hash()); entry != nil { - t.Fatalf("Non existent block returned: %v", entry) - } - DeleteHeader(db, block.Hash()) - - // Store a body and check that it's not recognized as a block - if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil { - t.Fatalf("Failed to write body into database: %v", err) - } - if entry := GetBlock(db, block.Hash()); entry != nil { - t.Fatalf("Non existent block returned: %v", entry) - } - DeleteBody(db, block.Hash()) - - // Store a header and a body separately and check reassembly - if err := WriteHeader(db, block.Header()); err != nil { - t.Fatalf("Failed to write header into database: %v", err) - } - if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil { - t.Fatalf("Failed to write body into database: %v", err) - } - if entry := GetBlock(db, block.Hash()); entry == nil { - t.Fatalf("Stored block not found") - } else if entry.Hash() != block.Hash() { - t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block) - } -} - -// Tests block total difficulty storage and retrieval operations. -func TestTdStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - // Create a test TD to move around the database and make sure it's really new - hash, td := common.Hash{}, big.NewInt(314) - if entry := GetTd(db, hash); entry != nil { - t.Fatalf("Non existent TD returned: %v", entry) - } - // Write and verify the TD in the database - if err := WriteTd(db, hash, td); err != nil { - t.Fatalf("Failed to write TD into database: %v", err) - } - if entry := GetTd(db, hash); entry == nil { - t.Fatalf("Stored TD not found") - } else if entry.Cmp(td) != 0 { - t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td) - } - // Delete the TD and verify the execution - DeleteTd(db, hash) - if entry := GetTd(db, hash); entry != nil { - t.Fatalf("Deleted TD returned: %v", entry) - } -} - -// Tests that canonical numbers can be mapped to hashes and retrieved. -func TestCanonicalMappingStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - // Create a test canonical number and assinged hash to move around - hash, number := common.Hash{0: 0xff}, uint64(314) - if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) { - t.Fatalf("Non existent canonical mapping returned: %v", entry) - } - // Write and verify the TD in the database - if err := WriteCanonicalHash(db, hash, number); err != nil { - t.Fatalf("Failed to write canonical mapping into database: %v", err) - } - if entry := GetCanonicalHash(db, number); entry == (common.Hash{}) { - t.Fatalf("Stored canonical mapping not found") - } else if entry != hash { - t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash) - } - // Delete the TD and verify the execution - DeleteCanonicalHash(db, number) - if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) { - t.Fatalf("Deleted canonical mapping returned: %v", entry) - } -} - -// Tests that head headers and head blocks can be assigned, individually. 
-func TestHeadStorage(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")}) - blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")}) - blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")}) - - // Check that no head entries are in a pristine database - if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) { - t.Fatalf("Non head header entry returned: %v", entry) - } - if entry := GetHeadBlockHash(db); entry != (common.Hash{}) { - t.Fatalf("Non head block entry returned: %v", entry) - } - if entry := GetHeadFastBlockHash(db); entry != (common.Hash{}) { - t.Fatalf("Non fast head block entry returned: %v", entry) - } - // Assign separate entries for the head header and block - if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil { - t.Fatalf("Failed to write head header hash: %v", err) - } - if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil { - t.Fatalf("Failed to write head block hash: %v", err) - } - if err := WriteHeadFastBlockHash(db, blockFast.Hash()); err != nil { - t.Fatalf("Failed to write fast head block hash: %v", err) - } - // Check that both heads are present, and different (i.e. two heads maintained) - if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() { - t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash()) - } - if entry := GetHeadBlockHash(db); entry != blockFull.Hash() { - t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash()) - } - if entry := GetHeadFastBlockHash(db); entry != blockFast.Hash() { - t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash()) - } -} - -func TestMipmapBloom(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - receipt1 := new(types.Receipt) - receipt1.Logs = vm.Logs{ - &vm.Log{Address: common.BytesToAddress([]byte("test"))}, - &vm.Log{Address: common.BytesToAddress([]byte("address"))}, - } - receipt2 := new(types.Receipt) - receipt2.Logs = vm.Logs{ - &vm.Log{Address: common.BytesToAddress([]byte("test"))}, - &vm.Log{Address: common.BytesToAddress([]byte("address1"))}, - } - - WriteMipmapBloom(db, 1, types.Receipts{receipt1}) - WriteMipmapBloom(db, 2, types.Receipts{receipt2}) - - for _, level := range MIPMapLevels { - bloom := GetMipmapBloom(db, 2, level) - if !bloom.Test(new(big.Int).SetBytes([]byte("address1"))) { - t.Error("expected test to be included on level:", level) - } - } - - // reset - db, _ = ethdb.NewMemDatabase() - receipt := new(types.Receipt) - receipt.Logs = vm.Logs{ - &vm.Log{Address: common.BytesToAddress([]byte("test"))}, - } - WriteMipmapBloom(db, 999, types.Receipts{receipt1}) - - receipt = new(types.Receipt) - receipt.Logs = vm.Logs{ - &vm.Log{Address: common.BytesToAddress([]byte("test 1"))}, - } - WriteMipmapBloom(db, 1000, types.Receipts{receipt}) - - bloom := GetMipmapBloom(db, 1000, 1000) - if bloom.TestBytes([]byte("test")) { - t.Error("test should not have been included") - } -} - -func TestMipmapChain(t *testing.T) { - dir, err := ioutil.TempDir("", "mipmap") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - var ( - db, _ = ethdb.NewLDBDatabase(dir, 16) - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = common.BytesToAddress([]byte("jeff")) - - hash1 = common.BytesToHash([]byte("topic1")) - ) - defer db.Close() - - genesis := 
WriteGenesisBlockForTesting(db, GenesisAccount{addr, big.NewInt(1000000)}) - chain, receipts := GenerateChain(genesis, db, 1010, func(i int, gen *BlockGen) { - var receipts types.Receipts - switch i { - case 1: - receipt := types.NewReceipt(nil, new(big.Int)) - receipt.Logs = vm.Logs{ - &vm.Log{ - Address: addr, - Topics: []common.Hash{hash1}, - }, - } - gen.AddUncheckedReceipt(receipt) - receipts = types.Receipts{receipt} - case 1000: - receipt := types.NewReceipt(nil, new(big.Int)) - receipt.Logs = vm.Logs{&vm.Log{Address: addr2}} - gen.AddUncheckedReceipt(receipt) - receipts = types.Receipts{receipt} - - } - - // store the receipts - err := PutReceipts(db, receipts) - if err != nil { - t.Fatal(err) - } - WriteMipmapBloom(db, uint64(i+1), receipts) - }) - for i, block := range chain { - WriteBlock(db, block) - if err := WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { - t.Fatalf("failed to insert block number: %v", err) - } - if err := WriteHeadBlockHash(db, block.Hash()); err != nil { - t.Fatalf("failed to insert block number: %v", err) - } - if err := PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil { - t.Fatal("error writing block receipts:", err) - } - } - - bloom := GetMipmapBloom(db, 0, 1000) - if bloom.TestBytes(addr2[:]) { - t.Error("address was included in bloom and should not have") - } -} diff --git a/core/database_util.go b/core/database_util.go new file mode 100644 index 000000000..fbcce3e8c --- /dev/null +++ b/core/database_util.go @@ -0,0 +1,584 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package core + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" +) + +var ( + headHeaderKey = []byte("LastHeader") + headBlockKey = []byte("LastBlock") + headFastKey = []byte("LastFast") + + blockPrefix = []byte("block-") + blockNumPrefix = []byte("block-num-") + + headerSuffix = []byte("-header") + bodySuffix = []byte("-body") + tdSuffix = []byte("-td") + + txMetaSuffix = []byte{0x01} + receiptsPrefix = []byte("receipts-") + blockReceiptsPrefix = []byte("receipts-block-") + + mipmapPre = []byte("mipmap-log-bloom-") + MIPMapLevels = []uint64{1000000, 500000, 100000, 50000, 1000} + + ExpDiffPeriod = big.NewInt(100000) + blockHashPrefix = []byte("block-hash-") // [deprecated by the header/block split, remove eventually] +) + +// CalcDifficulty is the difficulty adjustment algorithm. It returns +// the difficulty that a new block b should have when created at time +// given the parent block's time and difficulty. 
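As a quick sanity check of the adjustment described in the comment above, the sketch below calls the function with hand-picked numbers. It is not part of the patch: the helper name is illustrative, and the expected values assume the Frontier-era parameters this code targets (params.DifficultyBoundDivisor = 2048, params.DurationLimit = 13 seconds).

func exampleDifficultyAdjustment() (*big.Int, *big.Int) {
	parent := big.NewInt(2048000)
	// Child sealed 10s after its parent (< DurationLimit): difficulty rises by parent/2048.
	fast := CalcDifficulty(10, 0, big.NewInt(42), parent) // expected 2049000, assuming Frontier params
	// Child sealed 20s after its parent: difficulty drops by the same step.
	slow := CalcDifficulty(20, 0, big.NewInt(42), parent) // expected 2047000
	return fast, slow
}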
+func CalcDifficulty(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int { + diff := new(big.Int) + adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor) + bigTime := new(big.Int) + bigParentTime := new(big.Int) + + bigTime.SetUint64(time) + bigParentTime.SetUint64(parentTime) + + if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 { + diff.Add(parentDiff, adjust) + } else { + diff.Sub(parentDiff, adjust) + } + if diff.Cmp(params.MinimumDifficulty) < 0 { + diff = params.MinimumDifficulty + } + + periodCount := new(big.Int).Add(parentNumber, common.Big1) + periodCount.Div(periodCount, ExpDiffPeriod) + if periodCount.Cmp(common.Big1) > 0 { + // diff = diff + 2^(periodCount - 2) + expDiff := periodCount.Sub(periodCount, common.Big2) + expDiff.Exp(common.Big2, expDiff, nil) + diff.Add(diff, expDiff) + diff = common.BigMax(diff, params.MinimumDifficulty) + } + + return diff +} + +// CalcGasLimit computes the gas limit of the next block after parent. +// The result may be modified by the caller. +// This is miner strategy, not consensus protocol. +func CalcGasLimit(parent *types.Block) *big.Int { + // contrib = (parentGasUsed * 3 / 2) / 1024 + contrib := new(big.Int).Mul(parent.GasUsed(), big.NewInt(3)) + contrib = contrib.Div(contrib, big.NewInt(2)) + contrib = contrib.Div(contrib, params.GasLimitBoundDivisor) + + // decay = parentGasLimit / 1024 -1 + decay := new(big.Int).Div(parent.GasLimit(), params.GasLimitBoundDivisor) + decay.Sub(decay, big.NewInt(1)) + + /* + strategy: gasLimit of block-to-mine is set based on parent's + gasUsed value. if parentGasUsed > parentGasLimit * (2/3) then we + increase it, otherwise lower it (or leave it unchanged if it's right + at that usage) the amount increased/decreased depends on how far away + from parentGasLimit * (2/3) parentGasUsed is. + */ + gl := new(big.Int).Sub(parent.GasLimit(), decay) + gl = gl.Add(gl, contrib) + gl.Set(common.BigMax(gl, params.MinGasLimit)) + + // however, if we're now below the target (GenesisGasLimit) we increase the + // limit as much as we can (parentGasLimit / 1024 -1) + if gl.Cmp(params.GenesisGasLimit) < 0 { + gl.Add(parent.GasLimit(), decay) + gl.Set(common.BigMin(gl, params.GenesisGasLimit)) + } + return gl +} + +// GetCanonicalHash retrieves a hash assigned to a canonical block number. +func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash { + data, _ := db.Get(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)) + if len(data) == 0 { + return common.Hash{} + } + return common.BytesToHash(data) +} + +// GetHeadHeaderHash retrieves the hash of the current canonical head block's +// header. The difference between this and GetHeadBlockHash is that whereas the +// last block hash is only updated upon a full block import, the last header +// hash is updated already at header import, allowing head tracking for the +// light synchronization mechanism. +func GetHeadHeaderHash(db ethdb.Database) common.Hash { + data, _ := db.Get(headHeaderKey) + if len(data) == 0 { + return common.Hash{} + } + return common.BytesToHash(data) +} + +// GetHeadBlockHash retrieves the hash of the current canonical head block. +func GetHeadBlockHash(db ethdb.Database) common.Hash { + data, _ := db.Get(headBlockKey) + if len(data) == 0 { + return common.Hash{} + } + return common.BytesToHash(data) +} + +// GetHeadFastBlockHash retrieves the hash of the current canonical head block during +// fast synchronization. 
The difference between this and GetHeadBlockHash is that +// whereas the last block hash is only updated upon a full block import, the last +// fast hash is updated when importing pre-processed blocks. +func GetHeadFastBlockHash(db ethdb.Database) common.Hash { + data, _ := db.Get(headFastKey) + if len(data) == 0 { + return common.Hash{} + } + return common.BytesToHash(data) +} + +// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil +// if the header's not found. +func GetHeaderRLP(db ethdb.Database, hash common.Hash) rlp.RawValue { + data, _ := db.Get(append(append(blockPrefix, hash[:]...), headerSuffix...)) + return data +} + +// GetHeader retrieves the block header corresponding to the hash, nil if none +// found. +func GetHeader(db ethdb.Database, hash common.Hash) *types.Header { + data := GetHeaderRLP(db, hash) + if len(data) == 0 { + return nil + } + header := new(types.Header) + if err := rlp.Decode(bytes.NewReader(data), header); err != nil { + glog.V(logger.Error).Infof("invalid block header RLP for hash %x: %v", hash, err) + return nil + } + return header +} + +// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding. +func GetBodyRLP(db ethdb.Database, hash common.Hash) rlp.RawValue { + data, _ := db.Get(append(append(blockPrefix, hash[:]...), bodySuffix...)) + return data +} + +// GetBody retrieves the block body (transactons, uncles) corresponding to the +// hash, nil if none found. +func GetBody(db ethdb.Database, hash common.Hash) *types.Body { + data := GetBodyRLP(db, hash) + if len(data) == 0 { + return nil + } + body := new(types.Body) + if err := rlp.Decode(bytes.NewReader(data), body); err != nil { + glog.V(logger.Error).Infof("invalid block body RLP for hash %x: %v", hash, err) + return nil + } + return body +} + +// GetTd retrieves a block's total difficulty corresponding to the hash, nil if +// none found. +func GetTd(db ethdb.Database, hash common.Hash) *big.Int { + data, _ := db.Get(append(append(blockPrefix, hash.Bytes()...), tdSuffix...)) + if len(data) == 0 { + return nil + } + td := new(big.Int) + if err := rlp.Decode(bytes.NewReader(data), td); err != nil { + glog.V(logger.Error).Infof("invalid block total difficulty RLP for hash %x: %v", hash, err) + return nil + } + return td +} + +// GetBlock retrieves an entire block corresponding to the hash, assembling it +// back from the stored header and body. +func GetBlock(db ethdb.Database, hash common.Hash) *types.Block { + // Retrieve the block header and body contents + header := GetHeader(db, hash) + if header == nil { + return nil + } + body := GetBody(db, hash) + if body == nil { + return nil + } + // Reassemble the block and return + return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles) +} + +// GetBlockReceipts retrieves the receipts generated by the transactions included +// in a block given by its hash. 
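The reorg path in blockchain.go pairs this per-block accessor with WriteReceipts so that per-transaction lookups are rebuilt for the newly canonical chain. A minimal sketch of that round trip follows; only the wrapper name is illustrative, the accessors are the ones defined in this file.

func reindexBlockReceipts(db ethdb.Database, block *types.Block, receipts types.Receipts) error {
	// Store the receipts once, keyed by block hash (illustrative wrapper, not part of the patch).
	if err := WriteBlockReceipts(db, block.Hash(), receipts); err != nil {
		return err
	}
	// Read them back wholesale and re-create the per-transaction index,
	// as done when a side chain becomes canonical.
	stored := GetBlockReceipts(db, block.Hash())
	return WriteReceipts(db, stored)
}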
+func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts { + data, _ := db.Get(append(blockReceiptsPrefix, hash[:]...)) + if len(data) == 0 { + return nil + } + storageReceipts := []*types.ReceiptForStorage{} + if err := rlp.DecodeBytes(data, &storageReceipts); err != nil { + glog.V(logger.Error).Infof("invalid receipt array RLP for hash %x: %v", hash, err) + return nil + } + receipts := make(types.Receipts, len(storageReceipts)) + for i, receipt := range storageReceipts { + receipts[i] = (*types.Receipt)(receipt) + } + return receipts +} + +// GetTransaction retrieves a specific transaction from the database, along with +// its added positional metadata. +func GetTransaction(db ethdb.Database, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) { + // Retrieve the transaction itself from the database + data, _ := db.Get(hash.Bytes()) + if len(data) == 0 { + return nil, common.Hash{}, 0, 0 + } + var tx types.Transaction + if err := rlp.DecodeBytes(data, &tx); err != nil { + return nil, common.Hash{}, 0, 0 + } + // Retrieve the blockchain positional metadata + data, _ = db.Get(append(hash.Bytes(), txMetaSuffix...)) + if len(data) == 0 { + return nil, common.Hash{}, 0, 0 + } + var meta struct { + BlockHash common.Hash + BlockIndex uint64 + Index uint64 + } + if err := rlp.DecodeBytes(data, &meta); err != nil { + return nil, common.Hash{}, 0, 0 + } + return &tx, meta.BlockHash, meta.BlockIndex, meta.Index +} + +// GetReceipt returns a receipt by hash +func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt { + data, _ := db.Get(append(receiptsPrefix, txHash[:]...)) + if len(data) == 0 { + return nil + } + var receipt types.ReceiptForStorage + err := rlp.DecodeBytes(data, &receipt) + if err != nil { + glog.V(logger.Core).Infoln("GetReceipt err:", err) + } + return (*types.Receipt)(&receipt) +} + +// WriteCanonicalHash stores the canonical hash for the given block number. +func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error { + key := append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...) + if err := db.Put(key, hash.Bytes()); err != nil { + glog.Fatalf("failed to store number to hash mapping into database: %v", err) + return err + } + return nil +} + +// WriteHeadHeaderHash stores the head header's hash. +func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error { + if err := db.Put(headHeaderKey, hash.Bytes()); err != nil { + glog.Fatalf("failed to store last header's hash into database: %v", err) + return err + } + return nil +} + +// WriteHeadBlockHash stores the head block's hash. +func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error { + if err := db.Put(headBlockKey, hash.Bytes()); err != nil { + glog.Fatalf("failed to store last block's hash into database: %v", err) + return err + } + return nil +} + +// WriteHeadFastBlockHash stores the fast head block's hash. +func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error { + if err := db.Put(headFastKey, hash.Bytes()); err != nil { + glog.Fatalf("failed to store last fast block's hash into database: %v", err) + return err + } + return nil +} + +// WriteHeader serializes a block header into the database. +func WriteHeader(db ethdb.Database, header *types.Header) error { + data, err := rlp.EncodeToBytes(header) + if err != nil { + return err + } + key := append(append(blockPrefix, header.Hash().Bytes()...), headerSuffix...) 
+ if err := db.Put(key, data); err != nil { + glog.Fatalf("failed to store header into database: %v", err) + return err + } + glog.V(logger.Debug).Infof("stored header #%v [%x…]", header.Number, header.Hash().Bytes()[:4]) + return nil +} + +// WriteBody serializes the body of a block into the database. +func WriteBody(db ethdb.Database, hash common.Hash, body *types.Body) error { + data, err := rlp.EncodeToBytes(body) + if err != nil { + return err + } + key := append(append(blockPrefix, hash.Bytes()...), bodySuffix...) + if err := db.Put(key, data); err != nil { + glog.Fatalf("failed to store block body into database: %v", err) + return err + } + glog.V(logger.Debug).Infof("stored block body [%x…]", hash.Bytes()[:4]) + return nil +} + +// WriteTd serializes the total difficulty of a block into the database. +func WriteTd(db ethdb.Database, hash common.Hash, td *big.Int) error { + data, err := rlp.EncodeToBytes(td) + if err != nil { + return err + } + key := append(append(blockPrefix, hash.Bytes()...), tdSuffix...) + if err := db.Put(key, data); err != nil { + glog.Fatalf("failed to store block total difficulty into database: %v", err) + return err + } + glog.V(logger.Debug).Infof("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td) + return nil +} + +// WriteBlock serializes a block into the database, header and body separately. +func WriteBlock(db ethdb.Database, block *types.Block) error { + // Store the body first to retain database consistency + if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil { + return err + } + // Store the header too, signaling full block ownership + if err := WriteHeader(db, block.Header()); err != nil { + return err + } + return nil +} + +// WriteBlockReceipts stores all the transaction receipts belonging to a block +// as a single receipt slice. This is used during chain reorganisations for +// rescheduling dropped transactions. +func WriteBlockReceipts(db ethdb.Database, hash common.Hash, receipts types.Receipts) error { + // Convert the receipts into their storage form and serialize them + storageReceipts := make([]*types.ReceiptForStorage, len(receipts)) + for i, receipt := range receipts { + storageReceipts[i] = (*types.ReceiptForStorage)(receipt) + } + bytes, err := rlp.EncodeToBytes(storageReceipts) + if err != nil { + return err + } + // Store the flattened receipt slice + if err := db.Put(append(blockReceiptsPrefix, hash.Bytes()...), bytes); err != nil { + glog.Fatalf("failed to store block receipts into database: %v", err) + return err + } + glog.V(logger.Debug).Infof("stored block receipts [%x…]", hash.Bytes()[:4]) + return nil +} + +// WriteTransactions stores the transactions associated with a specific block +// into the given database. Beside writing the transaction, the function also +// stores a metadata entry along with the transaction, detailing the position +// of this within the blockchain. 
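A minimal sketch of how that positional metadata surfaces on the read side via GetTransaction (defined earlier in this file); only the wrapper name is made up. Note that the BlockIndex field is populated with the block number by the function below.

func verifyTransactionIndex(db ethdb.Database, block *types.Block) error {
	// Illustrative wrapper, not part of the patch.
	if err := WriteTransactions(db, block); err != nil {
		return err
	}
	for _, tx := range block.Transactions() {
		stored, blockHash, blockNumber, txIndex := GetTransaction(db, tx.Hash())
		if stored == nil {
			return fmt.Errorf("transaction %x missing after write", tx.Hash())
		}
		// blockHash, blockNumber and txIndex come straight from the meta entry
		// written by WriteTransactions below.
		_, _, _ = blockHash, blockNumber, txIndex
	}
	return nil
}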
+func WriteTransactions(db ethdb.Database, block *types.Block) error { + batch := db.NewBatch() + + // Iterate over each transaction and encode it with its metadata + for i, tx := range block.Transactions() { + // Encode and queue up the transaction for storage + data, err := rlp.EncodeToBytes(tx) + if err != nil { + return err + } + if err := batch.Put(tx.Hash().Bytes(), data); err != nil { + return err + } + // Encode and queue up the transaction metadata for storage + meta := struct { + BlockHash common.Hash + BlockIndex uint64 + Index uint64 + }{ + BlockHash: block.Hash(), + BlockIndex: block.NumberU64(), + Index: uint64(i), + } + data, err = rlp.EncodeToBytes(meta) + if err != nil { + return err + } + if err := batch.Put(append(tx.Hash().Bytes(), txMetaSuffix...), data); err != nil { + return err + } + } + // Write the scheduled data into the database + if err := batch.Write(); err != nil { + glog.Fatalf("failed to store transactions into database: %v", err) + return err + } + return nil +} + +// WriteReceipts stores a batch of transaction receipts into the database. +func WriteReceipts(db ethdb.Database, receipts types.Receipts) error { + batch := db.NewBatch() + + // Iterate over all the receipts and queue them for database injection + for _, receipt := range receipts { + storageReceipt := (*types.ReceiptForStorage)(receipt) + data, err := rlp.EncodeToBytes(storageReceipt) + if err != nil { + return err + } + if err := batch.Put(append(receiptsPrefix, receipt.TxHash.Bytes()...), data); err != nil { + return err + } + } + // Write the scheduled data into the database + if err := batch.Write(); err != nil { + glog.Fatalf("failed to store receipts into database: %v", err) + return err + } + return nil +} + +// DeleteCanonicalHash removes the number to hash canonical mapping. +func DeleteCanonicalHash(db ethdb.Database, number uint64) { + db.Delete(append(blockNumPrefix, big.NewInt(int64(number)).Bytes()...)) +} + +// DeleteHeader removes all block header data associated with a hash. +func DeleteHeader(db ethdb.Database, hash common.Hash) { + db.Delete(append(append(blockPrefix, hash.Bytes()...), headerSuffix...)) +} + +// DeleteBody removes all block body data associated with a hash. +func DeleteBody(db ethdb.Database, hash common.Hash) { + db.Delete(append(append(blockPrefix, hash.Bytes()...), bodySuffix...)) +} + +// DeleteTd removes all block total difficulty data associated with a hash. +func DeleteTd(db ethdb.Database, hash common.Hash) { + db.Delete(append(append(blockPrefix, hash.Bytes()...), tdSuffix...)) +} + +// DeleteBlock removes all block data associated with a hash. +func DeleteBlock(db ethdb.Database, hash common.Hash) { + DeleteBlockReceipts(db, hash) + DeleteHeader(db, hash) + DeleteBody(db, hash) + DeleteTd(db, hash) +} + +// DeleteBlockReceipts removes all receipt data associated with a block hash. +func DeleteBlockReceipts(db ethdb.Database, hash common.Hash) { + db.Delete(append(blockReceiptsPrefix, hash.Bytes()...)) +} + +// DeleteTransaction removes all transaction data associated with a hash. +func DeleteTransaction(db ethdb.Database, hash common.Hash) { + db.Delete(hash.Bytes()) + db.Delete(append(hash.Bytes(), txMetaSuffix...)) +} + +// DeleteReceipt removes all receipt data associated with a transaction hash. 
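On the per-transaction side, GetReceipt mirrors WriteReceipts, and DeleteTransaction together with DeleteReceipt (defined just below) undo both entries once a transaction drops out of the canonical chain. A small sketch with an illustrative helper name:

func unindexTransaction(db ethdb.Database, txHash common.Hash) {
	// Illustrative wrapper, not part of the patch.
	// Drop the standalone receipt, if one was ever written.
	if receipt := GetReceipt(db, txHash); receipt != nil {
		DeleteReceipt(db, txHash)
	}
	// Drop the transaction body together with its positional meta entry.
	DeleteTransaction(db, txHash)
}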
+func DeleteReceipt(db ethdb.Database, hash common.Hash) { + db.Delete(append(receiptsPrefix, hash.Bytes()...)) +} + +// [deprecated by the header/block split, remove eventually] +// GetBlockByHashOld returns the old combined block corresponding to the hash +// or nil if not found. This method is only used by the upgrade mechanism to +// access the old combined block representation. It will be dropped after the +// network transitions to eth/63. +func GetBlockByHashOld(db ethdb.Database, hash common.Hash) *types.Block { + data, _ := db.Get(append(blockHashPrefix, hash[:]...)) + if len(data) == 0 { + return nil + } + var block types.StorageBlock + if err := rlp.Decode(bytes.NewReader(data), &block); err != nil { + glog.V(logger.Error).Infof("invalid block RLP for hash %x: %v", hash, err) + return nil + } + return (*types.Block)(&block) +} + +// returns a formatted MIP mapped key by adding prefix, canonical number and level +// +// ex. fn(98, 1000) = (prefix || 1000 || 0) +func mipmapKey(num, level uint64) []byte { + lkey := make([]byte, 8) + binary.BigEndian.PutUint64(lkey, level) + key := new(big.Int).SetUint64(num / level * level) + + return append(mipmapPre, append(lkey, key.Bytes()...)...) +} + +// WriteMapmapBloom writes each address included in the receipts' logs to the +// MIP bloom bin. +func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error { + batch := db.NewBatch() + for _, level := range MIPMapLevels { + key := mipmapKey(number, level) + bloomDat, _ := db.Get(key) + bloom := types.BytesToBloom(bloomDat) + for _, receipt := range receipts { + for _, log := range receipt.Logs { + bloom.Add(log.Address.Big()) + } + } + batch.Put(key, bloom.Bytes()) + } + if err := batch.Write(); err != nil { + return fmt.Errorf("mipmap write fail for: %d: %v", number, err) + } + return nil +} + +// GetMipmapBloom returns a bloom filter using the number and level as input +// parameters. For available levels see MIPMapLevels. +func GetMipmapBloom(db ethdb.Database, number, level uint64) types.Bloom { + bloomDat, _ := db.Get(mipmapKey(number, level)) + return types.BytesToBloom(bloomDat) +} diff --git a/core/database_util_test.go b/core/database_util_test.go new file mode 100644 index 000000000..059f1ae9f --- /dev/null +++ b/core/database_util_test.go @@ -0,0 +1,609 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
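The mipmap helpers that close out database_util.go above let a log filter skip whole ranges of blocks whose coarse bloom cannot contain an address. A hedged sketch of such a check; the helper name is assumed and not part of the patch.

func mayContainLogsFrom(db ethdb.Database, number uint64, addr common.Address) bool {
	// Illustrative only: consult every granularity level covering this block number.
	for _, level := range MIPMapLevels {
		// If any level's bloom lacks the address, no block in that bin
		// can have emitted a log from it.
		if !GetMipmapBloom(db, number, level).Test(addr.Big()) {
			return false
		}
	}
	return true
}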
+ +package core + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "math/big" + "os" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/sha3" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/rlp" +) + +type diffTest struct { + ParentTimestamp uint64 + ParentDifficulty *big.Int + CurrentTimestamp uint64 + CurrentBlocknumber *big.Int + CurrentDifficulty *big.Int +} + +func (d *diffTest) UnmarshalJSON(b []byte) (err error) { + var ext struct { + ParentTimestamp string + ParentDifficulty string + CurrentTimestamp string + CurrentBlocknumber string + CurrentDifficulty string + } + if err := json.Unmarshal(b, &ext); err != nil { + return err + } + + d.ParentTimestamp = common.String2Big(ext.ParentTimestamp).Uint64() + d.ParentDifficulty = common.String2Big(ext.ParentDifficulty) + d.CurrentTimestamp = common.String2Big(ext.CurrentTimestamp).Uint64() + d.CurrentBlocknumber = common.String2Big(ext.CurrentBlocknumber) + d.CurrentDifficulty = common.String2Big(ext.CurrentDifficulty) + + return nil +} + +func TestDifficulty(t *testing.T) { + file, err := os.Open("../tests/files/BasicTests/difficulty.json") + if err != nil { + t.Fatal(err) + } + defer file.Close() + + tests := make(map[string]diffTest) + err = json.NewDecoder(file).Decode(&tests) + if err != nil { + t.Fatal(err) + } + + for name, test := range tests { + number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1)) + diff := CalcDifficulty(test.CurrentTimestamp, test.ParentTimestamp, number, test.ParentDifficulty) + if diff.Cmp(test.CurrentDifficulty) != 0 { + t.Error(name, "failed. Expected", test.CurrentDifficulty, "and calculated", diff) + } + } +} + +// Tests block header storage and retrieval operations. +func TestHeaderStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test header to move around the database and make sure it's really new + header := &types.Header{Extra: []byte("test header")} + if entry := GetHeader(db, header.Hash()); entry != nil { + t.Fatalf("Non existent header returned: %v", entry) + } + // Write and verify the header in the database + if err := WriteHeader(db, header); err != nil { + t.Fatalf("Failed to write header into database: %v", err) + } + if entry := GetHeader(db, header.Hash()); entry == nil { + t.Fatalf("Stored header not found") + } else if entry.Hash() != header.Hash() { + t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, header) + } + if entry := GetHeaderRLP(db, header.Hash()); entry == nil { + t.Fatalf("Stored header RLP not found") + } else { + hasher := sha3.NewKeccak256() + hasher.Write(entry) + + if hash := common.BytesToHash(hasher.Sum(nil)); hash != header.Hash() { + t.Fatalf("Retrieved RLP header mismatch: have %v, want %v", entry, header) + } + } + // Delete the header and verify the execution + DeleteHeader(db, header.Hash()) + if entry := GetHeader(db, header.Hash()); entry != nil { + t.Fatalf("Deleted header returned: %v", entry) + } +} + +// Tests block body storage and retrieval operations. 
+func TestBodyStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test body to move around the database and make sure it's really new + body := &types.Body{Uncles: []*types.Header{{Extra: []byte("test header")}}} + + hasher := sha3.NewKeccak256() + rlp.Encode(hasher, body) + hash := common.BytesToHash(hasher.Sum(nil)) + + if entry := GetBody(db, hash); entry != nil { + t.Fatalf("Non existent body returned: %v", entry) + } + // Write and verify the body in the database + if err := WriteBody(db, hash, body); err != nil { + t.Fatalf("Failed to write body into database: %v", err) + } + if entry := GetBody(db, hash); entry == nil { + t.Fatalf("Stored body not found") + } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(types.Transactions(body.Transactions)) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) { + t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body) + } + if entry := GetBodyRLP(db, hash); entry == nil { + t.Fatalf("Stored body RLP not found") + } else { + hasher := sha3.NewKeccak256() + hasher.Write(entry) + + if calc := common.BytesToHash(hasher.Sum(nil)); calc != hash { + t.Fatalf("Retrieved RLP body mismatch: have %v, want %v", entry, body) + } + } + // Delete the body and verify the execution + DeleteBody(db, hash) + if entry := GetBody(db, hash); entry != nil { + t.Fatalf("Deleted body returned: %v", entry) + } +} + +// Tests block storage and retrieval operations. +func TestBlockStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test block to move around the database and make sure it's really new + block := types.NewBlockWithHeader(&types.Header{ + Extra: []byte("test block"), + UncleHash: types.EmptyUncleHash, + TxHash: types.EmptyRootHash, + ReceiptHash: types.EmptyRootHash, + }) + if entry := GetBlock(db, block.Hash()); entry != nil { + t.Fatalf("Non existent block returned: %v", entry) + } + if entry := GetHeader(db, block.Hash()); entry != nil { + t.Fatalf("Non existent header returned: %v", entry) + } + if entry := GetBody(db, block.Hash()); entry != nil { + t.Fatalf("Non existent body returned: %v", entry) + } + // Write and verify the block in the database + if err := WriteBlock(db, block); err != nil { + t.Fatalf("Failed to write block into database: %v", err) + } + if entry := GetBlock(db, block.Hash()); entry == nil { + t.Fatalf("Stored block not found") + } else if entry.Hash() != block.Hash() { + t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block) + } + if entry := GetHeader(db, block.Hash()); entry == nil { + t.Fatalf("Stored header not found") + } else if entry.Hash() != block.Header().Hash() { + t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header()) + } + if entry := GetBody(db, block.Hash()); entry == nil { + t.Fatalf("Stored body not found") + } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { + t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, &types.Body{block.Transactions(), block.Uncles()}) + } + // Delete the block and verify the execution + DeleteBlock(db, block.Hash()) + if entry := GetBlock(db, block.Hash()); entry != nil { + t.Fatalf("Deleted block returned: %v", entry) + } + if entry := GetHeader(db, block.Hash()); entry != nil { + t.Fatalf("Deleted header returned: %v", entry) + } + if entry := GetBody(db, block.Hash()); entry != nil { + 
t.Fatalf("Deleted body returned: %v", entry) + } +} + +// Tests that partial block contents don't get reassembled into full blocks. +func TestPartialBlockStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + block := types.NewBlockWithHeader(&types.Header{ + Extra: []byte("test block"), + UncleHash: types.EmptyUncleHash, + TxHash: types.EmptyRootHash, + ReceiptHash: types.EmptyRootHash, + }) + // Store a header and check that it's not recognized as a block + if err := WriteHeader(db, block.Header()); err != nil { + t.Fatalf("Failed to write header into database: %v", err) + } + if entry := GetBlock(db, block.Hash()); entry != nil { + t.Fatalf("Non existent block returned: %v", entry) + } + DeleteHeader(db, block.Hash()) + + // Store a body and check that it's not recognized as a block + if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil { + t.Fatalf("Failed to write body into database: %v", err) + } + if entry := GetBlock(db, block.Hash()); entry != nil { + t.Fatalf("Non existent block returned: %v", entry) + } + DeleteBody(db, block.Hash()) + + // Store a header and a body separately and check reassembly + if err := WriteHeader(db, block.Header()); err != nil { + t.Fatalf("Failed to write header into database: %v", err) + } + if err := WriteBody(db, block.Hash(), &types.Body{block.Transactions(), block.Uncles()}); err != nil { + t.Fatalf("Failed to write body into database: %v", err) + } + if entry := GetBlock(db, block.Hash()); entry == nil { + t.Fatalf("Stored block not found") + } else if entry.Hash() != block.Hash() { + t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block) + } +} + +// Tests block total difficulty storage and retrieval operations. +func TestTdStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + // Create a test TD to move around the database and make sure it's really new + hash, td := common.Hash{}, big.NewInt(314) + if entry := GetTd(db, hash); entry != nil { + t.Fatalf("Non existent TD returned: %v", entry) + } + // Write and verify the TD in the database + if err := WriteTd(db, hash, td); err != nil { + t.Fatalf("Failed to write TD into database: %v", err) + } + if entry := GetTd(db, hash); entry == nil { + t.Fatalf("Stored TD not found") + } else if entry.Cmp(td) != 0 { + t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td) + } + // Delete the TD and verify the execution + DeleteTd(db, hash) + if entry := GetTd(db, hash); entry != nil { + t.Fatalf("Deleted TD returned: %v", entry) + } +} + +// Tests that canonical numbers can be mapped to hashes and retrieved. 
+func TestCanonicalMappingStorage(t *testing.T) {
+	db, _ := ethdb.NewMemDatabase()
+
+	// Create a test canonical number and assigned hash to move around
+	hash, number := common.Hash{0: 0xff}, uint64(314)
+	if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
+		t.Fatalf("Non existent canonical mapping returned: %v", entry)
+	}
+	// Write and verify the canonical mapping in the database
+	if err := WriteCanonicalHash(db, hash, number); err != nil {
+		t.Fatalf("Failed to write canonical mapping into database: %v", err)
+	}
+	if entry := GetCanonicalHash(db, number); entry == (common.Hash{}) {
+		t.Fatalf("Stored canonical mapping not found")
+	} else if entry != hash {
+		t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash)
+	}
+	// Delete the canonical mapping and verify the execution
+	DeleteCanonicalHash(db, number)
+	if entry := GetCanonicalHash(db, number); entry != (common.Hash{}) {
+		t.Fatalf("Deleted canonical mapping returned: %v", entry)
+	}
+}
+
+// Tests that head headers and head blocks can be assigned, individually.
+func TestHeadStorage(t *testing.T) {
+	db, _ := ethdb.NewMemDatabase()
+
+	blockHead := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block header")})
+	blockFull := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block full")})
+	blockFast := types.NewBlockWithHeader(&types.Header{Extra: []byte("test block fast")})
+
+	// Check that no head entries are in a pristine database
+	if entry := GetHeadHeaderHash(db); entry != (common.Hash{}) {
+		t.Fatalf("Non head header entry returned: %v", entry)
+	}
+	if entry := GetHeadBlockHash(db); entry != (common.Hash{}) {
+		t.Fatalf("Non head block entry returned: %v", entry)
+	}
+	if entry := GetHeadFastBlockHash(db); entry != (common.Hash{}) {
+		t.Fatalf("Non fast head block entry returned: %v", entry)
+	}
+	// Assign separate entries for the head header and block
+	if err := WriteHeadHeaderHash(db, blockHead.Hash()); err != nil {
+		t.Fatalf("Failed to write head header hash: %v", err)
+	}
+	if err := WriteHeadBlockHash(db, blockFull.Hash()); err != nil {
+		t.Fatalf("Failed to write head block hash: %v", err)
+	}
+	if err := WriteHeadFastBlockHash(db, blockFast.Hash()); err != nil {
+		t.Fatalf("Failed to write fast head block hash: %v", err)
+	}
+	// Check that both heads are present, and different (i.e. two heads maintained)
+	if entry := GetHeadHeaderHash(db); entry != blockHead.Hash() {
+		t.Fatalf("Head header hash mismatch: have %v, want %v", entry, blockHead.Hash())
+	}
+	if entry := GetHeadBlockHash(db); entry != blockFull.Hash() {
+		t.Fatalf("Head block hash mismatch: have %v, want %v", entry, blockFull.Hash())
+	}
+	if entry := GetHeadFastBlockHash(db); entry != blockFast.Hash() {
+		t.Fatalf("Fast head block hash mismatch: have %v, want %v", entry, blockFast.Hash())
+	}
+}
+
+// Tests that transactions and associated metadata can be stored and retrieved.
+func TestTransactionStorage(t *testing.T) {
+	db, _ := ethdb.NewMemDatabase()
+
+	tx1 := types.NewTransaction(1, common.BytesToAddress([]byte{0x11}), big.NewInt(111), big.NewInt(1111), big.NewInt(11111), []byte{0x11, 0x11, 0x11})
+	tx2 := types.NewTransaction(2, common.BytesToAddress([]byte{0x22}), big.NewInt(222), big.NewInt(2222), big.NewInt(22222), []byte{0x22, 0x22, 0x22})
+	tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), big.NewInt(3333), big.NewInt(33333), []byte{0x33, 0x33, 0x33})
+	txs := []*types.Transaction{tx1, tx2, tx3}
+
+	block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil)
+
+	// Check that no transaction entries are in a pristine database
+	for i, tx := range txs {
+		if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil {
+			t.Fatalf("tx #%d [%x]: non existent transaction returned: %v", i, tx.Hash(), txn)
+		}
+	}
+	// Insert all the transactions into the database, and verify contents
+	if err := WriteTransactions(db, block); err != nil {
+		t.Fatalf("failed to write transactions: %v", err)
+	}
+	for i, tx := range txs {
+		if txn, hash, number, index := GetTransaction(db, tx.Hash()); txn == nil {
+			t.Fatalf("tx #%d [%x]: transaction not found", i, tx.Hash())
+		} else {
+			if hash != block.Hash() || number != block.NumberU64() || index != uint64(i) {
+				t.Fatalf("tx #%d [%x]: positional metadata mismatch: have %x/%d/%d, want %x/%v/%v", i, tx.Hash(), hash, number, index, block.Hash(), block.NumberU64(), i)
+			}
+			if tx.String() != txn.String() {
+				t.Fatalf("tx #%d [%x]: transaction mismatch: have %v, want %v", i, tx.Hash(), txn, tx)
+			}
+		}
+	}
+	// Delete the transactions and check purge
+	for i, tx := range txs {
+		DeleteTransaction(db, tx.Hash())
+		if txn, _, _, _ := GetTransaction(db, tx.Hash()); txn != nil {
+			t.Fatalf("tx #%d [%x]: deleted transaction returned: %v", i, tx.Hash(), txn)
+		}
+	}
+}
+
+// Tests that receipts can be stored and retrieved.
+func TestReceiptStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + receipt1 := &types.Receipt{ + PostState: []byte{0x01}, + CumulativeGasUsed: big.NewInt(1), + Logs: vm.Logs{ + &vm.Log{Address: common.BytesToAddress([]byte{0x11})}, + &vm.Log{Address: common.BytesToAddress([]byte{0x01, 0x11})}, + }, + TxHash: common.BytesToHash([]byte{0x11, 0x11}), + ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}), + GasUsed: big.NewInt(111111), + } + receipt2 := &types.Receipt{ + PostState: []byte{0x02}, + CumulativeGasUsed: big.NewInt(2), + Logs: vm.Logs{ + &vm.Log{Address: common.BytesToAddress([]byte{0x22})}, + &vm.Log{Address: common.BytesToAddress([]byte{0x02, 0x22})}, + }, + TxHash: common.BytesToHash([]byte{0x22, 0x22}), + ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}), + GasUsed: big.NewInt(222222), + } + receipts := []*types.Receipt{receipt1, receipt2} + + // Check that no receipt entries are in a pristine database + for i, receipt := range receipts { + if r := GetReceipt(db, receipt.TxHash); r != nil { + t.Fatalf("receipt #%d [%x]: non existent receipt returned: %v", i, receipt.TxHash, r) + } + } + // Insert all the receipts into the database, and verify contents + if err := WriteReceipts(db, receipts); err != nil { + t.Fatalf("failed to write receipts: %v", err) + } + for i, receipt := range receipts { + if r := GetReceipt(db, receipt.TxHash); r == nil { + t.Fatalf("receipt #%d [%x]: receipt not found", i, receipt.TxHash) + } else { + rlpHave, _ := rlp.EncodeToBytes(r) + rlpWant, _ := rlp.EncodeToBytes(receipt) + + if bytes.Compare(rlpHave, rlpWant) != 0 { + t.Fatalf("receipt #%d [%x]: receipt mismatch: have %v, want %v", i, receipt.TxHash, r, receipt) + } + } + } + // Delete the receipts and check purge + for i, receipt := range receipts { + DeleteReceipt(db, receipt.TxHash) + if r := GetReceipt(db, receipt.TxHash); r != nil { + t.Fatalf("receipt #%d [%x]: deleted receipt returned: %v", i, receipt.TxHash, r) + } + } +} + +// Tests that receipts associated with a single block can be stored and retrieved. 
+func TestBlockReceiptStorage(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + receipt1 := &types.Receipt{ + PostState: []byte{0x01}, + CumulativeGasUsed: big.NewInt(1), + Logs: vm.Logs{ + &vm.Log{Address: common.BytesToAddress([]byte{0x11})}, + &vm.Log{Address: common.BytesToAddress([]byte{0x01, 0x11})}, + }, + TxHash: common.BytesToHash([]byte{0x11, 0x11}), + ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}), + GasUsed: big.NewInt(111111), + } + receipt2 := &types.Receipt{ + PostState: []byte{0x02}, + CumulativeGasUsed: big.NewInt(2), + Logs: vm.Logs{ + &vm.Log{Address: common.BytesToAddress([]byte{0x22})}, + &vm.Log{Address: common.BytesToAddress([]byte{0x02, 0x22})}, + }, + TxHash: common.BytesToHash([]byte{0x22, 0x22}), + ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}), + GasUsed: big.NewInt(222222), + } + receipts := []*types.Receipt{receipt1, receipt2} + + // Check that no receipt entries are in a pristine database + hash := common.BytesToHash([]byte{0x03, 0x14}) + if rs := GetBlockReceipts(db, hash); len(rs) != 0 { + t.Fatalf("non existent receipts returned: %v", rs) + } + // Insert the receipt slice into the database and check presence + if err := WriteBlockReceipts(db, hash, receipts); err != nil { + t.Fatalf("failed to write block receipts: %v", err) + } + if rs := GetBlockReceipts(db, hash); len(rs) == 0 { + t.Fatalf("no receipts returned") + } else { + for i := 0; i < len(receipts); i++ { + rlpHave, _ := rlp.EncodeToBytes(rs[i]) + rlpWant, _ := rlp.EncodeToBytes(receipts[i]) + + if bytes.Compare(rlpHave, rlpWant) != 0 { + t.Fatalf("receipt #%d: receipt mismatch: have %v, want %v", i, rs[i], receipts[i]) + } + } + } + // Delete the receipt slice and check purge + DeleteBlockReceipts(db, hash) + if rs := GetBlockReceipts(db, hash); len(rs) != 0 { + t.Fatalf("deleted receipts returned: %v", rs) + } +} + +func TestMipmapBloom(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + receipt1 := new(types.Receipt) + receipt1.Logs = vm.Logs{ + &vm.Log{Address: common.BytesToAddress([]byte("test"))}, + &vm.Log{Address: common.BytesToAddress([]byte("address"))}, + } + receipt2 := new(types.Receipt) + receipt2.Logs = vm.Logs{ + &vm.Log{Address: common.BytesToAddress([]byte("test"))}, + &vm.Log{Address: common.BytesToAddress([]byte("address1"))}, + } + + WriteMipmapBloom(db, 1, types.Receipts{receipt1}) + WriteMipmapBloom(db, 2, types.Receipts{receipt2}) + + for _, level := range MIPMapLevels { + bloom := GetMipmapBloom(db, 2, level) + if !bloom.Test(new(big.Int).SetBytes([]byte("address1"))) { + t.Error("expected test to be included on level:", level) + } + } + + // reset + db, _ = ethdb.NewMemDatabase() + receipt := new(types.Receipt) + receipt.Logs = vm.Logs{ + &vm.Log{Address: common.BytesToAddress([]byte("test"))}, + } + WriteMipmapBloom(db, 999, types.Receipts{receipt1}) + + receipt = new(types.Receipt) + receipt.Logs = vm.Logs{ + &vm.Log{Address: common.BytesToAddress([]byte("test 1"))}, + } + WriteMipmapBloom(db, 1000, types.Receipts{receipt}) + + bloom := GetMipmapBloom(db, 1000, 1000) + if bloom.TestBytes([]byte("test")) { + t.Error("test should not have been included") + } +} + +func TestMipmapChain(t *testing.T) { + dir, err := ioutil.TempDir("", "mipmap") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + var ( + db, _ = ethdb.NewLDBDatabase(dir, 16) + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = 
common.BytesToAddress([]byte("jeff")) + + hash1 = common.BytesToHash([]byte("topic1")) + ) + defer db.Close() + + genesis := WriteGenesisBlockForTesting(db, GenesisAccount{addr, big.NewInt(1000000)}) + chain, receipts := GenerateChain(genesis, db, 1010, func(i int, gen *BlockGen) { + var receipts types.Receipts + switch i { + case 1: + receipt := types.NewReceipt(nil, new(big.Int)) + receipt.Logs = vm.Logs{ + &vm.Log{ + Address: addr, + Topics: []common.Hash{hash1}, + }, + } + gen.AddUncheckedReceipt(receipt) + receipts = types.Receipts{receipt} + case 1000: + receipt := types.NewReceipt(nil, new(big.Int)) + receipt.Logs = vm.Logs{&vm.Log{Address: addr2}} + gen.AddUncheckedReceipt(receipt) + receipts = types.Receipts{receipt} + + } + + // store the receipts + err := WriteReceipts(db, receipts) + if err != nil { + t.Fatal(err) + } + WriteMipmapBloom(db, uint64(i+1), receipts) + }) + for i, block := range chain { + WriteBlock(db, block) + if err := WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil { + t.Fatalf("failed to insert block number: %v", err) + } + if err := WriteHeadBlockHash(db, block.Hash()); err != nil { + t.Fatalf("failed to insert block number: %v", err) + } + if err := WriteBlockReceipts(db, block.Hash(), receipts[i]); err != nil { + t.Fatal("error writing block receipts:", err) + } + } + + bloom := GetMipmapBloom(db, 0, 1000) + if bloom.TestBytes(addr2[:]) { + t.Error("address was included in bloom and should not have") + } +} diff --git a/core/genesis.go b/core/genesis.go index dac5de92f..3fd8f42b0 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -103,7 +103,7 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block, if err := WriteBlock(chainDb, block); err != nil { return nil, err } - if err := PutBlockReceipts(chainDb, block.Hash(), nil); err != nil { + if err := WriteBlockReceipts(chainDb, block.Hash(), nil); err != nil { return nil, err } if err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()); err != nil { diff --git a/core/transaction_util.go b/core/transaction_util.go deleted file mode 100644 index e2e5b9aee..000000000 --- a/core/transaction_util.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package core - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/logger" - "github.com/ethereum/go-ethereum/logger/glog" - "github.com/ethereum/go-ethereum/rlp" - "github.com/syndtr/goleveldb/leveldb" -) - -var ( - receiptsPre = []byte("receipts-") - blockReceiptsPre = []byte("receipts-block-") -) - -// PutTransactions stores the transactions in the given database -func PutTransactions(db ethdb.Database, block *types.Block, txs types.Transactions) error { - batch := db.NewBatch() - - for i, tx := range block.Transactions() { - rlpEnc, err := rlp.EncodeToBytes(tx) - if err != nil { - return fmt.Errorf("failed encoding tx: %v", err) - } - - batch.Put(tx.Hash().Bytes(), rlpEnc) - - var txExtra struct { - BlockHash common.Hash - BlockIndex uint64 - Index uint64 - } - txExtra.BlockHash = block.Hash() - txExtra.BlockIndex = block.NumberU64() - txExtra.Index = uint64(i) - rlpMeta, err := rlp.EncodeToBytes(txExtra) - if err != nil { - return fmt.Errorf("failed encoding tx meta data: %v", err) - } - - batch.Put(append(tx.Hash().Bytes(), 0x0001), rlpMeta) - } - - if err := batch.Write(); err != nil { - return fmt.Errorf("failed writing tx to db: %v", err) - } - return nil -} - -func DeleteTransaction(db ethdb.Database, txHash common.Hash) { - db.Delete(txHash[:]) -} - -func GetTransaction(db ethdb.Database, txhash common.Hash) *types.Transaction { - data, _ := db.Get(txhash[:]) - if len(data) != 0 { - var tx types.Transaction - if err := rlp.DecodeBytes(data, &tx); err != nil { - return nil - } - return &tx - } - return nil -} - -// PutReceipts stores the receipts in the current database -func PutReceipts(db ethdb.Database, receipts types.Receipts) error { - batch := new(leveldb.Batch) - _, batchWrite := db.(*ethdb.LDBDatabase) - - for _, receipt := range receipts { - storageReceipt := (*types.ReceiptForStorage)(receipt) - bytes, err := rlp.EncodeToBytes(storageReceipt) - if err != nil { - return err - } - - if batchWrite { - batch.Put(append(receiptsPre, receipt.TxHash[:]...), bytes) - } else { - err = db.Put(append(receiptsPre, receipt.TxHash[:]...), bytes) - if err != nil { - return err - } - } - } - if db, ok := db.(*ethdb.LDBDatabase); ok { - if err := db.LDB().Write(batch, nil); err != nil { - return err - } - } - - return nil -} - -// Delete a receipts from the database -func DeleteReceipt(db ethdb.Database, txHash common.Hash) { - db.Delete(append(receiptsPre, txHash[:]...)) -} - -// GetReceipt returns a receipt by hash -func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt { - data, _ := db.Get(append(receiptsPre, txHash[:]...)) - if len(data) == 0 { - return nil - } - var receipt types.ReceiptForStorage - err := rlp.DecodeBytes(data, &receipt) - if err != nil { - glog.V(logger.Core).Infoln("GetReceipt err:", err) - } - return (*types.Receipt)(&receipt) -} - -// GetBlockReceipts returns the receipts generated by the transactions -// included in block's given hash. 
-func GetBlockReceipts(db ethdb.Database, hash common.Hash) types.Receipts { - data, _ := db.Get(append(blockReceiptsPre, hash[:]...)) - if len(data) == 0 { - return nil - } - rs := []*types.ReceiptForStorage{} - if err := rlp.DecodeBytes(data, &rs); err != nil { - glog.V(logger.Error).Infof("invalid receipt array RLP for hash %x: %v", hash, err) - return nil - } - receipts := make(types.Receipts, len(rs)) - for i, receipt := range rs { - receipts[i] = (*types.Receipt)(receipt) - } - return receipts -} - -// PutBlockReceipts stores the block's transactions associated receipts -// and stores them by block hash in a single slice. This is required for -// forks and chain reorgs -func PutBlockReceipts(db ethdb.Database, hash common.Hash, receipts types.Receipts) error { - rs := make([]*types.ReceiptForStorage, len(receipts)) - for i, receipt := range receipts { - rs[i] = (*types.ReceiptForStorage)(receipt) - } - bytes, err := rlp.EncodeToBytes(rs) - if err != nil { - return err - } - err = db.Put(append(blockReceiptsPre, hash[:]...), bytes) - if err != nil { - return err - } - return nil -} diff --git a/eth/backend_test.go b/eth/backend_test.go index 0379fc843..83219de62 100644 --- a/eth/backend_test.go +++ b/eth/backend_test.go @@ -32,7 +32,7 @@ func TestMipmapUpgrade(t *testing.T) { } // store the receipts - err := core.PutReceipts(db, receipts) + err := core.WriteReceipts(db, receipts) if err != nil { t.Fatal(err) } @@ -45,7 +45,7 @@ func TestMipmapUpgrade(t *testing.T) { if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { t.Fatalf("failed to insert block number: %v", err) } - if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil { + if err := core.WriteBlockReceipts(db, block.Hash(), receipts[i]); err != nil { t.Fatal("error writing block receipts:", err) } } diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index a5418e2e7..5772114b3 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -64,7 +64,7 @@ func BenchmarkMipmaps(b *testing.B) { } // store the receipts - err := core.PutReceipts(db, receipts) + err := core.WriteReceipts(db, receipts) if err != nil { b.Fatal(err) } @@ -78,7 +78,7 @@ func BenchmarkMipmaps(b *testing.B) { if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { b.Fatalf("failed to insert block number: %v", err) } - if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil { + if err := core.WriteBlockReceipts(db, block.Hash(), receipts[i]); err != nil { b.Fatal("error writing block receipts:", err) } } @@ -163,7 +163,7 @@ func TestFilters(t *testing.T) { } // store the receipts - err := core.PutReceipts(db, receipts) + err := core.WriteReceipts(db, receipts) if err != nil { t.Fatal(err) } @@ -180,7 +180,7 @@ func TestFilters(t *testing.T) { if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil { t.Fatalf("failed to insert block number: %v", err) } - if err := core.PutBlockReceipts(db, block.Hash(), receipts[i]); err != nil { + if err := core.WriteBlockReceipts(db, block.Hash(), receipts[i]); err != nil { t.Fatal("error writing block receipts:", err) } } diff --git a/miner/worker.go b/miner/worker.go index 238f1a4bf..aa0fa85cb 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -305,9 +305,9 @@ func (self *worker) wait() { // check if canon block and write transactions if stat == core.CanonStatTy { // This puts transactions in a extra db for rpc - core.PutTransactions(self.chainDb, block, block.Transactions()) + core.WriteTransactions(self.chainDb, block) 
// store the receipts - core.PutReceipts(self.chainDb, work.receipts) + core.WriteReceipts(self.chainDb, work.receipts) // Write map map bloom filters core.WriteMipmapBloom(self.chainDb, block.NumberU64(), work.receipts) } @@ -320,7 +320,7 @@ func (self *worker) wait() { self.mux.Post(core.ChainHeadEvent{block}) self.mux.Post(logs) } - if err := core.PutBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil { + if err := core.WriteBlockReceipts(self.chainDb, block.Hash(), receipts); err != nil { glog.V(logger.Warn).Infoln("error writing block receipts:", err) } }(block, work.state.Logs(), work.receipts) diff --git a/xeth/xeth.go b/xeth/xeth.go index 243bef0b8..ae9f1fe47 100644 --- a/xeth/xeth.go +++ b/xeth/xeth.go @@ -322,44 +322,11 @@ func (self *XEth) EthBlockByHash(strHash string) *types.Block { return block } -func (self *XEth) EthTransactionByHash(hash string) (tx *types.Transaction, blhash common.Hash, blnum *big.Int, txi uint64) { - // Due to increasing return params and need to determine if this is from transaction pool or - // some chain, this probably needs to be refactored for more expressiveness - data, _ := self.backend.ChainDb().Get(common.FromHex(hash)) - if len(data) != 0 { - dtx := new(types.Transaction) - if err := rlp.DecodeBytes(data, dtx); err != nil { - glog.V(logger.Error).Infoln(err) - return - } - tx = dtx - } else { // check pending transactions - tx = self.backend.TxPool().GetTransaction(common.HexToHash(hash)) - } - - // meta - var txExtra struct { - BlockHash common.Hash - BlockIndex uint64 - Index uint64 - } - - v, dberr := self.backend.ChainDb().Get(append(common.FromHex(hash), 0x0001)) - // TODO check specifically for ErrNotFound - if dberr != nil { - return +func (self *XEth) EthTransactionByHash(hash string) (*types.Transaction, common.Hash, uint64, uint64) { + if tx, hash, number, index := core.GetTransaction(self.backend.ChainDb(), common.HexToHash(hash)); tx != nil { + return tx, hash, number, index } - r := bytes.NewReader(v) - err := rlp.Decode(r, &txExtra) - if err == nil { - blhash = txExtra.BlockHash - blnum = big.NewInt(int64(txExtra.BlockIndex)) - txi = txExtra.Index - } else { - glog.V(logger.Error).Infoln(err) - } - - return + return self.backend.TxPool().GetTransaction(common.HexToHash(hash)), common.Hash{}, 0, 0 } func (self *XEth) BlockByNumber(num int64) *Block { -- cgit
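For readers tracking the accessor rename, the sketch below (not part of the patch itself) shows how the renamed helpers fit together end to end: persist a block's transactions and receipts, then read a transaction back along with its positional metadata. The package name "example" and the storeAndLookup helper are illustrative assumptions; only the core.Write*/Get* calls and their signatures come from this change.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
)

// storeAndLookup is a hypothetical helper demonstrating the renamed accessors.
func storeAndLookup(db ethdb.Database, block *types.Block, receipts types.Receipts) error {
	// WriteTransactions replaces PutTransactions and derives each transaction's
	// metadata (block hash, block number, index) from the block itself.
	if err := core.WriteTransactions(db, block); err != nil {
		return err
	}
	// WriteReceipts replaces PutReceipts and batches all receipts into one write.
	if err := core.WriteReceipts(db, receipts); err != nil {
		return err
	}
	for _, tx := range block.Transactions() {
		// GetTransaction now returns the positional metadata alongside the body.
		txn, blockHash, blockNumber, index := core.GetTransaction(db, tx.Hash())
		if txn == nil {
			return fmt.Errorf("transaction %x not found", tx.Hash())
		}
		fmt.Printf("tx %x: block %x (#%d), index %d\n", tx.Hash(), blockHash, blockNumber, index)
		// GetReceipt still keys individual receipts by transaction hash.
		if r := core.GetReceipt(db, tx.Hash()); r == nil {
			return fmt.Errorf("receipt for %x not found", tx.Hash())
		}
	}
	return nil
}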