From 7c7692933c21b77328a94eed714f66c276776197 Mon Sep 17 00:00:00 2001
From: Jeffrey Wilcke
Date: Mon, 31 Aug 2015 17:09:50 +0200
Subject: cmd/geth, cmd/utils, core, rpc: renamed to blockchain

* Renamed ChainManager to BlockChain
* Checkpointing is no longer required and never really properly worked when
  the state was corrupted.
---
 core/bench_test.go           |   2 +-
 core/block_processor.go      |  10 +-
 core/block_processor_test.go |   6 +-
 core/blockchain.go           | 809 +++++++++++++++++++++++++++++++++++++++++
 core/blockchain_test.go      | 652 +++++++++++++++++++++++++++++++++
 core/chain_makers.go         |   4 +-
 core/chain_makers_test.go    |   2 +-
 core/chain_manager.go        | 849 -------------------------------------------
 core/chain_manager_test.go   | 652 ---------------------------------
 core/helper_test.go          |   6 +-
 core/manager.go              |   2 +-
 core/vm/common.go            |   4 +-
 core/vm/contract.go          |   4 +-
 core/vm/doc.go               |   8 +-
 core/vm/environment.go       |   4 +-
 core/vm/memory.go            |   2 +-
 core/vm_env.go               |   4 +-
 17 files changed, 1490 insertions(+), 1530 deletions(-)
 create mode 100644 core/blockchain.go
 create mode 100644 core/blockchain_test.go
 delete mode 100644 core/chain_manager.go
 delete mode 100644 core/chain_manager_test.go
(limited to 'core')

diff --git a/core/bench_test.go b/core/bench_test.go
index def4f0d2a..27f3e3158 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -168,7 +168,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
 	// Time the insertion of the new chain.
 	// State and blocks are stored in the same DB.
 	evmux := new(event.TypeMux)
-	chainman, _ := NewChainManager(db, FakePow{}, evmux)
+	chainman, _ := NewBlockChain(db, FakePow{}, evmux)
 	chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux))
 	defer chainman.Stop()
 	b.ReportAllocs()
diff --git a/core/block_processor.go b/core/block_processor.go
index 40e3931ba..783e15687 100644
--- a/core/block_processor.go
+++ b/core/block_processor.go
@@ -47,7 +47,7 @@ type BlockProcessor struct {
 	// Mutex for locking the block processor.
Blocks can only be handled one at a time mutex sync.Mutex // Canonical block chain - bc *ChainManager + bc *BlockChain // non-persistent key/value memory storage mem map[string]*big.Int // Proof of work used for validating @@ -70,12 +70,12 @@ type GasPool interface { SubGas(gas, price *big.Int) error } -func NewBlockProcessor(db ethdb.Database, pow pow.PoW, chainManager *ChainManager, eventMux *event.TypeMux) *BlockProcessor { +func NewBlockProcessor(db ethdb.Database, pow pow.PoW, blockchain *BlockChain, eventMux *event.TypeMux) *BlockProcessor { sm := &BlockProcessor{ chainDb: db, mem: make(map[string]*big.Int), Pow: pow, - bc: chainManager, + bc: blockchain, eventMux: eventMux, } return sm @@ -124,7 +124,7 @@ func (self *BlockProcessor) ApplyTransaction(gp GasPool, statedb *state.StateDB, return receipt, gas, err } -func (self *BlockProcessor) ChainManager() *ChainManager { +func (self *BlockProcessor) BlockChain() *BlockChain { return self.bc } @@ -347,7 +347,7 @@ func (sm *BlockProcessor) VerifyUncles(statedb *state.StateDB, block, parent *ty // GetBlockReceipts returns the receipts beloniging to the block hash func (sm *BlockProcessor) GetBlockReceipts(bhash common.Hash) types.Receipts { - if block := sm.ChainManager().GetBlock(bhash); block != nil { + if block := sm.BlockChain().GetBlock(bhash); block != nil { return GetBlockReceipts(sm.chainDb, block.Hash()) } diff --git a/core/block_processor_test.go b/core/block_processor_test.go index 5735b8d0a..ba8bd7bcd 100644 --- a/core/block_processor_test.go +++ b/core/block_processor_test.go @@ -30,16 +30,16 @@ import ( "github.com/ethereum/go-ethereum/pow/ezp" ) -func proc() (*BlockProcessor, *ChainManager) { +func proc() (*BlockProcessor, *BlockChain) { db, _ := ethdb.NewMemDatabase() var mux event.TypeMux WriteTestNetGenesisBlock(db, 0) - chainMan, err := NewChainManager(db, thePow(), &mux) + blockchain, err := NewBlockChain(db, thePow(), &mux) if err != nil { fmt.Println(err) } - return NewBlockProcessor(db, ezp.New(), chainMan, &mux), chainMan + return NewBlockProcessor(db, ezp.New(), blockchain, &mux), blockchain } func TestNumber(t *testing.T) { diff --git a/core/blockchain.go b/core/blockchain.go new file mode 100644 index 000000000..e8209f8e3 --- /dev/null +++ b/core/blockchain.go @@ -0,0 +1,809 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package core implements the Ethereum consensus protocol. 
+package core + +import ( + "errors" + "fmt" + "io" + "math/big" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/logger" + "github.com/ethereum/go-ethereum/logger/glog" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/pow" + "github.com/ethereum/go-ethereum/rlp" + "github.com/hashicorp/golang-lru" +) + +var ( + chainlogger = logger.NewLogger("CHAIN") + jsonlogger = logger.NewJsonLogger() + + blockInsertTimer = metrics.NewTimer("chain/inserts") + + ErrNoGenesis = errors.New("Genesis not found in chain") +) + +const ( + headerCacheLimit = 512 + bodyCacheLimit = 256 + tdCacheLimit = 1024 + blockCacheLimit = 256 + maxFutureBlocks = 256 + maxTimeFutureBlocks = 30 +) + +type BlockChain struct { + chainDb ethdb.Database + processor types.BlockProcessor + eventMux *event.TypeMux + genesisBlock *types.Block + // Last known total difficulty + mu sync.RWMutex + chainmu sync.RWMutex + tsmu sync.RWMutex + + td *big.Int + currentBlock *types.Block + currentGasLimit *big.Int + + headerCache *lru.Cache // Cache for the most recent block headers + bodyCache *lru.Cache // Cache for the most recent block bodies + bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format + tdCache *lru.Cache // Cache for the most recent block total difficulties + blockCache *lru.Cache // Cache for the most recent entire blocks + futureBlocks *lru.Cache // future blocks are blocks added for later processing + + quit chan struct{} + running int32 // running must be called automically + // procInterrupt must be atomically called + procInterrupt int32 // interrupt signaler for block processing + wg sync.WaitGroup + + pow pow.PoW +} + +func NewBlockChain(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*BlockChain, error) { + headerCache, _ := lru.New(headerCacheLimit) + bodyCache, _ := lru.New(bodyCacheLimit) + bodyRLPCache, _ := lru.New(bodyCacheLimit) + tdCache, _ := lru.New(tdCacheLimit) + blockCache, _ := lru.New(blockCacheLimit) + futureBlocks, _ := lru.New(maxFutureBlocks) + + bc := &BlockChain{ + chainDb: chainDb, + eventMux: mux, + quit: make(chan struct{}), + headerCache: headerCache, + bodyCache: bodyCache, + bodyRLPCache: bodyRLPCache, + tdCache: tdCache, + blockCache: blockCache, + futureBlocks: futureBlocks, + pow: pow, + } + + bc.genesisBlock = bc.GetBlockByNumber(0) + if bc.genesisBlock == nil { + reader, err := NewDefaultGenesisReader() + if err != nil { + return nil, err + } + bc.genesisBlock, err = WriteGenesisBlock(chainDb, reader) + if err != nil { + return nil, err + } + glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block") + } + if err := bc.setLastState(); err != nil { + return nil, err + } + // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain + for hash, _ := range BadHashes { + if block := bc.GetBlock(hash); block != nil { + glog.V(logger.Error).Infof("Found bad hash. Reorganising chain to state %x\n", block.ParentHash().Bytes()[:4]) + block = bc.GetBlock(block.ParentHash()) + if block == nil { + glog.Fatal("Unable to complete. Parent block not found. Corrupted DB?") + } + bc.SetHead(block) + + glog.V(logger.Error).Infoln("Chain reorg was successfull. 
Resuming normal operation") + } + } + // Take ownership of this particular state + go bc.update() + return bc, nil +} + +func (bc *BlockChain) SetHead(head *types.Block) { + bc.mu.Lock() + defer bc.mu.Unlock() + + for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) { + DeleteBlock(bc.chainDb, block.Hash()) + } + bc.headerCache.Purge() + bc.bodyCache.Purge() + bc.bodyRLPCache.Purge() + bc.blockCache.Purge() + bc.futureBlocks.Purge() + + bc.currentBlock = head + bc.setTotalDifficulty(bc.GetTd(head.Hash())) + bc.insert(head) + bc.setLastState() +} + +func (self *BlockChain) Td() *big.Int { + self.mu.RLock() + defer self.mu.RUnlock() + + return new(big.Int).Set(self.td) +} + +func (self *BlockChain) GasLimit() *big.Int { + self.mu.RLock() + defer self.mu.RUnlock() + + return self.currentBlock.GasLimit() +} + +func (self *BlockChain) LastBlockHash() common.Hash { + self.mu.RLock() + defer self.mu.RUnlock() + + return self.currentBlock.Hash() +} + +func (self *BlockChain) CurrentBlock() *types.Block { + self.mu.RLock() + defer self.mu.RUnlock() + + return self.currentBlock +} + +func (self *BlockChain) Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) { + self.mu.RLock() + defer self.mu.RUnlock() + + return new(big.Int).Set(self.td), self.currentBlock.Hash(), self.genesisBlock.Hash() +} + +func (self *BlockChain) SetProcessor(proc types.BlockProcessor) { + self.processor = proc +} + +func (self *BlockChain) State() *state.StateDB { + return state.New(self.CurrentBlock().Root(), self.chainDb) +} + +func (bc *BlockChain) setLastState() error { + head := GetHeadBlockHash(bc.chainDb) + if head != (common.Hash{}) { + block := bc.GetBlock(head) + if block != nil { + bc.currentBlock = block + } + } else { + bc.Reset() + } + bc.td = bc.GetTd(bc.currentBlock.Hash()) + bc.currentGasLimit = CalcGasLimit(bc.currentBlock) + + if glog.V(logger.Info) { + glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td) + } + + return nil +} + +// Reset purges the entire blockchain, restoring it to its genesis state. +func (bc *BlockChain) Reset() { + bc.ResetWithGenesisBlock(bc.genesisBlock) +} + +// ResetWithGenesisBlock purges the entire blockchain, restoring it to the +// specified genesis state. +func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) { + bc.mu.Lock() + defer bc.mu.Unlock() + + // Dump the entire block chain and purge the caches + for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) { + DeleteBlock(bc.chainDb, block.Hash()) + } + bc.headerCache.Purge() + bc.bodyCache.Purge() + bc.bodyRLPCache.Purge() + bc.blockCache.Purge() + bc.futureBlocks.Purge() + + // Prepare the genesis block and reinitialize the chain + if err := WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil { + glog.Fatalf("failed to write genesis block TD: %v", err) + } + if err := WriteBlock(bc.chainDb, genesis); err != nil { + glog.Fatalf("failed to write genesis block: %v", err) + } + bc.genesisBlock = genesis + bc.insert(bc.genesisBlock) + bc.currentBlock = bc.genesisBlock + bc.setTotalDifficulty(genesis.Difficulty()) +} + +// Export writes the active chain to the given writer. +func (self *BlockChain) Export(w io.Writer) error { + if err := self.ExportN(w, uint64(0), self.currentBlock.NumberU64()); err != nil { + return err + } + return nil +} + +// ExportN writes a subset of the active chain to the given writer. 
+func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { + self.mu.RLock() + defer self.mu.RUnlock() + + if first > last { + return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) + } + + glog.V(logger.Info).Infof("exporting %d blocks...\n", last-first+1) + + for nr := first; nr <= last; nr++ { + block := self.GetBlockByNumber(nr) + if block == nil { + return fmt.Errorf("export failed on #%d: not found", nr) + } + + if err := block.EncodeRLP(w); err != nil { + return err + } + } + + return nil +} + +// insert injects a block into the current chain block chain. Note, this function +// assumes that the `mu` mutex is held! +func (bc *BlockChain) insert(block *types.Block) { + // Add the block to the canonical chain number scheme and mark as the head + if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil { + glog.Fatalf("failed to insert block number: %v", err) + } + bc.currentBlock = block +} + +// Accessors +func (bc *BlockChain) Genesis() *types.Block { + return bc.genesisBlock +} + +// HasHeader checks if a block header is present in the database or not, caching +// it if present. +func (bc *BlockChain) HasHeader(hash common.Hash) bool { + return bc.GetHeader(hash) != nil +} + +// GetHeader retrieves a block header from the database by hash, caching it if +// found. +func (self *BlockChain) GetHeader(hash common.Hash) *types.Header { + // Short circuit if the header's already in the cache, retrieve otherwise + if header, ok := self.headerCache.Get(hash); ok { + return header.(*types.Header) + } + header := GetHeader(self.chainDb, hash) + if header == nil { + return nil + } + // Cache the found header for next time and return + self.headerCache.Add(header.Hash(), header) + return header +} + +// GetHeaderByNumber retrieves a block header from the database by number, +// caching it (associated with its hash) if found. +func (self *BlockChain) GetHeaderByNumber(number uint64) *types.Header { + hash := GetCanonicalHash(self.chainDb, number) + if hash == (common.Hash{}) { + return nil + } + return self.GetHeader(hash) +} + +// GetBody retrieves a block body (transactions and uncles) from the database by +// hash, caching it if found. +func (self *BlockChain) GetBody(hash common.Hash) *types.Body { + // Short circuit if the body's already in the cache, retrieve otherwise + if cached, ok := self.bodyCache.Get(hash); ok { + body := cached.(*types.Body) + return body + } + body := GetBody(self.chainDb, hash) + if body == nil { + return nil + } + // Cache the found body for next time and return + self.bodyCache.Add(hash, body) + return body +} + +// GetBodyRLP retrieves a block body in RLP encoding from the database by hash, +// caching it if found. +func (self *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue { + // Short circuit if the body's already in the cache, retrieve otherwise + if cached, ok := self.bodyRLPCache.Get(hash); ok { + return cached.(rlp.RawValue) + } + body := GetBodyRLP(self.chainDb, hash) + if len(body) == 0 { + return nil + } + // Cache the found body for next time and return + self.bodyRLPCache.Add(hash, body) + return body +} + +// GetTd retrieves a block's total difficulty in the canonical chain from the +// database by hash, caching it if found. 
+func (self *BlockChain) GetTd(hash common.Hash) *big.Int { + // Short circuit if the td's already in the cache, retrieve otherwise + if cached, ok := self.tdCache.Get(hash); ok { + return cached.(*big.Int) + } + td := GetTd(self.chainDb, hash) + if td == nil { + return nil + } + // Cache the found body for next time and return + self.tdCache.Add(hash, td) + return td +} + +// HasBlock checks if a block is fully present in the database or not, caching +// it if present. +func (bc *BlockChain) HasBlock(hash common.Hash) bool { + return bc.GetBlock(hash) != nil +} + +// GetBlock retrieves a block from the database by hash, caching it if found. +func (self *BlockChain) GetBlock(hash common.Hash) *types.Block { + // Short circuit if the block's already in the cache, retrieve otherwise + if block, ok := self.blockCache.Get(hash); ok { + return block.(*types.Block) + } + block := GetBlock(self.chainDb, hash) + if block == nil { + return nil + } + // Cache the found block for next time and return + self.blockCache.Add(block.Hash(), block) + return block +} + +// GetBlockByNumber retrieves a block from the database by number, caching it +// (associated with its hash) if found. +func (self *BlockChain) GetBlockByNumber(number uint64) *types.Block { + hash := GetCanonicalHash(self.chainDb, number) + if hash == (common.Hash{}) { + return nil + } + return self.GetBlock(hash) +} + +// GetBlockHashesFromHash retrieves a number of block hashes starting at a given +// hash, fetching towards the genesis block. +func (self *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { + // Get the origin header from which to fetch + header := self.GetHeader(hash) + if header == nil { + return nil + } + // Iterate the headers until enough is collected or the genesis reached + chain := make([]common.Hash, 0, max) + for i := uint64(0); i < max; i++ { + if header = self.GetHeader(header.ParentHash); header == nil { + break + } + chain = append(chain, header.Hash()) + if header.Number.Cmp(common.Big0) == 0 { + break + } + } + return chain +} + +// [deprecated by eth/62] +// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. +func (self *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { + for i := 0; i < n; i++ { + block := self.GetBlock(hash) + if block == nil { + break + } + blocks = append(blocks, block) + hash = block.ParentHash() + } + return +} + +func (self *BlockChain) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) { + for i := 0; block != nil && i < length; i++ { + uncles = append(uncles, block.Uncles()...) + block = self.GetBlock(block.ParentHash()) + } + + return +} + +// setTotalDifficulty updates the TD of the chain manager. Note, this function +// assumes that the `mu` mutex is held! 
+func (bc *BlockChain) setTotalDifficulty(td *big.Int) { + bc.td = new(big.Int).Set(td) +} + +func (bc *BlockChain) Stop() { + if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { + return + } + close(bc.quit) + atomic.StoreInt32(&bc.procInterrupt, 1) + + bc.wg.Wait() + + glog.V(logger.Info).Infoln("Chain manager stopped") +} + +type queueEvent struct { + queue []interface{} + canonicalCount int + sideCount int + splitCount int +} + +func (self *BlockChain) procFutureBlocks() { + blocks := make([]*types.Block, self.futureBlocks.Len()) + for i, hash := range self.futureBlocks.Keys() { + block, _ := self.futureBlocks.Get(hash) + blocks[i] = block.(*types.Block) + } + if len(blocks) > 0 { + types.BlockBy(types.Number).Sort(blocks) + self.InsertChain(blocks) + } +} + +type writeStatus byte + +const ( + NonStatTy writeStatus = iota + CanonStatTy + SplitStatTy + SideStatTy +) + +// WriteBlock writes the block to the chain. +func (self *BlockChain) WriteBlock(block *types.Block) (status writeStatus, err error) { + self.wg.Add(1) + defer self.wg.Done() + + // Calculate the total difficulty of the block + ptd := self.GetTd(block.ParentHash()) + if ptd == nil { + return NonStatTy, ParentError(block.ParentHash()) + } + td := new(big.Int).Add(block.Difficulty(), ptd) + + self.mu.RLock() + cblock := self.currentBlock + self.mu.RUnlock() + + // Compare the TD of the last known block in the canonical chain to make sure it's greater. + // At this point it's possible that a different chain (fork) becomes the new canonical chain. + if td.Cmp(self.Td()) > 0 { + // chain fork + if block.ParentHash() != cblock.Hash() { + // during split we merge two different chains and create the new canonical chain + err := self.reorg(cblock, block) + if err != nil { + return NonStatTy, err + } + } + status = CanonStatTy + + self.mu.Lock() + self.setTotalDifficulty(td) + self.insert(block) + self.mu.Unlock() + } else { + status = SideStatTy + } + + if err := WriteTd(self.chainDb, block.Hash(), td); err != nil { + glog.Fatalf("failed to write block total difficulty: %v", err) + } + if err := WriteBlock(self.chainDb, block); err != nil { + glog.Fatalf("filed to write block contents: %v", err) + } + // Delete from future blocks + self.futureBlocks.Remove(block.Hash()) + + return +} + +// InsertChain will attempt to insert the given chain in to the canonical chain or, otherwise, create a fork. It an error is returned +// it will return the index number of the failing block as well an error describing what went wrong (for possible errors see core/errors.go). +func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) { + self.wg.Add(1) + defer self.wg.Done() + + self.chainmu.Lock() + defer self.chainmu.Unlock() + + // A queued approach to delivering events. This is generally + // faster than direct delivery and requires much less mutex + // acquiring. + var ( + queue = make([]interface{}, len(chain)) + queueEvent = queueEvent{queue: queue} + stats struct{ queued, processed, ignored int } + tstart = time.Now() + + nonceChecked = make([]bool, len(chain)) + ) + + // Start the parallel nonce verifier. + nonceAbort, nonceResults := verifyNoncesFromBlocks(self.pow, chain) + defer close(nonceAbort) + + txcount := 0 + for i, block := range chain { + if atomic.LoadInt32(&self.procInterrupt) == 1 { + glog.V(logger.Debug).Infoln("Premature abort during chain processing") + break + } + + bstart := time.Now() + // Wait for block i's nonce to be verified before processing + // its state transition. 
+ for !nonceChecked[i] { + r := <-nonceResults + nonceChecked[r.index] = true + if !r.valid { + block := chain[r.index] + return r.index, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()} + } + } + + if BadHashes[block.Hash()] { + err := BadHashError(block.Hash()) + blockErr(block, err) + return i, err + } + // Call in to the block processor and check for errors. It's likely that if one block fails + // all others will fail too (unless a known block is returned). + logs, receipts, err := self.processor.Process(block) + if err != nil { + if IsKnownBlockErr(err) { + stats.ignored++ + continue + } + + if err == BlockFutureErr { + // Allow up to MaxFuture second in the future blocks. If this limit + // is exceeded the chain is discarded and processed at a later time + // if given. + max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) + if block.Time().Cmp(max) == 1 { + return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max) + } + + self.futureBlocks.Add(block.Hash(), block) + stats.queued++ + continue + } + + if IsParentErr(err) && self.futureBlocks.Contains(block.ParentHash()) { + self.futureBlocks.Add(block.Hash(), block) + stats.queued++ + continue + } + + blockErr(block, err) + + go ReportBlock(block, err) + + return i, err + } + if err := PutBlockReceipts(self.chainDb, block, receipts); err != nil { + glog.V(logger.Warn).Infoln("error writing block receipts:", err) + } + + txcount += len(block.Transactions()) + // write the block to the chain and get the status + status, err := self.WriteBlock(block) + if err != nil { + return i, err + } + switch status { + case CanonStatTy: + if glog.V(logger.Debug) { + glog.Infof("[%v] inserted block #%d (%d TXs %v G %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), block.GasUsed(), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) + } + queue[i] = ChainEvent{block, block.Hash(), logs} + queueEvent.canonicalCount++ + + // This puts transactions in a extra db for rpc + PutTransactions(self.chainDb, block, block.Transactions()) + // store the receipts + PutReceipts(self.chainDb, receipts) + case SideStatTy: + if glog.V(logger.Detail) { + glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) + } + queue[i] = ChainSideEvent{block, logs} + queueEvent.sideCount++ + case SplitStatTy: + queue[i] = ChainSplitEvent{block, logs} + queueEvent.splitCount++ + } + stats.processed++ + } + + if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) { + tend := time.Since(tstart) + start, end := chain[0], chain[len(chain)-1] + glog.Infof("imported %d block(s) (%d queued %d ignored) including %d txs in %v. 
#%v [%x / %x]\n", stats.processed, stats.queued, stats.ignored, txcount, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4]) + } + + go self.eventMux.Post(queueEvent) + + return 0, nil +} + +// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them +// to be part of the new canonical chain and accumulates potential missing transactions and post an +// event about them +func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error { + self.mu.Lock() + defer self.mu.Unlock() + + var ( + newChain types.Blocks + commonBlock *types.Block + oldStart = oldBlock + newStart = newBlock + deletedTxs types.Transactions + ) + + // first reduce whoever is higher bound + if oldBlock.NumberU64() > newBlock.NumberU64() { + // reduce old chain + for oldBlock = oldBlock; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) { + deletedTxs = append(deletedTxs, oldBlock.Transactions()...) + } + } else { + // reduce new chain and append new chain blocks for inserting later on + for newBlock = newBlock; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) { + newChain = append(newChain, newBlock) + } + } + if oldBlock == nil { + return fmt.Errorf("Invalid old chain") + } + if newBlock == nil { + return fmt.Errorf("Invalid new chain") + } + + numSplit := newBlock.Number() + for { + if oldBlock.Hash() == newBlock.Hash() { + commonBlock = oldBlock + break + } + newChain = append(newChain, newBlock) + deletedTxs = append(deletedTxs, oldBlock.Transactions()...) + + oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash()) + if oldBlock == nil { + return fmt.Errorf("Invalid old chain") + } + if newBlock == nil { + return fmt.Errorf("Invalid new chain") + } + } + + if glog.V(logger.Debug) { + commonHash := commonBlock.Hash() + glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4]) + } + + var addedTxs types.Transactions + // insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly + for _, block := range newChain { + // insert the block in the canonical way, re-writing history + self.insert(block) + // write canonical receipts and transactions + PutTransactions(self.chainDb, block, block.Transactions()) + PutReceipts(self.chainDb, GetBlockReceipts(self.chainDb, block.Hash())) + + addedTxs = append(addedTxs, block.Transactions()...) 
+ } + + // calculate the difference between deleted and added transactions + diff := types.TxDifference(deletedTxs, addedTxs) + // When transactions get deleted from the database that means the + // receipts that were created in the fork must also be deleted + for _, tx := range diff { + DeleteReceipt(self.chainDb, tx.Hash()) + DeleteTransaction(self.chainDb, tx.Hash()) + } + // Must be posted in a goroutine because of the transaction pool trying + // to acquire the chain manager lock + go self.eventMux.Post(RemovedTransactionEvent{diff}) + + return nil +} + +func (self *BlockChain) update() { + events := self.eventMux.Subscribe(queueEvent{}) + futureTimer := time.Tick(5 * time.Second) +out: + for { + select { + case ev := <-events.Chan(): + switch ev := ev.(type) { + case queueEvent: + for _, event := range ev.queue { + switch event := event.(type) { + case ChainEvent: + // We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long + // and in most cases isn't even necessary. + if self.currentBlock.Hash() == event.Hash { + self.currentGasLimit = CalcGasLimit(event.Block) + self.eventMux.Post(ChainHeadEvent{event.Block}) + } + } + self.eventMux.Post(event) + } + } + case <-futureTimer: + self.procFutureBlocks() + case <-self.quit: + break out + } + } +} + +func blockErr(block *types.Block, err error) { + if glog.V(logger.Error) { + glog.Errorf("Bad block #%v (%s)\n", block.Number(), block.Hash().Hex()) + glog.Errorf(" %v", err) + } +} diff --git a/core/blockchain_test.go b/core/blockchain_test.go new file mode 100644 index 000000000..e034417ce --- /dev/null +++ b/core/blockchain_test.go @@ -0,0 +1,652 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package core + +import ( + "fmt" + "math/big" + "math/rand" + "os" + "path/filepath" + "runtime" + "strconv" + "testing" + + "github.com/ethereum/ethash" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/pow" + "github.com/ethereum/go-ethereum/rlp" + "github.com/hashicorp/golang-lru" +) + +func init() { + runtime.GOMAXPROCS(runtime.NumCPU()) +} + +func thePow() pow.PoW { + pow, _ := ethash.NewForTesting() + return pow +} + +func theBlockChain(db ethdb.Database, t *testing.T) *BlockChain { + var eventMux event.TypeMux + WriteTestNetGenesisBlock(db, 0) + blockchain, err := NewBlockChain(db, thePow(), &eventMux) + if err != nil { + t.Error("failed creating chainmanager:", err) + t.FailNow() + return nil + } + blockMan := NewBlockProcessor(db, nil, blockchain, &eventMux) + blockchain.SetProcessor(blockMan) + + return blockchain +} + +// Test fork of length N starting from block i +func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big.Int)) { + // switch databases to process the new chain + db, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + // copy old chain up to i into new db with deterministic canonical + bman2, err := newCanonical(i, db) + if err != nil { + t.Fatal("could not make new canonical in testFork", err) + } + // assert the bmans have the same block at i + bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash() + bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash() + if bi1 != bi2 { + fmt.Printf("%+v\n%+v\n\n", bi1, bi2) + t.Fatal("chains do not have the same hash at height", i) + } + bman2.bc.SetProcessor(bman2) + + // extend the fork + parent := bman2.bc.CurrentBlock() + chainB := makeChain(parent, N, db, forkSeed) + _, err = bman2.bc.InsertChain(chainB) + if err != nil { + t.Fatal("Insert chain error for fork:", err) + } + + tdpre := bman.bc.Td() + // Test the fork's blocks on the original chain + td, err := testChain(chainB, bman) + if err != nil { + t.Fatal("expected chainB not to give errors:", err) + } + // Compare difficulties + f(tdpre, td) + + // Loop over parents making sure reconstruction is done properly +} + +func printChain(bc *BlockChain) { + for i := bc.CurrentBlock().Number().Uint64(); i > 0; i-- { + b := bc.GetBlockByNumber(uint64(i)) + fmt.Printf("\t%x %v\n", b.Hash(), b.Difficulty()) + } +} + +// process blocks against a chain +func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) { + for _, block := range chainB { + _, _, err := bman.bc.processor.Process(block) + if err != nil { + if IsKnownBlockErr(err) { + continue + } + return nil, err + } + bman.bc.mu.Lock() + WriteTd(bman.bc.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), bman.bc.GetTd(block.ParentHash()))) + WriteBlock(bman.bc.chainDb, block) + bman.bc.mu.Unlock() + } + return bman.bc.GetTd(chainB[len(chainB)-1].Hash()), nil +} + +func loadChain(fn string, t *testing.T) (types.Blocks, error) { + fh, err := os.OpenFile(filepath.Join("..", "_data", fn), os.O_RDONLY, os.ModePerm) + if err != nil { + return nil, err + } + defer fh.Close() + + var chain types.Blocks + if err := rlp.Decode(fh, &chain); err != nil { + return nil, err + } + + return chain, nil +} + +func insertChain(done chan bool, blockchain *BlockChain, chain 
types.Blocks, t *testing.T) { + _, err := blockchain.InsertChain(chain) + if err != nil { + fmt.Println(err) + t.FailNow() + } + done <- true +} + +func TestExtendCanonical(t *testing.T) { + CanonicalLength := 5 + db, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + // make first chain starting from genesis + bman, err := newCanonical(CanonicalLength, db) + if err != nil { + t.Fatal("Could not make new canonical chain:", err) + } + f := func(td1, td2 *big.Int) { + if td2.Cmp(td1) <= 0 { + t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1) + } + } + // Start fork from current height (CanonicalLength) + testFork(t, bman, CanonicalLength, 1, f) + testFork(t, bman, CanonicalLength, 2, f) + testFork(t, bman, CanonicalLength, 5, f) + testFork(t, bman, CanonicalLength, 10, f) +} + +func TestShorterFork(t *testing.T) { + db, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + // make first chain starting from genesis + bman, err := newCanonical(10, db) + if err != nil { + t.Fatal("Could not make new canonical chain:", err) + } + f := func(td1, td2 *big.Int) { + if td2.Cmp(td1) >= 0 { + t.Error("expected chainB to have lower difficulty. Got", td2, "expected less than", td1) + } + } + // Sum of numbers must be less than 10 + // for this to be a shorter fork + testFork(t, bman, 0, 3, f) + testFork(t, bman, 0, 7, f) + testFork(t, bman, 1, 1, f) + testFork(t, bman, 1, 7, f) + testFork(t, bman, 5, 3, f) + testFork(t, bman, 5, 4, f) +} + +func TestLongerFork(t *testing.T) { + db, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + // make first chain starting from genesis + bman, err := newCanonical(10, db) + if err != nil { + t.Fatal("Could not make new canonical chain:", err) + } + f := func(td1, td2 *big.Int) { + if td2.Cmp(td1) <= 0 { + t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1) + } + } + // Sum of numbers must be greater than 10 + // for this to be a longer fork + testFork(t, bman, 0, 11, f) + testFork(t, bman, 0, 15, f) + testFork(t, bman, 1, 10, f) + testFork(t, bman, 1, 12, f) + testFork(t, bman, 5, 6, f) + testFork(t, bman, 5, 8, f) +} + +func TestEqualFork(t *testing.T) { + db, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + bman, err := newCanonical(10, db) + if err != nil { + t.Fatal("Could not make new canonical chain:", err) + } + f := func(td1, td2 *big.Int) { + if td2.Cmp(td1) != 0 { + t.Error("expected chainB to have equal difficulty. 
Got", td2, "expected ", td1) + } + } + // Sum of numbers must be equal to 10 + // for this to be an equal fork + testFork(t, bman, 0, 10, f) + testFork(t, bman, 1, 9, f) + testFork(t, bman, 2, 8, f) + testFork(t, bman, 5, 5, f) + testFork(t, bman, 6, 4, f) + testFork(t, bman, 9, 1, f) +} + +func TestBrokenChain(t *testing.T) { + db, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + bman, err := newCanonical(10, db) + if err != nil { + t.Fatal("Could not make new canonical chain:", err) + } + db2, err := ethdb.NewMemDatabase() + if err != nil { + t.Fatal("Failed to create db:", err) + } + bman2, err := newCanonical(10, db2) + if err != nil { + t.Fatal("Could not make new canonical chain:", err) + } + bman2.bc.SetProcessor(bman2) + parent := bman2.bc.CurrentBlock() + chainB := makeChain(parent, 5, db2, forkSeed) + chainB = chainB[1:] + _, err = testChain(chainB, bman) + if err == nil { + t.Error("expected broken chain to return error") + } +} + +func TestChainInsertions(t *testing.T) { + t.Skip("Skipped: outdated test files") + + db, _ := ethdb.NewMemDatabase() + + chain1, err := loadChain("valid1", t) + if err != nil { + fmt.Println(err) + t.FailNow() + } + + chain2, err := loadChain("valid2", t) + if err != nil { + fmt.Println(err) + t.FailNow() + } + + blockchain := theBlockChain(db, t) + + const max = 2 + done := make(chan bool, max) + + go insertChain(done, blockchain, chain1, t) + go insertChain(done, blockchain, chain2, t) + + for i := 0; i < max; i++ { + <-done + } + + if chain2[len(chain2)-1].Hash() != blockchain.CurrentBlock().Hash() { + t.Error("chain2 is canonical and shouldn't be") + } + + if chain1[len(chain1)-1].Hash() != blockchain.CurrentBlock().Hash() { + t.Error("chain1 isn't canonical and should be") + } +} + +func TestChainMultipleInsertions(t *testing.T) { + t.Skip("Skipped: outdated test files") + + db, _ := ethdb.NewMemDatabase() + + const max = 4 + chains := make([]types.Blocks, max) + var longest int + for i := 0; i < max; i++ { + var err error + name := "valid" + strconv.Itoa(i+1) + chains[i], err = loadChain(name, t) + if len(chains[i]) >= len(chains[longest]) { + longest = i + } + fmt.Println("loaded", name, "with a length of", len(chains[i])) + if err != nil { + fmt.Println(err) + t.FailNow() + } + } + + blockchain := theBlockChain(db, t) + + done := make(chan bool, max) + for i, chain := range chains { + // XXX the go routine would otherwise reference the same (chain[3]) variable and fail + i := i + chain := chain + go func() { + insertChain(done, blockchain, chain, t) + fmt.Println(i, "done") + }() + } + + for i := 0; i < max; i++ { + <-done + } + + if chains[longest][len(chains[longest])-1].Hash() != blockchain.CurrentBlock().Hash() { + t.Error("Invalid canonical chain") + } +} + +type bproc struct{} + +func (bproc) Process(*types.Block) (vm.Logs, types.Receipts, error) { return nil, nil, nil } + +func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block { + var chain []*types.Block + for i, difficulty := range d { + header := &types.Header{ + Coinbase: common.Address{seed}, + Number: big.NewInt(int64(i + 1)), + Difficulty: big.NewInt(int64(difficulty)), + } + if i == 0 { + header.ParentHash = genesis.Hash() + } else { + header.ParentHash = chain[i-1].Hash() + } + block := types.NewBlockWithHeader(header) + chain = append(chain, block) + } + return chain +} + +func chm(genesis *types.Block, db ethdb.Database) *BlockChain { + var eventMux event.TypeMux + bc := &BlockChain{chainDb: db, 
genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}} + bc.headerCache, _ = lru.New(100) + bc.bodyCache, _ = lru.New(100) + bc.bodyRLPCache, _ = lru.New(100) + bc.tdCache, _ = lru.New(100) + bc.blockCache, _ = lru.New(100) + bc.futureBlocks, _ = lru.New(100) + bc.processor = bproc{} + bc.ResetWithGenesisBlock(genesis) + + return bc +} + +func TestReorgLongest(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + + genesis, err := WriteTestNetGenesisBlock(db, 0) + if err != nil { + t.Error(err) + t.FailNow() + } + bc := chm(genesis, db) + + chain1 := makeChainWithDiff(genesis, []int{1, 2, 4}, 10) + chain2 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11) + + bc.InsertChain(chain1) + bc.InsertChain(chain2) + + prev := bc.CurrentBlock() + for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) { + if prev.ParentHash() != block.Hash() { + t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash()) + } + } +} + +func TestBadHashes(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + genesis, err := WriteTestNetGenesisBlock(db, 0) + if err != nil { + t.Error(err) + t.FailNow() + } + bc := chm(genesis, db) + + chain := makeChainWithDiff(genesis, []int{1, 2, 4}, 10) + BadHashes[chain[2].Header().Hash()] = true + + _, err = bc.InsertChain(chain) + if !IsBadHashError(err) { + t.Errorf("error mismatch: want: BadHashError, have: %v", err) + } +} + +func TestReorgBadHashes(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + genesis, err := WriteTestNetGenesisBlock(db, 0) + if err != nil { + t.Error(err) + t.FailNow() + } + bc := chm(genesis, db) + + chain := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11) + bc.InsertChain(chain) + + if chain[3].Header().Hash() != bc.LastBlockHash() { + t.Errorf("last block hash mismatch: want: %x, have: %x", chain[3].Header().Hash(), bc.LastBlockHash()) + } + + // NewChainManager should check BadHashes when loading it db + BadHashes[chain[3].Header().Hash()] = true + + var eventMux event.TypeMux + ncm, err := NewBlockChain(db, FakePow{}, &eventMux) + if err != nil { + t.Errorf("NewChainManager err: %s", err) + } + + // check it set head to (valid) parent of bad hash block + if chain[2].Header().Hash() != ncm.LastBlockHash() { + t.Errorf("last block hash mismatch: want: %x, have: %x", chain[2].Header().Hash(), ncm.LastBlockHash()) + } + + if chain[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 { + t.Errorf("current block gasLimit mismatch: want: %x, have: %x", chain[2].Header().GasLimit, ncm.GasLimit()) + } +} + +func TestReorgShortest(t *testing.T) { + db, _ := ethdb.NewMemDatabase() + genesis, err := WriteTestNetGenesisBlock(db, 0) + if err != nil { + t.Error(err) + t.FailNow() + } + bc := chm(genesis, db) + + chain1 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 10) + chain2 := makeChainWithDiff(genesis, []int{1, 10}, 11) + + bc.InsertChain(chain1) + bc.InsertChain(chain2) + + prev := bc.CurrentBlock() + for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) { + if prev.ParentHash() != block.Hash() { + t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash()) + } + } +} + +func TestInsertNonceError(t *testing.T) { + for i := 1; i < 25 && !t.Failed(); i++ { + db, _ := ethdb.NewMemDatabase() + genesis, err := WriteTestNetGenesisBlock(db, 0) + if err != nil { + t.Error(err) + t.FailNow() + } + bc := chm(genesis, db) + bc.processor = 
NewBlockProcessor(db, bc.pow, bc, bc.eventMux) + blocks := makeChain(bc.currentBlock, i, db, 0) + + fail := rand.Int() % len(blocks) + failblock := blocks[fail] + bc.pow = failPow{failblock.NumberU64()} + n, err := bc.InsertChain(blocks) + + // Check that the returned error indicates the nonce failure. + if n != fail { + t.Errorf("(i=%d) wrong failed block index: got %d, want %d", i, n, fail) + } + if !IsBlockNonceErr(err) { + t.Fatalf("(i=%d) got %q, want a nonce error", i, err) + } + nerr := err.(*BlockNonceErr) + if nerr.Number.Cmp(failblock.Number()) != 0 { + t.Errorf("(i=%d) wrong block number in error, got %v, want %v", i, nerr.Number, failblock.Number()) + } + if nerr.Hash != failblock.Hash() { + t.Errorf("(i=%d) wrong block hash in error, got %v, want %v", i, nerr.Hash, failblock.Hash()) + } + + // Check that all no blocks after the failing block have been inserted. + for _, block := range blocks[fail:] { + if bc.HasBlock(block.Hash()) { + t.Errorf("(i=%d) invalid block %d present in chain", i, block.NumberU64()) + } + } + } +} + +// Tests that chain reorganizations handle transaction removals and reinsertions. +func TestChainTxReorgs(t *testing.T) { + params.MinGasLimit = big.NewInt(125000) // Minimum the gas limit may ever be. + params.GenesisGasLimit = big.NewInt(3141592) // Gas limit of the Genesis block. + + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + addr3 = crypto.PubkeyToAddress(key3.PublicKey) + db, _ = ethdb.NewMemDatabase() + ) + genesis := WriteGenesisBlockForTesting(db, + GenesisAccount{addr1, big.NewInt(1000000)}, + GenesisAccount{addr2, big.NewInt(1000000)}, + GenesisAccount{addr3, big.NewInt(1000000)}, + ) + // Create two transactions shared between the chains: + // - postponed: transaction included at a later block in the forked chain + // - swapped: transaction included at the same block number in the forked chain + postponed, _ := types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1) + swapped, _ := types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1) + + // Create two transactions that will be dropped by the forked chain: + // - pastDrop: transaction dropped retroactively from a past block + // - freshDrop: transaction dropped exactly at the block where the reorg is detected + var pastDrop, freshDrop *types.Transaction + + // Create three transactions that will be added in the forked chain: + // - pastAdd: transaction added before the reorganiztion is detected + // - freshAdd: transaction added at the exact block the reorg is detected + // - futureAdd: transaction added after the reorg has already finished + var pastAdd, freshAdd, futureAdd *types.Transaction + + chain := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) { + switch i { + case 0: + pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2) + + gen.AddTx(pastDrop) // This transaction will be dropped in the fork from below the split point + gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork + + case 2: + freshDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, 
big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2) + + gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point + gen.AddTx(swapped) // This transaction will be swapped out at the exact height + + gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain + } + }) + // Import the chain. This runs all block validation rules. + evmux := &event.TypeMux{} + chainman, _ := NewBlockChain(db, FakePow{}, evmux) + chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux)) + if i, err := chainman.InsertChain(chain); err != nil { + t.Fatalf("failed to insert original chain[%d]: %v", i, err) + } + + // overwrite the old chain + chain = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) { + switch i { + case 0: + pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3) + gen.AddTx(pastAdd) // This transaction needs to be injected during reorg + + case 2: + gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain + gen.AddTx(swapped) // This transaction was swapped from the exact current spot in the original chain + + freshAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3) + gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time + + case 3: + futureAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3) + gen.AddTx(futureAdd) // This transaction will be added after a full reorg + } + }) + if _, err := chainman.InsertChain(chain); err != nil { + t.Fatalf("failed to insert forked chain: %v", err) + } + + // removed tx + for i, tx := range (types.Transactions{pastDrop, freshDrop}) { + if GetTransaction(db, tx.Hash()) != nil { + t.Errorf("drop %d: tx found while shouldn't have been", i) + } + if GetReceipt(db, tx.Hash()) != nil { + t.Errorf("drop %d: receipt found while shouldn't have been", i) + } + } + // added tx + for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) { + if GetTransaction(db, tx.Hash()) == nil { + t.Errorf("add %d: expected tx to be found", i) + } + if GetReceipt(db, tx.Hash()) == nil { + t.Errorf("add %d: expected receipt to be found", i) + } + } + // shared tx + for i, tx := range (types.Transactions{postponed, swapped}) { + if GetTransaction(db, tx.Hash()) == nil { + t.Errorf("share %d: expected tx to be found", i) + } + if GetReceipt(db, tx.Hash()) == nil { + t.Errorf("share %d: expected receipt to be found", i) + } + } +} diff --git a/core/chain_makers.go b/core/chain_makers.go index 3af9b0b89..ba09b3029 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -153,7 +153,7 @@ func (b *BlockGen) OffsetTime(seconds int64) { // and their coinbase will be the zero address. // // Blocks created by GenerateChain do not contain valid proof of work -// values. Inserting them into ChainManager requires use of FakePow or +// values. Inserting them into BlockChain requires use of FakePow or // a similar non-validating proof of work implementation. 
func GenerateChain(parent *types.Block, db ethdb.Database, n int, gen func(int, *BlockGen)) []*types.Block { statedb := state.New(parent.Root(), db) @@ -205,7 +205,7 @@ func newCanonical(n int, db ethdb.Database) (*BlockProcessor, error) { evmux := &event.TypeMux{} WriteTestNetGenesisBlock(db, 0) - chainman, _ := NewChainManager(db, FakePow{}, evmux) + chainman, _ := NewBlockChain(db, FakePow{}, evmux) bman := NewBlockProcessor(db, FakePow{}, chainman, evmux) bman.bc.SetProcessor(bman) parent := bman.bc.CurrentBlock() diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go index ac18e5e0b..b33af8d87 100644 --- a/core/chain_makers_test.go +++ b/core/chain_makers_test.go @@ -77,7 +77,7 @@ func ExampleGenerateChain() { // Import the chain. This runs all block validation rules. evmux := &event.TypeMux{} - chainman, _ := NewChainManager(db, FakePow{}, evmux) + chainman, _ := NewBlockChain(db, FakePow{}, evmux) chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux)) if i, err := chainman.InsertChain(chain); err != nil { fmt.Printf("insert error (block %d): %v\n", i, err) diff --git a/core/chain_manager.go b/core/chain_manager.go deleted file mode 100644 index 49f831a59..000000000 --- a/core/chain_manager.go +++ /dev/null @@ -1,849 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package core implements the Ethereum consensus protocol. 
-package core - -import ( - "errors" - "fmt" - "io" - "math/big" - "sync" - "sync/atomic" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/logger" - "github.com/ethereum/go-ethereum/logger/glog" - "github.com/ethereum/go-ethereum/metrics" - "github.com/ethereum/go-ethereum/pow" - "github.com/ethereum/go-ethereum/rlp" - "github.com/hashicorp/golang-lru" -) - -var ( - chainlogger = logger.NewLogger("CHAIN") - jsonlogger = logger.NewJsonLogger() - - blockInsertTimer = metrics.NewTimer("chain/inserts") - - ErrNoGenesis = errors.New("Genesis not found in chain") -) - -const ( - headerCacheLimit = 512 - bodyCacheLimit = 256 - tdCacheLimit = 1024 - blockCacheLimit = 256 - maxFutureBlocks = 256 - maxTimeFutureBlocks = 30 - checkpointLimit = 200 -) - -type ChainManager struct { - //eth EthManager - chainDb ethdb.Database - processor types.BlockProcessor - eventMux *event.TypeMux - genesisBlock *types.Block - // Last known total difficulty - mu sync.RWMutex - chainmu sync.RWMutex - tsmu sync.RWMutex - - checkpoint int // checkpoint counts towards the new checkpoint - td *big.Int - currentBlock *types.Block - currentGasLimit *big.Int - - headerCache *lru.Cache // Cache for the most recent block headers - bodyCache *lru.Cache // Cache for the most recent block bodies - bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format - tdCache *lru.Cache // Cache for the most recent block total difficulties - blockCache *lru.Cache // Cache for the most recent entire blocks - futureBlocks *lru.Cache // future blocks are blocks added for later processing - - quit chan struct{} - running int32 // running must be called automically - // procInterrupt must be atomically called - procInterrupt int32 // interrupt signaler for block processing - wg sync.WaitGroup - - pow pow.PoW -} - -func NewChainManager(chainDb ethdb.Database, pow pow.PoW, mux *event.TypeMux) (*ChainManager, error) { - headerCache, _ := lru.New(headerCacheLimit) - bodyCache, _ := lru.New(bodyCacheLimit) - bodyRLPCache, _ := lru.New(bodyCacheLimit) - tdCache, _ := lru.New(tdCacheLimit) - blockCache, _ := lru.New(blockCacheLimit) - futureBlocks, _ := lru.New(maxFutureBlocks) - - bc := &ChainManager{ - chainDb: chainDb, - eventMux: mux, - quit: make(chan struct{}), - headerCache: headerCache, - bodyCache: bodyCache, - bodyRLPCache: bodyRLPCache, - tdCache: tdCache, - blockCache: blockCache, - futureBlocks: futureBlocks, - pow: pow, - } - - bc.genesisBlock = bc.GetBlockByNumber(0) - if bc.genesisBlock == nil { - reader, err := NewDefaultGenesisReader() - if err != nil { - return nil, err - } - bc.genesisBlock, err = WriteGenesisBlock(chainDb, reader) - if err != nil { - return nil, err - } - glog.V(logger.Info).Infoln("WARNING: Wrote default ethereum genesis block") - } - if err := bc.setLastState(); err != nil { - return nil, err - } - // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain - for hash, _ := range BadHashes { - if block := bc.GetBlock(hash); block != nil { - glog.V(logger.Error).Infof("Found bad hash. Reorganising chain to state %x\n", block.ParentHash().Bytes()[:4]) - block = bc.GetBlock(block.ParentHash()) - if block == nil { - glog.Fatal("Unable to complete. Parent block not found. 
Corrupted DB?") - } - bc.SetHead(block) - - glog.V(logger.Error).Infoln("Chain reorg was successfull. Resuming normal operation") - } - } - // Take ownership of this particular state - go bc.update() - return bc, nil -} - -func (bc *ChainManager) SetHead(head *types.Block) { - bc.mu.Lock() - defer bc.mu.Unlock() - - for block := bc.currentBlock; block != nil && block.Hash() != head.Hash(); block = bc.GetBlock(block.ParentHash()) { - DeleteBlock(bc.chainDb, block.Hash()) - } - bc.headerCache.Purge() - bc.bodyCache.Purge() - bc.bodyRLPCache.Purge() - bc.blockCache.Purge() - bc.futureBlocks.Purge() - - bc.currentBlock = head - bc.setTotalDifficulty(bc.GetTd(head.Hash())) - bc.insert(head) - bc.setLastState() -} - -func (self *ChainManager) Td() *big.Int { - self.mu.RLock() - defer self.mu.RUnlock() - - return new(big.Int).Set(self.td) -} - -func (self *ChainManager) GasLimit() *big.Int { - self.mu.RLock() - defer self.mu.RUnlock() - - return self.currentBlock.GasLimit() -} - -func (self *ChainManager) LastBlockHash() common.Hash { - self.mu.RLock() - defer self.mu.RUnlock() - - return self.currentBlock.Hash() -} - -func (self *ChainManager) CurrentBlock() *types.Block { - self.mu.RLock() - defer self.mu.RUnlock() - - return self.currentBlock -} - -func (self *ChainManager) Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) { - self.mu.RLock() - defer self.mu.RUnlock() - - return new(big.Int).Set(self.td), self.currentBlock.Hash(), self.genesisBlock.Hash() -} - -func (self *ChainManager) SetProcessor(proc types.BlockProcessor) { - self.processor = proc -} - -func (self *ChainManager) State() *state.StateDB { - return state.New(self.CurrentBlock().Root(), self.chainDb) -} - -func (bc *ChainManager) recover() bool { - data, _ := bc.chainDb.Get([]byte("checkpoint")) - if len(data) != 0 { - block := bc.GetBlock(common.BytesToHash(data)) - if block != nil { - if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil { - glog.Fatalf("failed to write database head number: %v", err) - } - if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil { - glog.Fatalf("failed to write database head hash: %v", err) - } - bc.currentBlock = block - return true - } - } - return false -} - -func (bc *ChainManager) setLastState() error { - head := GetHeadBlockHash(bc.chainDb) - if head != (common.Hash{}) { - block := bc.GetBlock(head) - if block != nil { - bc.currentBlock = block - } else { - glog.Infof("LastBlock (%x) not found. Recovering...\n", head) - if bc.recover() { - glog.Infof("Recover successful") - } else { - glog.Fatalf("Recover failed. Please report") - } - } - } else { - bc.Reset() - } - bc.td = bc.GetTd(bc.currentBlock.Hash()) - bc.currentGasLimit = CalcGasLimit(bc.currentBlock) - - if glog.V(logger.Info) { - glog.Infof("Last block (#%v) %x TD=%v\n", bc.currentBlock.Number(), bc.currentBlock.Hash(), bc.td) - } - - return nil -} - -// Reset purges the entire blockchain, restoring it to its genesis state. -func (bc *ChainManager) Reset() { - bc.ResetWithGenesisBlock(bc.genesisBlock) -} - -// ResetWithGenesisBlock purges the entire blockchain, restoring it to the -// specified genesis state. 
-func (bc *ChainManager) ResetWithGenesisBlock(genesis *types.Block) { - bc.mu.Lock() - defer bc.mu.Unlock() - - // Dump the entire block chain and purge the caches - for block := bc.currentBlock; block != nil; block = bc.GetBlock(block.ParentHash()) { - DeleteBlock(bc.chainDb, block.Hash()) - } - bc.headerCache.Purge() - bc.bodyCache.Purge() - bc.bodyRLPCache.Purge() - bc.blockCache.Purge() - bc.futureBlocks.Purge() - - // Prepare the genesis block and reinitialize the chain - if err := WriteTd(bc.chainDb, genesis.Hash(), genesis.Difficulty()); err != nil { - glog.Fatalf("failed to write genesis block TD: %v", err) - } - if err := WriteBlock(bc.chainDb, genesis); err != nil { - glog.Fatalf("failed to write genesis block: %v", err) - } - bc.genesisBlock = genesis - bc.insert(bc.genesisBlock) - bc.currentBlock = bc.genesisBlock - bc.setTotalDifficulty(genesis.Difficulty()) -} - -// Export writes the active chain to the given writer. -func (self *ChainManager) Export(w io.Writer) error { - if err := self.ExportN(w, uint64(0), self.currentBlock.NumberU64()); err != nil { - return err - } - return nil -} - -// ExportN writes a subset of the active chain to the given writer. -func (self *ChainManager) ExportN(w io.Writer, first uint64, last uint64) error { - self.mu.RLock() - defer self.mu.RUnlock() - - if first > last { - return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) - } - - glog.V(logger.Info).Infof("exporting %d blocks...\n", last-first+1) - - for nr := first; nr <= last; nr++ { - block := self.GetBlockByNumber(nr) - if block == nil { - return fmt.Errorf("export failed on #%d: not found", nr) - } - - if err := block.EncodeRLP(w); err != nil { - return err - } - } - - return nil -} - -// insert injects a block into the current chain block chain. Note, this function -// assumes that the `mu` mutex is held! -func (bc *ChainManager) insert(block *types.Block) { - // Add the block to the canonical chain number scheme and mark as the head - if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil { - glog.Fatalf("failed to insert block number: %v", err) - } - if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil { - glog.Fatalf("failed to insert block number: %v", err) - } - // Add a new restore point if we reached some limit - bc.checkpoint++ - if bc.checkpoint > checkpointLimit { - if err := bc.chainDb.Put([]byte("checkpoint"), block.Hash().Bytes()); err != nil { - glog.Fatalf("failed to create checkpoint: %v", err) - } - bc.checkpoint = 0 - } - // Update the internal internal state with the head block - bc.currentBlock = block -} - -// Accessors -func (bc *ChainManager) Genesis() *types.Block { - return bc.genesisBlock -} - -// HasHeader checks if a block header is present in the database or not, caching -// it if present. -func (bc *ChainManager) HasHeader(hash common.Hash) bool { - return bc.GetHeader(hash) != nil -} - -// GetHeader retrieves a block header from the database by hash, caching it if -// found. 
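// [Editor's sketch, not part of the patch] The insert() routine above is where
// the checkpointing mentioned in the commit message lives: every
// checkpointLimit head insertions it rewrites a "checkpoint" key that
// recover() can later fall back to. A toy version with an in-memory map
// standing in for ethdb and a shrunken limit (all names here are invented):
package main

import "fmt"

type kv map[string][]byte

const checkpointLimit = 3 // the real constant above is 200

type chain struct {
	db         kv
	checkpoint int
}

// insertHead refreshes the restore point every checkpointLimit insertions.
func (c *chain) insertHead(hash []byte) {
	c.checkpoint++
	if c.checkpoint > checkpointLimit {
		c.db["checkpoint"] = hash
		c.checkpoint = 0
	}
}

func main() {
	c := &chain{db: kv{}}
	for i := byte(1); i <= 8; i++ {
		c.insertHead([]byte{i})
	}
	fmt.Printf("checkpoint -> %x\n", c.db["checkpoint"]) // written at 4, overwritten at 8
}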
-func (self *ChainManager) GetHeader(hash common.Hash) *types.Header { - // Short circuit if the header's already in the cache, retrieve otherwise - if header, ok := self.headerCache.Get(hash); ok { - return header.(*types.Header) - } - header := GetHeader(self.chainDb, hash) - if header == nil { - return nil - } - // Cache the found header for next time and return - self.headerCache.Add(header.Hash(), header) - return header -} - -// GetHeaderByNumber retrieves a block header from the database by number, -// caching it (associated with its hash) if found. -func (self *ChainManager) GetHeaderByNumber(number uint64) *types.Header { - hash := GetCanonicalHash(self.chainDb, number) - if hash == (common.Hash{}) { - return nil - } - return self.GetHeader(hash) -} - -// GetBody retrieves a block body (transactions and uncles) from the database by -// hash, caching it if found. -func (self *ChainManager) GetBody(hash common.Hash) *types.Body { - // Short circuit if the body's already in the cache, retrieve otherwise - if cached, ok := self.bodyCache.Get(hash); ok { - body := cached.(*types.Body) - return body - } - body := GetBody(self.chainDb, hash) - if body == nil { - return nil - } - // Cache the found body for next time and return - self.bodyCache.Add(hash, body) - return body -} - -// GetBodyRLP retrieves a block body in RLP encoding from the database by hash, -// caching it if found. -func (self *ChainManager) GetBodyRLP(hash common.Hash) rlp.RawValue { - // Short circuit if the body's already in the cache, retrieve otherwise - if cached, ok := self.bodyRLPCache.Get(hash); ok { - return cached.(rlp.RawValue) - } - body := GetBodyRLP(self.chainDb, hash) - if len(body) == 0 { - return nil - } - // Cache the found body for next time and return - self.bodyRLPCache.Add(hash, body) - return body -} - -// GetTd retrieves a block's total difficulty in the canonical chain from the -// database by hash, caching it if found. -func (self *ChainManager) GetTd(hash common.Hash) *big.Int { - // Short circuit if the td's already in the cache, retrieve otherwise - if cached, ok := self.tdCache.Get(hash); ok { - return cached.(*big.Int) - } - td := GetTd(self.chainDb, hash) - if td == nil { - return nil - } - // Cache the found body for next time and return - self.tdCache.Add(hash, td) - return td -} - -// HasBlock checks if a block is fully present in the database or not, caching -// it if present. -func (bc *ChainManager) HasBlock(hash common.Hash) bool { - return bc.GetBlock(hash) != nil -} - -// GetBlock retrieves a block from the database by hash, caching it if found. -func (self *ChainManager) GetBlock(hash common.Hash) *types.Block { - // Short circuit if the block's already in the cache, retrieve otherwise - if block, ok := self.blockCache.Get(hash); ok { - return block.(*types.Block) - } - block := GetBlock(self.chainDb, hash) - if block == nil { - return nil - } - // Cache the found block for next time and return - self.blockCache.Add(block.Hash(), block) - return block -} - -// GetBlockByNumber retrieves a block from the database by number, caching it -// (associated with its hash) if found. -func (self *ChainManager) GetBlockByNumber(number uint64) *types.Block { - hash := GetCanonicalHash(self.chainDb, number) - if hash == (common.Hash{}) { - return nil - } - return self.GetBlock(hash) -} - -// GetBlockHashesFromHash retrieves a number of block hashes starting at a given -// hash, fetching towards the genesis block. 
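// [Editor's sketch, not part of the patch] GetHeader, GetBody, GetBodyRLP,
// GetTd and GetBlock above all share one shape: consult the LRU cache, fall
// back to the database, then populate the cache on the way out. The same
// shape in isolation, with a plain map standing in for the chain database
// (cachedLookup and slowLookup are invented names):
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru"
)

// slowLookup stands in for the database read.
func slowLookup(db map[string]string, key string) (string, bool) {
	v, ok := db[key]
	return v, ok
}

func cachedLookup(cache *lru.Cache, db map[string]string, key string) (string, bool) {
	// Short circuit if the value is already cached.
	if v, ok := cache.Get(key); ok {
		return v.(string), true
	}
	v, ok := slowLookup(db, key)
	if !ok {
		return "", false
	}
	// Cache the found value for next time and return.
	cache.Add(key, v)
	return v, true
}

func main() {
	cache, _ := lru.New(256)
	db := map[string]string{"0xabc": "header-rlp"}
	v, _ := cachedLookup(cache, db, "0xabc") // db hit the first time, cached afterwards
	fmt.Println(v)
}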
-func (self *ChainManager) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash { - // Get the origin header from which to fetch - header := self.GetHeader(hash) - if header == nil { - return nil - } - // Iterate the headers until enough is collected or the genesis reached - chain := make([]common.Hash, 0, max) - for i := uint64(0); i < max; i++ { - if header = self.GetHeader(header.ParentHash); header == nil { - break - } - chain = append(chain, header.Hash()) - if header.Number.Cmp(common.Big0) == 0 { - break - } - } - return chain -} - -// [deprecated by eth/62] -// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors. -func (self *ChainManager) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) { - for i := 0; i < n; i++ { - block := self.GetBlock(hash) - if block == nil { - break - } - blocks = append(blocks, block) - hash = block.ParentHash() - } - return -} - -func (self *ChainManager) GetUnclesInChain(block *types.Block, length int) (uncles []*types.Header) { - for i := 0; block != nil && i < length; i++ { - uncles = append(uncles, block.Uncles()...) - block = self.GetBlock(block.ParentHash()) - } - - return -} - -// setTotalDifficulty updates the TD of the chain manager. Note, this function -// assumes that the `mu` mutex is held! -func (bc *ChainManager) setTotalDifficulty(td *big.Int) { - bc.td = new(big.Int).Set(td) -} - -func (bc *ChainManager) Stop() { - if !atomic.CompareAndSwapInt32(&bc.running, 0, 1) { - return - } - close(bc.quit) - atomic.StoreInt32(&bc.procInterrupt, 1) - - bc.wg.Wait() - - glog.V(logger.Info).Infoln("Chain manager stopped") -} - -type queueEvent struct { - queue []interface{} - canonicalCount int - sideCount int - splitCount int -} - -func (self *ChainManager) procFutureBlocks() { - blocks := make([]*types.Block, self.futureBlocks.Len()) - for i, hash := range self.futureBlocks.Keys() { - block, _ := self.futureBlocks.Get(hash) - blocks[i] = block.(*types.Block) - } - if len(blocks) > 0 { - types.BlockBy(types.Number).Sort(blocks) - self.InsertChain(blocks) - } -} - -type writeStatus byte - -const ( - NonStatTy writeStatus = iota - CanonStatTy - SplitStatTy - SideStatTy -) - -// WriteBlock writes the block to the chain. -func (self *ChainManager) WriteBlock(block *types.Block) (status writeStatus, err error) { - self.wg.Add(1) - defer self.wg.Done() - - // Calculate the total difficulty of the block - ptd := self.GetTd(block.ParentHash()) - if ptd == nil { - return NonStatTy, ParentError(block.ParentHash()) - } - td := new(big.Int).Add(block.Difficulty(), ptd) - - self.mu.RLock() - cblock := self.currentBlock - self.mu.RUnlock() - - // Compare the TD of the last known block in the canonical chain to make sure it's greater. - // At this point it's possible that a different chain (fork) becomes the new canonical chain. 
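// [Editor's sketch, not part of the patch] The comparison just below is the
// entire fork-choice rule: a block only becomes the new canonical head when
// its accumulated total difficulty exceeds the current head's; otherwise it is
// written as a side chain. A self-contained illustration (decide is an
// invented name):
package main

import (
	"fmt"
	"math/big"
)

// decide returns how a freshly written block would be classified.
func decide(currentTd, parentTd, blockDifficulty *big.Int) string {
	newTd := new(big.Int).Add(parentTd, blockDifficulty)
	if newTd.Cmp(currentTd) > 0 {
		return "canonical"
	}
	return "side"
}

func main() {
	fmt.Println(decide(big.NewInt(1000), big.NewInt(990), big.NewInt(20))) // canonical: 1010 > 1000
	fmt.Println(decide(big.NewInt(1000), big.NewInt(970), big.NewInt(20))) // side: 990 <= 1000
}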
- if td.Cmp(self.Td()) > 0 { - // chain fork - if block.ParentHash() != cblock.Hash() { - // during split we merge two different chains and create the new canonical chain - err := self.reorg(cblock, block) - if err != nil { - return NonStatTy, err - } - } - status = CanonStatTy - - self.mu.Lock() - self.setTotalDifficulty(td) - self.insert(block) - self.mu.Unlock() - } else { - status = SideStatTy - } - - if err := WriteTd(self.chainDb, block.Hash(), td); err != nil { - glog.Fatalf("failed to write block total difficulty: %v", err) - } - if err := WriteBlock(self.chainDb, block); err != nil { - glog.Fatalf("filed to write block contents: %v", err) - } - // Delete from future blocks - self.futureBlocks.Remove(block.Hash()) - - return -} - -// InsertChain will attempt to insert the given chain in to the canonical chain or, otherwise, create a fork. It an error is returned -// it will return the index number of the failing block as well an error describing what went wrong (for possible errors see core/errors.go). -func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) { - self.wg.Add(1) - defer self.wg.Done() - - self.chainmu.Lock() - defer self.chainmu.Unlock() - - // A queued approach to delivering events. This is generally - // faster than direct delivery and requires much less mutex - // acquiring. - var ( - queue = make([]interface{}, len(chain)) - queueEvent = queueEvent{queue: queue} - stats struct{ queued, processed, ignored int } - tstart = time.Now() - - nonceChecked = make([]bool, len(chain)) - ) - - // Start the parallel nonce verifier. - nonceAbort, nonceResults := verifyNoncesFromBlocks(self.pow, chain) - defer close(nonceAbort) - - txcount := 0 - for i, block := range chain { - if atomic.LoadInt32(&self.procInterrupt) == 1 { - glog.V(logger.Debug).Infoln("Premature abort during chain processing") - break - } - - bstart := time.Now() - // Wait for block i's nonce to be verified before processing - // its state transition. - for !nonceChecked[i] { - r := <-nonceResults - nonceChecked[r.index] = true - if !r.valid { - block := chain[r.index] - return r.index, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()} - } - } - - if BadHashes[block.Hash()] { - err := BadHashError(block.Hash()) - blockErr(block, err) - return i, err - } - // Call in to the block processor and check for errors. It's likely that if one block fails - // all others will fail too (unless a known block is returned). - logs, receipts, err := self.processor.Process(block) - if err != nil { - if IsKnownBlockErr(err) { - stats.ignored++ - continue - } - - if err == BlockFutureErr { - // Allow up to MaxFuture second in the future blocks. If this limit - // is exceeded the chain is discarded and processed at a later time - // if given. 
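// [Editor's sketch, not part of the patch] The branch that follows implements
// the future-block policy: timestamps up to maxTimeFutureBlocks (30s) ahead of
// local time are queued in futureBlocks and retried later; anything further
// ahead aborts the import. The same decision in isolation (invented names):
package main

import (
	"errors"
	"fmt"
	"time"
)

const maxTimeFutureBlocks = 30 // seconds, mirroring the constant above

var errTooFarInFuture = errors.New("block timestamp too far in the future")

// classifyFutureBlock reports whether a future-dated block should be queued.
func classifyFutureBlock(blockTime, now int64) (queue bool, err error) {
	max := now + maxTimeFutureBlocks
	if blockTime > max {
		return false, errTooFarInFuture
	}
	return true, nil
}

func main() {
	now := time.Now().Unix()
	fmt.Println(classifyFutureBlock(now+10, now))  // true <nil>  -> goes into futureBlocks
	fmt.Println(classifyFutureBlock(now+120, now)) // false, too far in the future
}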
- max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks) - if block.Time().Cmp(max) == 1 { - return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max) - } - - self.futureBlocks.Add(block.Hash(), block) - stats.queued++ - continue - } - - if IsParentErr(err) && self.futureBlocks.Contains(block.ParentHash()) { - self.futureBlocks.Add(block.Hash(), block) - stats.queued++ - continue - } - - blockErr(block, err) - - go ReportBlock(block, err) - - return i, err - } - if err := PutBlockReceipts(self.chainDb, block, receipts); err != nil { - glog.V(logger.Warn).Infoln("error writing block receipts:", err) - } - - txcount += len(block.Transactions()) - // write the block to the chain and get the status - status, err := self.WriteBlock(block) - if err != nil { - return i, err - } - switch status { - case CanonStatTy: - if glog.V(logger.Debug) { - glog.Infof("[%v] inserted block #%d (%d TXs %v G %d UNCs) (%x...). Took %v\n", time.Now().UnixNano(), block.Number(), len(block.Transactions()), block.GasUsed(), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) - } - queue[i] = ChainEvent{block, block.Hash(), logs} - queueEvent.canonicalCount++ - - // This puts transactions in a extra db for rpc - PutTransactions(self.chainDb, block, block.Transactions()) - // store the receipts - PutReceipts(self.chainDb, receipts) - case SideStatTy: - if glog.V(logger.Detail) { - glog.Infof("inserted forked block #%d (TD=%v) (%d TXs %d UNCs) (%x...). Took %v\n", block.Number(), block.Difficulty(), len(block.Transactions()), len(block.Uncles()), block.Hash().Bytes()[0:4], time.Since(bstart)) - } - queue[i] = ChainSideEvent{block, logs} - queueEvent.sideCount++ - case SplitStatTy: - queue[i] = ChainSplitEvent{block, logs} - queueEvent.splitCount++ - } - stats.processed++ - } - - if (stats.queued > 0 || stats.processed > 0 || stats.ignored > 0) && bool(glog.V(logger.Info)) { - tend := time.Since(tstart) - start, end := chain[0], chain[len(chain)-1] - glog.Infof("imported %d block(s) (%d queued %d ignored) including %d txs in %v. #%v [%x / %x]\n", stats.processed, stats.queued, stats.ignored, txcount, tend, end.Number(), start.Hash().Bytes()[:4], end.Hash().Bytes()[:4]) - } - - go self.eventMux.Post(queueEvent) - - return 0, nil -} - -// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them -// to be part of the new canonical chain and accumulates potential missing transactions and post an -// event about them -func (self *ChainManager) reorg(oldBlock, newBlock *types.Block) error { - self.mu.Lock() - defer self.mu.Unlock() - - var ( - newChain types.Blocks - commonBlock *types.Block - oldStart = oldBlock - newStart = newBlock - deletedTxs types.Transactions - ) - - // first reduce whoever is higher bound - if oldBlock.NumberU64() > newBlock.NumberU64() { - // reduce old chain - for oldBlock = oldBlock; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = self.GetBlock(oldBlock.ParentHash()) { - deletedTxs = append(deletedTxs, oldBlock.Transactions()...) 
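// [Editor's sketch, not part of the patch] reorg() above walks the two heads
// back in two phases: first the higher branch is reduced to the height of the
// lower one, then both are stepped back in lockstep until their hashes match,
// which is the common ancestor the new canonical chain is rebuilt from. A toy
// version with a simplified block type (blk and commonAncestor are invented):
package main

import "fmt"

type blk struct {
	num    uint64
	hash   string
	parent *blk
}

// commonAncestor equalises the heights, then walks both branches back together.
func commonAncestor(oldHead, newHead *blk) *blk {
	for oldHead != nil && newHead != nil && oldHead.num > newHead.num {
		oldHead = oldHead.parent
	}
	for oldHead != nil && newHead != nil && newHead.num > oldHead.num {
		newHead = newHead.parent
	}
	for oldHead != nil && newHead != nil {
		if oldHead.hash == newHead.hash {
			return oldHead
		}
		oldHead, newHead = oldHead.parent, newHead.parent
	}
	return nil
}

func main() {
	g := &blk{0, "g", nil}
	a1 := &blk{1, "a1", g}
	a2 := &blk{2, "a2", a1}
	b1 := &blk{1, "b1", g}
	b2 := &blk{2, "b2", b1}
	b3 := &blk{3, "b3", b2}
	fmt.Println(commonAncestor(a2, b3).hash) // g
}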
- } - } else { - // reduce new chain and append new chain blocks for inserting later on - for newBlock = newBlock; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = self.GetBlock(newBlock.ParentHash()) { - newChain = append(newChain, newBlock) - } - } - if oldBlock == nil { - return fmt.Errorf("Invalid old chain") - } - if newBlock == nil { - return fmt.Errorf("Invalid new chain") - } - - numSplit := newBlock.Number() - for { - if oldBlock.Hash() == newBlock.Hash() { - commonBlock = oldBlock - break - } - newChain = append(newChain, newBlock) - deletedTxs = append(deletedTxs, oldBlock.Transactions()...) - - oldBlock, newBlock = self.GetBlock(oldBlock.ParentHash()), self.GetBlock(newBlock.ParentHash()) - if oldBlock == nil { - return fmt.Errorf("Invalid old chain") - } - if newBlock == nil { - return fmt.Errorf("Invalid new chain") - } - } - - if glog.V(logger.Debug) { - commonHash := commonBlock.Hash() - glog.Infof("Chain split detected @ %x. Reorganising chain from #%v %x to %x", commonHash[:4], numSplit, oldStart.Hash().Bytes()[:4], newStart.Hash().Bytes()[:4]) - } - - var addedTxs types.Transactions - // insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly - for _, block := range newChain { - // insert the block in the canonical way, re-writing history - self.insert(block) - // write canonical receipts and transactions - PutTransactions(self.chainDb, block, block.Transactions()) - PutReceipts(self.chainDb, GetBlockReceipts(self.chainDb, block.Hash())) - - addedTxs = append(addedTxs, block.Transactions()...) - } - - // calculate the difference between deleted and added transactions - diff := types.TxDifference(deletedTxs, addedTxs) - // When transactions get deleted from the database that means the - // receipts that were created in the fork must also be deleted - for _, tx := range diff { - DeleteReceipt(self.chainDb, tx.Hash()) - DeleteTransaction(self.chainDb, tx.Hash()) - } - // Must be posted in a goroutine because of the transaction pool trying - // to acquire the chain manager lock - go self.eventMux.Post(RemovedTransactionEvent{diff}) - - return nil -} - -func (self *ChainManager) update() { - events := self.eventMux.Subscribe(queueEvent{}) - futureTimer := time.Tick(5 * time.Second) -out: - for { - select { - case ev := <-events.Chan(): - switch ev := ev.(type) { - case queueEvent: - for _, event := range ev.queue { - switch event := event.(type) { - case ChainEvent: - // We need some control over the mining operation. Acquiring locks and waiting for the miner to create new block takes too long - // and in most cases isn't even necessary. - if self.currentBlock.Hash() == event.Hash { - self.currentGasLimit = CalcGasLimit(event.Block) - self.eventMux.Post(ChainHeadEvent{event.Block}) - } - } - self.eventMux.Post(event) - } - } - case <-futureTimer: - self.procFutureBlocks() - case <-self.quit: - break out - } - } -} - -func blockErr(block *types.Block, err error) { - if glog.V(logger.Error) { - glog.Errorf("Bad block #%v (%s)\n", block.Number(), block.Hash().Hex()) - glog.Errorf(" %v", err) - } -} diff --git a/core/chain_manager_test.go b/core/chain_manager_test.go deleted file mode 100644 index 40286190b..000000000 --- a/core/chain_manager_test.go +++ /dev/null @@ -1,652 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "fmt" - "math/big" - "math/rand" - "os" - "path/filepath" - "runtime" - "strconv" - "testing" - - "github.com/ethereum/ethash" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/pow" - "github.com/ethereum/go-ethereum/rlp" - "github.com/hashicorp/golang-lru" -) - -func init() { - runtime.GOMAXPROCS(runtime.NumCPU()) -} - -func thePow() pow.PoW { - pow, _ := ethash.NewForTesting() - return pow -} - -func theChainManager(db ethdb.Database, t *testing.T) *ChainManager { - var eventMux event.TypeMux - WriteTestNetGenesisBlock(db, 0) - chainMan, err := NewChainManager(db, thePow(), &eventMux) - if err != nil { - t.Error("failed creating chainmanager:", err) - t.FailNow() - return nil - } - blockMan := NewBlockProcessor(db, nil, chainMan, &eventMux) - chainMan.SetProcessor(blockMan) - - return chainMan -} - -// Test fork of length N starting from block i -func testFork(t *testing.T, bman *BlockProcessor, i, N int, f func(td1, td2 *big.Int)) { - // switch databases to process the new chain - db, err := ethdb.NewMemDatabase() - if err != nil { - t.Fatal("Failed to create db:", err) - } - // copy old chain up to i into new db with deterministic canonical - bman2, err := newCanonical(i, db) - if err != nil { - t.Fatal("could not make new canonical in testFork", err) - } - // assert the bmans have the same block at i - bi1 := bman.bc.GetBlockByNumber(uint64(i)).Hash() - bi2 := bman2.bc.GetBlockByNumber(uint64(i)).Hash() - if bi1 != bi2 { - fmt.Printf("%+v\n%+v\n\n", bi1, bi2) - t.Fatal("chains do not have the same hash at height", i) - } - bman2.bc.SetProcessor(bman2) - - // extend the fork - parent := bman2.bc.CurrentBlock() - chainB := makeChain(parent, N, db, forkSeed) - _, err = bman2.bc.InsertChain(chainB) - if err != nil { - t.Fatal("Insert chain error for fork:", err) - } - - tdpre := bman.bc.Td() - // Test the fork's blocks on the original chain - td, err := testChain(chainB, bman) - if err != nil { - t.Fatal("expected chainB not to give errors:", err) - } - // Compare difficulties - f(tdpre, td) - - // Loop over parents making sure reconstruction is done properly -} - -func printChain(bc *ChainManager) { - for i := bc.CurrentBlock().Number().Uint64(); i > 0; i-- { - b := bc.GetBlockByNumber(uint64(i)) - fmt.Printf("\t%x %v\n", b.Hash(), b.Difficulty()) - } -} - -// process blocks against a chain -func testChain(chainB types.Blocks, bman *BlockProcessor) (*big.Int, error) { - for _, block := range chainB { - _, _, err := bman.bc.processor.Process(block) - if err != nil { - if IsKnownBlockErr(err) { - continue 
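// [Editor's sketch, not part of the patch] testChain below keeps the total
// difficulty bookkeeping by hand: each block's TD is its parent's TD plus its
// own difficulty, seeded from the genesis difficulty. In isolation (invented
// name accumulateTd):
package main

import (
	"fmt"
	"math/big"
)

// accumulateTd folds per-block difficulties into a running total difficulty.
func accumulateTd(genesisDifficulty *big.Int, difficulties []*big.Int) *big.Int {
	td := new(big.Int).Set(genesisDifficulty)
	for _, d := range difficulties {
		td = new(big.Int).Add(td, d)
	}
	return td
}

func main() {
	diffs := []*big.Int{big.NewInt(2), big.NewInt(3), big.NewInt(4)}
	fmt.Println(accumulateTd(big.NewInt(1), diffs)) // 10
}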
- } - return nil, err - } - bman.bc.mu.Lock() - WriteTd(bman.bc.chainDb, block.Hash(), new(big.Int).Add(block.Difficulty(), bman.bc.GetTd(block.ParentHash()))) - WriteBlock(bman.bc.chainDb, block) - bman.bc.mu.Unlock() - } - return bman.bc.GetTd(chainB[len(chainB)-1].Hash()), nil -} - -func loadChain(fn string, t *testing.T) (types.Blocks, error) { - fh, err := os.OpenFile(filepath.Join("..", "_data", fn), os.O_RDONLY, os.ModePerm) - if err != nil { - return nil, err - } - defer fh.Close() - - var chain types.Blocks - if err := rlp.Decode(fh, &chain); err != nil { - return nil, err - } - - return chain, nil -} - -func insertChain(done chan bool, chainMan *ChainManager, chain types.Blocks, t *testing.T) { - _, err := chainMan.InsertChain(chain) - if err != nil { - fmt.Println(err) - t.FailNow() - } - done <- true -} - -func TestExtendCanonical(t *testing.T) { - CanonicalLength := 5 - db, err := ethdb.NewMemDatabase() - if err != nil { - t.Fatal("Failed to create db:", err) - } - // make first chain starting from genesis - bman, err := newCanonical(CanonicalLength, db) - if err != nil { - t.Fatal("Could not make new canonical chain:", err) - } - f := func(td1, td2 *big.Int) { - if td2.Cmp(td1) <= 0 { - t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1) - } - } - // Start fork from current height (CanonicalLength) - testFork(t, bman, CanonicalLength, 1, f) - testFork(t, bman, CanonicalLength, 2, f) - testFork(t, bman, CanonicalLength, 5, f) - testFork(t, bman, CanonicalLength, 10, f) -} - -func TestShorterFork(t *testing.T) { - db, err := ethdb.NewMemDatabase() - if err != nil { - t.Fatal("Failed to create db:", err) - } - // make first chain starting from genesis - bman, err := newCanonical(10, db) - if err != nil { - t.Fatal("Could not make new canonical chain:", err) - } - f := func(td1, td2 *big.Int) { - if td2.Cmp(td1) >= 0 { - t.Error("expected chainB to have lower difficulty. Got", td2, "expected less than", td1) - } - } - // Sum of numbers must be less than 10 - // for this to be a shorter fork - testFork(t, bman, 0, 3, f) - testFork(t, bman, 0, 7, f) - testFork(t, bman, 1, 1, f) - testFork(t, bman, 1, 7, f) - testFork(t, bman, 5, 3, f) - testFork(t, bman, 5, 4, f) -} - -func TestLongerFork(t *testing.T) { - db, err := ethdb.NewMemDatabase() - if err != nil { - t.Fatal("Failed to create db:", err) - } - // make first chain starting from genesis - bman, err := newCanonical(10, db) - if err != nil { - t.Fatal("Could not make new canonical chain:", err) - } - f := func(td1, td2 *big.Int) { - if td2.Cmp(td1) <= 0 { - t.Error("expected chainB to have higher difficulty. Got", td2, "expected more than", td1) - } - } - // Sum of numbers must be greater than 10 - // for this to be a longer fork - testFork(t, bman, 0, 11, f) - testFork(t, bman, 0, 15, f) - testFork(t, bman, 1, 10, f) - testFork(t, bman, 1, 12, f) - testFork(t, bman, 5, 6, f) - testFork(t, bman, 5, 8, f) -} - -func TestEqualFork(t *testing.T) { - db, err := ethdb.NewMemDatabase() - if err != nil { - t.Fatal("Failed to create db:", err) - } - bman, err := newCanonical(10, db) - if err != nil { - t.Fatal("Could not make new canonical chain:", err) - } - f := func(td1, td2 *big.Int) { - if td2.Cmp(td1) != 0 { - t.Error("expected chainB to have equal difficulty. 
Got", td2, "expected ", td1) - } - } - // Sum of numbers must be equal to 10 - // for this to be an equal fork - testFork(t, bman, 0, 10, f) - testFork(t, bman, 1, 9, f) - testFork(t, bman, 2, 8, f) - testFork(t, bman, 5, 5, f) - testFork(t, bman, 6, 4, f) - testFork(t, bman, 9, 1, f) -} - -func TestBrokenChain(t *testing.T) { - db, err := ethdb.NewMemDatabase() - if err != nil { - t.Fatal("Failed to create db:", err) - } - bman, err := newCanonical(10, db) - if err != nil { - t.Fatal("Could not make new canonical chain:", err) - } - db2, err := ethdb.NewMemDatabase() - if err != nil { - t.Fatal("Failed to create db:", err) - } - bman2, err := newCanonical(10, db2) - if err != nil { - t.Fatal("Could not make new canonical chain:", err) - } - bman2.bc.SetProcessor(bman2) - parent := bman2.bc.CurrentBlock() - chainB := makeChain(parent, 5, db2, forkSeed) - chainB = chainB[1:] - _, err = testChain(chainB, bman) - if err == nil { - t.Error("expected broken chain to return error") - } -} - -func TestChainInsertions(t *testing.T) { - t.Skip("Skipped: outdated test files") - - db, _ := ethdb.NewMemDatabase() - - chain1, err := loadChain("valid1", t) - if err != nil { - fmt.Println(err) - t.FailNow() - } - - chain2, err := loadChain("valid2", t) - if err != nil { - fmt.Println(err) - t.FailNow() - } - - chainMan := theChainManager(db, t) - - const max = 2 - done := make(chan bool, max) - - go insertChain(done, chainMan, chain1, t) - go insertChain(done, chainMan, chain2, t) - - for i := 0; i < max; i++ { - <-done - } - - if chain2[len(chain2)-1].Hash() != chainMan.CurrentBlock().Hash() { - t.Error("chain2 is canonical and shouldn't be") - } - - if chain1[len(chain1)-1].Hash() != chainMan.CurrentBlock().Hash() { - t.Error("chain1 isn't canonical and should be") - } -} - -func TestChainMultipleInsertions(t *testing.T) { - t.Skip("Skipped: outdated test files") - - db, _ := ethdb.NewMemDatabase() - - const max = 4 - chains := make([]types.Blocks, max) - var longest int - for i := 0; i < max; i++ { - var err error - name := "valid" + strconv.Itoa(i+1) - chains[i], err = loadChain(name, t) - if len(chains[i]) >= len(chains[longest]) { - longest = i - } - fmt.Println("loaded", name, "with a length of", len(chains[i])) - if err != nil { - fmt.Println(err) - t.FailNow() - } - } - - chainMan := theChainManager(db, t) - - done := make(chan bool, max) - for i, chain := range chains { - // XXX the go routine would otherwise reference the same (chain[3]) variable and fail - i := i - chain := chain - go func() { - insertChain(done, chainMan, chain, t) - fmt.Println(i, "done") - }() - } - - for i := 0; i < max; i++ { - <-done - } - - if chains[longest][len(chains[longest])-1].Hash() != chainMan.CurrentBlock().Hash() { - t.Error("Invalid canonical chain") - } -} - -type bproc struct{} - -func (bproc) Process(*types.Block) (vm.Logs, types.Receipts, error) { return nil, nil, nil } - -func makeChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block { - var chain []*types.Block - for i, difficulty := range d { - header := &types.Header{ - Coinbase: common.Address{seed}, - Number: big.NewInt(int64(i + 1)), - Difficulty: big.NewInt(int64(difficulty)), - } - if i == 0 { - header.ParentHash = genesis.Hash() - } else { - header.ParentHash = chain[i-1].Hash() - } - block := types.NewBlockWithHeader(header) - chain = append(chain, block) - } - return chain -} - -func chm(genesis *types.Block, db ethdb.Database) *ChainManager { - var eventMux event.TypeMux - bc := &ChainManager{chainDb: db, genesisBlock: 
genesis, eventMux: &eventMux, pow: FakePow{}} - bc.headerCache, _ = lru.New(100) - bc.bodyCache, _ = lru.New(100) - bc.bodyRLPCache, _ = lru.New(100) - bc.tdCache, _ = lru.New(100) - bc.blockCache, _ = lru.New(100) - bc.futureBlocks, _ = lru.New(100) - bc.processor = bproc{} - bc.ResetWithGenesisBlock(genesis) - - return bc -} - -func TestReorgLongest(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - - genesis, err := WriteTestNetGenesisBlock(db, 0) - if err != nil { - t.Error(err) - t.FailNow() - } - bc := chm(genesis, db) - - chain1 := makeChainWithDiff(genesis, []int{1, 2, 4}, 10) - chain2 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11) - - bc.InsertChain(chain1) - bc.InsertChain(chain2) - - prev := bc.CurrentBlock() - for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) { - if prev.ParentHash() != block.Hash() { - t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash()) - } - } -} - -func TestBadHashes(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - genesis, err := WriteTestNetGenesisBlock(db, 0) - if err != nil { - t.Error(err) - t.FailNow() - } - bc := chm(genesis, db) - - chain := makeChainWithDiff(genesis, []int{1, 2, 4}, 10) - BadHashes[chain[2].Header().Hash()] = true - - _, err = bc.InsertChain(chain) - if !IsBadHashError(err) { - t.Errorf("error mismatch: want: BadHashError, have: %v", err) - } -} - -func TestReorgBadHashes(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - genesis, err := WriteTestNetGenesisBlock(db, 0) - if err != nil { - t.Error(err) - t.FailNow() - } - bc := chm(genesis, db) - - chain := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 11) - bc.InsertChain(chain) - - if chain[3].Header().Hash() != bc.LastBlockHash() { - t.Errorf("last block hash mismatch: want: %x, have: %x", chain[3].Header().Hash(), bc.LastBlockHash()) - } - - // NewChainManager should check BadHashes when loading it db - BadHashes[chain[3].Header().Hash()] = true - - var eventMux event.TypeMux - ncm, err := NewChainManager(db, FakePow{}, &eventMux) - if err != nil { - t.Errorf("NewChainManager err: %s", err) - } - - // check it set head to (valid) parent of bad hash block - if chain[2].Header().Hash() != ncm.LastBlockHash() { - t.Errorf("last block hash mismatch: want: %x, have: %x", chain[2].Header().Hash(), ncm.LastBlockHash()) - } - - if chain[2].Header().GasLimit.Cmp(ncm.GasLimit()) != 0 { - t.Errorf("current block gasLimit mismatch: want: %x, have: %x", chain[2].Header().GasLimit, ncm.GasLimit()) - } -} - -func TestReorgShortest(t *testing.T) { - db, _ := ethdb.NewMemDatabase() - genesis, err := WriteTestNetGenesisBlock(db, 0) - if err != nil { - t.Error(err) - t.FailNow() - } - bc := chm(genesis, db) - - chain1 := makeChainWithDiff(genesis, []int{1, 2, 3, 4}, 10) - chain2 := makeChainWithDiff(genesis, []int{1, 10}, 11) - - bc.InsertChain(chain1) - bc.InsertChain(chain2) - - prev := bc.CurrentBlock() - for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) { - if prev.ParentHash() != block.Hash() { - t.Errorf("parent hash mismatch %x - %x", prev.ParentHash(), block.Hash()) - } - } -} - -func TestInsertNonceError(t *testing.T) { - for i := 1; i < 25 && !t.Failed(); i++ { - db, _ := ethdb.NewMemDatabase() - genesis, err := WriteTestNetGenesisBlock(db, 0) - if err != nil { - t.Error(err) - t.FailNow() - } - bc := chm(genesis, db) - bc.processor = 
NewBlockProcessor(db, bc.pow, bc, bc.eventMux) - blocks := makeChain(bc.currentBlock, i, db, 0) - - fail := rand.Int() % len(blocks) - failblock := blocks[fail] - bc.pow = failPow{failblock.NumberU64()} - n, err := bc.InsertChain(blocks) - - // Check that the returned error indicates the nonce failure. - if n != fail { - t.Errorf("(i=%d) wrong failed block index: got %d, want %d", i, n, fail) - } - if !IsBlockNonceErr(err) { - t.Fatalf("(i=%d) got %q, want a nonce error", i, err) - } - nerr := err.(*BlockNonceErr) - if nerr.Number.Cmp(failblock.Number()) != 0 { - t.Errorf("(i=%d) wrong block number in error, got %v, want %v", i, nerr.Number, failblock.Number()) - } - if nerr.Hash != failblock.Hash() { - t.Errorf("(i=%d) wrong block hash in error, got %v, want %v", i, nerr.Hash, failblock.Hash()) - } - - // Check that all no blocks after the failing block have been inserted. - for _, block := range blocks[fail:] { - if bc.HasBlock(block.Hash()) { - t.Errorf("(i=%d) invalid block %d present in chain", i, block.NumberU64()) - } - } - } -} - -// Tests that chain reorganizations handle transaction removals and reinsertions. -func TestChainTxReorgs(t *testing.T) { - params.MinGasLimit = big.NewInt(125000) // Minimum the gas limit may ever be. - params.GenesisGasLimit = big.NewInt(3141592) // Gas limit of the Genesis block. - - var ( - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) - addr3 = crypto.PubkeyToAddress(key3.PublicKey) - db, _ = ethdb.NewMemDatabase() - ) - genesis := WriteGenesisBlockForTesting(db, - GenesisAccount{addr1, big.NewInt(1000000)}, - GenesisAccount{addr2, big.NewInt(1000000)}, - GenesisAccount{addr3, big.NewInt(1000000)}, - ) - // Create two transactions shared between the chains: - // - postponed: transaction included at a later block in the forked chain - // - swapped: transaction included at the same block number in the forked chain - postponed, _ := types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1) - swapped, _ := types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key1) - - // Create two transactions that will be dropped by the forked chain: - // - pastDrop: transaction dropped retroactively from a past block - // - freshDrop: transaction dropped exactly at the block where the reorg is detected - var pastDrop, freshDrop *types.Transaction - - // Create three transactions that will be added in the forked chain: - // - pastAdd: transaction added before the reorganiztion is detected - // - freshAdd: transaction added at the exact block the reorg is detected - // - futureAdd: transaction added after the reorg has already finished - var pastAdd, freshAdd, futureAdd *types.Transaction - - chain := GenerateChain(genesis, db, 3, func(i int, gen *BlockGen) { - switch i { - case 0: - pastDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2) - - gen.AddTx(pastDrop) // This transaction will be dropped in the fork from below the split point - gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork - - case 2: - freshDrop, _ = types.NewTransaction(gen.TxNonce(addr2), addr2, 
big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key2) - - gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point - gen.AddTx(swapped) // This transaction will be swapped out at the exact height - - gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain - } - }) - // Import the chain. This runs all block validation rules. - evmux := &event.TypeMux{} - chainman, _ := NewChainManager(db, FakePow{}, evmux) - chainman.SetProcessor(NewBlockProcessor(db, FakePow{}, chainman, evmux)) - if i, err := chainman.InsertChain(chain); err != nil { - t.Fatalf("failed to insert original chain[%d]: %v", i, err) - } - - // overwrite the old chain - chain = GenerateChain(genesis, db, 5, func(i int, gen *BlockGen) { - switch i { - case 0: - pastAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3) - gen.AddTx(pastAdd) // This transaction needs to be injected during reorg - - case 2: - gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain - gen.AddTx(swapped) // This transaction was swapped from the exact current spot in the original chain - - freshAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3) - gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time - - case 3: - futureAdd, _ = types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(key3) - gen.AddTx(futureAdd) // This transaction will be added after a full reorg - } - }) - if _, err := chainman.InsertChain(chain); err != nil { - t.Fatalf("failed to insert forked chain: %v", err) - } - - // removed tx - for i, tx := range (types.Transactions{pastDrop, freshDrop}) { - if GetTransaction(db, tx.Hash()) != nil { - t.Errorf("drop %d: tx found while shouldn't have been", i) - } - if GetReceipt(db, tx.Hash()) != nil { - t.Errorf("drop %d: receipt found while shouldn't have been", i) - } - } - // added tx - for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) { - if GetTransaction(db, tx.Hash()) == nil { - t.Errorf("add %d: expected tx to be found", i) - } - if GetReceipt(db, tx.Hash()) == nil { - t.Errorf("add %d: expected receipt to be found", i) - } - } - // shared tx - for i, tx := range (types.Transactions{postponed, swapped}) { - if GetTransaction(db, tx.Hash()) == nil { - t.Errorf("share %d: expected tx to be found", i) - } - if GetReceipt(db, tx.Hash()) == nil { - t.Errorf("share %d: expected receipt to be found", i) - } - } -} diff --git a/core/helper_test.go b/core/helper_test.go index 81ea6fc22..fd6a5491c 100644 --- a/core/helper_test.go +++ b/core/helper_test.go @@ -34,7 +34,7 @@ type TestManager struct { db ethdb.Database txPool *TxPool - blockChain *ChainManager + blockChain *BlockChain Blocks []*types.Block } @@ -54,7 +54,7 @@ func (s *TestManager) Peers() *list.List { return list.New() } -func (s *TestManager) ChainManager() *ChainManager { +func (s *TestManager) BlockChain() *BlockChain { return s.blockChain } @@ -89,7 +89,7 @@ func NewTestManager() *TestManager { testManager.eventMux = new(event.TypeMux) testManager.db = db // testManager.txPool = NewTxPool(testManager) - // testManager.blockChain = NewChainManager(testManager) + // testManager.blockChain = NewBlockChain(testManager) // testManager.stateManager = NewStateManager(testManager) return testManager diff --git a/core/manager.go b/core/manager.go index 0f108a6de..289c87c11 100644 
--- a/core/manager.go +++ b/core/manager.go @@ -26,7 +26,7 @@ import ( type Backend interface { AccountManager() *accounts.Manager BlockProcessor() *BlockProcessor - ChainManager() *ChainManager + BlockChain() *BlockChain TxPool() *TxPool ChainDb() ethdb.Database DappDb() ethdb.Database diff --git a/core/vm/common.go b/core/vm/common.go index b52b598ba..2d1aa9332 100644 --- a/core/vm/common.go +++ b/core/vm/common.go @@ -38,7 +38,7 @@ const ( ) var ( - Pow256 = common.BigPow(2, 256) // Pew256 is 2**256 + Pow256 = common.BigPow(2, 256) // Pow256 is 2**256 U256 = common.U256 // Shortcut to common.U256 S256 = common.S256 // Shortcut to common.S256 @@ -46,7 +46,7 @@ var ( Zero = common.Big0 // Shortcut to common.Big0 One = common.Big1 // Shortcut to common.Big1 - max = big.NewInt(math.MaxInt64) // Maximum 256 bit integer + max = big.NewInt(math.MaxInt64) // Maximum 64 bit integer ) // NewVm returns a new VM based on the Environment diff --git a/core/vm/contract.go b/core/vm/contract.go index 8460cc47b..95417e747 100644 --- a/core/vm/contract.go +++ b/core/vm/contract.go @@ -118,8 +118,8 @@ func (self *Contract) SetCode(code []byte) { self.Code = code } -// SetCallCode sets the address of the code address and sets the code -// of the contract according to the backing database. +// SetCallCode sets the code of the contract and address of the backing data +// object func (self *Contract) SetCallCode(addr *common.Address, code []byte) { self.Code = code self.CodeAddr = addr diff --git a/core/vm/doc.go b/core/vm/doc.go index 4deb7761d..ab87bf934 100644 --- a/core/vm/doc.go +++ b/core/vm/doc.go @@ -18,15 +18,15 @@ Package vm implements the Ethereum Virtual Machine. The vm package implements two EVMs, a byte code VM and a JIT VM. The BC -(Byte Code) VM loops over a set of bytes and executes them according to a set -of rules defined in the Ethereum yellow paper. When the BC VM is invokes it +(Byte Code) VM loops over a set of bytes and executes them according to the set +of rules defined in the Ethereum yellow paper. When the BC VM is invoked it invokes the JIT VM in a seperate goroutine and compiles the byte code in JIT instructions. The JIT VM, when invoked, loops around a set of pre-defined instructions until -it either runs of gas, causes an internel error, returns or stops. At a later +it either runs of gas, causes an internal error, returns or stops. At a later stage the JIT VM will see some additional features that will cause sets of instructions to be compiled down to segments. Segments are sets of instructions -that can be ran in one go saving precious time during execution. +that can be run in one go saving precious time during execution. */ package vm diff --git a/core/vm/environment.go b/core/vm/environment.go index 606518fd4..f8e19baea 100644 --- a/core/vm/environment.go +++ b/core/vm/environment.go @@ -26,7 +26,7 @@ import ( // it's own isolated environment. // Environment is an EVM requirement and helper which allows access to outside -// information such like states. +// information such as states. 
type Environment interface { // The state database Db() Database @@ -50,7 +50,7 @@ type Environment interface { GasLimit() *big.Int // Determines whether it's possible to transact CanTransfer(from common.Address, balance *big.Int) bool - // Transfer from to to with amount set + // Transfers amount from one account to the other Transfer(from, to Account, amount *big.Int) error // Adds a LOG to the state AddLog(*Log) diff --git a/core/vm/memory.go b/core/vm/memory.go index 101ec75cb..d01188417 100644 --- a/core/vm/memory.go +++ b/core/vm/memory.go @@ -18,7 +18,7 @@ package vm import "fmt" -// Memory implements ethereum RAM backed by a simple byte slice +// Memory implements a simple memory model for the ethereum virtual machine. type Memory struct { store []byte } diff --git a/core/vm_env.go b/core/vm_env.go index dea280746..467e34c6b 100644 --- a/core/vm_env.go +++ b/core/vm_env.go @@ -30,13 +30,13 @@ type VMEnv struct { header *types.Header msg Message depth int - chain *ChainManager + chain *BlockChain typ vm.Type // structured logging logs []vm.StructLog } -func NewEnv(state *state.StateDB, chain *ChainManager, msg Message, header *types.Header) *VMEnv { +func NewEnv(state *state.StateDB, chain *BlockChain, msg Message, header *types.Header) *VMEnv { return &VMEnv{ chain: chain, state: state, -- cgit
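Editor's note: for readers tracking only the rename, here is a minimal, hedged sketch of how the constructor is wired up after this patch, modelled on the test hunks above. NewBlockChain, NewBlockProcessor, SetProcessor, WriteTestNetGenesisBlock and FakePow are taken from those hunks; FakePow in particular may only be visible inside package core, so treat this as test-style wiring rather than a public-API guarantee.

package core

import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
)

// newTestBlockChain mirrors the wiring in this patch's tests: the former
// NewChainManager call becomes NewBlockChain and the block processor is
// attached exactly as before.
func newTestBlockChain() (*BlockChain, error) {
	db, err := ethdb.NewMemDatabase()
	if err != nil {
		return nil, err
	}
	WriteTestNetGenesisBlock(db, 0)

	mux := new(event.TypeMux)
	blockchain, err := NewBlockChain(db, FakePow{}, mux)
	if err != nil {
		return nil, err
	}
	blockchain.SetProcessor(NewBlockProcessor(db, FakePow{}, blockchain, mux))
	return blockchain, nil
}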