author | Péter Szilágyi <peterke@gmail.com> | 2017-02-28 19:35:17 +0800
---|---|---
committer | Péter Szilágyi <peterke@gmail.com> | 2017-02-28 21:03:20 +0800
commit | e588e0ca2b3b615af0ecfd5679c42df8f1cc4272 (patch) |
tree | c37e95792ed9f17209b7de26a5d6047ac915db2a /core |
parent | d4f60d362b8fcf82db1accf89c146a2a71375841 (diff) |
download | go-tangerine-e588e0ca2b3b615af0ecfd5679c42df8f1cc4272.tar.gz go-tangerine-e588e0ca2b3b615af0ecfd5679c42df8f1cc4272.tar.zst go-tangerine-e588e0ca2b3b615af0ecfd5679c42df8f1cc4272.zip |
all: next batch of log polishes to contextual versions
Diffstat (limited to 'core')
-rw-r--r-- | core/block_validator.go | 2
-rw-r--r-- | core/blockchain.go | 151
-rw-r--r-- | core/database_util.go | 47
-rw-r--r-- | core/genesis.go | 3
-rw-r--r-- | core/headerchain.go | 37
-rw-r--r-- | core/state_processor.go | 4
-rw-r--r-- | core/state_transition.go | 3
-rw-r--r-- | core/tx_pool.go | 95
-rw-r--r-- | core/vm/contracts.go | 3
-rw-r--r-- | core/vm/interpreter.go | 8
10 files changed, 150 insertions, 203 deletions
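The conversion is largely mechanical: call sites that pre-rendered a message with fmt.Sprintf (sometimes wrapped in log.Lazy) now pass a constant message plus key/value context to the logger. A minimal before/after sketch of the pattern, assuming go-ethereum's log15-style log package; the writeHead helper and its arguments are made up for illustration and do not appear in this diff:

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

// writeHead is a hypothetical helper illustrating the logging conversion
// applied throughout this commit.
func writeHead(number uint64, hash common.Hash, err error) {
	// Before: the whole line was rendered up front.
	//   log.Info(fmt.Sprintf("committed block #%d [%x…] as new head", number, hash[:4]))
	// After: a fixed message plus structured context; the logger formats,
	// truncates and aligns the values itself.
	log.Info("Committed new head block", "number", number, "hash", hash)

	// Failures attach the error under an explicit "err" key instead of
	// interpolating it into the message.
	if err != nil {
		log.Crit("Failed to update head block", "err", err)
	}
}
```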
diff --git a/core/block_validator.go b/core/block_validator.go index 117974ee6..ed5cc3ab6 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -170,7 +170,7 @@ func (v *BlockValidator) VerifyUncles(block, parent *types.Block) error { for h := range ancestors { branch += fmt.Sprintf(" O - %x\n |\n", h) } - log.Info(fmt.Sprint(branch)) + log.Warn(branch) return UncleError("uncle[%d](%x) is ancestor", i, hash[:4]) } diff --git a/core/blockchain.go b/core/blockchain.go index e46a76fc1..27dbf6c6d 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -160,9 +160,9 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.P headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) // make sure the headerByNumber (if present) is in our current canonical chain if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { - log.Error(fmt.Sprintf("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4])) + log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) bc.SetHead(header.Number.Uint64() - 1) - log.Error(fmt.Sprint("Chain rewind was successful, resuming normal operation")) + log.Error("Chain rewind was successful, resuming normal operation") } } } @@ -219,9 +219,9 @@ func (self *BlockChain) loadLastState() error { blockTd := self.GetTd(self.currentBlock.Hash(), self.currentBlock.NumberU64()) fastTd := self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64()) - log.Info(fmt.Sprintf("Last header: #%d [%x…] TD=%v", currentHeader.Number, currentHeader.Hash().Bytes()[:4], headerTd)) - log.Info(fmt.Sprintf("Last block: #%d [%x…] TD=%v", self.currentBlock.Number(), self.currentBlock.Hash().Bytes()[:4], blockTd)) - log.Info(fmt.Sprintf("Fast block: #%d [%x…] TD=%v", self.currentFastBlock.Number(), self.currentFastBlock.Hash().Bytes()[:4], fastTd)) + log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd) + log.Info("Loaded most recent local full block", "number", self.currentBlock.Number(), "hash", self.currentBlock.Hash(), "td", blockTd) + log.Info("Loaded most recent local fast block", "number", self.currentFastBlock.Number(), "hash", self.currentFastBlock.Hash(), "td", fastTd) return nil } @@ -262,10 +262,10 @@ func (bc *BlockChain) SetHead(head uint64) { } if err := WriteHeadBlockHash(bc.chainDb, bc.currentBlock.Hash()); err != nil { - log.Crit(fmt.Sprintf("failed to reset head block hash: %v", err)) + log.Crit("Failed to reset head full block", "err", err) } if err := WriteHeadFastBlockHash(bc.chainDb, bc.currentFastBlock.Hash()); err != nil { - log.Crit(fmt.Sprintf("failed to reset head fast block hash: %v", err)) + log.Crit("Failed to reset head fast block", "err", err) } bc.loadLastState() } @@ -286,7 +286,7 @@ func (self *BlockChain) FastSyncCommitHead(hash common.Hash) error { self.currentBlock = block self.mu.Unlock() - log.Info(fmt.Sprintf("committed block #%d [%x…] as new head", block.Number(), hash[:4])) + log.Info("Committed new head block", "number", block.Number(), "hash", hash) return nil } @@ -390,10 +390,10 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) { // Prepare the genesis block and reinitialise the chain if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil { - log.Crit(fmt.Sprintf("failed to write genesis block TD: %v", err)) + log.Crit("Failed to write genesis block TD", "err", err) } if err := 
WriteBlock(bc.chainDb, genesis); err != nil { - log.Crit(fmt.Sprintf("failed to write genesis block: %v", err)) + log.Crit("Failed to write genesis block", "err", err) } bc.genesisBlock = genesis bc.insert(bc.genesisBlock) @@ -416,8 +416,7 @@ func (self *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error { if first > last { return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last) } - - log.Info(fmt.Sprintf("exporting %d blocks...\n", last-first+1)) + log.Info("Exporting batch of blocks", "count", last-first+1) for nr := first; nr <= last; nr++ { block := self.GetBlockByNumber(nr) @@ -445,10 +444,10 @@ func (bc *BlockChain) insert(block *types.Block) { // Add the block to the canonical chain number scheme and mark as the head if err := WriteCanonicalHash(bc.chainDb, block.Hash(), block.NumberU64()); err != nil { - log.Crit(fmt.Sprintf("failed to insert block number: %v", err)) + log.Crit("Failed to insert block number", "err", err) } if err := WriteHeadBlockHash(bc.chainDb, block.Hash()); err != nil { - log.Crit(fmt.Sprintf("failed to insert head block hash: %v", err)) + log.Crit("Failed to insert head block hash", "err", err) } bc.currentBlock = block @@ -457,7 +456,7 @@ func (bc *BlockChain) insert(block *types.Block) { bc.hc.SetCurrentHeader(block.Header()) if err := WriteHeadFastBlockHash(bc.chainDb, block.Hash()); err != nil { - log.Crit(fmt.Sprintf("failed to insert head fast block hash: %v", err)) + log.Crit("Failed to insert head fast block hash", "err", err) } bc.currentFastBlock = block } @@ -588,8 +587,7 @@ func (bc *BlockChain) Stop() { atomic.StoreInt32(&bc.procInterrupt, 1) bc.wg.Wait() - - log.Info(fmt.Sprint("Chain manager stopped")) + log.Info("Blockchain manager stopped") } func (self *BlockChain) procFutureBlocks() { @@ -683,11 +681,11 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain for i := 1; i < len(blockChain); i++ { if blockChain[i].NumberU64() != blockChain[i-1].NumberU64()+1 || blockChain[i].ParentHash() != blockChain[i-1].Hash() { // Chain broke ancestry, log a messge (programming error) and skip insertion - failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(), - blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) + log.Error("Non contiguous receipt insert", "number", blockChain[i].Number(), "hash", blockChain[i].Hash(), "parent", blockChain[i].ParentHash(), + "prevnumber", blockChain[i-1].Number(), "prevhash", blockChain[i-1].Hash()) - log.Error(fmt.Sprint(failure.Error())) - return 0, failure + return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, blockChain[i-1].NumberU64(), + blockChain[i-1].Hash().Bytes()[:4], i, blockChain[i].NumberU64(), blockChain[i].Hash().Bytes()[:4], blockChain[i].ParentHash().Bytes()[:4]) } } // Pre-checks passed, start the block body and receipt imports @@ -734,31 +732,31 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain if err := WriteBody(self.chainDb, block.Hash(), block.NumberU64(), block.Body()); err != nil { errs[index] = fmt.Errorf("failed to write block body: %v", err) atomic.AddInt32(&failed, 1) - log.Crit(fmt.Sprint(errs[index])) + log.Crit("Failed to write block body", "err", err) return } if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil 
{ errs[index] = fmt.Errorf("failed to write block receipts: %v", err) atomic.AddInt32(&failed, 1) - log.Crit(fmt.Sprint(errs[index])) + log.Crit("Failed to write block receipts", "err", err) return } if err := WriteMipmapBloom(self.chainDb, block.NumberU64(), receipts); err != nil { errs[index] = fmt.Errorf("failed to write log blooms: %v", err) atomic.AddInt32(&failed, 1) - log.Crit(fmt.Sprint(errs[index])) + log.Crit("Failed to write log blooms", "err", err) return } if err := WriteTransactions(self.chainDb, block); err != nil { errs[index] = fmt.Errorf("failed to write individual transactions: %v", err) atomic.AddInt32(&failed, 1) - log.Crit(fmt.Sprint(errs[index])) + log.Crit("Failed to write individual transactions", "err", err) return } if err := WriteReceipts(self.chainDb, receipts); err != nil { errs[index] = fmt.Errorf("failed to write individual receipts: %v", err) atomic.AddInt32(&failed, 1) - log.Crit(fmt.Sprint(errs[index])) + log.Crit("Failed to write individual receipts", "err", err) return } atomic.AddInt32(&stats.processed, 1) @@ -784,7 +782,7 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain } } if atomic.LoadInt32(&self.procInterrupt) == 1 { - log.Debug(fmt.Sprint("premature abort during receipt chain processing")) + log.Debug("Premature abort during receipts processing") return 0, nil } // Update the head fast sync block if better @@ -792,20 +790,16 @@ func (self *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain head := blockChain[len(errs)-1] if self.GetTd(self.currentFastBlock.Hash(), self.currentFastBlock.NumberU64()).Cmp(self.GetTd(head.Hash(), head.NumberU64())) < 0 { if err := WriteHeadFastBlockHash(self.chainDb, head.Hash()); err != nil { - log.Crit(fmt.Sprintf("failed to update head fast block hash: %v", err)) + log.Crit("Failed to update head fast block hash", "err", err) } self.currentFastBlock = head } self.mu.Unlock() // Report some public statistics so the user has a clue what's going on - first, last := blockChain[0], blockChain[len(blockChain)-1] - - ignored := "" - if stats.ignored > 0 { - ignored = fmt.Sprintf(" (%d ignored)", stats.ignored) - } - log.Info(fmt.Sprintf("imported %4d receipts in %9v. 
#%d [%x… / %x…]%s", stats.processed, common.PrettyDuration(time.Since(start)), last.Number(), first.Hash().Bytes()[:4], last.Hash().Bytes()[:4], ignored)) + last := blockChain[len(blockChain)-1] + log.Info("Imported new block receipts", "count", stats.processed, "number", last.Number(), "hash", last.Hash(), + "elapsed", common.PrettyDuration(time.Since(start)), "ignored", stats.ignored) return 0, nil } @@ -829,10 +823,10 @@ func (self *BlockChain) WriteBlock(block *types.Block) (status WriteStatus, err // Irrelevant of the canonical status, write the block itself to the database if err := self.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil { - log.Crit(fmt.Sprintf("failed to write block total difficulty: %v", err)) + log.Crit("Failed to write block total difficulty", "err", err) } if err := WriteBlock(self.chainDb, block); err != nil { - log.Crit(fmt.Sprintf("failed to write block contents: %v", err)) + log.Crit("Failed to write block contents", "err", err) } // If the total difficulty is higher than our known, add it to the canonical chain @@ -863,11 +857,11 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) { for i := 1; i < len(chain); i++ { if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() { // Chain broke ancestry, log a messge (programming error) and skip insertion - failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", - i-1, chain[i-1].NumberU64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4]) + log.Error("Non contiguous block insert", "number", chain[i].Number(), "hash", chain[i].Hash(), + "parent", chain[i].ParentHash(), "prevnumber", chain[i-1].Number(), "prevhash", chain[i-1].Hash()) - log.Error(fmt.Sprint(failure.Error())) - return 0, failure + return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].NumberU64(), + chain[i-1].Hash().Bytes()[:4], i, chain[i].NumberU64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash().Bytes()[:4]) } } // Pre-checks passed, start the full block imports @@ -893,7 +887,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) { for i, block := range chain { if atomic.LoadInt32(&self.procInterrupt) == 1 { - log.Debug(fmt.Sprint("Premature abort during block chain processing")) + log.Debug("Premature abort during blocks processing") break } bstart := time.Now() @@ -903,8 +897,8 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) { r := <-nonceResults nonceChecked[r.index] = true if !r.valid { - block := chain[r.index] - return r.index, &BlockNonceErr{Hash: block.Hash(), Number: block.Number(), Nonce: block.Nonce()} + invalid := chain[r.index] + return r.index, &BlockNonceErr{Hash: invalid.Hash(), Number: invalid.Number(), Nonce: invalid.Nonce()} } } @@ -978,7 +972,7 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) { // coalesce logs for later processing coalescedLogs = append(coalescedLogs, logs...) 
- if err := WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil { + if err = WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil { return i, err } @@ -990,9 +984,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) { switch status { case CanonStatTy: - log.Debug("", "msg", log.Lazy{Fn: func() string { - return fmt.Sprintf("inserted block #%d [%x…] in %9v: %3d txs %7v gas %d uncles.", block.Number(), block.Hash().Bytes()[0:4], common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), block.GasUsed(), len(block.Uncles())) - }}) + log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "uncles", len(block.Uncles()), + "txs", len(block.Transactions()), "gas", block.GasUsed(), "elapsed", common.PrettyDuration(time.Since(bstart))) + blockInsertTimer.UpdateSince(bstart) events = append(events, ChainEvent{block, block.Hash(), logs}) @@ -1013,9 +1007,9 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) { return i, err } case SideStatTy: - log.Trace("", "msg", log.Lazy{Fn: func() string { - return fmt.Sprintf("inserted forked block #%d [%x…] (TD=%v) in %9v: %3d txs %d uncles.", block.Number(), block.Hash().Bytes()[0:4], block.Difficulty(), common.PrettyDuration(time.Since(bstart)), len(block.Transactions()), len(block.Uncles())) - }}) + log.Debug("Inserted forked block", "number", block.Number(), "hash", block.Hash(), "diff", block.Difficulty(), "elapsed", + common.PrettyDuration(time.Since(bstart)), "txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles())) + blockInsertTimer.UpdateSince(bstart) events = append(events, ChainSideEvent{block}) @@ -1055,19 +1049,22 @@ func (st *insertStats) report(chain []*types.Block, index int) { ) // If we're at the last block of the batch or report period reached, log if index == len(chain)-1 || elapsed >= statsReportLimit { - start, end := chain[st.lastIndex], chain[index] - txcount := countTransactions(chain[st.lastIndex : index+1]) - - var hashes, extra string - if st.queued > 0 || st.ignored > 0 { - extra = fmt.Sprintf(" (%d queued %d ignored)", st.queued, st.ignored) + var ( + end = chain[index] + txs = countTransactions(chain[st.lastIndex : index+1]) + ) + context := []interface{}{ + "blocks", st.processed, "number", end.Number(), "hash", end.Hash(), "txs", txs, + "mgas", float64(st.usedGas) / 1000000, "elapsed", common.PrettyDuration(elapsed), + "mgasps", float64(st.usedGas) * 1000 / float64(elapsed), } - if st.processed > 1 { - hashes = fmt.Sprintf("%x… / %x…", start.Hash().Bytes()[:4], end.Hash().Bytes()[:4]) - } else { - hashes = fmt.Sprintf("%x…", end.Hash().Bytes()[:4]) + if st.queued > 0 { + context = append(context, []interface{}{"queued", st.queued}...) } - log.Info(fmt.Sprintf("imported %4d blocks, %5d txs (%7.3f Mg) in %9v (%6.3f Mg/s). #%v [%s]%s", st.processed, txcount, float64(st.usedGas)/1000000, common.PrettyDuration(elapsed), float64(st.usedGas)*1000/float64(elapsed), end.Number(), hashes, extra)) + if st.ignored > 0 { + context = append(context, []interface{}{"ignored", st.ignored}...) + } + log.Info("Imported new chain segment", context...) 
*st = insertStats{startTime: now, lastIndex: index} } @@ -1152,19 +1149,8 @@ func (self *BlockChain) reorg(oldBlock, newBlock *types.Block) error { if len(oldChain) > 63 { logFn = log.Warn } - logFn("", "msg", log.Lazy{Fn: func() string { - oldLen, newLen := len(oldChain), len(newChain) - newLast, newFirst := newChain[0], newChain[newLen-1] - oldLast, oldFirst := oldChain[0], oldChain[oldLen-1] - - return fmt.Sprintf("Chain split detected after #%v [%x…]. Reorganising chain (-%v +%v blocks), rejecting #%v-#%v [%x…/%x…] in favour of #%v-#%v [%x…/%x…]", - commonBlock.Number(), commonBlock.Hash().Bytes()[:4], - oldLen, newLen, - oldFirst.Number(), oldLast.Number(), - oldFirst.Hash().Bytes()[:4], oldLast.Hash().Bytes()[:4], - newFirst.Number(), newLast.Number(), - newFirst.Hash().Bytes()[:4], newLast.Hash().Bytes()[:4]) - }}) + logFn("Chain split detected", "number", commonBlock.Number(), "hash", commonBlock.Hash(), + "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) var addedTxs types.Transactions // insert blocks. Order does not matter. Last block will be written in ImportChain itself which creates the new head properly @@ -1271,12 +1257,12 @@ func (bc *BlockChain) addBadBlock(block *types.Block) { // reportBlock logs a bad block error. func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { bc.addBadBlock(block) - log.Error("", "msg", log.Lazy{Fn: func() string { - var receiptString string - for _, receipt := range receipts { - receiptString += fmt.Sprintf("\t%v\n", receipt) - } - return fmt.Sprintf(` + + var receiptString string + for _, receipt := range receipts { + receiptString += fmt.Sprintf("\t%v\n", receipt) + } + log.Error(fmt.Sprintf(` ########## BAD BLOCK ######### Chain config: %v @@ -1286,8 +1272,7 @@ Hash: 0x%x Error: %v ############################## -`, bc.config, block.Number(), block.Hash(), receiptString, err) - }}) +`, bc.config, block.Number(), block.Hash(), receiptString, err)) } // InsertHeaderChain attempts to insert the given header chain in to the local diff --git a/core/database_util.go b/core/database_util.go index 68263f8d8..bcd99be5f 100644 --- a/core/database_util.go +++ b/core/database_util.go @@ -106,7 +106,7 @@ func GetBlockNumber(db ethdb.Database, hash common.Hash) uint64 { } header := new(types.Header) if err := rlp.Decode(bytes.NewReader(data), header); err != nil { - log.Crit(fmt.Sprintf("failed to decode block header: %v", err)) + log.Crit("Failed to decode block header", "err", err) } return header.Number.Uint64() } @@ -166,7 +166,7 @@ func GetHeader(db ethdb.Database, hash common.Hash, number uint64) *types.Header } header := new(types.Header) if err := rlp.Decode(bytes.NewReader(data), header); err != nil { - log.Error(fmt.Sprintf("invalid block header RLP for hash %x: %v", hash, err)) + log.Error("Invalid block header RLP", "hash", hash, "err", err) return nil } return header @@ -190,7 +190,7 @@ func GetBody(db ethdb.Database, hash common.Hash, number uint64) *types.Body { } body := new(types.Body) if err := rlp.Decode(bytes.NewReader(data), body); err != nil { - log.Error(fmt.Sprintf("invalid block body RLP for hash %x: %v", hash, err)) + log.Error("Invalid block body RLP", "hash", hash, "err", err) return nil } return body @@ -208,7 +208,7 @@ func GetTd(db ethdb.Database, hash common.Hash, number uint64) *big.Int { } td := new(big.Int) if err := rlp.Decode(bytes.NewReader(data), td); err != nil { - log.Error(fmt.Sprintf("invalid block total difficulty 
RLP for hash %x: %v", hash, err)) + log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err) return nil } return td @@ -246,7 +246,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) types. } storageReceipts := []*types.ReceiptForStorage{} if err := rlp.DecodeBytes(data, &storageReceipts); err != nil { - log.Error(fmt.Sprintf("invalid receipt array RLP for hash %x: %v", hash, err)) + log.Error("Invalid receipt array RLP", "hash", hash, "err", err) return nil } receipts := make(types.Receipts, len(storageReceipts)) @@ -285,15 +285,15 @@ func GetTransaction(db ethdb.Database, hash common.Hash) (*types.Transaction, co } // GetReceipt returns a receipt by hash -func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt { - data, _ := db.Get(append(receiptsPrefix, txHash[:]...)) +func GetReceipt(db ethdb.Database, hash common.Hash) *types.Receipt { + data, _ := db.Get(append(receiptsPrefix, hash[:]...)) if len(data) == 0 { return nil } var receipt types.ReceiptForStorage err := rlp.DecodeBytes(data, &receipt) if err != nil { - log.Debug(fmt.Sprint("GetReceipt err:", err)) + log.Error("Invalid receipt RLP", "hash", hash, "err", err) } return (*types.Receipt)(&receipt) } @@ -302,7 +302,7 @@ func GetReceipt(db ethdb.Database, txHash common.Hash) *types.Receipt { func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error { key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...) if err := db.Put(key, hash.Bytes()); err != nil { - log.Crit(fmt.Sprintf("failed to store number to hash mapping into database: %v", err)) + log.Crit("Failed to store number to hash mapping", "err", err) } return nil } @@ -310,7 +310,7 @@ func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) erro // WriteHeadHeaderHash stores the head header's hash. func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error { if err := db.Put(headHeaderKey, hash.Bytes()); err != nil { - log.Crit(fmt.Sprintf("failed to store last header's hash into database: %v", err)) + log.Crit("Failed to store last header's hash", "err", err) } return nil } @@ -318,7 +318,7 @@ func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error { // WriteHeadBlockHash stores the head block's hash. func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error { if err := db.Put(headBlockKey, hash.Bytes()); err != nil { - log.Crit(fmt.Sprintf("failed to store last block's hash into database: %v", err)) + log.Crit("Failed to store last block's hash", "err", err) } return nil } @@ -326,7 +326,7 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error { // WriteHeadFastBlockHash stores the fast head block's hash. func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error { if err := db.Put(headFastKey, hash.Bytes()); err != nil { - log.Crit(fmt.Sprintf("failed to store last fast block's hash into database: %v", err)) + log.Crit("Failed to store last fast block's hash", "err", err) } return nil } @@ -342,13 +342,12 @@ func WriteHeader(db ethdb.Database, header *types.Header) error { encNum := encodeBlockNumber(num) key := append(blockHashPrefix, hash...) if err := db.Put(key, encNum); err != nil { - log.Crit(fmt.Sprintf("failed to store hash to number mapping into database: %v", err)) + log.Crit("Failed to store hash to number mapping", "err", err) } key = append(append(headerPrefix, encNum...), hash...) 
if err := db.Put(key, data); err != nil { - log.Crit(fmt.Sprintf("failed to store header into database: %v", err)) + log.Crit("Failed to store header", "err", err) } - log.Debug(fmt.Sprintf("stored header #%v [%x…]", header.Number, hash[:4])) return nil } @@ -365,9 +364,8 @@ func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.B func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error { key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) if err := db.Put(key, rlp); err != nil { - log.Crit(fmt.Sprintf("failed to store block body into database: %v", err)) + log.Crit("Failed to store block body", "err", err) } - log.Debug(fmt.Sprintf("stored block body [%x…]", hash.Bytes()[:4])) return nil } @@ -379,9 +377,8 @@ func WriteTd(db ethdb.Database, hash common.Hash, number uint64, td *big.Int) er } key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...) if err := db.Put(key, data); err != nil { - log.Crit(fmt.Sprintf("failed to store block total difficulty into database: %v", err)) + log.Crit("Failed to store block total difficulty", "err", err) } - log.Debug(fmt.Sprintf("stored block total difficulty [%x…]: %v", hash.Bytes()[:4], td)) return nil } @@ -414,9 +411,8 @@ func WriteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64, rece // Store the flattened receipt slice key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...) if err := db.Put(key, bytes); err != nil { - log.Crit(fmt.Sprintf("failed to store block receipts into database: %v", err)) + log.Crit("Failed to store block receipts", "err", err) } - log.Debug(fmt.Sprintf("stored block receipts [%x…]", hash.Bytes()[:4])) return nil } @@ -434,7 +430,7 @@ func WriteTransactions(db ethdb.Database, block *types.Block) error { if err != nil { return err } - if err := batch.Put(tx.Hash().Bytes(), data); err != nil { + if err = batch.Put(tx.Hash().Bytes(), data); err != nil { return err } // Encode and queue up the transaction metadata for storage @@ -457,7 +453,7 @@ func WriteTransactions(db ethdb.Database, block *types.Block) error { } // Write the scheduled data into the database if err := batch.Write(); err != nil { - log.Crit(fmt.Sprintf("failed to store transactions into database: %v", err)) + log.Crit("Failed to store transactions", "err", err) } return nil } @@ -489,7 +485,7 @@ func WriteReceipts(db ethdb.Database, receipts types.Receipts) error { } // Write the scheduled data into the database if err := batch.Write(); err != nil { - log.Crit(fmt.Sprintf("failed to store receipts into database: %v", err)) + log.Crit("Failed to store receipts", "err", err) } return nil } @@ -595,7 +591,7 @@ func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash] for hash, preimage := range preimages { if _, err := table.Get(hash.Bytes()); err != nil { batch.Put(hash.Bytes(), preimage) - hitCount += 1 + hitCount++ } } preimageCounter.Inc(int64(len(preimages))) @@ -604,7 +600,6 @@ func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash] if err := batch.Write(); err != nil { return fmt.Errorf("preimage write fail for block %d: %v", number, err) } - log.Debug(fmt.Sprintf("%d preimages in block %d, including %d new", len(preimages), number, hitCount)) } return nil } diff --git a/core/genesis.go b/core/genesis.go index 58d3440d6..d827ac1c1 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -117,7 +117,7 @@ func 
WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block, }, nil, nil, nil) if block := GetBlock(chainDb, block.Hash(), block.NumberU64()); block != nil { - log.Info(fmt.Sprint("Genesis block already in chain. Writing canonical number")) + log.Info("Genesis block known, writing canonical number") err := WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) if err != nil { return nil, err @@ -146,7 +146,6 @@ func WriteGenesisBlock(chainDb ethdb.Database, reader io.Reader) (*types.Block, if err := WriteChainConfig(chainDb, block.Hash(), genesis.ChainConfig); err != nil { return nil, err } - return block, nil } diff --git a/core/headerchain.go b/core/headerchain.go index a0550a428..775321bc6 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -101,7 +101,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, getValid if err != nil { return nil, err } - log.Info(fmt.Sprint("WARNING: Wrote default ethereum genesis block")) + log.Warn("Wrote default Ethereum genesis block") hc.genesisHeader = genesisBlock.Header() } @@ -154,12 +154,11 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er // Irrelevant of the canonical status, write the td and header to the database if err := hc.WriteTd(hash, number, externTd); err != nil { - log.Crit(fmt.Sprintf("failed to write header total difficulty: %v", err)) + log.Crit("Failed to write header total difficulty", "err", err) } if err := WriteHeader(hc.chainDb, header); err != nil { - log.Crit(fmt.Sprintf("failed to write header contents: %v", err)) + log.Crit("Failed to write header content", "err", err) } - // If the total difficulty is higher than our known, add it to the canonical chain // Second clause in the if statement reduces the vulnerability to selfish mining. 
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf @@ -185,15 +184,13 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er headNumber = headHeader.Number.Uint64() - 1 headHeader = hc.GetHeader(headHash, headNumber) } - // Extend the canonical chain with the new header if err := WriteCanonicalHash(hc.chainDb, hash, number); err != nil { - log.Crit(fmt.Sprintf("failed to insert header number: %v", err)) + log.Crit("Failed to insert header number", "err", err) } if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil { - log.Crit(fmt.Sprintf("failed to insert head header hash: %v", err)) + log.Crit("Failed to insert head header hash", "err", err) } - hc.currentHeaderHash, hc.currentHeader = hash, types.CopyHeader(header) status = CanonStatTy @@ -227,11 +224,11 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w for i := 1; i < len(chain); i++ { if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 || chain[i].ParentHash != chain[i-1].Hash() { // Chain broke ancestry, log a messge (programming error) and skip insertion - failure := fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", - i-1, chain[i-1].Number.Uint64(), chain[i-1].Hash().Bytes()[:4], i, chain[i].Number.Uint64(), chain[i].Hash().Bytes()[:4], chain[i].ParentHash.Bytes()[:4]) + log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", chain[i].Hash(), + "parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", chain[i-1].Hash()) - log.Error(fmt.Sprint(failure.Error())) - return 0, failure + return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number, + chain[i-1].Hash().Bytes()[:4], i, chain[i].Number, chain[i].Hash().Bytes()[:4], chain[i].ParentHash[:4]) } } // Collect some import statistics to report on @@ -316,7 +313,7 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w for i, header := range chain { // Short circuit insertion if shutting down if hc.procInterrupt() { - log.Debug(fmt.Sprint("premature abort during header chain processing")) + log.Debug("Premature abort during headers processing") break } hash := header.Hash() @@ -332,13 +329,9 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, checkFreq int, w stats.processed++ } // Report some public statistics so the user has a clue what's going on - first, last := chain[0], chain[len(chain)-1] - - ignored := "" - if stats.ignored > 0 { - ignored = fmt.Sprintf(" (%d ignored)", stats.ignored) - } - log.Info(fmt.Sprintf("imported %4d headers%s in %9v. #%v [%x… / %x…]", stats.processed, ignored, common.PrettyDuration(time.Since(start)), last.Number, first.Hash().Bytes()[:4], last.Hash().Bytes()[:4])) + last := chain[len(chain)-1] + log.Info("Imported new block headers", "count", stats.processed, "number", last.Number, "hash", last.Hash(), + "elapsed", common.PrettyDuration(time.Since(start)), "ignored", stats.ignored) return 0, nil } @@ -445,7 +438,7 @@ func (hc *HeaderChain) CurrentHeader() *types.Header { // SetCurrentHeader sets the current head header of the canonical chain. 
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) { if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil { - log.Crit(fmt.Sprintf("failed to insert head header hash: %v", err)) + log.Crit("Failed to insert head header hash", "err", err) } hc.currentHeader = head hc.currentHeaderHash = head.Hash() @@ -488,7 +481,7 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) { hc.currentHeaderHash = hc.currentHeader.Hash() if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash); err != nil { - log.Crit(fmt.Sprintf("failed to reset head header hash: %v", err)) + log.Crit("Failed to reset head header hash", "err", err) } } diff --git a/core/state_processor.go b/core/state_processor.go index 72c6e6c37..3edc042a3 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -17,14 +17,12 @@ package core import ( - "fmt" "math/big" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" ) @@ -122,8 +120,6 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, gp *GasPool, s receipt.Logs = statedb.GetLogs(tx.Hash()) receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) - log.Debug(fmt.Sprint(receipt)) - return receipt, gas, err } diff --git a/core/state_transition.go b/core/state_transition.go index 98a24af2b..fb7518647 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -18,7 +18,6 @@ package core import ( "errors" - "fmt" "math/big" "github.com/ethereum/go-ethereum/common" @@ -256,7 +255,7 @@ func (self *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *b ret, self.gas, vmerr = evm.Call(sender, self.to().Address(), self.data, self.gas, self.value) } if vmerr != nil { - log.Debug(fmt.Sprint("vm returned with error:", err)) + log.Debug("VM returned with error", "err", err) // The only possible consensus-error would be if there wasn't // sufficient balance to make the transfer happen. The first // balance transfer may never fail. 
diff --git a/core/tx_pool.go b/core/tx_pool.go index b0a8eea0f..e52e59a4f 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -162,15 +162,10 @@ func (pool *TxPool) eventLoop() { func (pool *TxPool) resetState() { currentState, err := pool.currentState() if err != nil { - log.Error(fmt.Sprintf("Failed to get current state: %v", err)) + log.Error("Failed reset txpool state", "err", err) return } - managedState := state.ManageState(currentState) - if err != nil { - log.Error(fmt.Sprintf("Failed to get managed state: %v", err)) - return - } - pool.pendingState = managedState + pool.pendingState = state.ManageState(currentState) // validate the pool of pending transactions, this will remove // any transactions that have been included in the block or @@ -192,7 +187,8 @@ func (pool *TxPool) Stop() { pool.events.Unsubscribe() close(pool.quit) pool.wg.Wait() - log.Info(fmt.Sprint("Transaction pool stopped")) + + log.Info("Transaction pool stopped") } func (pool *TxPool) State() *state.ManagedState { @@ -323,24 +319,19 @@ func (pool *TxPool) add(tx *types.Transaction) error { // If the transaction is already known, discard it hash := tx.Hash() if pool.all[hash] != nil { - return fmt.Errorf("Known transaction: %x", hash[:4]) + log.Trace("Discarding already known transaction", "hash", hash) + return fmt.Errorf("known transaction: %x", hash) } // Otherwise ensure basic validation passes and queue it up if err := pool.validateTx(tx); err != nil { + log.Trace("Discarding invalid transaction", "hash", hash, "err", err) invalidTxCounter.Inc(1) return err } pool.enqueueTx(hash, tx) // Print a log message if low enough level is set - log.Debug("", "msg", log.Lazy{Fn: func() string { - rcpt := "[NEW_CONTRACT]" - if to := tx.To(); to != nil { - rcpt = common.Bytes2Hex(to[:4]) - } - from, _ := types.Sender(pool.signer, tx) // from already verified during tx validation - return fmt.Sprintf("(t) 0x%x => %s (%v) %x\n", from[:4], rcpt, tx.Value(), hash) - }}) + log.Debug("Pooled new transaction", "hash", hash, "from", log.Lazy{Fn: func() common.Address { from, _ := types.Sender(pool.signer, tx); return from }}, "to", tx.To()) return nil } @@ -409,7 +400,6 @@ func (pool *TxPool) Add(tx *types.Transaction) error { if err != nil { return err } - pool.promoteExecutables(state) return nil @@ -420,19 +410,21 @@ func (pool *TxPool) AddBatch(txs []*types.Transaction) error { pool.mu.Lock() defer pool.mu.Unlock() + // Add the batch of transaction, tracking the accepted ones + added := 0 for _, tx := range txs { - if err := pool.add(tx); err != nil { - log.Debug(fmt.Sprint("tx error:", err)) + if err := pool.add(tx); err == nil { + added++ } } - - state, err := pool.currentState() - if err != nil { - return err + // Only reprocess the internal state if something was actually added + if added > 0 { + state, err := pool.currentState() + if err != nil { + return err + } + pool.promoteExecutables(state) } - - pool.promoteExecutables(state) - return nil } @@ -513,33 +505,29 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) { for addr, list := range pool.queue { // Drop all transactions that are deemed too old (low nonce) for _, tx := range list.Forward(state.GetNonce(addr)) { - log.Debug("", "msg", log.Lazy{Fn: func() string { - return fmt.Sprintf("Removed old queued transaction: %v", tx) - }}) - delete(pool.all, tx.Hash()) + hash := tx.Hash() + log.Debug("Removed old queued transaction", "hash", hash) + delete(pool.all, hash) } // Drop all transactions that are too costly (low balance) drops, _ := 
list.Filter(state.GetBalance(addr)) for _, tx := range drops { - log.Debug("", "msg", log.Lazy{Fn: func() string { - return fmt.Sprintf("Removed unpayable queued transaction: %v", tx) - }}) - delete(pool.all, tx.Hash()) + hash := tx.Hash() + log.Debug("Removed unpayable queued transaction", "hash", hash) + delete(pool.all, hash) queuedNofundsCounter.Inc(1) } // Gather all executable transactions and promote them for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) { - log.Debug("", "msg", log.Lazy{Fn: func() string { - return fmt.Sprintf("Promoting queued transaction: %v", tx) - }}) - pool.promoteTx(addr, tx.Hash(), tx) + hash := tx.Hash() + log.Debug("Promoting queued transaction", "hash", hash) + pool.promoteTx(addr, hash, tx) } // Drop all transactions over the allowed limit for _, tx := range list.Cap(int(maxQueuedPerAccount)) { - log.Debug("", "msg", log.Lazy{Fn: func() string { - return fmt.Sprintf("Removed cap-exceeding queued transaction: %v", tx) - }}) - delete(pool.all, tx.Hash()) + hash := tx.Hash() + log.Debug("Removed cap-exceeding queued transaction", "hash", hash) + delete(pool.all, hash) queuedRLCounter.Inc(1) } queued += uint64(list.Len()) @@ -650,25 +638,22 @@ func (pool *TxPool) demoteUnexecutables(state *state.StateDB) { // Drop all transactions that are deemed too old (low nonce) for _, tx := range list.Forward(nonce) { - log.Debug("", "msg", log.Lazy{Fn: func() string { - return fmt.Sprintf("Removed old pending transaction: %v", tx) - }}) - delete(pool.all, tx.Hash()) + hash := tx.Hash() + log.Debug("Removed old pending transaction", "hash", hash) + delete(pool.all, hash) } // Drop all transactions that are too costly (low balance), and queue any invalids back for later drops, invalids := list.Filter(state.GetBalance(addr)) for _, tx := range drops { - log.Debug("", "msg", log.Lazy{Fn: func() string { - return fmt.Sprintf("Removed unpayable pending transaction: %v", tx) - }}) - delete(pool.all, tx.Hash()) + hash := tx.Hash() + log.Debug("Removed unpayable pending transaction", "hash", hash) + delete(pool.all, hash) pendingNofundsCounter.Inc(1) } for _, tx := range invalids { - log.Debug("", "msg", log.Lazy{Fn: func() string { - return fmt.Sprintf("Demoting pending transaction: %v", tx) - }}) - pool.enqueueTx(tx.Hash(), tx) + hash := tx.Hash() + log.Debug("Demoting pending transaction", "hash", hash) + pool.enqueueTx(hash, tx) } // Delete the entire queue entry if it became empty. 
if list.Empty() { diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 0479adfda..e87640d02 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -18,7 +18,6 @@ package vm import ( "crypto/sha256" - "fmt" "math/big" "github.com/ethereum/go-ethereum/common" @@ -83,7 +82,7 @@ func (c *ecrecover) Run(in []byte) []byte { pubKey, err := crypto.Ecrecover(in[:32], append(in[64:128], v)) // make sure the public key is a valid one if err != nil { - log.Trace(fmt.Sprint("ECRECOVER error: ", err)) + log.Trace("ECRECOVER failed", "err", err) return nil } diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 41f6a53f8..7d12ebc05 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -123,13 +123,9 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e } }() - log.Debug("", "msg", log.Lazy{Fn: func() string { - return fmt.Sprintf("evm running: %x\n", codehash[:4]) - }}) + log.Debug("EVM running contract", "hash", codehash[:]) tstart := time.Now() - defer log.Debug("", "msg", log.Lazy{Fn: func() string { - return fmt.Sprintf("evm done: %x. time: %v\n", codehash[:4], time.Since(tstart)) - }}) + defer log.Debug("EVM finished running contract", "hash", codehash[:], "elapsed", time.Since(tstart)) // The Interpreter main run loop (contextual). This loop runs until either an // explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during |
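One detail retained from the old code, visible in the tx_pool hunk above, is that values which are expensive to compute (recovering a transaction's sender from its signature) are still wrapped in log.Lazy, so the work only happens if the record is actually emitted at Debug verbosity. A sketch of that idiom, assuming the same log package; recoverSender is a hypothetical stand-in for types.Sender:

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

// recoverSender stands in for the ECDSA sender recovery done by types.Sender;
// it is deliberately trivial here.
func recoverSender() common.Address {
	return common.Address{}
}

func logPooledTx(hash common.Hash, to *common.Address) {
	// log.Lazy defers evaluation of Fn until the logger actually emits the
	// record, so nodes running above Debug never pay the recovery cost.
	log.Debug("Pooled new transaction", "hash", hash,
		"from", log.Lazy{Fn: recoverSender},
		"to", to)
}
```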