author     Péter Szilágyi <peterke@gmail.com>  2017-03-02 21:06:16 +0800
committer  Jeffrey Wilcke <jeffrey@ethereum.org>  2017-03-02 21:06:16 +0800
commit     9184249b393e4e332ae6a2f5d774880a88a9bfd6 (patch)
tree       7788ce54cb04d1af4fe03ab3c2447354bcaac3cc /eth/fetcher
parent     82e7c1d1241737fd0ae9b25e0f20857b8597b148 (diff)
Logger updates 3 (#3730)
* accounts, cmd, eth, ethdb: port logs over to new system
* ethdb: drop concept of cache distribution between dbs
* eth: fix some log nitpicks to make them nicer
Diffstat (limited to 'eth/fetcher')
-rw-r--r--  eth/fetcher/fetcher.go  58
1 file changed, 19 insertions, 39 deletions
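For orientation, the pattern applied throughout the diff below: the old calls built the whole message eagerly with fmt.Sprintf (sometimes wrapped in log.Lazy to defer that cost), while the new calls pass a constant message plus key-value pairs to go-ethereum's log package. A minimal, self-contained sketch of the two styles, using placeholder values rather than anything taken from the diff:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	headers := make([]string, 3) // placeholder stand-in for []*types.Header

	// Old style: one pre-formatted string; the variable data is baked into the text.
	log.Trace(fmt.Sprintf("[eth/62] filtering %d headers", len(headers)))

	// New style: constant message plus key-value context; the configured handler
	// decides how (and whether) to render it.
	log.Trace("Filtering headers", "headers", len(headers))
}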
diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/fetcher.go
index 33f9dbe93..d82f4f3e6 100644
--- a/eth/fetcher/fetcher.go
+++ b/eth/fetcher/fetcher.go
@@ -19,7 +19,6 @@ package fetcher
import (
"errors"
- "fmt"
"math/rand"
"time"
@@ -78,8 +77,8 @@ type announce struct {
origin string // Identifier of the peer originating the notification
- fetchHeader headerRequesterFn // [eth/62] Fetcher function to retrieve the header of an announced block
- fetchBodies bodyRequesterFn // [eth/62] Fetcher function to retrieve the body of an announced block
+ fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
+ fetchBodies bodyRequesterFn // Fetcher function to retrieve the body of an announced block
}
// headerFilterTask represents a batch of headers needing fetcher filtering.
@@ -220,7 +219,7 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
// returning those that should be handled differently.
func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*types.Header {
- log.Trace(fmt.Sprintf("[eth/62] filtering %d headers", len(headers)))
+ log.Trace("Filtering headers", "headers", len(headers))
// Send the filter channel to the fetcher
filter := make(chan *headerFilterTask)
@@ -248,7 +247,7 @@ func (f *Fetcher) FilterHeaders(headers []*types.Header, time time.Time) []*type
// FilterBodies extracts all the block bodies that were explicitly requested by
// the fetcher, returning those that should be handled differently.
func (f *Fetcher) FilterBodies(transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
- log.Trace(fmt.Sprintf("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles)))
+ log.Trace("Filtering bodies", "txs", len(transactions), "uncles", len(uncles))
// Send the filter channel to the fetcher
filter := make(chan *bodyFilterTask)
@@ -323,14 +322,14 @@ func (f *Fetcher) loop() {
count := f.announces[notification.origin] + 1
if count > hashLimit {
- log.Debug(fmt.Sprintf("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit))
+ log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
propAnnounceDOSMeter.Mark(1)
break
}
// If we have a valid block number, check that it's potentially useful
if notification.number > 0 {
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
- log.Debug(fmt.Sprintf("[eth/62] Peer %s: discarded announcement #%d [%x…], distance %d", notification.origin, notification.number, notification.hash[:4], dist))
+ log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
propAnnounceDropMeter.Mark(1)
break
}
@@ -380,16 +379,8 @@ func (f *Fetcher) loop() {
}
// Send out all block header requests
for peer, hashes := range request {
- if len(hashes) > 0 {
- log.Trace("", "msg", log.Lazy{Fn: func() string {
- list := "["
- for _, hash := range hashes {
- list += fmt.Sprintf("%x…, ", hash[:4])
- }
- list = list[:len(list)-2] + "]"
- return fmt.Sprintf("[eth/62] Peer %s: fetching headers %s", peer, list)
- }})
- }
+ log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)
+
// Create a closure of the fetch and schedule in on a new thread
fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
go func() {
@@ -422,17 +413,8 @@ func (f *Fetcher) loop() {
}
// Send out all block body requests
for peer, hashes := range request {
- if len(hashes) > 0 {
- log.Trace("", "msg", log.Lazy{Fn: func() string {
- list := "["
- for _, hash := range hashes {
- list += fmt.Sprintf("%x…, ", hash[:4])
- }
- list = list[:len(list)-2] + "]"
+ log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)
- return fmt.Sprintf("[eth/62] Peer %s: fetching bodies %s", peer, list)
- }})
- }
// Create a closure of the fetch and schedule in on a new thread
if f.completingHook != nil {
f.completingHook(hashes)
@@ -465,7 +447,7 @@ func (f *Fetcher) loop() {
if announce := f.fetching[hash]; announce != nil && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
// If the delivered header does not match the promised number, drop the announcer
if header.Number.Uint64() != announce.number {
- log.Trace(fmt.Sprintf("[eth/62] Peer %s: invalid block number for [%x…]: announced %d, provided %d", announce.origin, header.Hash().Bytes()[:4], announce.number, header.Number.Uint64()))
+ log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
f.dropPeer(announce.origin)
f.forgetHash(hash)
continue
@@ -477,7 +459,7 @@ func (f *Fetcher) loop() {
// If the block is empty (header only), short circuit into the final import queue
if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
- log.Trace(fmt.Sprintf("[eth/62] Peer %s: block #%d [%x…] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]))
+ log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
block := types.NewBlockWithHeader(header)
block.ReceivedAt = task.time
@@ -489,7 +471,7 @@ func (f *Fetcher) loop() {
// Otherwise add to the list of blocks needing completion
incomplete = append(incomplete, announce)
} else {
- log.Trace(fmt.Sprintf("[eth/62] Peer %s: block #%d [%x…] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Bytes()[:4]))
+ log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
f.forgetHash(hash)
}
} else {
@@ -620,14 +602,14 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
// Ensure the peer isn't DOSing us
count := f.queues[peer] + 1
if count > blockLimit {
- log.Debug(fmt.Sprintf("Peer %s: discarded block #%d [%x…], exceeded allowance (%d)", peer, block.NumberU64(), hash.Bytes()[:4], blockLimit))
+ log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
propBroadcastDOSMeter.Mark(1)
f.forgetHash(hash)
return
}
// Discard any past or too distant blocks
if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
- log.Debug(fmt.Sprintf("Peer %s: discarded block #%d [%x…], distance %d", peer, block.NumberU64(), hash.Bytes()[:4], dist))
+ log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist)
propBroadcastDropMeter.Mark(1)
f.forgetHash(hash)
return
@@ -644,9 +626,7 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
if f.queueChangeHook != nil {
f.queueChangeHook(op.block.Hash(), true)
}
- log.Debug("", "msg", log.Lazy{Fn: func() string {
- return fmt.Sprintf("Peer %s: queued block #%d [%x…], total %v", peer, block.NumberU64(), hash.Bytes()[:4], f.queue.Size())
- }})
+ log.Debug("Queued propagated block", "peer", peer, "number", block.Number(), "hash", hash, "queued", f.queue.Size())
}
}
@@ -657,14 +637,14 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
hash := block.Hash()
// Run the import on a new thread
- log.Debug(fmt.Sprintf("Peer %s: importing block #%d [%x…]", peer, block.NumberU64(), hash[:4]))
+ log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
go func() {
defer func() { f.done <- hash }()
// If the parent's unknown, abort insertion
parent := f.getBlock(block.ParentHash())
if parent == nil {
- log.Debug(fmt.Sprintf("Peer %s: parent [%x…] of block #%d [%x…] unknown", peer, block.ParentHash().Bytes()[:4], block.NumberU64(), hash[:4]))
+ log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
return
}
// Quickly validate the header and propagate the block if it passes
@@ -679,13 +659,13 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
default:
// Something went very wrong, drop the peer
- log.Debug(fmt.Sprintf("Peer %s: block #%d [%x…] verification failed: %v", peer, block.NumberU64(), hash[:4], err))
+ log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
f.dropPeer(peer)
return
}
// Run the actual import and log any issues
if _, err := f.insertChain(types.Blocks{block}); err != nil {
- log.Warn(fmt.Sprintf("Peer %s: block #%d [%x…] import failed: %v", peer, block.NumberU64(), hash[:4], err))
+ log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
return
}
// If import succeeded, broadcast the block
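Note that Trace and Debug records are dropped by the default handler, so the new structured lines above only show up once verbosity is raised. A minimal sketch, assuming the go-ethereum log package of roughly this vintage, of routing everything down to Trace level through a logfmt stream handler; the peer, number, and hash values are illustrative only:

package main

import (
	"os"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	// Accept records down to Trace level and write them as logfmt to stderr,
	// so calls like log.Trace("Fetching scheduled headers", ...) become visible.
	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace,
		log.StreamHandler(os.Stderr, log.LogfmtFormat())))

	// Illustrative values, not taken from the diff.
	log.Trace("Importing propagated block", "peer", "example-peer", "number", 1, "hash", "0xabcd")
}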