path: root/eth/downloader/queue.go
author    Nick Johnson <arachnid@notdot.net>    2017-06-28 20:25:08 +0800
committer Nick Johnson <arachnid@notdot.net>    2017-06-29 19:49:18 +0800
commit    ae11545bc5fc0fcf4340d9f33d488eaf81d8bb2d (patch)
tree      6ed575d314c8781e8e258a63a8c2d88753f896a1 /eth/downloader/queue.go
parent    055095798911e3c6f2e210220cb7ece1b71f6517 (diff)
eth, les: Refactor downloader peer to use structs
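The hunks below only swap *peer for *peerConnection in the queue's
signatures; the struct itself is defined elsewhere in the downloader.
As a rough illustration of the wrapper pattern the rename implies (the
fields here are assumptions, not the commit's actual definition):

package downloader

import "sync"

// Sketch only: peerConnection wraps a live peer with downloader-side
// bookkeeping, replacing the bare *peer previously threaded through
// the queue. All fields below are illustrative assumptions.
type peerConnection struct {
	id   string       // unique identifier of the remote peer (assumed field)
	lock sync.RWMutex // guards the mutable state below (assumed field)

	headerIdle  int32 // header fetch activity state (hypothetical field)
	blockIdle   int32 // block body fetch activity state (hypothetical field)
	receiptIdle int32 // receipt fetch activity state (hypothetical field)
}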
Diffstat (limited to 'eth/downloader/queue.go')
-rw-r--r--    eth/downloader/queue.go    10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 8a7735d67..6926f1d8c 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -41,7 +41,7 @@ var (
// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
- Peer *peer // Peer to which the request was sent
+ Peer *peerConnection // Peer to which the request was sent
From uint64 // [eth/62] Requested chain element index (used for skeleton fills only)
Hashes map[common.Hash]int // [eth/61] Requested hashes with their insertion index (priority)
Headers []*types.Header // [eth/62] Requested headers, sorted by request order
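Using only the fields visible in this hunk, a hedged sketch of how a
skeleton-fill request might be assembled (newSkeletonFillRequest is a
hypothetical helper; the commit builds its requests inline instead):

// Hypothetical helper, not part of the commit.
func newSkeletonFillRequest(p *peerConnection, from uint64) *fetchRequest {
	return &fetchRequest{
		Peer: p,    // peer the request is assigned to
		From: from, // [eth/62] first header index of the skeleton gap
	}
}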
@@ -391,7 +391,7 @@ func (q *queue) countProcessableItems() int {
// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
-func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest {
+func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
q.lock.Lock()
defer q.lock.Unlock()
@@ -432,7 +432,7 @@ func (q *queue) ReserveHeaders(p *peer, count int) *fetchRequest {
// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Beside the next batch of needed fetches, it also
// returns a flag whether empty blocks were queued requiring processing.
-func (q *queue) ReserveBodies(p *peer, count int) (*fetchRequest, bool, error) {
+func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, error) {
isNoop := func(header *types.Header) bool {
return header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash
}
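The isNoop closure marks headers whose blocks provably have empty bodies
(empty transaction trie root and empty uncle hash), so no body fetch is
ever scheduled for them; ReserveReceipts below applies the same idea to
ReceiptHash. The check as a standalone function, assuming the upstream
import path:

import "github.com/ethereum/go-ethereum/core/types"

// bodyIsNoop reports whether a header implies an empty block body:
// nothing needs downloading when both the transaction trie root and
// the uncle-list hash are the well-known empty values.
func bodyIsNoop(header *types.Header) bool {
	return header.TxHash == types.EmptyRootHash &&
		header.UncleHash == types.EmptyUncleHash
}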
@@ -445,7 +445,7 @@ func (q *queue) ReserveBodies(p *peer, count int) (*fetchRequest, bool, error) {
// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Beside the next batch of needed fetches, it
// also returns a flag whether empty receipts were queued requiring importing.
-func (q *queue) ReserveReceipts(p *peer, count int) (*fetchRequest, bool, error) {
+func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, error) {
isNoop := func(header *types.Header) bool {
return header.ReceiptHash == types.EmptyRootHash
}
@@ -462,7 +462,7 @@ func (q *queue) ReserveReceipts(p *peer, count int) (*fetchRequest, bool, error)
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
-func (q *queue) reserveHeaders(p *peer, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
+func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
pendPool map[string]*fetchRequest, donePool map[common.Hash]struct{}, isNoop func(*types.Header) bool) (*fetchRequest, bool, error) {
// Short circuit if the pool has been depleted, or if the peer's already
// downloading something (sanity check not to corrupt state)
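All three Reserve* entry points funnel into this shared helper, which
drains headers from the priority queue into a per-peer pending pool.
A simplified sketch of that reservation loop, assuming a peerConnection
carries an id string and eliding the noop short-circuit and error paths
of the real method:

// Sketch of the reservation pattern only; not the commit's code.
func reserve(p *peerConnection, count int, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest) *fetchRequest {
	// Refuse new work if the peer already has an in-flight request.
	if _, busy := pendPool[p.id]; busy {
		return nil
	}
	send := make([]*types.Header, 0, count)
	for len(send) < count && !taskQueue.Empty() {
		// Highest-priority task first; prque stores bare interface values.
		send = append(send, taskQueue.PopItem().(*types.Header))
	}
	if len(send) == 0 {
		return nil
	}
	request := &fetchRequest{Peer: p, Headers: send}
	pendPool[p.id] = request // mark the peer as busy with this batch
	return request
}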