author     Péter Szilágyi <peterke@gmail.com>  2018-10-04 21:36:59 +0800
committer  Péter Szilágyi <peterke@gmail.com>  2018-10-05 15:45:02 +0800
commit     6ee3b26f447459d3f3a316dbb572e461a273e193
tree       39629411ceb3326b70313d3e3eba4440e30d0ace
parent     89a32451aeb418db3fd5d9c427a0c29fddb1e85b
eth/downloader: fix invalid hash chain error due to head mini reorg
-rw-r--r--  eth/downloader/downloader.go       39
-rw-r--r--  eth/downloader/downloader_test.go   4
2 files changed, 40 insertions, 3 deletions
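
The core of the fix is easiest to see in isolation: when a delivered header batch reaches more than reorgProtThreshold blocks past the local head, the downloader withholds the last reorgProtHeaderDelay headers, so a mini reorg at the remote tip cannot turn the just-imported tip headers into an invalid hash chain. The standalone sketch below mirrors that trimming logic under assumed names (delayHeaders, localHead and batch are illustrative, and headers are reduced to plain block numbers); it is not the downloader's actual API.

package main

import "fmt"

const (
	reorgProtThreshold   = 48 // only trim batches that reach well past the local head
	reorgProtHeaderDelay = 2  // how many trailing headers to hold back
)

// delayHeaders drops the last few block numbers from a batch when the batch
// extends more than reorgProtThreshold blocks beyond the local head, mirroring
// the trimming added to fetchHeaders in this commit.
func delayHeaders(localHead uint64, batch []uint64) []uint64 {
	n := len(batch)
	if n == 0 {
		return batch
	}
	if localHead+reorgProtThreshold < batch[n-1] {
		delay := reorgProtHeaderDelay
		if delay > n {
			delay = n
		}
		return batch[:n-delay]
	}
	// Close to the head: deliver everything so the sync can actually finish.
	return batch
}

func main() {
	// Far behind the batch: the last two headers are withheld until a later round.
	fmt.Println(delayHeaders(100, []uint64{200, 201, 202, 203})) // [200 201]
	// Within the threshold: nothing is delayed.
	fmt.Println(delayHeaders(180, []uint64{201, 202, 203, 204})) // [201 202 203 204]
}

In the real change the head comes from the light, fast or full chain depending on sync mode, and the withheld headers are simply re-requested with a later batch.
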
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 805195034..9cfc8a978 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -60,6 +60,9 @@ var (
 	maxHeadersProcess = 2048 // Number of header download results to import at once into the chain
 	maxResultsProcess = 2048 // Number of content download results to import at once into the chain
 
+	reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection
+	reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs
+
 	fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync
 	fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected
 	fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it
@@ -847,6 +850,30 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64)
 				}
 				headers = filled[proced:]
 				from += uint64(proced)
+			} else {
+				// If we're closing in on the chain head, but haven't yet reached it, delay
+				// the last few headers so mini reorgs on the head don't cause invalid hash
+				// chain errors.
+				if n := len(headers); n > 0 {
+					// Retrieve the current head we're at
+					head := uint64(0)
+					if d.mode == LightSync {
+						head = d.lightchain.CurrentHeader().Number.Uint64()
+					} else {
+						head = d.blockchain.CurrentFastBlock().NumberU64()
+						if full := d.blockchain.CurrentBlock().NumberU64(); head < full {
+							head = full
+						}
+					}
+					// If the head is way older than this batch, delay the last few headers
+					if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
+						delay := reorgProtHeaderDelay
+						if delay > n {
+							delay = n
+						}
+						headers = headers[:n-delay]
+					}
+				}
 			}
 			// Insert all the new headers and fetch the next batch
 			if len(headers) > 0 {
@@ -857,8 +884,18 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64)
 					return errCancelHeaderFetch
 				}
 				from += uint64(len(headers))
+				getHeaders(from)
+			} else {
+				// No headers delivered, or all of them being delayed, sleep a bit and retry
+				p.log.Trace("All headers delayed, waiting")
+				select {
+				case <-time.After(fsHeaderContCheck):
+					getHeaders(from)
+					continue
+				case <-d.cancelCh:
+					return errCancelHeaderFetch
+				}
 			}
-			getHeaders(from)
 
 		case <-timeout.C:
 			if d.dropPeer == nil {
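
Trimming can leave a delivery with nothing left to insert, which the old code would have pushed straight into the next getHeaders call. The rough standalone sketch below shows the shape of the new retry path (fetchFrom, retryInterval and the fetch callback are assumed names, not the downloader's real API): an empty batch now only causes a short pause before the same offset is requested again, unless the sync gets cancelled.

package main

import (
	"fmt"
	"time"
)

// retryInterval stands in for fsHeaderContCheck in the real downloader.
const retryInterval = 50 * time.Millisecond

// fetchFrom keeps requesting header batches starting at from until target is
// reached, backing off and retrying whenever a batch comes back empty
// (nothing delivered, or everything delayed).
func fetchFrom(from, target uint64, fetch func(uint64) []uint64, cancel <-chan struct{}) error {
	for from <= target {
		if batch := fetch(from); len(batch) > 0 {
			from += uint64(len(batch))
			continue
		}
		// No headers delivered, or all of them delayed: wait a bit and retry.
		select {
		case <-time.After(retryInterval):
		case <-cancel:
			return fmt.Errorf("header fetch cancelled at %d", from)
		}
	}
	return nil
}

func main() {
	calls := 0
	fetch := func(from uint64) []uint64 {
		calls++
		if calls == 1 {
			return nil // first delivery fully delayed, forcing one retry
		}
		return []uint64{from, from + 1}
	}
	// Finishes after a single retry instead of giving up on the empty batch.
	fmt.Println(fetchFrom(1, 4, fetch, make(chan struct{}))) // <nil>
}

In the hunk above the pause is fsHeaderContCheck and cancellation comes from d.cancelCh.
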
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index d1a9a8694..dad626e89 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -744,7 +744,7 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
 			tester.downloader.queue.lock.Unlock()
 			tester.lock.Unlock()
 
-			if cached == blockCacheItems || retrieved+cached+frozen == targetBlocks+1 {
+			if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
 				break
 			}
 		}
@@ -754,7 +754,7 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
 		tester.lock.RLock()
 		retrieved = len(tester.ownBlocks)
 		tester.lock.RUnlock()
-		if cached != blockCacheItems && retrieved+cached+frozen != targetBlocks+1 {
+		if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
 			t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
 		}
 		// Permit the blocked blocks to import
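
Since up to reorgProtHeaderDelay headers may now be deliberately withheld near the head, the throttling test accepts the block cache (or the overall retrieved total) falling short of its old exact targets by that amount. A hypothetical helper spelling out the widened acceptance condition, not code from the test itself:

package main

import "fmt"

const reorgProtHeaderDelay = 2 // mirrors the constant added in downloader.go

// withinHeaderDelay reports whether an observed count matches the expected one
// exactly or falls short by the headers the downloader may hold back.
func withinHeaderDelay(have, want int) bool {
	return have == want || have == want-reorgProtHeaderDelay
}

func main() {
	fmt.Println(withinHeaderDelay(128, 128)) // true: cache filled completely
	fmt.Println(withinHeaderDelay(126, 128)) // true: short by the delayed headers
	fmt.Println(withinHeaderDelay(120, 128)) // false: genuinely under-filled
}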