author    | Péter Szilágyi <peterke@gmail.com> | 2018-10-05 16:03:38 +0800
committer | GitHub <noreply@github.com> | 2018-10-05 16:03:38 +0800
commit    | 5d3b7bb023d0e21d597ef2febc1bad03efa90050 (patch)
tree      | ef2ec86111a6c24843f0fbaf1660097c640c14c4
parent    | 092df3ab59faae27b11ec737fffcadb3bc8ea636 (diff)
parent    | 6ee3b26f447459d3f3a316dbb572e461a273e193 (diff)
download  | dexon-5d3b7bb023d0e21d597ef2febc1bad03efa90050.tar.gz
          | dexon-5d3b7bb023d0e21d597ef2febc1bad03efa90050.tar.zst
          | dexon-5d3b7bb023d0e21d597ef2febc1bad03efa90050.zip
Merge pull request #17839 from karalabe/downloader-invalid-hash-chain-fix
eth/downloader: fix invalid hash chain error due to head mini reorg
-rw-r--r-- | eth/downloader/downloader.go      | 39
-rw-r--r-- | eth/downloader/downloader_test.go |  4
2 files changed, 40 insertions, 3 deletions
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 805195034..9cfc8a978 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -60,6 +60,9 @@ var (
 	maxHeadersProcess = 2048 // Number of header download results to import at once into the chain
 	maxResultsProcess = 2048 // Number of content download results to import at once into the chain
 
+	reorgProtThreshold   = 48 // Threshold number of recent blocks to disable mini reorg protection
+	reorgProtHeaderDelay = 2  // Number of headers to delay delivering to cover mini reorgs
+
 	fsHeaderCheckFrequency = 100  // Verification frequency of the downloaded headers during fast sync
 	fsHeaderSafetyNet      = 2048 // Number of headers to discard in case a chain violation is detected
 	fsHeaderForceVerify    = 24   // Number of headers to verify before and after the pivot to accept it
@@ -847,6 +850,30 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64)
 				}
 				headers = filled[proced:]
 				from += uint64(proced)
+			} else {
+				// If we're closing in on the chain head, but haven't yet reached it, delay
+				// the last few headers so mini reorgs on the head don't cause invalid hash
+				// chain errors.
+				if n := len(headers); n > 0 {
+					// Retrieve the current head we're at
+					head := uint64(0)
+					if d.mode == LightSync {
+						head = d.lightchain.CurrentHeader().Number.Uint64()
+					} else {
+						head = d.blockchain.CurrentFastBlock().NumberU64()
+						if full := d.blockchain.CurrentBlock().NumberU64(); head < full {
+							head = full
+						}
+					}
+					// If the head is way older than this batch, delay the last few headers
+					if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
+						delay := reorgProtHeaderDelay
+						if delay > n {
+							delay = n
+						}
+						headers = headers[:n-delay]
+					}
+				}
 			}
 			// Insert all the new headers and fetch the next batch
 			if len(headers) > 0 {
@@ -857,8 +884,18 @@ func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64)
 				}
 					return errCancelHeaderFetch
 				}
 				from += uint64(len(headers))
+				getHeaders(from)
+			} else {
+				// No headers delivered, or all of them being delayed, sleep a bit and retry
+				p.log.Trace("All headers delayed, waiting")
+				select {
+				case <-time.After(fsHeaderContCheck):
+					getHeaders(from)
+					continue
+				case <-d.cancelCh:
+					return errCancelHeaderFetch
+				}
 			}
-			getHeaders(from)
 
 		case <-timeout.C:
 			if d.dropPeer == nil {
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index d1a9a8694..dad626e89 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -744,7 +744,7 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
 		tester.downloader.queue.lock.Unlock()
 		tester.lock.Unlock()
 
-		if cached == blockCacheItems || retrieved+cached+frozen == targetBlocks+1 {
+		if cached == blockCacheItems || cached == blockCacheItems-reorgProtHeaderDelay || retrieved+cached+frozen == targetBlocks+1 || retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {
 			break
 		}
 	}
@@ -754,7 +754,7 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) {
 	tester.lock.RLock()
 	retrieved = len(tester.ownBlocks)
 	tester.lock.RUnlock()
-	if cached != blockCacheItems && retrieved+cached+frozen != targetBlocks+1 {
+	if cached != blockCacheItems && cached != blockCacheItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {
 		t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheItems, retrieved, frozen, targetBlocks+1)
 	}
 	// Permit the blocked blocks to import
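For orientation, the reorg protection added in the first downloader.go hunk boils down to: if a delivered header batch ends more than reorgProtThreshold blocks past the local head, hold back the last reorgProtHeaderDelay headers and request them again on the next round. The sketch below is a minimal, hypothetical illustration of just that trimming step; delayHeaders and the plain uint64 slice are inventions for the example (the actual code works on *types.Header values and the downloader's chain views), not part of the patch.

```go
package main

import "fmt"

// Constants mirroring the values introduced by the patch.
const (
	reorgProtThreshold   = 48 // within this many blocks of our own head, protection is disabled
	reorgProtHeaderDelay = 2  // number of trailing headers to hold back
)

// delayHeaders is an illustrative stand-alone helper: given the block numbers
// of a delivered header batch and the local head, it trims the last
// reorgProtHeaderDelay entries whenever the batch ends well past the head,
// which is the core of the mini-reorg protection above.
func delayHeaders(headerNumbers []uint64, head uint64) []uint64 {
	n := len(headerNumbers)
	if n == 0 {
		return headerNumbers
	}
	// Only delay when the batch reaches beyond the reorg-protection window.
	if head+uint64(reorgProtThreshold) < headerNumbers[n-1] {
		delay := reorgProtHeaderDelay
		if delay > n {
			delay = n
		}
		return headerNumbers[:n-delay]
	}
	return headerNumbers
}

func main() {
	// Far from the head: the last two headers are withheld until the next fetch.
	fmt.Println(delayHeaders([]uint64{101, 102, 103, 104, 105}, 10)) // [101 102 103]
	// Close to the head: nothing is withheld, so sync can still reach the tip.
	fmt.Println(delayHeaders([]uint64{101, 102, 103, 104, 105}, 100)) // [101 102 103 104 105]
}
```

Because the trimming is skipped within reorgProtThreshold blocks of the head, the final blocks are never withheld indefinitely; this is also why the throttling test now additionally accepts blockCacheItems-reorgProtHeaderDelay and targetBlocks+1-reorgProtHeaderDelay as valid counts.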