author     Péter Szilágyi <peterke@gmail.com>    2015-05-25 20:57:44 +0800
committer  Felix Lange <fjl@twurst.com>          2015-05-27 05:30:40 +0800
commit     f539ed1e6647cb27d873ae12e48db66df93df34a (patch)
tree       185ce6184d11553fadba78fcb99e7cb1f3d81add /p2p
parent     5076170f344afb970c1adc91429ced6ce93b5989 (diff)
p2p/discover: force refresh if the table is empty
Diffstat (limited to 'p2p')
-rw-r--r--    p2p/discover/table.go    54
1 file changed, 41 insertions(+), 13 deletions(-)
diff --git a/p2p/discover/table.go b/p2p/discover/table.go
index ee1d58cae..38bdea0ca 100644
--- a/p2p/discover/table.go
+++ b/p2p/discover/table.go
@@ -191,6 +191,12 @@ func (tab *Table) Lookup(targetID NodeID) []*Node {
result := tab.closest(target, bucketSize)
tab.mutex.Unlock()
+ // If the result set is empty, all nodes were dropped, refresh
+ if len(result.entries) == 0 {
+ tab.refresh()
+ return nil
+ }
+
for {
// ask the alpha closest nodes that we haven't asked yet
for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
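The guard added in this hunk short-circuits Lookup when every entry has been evicted: it kicks off a refresh and returns nil instead of iterating over an empty candidate set. Below is a minimal caller-side sketch of coping with that nil result; retryLookup and the one-second pause are assumptions for illustration, not code from the patch.

package discover

import "time"

// retryLookup is a hypothetical helper, not part of this patch: it shows how a
// caller might handle the new nil return. Lookup has already triggered
// tab.refresh() at that point, so the pause simply gives the reseeded nodes a
// moment to bond before trying again.
func retryLookup(tab *Table, target NodeID) []*Node {
	if nodes := tab.Lookup(target); len(nodes) > 0 {
		return nodes
	}
	time.Sleep(time.Second)
	return tab.Lookup(target)
}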
@@ -207,7 +213,7 @@ func (tab *Table) Lookup(targetID NodeID) []*Node {
tab.db.updateFindFails(n.ID, fails)
glog.V(logger.Detail).Infof("Bumping failures for %x: %d", n.ID[:8], fails)
- if fails > maxFindnodeFailures {
+ if fails >= maxFindnodeFailures {
glog.V(logger.Detail).Infof("Evacuating node %x: %d findnode failures", n.ID[:8], fails)
tab.del(n)
}
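The comparison change above makes eviction kick in at the threshold itself: a node is dropped once it accumulates maxFindnodeFailures failed findnode attempts, rather than needing one failure beyond the limit. A tiny standalone illustration of that boundary follows; the constant's value of 5 is an assumption about the package constant, which is not shown in this diff.

package main

import "fmt"

const maxFindnodeFailures = 5 // assumed value of the constant in table.go

func main() {
	fails := maxFindnodeFailures
	fmt.Println("old check (> ):", fails > maxFindnodeFailures)  // false: node survives one more round
	fmt.Println("new check (>=):", fails >= maxFindnodeFailures) // true: node is evicted right at the limit
}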
@@ -232,19 +238,41 @@ func (tab *Table) Lookup(targetID NodeID) []*Node {
return result.entries
}
-// refresh performs a lookup for a random target to keep buckets full.
+// refresh performs a lookup for a random target to keep buckets full, or seeds
+// the table if it is empty (initial bootstrap or discarded faulty peers).
func (tab *Table) refresh() {
- // The Kademlia paper specifies that the bucket refresh should
- // perform a refresh in the least recently used bucket. We cannot
- // adhere to this because the findnode target is a 512bit value
- // (not hash-sized) and it is not easily possible to generate a
- // sha3 preimage that falls into a chosen bucket.
- //
- // We perform a lookup with a random target instead.
- var target NodeID
- rand.Read(target[:])
- result := tab.Lookup(target)
- if len(result) == 0 {
+ seed := true
+
+ // If the discovery table is empty, seed with previously known nodes
+ tab.mutex.Lock()
+ for _, bucket := range tab.buckets {
+ if len(bucket.entries) > 0 {
+ seed = false
+ break
+ }
+ }
+ tab.mutex.Unlock()
+
+ // If the table is not empty, try to refresh using the live entries
+ if !seed {
+ // The Kademlia paper specifies that the bucket refresh should
+ // perform a refresh in the least recently used bucket. We cannot
+ // adhere to this because the findnode target is a 512bit value
+ // (not hash-sized) and it is not easily possible to generate a
+ // sha3 preimage that falls into a chosen bucket.
+ //
+ // We perform a lookup with a random target instead.
+ var target NodeID
+ rand.Read(target[:])
+
+ result := tab.Lookup(target)
+ if len(result) == 0 {
+ // Lookup failed, seed after all
+ seed = true
+ }
+ }
+
+ if seed {
+ // Pick a batch of previously known seeds to lookup with
seeds := tab.db.querySeeds(10)
for _, seed := range seeds {
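For readability, here is a condensed paraphrase of the control flow the patched refresh() ends up with; the seed-bonding tail of the hunk is cut off above and elided below, and refreshSketch is an illustrative name rather than a function in the patch.

package discover

import "crypto/rand"

// refreshSketch is an illustrative paraphrase of the patched refresh() logic,
// not the patch itself.
func (tab *Table) refreshSketch() {
	// Seeding is only needed when every bucket is empty (initial bootstrap,
	// or all peers were evicted as faulty).
	seed := true
	tab.mutex.Lock()
	for _, bucket := range tab.buckets {
		if len(bucket.entries) > 0 {
			seed = false
			break
		}
	}
	tab.mutex.Unlock()

	if !seed {
		// Live entries exist: refresh by looking up a random 512-bit target,
		// since a sha3 preimage for a chosen bucket is impractical to generate.
		var target NodeID
		rand.Read(target[:])
		if len(tab.Lookup(target)) == 0 {
			seed = true // the lookup came back empty, fall back to seeding
		}
	}
	if seed {
		// Pull previously known nodes from the database; the patch then bonds
		// with this batch (code beyond the excerpt).
		_ = tab.db.querySeeds(10)
	}
}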