author    Felix Lange <fjl@users.noreply.github.com>  2018-09-25 06:59:00 +0800
committer GitHub <noreply@github.com>  2018-09-25 06:59:00 +0800
commit    30cd5c18549f645002aedb4c00e5bab683cb0835
tree      9a9098c6ff5a746758660295dfc1880d22e75434 /p2p/discover/table.go
parent    0ae462fb80b8a95e38af08d894ea9ecf9e45f2e7
all: new p2p node representation (#17643)
Package p2p/enode provides a generalized representation of p2p nodes which can contain arbitrary information in key/value pairs. It is also the new home for the node database. The "v4" identity scheme is also moved here from p2p/enr to remove the dependency on Ethereum crypto from that package.

Record signature handling is changed significantly. The identity scheme registry is removed and acceptable schemes must be passed to any method that needs identity. This means records must now be validated explicitly after decoding. The enode API is designed to make signature handling easy and safe: most APIs around the codebase work with enode.Node, which is a wrapper around a valid record. Going from enr.Record to enode.Node requires a valid signature.

* p2p/discover: port to p2p/enode

This ports the discovery code to the new node representation in p2p/enode. The wire protocol is unchanged, so this can be considered a refactoring change. The Kademlia table can now deal with nodes using an arbitrary identity scheme. This requires a few incompatible API changes:

  - Table.Lookup is not available anymore. It used to take a public key as argument because the v4 protocol requires one. Its replacement is LookupRandom.
  - Table.Resolve takes *enode.Node instead of NodeID. This is also for v4 protocol compatibility because nodes cannot be looked up by ID alone.
  - Types Node and NodeID are gone.

Further commits in the series will be fixes all over the codebase to deal with those removals.

* p2p: port to p2p/enode and discovery changes

This adapts package p2p to the changes in p2p/discover. All uses of discover.Node and discover.NodeID are replaced by their equivalents from p2p/enode. New API is added to retrieve the enode.Node instance of a peer.

The behavior of Server.Self with discovery disabled is improved. It now tries much harder to report a working IP address, falling back to 127.0.0.1 if no suitable address can be determined through other means. These changes were needed for tests of other packages later in the series.

* p2p/simulations, p2p/testing: port to p2p/enode

No surprises here, mostly replacements of discover.Node and discover.NodeID with their new equivalents. The 'interesting' API changes are:

  - testing.ProtocolSession tracks complete nodes, not just their IDs.
  - adapters.NodeConfig has a new method to create a complete node.

These changes were needed to make swarm tests work. Note that the NodeID change makes the code incompatible with old simulation snapshots.

* whisper/whisperv5, whisper/whisperv6: port to p2p/enode

This port was easy because whisper uses []byte for node IDs and URL strings in the API.

* eth: port to p2p/enode

Again, easy to port because eth uses strings for node IDs and doesn't care about node information in any way.

* les: port to p2p/enode

Apart from replacing discover.NodeID with enode.ID, most changes are in the server pool code. It now deals with complete nodes instead of (Pubkey, IP, Port) triples. The database format is unchanged for now, but we should probably change it to use the node database later.

* node: port to p2p/enode

This change simply replaces discover.Node and discover.NodeID with their new equivalents.

* swarm/network: port to p2p/enode

Swarm has its own node address representation, BzzAddr, containing both an overlay address (the hash of a secp256k1 public key) and an underlay address (enode:// URL). There are no changes to the BzzAddr format in this commit, but certain operations such as creating a BzzAddr from a node ID are now impossible because node IDs aren't public keys anymore. Most swarm-related changes in the series remove uses of NewAddrFromNodeID, replacing it with NewAddr, which takes a complete node as argument. ToOverlayAddr is removed because we can just use the node ID directly.
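The decode-then-validate flow described above can be sketched as follows. This is an illustrative sketch, not code from the commit: it assumes the enode.New constructor and enode.ValidSchemes scheme set that this change introduces, and decodeNode is a hypothetical helper.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/ethereum/go-ethereum/rlp"
)

// decodeNode (hypothetical helper) decodes a raw record and validates it.
// Decoding an enr.Record no longer checks its signature; the record must
// be passed through enode.New with an acceptable identity scheme set
// before use, so every *enode.Node is known to wrap a valid record.
func decodeNode(raw []byte) (*enode.Node, error) {
	var r enr.Record
	if err := rlp.DecodeBytes(raw, &r); err != nil {
		return nil, fmt.Errorf("invalid record encoding: %v", err)
	}
	// Signature verification happens here, against the "v4" scheme.
	return enode.New(enode.ValidSchemes, &r)
}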
Diffstat (limited to 'p2p/discover/table.go')
-rw-r--r--  p2p/discover/table.go  224
1 file changed, 108 insertions, 116 deletions
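Before the diff itself, a hedged sketch of what the incompatible Table API changes mean for callers, based on the signatures visible in the hunks below. It assumes an already-constructed *discover.Table and a previously known *enode.Node; refreshPeers is a hypothetical caller, not part of the commit.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

// refreshPeers shows the replacement APIs: Table.Lookup(NodeID) is gone,
// LookupRandom performs a lookup toward a random target, and Resolve
// takes (and returns) a complete *enode.Node instead of a NodeID.
func refreshPeers(tab *discover.Table, known *enode.Node) []*enode.Node {
	if n := tab.Resolve(known); n != nil {
		fmt.Println("still reachable:", n.ID(), n.IP())
	}
	return tab.LookupRandom()
}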
diff --git a/p2p/discover/table.go b/p2p/discover/table.go
index a130b5494..7a3e41de1 100644
--- a/p2p/discover/table.go
+++ b/p2p/discover/table.go
@@ -23,6 +23,7 @@
package discover
import (
+ "crypto/ecdsa"
crand "crypto/rand"
"encoding/binary"
"fmt"
@@ -35,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/netutil"
)
@@ -65,49 +67,44 @@ const (
type Table struct {
mutex sync.Mutex // protects buckets, bucket content, nursery, rand
buckets [nBuckets]*bucket // index of known nodes by distance
- nursery []*Node // bootstrap nodes
+ nursery []*node // bootstrap nodes
rand *mrand.Rand // source of randomness, periodically reseeded
ips netutil.DistinctNetSet
- db *nodeDB // database of known nodes
+ db *enode.DB // database of known nodes
refreshReq chan chan struct{}
initDone chan struct{}
closeReq chan struct{}
closed chan struct{}
- nodeAddedHook func(*Node) // for testing
+ nodeAddedHook func(*node) // for testing
net transport
- self *Node // metadata of the local node
+ self *node // metadata of the local node
}
// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
- ping(NodeID, *net.UDPAddr) error
- findnode(toid NodeID, addr *net.UDPAddr, target NodeID) ([]*Node, error)
+ ping(enode.ID, *net.UDPAddr) error
+ findnode(toid enode.ID, addr *net.UDPAddr, target encPubkey) ([]*node, error)
close()
}
// bucket contains nodes, ordered by their last activity. the entry
// that was most recently active is the first element in entries.
type bucket struct {
- entries []*Node // live entries, sorted by time of last contact
- replacements []*Node // recently seen nodes to be used if revalidation fails
+ entries []*node // live entries, sorted by time of last contact
+ replacements []*node // recently seen nodes to be used if revalidation fails
ips netutil.DistinctNetSet
}
-func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string, bootnodes []*Node) (*Table, error) {
- // If no node database was given, use an in-memory one
- db, err := newNodeDB(nodeDBPath, nodeDBVersion, ourID)
- if err != nil {
- return nil, err
- }
+func newTable(t transport, self *enode.Node, db *enode.DB, bootnodes []*enode.Node) (*Table, error) {
tab := &Table{
net: t,
db: db,
- self: NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)),
+ self: wrapNode(self),
refreshReq: make(chan chan struct{}),
initDone: make(chan struct{}),
closeReq: make(chan struct{}),
@@ -125,10 +122,7 @@ func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr, nodeDBPath string
}
tab.seedRand()
tab.loadSeedNodes()
- // Start the background expiration goroutine after loading seeds so that the search for
- // seed nodes also considers older nodes that would otherwise be removed by the
- // expiration.
- tab.db.ensureExpirer()
+
go tab.loop()
return tab, nil
}
@@ -143,15 +137,13 @@ func (tab *Table) seedRand() {
}
// Self returns the local node.
-// The returned node should not be modified by the caller.
-func (tab *Table) Self() *Node {
- return tab.self
+func (tab *Table) Self() *enode.Node {
+ return unwrapNode(tab.self)
}
-// ReadRandomNodes fills the given slice with random nodes from the
-// table. It will not write the same node more than once. The nodes in
-// the slice are copies and can be modified by the caller.
-func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
+// ReadRandomNodes fills the given slice with random nodes from the table. The results
+// are guaranteed to be unique for a single invocation, no node will appear twice.
+func (tab *Table) ReadRandomNodes(buf []*enode.Node) (n int) {
if !tab.isInitDone() {
return 0
}
@@ -159,7 +151,7 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
defer tab.mutex.Unlock()
// Find all non-empty buckets and get a fresh slice of their entries.
- var buckets [][]*Node
+ var buckets [][]*node
for _, b := range &tab.buckets {
if len(b.entries) > 0 {
buckets = append(buckets, b.entries)
@@ -177,7 +169,7 @@ func (tab *Table) ReadRandomNodes(buf []*Node) (n int) {
var i, j int
for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) {
b := buckets[j]
- buf[i] = &(*b[0])
+ buf[i] = unwrapNode(b[0])
buckets[j] = b[1:]
if len(b) == 1 {
buckets = append(buckets[:j], buckets[j+1:]...)
@@ -202,20 +194,13 @@ func (tab *Table) Close() {
// setFallbackNodes sets the initial points of contact. These nodes
// are used to connect to the network if the table is empty and there
// are no known nodes in the database.
-func (tab *Table) setFallbackNodes(nodes []*Node) error {
+func (tab *Table) setFallbackNodes(nodes []*enode.Node) error {
for _, n := range nodes {
- if err := n.validateComplete(); err != nil {
- return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err)
+ if err := n.ValidateComplete(); err != nil {
+ return fmt.Errorf("bad bootstrap node %q: %v", n, err)
}
}
- tab.nursery = make([]*Node, 0, len(nodes))
- for _, n := range nodes {
- cpy := *n
- // Recompute cpy.sha because the node might not have been
- // created by NewNode or ParseNode.
- cpy.sha = crypto.Keccak256Hash(n.ID[:])
- tab.nursery = append(tab.nursery, &cpy)
- }
+ tab.nursery = wrapNodes(nodes)
return nil
}
@@ -231,47 +216,48 @@ func (tab *Table) isInitDone() bool {
// Resolve searches for a specific node with the given ID.
// It returns nil if the node could not be found.
-func (tab *Table) Resolve(targetID NodeID) *Node {
+func (tab *Table) Resolve(n *enode.Node) *enode.Node {
// If the node is present in the local table, no
// network interaction is required.
- hash := crypto.Keccak256Hash(targetID[:])
+ hash := n.ID()
tab.mutex.Lock()
cl := tab.closest(hash, 1)
tab.mutex.Unlock()
- if len(cl.entries) > 0 && cl.entries[0].ID == targetID {
- return cl.entries[0]
+ if len(cl.entries) > 0 && cl.entries[0].ID() == hash {
+ return unwrapNode(cl.entries[0])
}
// Otherwise, do a network lookup.
- result := tab.Lookup(targetID)
+ result := tab.lookup(encodePubkey(n.Pubkey()), true)
for _, n := range result {
- if n.ID == targetID {
- return n
+ if n.ID() == hash {
+ return unwrapNode(n)
}
}
return nil
}
-// Lookup performs a network search for nodes close
-// to the given target. It approaches the target by querying
-// nodes that are closer to it on each iteration.
-// The given target does not need to be an actual node
-// identifier.
-func (tab *Table) Lookup(targetID NodeID) []*Node {
- return tab.lookup(targetID, true)
+// LookupRandom finds random nodes in the network.
+func (tab *Table) LookupRandom() []*enode.Node {
+ var target encPubkey
+ crand.Read(target[:])
+ return unwrapNodes(tab.lookup(target, true))
}
-func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
+// lookup performs a network search for nodes close to the given target. It approaches the
+// target by querying nodes that are closer to it on each iteration. The given target does
+// not need to be an actual node identifier.
+func (tab *Table) lookup(targetKey encPubkey, refreshIfEmpty bool) []*node {
var (
- target = crypto.Keccak256Hash(targetID[:])
- asked = make(map[NodeID]bool)
- seen = make(map[NodeID]bool)
- reply = make(chan []*Node, alpha)
+ target = enode.ID(crypto.Keccak256Hash(targetKey[:]))
+ asked = make(map[enode.ID]bool)
+ seen = make(map[enode.ID]bool)
+ reply = make(chan []*node, alpha)
pendingQueries = 0
result *nodesByDistance
)
// don't query further if we hit ourself.
// unlikely to happen often in practice.
- asked[tab.self.ID] = true
+ asked[tab.self.ID()] = true
for {
tab.mutex.Lock()
@@ -293,10 +279,10 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
// ask the alpha closest nodes that we haven't asked yet
for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
n := result.entries[i]
- if !asked[n.ID] {
- asked[n.ID] = true
+ if !asked[n.ID()] {
+ asked[n.ID()] = true
pendingQueries++
- go tab.findnode(n, targetID, reply)
+ go tab.findnode(n, targetKey, reply)
}
}
if pendingQueries == 0 {
@@ -305,8 +291,8 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
}
// wait for the next reply
for _, n := range <-reply {
- if n != nil && !seen[n.ID] {
- seen[n.ID] = true
+ if n != nil && !seen[n.ID()] {
+ seen[n.ID()] = true
result.push(n, bucketSize)
}
}
@@ -315,19 +301,19 @@ func (tab *Table) lookup(targetID NodeID, refreshIfEmpty bool) []*Node {
return result.entries
}
-func (tab *Table) findnode(n *Node, targetID NodeID, reply chan<- []*Node) {
- fails := tab.db.findFails(n.ID)
- r, err := tab.net.findnode(n.ID, n.addr(), targetID)
+func (tab *Table) findnode(n *node, targetKey encPubkey, reply chan<- []*node) {
+ fails := tab.db.FindFails(n.ID())
+ r, err := tab.net.findnode(n.ID(), n.addr(), targetKey)
if err != nil || len(r) == 0 {
fails++
- tab.db.updateFindFails(n.ID, fails)
- log.Trace("Findnode failed", "id", n.ID, "failcount", fails, "err", err)
+ tab.db.UpdateFindFails(n.ID(), fails)
+ log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
if fails >= maxFindnodeFailures {
- log.Trace("Too many findnode failures, dropping", "id", n.ID, "failcount", fails)
+ log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails)
tab.delete(n)
}
} else if fails > 0 {
- tab.db.updateFindFails(n.ID, fails-1)
+ tab.db.UpdateFindFails(n.ID(), fails-1)
}
// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
@@ -405,7 +391,6 @@ loop:
for _, ch := range waiting {
close(ch)
}
- tab.db.close()
close(tab.closed)
}
@@ -421,7 +406,11 @@ func (tab *Table) doRefresh(done chan struct{}) {
tab.loadSeedNodes()
// Run self lookup to discover new neighbor nodes.
- tab.lookup(tab.self.ID, false)
+ // We can only do this if we have a secp256k1 identity.
+ var key ecdsa.PublicKey
+ if err := tab.self.Load((*enode.Secp256k1)(&key)); err == nil {
+ tab.lookup(encodePubkey(&key), false)
+ }
// The Kademlia paper specifies that the bucket refresh should
// perform a lookup in the least recently used bucket. We cannot
@@ -430,19 +419,19 @@ func (tab *Table) doRefresh(done chan struct{}) {
// sha3 preimage that falls into a chosen bucket.
// We perform a few lookups with a random target instead.
for i := 0; i < 3; i++ {
- var target NodeID
+ var target encPubkey
crand.Read(target[:])
tab.lookup(target, false)
}
}
func (tab *Table) loadSeedNodes() {
- seeds := tab.db.querySeeds(seedCount, seedMaxAge)
+ seeds := wrapNodes(tab.db.QuerySeeds(seedCount, seedMaxAge))
seeds = append(seeds, tab.nursery...)
for i := range seeds {
seed := seeds[i]
- age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.lastPongReceived(seed.ID)) }}
- log.Debug("Found seed node in database", "id", seed.ID, "addr", seed.addr(), "age", age)
+ age := log.Lazy{Fn: func() interface{} { return time.Since(tab.db.LastPongReceived(seed.ID())) }}
+ log.Debug("Found seed node in database", "id", seed.ID(), "addr", seed.addr(), "age", age)
tab.add(seed)
}
}
@@ -459,28 +448,28 @@ func (tab *Table) doRevalidate(done chan<- struct{}) {
}
// Ping the selected node and wait for a pong.
- err := tab.net.ping(last.ID, last.addr())
+ err := tab.net.ping(last.ID(), last.addr())
tab.mutex.Lock()
defer tab.mutex.Unlock()
b := tab.buckets[bi]
if err == nil {
// The node responded, move it to the front.
- log.Trace("Revalidated node", "b", bi, "id", last.ID)
+ log.Debug("Revalidated node", "b", bi, "id", last.ID())
b.bump(last)
return
}
// No reply received, pick a replacement or delete the node if there aren't
// any replacements.
if r := tab.replace(b, last); r != nil {
- log.Trace("Replaced dead node", "b", bi, "id", last.ID, "ip", last.IP, "r", r.ID, "rip", r.IP)
+ log.Debug("Replaced dead node", "b", bi, "id", last.ID(), "ip", last.IP(), "r", r.ID(), "rip", r.IP())
} else {
- log.Trace("Removed dead node", "b", bi, "id", last.ID, "ip", last.IP)
+ log.Debug("Removed dead node", "b", bi, "id", last.ID(), "ip", last.IP())
}
}
// nodeToRevalidate returns the last node in a random, non-empty bucket.
-func (tab *Table) nodeToRevalidate() (n *Node, bi int) {
+func (tab *Table) nodeToRevalidate() (n *node, bi int) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
@@ -511,7 +500,7 @@ func (tab *Table) copyLiveNodes() {
for _, b := range &tab.buckets {
for _, n := range b.entries {
if now.Sub(n.addedAt) >= seedMinTableTime {
- tab.db.updateNode(n)
+ tab.db.UpdateNode(unwrapNode(n))
}
}
}
@@ -519,7 +508,7 @@ func (tab *Table) copyLiveNodes() {
// closest returns the n nodes in the table that are closest to the
// given id. The caller must hold tab.mutex.
-func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance {
+func (tab *Table) closest(target enode.ID, nresults int) *nodesByDistance {
// This is a very wasteful way to find the closest nodes but
// obviously correct. I believe that tree-based buckets would make
// this easier to implement efficiently.
@@ -540,8 +529,8 @@ func (tab *Table) len() (n int) {
}
// bucket returns the bucket for the given node ID hash.
-func (tab *Table) bucket(sha common.Hash) *bucket {
- d := logdist(tab.self.sha, sha)
+func (tab *Table) bucket(id enode.ID) *bucket {
+ d := enode.LogDist(tab.self.ID(), id)
if d <= bucketMinDistance {
return tab.buckets[0]
}
@@ -553,11 +542,14 @@ func (tab *Table) bucket(sha common.Hash) *bucket {
// least recently active node in the bucket does not respond to a ping packet.
//
// The caller must not hold tab.mutex.
-func (tab *Table) add(n *Node) {
+func (tab *Table) add(n *node) {
+ if n.ID() == tab.self.ID() {
+ return
+ }
+
tab.mutex.Lock()
defer tab.mutex.Unlock()
-
- b := tab.bucket(n.sha)
+ b := tab.bucket(n.ID())
if !tab.bumpOrAdd(b, n) {
// Node is not in table. Add it to the replacement list.
tab.addReplacement(b, n)
@@ -570,7 +562,7 @@ func (tab *Table) add(n *Node) {
// table could be filled by just sending ping repeatedly.
//
// The caller must not hold tab.mutex.
-func (tab *Table) addThroughPing(n *Node) {
+func (tab *Table) addThroughPing(n *node) {
if !tab.isInitDone() {
return
}
@@ -579,15 +571,15 @@ func (tab *Table) addThroughPing(n *Node) {
// stuff adds nodes the table to the end of their corresponding bucket
// if the bucket is not full. The caller must not hold tab.mutex.
-func (tab *Table) stuff(nodes []*Node) {
+func (tab *Table) stuff(nodes []*node) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
for _, n := range nodes {
- if n.ID == tab.self.ID {
+ if n.ID() == tab.self.ID() {
continue // don't add self
}
- b := tab.bucket(n.sha)
+ b := tab.bucket(n.ID())
if len(b.entries) < bucketSize {
tab.bumpOrAdd(b, n)
}
@@ -595,11 +587,11 @@ func (tab *Table) stuff(nodes []*Node) {
}
// delete removes an entry from the node table. It is used to evacuate dead nodes.
-func (tab *Table) delete(node *Node) {
+func (tab *Table) delete(node *node) {
tab.mutex.Lock()
defer tab.mutex.Unlock()
- tab.deleteInBucket(tab.bucket(node.sha), node)
+ tab.deleteInBucket(tab.bucket(node.ID()), node)
}
func (tab *Table) addIP(b *bucket, ip net.IP) bool {
@@ -626,27 +618,27 @@ func (tab *Table) removeIP(b *bucket, ip net.IP) {
b.ips.Remove(ip)
}
-func (tab *Table) addReplacement(b *bucket, n *Node) {
+func (tab *Table) addReplacement(b *bucket, n *node) {
for _, e := range b.replacements {
- if e.ID == n.ID {
+ if e.ID() == n.ID() {
return // already in list
}
}
- if !tab.addIP(b, n.IP) {
+ if !tab.addIP(b, n.IP()) {
return
}
- var removed *Node
+ var removed *node
b.replacements, removed = pushNode(b.replacements, n, maxReplacements)
if removed != nil {
- tab.removeIP(b, removed.IP)
+ tab.removeIP(b, removed.IP())
}
}
// replace removes n from the replacement list and replaces 'last' with it if it is the
// last entry in the bucket. If 'last' isn't the last entry, it has either been replaced
// with someone else or became active.
-func (tab *Table) replace(b *bucket, last *Node) *Node {
- if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID != last.ID {
+func (tab *Table) replace(b *bucket, last *node) *node {
+ if len(b.entries) == 0 || b.entries[len(b.entries)-1].ID() != last.ID() {
// Entry has moved, don't replace it.
return nil
}
@@ -658,15 +650,15 @@ func (tab *Table) replace(b *bucket, last *Node) *Node {
r := b.replacements[tab.rand.Intn(len(b.replacements))]
b.replacements = deleteNode(b.replacements, r)
b.entries[len(b.entries)-1] = r
- tab.removeIP(b, last.IP)
+ tab.removeIP(b, last.IP())
return r
}
// bump moves the given node to the front of the bucket entry list
// if it is contained in that list.
-func (b *bucket) bump(n *Node) bool {
+func (b *bucket) bump(n *node) bool {
for i := range b.entries {
- if b.entries[i].ID == n.ID {
+ if b.entries[i].ID() == n.ID() {
// move it to the front
copy(b.entries[1:], b.entries[:i])
b.entries[0] = n
@@ -678,11 +670,11 @@ func (b *bucket) bump(n *Node) bool {
// bumpOrAdd moves n to the front of the bucket entry list or adds it if the list isn't
// full. The return value is true if n is in the bucket.
-func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
+func (tab *Table) bumpOrAdd(b *bucket, n *node) bool {
if b.bump(n) {
return true
}
- if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP) {
+ if len(b.entries) >= bucketSize || !tab.addIP(b, n.IP()) {
return false
}
b.entries, _ = pushNode(b.entries, n, bucketSize)
@@ -694,13 +686,13 @@ func (tab *Table) bumpOrAdd(b *bucket, n *Node) bool {
return true
}
-func (tab *Table) deleteInBucket(b *bucket, n *Node) {
+func (tab *Table) deleteInBucket(b *bucket, n *node) {
b.entries = deleteNode(b.entries, n)
- tab.removeIP(b, n.IP)
+ tab.removeIP(b, n.IP())
}
// pushNode adds n to the front of list, keeping at most max items.
-func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
+func pushNode(list []*node, n *node, max int) ([]*node, *node) {
if len(list) < max {
list = append(list, nil)
}
@@ -711,9 +703,9 @@ func pushNode(list []*Node, n *Node, max int) ([]*Node, *Node) {
}
// deleteNode removes n from list.
-func deleteNode(list []*Node, n *Node) []*Node {
+func deleteNode(list []*node, n *node) []*node {
for i := range list {
- if list[i].ID == n.ID {
+ if list[i].ID() == n.ID() {
return append(list[:i], list[i+1:]...)
}
}
@@ -723,14 +715,14 @@ func deleteNode(list []*Node, n *Node) []*Node {
// nodesByDistance is a list of nodes, ordered by
// distance to target.
type nodesByDistance struct {
- entries []*Node
- target common.Hash
+ entries []*node
+ target enode.ID
}
// push adds the given node to the list, keeping the total size below maxElems.
-func (h *nodesByDistance) push(n *Node, maxElems int) {
+func (h *nodesByDistance) push(n *node, maxElems int) {
ix := sort.Search(len(h.entries), func(i int) bool {
- return distcmp(h.target, h.entries[i].sha, n.sha) > 0
+ return enode.DistCmp(h.target, h.entries[i].ID(), n.ID()) > 0
})
if len(h.entries) < maxElems {
h.entries = append(h.entries, n)
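The final hunk switches nodesByDistance.push from the package-local distcmp to enode.DistCmp inside sort.Search. For orientation, a small self-contained sketch of how the enode distance helpers used throughout this diff behave, assuming LogDist and DistCmp keep the semantics of the old logdist/distcmp; distanceDemo is illustrative only.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

func distanceDemo() {
	// IDs that differ only in the last byte.
	var target, a, b enode.ID
	target[31], a[31], b[31] = 0x01, 0x02, 0x04

	// LogDist is the bit length of (x XOR y): a larger value means the
	// IDs diverge at a more significant bit, i.e. they are farther apart.
	fmt.Println(enode.LogDist(target, a)) // 2 (0x01^0x02 = 0x03)
	fmt.Println(enode.LogDist(target, b)) // 3 (0x01^0x04 = 0x05)

	// DistCmp orders a and b by XOR distance to target; a negative result
	// means a is closer. nodesByDistance.push relies on this ordering.
	fmt.Println(enode.DistCmp(target, a, b)) // -1
}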