Diffstat (limited to 'eth')
-rw-r--r-- | eth/backend.go                    |  26
-rw-r--r-- | eth/downloader/downloader.go      | 295
-rw-r--r-- | eth/downloader/downloader_test.go |  52
-rw-r--r-- | eth/downloader/peer.go            |  15
-rw-r--r-- | eth/downloader/queue.go           | 408
-rw-r--r-- | eth/downloader/queue_test.go      |  17
-rw-r--r-- | eth/handler.go                    |   8
-rw-r--r-- | eth/sync.go                       |  56
8 files changed, 482 insertions, 395 deletions
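The heart of this commit is the queue.go rewrite below: unordered hash sets are replaced by a priority queue keyed on insertion order, with per-peer in-flight requests that can be reserved, delivered, cancelled or expired. The following toy model sketches that Insert → Reserve → Deliver cycle; it is an illustration only, not code from the commit. Plain strings stand in for common.Hash and *types.Block, and a sorted slice stands in for the prque priority queue used by the real code.

package main

import (
	"fmt"
	"sort"
	"time"
)

// fetchRequest mirrors the shape introduced in queue.go: one in-flight
// batch per peer, each hash keeping its insertion index as priority.
type fetchRequest struct {
	Peer   string
	Hashes map[string]int
	Time   time.Time
}

type queue struct {
	hashPool    map[string]int           // pending hashes -> insertion index
	hashCounter int                      // monotonic counter for priorities
	pendPool    map[string]*fetchRequest // in-flight request per peer
	blockCache  []string                 // delivered "blocks"
}

func newQueue() *queue {
	return &queue{
		hashPool: make(map[string]int),
		pendPool: make(map[string]*fetchRequest),
	}
}

// Insert schedules hashes in arrival order; the real code also pushes
// them into a prque keyed on the same index.
func (q *queue) Insert(hashes []string) {
	for i, h := range hashes {
		q.hashPool[h] = q.hashCounter + i
	}
	q.hashCounter += len(hashes)
}

// Reserve hands at most max pending hashes to an idle peer, or returns
// nil if the peer already has a request in flight or nothing is pending.
func (q *queue) Reserve(peer string, max int) *fetchRequest {
	if _, busy := q.pendPool[peer]; busy || len(q.hashPool) == 0 {
		return nil
	}
	// Order pending hashes by insertion index (stand-in for popping a prque)
	ordered := make([]string, 0, len(q.hashPool))
	for h := range q.hashPool {
		ordered = append(ordered, h)
	}
	sort.Slice(ordered, func(i, j int) bool { return q.hashPool[ordered[i]] < q.hashPool[ordered[j]] })

	send := make(map[string]int)
	for _, h := range ordered {
		if len(send) == max {
			break
		}
		send[h] = q.hashPool[h]
		delete(q.hashPool, h)
	}
	request := &fetchRequest{Peer: peer, Hashes: send, Time: time.Now()}
	q.pendPool[peer] = request
	return request
}

// Deliver accepts a peer's response, caching requested blocks and
// requeueing whatever the peer failed to return.
func (q *queue) Deliver(peer string, blocks []string) {
	request := q.pendPool[peer]
	if request == nil {
		return // never requested
	}
	delete(q.pendPool, peer)
	for _, b := range blocks {
		if _, wanted := request.Hashes[b]; wanted {
			q.blockCache = append(q.blockCache, b)
			delete(request.Hashes, b)
		}
	}
	for h, index := range request.Hashes {
		q.hashPool[h] = index // hand back, other peers may pick these up
	}
}

func main() {
	q := newQueue()
	q.Insert([]string{"h1", "h2", "h3"})

	request := q.Reserve("peer1", 2) // reserves h1, h2
	fmt.Println("reserved:", len(request.Hashes))

	q.Deliver("peer1", []string{"h1"}) // h2 is requeued for other peers
	fmt.Println("cached:", q.blockCache, "still pending:", len(q.hashPool))
}

The per-peer pendPool entry is also what lets the real queue's Expire time out an entire request and return its hashes to the pool, as seen in the queue.go hunks below.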
diff --git a/eth/backend.go b/eth/backend.go
index 791336d75..0f23cde2f 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -60,8 +60,9 @@ type Config struct {
 	VmDebug bool
 	NatSpec bool
 
-	MaxPeers int
-	Port     string
+	MaxPeers        int
+	MaxPendingPeers int
+	Port            string
 
 	// Space-separated list of discovery node URLs
 	BootNodes string
@@ -280,16 +281,17 @@ func New(config *Config) (*Ethereum, error) {
 		protocols = append(protocols, eth.whisper.Protocol())
 	}
 	eth.net = &p2p.Server{
-		PrivateKey:     netprv,
-		Name:           config.Name,
-		MaxPeers:       config.MaxPeers,
-		Protocols:      protocols,
-		NAT:            config.NAT,
-		NoDial:         !config.Dial,
-		BootstrapNodes: config.parseBootNodes(),
-		StaticNodes:    config.parseNodes(staticNodes),
-		TrustedNodes:   config.parseNodes(trustedNodes),
-		NodeDatabase:   nodeDb,
+		PrivateKey:      netprv,
+		Name:            config.Name,
+		MaxPeers:        config.MaxPeers,
+		MaxPendingPeers: config.MaxPendingPeers,
+		Protocols:       protocols,
+		NAT:             config.NAT,
+		NoDial:          !config.Dial,
+		BootstrapNodes:  config.parseBootNodes(),
+		StaticNodes:     config.parseNodes(staticNodes),
+		TrustedNodes:    config.parseNodes(trustedNodes),
+		NodeDatabase:    nodeDb,
 	}
 	if len(config.Port) > 0 {
 		eth.net.ListenAddr = ":" + config.Port
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 15f4cb0a3..18f8d2ba8 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -11,11 +11,10 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/logger"
 	"github.com/ethereum/go-ethereum/logger/glog"
-	"gopkg.in/fatih/set.v0"
 )
 
 const (
-	maxBlockFetch    = 256              // Amount of max blocks to be fetched per chunk
+	maxBlockFetch    = 128              // Amount of max blocks to be fetched per chunk
 	peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
 	hashTtl          = 20 * time.Second // The amount of time it takes for a hash request to time out
 )
@@ -25,12 +24,12 @@ var (
 	blockTtl = 20 * time.Second // The amount of time it takes for a block request to time out
 
 	errLowTd            = errors.New("peer's TD is too low")
-	errBusy             = errors.New("busy")
+	ErrBusy             = errors.New("busy")
 	errUnknownPeer      = errors.New("peer's unknown or unhealthy")
-	ErrBadPeer          = errors.New("action from bad peer ignored")
+	errBadPeer          = errors.New("action from bad peer ignored")
 	errNoPeers          = errors.New("no peers to keep download active")
 	errPendingQueue     = errors.New("pending items in queue")
-	errTimeout          = errors.New("timeout")
+	ErrTimeout          = errors.New("timeout")
 	errEmptyHashSet     = errors.New("empty hash set by peer")
 	errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
 	errAlreadyInPool    = errors.New("hash already in pool")
@@ -69,8 +68,7 @@ type Downloader struct {
 	getBlock getBlockFn
 
 	// Status
-	fetchingHashes    int32
-	downloadingBlocks int32
+	synchronising int32
 
 	// Channels
 	newPeerCh chan *peer
@@ -80,7 +78,7 @@ type Downloader struct {
 
 func New(hasBlock hashCheckFn, getBlock getBlockFn) *Downloader {
 	downloader := &Downloader{
-		queue:    newqueue(),
+		queue:    newQueue(),
 		peers:    make(peers),
 		hasBlock: hasBlock,
 		getBlock: getBlock,
@@ -93,7 +91,7 @@ func New(hasBlock hashCheckFn, getBlock getBlockFn) *Downloader {
 }
 
 func (d *Downloader) Stats() (current int, max int) {
-	return d.queue.blockHashes.Size(), d.queue.fetchPool.Size() + d.queue.hashPool.Size()
+	return d.queue.Size()
 }
 
 func (d *Downloader) RegisterPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error {
@@ -111,7 +109,7 @@ func (d *Downloader) RegisterPeer(id string, hash common.Hash, getHashes hashFet
 	return nil
 }
 
-// UnregisterPeer unregister's a peer. This will prevent any action from the specified peer.
+// UnregisterPeer unregisters a peer. This will prevent any action from the specified peer.
 func (d *Downloader) UnregisterPeer(id string) {
 	d.mu.Lock()
 	defer d.mu.Unlock()
@@ -121,104 +119,58 @@ func (d *Downloader) UnregisterPeer(id string) {
 	delete(d.peers, id)
 }
 
-// SynchroniseWithPeer will select the peer and use it for synchronising. If an empty string is given
-// it will use the best peer possible and synchronise if it's TD is higher than our own. If any of the
+// Synchronise will select the peer and use it for synchronising. If an empty string is given
+// it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous
 func (d *Downloader) Synchronise(id string, hash common.Hash) error {
-	// Make sure it's doing neither. Once done we can restart the
-	// downloading process if the TD is higher. For now just get on
-	// with whatever is going on. This prevents unecessary switching.
-	if d.isBusy() {
-		return errBusy
+	// Make sure only one goroutine is ever allowed past this point at once
+	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
+		return ErrBusy
 	}
+	defer atomic.StoreInt32(&d.synchronising, 0)
 
-	// When a synchronisation attempt is made while the queue stil
-	// contains items we abort the sync attempt
-	if d.queue.size() > 0 {
+	// Abort if the queue still contains some leftover data
+	if _, cached := d.queue.Size(); cached > 0 && d.queue.GetHeadBlock() != nil {
 		return errPendingQueue
 	}
+	// Reset the queue to clean any internal leftover state
+	d.queue.Reset()
 
-	// Fetch the peer using the id or throw an error if the peer couldn't be found
+	// Retrieve the origin peer and initiate the downloading process
 	p := d.peers[id]
 	if p == nil {
 		return errUnknownPeer
 	}
-
-	// Get the hash from the peer and initiate the downloading progress.
-	err := d.getFromPeer(p, hash, false)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Done lets the downloader know that whatever previous hashes were taken
-// are processed. If the block count reaches zero and done is called
-// we reset the queue for the next batch of incoming hashes and blocks.
-func (d *Downloader) Done() {
-	d.queue.mu.Lock()
-	defer d.queue.mu.Unlock()
-
-	if len(d.queue.blocks) == 0 {
-		d.queue.resetNoTS()
-	}
+	return d.getFromPeer(p, hash, false)
 }
 
 // TakeBlocks takes blocks from the queue and yields them to the blockTaker handler
 // it's possible it yields no blocks
 func (d *Downloader) TakeBlocks() types.Blocks {
-	d.queue.mu.Lock()
-	defer d.queue.mu.Unlock()
-
-	var blocks types.Blocks
-	if len(d.queue.blocks) > 0 {
-		// Make sure the parent hash is known
-		if d.queue.blocks[0] != nil && !d.hasBlock(d.queue.blocks[0].ParentHash()) {
-			return nil
-		}
-
-		for _, block := range d.queue.blocks {
-			if block == nil {
-				break
-			}
-
-			blocks = append(blocks, block)
-		}
-		d.queue.blockOffset += len(blocks)
-		// delete the blocks from the slice and let them be garbage collected
-		// without this slice trick the blocks would stay in memory until nil
-		// would be assigned to d.queue.blocks
-		copy(d.queue.blocks, d.queue.blocks[len(blocks):])
-		for k, n := len(d.queue.blocks)-len(blocks), len(d.queue.blocks); k < n; k++ {
-			d.queue.blocks[k] = nil
-		}
-		d.queue.blocks = d.queue.blocks[:len(d.queue.blocks)-len(blocks)]
-
-		//d.queue.blocks = d.queue.blocks[len(blocks):]
-		if len(d.queue.blocks) == 0 {
-			d.queue.blocks = nil
-		}
-
+	// Check that there are blocks available and its parents are known
+	head := d.queue.GetHeadBlock()
+	if head == nil || !d.hasBlock(head.ParentHash()) {
+		return nil
 	}
-
-	return blocks
+	// Retrieve a full batch of blocks
+	return d.queue.TakeBlocks(head)
 }
 
 func (d *Downloader) Has(hash common.Hash) bool {
-	return d.queue.has(hash)
+	return d.queue.Has(hash)
 }
 
 func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) (err error) {
+	d.activePeer = p.id
 	defer func() {
 		// reset on error
 		if err != nil {
-			d.queue.reset()
+			d.queue.Reset()
 		}
 	}()
 
-	glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id)
+	glog.V(logger.Debug).Infoln("Synchronizing with the network using:", p.id)
 	// Start the fetcher. This will block the update entirely
 	// interupts need to be send to the appropriate channels
 	// respectively.
@@ -234,20 +186,13 @@ func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool)
 		return err
 	}
 
-	glog.V(logger.Detail).Infoln("Sync completed")
+	glog.V(logger.Debug).Infoln("Synchronization completed")
 
 	return nil
 }
 
 // XXX Make synchronous
 func (d *Downloader) startFetchingHashes(p *peer, h common.Hash, ignoreInitial bool) error {
-	atomic.StoreInt32(&d.fetchingHashes, 1)
-	defer atomic.StoreInt32(&d.fetchingHashes, 0)
-
-	if d.queue.has(h) {
-		return errAlreadyInPool
-	}
-
 	glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", h[:4], p.id)
 
 	start := time.Now()
@@ -256,7 +201,7 @@ func (d *Downloader) startFetchingHashes(p *peer, h common.Hash, ignoreInitial b
 	// In such circumstances we don't need to download the block so don't add it to the queue.
 	if !ignoreInitial {
 		// Add the hash to the queue first
-		d.queue.hashPool.Add(h)
+		d.queue.Insert([]common.Hash{h})
 	}
 	// Get the first batch of hashes
 	p.getHashes(h)
@@ -273,7 +218,7 @@ out:
 	for {
 		select {
 		case hashPack := <-d.hashCh:
-			// make sure the active peer is giving us the hashes
+			// Make sure the active peer is giving us the hashes
 			if hashPack.peerId != activePeer.id {
 				glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)\n", hashPack.peerId)
 				break
@@ -281,43 +226,37 @@ out:
 
 			failureResponseTimer.Reset(hashTtl)
 
-			var (
-				hashes = hashPack.hashes
-				done   bool // determines whether we're done fetching hashes (i.e. common hash found)
-			)
-			hashSet := set.New()
-			for _, hash = range hashes {
-				if d.hasBlock(hash) || d.queue.blockHashes.Has(hash) {
-					glog.V(logger.Debug).Infof("Found common hash %x\n", hash[:4])
+			// Make sure the peer actually gave something valid
+			if len(hashPack.hashes) == 0 {
+				glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", activePeer.id)
+				d.queue.Reset()
+				return errEmptyHashSet
+			}
+			// Determine if we're done fetching hashes (queue up all pending), and continue if not done
+			done, index := false, 0
+			for index, hash = range hashPack.hashes {
+				if d.hasBlock(hash) || d.queue.GetBlock(hash) != nil {
+					glog.V(logger.Debug).Infof("Found common hash %x\n", hash[:4])
+					hashPack.hashes = hashPack.hashes[:index]
 					done = true
 					break
 				}
-
-				hashSet.Add(hash)
 			}
-			d.queue.put(hashSet)
-
-			// Add hashes to the chunk set
-			if len(hashes) == 0 { // Make sure the peer actually gave you something valid
-				glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", activePeer.id)
-				d.queue.reset()
+			d.queue.Insert(hashPack.hashes)
 
-				return errEmptyHashSet
-			} else if !done { // Check if we're done fetching
-				// Get the next set of hashes
+			if !done {
 				activePeer.getHashes(hash)
-			} else { // we're done
-				// The offset of the queue is determined by the highest known block
-				var offset int
-				if block := d.getBlock(hash); block != nil {
-					offset = int(block.NumberU64() + 1)
-				}
-				// allocate proper size for the queueue
-				d.queue.alloc(offset, d.queue.hashPool.Size())
-
-				break out
+				continue
+			}
+			// We're done, allocate the download cache and proceed pulling the blocks
+			offset := 0
+			if block := d.getBlock(hash); block != nil {
+				offset = int(block.NumberU64() + 1)
 			}
+			d.queue.Alloc(offset)
+			break out
+
 		case <-failureResponseTimer.C:
 			glog.V(logger.Debug).Infof("Peer (%s) didn't respond in time for hash request\n", p.id)
@@ -326,7 +265,7 @@ out:
 			// already fetched hash list. This can't guarantee 100% correctness but does
 			// a fair job. This is always either correct or false incorrect.
 			for id, peer := range d.peers {
-				if d.queue.hashPool.Has(peer.recentHash) && !attemptedPeers[id] {
+				if d.queue.Has(peer.recentHash) && !attemptedPeers[id] {
 					p = peer
 					break
 				}
@@ -335,8 +274,8 @@ out:
 			// if all peers have been tried, abort the process entirely or if the hash is
 			// the zero hash.
 			if p == nil || (hash == common.Hash{}) {
-				d.queue.reset()
-				return errTimeout
+				d.queue.Reset()
+				return ErrTimeout
 			}
 			// set p to the active peer. this will invalidate any hashes that may be returned
@@ -346,15 +285,14 @@ out:
 			glog.V(logger.Debug).Infof("Hash fetching switched to new peer(%s)\n", p.id)
 		}
 	}
-	glog.V(logger.Detail).Infof("Downloaded hashes (%d) in %v\n", d.queue.hashPool.Size(), time.Since(start))
+	glog.V(logger.Debug).Infof("Downloaded hashes (%d) in %v\n", d.queue.Pending(), time.Since(start))
 
 	return nil
 }
 
 func (d *Downloader) startFetchingBlocks(p *peer) error {
-	glog.V(logger.Detail).Infoln("Downloading", d.queue.hashPool.Size(), "block(s)")
-	atomic.StoreInt32(&d.downloadingBlocks, 1)
-	defer atomic.StoreInt32(&d.downloadingBlocks, 0)
+	glog.V(logger.Debug).Infoln("Downloading", d.queue.Pending(), "block(s)")
+
 	// Defer the peer reset. This will empty the peer requested set
 	// and makes sure there are no lingering peers with an incorrect
 	// state
@@ -362,7 +300,7 @@ func (d *Downloader) startFetchingBlocks(p *peer) error {
 
 	start := time.Now()
 
-	// default ticker for re-fetching blocks everynow and then
+	// default ticker for re-fetching blocks every now and then
 	ticker := time.NewTicker(20 * time.Millisecond)
 out:
 	for {
@@ -371,7 +309,7 @@ out:
 			// If the peer was previously banned and failed to deliver it's pack
 			// in a reasonable time frame, ignore it's message.
 			if d.peers[blockPack.peerId] != nil {
-				err := d.queue.deliver(blockPack.peerId, blockPack.blocks)
+				err := d.queue.Deliver(blockPack.peerId, blockPack.blocks)
 				if err != nil {
 					glog.V(logger.Debug).Infof("deliver failed for peer %s: %v\n", blockPack.peerId, err)
 					// FIXME d.UnregisterPeer(blockPack.peerId)
@@ -385,86 +323,70 @@ out:
 				d.peers.setState(blockPack.peerId, idleState)
 			}
 		case <-ticker.C:
-			// after removing bad peers make sure we actually have suffucient peer left to keep downlading
+			// Check for bad peers. Bad peers may indicate a peer not responding
+			// to a `getBlocks` message. A timeout of 5 seconds is set. Peers
+			// that badly or poorly behave are removed from the peer set (not banned).
+			// Bad peers are excluded from the available peer set and therefor won't be
+			// reused. XXX We could re-introduce peers after X time.
+			badPeers := d.queue.Expire(blockTtl)
+			for _, pid := range badPeers {
+				// XXX We could make use of a reputation system here ranking peers
+				// in their performance
+				// 1) Time for them to respond;
+				// 2) Measure their speed;
+				// 3) Amount and availability.
+				if peer := d.peers[pid]; peer != nil {
+					peer.demote()
+					peer.reset()
+				}
+			}
+			// After removing bad peers make sure we actually have sufficient peer left to keep downloading
 			if len(d.peers) == 0 {
-				d.queue.reset()
-
+				d.queue.Reset()
 				return errNoPeers
 			}
-
 			// If there are unrequested hashes left start fetching
 			// from the available peers.
-			if d.queue.hashPool.Size() > 0 {
+			if d.queue.Pending() > 0 {
+				// Throttle the download if block cache is full and waiting processing
+				if d.queue.Throttle() {
+					continue
+				}
+				availablePeers := d.peers.get(idleState)
 				for _, peer := range availablePeers {
 					// Get a possible chunk. If nil is returned no chunk
 					// could be returned due to no hashes available.
-					chunk := d.queue.get(peer, maxBlockFetch)
-					if chunk == nil {
+					request := d.queue.Reserve(peer, maxBlockFetch)
+					if request == nil {
 						continue
 					}
-
-					// XXX make fetch blocking.
 					// Fetch the chunk and check for error. If the peer was somehow
 					// already fetching a chunk due to a bug, it will be returned to
 					// the queue
-					if err := peer.fetch(chunk); err != nil {
+					if err := peer.fetch(request); err != nil {
 						// log for tracing
 						glog.V(logger.Debug).Infof("peer %s received double work (state = %v)\n", peer.id, peer.state)
-						d.queue.put(chunk.hashes)
+						d.queue.Cancel(request)
 					}
 				}
-
 				// make sure that we have peers available for fetching. If all peers have been tried
 				// and all failed throw an error
-				if len(d.queue.fetching) == 0 {
-					d.queue.reset()
+				if d.queue.InFlight() == 0 {
+					d.queue.Reset()
 
-					return fmt.Errorf("%v peers avaialable = %d. total peers = %d. hashes needed = %d", errPeersUnavailable, len(availablePeers), len(d.peers), d.queue.hashPool.Size())
+					return fmt.Errorf("%v peers avaialable = %d. total peers = %d. hashes needed = %d", errPeersUnavailable, len(availablePeers), len(d.peers), d.queue.Pending())
 				}
-			} else if len(d.queue.fetching) == 0 {
-				// When there are no more queue and no more `fetching`. We can
+			} else if d.queue.InFlight() == 0 {
+				// When there are no more queue and no more in flight, We can
 				// safely assume we're done. Another part of the process will check
 				// for parent errors and will re-request anything that's missing
 				break out
-			} else {
-				// Check for bad peers. Bad peers may indicate a peer not responding
-				// to a `getBlocks` message. A timeout of 5 seconds is set. Peers
-				// that badly or poorly behave are removed from the peer set (not banned).
-				// Bad peers are excluded from the available peer set and therefor won't be
-				// reused. XXX We could re-introduce peers after X time.
-				d.queue.mu.Lock()
-				var badPeers []string
-				for pid, chunk := range d.queue.fetching {
-					if time.Since(chunk.itime) > blockTtl {
-						badPeers = append(badPeers, pid)
-						// remove peer as good peer from peer list
-						// FIXME d.UnregisterPeer(pid)
-					}
-				}
-				d.queue.mu.Unlock()
-
-				for _, pid := range badPeers {
-					// A nil chunk is delivered so that the chunk's hashes are given
-					// back to the queue objects. When hashes are put back in the queue
-					// other (decent) peers can pick them up.
-					// XXX We could make use of a reputation system here ranking peers
-					// in their performance
-					// 1) Time for them to respond;
-					// 2) Measure their speed;
-					// 3) Amount and availability.
-					d.queue.deliver(pid, nil)
-					if peer := d.peers[pid]; peer != nil {
-						peer.demote()
-						peer.reset()
-					}
-				}
 			}
 		}
 	}
-
 	glog.V(logger.Detail).Infoln("Downloaded block(s) in", time.Since(start))
 
 	return nil
@@ -484,28 +406,11 @@ func (d *Downloader) AddHashes(id string, hashes []common.Hash) error {
 		return fmt.Errorf("received hashes from %s while active peer is %s", id, d.activePeer)
 	}
 
-	if glog.V(logger.Detail) && len(hashes) != 0 {
+	if glog.V(logger.Debug) && len(hashes) != 0 {
 		from, to := hashes[0], hashes[len(hashes)-1]
-		glog.Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.hashPool.Size(), from[:4], to[:4], id)
+		glog.V(logger.Debug).Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.Pending(), from[:4], to[:4], id)
 	}
-
 	d.hashCh <- hashPack{id, hashes}
 
 	return nil
 }
-
-func (d *Downloader) isFetchingHashes() bool {
-	return atomic.LoadInt32(&d.fetchingHashes) == 1
-}
-
-func (d *Downloader) isDownloadingBlocks() bool {
-	return atomic.LoadInt32(&d.downloadingBlocks) == 1
-}
-
-func (d *Downloader) isBusy() bool {
-	return d.isFetchingHashes() || d.isDownloadingBlocks()
-}
-
-func (d *Downloader) IsBusy() bool {
-	return d.isBusy()
-}
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 872ea02eb..8ccc4d1a5 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -128,7 +128,7 @@ func TestDownload(t *testing.T) {
 		t.Error("download error", err)
 	}
 
-	inqueue := len(tester.downloader.queue.blocks)
+	inqueue := len(tester.downloader.queue.blockCache)
 	if inqueue != targetBlocks {
 		t.Error("expected", targetBlocks, "have", inqueue)
 	}
@@ -151,7 +151,7 @@ func TestMissing(t *testing.T) {
 		t.Error("download error", err)
 	}
 
-	inqueue := len(tester.downloader.queue.blocks)
+	inqueue := len(tester.downloader.queue.blockCache)
 	if inqueue != targetBlocks {
 		t.Error("expected", targetBlocks, "have", inqueue)
 	}
@@ -181,3 +181,51 @@ func TestTaking(t *testing.T) {
 		t.Error("expected to take 1000, got", len(bs1))
 	}
 }
+
+func TestThrottling(t *testing.T) {
+	minDesiredPeerCount = 4
+	blockTtl = 1 * time.Second
+
+	targetBlocks := 4 * blockCacheLimit
+	hashes := createHashes(0, targetBlocks)
+	blocks := createBlocksFromHashes(hashes)
+	tester := newTester(t, hashes, blocks)
+
+	tester.newPeer("peer1", big.NewInt(10000), hashes[0])
+	tester.newPeer("peer2", big.NewInt(0), common.Hash{})
+	tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
+	tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
+
+	// Concurrently download and take the blocks
+	errc := make(chan error, 1)
+	go func() {
+		errc <- tester.sync("peer1", hashes[0])
+	}()
+
+	done := make(chan struct{})
+	took := []*types.Block{}
+	go func() {
+		for {
+			select {
+			case <-done:
+				took = append(took, tester.downloader.TakeBlocks()...)
+				done <- struct{}{}
+				return
+			default:
+				took = append(took, tester.downloader.TakeBlocks()...)
+			}
+		}
+	}()
+
+	// Synchronise the two threads and verify
+	err := <-errc
+	done <- struct{}{}
+	<-done
+
+	if err != nil {
+		t.Fatalf("failed to synchronise blocks: %v", err)
+	}
+	if len(took) != targetBlocks {
+		t.Fatalf("downloaded block mismatch: have %v, want %v", len(took), targetBlocks)
+	}
+}
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index 91977f592..45ec1cbfd 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -78,7 +78,7 @@ func newPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blo
 }
 
 // fetch a chunk using the peer
-func (p *peer) fetch(chunk *chunk) error {
+func (p *peer) fetch(request *fetchRequest) error {
 	p.mu.Lock()
 	defer p.mu.Unlock()
 
@@ -88,13 +88,12 @@ func (p *peer) fetch(chunk *chunk) error {
 
 	// set working state
 	p.state = workingState
-	// convert the set to a fetchable slice
-	hashes, i := make([]common.Hash, chunk.hashes.Size()), 0
-	chunk.hashes.Each(func(v interface{}) bool {
-		hashes[i] = v.(common.Hash)
-		i++
-		return true
-	})
+
+	// Convert the hash set to a fetchable slice
+	hashes := make([]common.Hash, 0, len(request.Hashes))
+	for hash, _ := range request.Hashes {
+		hashes = append(hashes, hash)
+	}
 	p.getBlocks(hashes)
 
 	return nil
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 1b63a5ffb..515440bca 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -1,201 +1,341 @@
 package downloader
 
 import (
+	"errors"
 	"fmt"
-	"math"
 	"sync"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
-	"gopkg.in/fatih/set.v0"
+	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
 )
 
+const (
+	blockCacheLimit = 1024 // Maximum number of blocks to cache before throttling the download
+)
+
+// fetchRequest is a currently running block retrieval operation.
+type fetchRequest struct {
+	Peer   *peer               // Peer to which the request was sent
+	Hashes map[common.Hash]int // Requested hashes with their insertion index (priority)
+	Time   time.Time           // Time when the request was made
+}
+
 // queue represents hashes that are either need fetching or are being fetched
 type queue struct {
-	hashPool    *set.Set
-	fetchPool   *set.Set
-	blockHashes *set.Set
+	hashPool    map[common.Hash]int // Pending hashes, mapping to their insertion index (priority)
+	hashQueue   *prque.Prque        // Priority queue of the block hashes to fetch
+	hashCounter int                 // Counter indexing the added hashes to ensure retrieval order
 
-	mu       sync.Mutex
-	fetching map[string]*chunk
+	pendPool map[string]*fetchRequest // Currently pending block retrieval operations
 
-	blockOffset int
-	blocks      []*types.Block
+	blockPool   map[common.Hash]int // Hash-set of the downloaded data blocks, mapping to cache indexes
+	blockCache  []*types.Block      // Downloaded but not yet delivered blocks
+	blockOffset int                 // Offset of the first cached block in the block-chain
+
+	lock sync.RWMutex
 }
 
-func newqueue() *queue {
+// newQueue creates a new download queue for scheduling block retrieval.
+func newQueue() *queue {
 	return &queue{
-		hashPool:    set.New(),
-		fetchPool:   set.New(),
-		blockHashes: set.New(),
-		fetching:    make(map[string]*chunk),
+		hashPool:  make(map[common.Hash]int),
+		hashQueue: prque.New(),
+		pendPool:  make(map[string]*fetchRequest),
+		blockPool: make(map[common.Hash]int),
 	}
 }
 
-func (c *queue) reset() {
-	c.mu.Lock()
-	defer c.mu.Unlock()
+// Reset clears out the queue contents.
+func (q *queue) Reset() {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	q.hashPool = make(map[common.Hash]int)
+	q.hashQueue.Reset()
+	q.hashCounter = 0
 
-	c.resetNoTS()
+	q.pendPool = make(map[string]*fetchRequest)
+
+	q.blockPool = make(map[common.Hash]int)
+	q.blockOffset = 0
+	q.blockCache = nil
 }
-func (c *queue) resetNoTS() {
-	c.blockOffset = 0
-	c.hashPool.Clear()
-	c.fetchPool.Clear()
-	c.blockHashes.Clear()
-	c.blocks = nil
-	c.fetching = make(map[string]*chunk)
+
+// Size retrieves the number of hashes in the queue, returning separately for
+// pending and already downloaded.
+func (q *queue) Size() (int, int) {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
+
+	return len(q.hashPool), len(q.blockPool)
 }
 
-func (c *queue) size() int {
-	return c.hashPool.Size() + c.blockHashes.Size() + c.fetchPool.Size()
+// Pending retrieves the number of hashes pending for retrieval.
+func (q *queue) Pending() int {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
+
+	return q.hashQueue.Size()
 }
 
-// reserve a `max` set of hashes for `p` peer.
-func (c *queue) get(p *peer, max int) *chunk {
-	c.mu.Lock()
-	defer c.mu.Unlock()
+// InFlight retrieves the number of fetch requests currently in flight.
+func (q *queue) InFlight() int {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
 
-	// return nothing if the pool has been depleted
-	if c.hashPool.Size() == 0 {
-		return nil
-	}
+	return len(q.pendPool)
+}
 
-	limit := int(math.Min(float64(max), float64(c.hashPool.Size())))
-	// Create a new set of hashes
-	hashes, i := set.New(), 0
-	c.hashPool.Each(func(v interface{}) bool {
-		// break on limit
-		if i == limit {
-			return false
-		}
-		// skip any hashes that have previously been requested from the peer
-		if p.ignored.Has(v) {
-			return true
-		}
+// Throttle checks if the download should be throttled (active block fetches
+// exceed block cache).
+func (q *queue) Throttle() bool {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
+
+	// Calculate the currently in-flight block requests
+	pending := 0
+	for _, request := range q.pendPool {
+		pending += len(request.Hashes)
+	}
+	// Throttle if more blocks are in-flight than free space in the cache
+	return pending >= len(q.blockCache)-len(q.blockPool)
+}
 
-		hashes.Add(v)
-		i++
+// Has checks if a hash is within the download queue or not.
+func (q *queue) Has(hash common.Hash) bool {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
 
+	if _, ok := q.hashPool[hash]; ok {
+		return true
+	}
+	if _, ok := q.blockPool[hash]; ok {
 		return true
-	})
-	// if no hashes can be requested return a nil chunk
-	if hashes.Size() == 0 {
-		return nil
 	}
+	return false
+}
 
-	// remove the fetchable hashes from hash pool
-	c.hashPool.Separate(hashes)
-	c.fetchPool.Merge(hashes)
+// Insert adds a set of hashes for the download queue for scheduling.
+func (q *queue) Insert(hashes []common.Hash) {
+	q.lock.Lock()
+	defer q.lock.Unlock()
 
-	// Create a new chunk for the seperated hashes. The time is being used
-	// to reset the chunk (timeout)
-	chunk := &chunk{p, hashes, time.Now()}
-	// register as 'fetching' state
-	c.fetching[p.id] = chunk
+	// Insert all the hashes prioritized in the arrival order
+	for i, hash := range hashes {
+		index := q.hashCounter + i
 
-	// create new chunk for peer
-	return chunk
+		q.hashPool[hash] = index
+		q.hashQueue.Push(hash, float32(index)) // Highest gets schedules first
+	}
+	// Update the hash counter for the next batch of inserts
+	q.hashCounter += len(hashes)
 }
 
-func (c *queue) has(hash common.Hash) bool {
-	return c.hashPool.Has(hash) || c.fetchPool.Has(hash) || c.blockHashes.Has(hash)
+// GetHeadBlock retrieves the first block from the cache, or nil if it hasn't
+// been downloaded yet (or simply non existent).
+func (q *queue) GetHeadBlock() *types.Block {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
+
+	if len(q.blockCache) == 0 {
+		return nil
+	}
+	return q.blockCache[0]
 }
 
-func (c *queue) getBlock(hash common.Hash) *types.Block {
-	c.mu.Lock()
-	defer c.mu.Unlock()
+// GetBlock retrieves a downloaded block, or nil if non-existent.
+func (q *queue) GetBlock(hash common.Hash) *types.Block {
+	q.lock.RLock()
+	defer q.lock.RUnlock()
 
-	if !c.blockHashes.Has(hash) {
+	// Short circuit if the block hasn't been downloaded yet
+	index, ok := q.blockPool[hash]
+	if !ok {
 		return nil
 	}
-
-	for _, block := range c.blocks {
-		if block.Hash() == hash {
-			return block
-		}
+	// Return the block if it's still available in the cache
+	if q.blockOffset <= index && index < q.blockOffset+len(q.blockCache) {
+		return q.blockCache[index-q.blockOffset]
 	}
 	return nil
 }
 
-// deliver delivers a chunk to the queue that was requested of the peer
-func (c *queue) deliver(id string, blocks []*types.Block) (err error) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-
-	chunk := c.fetching[id]
-	// If the chunk was never requested simply ignore it
-	if chunk != nil {
-		delete(c.fetching, id)
-		// check the length of the returned blocks. If the length of blocks is 0
-		// we'll assume the peer doesn't know about the chain.
-		if len(blocks) == 0 {
-			// So we can ignore the blocks we didn't know about
-			chunk.peer.ignored.Merge(chunk.hashes)
-		}
+// TakeBlocks retrieves and permanently removes a batch of blocks from the cache.
+// The head parameter is required to prevent a race condition where concurrent
+// takes may fail parent verifications.
+func (q *queue) TakeBlocks(head *types.Block) types.Blocks {
+	q.lock.Lock()
+	defer q.lock.Unlock()
 
-		// Add the blocks
-		for i, block := range blocks {
-			// See (1) for future limitation
-			n := int(block.NumberU64()) - c.blockOffset
-			if n > len(c.blocks) || n < 0 {
-				// set the error and set the blocks which could be processed
-				// abort the rest of the blocks (FIXME this could be improved)
-				err = fmt.Errorf("received block which overflow (N=%v O=%v)", block.Number(), c.blockOffset)
-				blocks = blocks[:i]
-				break
-			}
-			c.blocks[n] = block
+	// Short circuit if the head block's different
+	if len(q.blockCache) == 0 || q.blockCache[0] != head {
+		return nil
+	}
+	// Otherwise accumulate all available blocks
+	var blocks types.Blocks
+	for _, block := range q.blockCache {
+		if block == nil {
+			break
 		}
-		// seperate the blocks and the hashes
-		blockHashes := chunk.fetchedHashes(blocks)
-		// merge block hashes
-		c.blockHashes.Merge(blockHashes)
-		// Add back whatever couldn't be delivered
-		c.hashPool.Merge(chunk.hashes)
-		// Remove the hashes from the fetch pool
-		c.fetchPool.Separate(chunk.hashes)
+		blocks = append(blocks, block)
+		delete(q.blockPool, block.Hash())
 	}
+	// Delete the blocks from the slice and let them be garbage collected
+	// without this slice trick the blocks would stay in memory until nil
+	// would be assigned to q.blocks
+	copy(q.blockCache, q.blockCache[len(blocks):])
+	for k, n := len(q.blockCache)-len(blocks), len(q.blockCache); k < n; k++ {
+		q.blockCache[k] = nil
+	}
+	q.blockOffset += len(blocks)
 
-	return
+	return blocks
 }
 
-func (c *queue) alloc(offset, size int) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
+// Reserve reserves a set of hashes for the given peer, skipping any previously
+// failed download.
+func (q *queue) Reserve(p *peer, max int) *fetchRequest {
+	q.lock.Lock()
+	defer q.lock.Unlock()
 
-	if c.blockOffset < offset {
-		c.blockOffset = offset
+	// Short circuit if the pool has been depleted, or if the peer's already
+	// downloading something (sanity check not to corrupt state)
+	if q.hashQueue.Empty() {
+		return nil
 	}
-
-	// (1) XXX at some point we could limit allocation to memory and use the disk
-	// to store future blocks.
-	if len(c.blocks) < size {
-		c.blocks = append(c.blocks, make([]*types.Block, size)...)
+	if _, ok := q.pendPool[p.id]; ok {
+		return nil
+	}
+	// Retrieve a batch of hashes, skipping previously failed ones
+	send := make(map[common.Hash]int)
+	skip := make(map[common.Hash]int)
+
+	for len(send) < max && !q.hashQueue.Empty() {
+		hash, priority := q.hashQueue.Pop()
+		if p.ignored.Has(hash) {
+			skip[hash.(common.Hash)] = int(priority)
+		} else {
+			send[hash.(common.Hash)] = int(priority)
+		}
+	}
+	// Merge all the skipped hashes back
+	for hash, index := range skip {
+		q.hashQueue.Push(hash, float32(index))
+	}
+	// Assemble and return the block download request
+	if len(send) == 0 {
+		return nil
+	}
+	request := &fetchRequest{
+		Peer:   p,
+		Hashes: send,
+		Time:   time.Now(),
 	}
+	q.pendPool[p.id] = request
+
+	return request
 }
 
-// puts puts sets of hashes on to the queue for fetching
-func (c *queue) put(hashes *set.Set) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
+// Cancel aborts a fetch request, returning all pending hashes to the queue.
+func (q *queue) Cancel(request *fetchRequest) {
+	q.lock.Lock()
+	defer q.lock.Unlock()
 
-	c.hashPool.Merge(hashes)
+	for hash, index := range request.Hashes {
+		q.hashQueue.Push(hash, float32(index))
+	}
+	delete(q.pendPool, request.Peer.id)
 }
 
-type chunk struct {
-	peer   *peer
-	hashes *set.Set
-	itime  time.Time
+// Expire checks for in flight requests that exceeded a timeout allowance,
+// canceling them and returning the responsible peers for penalization.
+func (q *queue) Expire(timeout time.Duration) []string {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	// Iterate over the expired requests and return each to the queue
+	peers := []string{}
+	for id, request := range q.pendPool {
+		if time.Since(request.Time) > timeout {
+			for hash, index := range request.Hashes {
+				q.hashQueue.Push(hash, float32(index))
+			}
+			peers = append(peers, id)
+		}
+	}
+	// Remove the expired requests from the pending pool
+	for _, id := range peers {
+		delete(q.pendPool, id)
+	}
+	return peers
 }
 
-func (ch *chunk) fetchedHashes(blocks []*types.Block) *set.Set {
-	fhashes := set.New()
+// Deliver injects a block retrieval response into the download queue.
+func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	// Short circuit if the blocks were never requested
+	request := q.pendPool[id]
+	if request == nil {
+		return errors.New("no fetches pending")
+	}
+	delete(q.pendPool, id)
+
+	// If no blocks were retrieved, mark them as unavailable for the origin peer
+	if len(blocks) == 0 {
+		for hash, _ := range request.Hashes {
+			request.Peer.ignored.Add(hash)
+		}
+	}
+	// Iterate over the downloaded blocks and add each of them
+	errs := make([]error, 0)
 	for _, block := range blocks {
-		fhashes.Add(block.Hash())
+		// Skip any blocks that fall outside the cache range
+		index := int(block.NumberU64()) - q.blockOffset
+		if index >= len(q.blockCache) || index < 0 {
+			//fmt.Printf("block cache overflown (N=%v O=%v, C=%v)", block.Number(), q.blockOffset, len(q.blockCache))
+			continue
+		}
+		// Skip any blocks that were not requested
+		hash := block.Hash()
+		if _, ok := request.Hashes[hash]; !ok {
+			errs = append(errs, fmt.Errorf("non-requested block %v", hash))
+			continue
+		}
+		// Otherwise merge the block and mark the hash block
+		q.blockCache[index] = block
+
+		delete(request.Hashes, hash)
+		delete(q.hashPool, hash)
+		q.blockPool[hash] = int(block.NumberU64())
 	}
-	ch.hashes.Separate(fhashes)
+	// Return all failed fetches to the queue
+	for hash, index := range request.Hashes {
+		q.hashQueue.Push(hash, float32(index))
+	}
+	if len(errs) != 0 {
+		return fmt.Errorf("multiple failures: %v", errs)
+	}
+	return nil
+}
 
-	return fhashes
+// Alloc ensures that the block cache is the correct size, given a starting
+// offset, and a memory cap.
+func (q *queue) Alloc(offset int) {
+	q.lock.Lock()
+	defer q.lock.Unlock()
+
+	if q.blockOffset < offset {
+		q.blockOffset = offset
+	}
+	size := len(q.hashPool)
+	if size > blockCacheLimit {
+		size = blockCacheLimit
+	}
+	if len(q.blockCache) < size {
+		q.blockCache = append(q.blockCache, make([]*types.Block, size-len(q.blockCache))...)
+	}
 }
diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go
index b163bd9c7..b1f3591f3 100644
--- a/eth/downloader/queue_test.go
+++ b/eth/downloader/queue_test.go
@@ -32,31 +32,30 @@ func createBlocksFromHashSet(hashes *set.Set) []*types.Block {
 }
 
 func TestChunking(t *testing.T) {
-	queue := newqueue()
+	queue := newQueue()
 	peer1 := newPeer("peer1", common.Hash{}, nil, nil)
 	peer2 := newPeer("peer2", common.Hash{}, nil, nil)
 
 	// 99 + 1 (1 == known genesis hash)
 	hashes := createHashes(0, 99)
-	hashSet := createHashSet(hashes)
-	queue.put(hashSet)
+	queue.Insert(hashes)
 
-	chunk1 := queue.get(peer1, 99)
+	chunk1 := queue.Reserve(peer1, 99)
 	if chunk1 == nil {
 		t.Errorf("chunk1 is nil")
 		t.FailNow()
 	}
-	chunk2 := queue.get(peer2, 99)
+	chunk2 := queue.Reserve(peer2, 99)
 	if chunk2 == nil {
 		t.Errorf("chunk2 is nil")
 		t.FailNow()
 	}
 
-	if chunk1.hashes.Size() != 99 {
-		t.Error("expected chunk1 hashes to be 99, got", chunk1.hashes.Size())
+	if len(chunk1.Hashes) != 99 {
+		t.Error("expected chunk1 hashes to be 99, got", len(chunk1.Hashes))
 	}
 
-	if chunk2.hashes.Size() != 1 {
-		t.Error("expected chunk1 hashes to be 1, got", chunk2.hashes.Size())
+	if len(chunk2.Hashes) != 1 {
+		t.Error("expected chunk1 hashes to be 1, got", len(chunk2.Hashes))
 	}
 }
diff --git a/eth/handler.go b/eth/handler.go
index 1e0663816..41b6728d9 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -19,9 +19,9 @@ import (
 )
 
 const (
-	peerCountTimeout    = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
-	blockProcTimer      = 500 * time.Millisecond
-	minDesiredPeerCount = 5 // Amount of peers desired to start syncing
+	forceSyncCycle      = 10 * time.Second       // Time interval to force syncs, even if few peers are available
+	blockProcCycle      = 500 * time.Millisecond // Time interval to check for new blocks to process
+	minDesiredPeerCount = 5                      // Amount of peers desired to start syncing
 
 	blockProcAmount = 256
 )
@@ -307,7 +307,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
 
 		// Attempt to insert the newly received by checking if the parent exists.
 		// if the parent exists we process the block and propagate to our peers
-		// otherwise synchronise with the peer
+		// otherwise synchronize with the peer
 		if self.chainman.HasBlock(request.Block.ParentHash()) {
 			if _, err := self.chainman.InsertChain(types.Blocks{request.Block}); err != nil {
 				glog.V(logger.Error).Infoln("removed peer (", p.id, ") due to block error")
diff --git a/eth/sync.go b/eth/sync.go
index 9e8b21a7c..c49f5209d 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -12,10 +12,8 @@ import (
 
 // Sync contains all synchronisation code for the eth protocol
 func (pm *ProtocolManager) update() {
-	// itimer is used to determine when to start ignoring `minDesiredPeerCount`
-	itimer := time.NewTimer(peerCountTimeout)
-	// btimer is used for picking of blocks from the downloader
-	btimer := time.Tick(blockProcTimer)
+	forceSync := time.Tick(forceSyncCycle)
+	blockProc := time.Tick(blockProcCycle)
 
 	for {
 		select {
@@ -24,27 +22,22 @@ func (pm *ProtocolManager) update() {
 			if len(pm.peers) < minDesiredPeerCount {
 				break
 			}
-
-			// Find the best peer
+			// Find the best peer and synchronise with it
 			peer := getBestPeer(pm.peers)
 			if peer == nil {
-				glog.V(logger.Debug).Infoln("Sync attempt cancelled. No peers available")
+				glog.V(logger.Debug).Infoln("Sync attempt canceled. No peers available")
 			}
-
-			itimer.Stop()
 			go pm.synchronise(peer)
-		case <-itimer.C:
-			// The timer will make sure that the downloader keeps an active state
-			// in which it attempts to always check the network for highest td peers
-			// Either select the peer or restart the timer if no peers could
-			// be selected.
+
+		case <-forceSync:
+			// Force a sync even if not enough peers are present
 			if peer := getBestPeer(pm.peers); peer != nil {
 				go pm.synchronise(peer)
-			} else {
-				itimer.Reset(5 * time.Second)
 			}
-		case <-btimer:
+		case <-blockProc:
+			// Try to pull some blocks from the downloaded
 			go pm.processBlocks()
+
 		case <-pm.quitSync:
 			return
 		}
@@ -59,12 +52,11 @@ func (pm *ProtocolManager) processBlocks() error {
 	pm.wg.Add(1)
 	defer pm.wg.Done()
 
+	// Take a batch of blocks (will return nil if a previous batch has not reached the chain yet)
 	blocks := pm.downloader.TakeBlocks()
 	if len(blocks) == 0 {
 		return nil
 	}
-	defer pm.downloader.Done()
-
 	glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].Number(), blocks[len(blocks)-1].Number())
 
 	for len(blocks) != 0 && !pm.quit {
@@ -83,26 +75,28 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
 	if peer.td.Cmp(pm.chainman.Td()) <= 0 {
 		return
 	}
-	// Check downloader if it's busy so it doesn't show the sync message
-	// for every attempty
-	if pm.downloader.IsBusy() {
-		return
-	}
-
 	// FIXME if we have the hash in our chain and the TD of the peer is
 	// much higher than ours, something is wrong with us or the peer.
 	// Check if the hash is on our own chain
 	if pm.chainman.HasBlock(peer.recentHash) {
 		return
 	}
-
-	// Get the hashes from the peer (synchronously)
+	glog.V(logger.Debug).Infof("Attempting synchronisation: %v, 0x%x", peer.id, peer.recentHash)
+
 	err := pm.downloader.Synchronise(peer.id, peer.recentHash)
-	if err != nil && err == downloader.ErrBadPeer {
-		glog.V(logger.Debug).Infoln("removed peer from peer set due to bad action")
+	switch err {
+	case nil:
+		glog.V(logger.Debug).Infof("Synchronisation completed")
+
+	case downloader.ErrBusy:
+		glog.V(logger.Debug).Infof("Synchronisation already in progress")
+
+	case downloader.ErrTimeout:
+		glog.V(logger.Debug).Infof("Removing peer %v due to sync timeout", peer.id)
 		pm.removePeer(peer)
-	} else if err != nil {
-		// handle error
-		glog.V(logger.Detail).Infoln("error downloading:", err)
+
+	default:
+		glog.V(logger.Warn).Infof("Synchronisation failed: %v", err)
 	}
 }
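The sync.go hunk above replaces the old boolean IsBusy check with exported sentinel errors (ErrBusy, ErrTimeout) classified in a switch, while an atomic compare-and-swap in the downloader guarantees only one synchronisation runs at a time. A minimal, self-contained sketch of that caller-side pattern follows; the types are stubs, and the real ProtocolManager plumbing (peer sets, removePeer, the chain manager) is elided.

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// Exported sentinel errors, mirroring the ones the commit introduces
// in eth/downloader so callers can classify outcomes.
var (
	ErrBusy    = errors.New("busy")
	ErrTimeout = errors.New("timeout")
)

type downloader struct {
	synchronising int32
}

// Synchronise mimics the commit's CompareAndSwap guard: only one sync may
// run at a time, and concurrent callers get ErrBusy instead of blocking.
func (d *downloader) Synchronise(id string) error {
	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
		return ErrBusy
	}
	defer atomic.StoreInt32(&d.synchronising, 0)
	// ... hash and block fetching would happen here ...
	return nil
}

// synchronise shows the switch-on-error shape of the new pm.synchronise:
// each sentinel gets its own log line, and only timeouts drop the peer.
func synchronise(d *downloader, peer string) {
	switch err := d.Synchronise(peer); err {
	case nil:
		fmt.Println("synchronisation completed")
	case ErrBusy:
		fmt.Println("synchronisation already in progress")
	case ErrTimeout:
		fmt.Println("removing peer due to sync timeout:", peer)
		// a real implementation would call removePeer(peer) here
	default:
		fmt.Println("synchronisation failed:", err)
	}
}

func main() {
	d := new(downloader)
	synchronise(d, "peer1")
}

The design choice visible in both the sketch and the diff: exporting only the errors the caller must act on (busy, timeout) keeps the downloader's internal failure modes (errEmptyHashSet, errPendingQueue, the now-unexported errBadPeer) private while still allowing differentiated handling.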