author | Péter Szilágyi <peterke@gmail.com> | 2015-06-12 18:35:29 +0800
committer | Péter Szilágyi <peterke@gmail.com> | 2015-06-15 14:22:37 +0800
commit | fc7abd98865f3bdc6cc36258026db98a649cd577 (patch)
tree | 0bca7e6e40445df3f21304e84b6df4dd5d3e7bf5 /eth/sync.go
parent | 0fc71877a7d7a46f35147f753cba0de7b937c77a (diff)
eth, eth/downloader: move block processing into the downloader
Diffstat (limited to 'eth/sync.go')
-rw-r--r-- | eth/sync.go | 53
1 file changed, 3 insertions, 50 deletions
diff --git a/eth/sync.go b/eth/sync.go
index b127ca979..88a76805c 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -1,9 +1,7 @@
 package eth
 
 import (
-	"math"
 	"math/rand"
-	"sync/atomic"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -15,12 +13,10 @@ import (
 
 const (
 	forceSyncCycle      = 10 * time.Second       // Time interval to force syncs, even if few peers are available
-	blockProcCycle      = 500 * time.Millisecond // Time interval to check for new blocks to process
 	notifyCheckCycle    = 100 * time.Millisecond // Time interval to allow hash notifies to fulfill before hard fetching
 	notifyArriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
 	notifyFetchTimeout  = 5 * time.Second        // Maximum alloted time to return an explicitly requested block
 	minDesiredPeerCount = 5                      // Amount of peers desired to start syncing
-	blockProcAmount     = 256
 
 	// This is the target size for the packs of transactions sent by txsyncLoop.
 	// A pack can get larger than this if a single transactions exceeds this size.
@@ -254,10 +250,10 @@ func (pm *ProtocolManager) fetcher() {
 // syncer is responsible for periodically synchronising with the network, both
 // downloading hashes and blocks as well as retrieving cached ones.
 func (pm *ProtocolManager) syncer() {
-	forceSync := time.Tick(forceSyncCycle)
-	blockProc := time.Tick(blockProcCycle)
-	blockProcPend := int32(0)
+	// Abort any pending syncs if we terminate
+	defer pm.downloader.Cancel()
 
+	forceSync := time.Tick(forceSyncCycle)
 	for {
 		select {
 		case <-pm.newPeerCh:
@@ -271,55 +267,12 @@ func (pm *ProtocolManager) syncer() {
 			// Force a sync even if not enough peers are present
 			go pm.synchronise(pm.peers.BestPeer())
 
-		case <-blockProc:
-			// Try to pull some blocks from the downloaded
-			if atomic.CompareAndSwapInt32(&blockProcPend, 0, 1) {
-				go func() {
-					pm.processBlocks()
-					atomic.StoreInt32(&blockProcPend, 0)
-				}()
-			}
-
 		case <-pm.quitSync:
 			return
 		}
 	}
 }
 
-// processBlocks retrieves downloaded blocks from the download cache and tries
-// to construct the local block chain with it. Note, since the block retrieval
-// order matters, access to this function *must* be synchronized/serialized.
-func (pm *ProtocolManager) processBlocks() error {
-	pm.wg.Add(1)
-	defer pm.wg.Done()
-
-	// Short circuit if no blocks are available for insertion
-	blocks := pm.downloader.TakeBlocks()
-	if len(blocks) == 0 {
-		return nil
-	}
-	glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].RawBlock.Number(), blocks[len(blocks)-1].RawBlock.Number())
-
-	for len(blocks) != 0 && !pm.quit {
-		// Retrieve the first batch of blocks to insert
-		max := int(math.Min(float64(len(blocks)), float64(blockProcAmount)))
-		raw := make(types.Blocks, 0, max)
-		for _, block := range blocks[:max] {
-			raw = append(raw, block.RawBlock)
-		}
-		// Try to inset the blocks, drop the originating peer if there's an error
-		index, err := pm.chainman.InsertChain(raw)
-		if err != nil {
-			glog.V(logger.Debug).Infoln("Downloaded block import failed:", err)
-			pm.removePeer(blocks[index].OriginPeer)
-			pm.downloader.Cancel()
-			return err
-		}
-		blocks = blocks[max:]
-	}
-	return nil
-}
-
 // synchronise tries to sync up our local block chain with a remote peer, both
 // adding various sanity checks as well as wrapping it with various log entries.
 func (pm *ProtocolManager) synchronise(peer *peer) {
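The removed processBlocks combined two small patterns: an atomic compare-and-swap flag so at most one insertion goroutine runs at a time, and insertion in batches capped at blockProcAmount (256) blocks per chain-manager call. Below is a minimal, standalone Go sketch of those two patterns; Block, insertChain, processBlocks and batchSize are hypothetical stand-ins for illustration, not go-ethereum's actual types or API.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Block is a placeholder for a downloaded block; only the count matters here.
type Block struct{ Number int }

// insertChain stands in for the chain manager's insert call and just reports
// the batch size; it returns an error so the caller keeps the same
// "abort and drop the peer on failure" shape as the removed code.
func insertChain(batch []Block) error {
	fmt.Printf("inserted batch of %d blocks\n", len(batch))
	return nil
}

// processBlocks drains the queue in fixed-size batches, mirroring the removed
// loop that capped each InsertChain call at blockProcAmount blocks.
func processBlocks(blocks []Block, batchSize int) error {
	for len(blocks) != 0 {
		max := batchSize
		if len(blocks) < max {
			max = len(blocks)
		}
		if err := insertChain(blocks[:max]); err != nil {
			return err
		}
		blocks = blocks[max:]
	}
	return nil
}

func main() {
	var pending int32 // 0 = idle, 1 = a processing goroutine is already running
	done := make(chan struct{})

	// The CAS guard: only the caller that flips pending from 0 to 1 may start
	// a run, and the goroutine resets the flag when it finishes. Any ticks
	// that arrive while a run is in flight become no-ops.
	if atomic.CompareAndSwapInt32(&pending, 0, 1) {
		go func() {
			defer close(done)
			defer atomic.StoreInt32(&pending, 0)
			processBlocks(make([]Block, 600), 256) // batches of 256, 256 and 88
		}()
	}
	<-done
}
```

With this commit the downloader itself becomes responsible for feeding fetched blocks into the chain, so the protocol manager no longer needs the guard or the batching at all.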
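What remains of the syncer after the patch is a single select loop: react to new peers, force a sync on a ticker, and cancel any in-flight download on shutdown via the new deferred Cancel. The sketch below mirrors that shape with placeholder channels and closures (newPeerCh, quitSync, synchronise and cancelDownload are assumptions for the demo, not the real ProtocolManager fields).

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	newPeerCh := make(chan string)
	quitSync := make(chan struct{})

	// Placeholders for pm.synchronise and pm.downloader.Cancel.
	synchronise := func(peer string) { fmt.Println("synchronising with", peer) }
	cancelDownload := func() { fmt.Println("cancelling pending download") }

	// Drive the demo: announce one peer, then ask the loop to quit.
	go func() {
		newPeerCh <- "peer-1"
		time.Sleep(30 * time.Millisecond)
		close(quitSync)
	}()

	// Abort any pending sync when the loop terminates, mirroring the patch's
	// new `defer pm.downloader.Cancel()`.
	defer cancelDownload()

	forceSync := time.Tick(10 * time.Millisecond) // forceSyncCycle, shortened for the demo
	for {
		select {
		case peer := <-newPeerCh:
			go synchronise(peer)
		case <-forceSync:
			go synchronise("best-peer")
		case <-quitSync:
			return
		}
	}
}
```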