Diffstat (limited to 'eth/downloader')
-rw-r--r--   eth/downloader/downloader.go        282
-rw-r--r--   eth/downloader/downloader_test.go   297
-rw-r--r--   eth/downloader/peer.go               58
-rw-r--r--   eth/downloader/queue.go              49
-rw-r--r--   eth/downloader/queue_test.go         31
5 files changed, 461 insertions, 256 deletions
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 85531ce15..29b627771 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -1,35 +1,33 @@
package downloader
import (
+ "bytes"
"errors"
"math/rand"
"sync"
"sync/atomic"
"time"
- "gopkg.in/fatih/set.v0"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog"
+ "gopkg.in/fatih/set.v0"
)
-const (
+var (
MinHashFetch = 512 // Minimum amount of hashes to not consider a peer stalling
MaxHashFetch = 2048 // Amount of hashes to be fetched per retrieval request
MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request
- peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
- hashTTL = 5 * time.Second // Time it takes for a hash request to time out
-)
+ hashTTL = 5 * time.Second // Time it takes for a hash request to time out
+ blockSoftTTL = 3 * time.Second // Request completion threshold for increasing or decreasing a peer's bandwidth
+ blockHardTTL = 3 * blockSoftTTL // Maximum time allowance before a block request is considered expired
+ crossCheckCycle = time.Second // Period after which to check for expired cross checks
-var (
- blockTTL = 5 * time.Second // Time it takes for a block request to time out
- crossCheckCycle = time.Second // Period after which to check for expired cross checks
- minDesiredPeerCount = 5 // Amount of peers desired to start syncing
+ maxBannedHashes = 4096 // Number of bannable hashes before phasing old ones out
)
var (
@@ -38,10 +36,11 @@ var (
errUnknownPeer = errors.New("peer is unknown or unhealthy")
ErrBadPeer = errors.New("action from bad peer ignored")
ErrStallingPeer = errors.New("peer is stalling")
+ errBannedHead = errors.New("peer head hash already banned")
errNoPeers = errors.New("no peers to keep download active")
ErrPendingQueue = errors.New("pending items in queue")
ErrTimeout = errors.New("timeout")
- errEmptyHashSet = errors.New("empty hash set by peer")
+ ErrEmptyHashSet = errors.New("empty hash set by peer")
errPeersUnavailable = errors.New("no peers available or all peers tried for block download process")
errAlreadyInPool = errors.New("hash already in pool")
ErrInvalidChain = errors.New("retrieved hash chain is invalid")
@@ -74,11 +73,10 @@ type crossCheck struct {
type Downloader struct {
mux *event.TypeMux
- mu sync.RWMutex
queue *queue // Scheduler for selecting the hashes to download
peers *peerSet // Set of active peers from which download can proceed
checks map[common.Hash]*crossCheck // Pending cross checks to verify a hash chain
- banned *set.SetNonTS // Set of hashes we've received and banned
+ banned *set.Set // Set of hashes we've received and banned
// Callbacks
hasBlock hashCheckFn
@@ -116,7 +114,7 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock getBlockFn) *Downloa
blockCh: make(chan blockPack, 1),
}
// Inject all the known bad hashes
- downloader.banned = set.NewNonTS()
+ downloader.banned = set.New()
for hash, _ := range core.BadHashes {
downloader.banned.Add(hash)
}
@@ -135,6 +133,12 @@ func (d *Downloader) Synchronising() bool {
// RegisterPeer injects a new download peer into the set of block sources to be
// used for fetching hashes and blocks from.
func (d *Downloader) RegisterPeer(id string, head common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error {
+ // If the peer wants to send a banned hash, reject
+ if d.banned.Has(head) {
+ glog.V(logger.Debug).Infoln("Register rejected, head hash banned:", id)
+ return errBannedHead
+ }
+ // Otherwise try to construct and register the peer
glog.V(logger.Detail).Infoln("Registering peer", id)
if err := d.peers.Register(newPeer(id, head, getHashes, getBlocks)); err != nil {
glog.V(logger.Error).Infoln("Register failed:", err)
@@ -164,6 +168,10 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {
}
defer atomic.StoreInt32(&d.synchronising, 0)
+ // If the head hash is banned, terminate immediately
+ if d.banned.Has(hash) {
+ return ErrInvalidChain
+ }
// Post a user notification of the sync (only once per session)
if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
glog.V(logger.Info).Infoln("Block synchronisation started")
@@ -197,6 +205,8 @@ func (d *Downloader) TakeBlocks() []*Block {
return d.queue.TakeBlocks()
}
+// Has checks if the downloader knows about a particular hash, meaning that it's
+// either already downloaded or pending retrieval.
func (d *Downloader) Has(hash common.Hash) bool {
return d.queue.Has(hash)
}
@@ -253,23 +263,29 @@ func (d *Downloader) Cancel() bool {
// XXX Make synchronous
func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
- glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", h[:4], p.id)
-
- start := time.Now()
-
- // Add the hash to the queue first, and start hash retrieval
- d.queue.Insert([]common.Hash{h})
- p.getHashes(h)
-
var (
+ start = time.Now()
active = p // active peer will help determine the current active peer
head = common.Hash{} // common and last hash
- timeout = time.NewTimer(hashTTL) // timer to dump a non-responsive active peer
+ timeout = time.NewTimer(0) // timer to dump a non-responsive active peer
attempted = make(map[string]bool) // attempted peers will help with retries
crossTicker = time.NewTicker(crossCheckCycle) // ticker to periodically check expired cross checks
)
defer crossTicker.Stop()
+ defer timeout.Stop()
+
+ glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", h[:4], p.id)
+ <-timeout.C // drain the initial tick so the timeout channel starts out empty.
+
+ getHashes := func(from common.Hash) {
+ active.getHashes(from)
+ timeout.Reset(hashTTL)
+ }
+
+ // Add the hash to the queue, and start hash retrieval.
+ d.queue.Insert([]common.Hash{h})
+ getHashes(h)
attempted[p.id] = true
for finished := false; !finished; {
@@ -280,19 +296,24 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
case hashPack := <-d.hashCh:
// Make sure the active peer is giving us the hashes
if hashPack.peerId != active.id {
- glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)\n", hashPack.peerId)
+ glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId)
break
}
- timeout.Reset(hashTTL)
+ timeout.Stop()
// Make sure the peer actually gave something valid
if len(hashPack.hashes) == 0 {
- glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", active.id)
- return errEmptyHashSet
+ glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set", active.id)
+ return ErrEmptyHashSet
}
- for _, hash := range hashPack.hashes {
+ for index, hash := range hashPack.hashes {
if d.banned.Has(hash) {
- glog.V(logger.Debug).Infof("Peer (%s) sent a known invalid chain\n", active.id)
+ glog.V(logger.Debug).Infof("Peer (%s) sent a known invalid chain", active.id)
+
+ d.queue.Insert(hashPack.hashes[:index+1])
+ if err := d.banBlocks(active.id, hash); err != nil {
+ glog.V(logger.Debug).Infof("Failed to ban batch of blocks: %v", err)
+ }
return ErrInvalidChain
}
}
@@ -300,7 +321,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
done, index := false, 0
for index, head = range hashPack.hashes {
if d.hasBlock(head) || d.queue.GetBlock(head) != nil {
- glog.V(logger.Debug).Infof("Found common hash %x\n", head[:4])
+ glog.V(logger.Debug).Infof("Found common hash %x", head[:4])
hashPack.hashes = hashPack.hashes[:index]
done = true
break
@@ -309,7 +330,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
// Insert all the new hashes, but only continue if got something useful
inserts := d.queue.Insert(hashPack.hashes)
if len(inserts) == 0 && !done {
- glog.V(logger.Debug).Infof("Peer (%s) responded with stale hashes\n", active.id)
+ glog.V(logger.Debug).Infof("Peer (%s) responded with stale hashes", active.id)
return ErrBadPeer
}
if !done {
@@ -324,21 +345,21 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
glog.V(logger.Detail).Infof("Cross checking (%s) with %x/%x", active.id, origin, parent)
d.checks[origin] = &crossCheck{
- expire: time.Now().Add(blockTTL),
+ expire: time.Now().Add(blockSoftTTL),
parent: parent,
}
active.getBlocks([]common.Hash{origin})
// Also fetch a fresh batch of hashes
- active.getHashes(head)
+ getHashes(head)
continue
}
- // We're done, allocate the download cache and proceed pulling the blocks
+ // We're done, prepare the download cache and proceed pulling the blocks
offset := 0
if block := d.getBlock(head); block != nil {
offset = int(block.NumberU64() + 1)
}
- d.queue.Alloc(offset)
+ d.queue.Prepare(offset)
finished = true
case blockPack := <-d.blockCh:
@@ -364,7 +385,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
}
case <-timeout.C:
- glog.V(logger.Debug).Infof("Peer (%s) didn't respond in time for hash request\n", p.id)
+ glog.V(logger.Debug).Infof("Peer (%s) didn't respond in time for hash request", p.id)
var p *peer // p will be set if a peer can be found
// Attempt to find a new peer by checking inclusion of peers best hash in our
@@ -384,11 +405,11 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
// set p to the active peer. this will invalidate any hashes that may be returned
// by our previous (delayed) peer.
active = p
- p.getHashes(head)
- glog.V(logger.Debug).Infof("Hash fetching switched to new peer(%s)\n", p.id)
+ getHashes(head)
+ glog.V(logger.Debug).Infof("Hash fetching switched to new peer(%s)", p.id)
}
}
- glog.V(logger.Debug).Infof("Downloaded hashes (%d) in %v\n", d.queue.Pending(), time.Since(start))
+ glog.V(logger.Debug).Infof("Downloaded hashes (%d) in %v", d.queue.Pending(), time.Since(start))
return nil
}
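The reworked loop above creates its timeout with time.NewTimer(0), drains the initial tick, and only arms it through Reset(hashTTL) inside the getHashes closure, so a request can never race a stale expiry. A minimal standalone sketch of that drain-and-reset pattern (the doRequest helper and the simulated reply are illustrative, not part of the downloader):

package main

import (
	"fmt"
	"time"
)

const hashTTL = 5 * time.Second

func main() {
	// Create the timer already expired and drain it once, so the first Reset
	// arms a clean timeout with no stale tick left in the channel.
	timeout := time.NewTimer(0)
	defer timeout.Stop()
	<-timeout.C

	replies := make(chan string, 1)
	doRequest := func() {
		timeout.Reset(hashTTL) // arm the timeout together with the request
		go func() { replies <- "hashes" }() // simulated remote reply
	}

	doRequest()
	select {
	case msg := <-replies:
		timeout.Stop() // reply arrived, disarm the timeout
		fmt.Println("received:", msg)
	case <-timeout.C:
		fmt.Println("request timed out")
	}
}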
@@ -400,71 +421,92 @@ func (d *Downloader) fetchBlocks() error {
glog.V(logger.Debug).Infoln("Downloading", d.queue.Pending(), "block(s)")
start := time.Now()
- // default ticker for re-fetching blocks every now and then
+ // Start a ticker to continue throttled downloads and check for bad peers
ticker := time.NewTicker(20 * time.Millisecond)
+ defer ticker.Stop()
+
out:
for {
select {
case <-d.cancelCh:
return errCancelBlockFetch
+ case <-d.hashCh:
+ // Out of bounds hashes received, ignore them
+
case blockPack := <-d.blockCh:
// Short circuit if it's a stale cross check
if len(blockPack.blocks) == 1 {
block := blockPack.blocks[0]
if _, ok := d.checks[block.Hash()]; ok {
delete(d.checks, block.Hash())
- continue
+ break
}
}
// If the peer was previously banned and failed to deliver its pack
// in a reasonable time frame, ignore its message.
if peer := d.peers.Peer(blockPack.peerId); peer != nil {
- // Deliver the received chunk of blocks
- if err := d.queue.Deliver(blockPack.peerId, blockPack.blocks); err != nil {
- if err == ErrInvalidChain {
- // The hash chain is invalid (blocks are not ordered properly), abort
- return err
+ // Deliver the received chunk of blocks, and demote in case of errors
+ err := d.queue.Deliver(blockPack.peerId, blockPack.blocks)
+ switch err {
+ case nil:
+ // If no blocks were delivered, demote the peer (need the delivery above)
+ if len(blockPack.blocks) == 0 {
+ peer.Demote()
+ peer.SetIdle()
+ glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
+ break
}
- // Peer did deliver, but some blocks were off, penalize
- glog.V(logger.Debug).Infof("Failed delivery for peer %s: %v\n", blockPack.peerId, err)
+ // All was successful, promote the peer
+ peer.Promote()
+ peer.SetIdle()
+ glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blockPack.blocks))
+
+ case ErrInvalidChain:
+ // The hash chain is invalid (blocks are not ordered properly), abort
+ return err
+
+ case errNoFetchesPending:
+ // Peer probably timed out with its delivery but came through
+ // in the end, demote, but allow it to pull from this peer.
peer.Demote()
- break
- }
- if glog.V(logger.Debug) && len(blockPack.blocks) > 0 {
- glog.Infof("Added %d blocks from: %s\n", len(blockPack.blocks), blockPack.peerId)
+ peer.SetIdle()
+ glog.V(logger.Detail).Infof("%s: out of bound delivery", peer)
+
+ case errStaleDelivery:
+ // Delivered something completely different from what was requested, usually
+ // caused by a timeout and delivery during a new sync cycle.
+ // Don't set it to idle as the original request should still be
+ // in flight.
+ peer.Demote()
+ glog.V(logger.Detail).Infof("%s: stale delivery", peer)
+
+ default:
+ // Peer did something semi-useful, demote but keep it around
+ peer.Demote()
+ peer.SetIdle()
+ glog.V(logger.Detail).Infof("%s: delivery partially failed: %v", peer, err)
}
- // Promote the peer and update it's idle state
- peer.Promote()
- peer.SetIdle()
}
+
case <-ticker.C:
- // Check for bad peers. Bad peers may indicate a peer not responding
- // to a `getBlocks` message. A timeout of 5 seconds is set. Peers
- // that badly or poorly behave are removed from the peer set (not banned).
- // Bad peers are excluded from the available peer set and therefor won't be
- // reused. XXX We could re-introduce peers after X time.
- badPeers := d.queue.Expire(blockTTL)
+ // Short circuit if we lost all our peers
+ if d.peers.Len() == 0 {
+ return errNoPeers
+ }
+ // Check for block request timeouts and demote the responsible peers
+ badPeers := d.queue.Expire(blockHardTTL)
for _, pid := range badPeers {
- // XXX We could make use of a reputation system here ranking peers
- // in their performance
- // 1) Time for them to respond;
- // 2) Measure their speed;
- // 3) Amount and availability.
if peer := d.peers.Peer(pid); peer != nil {
peer.Demote()
+ glog.V(logger.Detail).Infof("%s: block delivery timeout", peer)
}
}
- // After removing bad peers make sure we actually have sufficient peer left to keep downloading
- if d.peers.Len() == 0 {
- return errNoPeers
- }
- // If there are unrequested hashes left start fetching
- // from the available peers.
+ // If there are unrequested hashes left, start fetching from the available peers
if d.queue.Pending() > 0 {
// Throttle the download if block cache is full and waiting processing
if d.queue.Throttle() {
- continue
+ break
}
// Send a download request to all idle peers, until throttled
idlePeers := d.peers.IdlePeers()
@@ -475,15 +517,18 @@ out:
}
// Get a possible chunk. If nil is returned no chunk
// could be returned due to no hashes available.
- request := d.queue.Reserve(peer, MaxBlockFetch)
+ request := d.queue.Reserve(peer, peer.Capacity())
if request == nil {
continue
}
+ if glog.V(logger.Detail) {
+ glog.Infof("%s: requesting %d blocks", peer, len(request.Hashes))
+ }
// Fetch the chunk and check for error. If the peer was somehow
// already fetching a chunk due to a bug, it will be returned to
// the queue
if err := peer.Fetch(request); err != nil {
- glog.V(logger.Error).Infof("Peer %s received double work\n", peer.id)
+ glog.V(logger.Error).Infof("Peer %s received double work", peer.id)
d.queue.Cancel(request)
}
}
@@ -502,10 +547,95 @@ out:
}
}
glog.V(logger.Detail).Infoln("Downloaded block(s) in", time.Since(start))
-
return nil
}
+// banBlocks retrieves a batch of blocks from a peer feeding us invalid hashes,
+// and bans the head of the retrieved batch.
+//
+// This method only fetches a single batch, as the goal is not to ban an entire
+// (potentially long) invalid chain, wasting a lot of time in the process, but
+// rather to gradually build up a blacklist if the peer keeps reconnecting.
+func (d *Downloader) banBlocks(peerId string, head common.Hash) error {
+ glog.V(logger.Debug).Infof("Banning a batch out of %d blocks from %s", d.queue.Pending(), peerId)
+
+ // Ask the peer being banned for a batch of blocks from the banning point
+ peer := d.peers.Peer(peerId)
+ if peer == nil {
+ return nil
+ }
+ request := d.queue.Reserve(peer, MaxBlockFetch)
+ if request == nil {
+ return nil
+ }
+ if err := peer.Fetch(request); err != nil {
+ return err
+ }
+ // Wait a bit for the reply to arrive, and ban the head of the batch if it does
+ timeout := time.After(blockHardTTL)
+ for {
+ select {
+ case <-d.cancelCh:
+ return errCancelBlockFetch
+
+ case <-timeout:
+ return ErrTimeout
+
+ case <-d.hashCh:
+ // Out of bounds hashes received, ignore them
+
+ case blockPack := <-d.blockCh:
+ blocks := blockPack.blocks
+
+ // Short circuit if it's a stale cross check
+ if len(blocks) == 1 {
+ block := blocks[0]
+ if _, ok := d.checks[block.Hash()]; ok {
+ delete(d.checks, block.Hash())
+ break
+ }
+ }
+ // Short circuit if it's not from the peer being banned
+ if blockPack.peerId != peerId {
+ break
+ }
+ // Short circuit if no blocks were returned
+ if len(blocks) == 0 {
+ return errors.New("no blocks returned to ban")
+ }
+ // Reconstruct the original chain order and ensure we're banning the correct blocks
+ types.BlockBy(types.Number).Sort(blocks)
+ if bytes.Compare(blocks[0].Hash().Bytes(), head.Bytes()) != 0 {
+ return errors.New("head block not the banned one")
+ }
+ index := 0
+ for _, block := range blocks[1:] {
+ if bytes.Compare(block.ParentHash().Bytes(), blocks[index].Hash().Bytes()) != 0 {
+ break
+ }
+ index++
+ }
+ // Ban the head hash and phase out any excess
+ d.banned.Add(blocks[index].Hash())
+ for d.banned.Size() > maxBannedHashes {
+ var evacuate common.Hash
+
+ d.banned.Each(func(item interface{}) bool {
+ // Skip any hard coded bans
+ if core.BadHashes[item.(common.Hash)] {
+ return true
+ }
+ evacuate = item.(common.Hash)
+ return false
+ })
+ d.banned.Remove(evacuate)
+ }
+ glog.V(logger.Debug).Infof("Banned %d blocks from: %s", index+1, peerId)
+ return nil
+ }
+ }
+}
+
// DeliverBlocks injects a new batch of blocks received from a remote node.
// This is usually invoked through the BlocksMsg by the protocol handler.
func (d *Downloader) DeliverBlocks(id string, blocks []*types.Block) error {
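banBlocks above is what keeps the ban set bounded: once it grows past maxBannedHashes, an arbitrary dynamically added hash is evicted, while the hard coded core.BadHashes entries are always skipped. A minimal standalone sketch of that eviction policy, assuming the same gopkg.in/fatih/set.v0 package but substituting plain strings and a hypothetical hardBans map for common.Hash and core.BadHashes:

package main

import (
	"fmt"

	"gopkg.in/fatih/set.v0"
)

// hardBans stands in for core.BadHashes: entries that must never be evicted.
var hardBans = map[string]bool{"hard-coded-bad": true}

const maxBanned = 4 // stand-in for maxBannedHashes

// ban adds a hash and evicts arbitrary soft bans while the cap is exceeded.
func ban(banned *set.Set, hash string) {
	banned.Add(hash)
	for banned.Size() > maxBanned {
		var evacuate string
		banned.Each(func(item interface{}) bool {
			if hardBans[item.(string)] {
				return true // skip hard coded bans, keep scanning
			}
			evacuate = item.(string)
			return false // found an evictable entry, stop scanning
		})
		if evacuate == "" {
			return // nothing evictable left
		}
		banned.Remove(evacuate)
	}
}

func main() {
	banned := set.New()
	banned.Add("hard-coded-bad")
	for i := 0; i < 10; i++ {
		ban(banned, fmt.Sprintf("soft-%d", i))
	}
	// The set never exceeds the cap and the hard coded entry survives.
	fmt.Println(banned.Size(), banned.Has("hard-coded-bad")) // 4 true
}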
diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go
index 66be1ca18..5f10fb41f 100644
--- a/eth/downloader/downloader_test.go
+++ b/eth/downloader/downloader_test.go
@@ -7,6 +7,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
)
@@ -14,6 +15,7 @@ import (
var (
knownHash = common.Hash{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
unknownHash = common.Hash{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}
+ bannedHash = common.Hash{5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}
)
func createHashes(start, amount int) (hashes []common.Hash) {
@@ -21,7 +23,7 @@ func createHashes(start, amount int) (hashes []common.Hash) {
hashes[len(hashes)-1] = knownHash
for i := range hashes[:len(hashes)-1] {
- binary.BigEndian.PutUint64(hashes[i][:8], uint64(i+2))
+ binary.BigEndian.PutUint64(hashes[i][:8], uint64(start+i+2))
}
return
}
@@ -56,7 +58,6 @@ type downloadTester struct {
maxHashFetch int // Overrides the maximum number of retrieved hashes
t *testing.T
- pcount int
done chan bool
activePeerId string
}
@@ -114,12 +115,6 @@ func (dl *downloadTester) syncTake(peerId string, head common.Hash) ([]*Block, e
return took, err
}
-func (dl *downloadTester) insertBlocks(blocks types.Blocks) {
- for _, block := range blocks {
- dl.chain = append(dl.chain, block.Hash())
- }
-}
-
func (dl *downloadTester) hasBlock(hash common.Hash) bool {
for _, h := range dl.chain {
if h == hash {
@@ -174,158 +169,131 @@ func (dl *downloadTester) getBlocks(id string) func([]common.Hash) error {
}
}
-func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) {
- dl.pcount++
-
- dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id))
-}
-
-func (dl *downloadTester) badBlocksPeer(id string, td *big.Int, hash common.Hash) {
- dl.pcount++
-
- // This bad peer never returns any blocks
- dl.downloader.RegisterPeer(id, hash, dl.getHashes, func([]common.Hash) error {
- return nil
- })
+// newPeer registers a new block download source into the syncer.
+func (dl *downloadTester) newPeer(id string, td *big.Int, hash common.Hash) error {
+ return dl.downloader.RegisterPeer(id, hash, dl.getHashes, dl.getBlocks(id))
}
-func TestDownload(t *testing.T) {
- minDesiredPeerCount = 4
- blockTTL = 1 * time.Second
-
- targetBlocks := 1000
+// Tests that simple synchronization, without throttling from a good peer works.
+func TestSynchronisation(t *testing.T) {
+ // Create a small enough block chain to download and the tester
+ targetBlocks := blockCacheLimit - 15
hashes := createHashes(0, targetBlocks)
blocks := createBlocksFromHashes(hashes)
- tester := newTester(t, hashes, blocks)
- tester.newPeer("peer1", big.NewInt(10000), hashes[0])
- tester.newPeer("peer2", big.NewInt(0), common.Hash{})
- tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
- tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
- tester.activePeerId = "peer1"
-
- err := tester.sync("peer1", hashes[0])
- if err != nil {
- t.Error("download error", err)
- }
-
- inqueue := len(tester.downloader.queue.blockCache)
- if inqueue != targetBlocks {
- t.Error("expected", targetBlocks, "have", inqueue)
- }
-}
-
-func TestMissing(t *testing.T) {
- targetBlocks := 1000
- hashes := createHashes(0, 1000)
- extraHashes := createHashes(1001, 1003)
- blocks := createBlocksFromHashes(append(extraHashes, hashes...))
tester := newTester(t, hashes, blocks)
+ tester.newPeer("peer", big.NewInt(10000), hashes[0])
- tester.newPeer("peer1", big.NewInt(10000), hashes[len(hashes)-1])
-
- hashes = append(extraHashes, hashes[:len(hashes)-1]...)
- tester.newPeer("peer2", big.NewInt(0), common.Hash{})
-
- err := tester.sync("peer1", hashes[0])
- if err != nil {
- t.Error("download error", err)
+ // Synchronise with the peer and make sure all blocks were retrieved
+ if err := tester.sync("peer", hashes[0]); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
}
-
- inqueue := len(tester.downloader.queue.blockCache)
- if inqueue != targetBlocks {
- t.Error("expected", targetBlocks, "have", inqueue)
+ if queued := len(tester.downloader.queue.blockPool); queued != targetBlocks {
+ t.Fatalf("synchronised block mismatch: have %v, want %v", queued, targetBlocks)
}
}
-func TestTaking(t *testing.T) {
- minDesiredPeerCount = 4
- blockTTL = 1 * time.Second
-
- targetBlocks := 1000
+// Tests that the synchronized blocks can be correctly retrieved.
+func TestBlockTaking(t *testing.T) {
+ // Create a small enough block chain to download and the tester
+ targetBlocks := blockCacheLimit - 15
hashes := createHashes(0, targetBlocks)
blocks := createBlocksFromHashes(hashes)
- tester := newTester(t, hashes, blocks)
- tester.newPeer("peer1", big.NewInt(10000), hashes[0])
- tester.newPeer("peer2", big.NewInt(0), common.Hash{})
- tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
- tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
+ tester := newTester(t, hashes, blocks)
+ tester.newPeer("peer", big.NewInt(10000), hashes[0])
- err := tester.sync("peer1", hashes[0])
- if err != nil {
- t.Error("download error", err)
+ // Synchronise with the peer and test block retrieval
+ if err := tester.sync("peer", hashes[0]); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
}
- bs := tester.downloader.TakeBlocks()
- if len(bs) != targetBlocks {
- t.Error("retrieved block mismatch: have %v, want %v", len(bs), targetBlocks)
+ if took := tester.downloader.TakeBlocks(); len(took) != targetBlocks {
+ t.Fatalf("took block mismatch: have %v, want %v", len(took), targetBlocks)
}
}
+// Tests that an inactive downloader will not accept incoming hashes and blocks.
func TestInactiveDownloader(t *testing.T) {
- targetBlocks := 1000
+ // Create a small enough block chain to download and the tester
+ targetBlocks := blockCacheLimit - 15
hashes := createHashes(0, targetBlocks)
blocks := createBlocksFromHashSet(createHashSet(hashes))
- tester := newTester(t, hashes, nil)
- err := tester.downloader.DeliverHashes("bad peer 001", hashes)
- if err != errNoSyncActive {
- t.Error("expected no sync error, got", err)
- }
+ tester := newTester(t, nil, nil)
- err = tester.downloader.DeliverBlocks("bad peer 001", blocks)
- if err != errNoSyncActive {
- t.Error("expected no sync error, got", err)
+ // Check that neither hashes nor blocks are accepted
+ if err := tester.downloader.DeliverHashes("bad peer", hashes); err != errNoSyncActive {
+ t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
+ }
+ if err := tester.downloader.DeliverBlocks("bad peer", blocks); err != errNoSyncActive {
+ t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
}
}
+// Tests that a canceled download wipes all previously accumulated state.
func TestCancel(t *testing.T) {
- minDesiredPeerCount = 4
- blockTTL = 1 * time.Second
-
- targetBlocks := 1000
+ // Create a small enough block chain to download and the tester
+ targetBlocks := blockCacheLimit - 15
hashes := createHashes(0, targetBlocks)
blocks := createBlocksFromHashes(hashes)
- tester := newTester(t, hashes, blocks)
- tester.newPeer("peer1", big.NewInt(10000), hashes[0])
+ tester := newTester(t, hashes, blocks)
+ tester.newPeer("peer", big.NewInt(10000), hashes[0])
- err := tester.sync("peer1", hashes[0])
- if err != nil {
- t.Error("download error", err)
+ // Synchronise with the peer, but cancel afterwards
+ if err := tester.sync("peer", hashes[0]); err != nil {
+ t.Fatalf("failed to synchronise blocks: %v", err)
}
-
if !tester.downloader.Cancel() {
- t.Error("cancel operation unsuccessfull")
+ t.Fatalf("cancel operation failed")
}
-
- hashSize, blockSize := tester.downloader.queue.Size()
- if hashSize > 0 || blockSize > 0 {
- t.Error("block (", blockSize, ") or hash (", hashSize, ") not 0")
+ // Make sure the queue reports empty and no blocks can be taken
+ hashCount, blockCount := tester.downloader.queue.Size()
+ if hashCount > 0 || blockCount > 0 {
+ t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
+ }
+ if took := tester.downloader.TakeBlocks(); len(took) != 0 {
+ t.Errorf("taken blocks mismatch: have %d, want %d", len(took), 0)
}
}
+// Tests that if a large batch of blocks are being downloaded, it is throttled
+// until the cached blocks are retrieved.
func TestThrottling(t *testing.T) {
- minDesiredPeerCount = 4
- blockTTL = 1 * time.Second
-
- targetBlocks := 16 * blockCacheLimit
+ // Create a long block chain to download and the tester
+ targetBlocks := 8 * blockCacheLimit
hashes := createHashes(0, targetBlocks)
blocks := createBlocksFromHashes(hashes)
- tester := newTester(t, hashes, blocks)
- tester.newPeer("peer1", big.NewInt(10000), hashes[0])
- tester.newPeer("peer2", big.NewInt(0), common.Hash{})
- tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{})
- tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{})
+ tester := newTester(t, hashes, blocks)
+ tester.newPeer("peer", big.NewInt(10000), hashes[0])
- // Concurrently download and take the blocks
- took, err := tester.syncTake("peer1", hashes[0])
- if err != nil {
- t.Fatalf("failed to synchronise blocks: %v", err)
+ // Start a synchronisation concurrently
+ errc := make(chan error)
+ go func() {
+ errc <- tester.sync("peer", hashes[0])
+ }()
+ // Iteratively take some blocks, always checking the retrieval count
+ for total := 0; total < targetBlocks; {
+ // Wait a bit for sync to complete
+ for start := time.Now(); time.Since(start) < 3*time.Second; {
+ time.Sleep(25 * time.Millisecond)
+ if len(tester.downloader.queue.blockPool) == blockCacheLimit {
+ break
+ }
+ }
+ // Fetch the next batch of blocks
+ took := tester.downloader.TakeBlocks()
+ if len(took) != blockCacheLimit {
+ t.Fatalf("block count mismatch: have %v, want %v", len(took), blockCacheLimit)
+ }
+ total += len(took)
+ if total > targetBlocks {
+ t.Fatalf("target block count mismatch: have %v, want %v", total, targetBlocks)
+ }
}
- if len(took) != targetBlocks {
- t.Fatalf("downloaded block mismatch: have %v, want %v", len(took), targetBlocks)
+ if err := <-errc; err != nil {
+ t.Fatalf("block synchronization failed: %v", err)
}
}
@@ -461,7 +429,7 @@ func TestInvalidHashOrderAttack(t *testing.T) {
// Tests that if a malicious peer makes up a random hash chain and tries to push
// indefinitely, it actually gets caught with it.
func TestMadeupHashChainAttack(t *testing.T) {
- blockTTL = 100 * time.Millisecond
+ blockSoftTTL = 100 * time.Millisecond
crossCheckCycle = 25 * time.Millisecond
// Create a long chain of hashes without backing blocks
@@ -495,10 +463,10 @@ func TestMadeupHashChainDrippingAttack(t *testing.T) {
// Tests that if a malicious peer makes up a random block chain, and tried to
// push indefinitely, it actually gets caught with it.
func TestMadeupBlockChainAttack(t *testing.T) {
- defaultBlockTTL := blockTTL
+ defaultBlockTTL := blockSoftTTL
defaultCrossCheckCycle := crossCheckCycle
- blockTTL = 100 * time.Millisecond
+ blockSoftTTL = 100 * time.Millisecond
crossCheckCycle = 25 * time.Millisecond
// Create a long chain of blocks and simulate an invalid chain by dropping every second
@@ -516,7 +484,7 @@ func TestMadeupBlockChainAttack(t *testing.T) {
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrCrossCheckFailed)
}
// Ensure that a valid chain can still pass sync
- blockTTL = defaultBlockTTL
+ blockSoftTTL = defaultBlockTTL
crossCheckCycle = defaultCrossCheckCycle
tester.hashes = hashes
@@ -530,10 +498,10 @@ func TestMadeupBlockChainAttack(t *testing.T) {
// attacker make up a valid hashes for random blocks, but also forges the block
// parents to point to existing hashes.
func TestMadeupParentBlockChainAttack(t *testing.T) {
- defaultBlockTTL := blockTTL
+ defaultBlockTTL := blockSoftTTL
defaultCrossCheckCycle := crossCheckCycle
- blockTTL = 100 * time.Millisecond
+ blockSoftTTL = 100 * time.Millisecond
crossCheckCycle = 25 * time.Millisecond
// Create a long chain of blocks and simulate an invalid chain by dropping every second
@@ -550,7 +518,7 @@ func TestMadeupParentBlockChainAttack(t *testing.T) {
t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrCrossCheckFailed)
}
// Ensure that a valid chain can still pass sync
- blockTTL = defaultBlockTTL
+ blockSoftTTL = defaultBlockTTL
crossCheckCycle = defaultCrossCheckCycle
tester.blocks = blocks
@@ -559,3 +527,86 @@ func TestMadeupParentBlockChainAttack(t *testing.T) {
t.Fatalf("failed to synchronise blocks: %v", err)
}
}
+
+// Tests that if one/multiple malicious peers try to feed a banned blockchain to
+// the downloader, it will not keep refetching the same chain indefinitely, but
+// gradually block pieces of it, until its head is also blocked.
+func TestBannedChainStarvationAttack(t *testing.T) {
+ // Construct a valid chain, but ban one of the hashes in it
+ hashes := createHashes(0, 8*blockCacheLimit)
+ hashes[len(hashes)/2+23] = bannedHash // weird index so the ban point is not a multiple of the ban chunk size
+
+ blocks := createBlocksFromHashes(hashes)
+
+ // Create the tester and ban the selected hash
+ tester := newTester(t, hashes, blocks)
+ tester.downloader.banned.Add(bannedHash)
+
+ // Iteratively try to sync, and verify that the banned hash list grows until
+ // the head of the invalid chain is blocked too.
+ tester.newPeer("attack", big.NewInt(10000), hashes[0])
+ for banned := tester.downloader.banned.Size(); ; {
+ // Try to sync with the attacker, check hash chain failure
+ if _, err := tester.syncTake("attack", hashes[0]); err != ErrInvalidChain {
+ t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain)
+ }
+ // Check that the ban list grew with at least 1 new item, or all banned
+ bans := tester.downloader.banned.Size()
+ if bans < banned+1 {
+ if tester.downloader.banned.Has(hashes[0]) {
+ break
+ }
+ t.Fatalf("ban count mismatch: have %v, want %v+", bans, banned+1)
+ }
+ banned = bans
+ }
+ // Check that after banning an entire chain, bad peers get dropped
+ if err := tester.newPeer("new attacker", big.NewInt(10000), hashes[0]); err != errBannedHead {
+ t.Fatalf("peer registration mismatch: have %v, want %v", err, errBannedHead)
+ }
+ if peer := tester.downloader.peers.Peer("new attacker"); peer != nil {
+ t.Fatalf("banned attacker registered: %v", peer)
+ }
+}
+
+// Tests that if a peer sends excessively many/large invalid chains that are
+// gradually banned, the ban list is capped to limit memory consumption, and
+// that the original hard coded bad hashes are never evacuated.
+func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
+ // Reduce the test size a bit
+ MaxBlockFetch = 4
+ maxBannedHashes = 256
+
+ // Construct a banned chain with more chunks than the ban limit
+ hashes := createHashes(0, maxBannedHashes*MaxBlockFetch)
+ hashes[len(hashes)-1] = bannedHash // weird index to have non multiple of ban chunk size
+
+ blocks := createBlocksFromHashes(hashes)
+
+ // Create the tester and ban the selected hash
+ tester := newTester(t, hashes, blocks)
+ tester.downloader.banned.Add(bannedHash)
+
+ // Iteratively try to sync, and verify that the banned hash list grows until
+ // the head of the invalid chain is blocked too.
+ tester.newPeer("attack", big.NewInt(10000), hashes[0])
+ for {
+ // Try to sync with the attacker, check hash chain failure
+ if _, err := tester.syncTake("attack", hashes[0]); err != ErrInvalidChain {
+ t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain)
+ }
+ // Short circuit if the entire chain was banned
+ if tester.downloader.banned.Has(hashes[0]) {
+ break
+ }
+ // Otherwise ensure we never exceed the memory allowance and the hard coded bans are untouched
+ if bans := tester.downloader.banned.Size(); bans > maxBannedHashes {
+ t.Fatalf("ban cap exceeded: have %v, want max %v", bans, maxBannedHashes)
+ }
+ for hash, _ := range core.BadHashes {
+ if !tester.downloader.banned.Has(hash) {
+ t.Fatalf("hard coded ban evacuated: %x", hash)
+ }
+ }
+ }
+}
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index 4abae8d5e..9614a6951 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -5,8 +5,11 @@ package downloader
import (
"errors"
+ "fmt"
+ "math"
"sync"
"sync/atomic"
+ "time"
"github.com/ethereum/go-ethereum/common"
"gopkg.in/fatih/set.v0"
@@ -27,14 +30,15 @@ type peer struct {
head common.Hash // Hash of the peers latest known block
idle int32 // Current activity state of the peer (idle = 0, active = 1)
- rep int32 // Simple peer reputation (not used currently)
+ rep int32 // Simple peer reputation
- mu sync.RWMutex
+ capacity int32 // Number of blocks allowed to fetch per request
+ started time.Time // Time instance when the last fetch was started
- ignored *set.Set
+ ignored *set.Set // Set of hashes not to request (didn't have previously)
- getHashes hashFetcherFn
- getBlocks blockFetcherFn
+ getHashes hashFetcherFn // Method to retrieve a batch of hashes (mockable for testing)
+ getBlocks blockFetcherFn // Method to retrieve a batch of blocks (mockable for testing)
}
// newPeer creates a new downloader peer, with specific hash and block retrieval
@@ -43,6 +47,7 @@ func newPeer(id string, head common.Hash, getHashes hashFetcherFn, getBlocks blo
return &peer{
id: id,
head: head,
+ capacity: 1,
getHashes: getHashes,
getBlocks: getBlocks,
ignored: set.New(),
@@ -52,6 +57,7 @@ func newPeer(id string, head common.Hash, getHashes hashFetcherFn, getBlocks blo
// Reset clears the internal state of a peer entity.
func (p *peer) Reset() {
atomic.StoreInt32(&p.idle, 0)
+ atomic.StoreInt32(&p.capacity, 1)
p.ignored.Clear()
}
@@ -61,6 +67,8 @@ func (p *peer) Fetch(request *fetchRequest) error {
if !atomic.CompareAndSwapInt32(&p.idle, 0, 1) {
return errAlreadyFetching
}
+ p.started = time.Now()
+
// Convert the hash set to a retrievable slice
hashes := make([]common.Hash, 0, len(request.Hashes))
for hash, _ := range request.Hashes {
@@ -72,10 +80,41 @@ func (p *peer) Fetch(request *fetchRequest) error {
}
// SetIdle sets the peer to idle, allowing it to execute new retrieval requests.
+// Its block retrieval allowance will also be updated either up- or downwards,
+// depending on whether the previous fetch completed in time or not.
func (p *peer) SetIdle() {
+ // Update the peer's download allowance based on previous performance
+ scale := 2.0
+ if time.Since(p.started) > blockSoftTTL {
+ scale = 0.5
+ if time.Since(p.started) > blockHardTTL {
+ scale = 1 / float64(MaxBlockFetch) // reduces capacity to 1
+ }
+ }
+ for {
+ // Calculate the new download bandwidth allowance
+ prev := atomic.LoadInt32(&p.capacity)
+ next := int32(math.Max(1, math.Min(float64(MaxBlockFetch), float64(prev)*scale)))
+
+ // Try to update the old value
+ if atomic.CompareAndSwapInt32(&p.capacity, prev, next) {
+ // If we're having problems at 1 capacity, try to find better peers
+ if next == 1 {
+ p.Demote()
+ }
+ break
+ }
+ }
+ // Set the peer to idle to allow further block requests
atomic.StoreInt32(&p.idle, 0)
}
+// Capacity retrieves the peer's block download allowance based on its previously
+// discovered bandwidth capacity.
+func (p *peer) Capacity() int {
+ return int(atomic.LoadInt32(&p.capacity))
+}
+
// Promote increases the peer's reputation.
func (p *peer) Promote() {
atomic.AddInt32(&p.rep, 1)
@@ -95,6 +134,15 @@ func (p *peer) Demote() {
}
}
+// String implements fmt.Stringer.
+func (p *peer) String() string {
+ return fmt.Sprintf("Peer %s [%s]", p.id,
+ fmt.Sprintf("reputation %3d, ", atomic.LoadInt32(&p.rep))+
+ fmt.Sprintf("capacity %3d, ", atomic.LoadInt32(&p.capacity))+
+ fmt.Sprintf("ignored %4d", p.ignored.Size()),
+ )
+}
+
// peerSet represents the collection of active peers participating in the block
// download procedure.
type peerSet struct {
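SetIdle above turns every delivery into a crude bandwidth probe: a fetch completed within blockSoftTTL doubles the peer's block allowance, a slow one halves it, and one exceeding blockHardTTL collapses it back to a single block, always clamped to [1, MaxBlockFetch]. A small standalone sketch of that multiplicative schedule (the constant value and the delivery sequence are illustrative only):

package main

import (
	"fmt"
	"math"
)

const maxBlockFetch = 128 // stand-in for MaxBlockFetch

// nextCapacity mirrors the scaling in SetIdle: double on a fast delivery,
// halve on a soft timeout, collapse to 1 on a hard timeout.
func nextCapacity(prev int, scale float64) int {
	return int(math.Max(1, math.Min(float64(maxBlockFetch), float64(prev)*scale)))
}

func main() {
	capacity := 1
	for i := 0; i < 10; i++ { // ten punctual deliveries in a row
		capacity = nextCapacity(capacity, 2.0)
	}
	fmt.Println("after fast deliveries:", capacity) // capped at 128

	capacity = nextCapacity(capacity, 0.5)
	fmt.Println("after one soft timeout:", capacity) // 64

	capacity = nextCapacity(capacity, 1.0/maxBlockFetch)
	fmt.Println("after one hard timeout:", capacity) // back to 1
}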
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index 7ea400dc4..7abbd42fd 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -16,10 +16,15 @@ import (
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)
-const (
+var (
blockCacheLimit = 8 * MaxBlockFetch // Maximum number of blocks to cache before throttling the download
)
+var (
+ errNoFetchesPending = errors.New("no fetches pending")
+ errStaleDelivery = errors.New("stale delivery")
+)
+
// fetchRequest is a currently running block retrieval operation.
type fetchRequest struct {
Peer *peer // Peer to which the request was sent
@@ -45,10 +50,11 @@ type queue struct {
// newQueue creates a new download queue for scheduling block retrieval.
func newQueue() *queue {
return &queue{
- hashPool: make(map[common.Hash]int),
- hashQueue: prque.New(),
- pendPool: make(map[string]*fetchRequest),
- blockPool: make(map[common.Hash]int),
+ hashPool: make(map[common.Hash]int),
+ hashQueue: prque.New(),
+ pendPool: make(map[string]*fetchRequest),
+ blockPool: make(map[common.Hash]int),
+ blockCache: make([]*Block, blockCacheLimit),
}
}
@@ -65,7 +71,7 @@ func (q *queue) Reset() {
q.blockPool = make(map[common.Hash]int)
q.blockOffset = 0
- q.blockCache = nil
+ q.blockCache = make([]*Block, blockCacheLimit)
}
// Size retrieves the number of hashes in the queue, returning separately for
@@ -203,7 +209,7 @@ func (q *queue) TakeBlocks() []*Block {
// Reserve reserves a set of hashes for the given peer, skipping any previously
// failed download.
-func (q *queue) Reserve(p *peer, max int) *fetchRequest {
+func (q *queue) Reserve(p *peer, count int) *fetchRequest {
q.lock.Lock()
defer q.lock.Unlock()
@@ -215,11 +221,16 @@ func (q *queue) Reserve(p *peer, max int) *fetchRequest {
if _, ok := q.pendPool[p.id]; ok {
return nil
}
+ // Calculate an upper limit on the hashes we might fetch (i.e. throttling)
+ space := len(q.blockCache) - len(q.blockPool)
+ for _, request := range q.pendPool {
+ space -= len(request.Hashes)
+ }
// Retrieve a batch of hashes, skipping previously failed ones
send := make(map[common.Hash]int)
skip := make(map[common.Hash]int)
- for len(send) < max && !q.hashQueue.Empty() {
+ for proc := 0; proc < space && len(send) < count && !q.hashQueue.Empty(); proc++ {
hash, priority := q.hashQueue.Pop()
if p.ignored.Has(hash) {
skip[hash.(common.Hash)] = int(priority)
@@ -287,7 +298,7 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
// Short circuit if the blocks were never requested
request := q.pendPool[id]
if request == nil {
- return errors.New("no fetches pending")
+ return errNoFetchesPending
}
delete(q.pendPool, id)
@@ -303,7 +314,7 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
// Skip any blocks that were not requested
hash := block.Hash()
if _, ok := request.Hashes[hash]; !ok {
- errs = append(errs, fmt.Errorf("non-requested block %v", hash))
+ errs = append(errs, fmt.Errorf("non-requested block %x", hash))
continue
}
// If a requested block falls out of the range, the hash chain is invalid
@@ -320,30 +331,26 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
delete(q.hashPool, hash)
q.blockPool[hash] = int(block.NumberU64())
}
- // Return all failed fetches to the queue
+ // Return all failed or missing fetches to the queue
for hash, index := range request.Hashes {
q.hashQueue.Push(hash, float32(index))
}
+ // If none of the blocks were good, it's a stale delivery
if len(errs) != 0 {
+ if len(errs) == len(blocks) {
+ return errStaleDelivery
+ }
return fmt.Errorf("multiple failures: %v", errs)
}
return nil
}
-// Alloc ensures that the block cache is the correct size, given a starting
-// offset, and a memory cap.
-func (q *queue) Alloc(offset int) {
+// Prepare configures the block cache offset to allow accepting inbound blocks.
+func (q *queue) Prepare(offset int) {
q.lock.Lock()
defer q.lock.Unlock()
if q.blockOffset < offset {
q.blockOffset = offset
}
- size := len(q.hashPool)
- if size > blockCacheLimit {
- size = blockCacheLimit
- }
- if len(q.blockCache) < size {
- q.blockCache = append(q.blockCache, make([]*Block, size-len(q.blockCache))...)
- }
}
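The Reserve change above is where throttling is actually enforced: a peer can only be handed as many hashes as there is free space in the block cache after subtracting already cached blocks and every hash reserved by in-flight requests, regardless of the peer's own capacity. A small standalone sketch of that space calculation (the helper name and sample figures are hypothetical):

package main

import "fmt"

// reservable mirrors the space check in Reserve: cache slots not yet filled
// with blocks, minus hashes already reserved by pending fetch requests.
func reservable(cacheSize, cachedBlocks int, inflight []int) int {
	space := cacheSize - cachedBlocks
	for _, hashes := range inflight {
		space -= hashes
	}
	if space < 0 {
		space = 0
	}
	return space
}

func main() {
	// With a 1024 block cache (8 * MaxBlockFetch), 800 blocks already fetched
	// and two pending requests of 64 hashes each, only 96 hashes may still be
	// reserved, no matter how large the requesting peer's capacity is.
	fmt.Println(reservable(1024, 800, []int{64, 64})) // 96
}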
diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go
index b1f3591f3..ee6141f71 100644
--- a/eth/downloader/queue_test.go
+++ b/eth/downloader/queue_test.go
@@ -1,8 +1,6 @@
package downloader
import (
- "testing"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"gopkg.in/fatih/set.v0"
@@ -30,32 +28,3 @@ func createBlocksFromHashSet(hashes *set.Set) []*types.Block {
return blocks
}
-
-func TestChunking(t *testing.T) {
- queue := newQueue()
- peer1 := newPeer("peer1", common.Hash{}, nil, nil)
- peer2 := newPeer("peer2", common.Hash{}, nil, nil)
-
- // 99 + 1 (1 == known genesis hash)
- hashes := createHashes(0, 99)
- queue.Insert(hashes)
-
- chunk1 := queue.Reserve(peer1, 99)
- if chunk1 == nil {
- t.Errorf("chunk1 is nil")
- t.FailNow()
- }
- chunk2 := queue.Reserve(peer2, 99)
- if chunk2 == nil {
- t.Errorf("chunk2 is nil")
- t.FailNow()
- }
-
- if len(chunk1.Hashes) != 99 {
- t.Error("expected chunk1 hashes to be 99, got", len(chunk1.Hashes))
- }
-
- if len(chunk2.Hashes) != 1 {
- t.Error("expected chunk1 hashes to be 1, got", len(chunk2.Hashes))
- }
-}