path: root/swarm/storage/feed
Diffstat (limited to 'swarm/storage/feed')
-rw-r--r--  swarm/storage/feed/binaryserializer.go | 44
-rw-r--r--  swarm/storage/feed/binaryserializer_test.go | 98
-rw-r--r--  swarm/storage/feed/cacheentry.go | 48
-rw-r--r--  swarm/storage/feed/doc.go | 43
-rw-r--r--  swarm/storage/feed/error.go | 73
-rw-r--r--  swarm/storage/feed/feed.go | 125
-rw-r--r--  swarm/storage/feed/feed_test.go | 36
-rw-r--r--  swarm/storage/feed/handler.go | 298
-rw-r--r--  swarm/storage/feed/handler_test.go | 505
-rw-r--r--  swarm/storage/feed/id.go | 123
-rw-r--r--  swarm/storage/feed/id_test.go | 28
-rw-r--r--  swarm/storage/feed/lookup/algorithm_fluzcapacitor.go | 63
-rw-r--r--  swarm/storage/feed/lookup/algorithm_longearth.go | 185
-rw-r--r--  swarm/storage/feed/lookup/epoch.go | 91
-rw-r--r--  swarm/storage/feed/lookup/epoch_test.go | 57
-rw-r--r--  swarm/storage/feed/lookup/lookup.go | 136
-rw-r--r--  swarm/storage/feed/lookup/lookup_test.go | 641
-rw-r--r--  swarm/storage/feed/lookup/store_test.go | 154
-rw-r--r--  swarm/storage/feed/lookup/timesim_test.go | 128
-rw-r--r--  swarm/storage/feed/query.go | 78
-rw-r--r--  swarm/storage/feed/query_test.go | 38
-rw-r--r--  swarm/storage/feed/request.go | 286
-rw-r--r--  swarm/storage/feed/request_test.go | 312
-rw-r--r--  swarm/storage/feed/sign.go | 75
-rw-r--r--  swarm/storage/feed/testutil.go | 72
-rw-r--r--  swarm/storage/feed/timestampprovider.go | 62
-rw-r--r--  swarm/storage/feed/topic.go | 105
-rw-r--r--  swarm/storage/feed/topic_test.go | 50
-rw-r--r--  swarm/storage/feed/update.go | 134
-rw-r--r--  swarm/storage/feed/update_test.go | 50
30 files changed, 0 insertions, 4138 deletions
diff --git a/swarm/storage/feed/binaryserializer.go b/swarm/storage/feed/binaryserializer.go
deleted file mode 100644
index 4e4f67a09..000000000
--- a/swarm/storage/feed/binaryserializer.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import "github.com/ethereum/go-ethereum/common/hexutil"
-
-type binarySerializer interface {
- binaryPut(serializedData []byte) error
- binaryLength() int
- binaryGet(serializedData []byte) error
-}
-
-// Values interface represents a string key-value store
-// useful for building query strings
-type Values interface {
- Get(key string) string
- Set(key, value string)
-}
-
-type valueSerializer interface {
- FromValues(values Values) error
- AppendValues(values Values)
-}
-
-// Hex serializes the structure and converts it to a hex string
-func Hex(bin binarySerializer) string {
- b := make([]byte, bin.binaryLength())
- bin.binaryPut(b)
- return hexutil.Encode(b)
-}
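
For orientation, a minimal sketch (not part of the deleted package; names are hypothetical) of a type satisfying the binarySerializer contract above, as it would look inside this package. Hex relies on binaryLength() reporting the exact number of bytes that binaryPut fills.

    // exampleValue is a hypothetical two-byte structure used only to
    // illustrate the binarySerializer contract consumed by Hex.
    type exampleValue struct {
        a, b byte
    }

    func (v *exampleValue) binaryLength() int { return 2 }

    func (v *exampleValue) binaryPut(serializedData []byte) error {
        if len(serializedData) != v.binaryLength() {
            return NewErrorf(ErrInvalidValue, "expected slice of size %d, got %d", v.binaryLength(), len(serializedData))
        }
        serializedData[0], serializedData[1] = v.a, v.b
        return nil
    }

    func (v *exampleValue) binaryGet(serializedData []byte) error {
        if len(serializedData) != v.binaryLength() {
            return NewErrorf(ErrInvalidValue, "expected slice of size %d, got %d", v.binaryLength(), len(serializedData))
        }
        v.a, v.b = serializedData[0], serializedData[1]
        return nil
    }

    // Hex(&exampleValue{a: 0xab, b: 0xcd}) then returns "0xabcd".
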
diff --git a/swarm/storage/feed/binaryserializer_test.go b/swarm/storage/feed/binaryserializer_test.go
deleted file mode 100644
index 37828d1c9..000000000
--- a/swarm/storage/feed/binaryserializer_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "encoding/json"
- "reflect"
- "testing"
-
- "github.com/ethereum/go-ethereum/common/hexutil"
-)
-
-// KV mocks a key value store
-type KV map[string]string
-
-func (kv KV) Get(key string) string {
- return kv[key]
-}
-func (kv KV) Set(key, value string) {
- kv[key] = value
-}
-
-func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) {
- if hexutil.Encode(actualValue) != expectedHex {
- t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue))
- }
-}
-
-func testBinarySerializerRecovery(t *testing.T, bin binarySerializer, expectedHex string) {
- name := reflect.TypeOf(bin).Elem().Name()
- serialized := make([]byte, bin.binaryLength())
- if err := bin.binaryPut(serialized); err != nil {
- t.Fatalf("%s.binaryPut error when trying to serialize structure: %s", name, err)
- }
-
- compareByteSliceToExpectedHex(t, name, serialized, expectedHex)
-
- recovered := reflect.New(reflect.TypeOf(bin).Elem()).Interface().(binarySerializer)
- if err := recovered.binaryGet(serialized); err != nil {
- t.Fatalf("%s.binaryGet error when trying to deserialize structure: %s", name, err)
- }
-
- if !reflect.DeepEqual(bin, recovered) {
- t.Fatalf("Expected that the recovered %s equals the marshalled %s", name, name)
- }
-
- serializedWrongLength := make([]byte, 1)
- copy(serializedWrongLength[:], serialized)
- if err := recovered.binaryGet(serializedWrongLength); err == nil {
- t.Fatalf("Expected %s.binaryGet to fail since data is too small", name)
- }
-}
-
-func testBinarySerializerLengthCheck(t *testing.T, bin binarySerializer) {
- name := reflect.TypeOf(bin).Elem().Name()
- // make a slice that is too small to contain the metadata
- serialized := make([]byte, bin.binaryLength()-1)
-
- if err := bin.binaryPut(serialized); err == nil {
- t.Fatalf("Expected %s.binaryPut to fail, since target slice is too small", name)
- }
-}
-
-func testValueSerializer(t *testing.T, v valueSerializer, expected KV) {
- name := reflect.TypeOf(v).Elem().Name()
- kv := make(KV)
-
- v.AppendValues(kv)
- if !reflect.DeepEqual(expected, kv) {
- expj, _ := json.Marshal(expected)
- gotj, _ := json.Marshal(kv)
- t.Fatalf("Expected %s.AppendValues to return %s, got %s", name, string(expj), string(gotj))
- }
-
- recovered := reflect.New(reflect.TypeOf(v).Elem()).Interface().(valueSerializer)
- err := recovered.FromValues(kv)
- if err != nil {
- t.Fatal(err)
- }
-
- if !reflect.DeepEqual(recovered, v) {
- t.Fatalf("Expected recovered %s to be the same", name)
- }
-}
diff --git a/swarm/storage/feed/cacheentry.go b/swarm/storage/feed/cacheentry.go
deleted file mode 100644
index 1c7e22619..000000000
--- a/swarm/storage/feed/cacheentry.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "bytes"
- "context"
- "time"
-
- "github.com/ethereum/go-ethereum/swarm/storage"
-)
-
-const (
- hasherCount = 8
- feedsHashAlgorithm = storage.SHA3Hash
- defaultRetrieveTimeout = 1000 * time.Millisecond
-)
-
-// cacheEntry caches the last known update of a specific Swarm feed.
-type cacheEntry struct {
- Update
- *bytes.Reader
- lastKey storage.Address
-}
-
-// implements storage.LazySectionReader
-func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) {
- return int64(len(r.Update.data)), nil
-}
-
-// Topic returns the feed's topic
-func (r *cacheEntry) Topic() Topic {
- return r.Feed.Topic
-}
diff --git a/swarm/storage/feed/doc.go b/swarm/storage/feed/doc.go
deleted file mode 100644
index 1f07948f2..000000000
--- a/swarm/storage/feed/doc.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Package feed defines Swarm Feeds.
-
-Swarm Feeds allows a user to build an update feed about a particular topic
-without resorting to ENS on each update.
-The update scheme is built on swarm chunks with chunk keys following
-a predictable, versionable pattern.
-
-A Feed is tied to a unique identifier that is deterministically generated out of
-the chosen topic.
-
-A Feed is defined as the series of updates of a specific user about a particular topic
-
-Actual data updates are also made in the form of swarm chunks. The keys
-of the updates are the hash of a concatenation of properties as follows:
-
-updateAddr = H(Feed, Epoch ID)
-where H is the SHA3 hash function
-Feed is the combination of Topic and the user address
-Epoch ID is a time slot. See the lookup package for more information.
-
-A user looking up the latest update in a Feed only needs to know the Topic
-and the other user's address.
-
-The Feed Update data is:
-updatedata = Feed|Epoch|data
-
-The full update data that goes in the chunk payload is:
-updatedata|sign(updatedata)
-
-Structure Summary:
-
-Request: Feed Update with signature
- Update: headers + data
- Header: Protocol version and reserved for future use placeholders
- ID: Information about how to locate a specific update
- Feed: Represents a user's series of publications about a specific Topic
- Topic: Item that the updates are about
- User: User who updates the Feed
- Epoch: time slot where the update is stored
-
-*/
-package feed
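
A standalone sketch of the address derivation described above (not part of the deleted package), assuming Keccak-256 as the SHA3 variant and the serialized sizes used elsewhere in this diff: a 32-byte topic, a 20-byte user address and an 8-byte epoch ID.

    package main

    import (
        "fmt"

        "golang.org/x/crypto/sha3"
    )

    // updateAddr illustrates updateAddr = H(Feed, Epoch ID): the feed is the
    // concatenation of topic and user address, followed by the epoch ID.
    func updateAddr(topic [32]byte, user [20]byte, epochID [8]byte) []byte {
        h := sha3.NewLegacyKeccak256() // assumed SHA3 variant
        h.Write(topic[:])
        h.Write(user[:])
        h.Write(epochID[:])
        return h.Sum(nil)
    }

    func main() {
        var topic [32]byte
        copy(topic[:], "example topic") // zero-padded to 32 bytes
        var user [20]byte               // the publisher's address
        var epochID [8]byte             // the time slot; see the lookup package
        fmt.Printf("updateAddr = 0x%x\n", updateAddr(topic, user, epochID))
    }
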
diff --git a/swarm/storage/feed/error.go b/swarm/storage/feed/error.go
deleted file mode 100644
index 206ba3316..000000000
--- a/swarm/storage/feed/error.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "fmt"
-)
-
-const (
- ErrInit = iota
- ErrNotFound
- ErrIO
- ErrUnauthorized
- ErrInvalidValue
- ErrDataOverflow
- ErrNothingToReturn
- ErrCorruptData
- ErrInvalidSignature
- ErrNotSynced
- ErrPeriodDepth
- ErrCnt
-)
-
-// Error is the typed error object used for Swarm feeds
-type Error struct {
- code int
- err string
-}
-
-// Error implements the error interface
-func (e *Error) Error() string {
- return e.err
-}
-
-// Code returns the error code
-// Error codes are enumerated in the error.go file within the feeds package
-func (e *Error) Code() int {
- return e.code
-}
-
-// NewError creates a new Swarm feeds Error object with the specified code and custom error message
-func NewError(code int, s string) error {
- if code < 0 || code >= ErrCnt {
- panic("no such error code!")
- }
- r := &Error{
- err: s,
- }
- switch code {
- case ErrNotFound, ErrIO, ErrUnauthorized, ErrInvalidValue, ErrDataOverflow, ErrNothingToReturn, ErrInvalidSignature, ErrNotSynced, ErrPeriodDepth, ErrCorruptData:
- r.code = code
- }
- return r
-}
-
-// NewErrorf is a convenience version of NewError that incorporates printf-style formatting
-func NewErrorf(code int, format string, args ...interface{}) error {
- return NewError(code, fmt.Sprintf(format, args...))
-}
diff --git a/swarm/storage/feed/feed.go b/swarm/storage/feed/feed.go
deleted file mode 100644
index b6ea665a6..000000000
--- a/swarm/storage/feed/feed.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "hash"
- "unsafe"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/swarm/storage"
-)
-
-// Feed represents a particular user's stream of updates on a topic
-type Feed struct {
- Topic Topic `json:"topic"`
- User common.Address `json:"user"`
-}
-
-// Feed layout:
-// TopicLength bytes
-// userAddr common.AddressLength bytes
-const feedLength = TopicLength + common.AddressLength
-
-// mapKey calculates a unique id for this feed. Used by the cache map in `Handler`
-func (f *Feed) mapKey() uint64 {
- serializedData := make([]byte, feedLength)
- f.binaryPut(serializedData)
- hasher := hashPool.Get().(hash.Hash)
- defer hashPool.Put(hasher)
- hasher.Reset()
- hasher.Write(serializedData)
- hash := hasher.Sum(nil)
- return *(*uint64)(unsafe.Pointer(&hash[0]))
-}
-
-// binaryPut serializes this feed instance into the provided slice
-func (f *Feed) binaryPut(serializedData []byte) error {
- if len(serializedData) != feedLength {
- return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize feed. Expected %d, got %d", feedLength, len(serializedData))
- }
- var cursor int
- copy(serializedData[cursor:cursor+TopicLength], f.Topic[:TopicLength])
- cursor += TopicLength
-
- copy(serializedData[cursor:cursor+common.AddressLength], f.User[:])
- cursor += common.AddressLength
-
- return nil
-}
-
-// binaryLength returns the expected size of this structure when serialized
-func (f *Feed) binaryLength() int {
- return feedLength
-}
-
-// binaryGet restores the current instance from the information contained in the passed slice
-func (f *Feed) binaryGet(serializedData []byte) error {
- if len(serializedData) != feedLength {
- return NewErrorf(ErrInvalidValue, "Incorrect slice size to read feed. Expected %d, got %d", feedLength, len(serializedData))
- }
-
- var cursor int
- copy(f.Topic[:], serializedData[cursor:cursor+TopicLength])
- cursor += TopicLength
-
- copy(f.User[:], serializedData[cursor:cursor+common.AddressLength])
- cursor += common.AddressLength
-
- return nil
-}
-
-// Hex serializes the feed to a hex string
-func (f *Feed) Hex() string {
- serializedData := make([]byte, feedLength)
- f.binaryPut(serializedData)
- return hexutil.Encode(serializedData)
-}
-
-// FromValues deserializes this instance from a string key-value store
-// useful to parse query strings
-func (f *Feed) FromValues(values Values) (err error) {
- topic := values.Get("topic")
- if topic != "" {
- if err := f.Topic.FromHex(values.Get("topic")); err != nil {
- return err
- }
- } else { // see if the user set name and relatedcontent
- name := values.Get("name")
- relatedContent, _ := hexutil.Decode(values.Get("relatedcontent"))
- if len(relatedContent) > 0 {
- if len(relatedContent) < storage.AddressLength {
- return NewErrorf(ErrInvalidValue, "relatedcontent field must be a hex-encoded byte array exactly %d bytes long", storage.AddressLength)
- }
- relatedContent = relatedContent[:storage.AddressLength]
- }
- f.Topic, err = NewTopic(name, relatedContent)
- if err != nil {
- return err
- }
- }
- f.User = common.HexToAddress(values.Get("user"))
- return nil
-}
-
-// AppendValues serializes this structure into the provided string key-value store
-// useful to build query strings
-func (f *Feed) AppendValues(values Values) {
- values.Set("topic", f.Topic.Hex())
- values.Set("user", f.User.Hex())
-}
diff --git a/swarm/storage/feed/feed_test.go b/swarm/storage/feed/feed_test.go
deleted file mode 100644
index 6a575594f..000000000
--- a/swarm/storage/feed/feed_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package feed
-
-import (
- "testing"
-)
-
-func getTestFeed() *Feed {
- topic, _ := NewTopic("world news report, every hour", nil)
- return &Feed{
- Topic: topic,
- User: newCharlieSigner().Address(),
- }
-}
-
-func TestFeedSerializerDeserializer(t *testing.T) {
- testBinarySerializerRecovery(t, getTestFeed(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781c")
-}
-
-func TestFeedSerializerLengthCheck(t *testing.T) {
- testBinarySerializerLengthCheck(t, getTestFeed())
-}
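
The expected hex in TestFeedSerializerDeserializer above decodes exactly along the layout declared in feed.go: the first 32 bytes (776f726c64...7572000000) are the topic, i.e. "world news report, every hour" zero-padded to TopicLength, and the remaining 20 bytes (876a8936a7cd0b79ef0735ad0896c1afe278781c) are the test user's (Charlie's) address, for a total of feedLength = 52 bytes.
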
diff --git a/swarm/storage/feed/handler.go b/swarm/storage/feed/handler.go
deleted file mode 100644
index 98ed7fa99..000000000
--- a/swarm/storage/feed/handler.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-// Handler is the API for feeds
-// It enables creating, updating, syncing and retrieving feed updates and their data
-package feed
-
-import (
- "bytes"
- "context"
- "fmt"
- "sync"
- "sync/atomic"
-
- "github.com/ethereum/go-ethereum/swarm/chunk"
-
- "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
-
- "github.com/ethereum/go-ethereum/swarm/log"
- "github.com/ethereum/go-ethereum/swarm/storage"
-)
-
-type Handler struct {
- chunkStore *storage.NetStore
- HashSize int
- cache map[uint64]*cacheEntry
- cacheLock sync.RWMutex
-}
-
-// HandlerParams passes parameters to the Handler constructor NewHandler
-// Signer and TimestampProvider are mandatory parameters
-type HandlerParams struct {
-}
-
-// hashPool contains a pool of ready hashers
-var hashPool sync.Pool
-
-// init initializes the package and hashPool
-func init() {
- hashPool = sync.Pool{
- New: func() interface{} {
- return storage.MakeHashFunc(feedsHashAlgorithm)()
- },
- }
-}
-
-// NewHandler creates a new Swarm feeds API
-func NewHandler(params *HandlerParams) *Handler {
- fh := &Handler{
- cache: make(map[uint64]*cacheEntry),
- }
-
- for i := 0; i < hasherCount; i++ {
- hashfunc := storage.MakeHashFunc(feedsHashAlgorithm)()
- if fh.HashSize == 0 {
- fh.HashSize = hashfunc.Size()
- }
- hashPool.Put(hashfunc)
- }
-
- return fh
-}
-
-// SetStore sets the store backend for the Swarm feeds API
-func (h *Handler) SetStore(store *storage.NetStore) {
- h.chunkStore = store
-}
-
-// Validate is a chunk validation method
-// If it looks like a feed update, the chunk address is checked against the userAddr of the update's signature
-// It implements the storage.ChunkValidator interface
-func (h *Handler) Validate(chunk storage.Chunk) bool {
- if len(chunk.Data()) < minimumSignedUpdateLength {
- return false
- }
-
- // check if it is a properly formatted update chunk with
- // valid signature and proof of ownership of the feed it is trying
- // to update
-
- // First, deserialize the chunk
- var r Request
- if err := r.fromChunk(chunk); err != nil {
- log.Debug("Invalid feed update chunk", "addr", chunk.Address(), "err", err)
- return false
- }
-
- // Verify signatures and that the signer actually owns the feed
- // If it fails, it means either the signature is not valid, data is corrupted
- // or someone is trying to update someone else's feed.
- if err := r.Verify(); err != nil {
- log.Debug("Invalid feed update signature", "err", err)
- return false
- }
-
- return true
-}
-
-// GetContent retrieves the data payload of the last synced update of the feed
-func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) {
- if feed == nil {
- return nil, nil, NewError(ErrInvalidValue, "feed is nil")
- }
- feedUpdate := h.get(feed)
- if feedUpdate == nil {
- return nil, nil, NewError(ErrNotFound, "feed update not cached")
- }
- return feedUpdate.lastKey, feedUpdate.data, nil
-}
-
-// NewRequest prepares a Request structure with all the necessary information to
-// just add the desired data and sign it.
-// The resulting structure can then be signed and passed to Handler.Update to be verified and sent
-func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request, err error) {
- if feed == nil {
- return nil, NewError(ErrInvalidValue, "feed cannot be nil")
- }
-
- now := TimestampProvider.Now().Time
- request = new(Request)
- request.Header.Version = ProtocolVersion
-
- query := NewQueryLatest(feed, lookup.NoClue)
-
- feedUpdate, err := h.Lookup(ctx, query)
- if err != nil {
- if err.(*Error).code != ErrNotFound {
- return nil, err
- }
- // not finding updates means that there is a network error
- // or that the feed really does not have updates
- }
-
- request.Feed = *feed
-
- // if we already have an update, then find next epoch
- if feedUpdate != nil {
- request.Epoch = lookup.GetNextEpoch(feedUpdate.Epoch, now)
- } else {
- request.Epoch = lookup.GetFirstEpoch(now)
- }
-
- return request, nil
-}
-
-// Lookup retrieves a specific or latest feed update
-// Lookup works differently depending on the configuration of `query`
-// See the `query` documentation and helper functions:
-// `NewQueryLatest` and `NewQuery`
-func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) {
-
- timeLimit := query.TimeLimit
- if timeLimit == 0 { // if time limit is set to zero, the user wants to get the latest update
- timeLimit = TimestampProvider.Now().Time
- }
-
- if query.Hint == lookup.NoClue { // try to use our cache
- entry := h.get(&query.Feed)
- if entry != nil && entry.Epoch.Time <= timeLimit { // avoid bad hints
- query.Hint = entry.Epoch
- }
- }
-
- // we can't look for anything without a store
- if h.chunkStore == nil {
- return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups")
- }
-
- var readCount int32
-
- // Invoke the lookup engine.
- // The callback will be called every time the lookup algorithm needs to guess
- requestPtr, err := lookup.Lookup(ctx, timeLimit, query.Hint, func(ctx context.Context, epoch lookup.Epoch, now uint64) (interface{}, error) {
- atomic.AddInt32(&readCount, 1)
- id := ID{
- Feed: query.Feed,
- Epoch: epoch,
- }
- ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout)
- defer cancel()
-
- ch, err := h.chunkStore.Get(ctx, chunk.ModeGetLookup, id.Addr())
- if err != nil {
- if err == context.DeadlineExceeded { // chunk not found
- return nil, nil
- }
- return nil, err //something else happened or context was cancelled.
- }
-
- var request Request
- if err := request.fromChunk(ch); err != nil {
- return nil, nil
- }
- if request.Time <= timeLimit {
- return &request, nil
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
-
- log.Info(fmt.Sprintf("Feed lookup finished in %d lookups", readCount))
-
- request, _ := requestPtr.(*Request)
- if request == nil {
- return nil, NewError(ErrNotFound, "no feed updates found")
- }
- return h.updateCache(request)
-
-}
-
-// update feed updates cache with specified content
-func (h *Handler) updateCache(request *Request) (*cacheEntry, error) {
-
- updateAddr := request.Addr()
- log.Trace("feed cache update", "topic", request.Topic.Hex(), "updateaddr", updateAddr, "epoch time", request.Epoch.Time, "epoch level", request.Epoch.Level)
-
- entry := h.get(&request.Feed)
- if entry == nil {
- entry = &cacheEntry{}
- h.set(&request.Feed, entry)
- }
-
- // update the cached entry for this feed
- entry.lastKey = updateAddr
- entry.Update = request.Update
- entry.Reader = bytes.NewReader(entry.data)
- return entry, nil
-}
-
-// Update publishes a feed update
-// Note that a feed update cannot span chunks, and thus has a maximum net length of 4096 bytes, including update header data and signature.
-// This results in a max payload of `maxUpdateDataLength` (check update.go for more details).
-// An error will be returned if the total length of the chunk payload exceeds this limit.
-// Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update
-// on the network.
-func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) {
-
- // we can't update anything without a store
- if h.chunkStore == nil {
- return nil, NewError(ErrInit, "Call Handler.SetStore() before updating")
- }
-
- feedUpdate := h.get(&r.Feed)
- if feedUpdate != nil && feedUpdate.Epoch.Equals(r.Epoch) { // This is the only cheap check we can do for sure
- return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist")
- }
-
- ch, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big
- if err != nil {
- return nil, err
- }
-
- // send the chunk
- h.chunkStore.Put(ctx, chunk.ModePutUpload, ch)
- log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", ch.Data())
- // update our feed updates map cache entry if the new update is more recent than the one we have, if we have it.
- if feedUpdate != nil && r.Epoch.After(feedUpdate.Epoch) {
- feedUpdate.Epoch = r.Epoch
- feedUpdate.data = make([]byte, len(r.data))
- feedUpdate.lastKey = r.idAddr
- copy(feedUpdate.data, r.data)
- feedUpdate.Reader = bytes.NewReader(feedUpdate.data)
- }
-
- return r.idAddr, nil
-}
-
-// Retrieves the feed update cache value for the given feed
-func (h *Handler) get(feed *Feed) *cacheEntry {
- mapKey := feed.mapKey()
- h.cacheLock.RLock()
- defer h.cacheLock.RUnlock()
- feedUpdate := h.cache[mapKey]
- return feedUpdate
-}
-
-// Sets the feed update cache value for the given feed
-func (h *Handler) set(feed *Feed, feedUpdate *cacheEntry) {
- mapKey := feed.mapKey()
- h.cacheLock.Lock()
- defer h.cacheLock.Unlock()
- h.cache[mapKey] = feedUpdate
-}
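
Pieced together from the API above, a hedged sketch of the end-to-end publish/retrieve flow; the store, signer and topic arguments are assumed to be set up by the caller, and the tests below exercise the same calls.

    // publishAndRead sketches the handler flow. It is illustrative only and
    // not part of the deleted package.
    func publishAndRead(ctx context.Context, netStore *storage.NetStore, signer Signer, topic Topic) ([]byte, error) {
        handler := NewHandler(&HandlerParams{})
        handler.SetStore(netStore)

        fd := Feed{Topic: topic, User: signer.Address()}

        // publish: prepare a request for the next epoch, attach data, sign, upload
        request, err := handler.NewRequest(ctx, &fd)
        if err != nil {
            return nil, err
        }
        request.SetData([]byte("hello world"))
        if err := request.Sign(signer); err != nil {
            return nil, err
        }
        if _, err := handler.Update(ctx, request); err != nil {
            return nil, err
        }

        // retrieve: look up the latest update, then read its payload from the cache
        if _, err := handler.Lookup(ctx, NewQueryLatest(&fd, lookup.NoClue)); err != nil {
            return nil, err
        }
        _, data, err := handler.GetContent(&fd)
        return data, err
    }
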
diff --git a/swarm/storage/feed/handler_test.go b/swarm/storage/feed/handler_test.go
deleted file mode 100644
index 3d8213e60..000000000
--- a/swarm/storage/feed/handler_test.go
+++ /dev/null
@@ -1,505 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "bytes"
- "context"
- "flag"
- "fmt"
- "io/ioutil"
- "os"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/swarm/chunk"
- "github.com/ethereum/go-ethereum/swarm/storage"
- "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
- "github.com/ethereum/go-ethereum/swarm/storage/localstore"
-)
-
-var (
- loglevel = flag.Int("loglevel", 3, "loglevel")
- startTime = Timestamp{
- Time: uint64(4200),
- }
- cleanF func()
- subtopicName = "føø.bar"
-)
-
-func init() {
- flag.Parse()
- log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true)))))
-}
-
-// simulated timeProvider
-type fakeTimeProvider struct {
- currentTime uint64
-}
-
-func (f *fakeTimeProvider) Tick() {
- f.currentTime++
-}
-
-func (f *fakeTimeProvider) Set(time uint64) {
- f.currentTime = time
-}
-
-func (f *fakeTimeProvider) FastForward(offset uint64) {
- f.currentTime += offset
-}
-
-func (f *fakeTimeProvider) Now() Timestamp {
- return Timestamp{
- Time: f.currentTime,
- }
-}
-
-// make updates and retrieve them based on periods and versions
-func TestFeedsHandler(t *testing.T) {
-
- // make fake timeProvider
- clock := &fakeTimeProvider{
- currentTime: startTime.Time, // clock starts at t=4200
- }
-
- // signer containing private key
- signer := newAliceSigner()
-
- feedsHandler, datadir, teardownTest, err := setupTest(clock, signer)
- if err != nil {
- t.Fatal(err)
- }
- defer teardownTest()
-
- // create a new feed
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- topic, _ := NewTopic("Mess with Swarm feeds code and see what ghost catches you", nil)
- fd := Feed{
- Topic: topic,
- User: signer.Address(),
- }
-
- // data for updates:
- updates := []string{
- "blinky", // t=4200
- "pinky", // t=4242
- "inky", // t=4284
- "clyde", // t=4285
- }
-
- request := NewFirstRequest(fd.Topic) // this timestamps the update at t = 4200 (start time)
- chunkAddress := make(map[string]storage.Address)
- data := []byte(updates[0])
- request.SetData(data)
- if err := request.Sign(signer); err != nil {
- t.Fatal(err)
- }
- chunkAddress[updates[0]], err = feedsHandler.Update(ctx, request)
- if err != nil {
- t.Fatal(err)
- }
-
- // move the clock ahead 21 seconds
- clock.FastForward(21) // t=4221
-
- request, err = feedsHandler.NewRequest(ctx, &request.Feed) // this timestamps the update at t = 4221
- if err != nil {
- t.Fatal(err)
- }
- if request.Epoch.Base() != 0 || request.Epoch.Level != lookup.HighestLevel-1 {
- t.Fatalf("Suggested epoch BaseTime should be 0 and Epoch level should be %d", lookup.HighestLevel-1)
- }
-
- request.Epoch.Level = lookup.HighestLevel // force level 25 instead of 24 to make it fail
- data = []byte(updates[1])
- request.SetData(data)
- if err := request.Sign(signer); err != nil {
- t.Fatal(err)
- }
- chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request)
- if err == nil {
- t.Fatal("Expected update to fail since an update in this epoch already exists")
- }
-
- // move the clock ahead 21 seconds
- clock.FastForward(21) // t=4242
- request, err = feedsHandler.NewRequest(ctx, &request.Feed)
- if err != nil {
- t.Fatal(err)
- }
- request.SetData(data)
- if err := request.Sign(signer); err != nil {
- t.Fatal(err)
- }
- chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request)
- if err != nil {
- t.Fatal(err)
- }
-
- // move the clock ahead 42 seconds
- clock.FastForward(42) // t=4284
- request, err = feedsHandler.NewRequest(ctx, &request.Feed)
- if err != nil {
- t.Fatal(err)
- }
- data = []byte(updates[2])
- request.SetData(data)
- if err := request.Sign(signer); err != nil {
- t.Fatal(err)
- }
- chunkAddress[updates[2]], err = feedsHandler.Update(ctx, request)
- if err != nil {
- t.Fatal(err)
- }
-
- // move the clock ahead 1 second
- clock.FastForward(1) // t=4285
- request, err = feedsHandler.NewRequest(ctx, &request.Feed)
- if err != nil {
- t.Fatal(err)
- }
- if request.Epoch.Base() != 0 || request.Epoch.Level != 28 {
- t.Fatalf("Expected epoch base time to be %d, got %d. Expected epoch level to be %d, got %d", 0, request.Epoch.Base(), 28, request.Epoch.Level)
- }
- data = []byte(updates[3])
- request.SetData(data)
-
- if err := request.Sign(signer); err != nil {
- t.Fatal(err)
- }
- chunkAddress[updates[3]], err = feedsHandler.Update(ctx, request)
- if err != nil {
- t.Fatal(err)
- }
-
- time.Sleep(time.Second)
- feedsHandler.Close()
-
- // check we can retrieve the updates after close
- clock.FastForward(2000) // t=6285
-
- feedParams := &HandlerParams{}
-
- feedsHandler2, err := NewTestHandler(datadir, feedParams)
- if err != nil {
- t.Fatal(err)
- }
-
- update2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue))
- if err != nil {
- t.Fatal(err)
- }
-
- // last update should be "clyde"
- if !bytes.Equal(update2.data, []byte(updates[len(updates)-1])) {
- t.Fatalf("feed update data was %v, expected %v", string(update2.data), updates[len(updates)-1])
- }
- if update2.Level != 28 {
- t.Fatalf("feed update epoch level was %d, expected 28", update2.Level)
- }
- if update2.Base() != 0 {
- t.Fatalf("feed update epoch base time was %d, expected 0", update2.Base())
- }
- log.Debug("Latest lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data)
-
- // specific point in time
- update, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue))
- if err != nil {
- t.Fatal(err)
- }
- // check data
- if !bytes.Equal(update.data, []byte(updates[2])) {
- t.Fatalf("feed update data (historical) was %v, expected %v", string(update2.data), updates[2])
- }
- log.Debug("Historical lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data)
-
- // looking up a time before the first update should yield an error
- update, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue))
- if err == nil {
- t.Fatalf("expected previous to fail, returned epoch %s data %v", update.Epoch.String(), update.data)
- }
-
-}
-
-const Day = 60 * 60 * 24
-const Year = Day * 365
-const Month = Day * 30
-
-func generateData(x uint64) []byte {
- return []byte(fmt.Sprintf("%d", x))
-}
-
-func TestSparseUpdates(t *testing.T) {
-
- // make fake timeProvider
- timeProvider := &fakeTimeProvider{
- currentTime: startTime.Time,
- }
-
- // signer containing private key
- signer := newAliceSigner()
-
- rh, datadir, teardownTest, err := setupTest(timeProvider, signer)
- if err != nil {
- t.Fatal(err)
- }
- defer teardownTest()
- defer os.RemoveAll(datadir)
-
- // create a new feed
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- topic, _ := NewTopic("Very slow updates", nil)
- fd := Feed{
- Topic: topic,
- User: signer.Address(),
- }
-
- // publish one update every 5 years since Unix 0 until today
- today := uint64(1533799046)
- var epoch lookup.Epoch
- var lastUpdateTime uint64
- for T := uint64(0); T < today; T += 5 * Year {
- request := NewFirstRequest(fd.Topic)
- request.Epoch = lookup.GetNextEpoch(epoch, T)
- request.data = generateData(T) // this generates some data that depends on T, so we can check later
- if err := request.Sign(signer); err != nil {
- t.Fatal(err)
- }
-
- if _, err := rh.Update(ctx, request); err != nil {
- t.Fatal(err)
- }
- epoch = request.Epoch
- lastUpdateTime = T
- }
-
- query := NewQuery(&fd, today, lookup.NoClue)
-
- _, err = rh.Lookup(ctx, query)
- if err != nil {
- t.Fatal(err)
- }
-
- _, content, err := rh.GetContent(&fd)
- if err != nil {
- t.Fatal(err)
- }
-
- if !bytes.Equal(generateData(lastUpdateTime), content) {
- t.Fatalf("Expected to recover last written value %d, got %s", lastUpdateTime, string(content))
- }
-
- // lookup the closest update to 35*Year + 6* Month (~ June 2005):
- // it should find the update we put on 35*Year, since we were updating every 5 years.
-
- query.TimeLimit = 35*Year + 6*Month
-
- _, err = rh.Lookup(ctx, query)
- if err != nil {
- t.Fatal(err)
- }
-
- _, content, err = rh.GetContent(&fd)
- if err != nil {
- t.Fatal(err)
- }
-
- if !bytes.Equal(generateData(35*Year), content) {
- t.Fatalf("Expected to recover %d, got %s", 35*Year, string(content))
- }
-}
-
-func TestValidator(t *testing.T) {
-
- // make fake timeProvider
- timeProvider := &fakeTimeProvider{
- currentTime: startTime.Time,
- }
-
- // signer containing private key. Alice will be the good girl
- signer := newAliceSigner()
-
- // set up sim timeProvider
- rh, _, teardownTest, err := setupTest(timeProvider, signer)
- if err != nil {
- t.Fatal(err)
- }
- defer teardownTest()
-
- // create new feed
- topic, _ := NewTopic(subtopicName, nil)
- fd := Feed{
- Topic: topic,
- User: signer.Address(),
- }
- mr := NewFirstRequest(fd.Topic)
-
- // chunk with address
- data := []byte("foo")
- mr.SetData(data)
- if err := mr.Sign(signer); err != nil {
- t.Fatalf("sign fail: %v", err)
- }
-
- chunk, err := mr.toChunk()
- if err != nil {
- t.Fatal(err)
- }
- if !rh.Validate(chunk) {
- t.Fatal("Chunk validator fail on update chunk")
- }
-
- address := chunk.Address()
- // mess with the address
- address[0] = 11
- address[15] = 99
-
- if rh.Validate(storage.NewChunk(address, chunk.Data())) {
- t.Fatal("Expected Validate to fail with false chunk address")
- }
-}
-
-// tests that the content address validator correctly checks the data
-// tests that feed update chunks are passed through content address validator
-// there is some redundancy in this test as it also tests content addressed chunks,
-// which should be evaluated as invalid chunks by this validator
-func TestValidatorInStore(t *testing.T) {
-
- // make fake timeProvider
- TimestampProvider = &fakeTimeProvider{
- currentTime: startTime.Time,
- }
-
- // signer containing private key
- signer := newAliceSigner()
-
- // set up localstore
- datadir, err := ioutil.TempDir("", "storage-testfeedsvalidator")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(datadir)
-
- localstore, err := localstore.New(datadir, make([]byte, 32), nil)
- if err != nil {
- t.Fatal(err)
- }
-
- // set up Swarm feeds handler and add it as a validator to the localstore
- fhParams := &HandlerParams{}
- fh := NewHandler(fhParams)
- store := chunk.NewValidatorStore(localstore, fh)
-
- // create content addressed chunks, one good, one faulty
- chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2)
- goodChunk := chunks[0]
- badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data())
-
- topic, _ := NewTopic("xyzzy", nil)
- fd := Feed{
- Topic: topic,
- User: signer.Address(),
- }
-
- // create a feed update chunk with correct publickey
- id := ID{
- Epoch: lookup.Epoch{Time: 42,
- Level: 1,
- },
- Feed: fd,
- }
-
- updateAddr := id.Addr()
- data := []byte("bar")
-
- r := new(Request)
- r.idAddr = updateAddr
- r.Update.ID = id
- r.data = data
-
- r.Sign(signer)
-
- uglyChunk, err := r.toChunk()
- if err != nil {
- t.Fatal(err)
- }
-
- // put the chunks in the store and check their error status
- _, err = store.Put(context.Background(), chunk.ModePutUpload, goodChunk)
- if err == nil {
- t.Fatal("expected error on good content address chunk with feed update validator only, but got nil")
- }
- _, err = store.Put(context.Background(), chunk.ModePutUpload, badChunk)
- if err == nil {
- t.Fatal("expected error on bad content address chunk with feed update validator only, but got nil")
- }
- _, err = store.Put(context.Background(), chunk.ModePutUpload, uglyChunk)
- if err != nil {
- t.Fatalf("expected no error on feed update chunk with feed update validator only, but got: %s", err)
- }
-}
-
-// create rpc and feeds Handler
-func setupTest(timeProvider timestampProvider, signer Signer) (fh *TestHandler, datadir string, teardown func(), err error) {
-
- var fsClean func()
- var rpcClean func()
- cleanF = func() {
- if fsClean != nil {
- fsClean()
- }
- if rpcClean != nil {
- rpcClean()
- }
- }
-
- // temp datadir
- datadir, err = ioutil.TempDir("", "fh")
- if err != nil {
- return nil, "", nil, err
- }
- fsClean = func() {
- os.RemoveAll(datadir)
- }
-
- TimestampProvider = timeProvider
- fhParams := &HandlerParams{}
- fh, err = NewTestHandler(datadir, fhParams)
- return fh, datadir, cleanF, err
-}
-
-func newAliceSigner() *GenericSigner {
- privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
- return NewGenericSigner(privKey)
-}
-
-func newBobSigner() *GenericSigner {
- privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca")
- return NewGenericSigner(privKey)
-}
-
-func newCharlieSigner() *GenericSigner {
- privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca")
- return NewGenericSigner(privKey)
-}
diff --git a/swarm/storage/feed/id.go b/swarm/storage/feed/id.go
deleted file mode 100644
index 7e17743c1..000000000
--- a/swarm/storage/feed/id.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "fmt"
- "hash"
- "strconv"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
-
- "github.com/ethereum/go-ethereum/swarm/storage"
-)
-
-// ID uniquely identifies an update on the network.
-type ID struct {
- Feed `json:"feed"`
- lookup.Epoch `json:"epoch"`
-}
-
-// ID layout:
-// Feed feedLength bytes
-// Epoch EpochLength
-const idLength = feedLength + lookup.EpochLength
-
-// Addr calculates the feed update chunk address corresponding to this ID
-func (u *ID) Addr() (updateAddr storage.Address) {
- serializedData := make([]byte, idLength)
- var cursor int
- u.Feed.binaryPut(serializedData[cursor : cursor+feedLength])
- cursor += feedLength
-
- eid := u.Epoch.ID()
- copy(serializedData[cursor:cursor+lookup.EpochLength], eid[:])
-
- hasher := hashPool.Get().(hash.Hash)
- defer hashPool.Put(hasher)
- hasher.Reset()
- hasher.Write(serializedData)
- return hasher.Sum(nil)
-}
-
-// binaryPut serializes this instance into the provided slice
-func (u *ID) binaryPut(serializedData []byte) error {
- if len(serializedData) != idLength {
- return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize ID. Expected %d, got %d", idLength, len(serializedData))
- }
- var cursor int
- if err := u.Feed.binaryPut(serializedData[cursor : cursor+feedLength]); err != nil {
- return err
- }
- cursor += feedLength
-
- epochBytes, err := u.Epoch.MarshalBinary()
- if err != nil {
- return err
- }
- copy(serializedData[cursor:cursor+lookup.EpochLength], epochBytes[:])
- cursor += lookup.EpochLength
-
- return nil
-}
-
-// binaryLength returns the expected size of this structure when serialized
-func (u *ID) binaryLength() int {
- return idLength
-}
-
-// binaryGet restores the current instance from the information contained in the passed slice
-func (u *ID) binaryGet(serializedData []byte) error {
- if len(serializedData) != idLength {
- return NewErrorf(ErrInvalidValue, "Incorrect slice size to read ID. Expected %d, got %d", idLength, len(serializedData))
- }
-
- var cursor int
- if err := u.Feed.binaryGet(serializedData[cursor : cursor+feedLength]); err != nil {
- return err
- }
- cursor += feedLength
-
- if err := u.Epoch.UnmarshalBinary(serializedData[cursor : cursor+lookup.EpochLength]); err != nil {
- return err
- }
- cursor += lookup.EpochLength
-
- return nil
-}
-
-// FromValues deserializes this instance from a string key-value store
-// useful to parse query strings
-func (u *ID) FromValues(values Values) error {
- level, _ := strconv.ParseUint(values.Get("level"), 10, 32)
- u.Epoch.Level = uint8(level)
- u.Epoch.Time, _ = strconv.ParseUint(values.Get("time"), 10, 64)
-
- if u.Feed.User == (common.Address{}) {
- return u.Feed.FromValues(values)
- }
- return nil
-}
-
-// AppendValues serializes this structure into the provided string key-value store
-// useful to build query strings
-func (u *ID) AppendValues(values Values) {
- values.Set("level", fmt.Sprintf("%d", u.Epoch.Level))
- values.Set("time", fmt.Sprintf("%d", u.Epoch.Time))
- u.Feed.AppendValues(values)
-}
diff --git a/swarm/storage/feed/id_test.go b/swarm/storage/feed/id_test.go
deleted file mode 100644
index 8a820abfe..000000000
--- a/swarm/storage/feed/id_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package feed
-
-import (
- "testing"
-
- "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
-)
-
-func getTestID() *ID {
- return &ID{
- Feed: *getTestFeed(),
- Epoch: lookup.GetFirstEpoch(1000),
- }
-}
-
-func TestIDAddr(t *testing.T) {
- id := getTestID()
- updateAddr := id.Addr()
- compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x842d0a81987b9755dfeaa5558f5c134c1c0af48b6545005cac7b533d9411453a")
-}
-
-func TestIDSerializer(t *testing.T) {
- testBinarySerializerRecovery(t, getTestID(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce80300000000001f")
-}
-
-func TestIDLengthCheck(t *testing.T) {
- testBinarySerializerLengthCheck(t, getTestID())
-}
diff --git a/swarm/storage/feed/lookup/algorithm_fluzcapacitor.go b/swarm/storage/feed/lookup/algorithm_fluzcapacitor.go
deleted file mode 100644
index 3840bd0fd..000000000
--- a/swarm/storage/feed/lookup/algorithm_fluzcapacitor.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package lookup
-
-import "context"
-
-// FluzCapacitorAlgorithm works by narrowing the epoch search area if an update is found
-// going back and forth in time
-// First, it will attempt to find an update where it should be now if the hint was
-// really the last update. If that lookup fails, then the last update must be either the hint itself
-// or the epochs right below. If however, that lookup succeeds, then the update must be
-// that one or within the epochs right below.
-// see the guide for a more graphical representation
-func FluzCapacitorAlgorithm(ctx context.Context, now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) {
- var lastFound interface{}
- var epoch Epoch
- if hint == NoClue {
- hint = worstHint
- }
-
- t := now
-
- for {
- epoch = GetNextEpoch(hint, t)
- value, err = read(ctx, epoch, now)
- if err != nil {
- return nil, err
- }
- if value != nil {
- lastFound = value
- if epoch.Level == LowestLevel || epoch.Equals(hint) {
- return value, nil
- }
- hint = epoch
- continue
- }
- if epoch.Base() == hint.Base() {
- if lastFound != nil {
- return lastFound, nil
- }
- // we have reached the hint itself
- if hint == worstHint {
- return nil, nil
- }
- // check it out
- value, err = read(ctx, hint, now)
- if err != nil {
- return nil, err
- }
- if value != nil {
- return value, nil
- }
- // bad hint.
- t = hint.Base()
- hint = worstHint
- continue
- }
- base := epoch.Base()
- if base == 0 {
- return nil, nil
- }
- t = base - 1
- }
-
-}
diff --git a/swarm/storage/feed/lookup/algorithm_longearth.go b/swarm/storage/feed/lookup/algorithm_longearth.go
deleted file mode 100644
index d0342f67c..000000000
--- a/swarm/storage/feed/lookup/algorithm_longearth.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package lookup
-
-import (
- "context"
- "sync/atomic"
- "time"
-)
-
-type stepFunc func(ctx context.Context, t uint64, hint Epoch) interface{}
-
-// LongEarthLookaheadDelay is the headstart the lookahead gives R before it launches
-var LongEarthLookaheadDelay = 250 * time.Millisecond
-
-// LongEarthLookbackDelay is the headstart the lookback gives R before it launches
-var LongEarthLookbackDelay = 250 * time.Millisecond
-
-// LongEarthAlgorithm explores possible lookup paths in parallel, pruning paths as soon
-// as a more promising lookup path is found. As a result, this lookup algorithm is an order
-// of magnitude faster than the FluzCapacitor algorithm, but at the expense of more exploratory reads.
-// This algorithm works as follows. On each step, the next epoch is immediately looked up (R)
-// and given a head start, while two parallel "steps" are launched a short time after:
-// look ahead (A) is the path the algorithm would take if the R lookup returns a value, whereas
-// look back (B) is the path the algorithm would take if the R lookup failed.
-// as soon as R is actually finished, the A or B paths are pruned depending on the value of R.
-// if A returns earlier than R, then R and B read operations can be safely canceled, saving time.
-// The maximum number of active read operations is calculated as 2^(timeout/headstart).
-// If headstart is infinite, this algorithm behaves as FluzCapacitor.
-// timeout is the maximum execution time of the passed `read` function.
-// the two head starts can be configured by changing LongEarthLookaheadDelay or LongEarthLookbackDelay
-func LongEarthAlgorithm(ctx context.Context, now uint64, hint Epoch, read ReadFunc) (interface{}, error) {
- if hint == NoClue {
- hint = worstHint
- }
-
- var stepCounter int32 // for debugging, stepCounter allows to give an ID to each step instance
-
- errc := make(chan struct{}) // errc will help as an error shortcut signal
- var gerr error // in case of error, this variable will be set
-
- var step stepFunc // For efficiency, the algorithm step is defined as a closure
- step = func(ctxS context.Context, t uint64, last Epoch) interface{} {
- stepID := atomic.AddInt32(&stepCounter, 1) // give an ID to this call instance
- trace(stepID, "init: t=%d, last=%s", t, last.String())
- var valueA, valueB, valueR interface{}
-
- // initialize the three read contexts
- ctxR, cancelR := context.WithCancel(ctxS) // will handle the current read operation
- ctxA, cancelA := context.WithCancel(ctxS) // will handle the lookahead path
- ctxB, cancelB := context.WithCancel(ctxS) // will handle the lookback path
-
- epoch := GetNextEpoch(last, t) // calculate the epoch to look up in this step instance
-
- // define the lookAhead function, which will follow the path as if R was successful
- lookAhead := func() {
- valueA = step(ctxA, t, epoch) // launch the next step, recursively.
- if valueA != nil { // if this path is successful, we don't need R or B.
- cancelB()
- cancelR()
- }
- }
-
- // define the lookBack function, which will follow the path as if R was unsuccessful
- lookBack := func() {
- if epoch.Base() == last.Base() {
- return
- }
- base := epoch.Base()
- if base == 0 {
- return
- }
- valueB = step(ctxB, base-1, last)
- }
-
- go func() { //goroutine to read the current epoch (R)
- defer cancelR()
- var err error
- valueR, err = read(ctxR, epoch, now) // read this epoch
- if valueR == nil { // if unsuccessful, cancel lookahead, otherwise cancel lookback.
- cancelA()
- } else {
- cancelB()
- }
- if err != nil && err != context.Canceled {
- gerr = err
- close(errc)
- }
- }()
-
- go func() { // goroutine to give a headstart to R and then launch lookahead.
- defer cancelA()
-
- // if we are at the lowest level or the epoch to look up equals the last one,
- // then we cannot lookahead (can't go lower or repeat the same lookup, this would
- // cause an infinite loop)
- if epoch.Level == LowestLevel || epoch.Equals(last) {
- return
- }
-
- // give a head start to R, or launch immediately if R finishes early enough
- select {
- case <-TimeAfter(LongEarthLookaheadDelay):
- lookAhead()
- case <-ctxR.Done():
- if valueR != nil {
- lookAhead() // only look ahead if R was successful
- }
- case <-ctxA.Done():
- }
- }()
-
- go func() { // goroutine to give a headstart to R and then launch lookback.
- defer cancelB()
-
- // give a head start to R, or launch immediately if R finishes early enough
- select {
- case <-TimeAfter(LongEarthLookbackDelay):
- lookBack()
- case <-ctxR.Done():
- if valueR == nil {
- lookBack() // only look back in case R failed
- }
- case <-ctxB.Done():
- }
- }()
-
- <-ctxA.Done()
- if valueA != nil {
- trace(stepID, "Returning valueA=%v", valueA)
- return valueA
- }
-
- <-ctxR.Done()
- if valueR != nil {
- trace(stepID, "Returning valueR=%v", valueR)
- return valueR
- }
- <-ctxB.Done()
- trace(stepID, "Returning valueB=%v", valueB)
- return valueB
- }
-
- var value interface{}
- stepCtx, cancel := context.WithCancel(ctx)
-
- go func() { // launch the root step in its own goroutine to allow cancellation
- defer cancel()
- value = step(stepCtx, now, hint)
- }()
-
- // wait for the algorithm to finish, but shortcut in case
- // of errors
- select {
- case <-stepCtx.Done():
- case <-errc:
- cancel()
- return nil, gerr
- }
-
- if ctx.Err() != nil {
- return nil, ctx.Err()
- }
-
- if value != nil || hint == worstHint {
- return value, nil
- }
-
- // at this point the algorithm did not return a value,
- // so we challenge the hint given.
- value, err := read(ctx, hint, now)
- if err != nil {
- return nil, err
- }
- if value != nil {
- return value, nil // hint is valid, return it.
- }
-
- // hint is invalid. Invoke the algorithm
- // without hint.
- now = hint.Base()
- if hint.Level == HighestLevel {
- now--
- }
-
- return LongEarthAlgorithm(ctx, now, NoClue, read)
-}
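
To put the bound stated in the comment above in concrete terms: with the default 250 ms head starts defined here and the 1000 ms per-read timeout the feed handler applies to each lookup read (defaultRetrieveTimeout in cacheentry.go above), the worst case is 2^(1000/250) = 2^4 = 16 read operations in flight for a single lookup; halving the head start to 125 ms would raise that bound to 2^8 = 256.
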
diff --git a/swarm/storage/feed/lookup/epoch.go b/swarm/storage/feed/lookup/epoch.go
deleted file mode 100644
index 6d75ba243..000000000
--- a/swarm/storage/feed/lookup/epoch.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package lookup
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
-)
-
-// Epoch represents a time slot at a particular frequency level
-type Epoch struct {
- Time uint64 `json:"time"` // Time stores the time at which the update or lookup takes place
- Level uint8 `json:"level"` // Level indicates the frequency level as the exponent of a power of 2
-}
-
-// EpochID is a unique identifier for an Epoch, based on its level and base time.
-type EpochID [8]byte
-
-// EpochLength stores the serialized binary length of an Epoch
-const EpochLength = 8
-
-// MaxTime contains the highest possible time value an Epoch can handle
-const MaxTime uint64 = (1 << 56) - 1
-
-// Base returns the base time of the Epoch
-func (e *Epoch) Base() uint64 {
- return getBaseTime(e.Time, e.Level)
-}
-
-// ID Returns the unique identifier of this epoch
-func (e *Epoch) ID() EpochID {
- base := e.Base()
- var id EpochID
- binary.LittleEndian.PutUint64(id[:], base)
- id[7] = e.Level
- return id
-}
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface
-func (e *Epoch) MarshalBinary() (data []byte, err error) {
- b := make([]byte, 8)
- binary.LittleEndian.PutUint64(b[:], e.Time)
- b[7] = e.Level
- return b, nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface
-func (e *Epoch) UnmarshalBinary(data []byte) error {
- if len(data) != EpochLength {
- return errors.New("Invalid data unmarshalling Epoch")
- }
- b := make([]byte, 8)
- copy(b, data)
- e.Level = b[7]
- b[7] = 0
- e.Time = binary.LittleEndian.Uint64(b)
- return nil
-}
-
-// After returns true if this epoch occurs later or exactly at the other epoch.
-func (e *Epoch) After(epoch Epoch) bool {
- if e.Time == epoch.Time {
- return e.Level < epoch.Level
- }
- return e.Time >= epoch.Time
-}
-
-// Equals compares two epochs and returns true if they refer to the same time period.
-func (e *Epoch) Equals(epoch Epoch) bool {
- return e.Level == epoch.Level && e.Base() == epoch.Base()
-}
-
-// String implements the Stringer interface.
-func (e *Epoch) String() string {
- return fmt.Sprintf("Epoch{Base: %d, Time:%d, Level:%d}", e.Base(), e.Time, e.Level)
-}
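
A small sketch of the serialization defined above: an Epoch packs its time into the low seven bytes (little-endian) and the level into byte 7, and its ID does the same with the base time, so the round-trip below is lossless:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
)

func main() {
	e := lookup.Epoch{Time: 1533799046, Level: 25}
	data, _ := e.MarshalBinary() // 8 bytes: time (little-endian, 7 bytes) with the level in byte 7

	var e2 lookup.Epoch
	_ = e2.UnmarshalBinary(data)

	fmt.Println(e2.Equals(e)) // true: same level and same base time
	fmt.Printf("%x\n", e.ID()) // base time (little-endian) with the level in byte 7
}
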
diff --git a/swarm/storage/feed/lookup/epoch_test.go b/swarm/storage/feed/lookup/epoch_test.go
deleted file mode 100644
index 0629f3d1d..000000000
--- a/swarm/storage/feed/lookup/epoch_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package lookup_test
-
-import (
- "testing"
-
- "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
-)
-
-func TestMarshallers(t *testing.T) {
-
- for i := uint64(1); i < lookup.MaxTime; i *= 3 {
- e := lookup.Epoch{
- Time: i,
- Level: uint8(i % 20),
- }
- b, err := e.MarshalBinary()
- if err != nil {
- t.Fatal(err)
- }
- var e2 lookup.Epoch
- if err := e2.UnmarshalBinary(b); err != nil {
- t.Fatal(err)
- }
- if e != e2 {
- t.Fatal("Expected unmarshalled epoch to be equal to the marshalled one")
- }
- }
-
-}
-
-func TestAfter(t *testing.T) {
- a := lookup.Epoch{
- Time: 5,
- Level: 3,
- }
- b := lookup.Epoch{
- Time: 6,
- Level: 3,
- }
- c := lookup.Epoch{
- Time: 6,
- Level: 4,
- }
-
- if !b.After(a) {
- t.Fatal("Expected 'after' to be true, got false")
- }
-
- if b.After(b) {
- t.Fatal("Expected 'after' to be false when both epochs are identical, got true")
- }
-
- if !b.After(c) {
- t.Fatal("Expected 'after' to be true when both epochs have the same time but the first has a lower level, got false")
- }
-
-}
diff --git a/swarm/storage/feed/lookup/lookup.go b/swarm/storage/feed/lookup/lookup.go
deleted file mode 100644
index 4b233a0e0..000000000
--- a/swarm/storage/feed/lookup/lookup.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-/*
-Package lookup defines feed lookup algorithms and provides tools to place updates
-so they can be found
-*/
-package lookup
-
-import (
- "context"
- "time"
-)
-
-const maxuint64 = ^uint64(0)
-
-// LowestLevel establishes the frequency resolution of the lookup algorithm as a power of 2.
-const LowestLevel uint8 = 0 // default is 0 (1 second)
-
-// HighestLevel sets the lowest frequency the algorithm will operate at, as a power of 2.
-// 31 -> 2^31 seconds is roughly 68 years.
-const HighestLevel = 31
-
-// DefaultLevel sets what level will be chosen to search when there is no hint
-const DefaultLevel = HighestLevel
-
-//Algorithm is the function signature of a lookup algorithm
-type Algorithm func(ctx context.Context, now uint64, hint Epoch, read ReadFunc) (value interface{}, err error)
-
-// Lookup finds the update with the highest timestamp that is less than or equal to 'now'
-// It takes a hint which should be the epoch where the last known update was
-// If you don't know in what epoch the last update happened, simply submit lookup.NoClue
-// read() will be called on each lookup attempt
-// Returns an error only if read() returns an error
-// Returns nil if an update was not found
-var Lookup Algorithm = LongEarthAlgorithm
-
-// TimeAfter must point to a function that returns a timer
-// This is here so that tests can replace it with
-// a mock up timer factory to simulate time deterministically
-var TimeAfter = time.After
-
-// ReadFunc is a handler called by Lookup each time it attempts to find a value
-// It should return <nil> if a value is not found
-// It should return <nil> if a value is found, but its timestamp is higher than "now"
-// It should only return an error in case the handler wants to stop the
-// lookup process entirely.
-type ReadFunc func(ctx context.Context, epoch Epoch, now uint64) (interface{}, error)
-
-// NoClue is a hint that can be provided when the Lookup caller does not have
-// a clue about where the last update may be
-var NoClue = Epoch{}
-
-// getBaseTime returns the epoch base time of the given
-// time and level
-func getBaseTime(t uint64, level uint8) uint64 {
- return t & (maxuint64 << level)
-}
-
-// Hint creates a hint based only on the last known update time
-func Hint(last uint64) Epoch {
- return Epoch{
- Time: last,
- Level: DefaultLevel,
- }
-}
-
-// GetNextLevel returns the frequency level a next update should be placed at, provided where
-// the last update was and what time it is now.
-// This is the first nonzero bit of the XOR of 'last' and 'now', counting from the most significant bit,
-// but limited so that it never returns a level smaller than last.Level-1
-func GetNextLevel(last Epoch, now uint64) uint8 {
- // First XOR the last epoch base time with the current clock.
- // This will set all the common most significant bits to zero.
- mix := (last.Base() ^ now)
-
- // Then, make sure we stop the below loop before one level below the current, by setting
- // that level's bit to 1.
- // If the next level is lower than the current one, it must be exactly level-1 and not lower.
- mix |= (1 << (last.Level - 1))
-
- // if the last update was more than 2^highestLevel seconds ago, choose the highest level
- if mix > (maxuint64 >> (64 - HighestLevel - 1)) {
- return HighestLevel
- }
-
- // set up a mask to scan for nonzero bits, starting at the highest level
- mask := uint64(1 << (HighestLevel))
-
- for i := uint8(HighestLevel); i > LowestLevel; i-- {
- if mix&mask != 0 { // if we find a nonzero bit, this is the level the next update should be at.
- return i
- }
- mask = mask >> 1 // move our bit one position to the right
- }
- return 0
-}
-
-// GetNextEpoch returns the epoch where the next update should be located
-// according to where the previous update was
-// and what time it is now.
-func GetNextEpoch(last Epoch, now uint64) Epoch {
- if last == NoClue {
- return GetFirstEpoch(now)
- }
- level := GetNextLevel(last, now)
- return Epoch{
- Level: level,
- Time: now,
- }
-}
-
-// GetFirstEpoch returns the epoch where the first update should be located
-// based on what time it is now.
-func GetFirstEpoch(now uint64) Epoch {
- return Epoch{Level: HighestLevel, Time: now}
-}
-
-var worstHint = Epoch{Time: 0, Level: 63}
-
-var trace = func(id int32, formatString string, a ...interface{}) {
- //fmt.Printf("Step ID #%d "+formatString+"\n", append([]interface{}{id}, a...)...)
-}
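
A brief sketch of how a publisher is meant to use the helpers above: the first update lands at HighestLevel, each subsequent update is placed by GetNextEpoch relative to the previous one, and Hint derives a coarse hint from a bare timestamp when the exact epoch is unknown:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
)

func main() {
	now := uint64(1533799046)

	first := lookup.GetNextEpoch(lookup.NoClue, now) // same as GetFirstEpoch(now): level 31
	second := lookup.GetNextEpoch(first, now+60)     // one minute later: a finer level
	hint := lookup.Hint(now + 60)                    // coarse hint built from the last known update time only

	fmt.Printf("first: %s, second: %s, hint: %s\n", first.String(), second.String(), hint.String())
}
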
diff --git a/swarm/storage/feed/lookup/lookup_test.go b/swarm/storage/feed/lookup/lookup_test.go
deleted file mode 100644
index b0d132de6..000000000
--- a/swarm/storage/feed/lookup/lookup_test.go
+++ /dev/null
@@ -1,641 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package lookup_test
-
-import (
- "context"
- "fmt"
- "math/rand"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
-)
-
-type AlgorithmInfo struct {
- Lookup lookup.Algorithm
- Name string
-}
-
-var algorithms = []AlgorithmInfo{
- {lookup.FluzCapacitorAlgorithm, "FluzCapacitor"},
- {lookup.LongEarthAlgorithm, "LongEarth"},
-}
-
-const enablePrintMetrics = false // set to true to display algorithm benchmarking stats
-
-func printMetric(metric string, store *Store, elapsed time.Duration) {
- if enablePrintMetrics {
- fmt.Printf("metric=%s, readcount=%d (successful=%d, failed=%d), cached=%d, canceled=%d, maxSimult=%d, elapsed=%s\n", metric,
- store.reads, store.successful, store.failed, store.cacheHits, store.canceled, store.maxSimultaneous, elapsed)
- }
-}
-
-const Day = 60 * 60 * 24
-const Year = Day * 365
-const Month = Day * 30
-
-// DefaultStoreConfig indicates the time the different read
-// operations will take in the simulation.
-// This allows measuring an algorithm's performance relative
-// to others.
-var DefaultStoreConfig = &StoreConfig{
- CacheReadTime: 50 * time.Millisecond,
- FailedReadTime: 1000 * time.Millisecond,
- SuccessfulReadTime: 500 * time.Millisecond,
-}
-
-// TestLookup verifies that the last update and intermediate updates are
-// found, and that the same last update is found faster when a hint is given
-func TestLookup(t *testing.T) {
- // ### 1.- Initialize stopwatch time sim
- stopwatch := NewStopwatch(50 * time.Millisecond)
- lookup.TimeAfter = stopwatch.TimeAfter()
- defer stopwatch.Stop()
-
- // ### 2.- Setup mock storage and generate updates
- store := NewStore(DefaultStoreConfig)
- readFunc := store.MakeReadFunc()
-
- // write an update every month for 12 months 3 years ago and then silence for two years
- now := uint64(1533799046)
- var epoch lookup.Epoch
-
- var lastData *Data
- for i := uint64(0); i < 12; i++ {
- t := uint64(now - Year*3 + i*Month)
- data := Data{
- Payload: t, //our "payload" will be the timestamp itself.
- Time: t,
- }
- epoch = store.Update(epoch, t, &data)
- lastData = &data
- }
-
- // ### 3.- Test all algorithms
- for _, algo := range algorithms {
- t.Run(algo.Name, func(t *testing.T) {
-
- store.Reset() // reset the store read counters
-
- // ### 3.1.- Test how long it takes to find the last update without a hint:
- timeElapsedWithoutHint := stopwatch.Measure(func() {
-
- // try to get the last value
- value, err := algo.Lookup(context.Background(), now, lookup.NoClue, readFunc)
- if err != nil {
- t.Fatal(err)
- }
- if value != lastData {
- t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value)
- }
-
- })
- printMetric("SIMPLE READ", store, timeElapsedWithoutHint)
-
- store.Reset() // reset the read counters for the next test
-
- // ### 3.2.- Test how long it takes to find the last update *with* a hint.
- // it should take less time!
- timeElapsed := stopwatch.Measure(func() {
- // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update
- value, err := algo.Lookup(context.Background(), now, epoch, readFunc)
- if err != nil {
- t.Fatal(err)
- }
- if value != lastData {
- t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value)
- }
- })
- printMetric("WITH HINT", store, stopwatch.Elapsed())
-
- if timeElapsed > timeElapsedWithoutHint {
- t.Fatalf("Expected lookup to complete faster than %s since we provided a hint. Took %s", timeElapsedWithoutHint, timeElapsed)
- }
-
- store.Reset() // reset the read counters for the next test
-
- // ### 3.3.- try to get an intermediate value
- // if we look for a value in, e.g., now - Year*3 + 6*Month, we should get that value
- // Since the "payload" is the timestamp itself, we can check this.
- expectedTime := now - Year*3 + 6*Month
- timeElapsed = stopwatch.Measure(func() {
- value, err := algo.Lookup(context.Background(), expectedTime, lookup.NoClue, readFunc)
- if err != nil {
- t.Fatal(err)
- }
-
- data, ok := value.(*Data)
-
- if !ok {
- t.Fatal("Expected value to contain data")
- }
-
- if data.Time != expectedTime {
- t.Fatalf("Expected value timestamp to be %d, got %d", expectedTime, data.Time)
- }
- })
- printMetric("INTERMEDIATE READ", store, timeElapsed)
- })
- }
-}
-
-// TestOneUpdateAt0 checks if the lookup algorithm can return an update that
-// is precisely set at t=0
-func TestOneUpdateAt0(t *testing.T) {
- // ### 1.- Initialize stopwatch time sim
- stopwatch := NewStopwatch(50 * time.Millisecond)
- lookup.TimeAfter = stopwatch.TimeAfter()
- defer stopwatch.Stop()
-
- // ### 2.- Setup mock storage and generate updates
- store := NewStore(DefaultStoreConfig)
- readFunc := store.MakeReadFunc()
-
- now := uint64(1533903729)
-
- var epoch lookup.Epoch
- data := Data{
- Payload: 79,
- Time: 0,
- }
- store.Update(epoch, 0, &data) //place 1 update in t=0
-
- // ### 3.- Test all algorithms
- for _, algo := range algorithms {
- t.Run(algo.Name, func(t *testing.T) {
- store.Reset() // reset the read counters for the next test
- timeElapsed := stopwatch.Measure(func() {
- value, err := algo.Lookup(context.Background(), now, lookup.NoClue, readFunc)
- if err != nil {
- t.Fatal(err)
- }
- if value != &data {
- t.Fatalf("Expected lookup to return the last written value: %v. Got %v", data, value)
- }
- })
- printMetric("SIMPLE", store, timeElapsed)
- })
- }
-}
-
-// TestBadHint tests if the update is found even when a bad hint is given
-func TestBadHint(t *testing.T) {
- // ### 1.- Initialize stopwatch time sim
- stopwatch := NewStopwatch(50 * time.Millisecond)
- lookup.TimeAfter = stopwatch.TimeAfter()
- defer stopwatch.Stop()
-
- // ### 2.- Setup mock storage and generate updates
- store := NewStore(DefaultStoreConfig)
- readFunc := store.MakeReadFunc()
-
- now := uint64(1533903729)
-
- var epoch lookup.Epoch
- data := Data{
- Payload: 79,
- Time: 0,
- }
-
- // place an update for t=1200
- store.Update(epoch, 1200, &data)
-
- // come up with some evil hint
- badHint := lookup.Epoch{
- Level: 18,
- Time: 1200000000,
- }
-
- // ### 3.- Test all algorithms
- for _, algo := range algorithms {
- t.Run(algo.Name, func(t *testing.T) {
- store.Reset()
- timeElapsed := stopwatch.Measure(func() {
- value, err := algo.Lookup(context.Background(), now, badHint, readFunc)
- if err != nil {
- t.Fatal(err)
- }
- if value != &data {
- t.Fatalf("Expected lookup to return the last written value: %v. Got %v", data, value)
- }
- })
- printMetric("SIMPLE", store, timeElapsed)
- })
- }
-}
-
-// TestBadHintNextToUpdate checks whether the update is found when the bad hint is exactly below the last update
-func TestBadHintNextToUpdate(t *testing.T) {
- // ### 1.- Initialize stopwatch time sim
- stopwatch := NewStopwatch(50 * time.Millisecond)
- lookup.TimeAfter = stopwatch.TimeAfter()
- defer stopwatch.Stop()
-
- // ### 2.- Setup mock storage and generate updates
- store := NewStore(DefaultStoreConfig)
- readFunc := store.MakeReadFunc()
-
- now := uint64(1533903729)
- var last *Data
-
- /* the following loop places updates in the following epochs:
- Update# Time Base Level
- 0 1200000000 1174405120 25
- 1 1200000001 1191182336 24
- 2 1200000002 1199570944 23
- 3 1200000003 1199570944 22
- 4 1200000004 1199570944 21
-
- The situation we want to trigger is to give a bad hint exactly
- in T=1200000005, B=1199570944 and L=20, which is where the next
- update would have logically been.
- This affects only when the bad hint's base == previous update's base,
- in this case 1199570944
-
- */
- var epoch lookup.Epoch
- for i := uint64(0); i < 5; i++ {
- data := Data{
- Payload: i,
- Time: 0,
- }
- last = &data
- epoch = store.Update(epoch, 1200000000+i, &data)
- }
-
- // come up with some evil hint:
- // put it where the next update would have been
- badHint := lookup.Epoch{
- Level: 20,
- Time: 1200000005,
- }
-
- // ### 3.- Test all algorithms
- for _, algo := range algorithms {
- t.Run(algo.Name, func(t *testing.T) {
- store.Reset() // reset read counters for next test
-
- timeElapsed := stopwatch.Measure(func() {
- value, err := algo.Lookup(context.Background(), now, badHint, readFunc)
- if err != nil {
- t.Fatal(err)
- }
- if value != last {
- t.Fatalf("Expected lookup to return the last written value: %v. Got %v", last, value)
- }
- })
- printMetric("SIMPLE", store, timeElapsed)
- })
- }
-}
-
-// TestContextCancellation checks whether a lookup can be canceled
-func TestContextCancellation(t *testing.T) {
-
- // ### 1.- Test all algorithms
- for _, algo := range algorithms {
- t.Run(algo.Name, func(t *testing.T) {
-
- // ### 2.1.- Test a simple cancel of an always blocking read function
- readFunc := func(ctx context.Context, epoch lookup.Epoch, now uint64) (interface{}, error) {
- <-ctx.Done()
- return nil, ctx.Err()
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- errc := make(chan error)
-
- go func() {
- _, err := algo.Lookup(ctx, 1200000000, lookup.NoClue, readFunc)
- errc <- err
- }()
-
- cancel() //actually cancel the lookup
-
- if err := <-errc; err != context.Canceled {
- t.Fatalf("Expected lookup to return a context canceled error, got %v", err)
- }
-
- // ### 2.2.- Test context cancellation during hint lookup:
- ctx, cancel = context.WithCancel(context.Background())
- errc = make(chan error)
- someHint := lookup.Epoch{
- Level: 25,
- Time: 300,
- }
- // put up a read function that gets canceled only on hint lookup
- readFunc = func(ctx context.Context, epoch lookup.Epoch, now uint64) (interface{}, error) {
- if epoch == someHint {
- go cancel()
- <-ctx.Done()
- return nil, ctx.Err()
- }
- return nil, nil
- }
-
- go func() {
- _, err := algo.Lookup(ctx, 301, someHint, readFunc)
- errc <- err
- }()
-
- if err := <-errc; err != context.Canceled {
- t.Fatalf("Expected lookup to return a context canceled error, got %v", err)
- }
- })
- }
-
-}
-
-// TestLookupFail makes sure the lookup function fails in a timely manner
-// when there are no updates at all
-func TestLookupFail(t *testing.T) {
- // ### 1.- Initialize stopwatch time sim
- stopwatch := NewStopwatch(50 * time.Millisecond)
- lookup.TimeAfter = stopwatch.TimeAfter()
- defer stopwatch.Stop()
-
- // ### 2.- Setup mock storage, without adding updates
- // don't write anything and try to look up.
- // we're testing we don't get stuck in a loop and that the lookup
- // function converges in a timely fashion
- store := NewStore(DefaultStoreConfig)
- readFunc := store.MakeReadFunc()
-
- now := uint64(1533903729)
-
- // ### 3.- Test all algorithms
- for _, algo := range algorithms {
- t.Run(algo.Name, func(t *testing.T) {
- store.Reset()
-
- stopwatch.Measure(func() {
- value, err := algo.Lookup(context.Background(), now, lookup.NoClue, readFunc)
- if err != nil {
- t.Fatal(err)
- }
- if value != nil {
- t.Fatal("Expected value to be nil, since the update should've failed")
- }
- })
-
- printMetric("SIMPLE", store, stopwatch.Elapsed())
- })
- }
-}
-
-func TestHighFreqUpdates(t *testing.T) {
- // ### 1.- Initialize stopwatch time sim
- stopwatch := NewStopwatch(50 * time.Millisecond)
- lookup.TimeAfter = stopwatch.TimeAfter()
- defer stopwatch.Stop()
-
- // ### 2.- Setup mock storage and add one update per second
- // for the last 1000 seconds:
- store := NewStore(DefaultStoreConfig)
- readFunc := store.MakeReadFunc()
-
- now := uint64(1533903729)
-
- var epoch lookup.Epoch
-
- var lastData *Data
- for i := uint64(0); i <= 994; i++ {
- T := uint64(now - 1000 + i)
- data := Data{
- Payload: T, //our "payload" will be the timestamp itself.
- Time: T,
- }
- epoch = store.Update(epoch, T, &data)
- lastData = &data
- }
-
- // ### 3.- Test all algorithms
- for _, algo := range algorithms {
- t.Run(algo.Name, func(t *testing.T) {
- store.Reset() // reset read counters for next test
-
- // ### 3.1.- Test how long it takes to find the last update without a hint:
- timeElapsedWithoutHint := stopwatch.Measure(func() {
- value, err := algo.Lookup(context.Background(), lastData.Time, lookup.NoClue, readFunc)
- stopwatch.Stop()
- if err != nil {
- t.Fatal(err)
- }
-
- if value != lastData {
- t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value)
- }
- })
- printMetric("SIMPLE", store, timeElapsedWithoutHint)
-
- // reset the read count for the next test
- store.Reset()
-
- // ### 3.2.- Now test how long it takes to find the last update *with* a hint,
- // it should take less time!
- timeElapsed := stopwatch.Measure(func() {
- // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update
- value, err := algo.Lookup(context.Background(), now, epoch, readFunc)
- stopwatch.Stop()
- if err != nil {
- t.Fatal(err)
- }
-
- if value != lastData {
- t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value)
- }
-
- })
- if timeElapsed > timeElapsedWithoutHint {
- t.Fatalf("Expected lookup to complete faster than %s since we provided a hint. Took %s", timeElapsedWithoutHint, timeElapsed)
- }
- printMetric("WITH HINT", store, timeElapsed)
-
- store.Reset() // reset read counters
-
- // ### 3.3.- Test multiple lookups at different intervals
- timeElapsed = stopwatch.Measure(func() {
- for i := uint64(0); i <= 10; i++ {
- T := uint64(now - 1000 + i)
- value, err := algo.Lookup(context.Background(), T, lookup.NoClue, readFunc)
- if err != nil {
- t.Fatal(err)
- }
- data, _ := value.(*Data)
- if data == nil {
- t.Fatalf("Expected lookup to return %d, got nil", T)
- }
- if data.Payload != T {
- t.Fatalf("Expected lookup to return %d, got %d", T, data.Payload)
- }
- }
- })
- printMetric("MULTIPLE", store, timeElapsed)
- })
- }
-}
-
-// TestSparseUpdates checks the lookup algorithm when
-// updates come sparsely and in bursts
-func TestSparseUpdates(t *testing.T) {
- // ### 1.- Initialize stopwatch time sim
- stopwatch := NewStopwatch(50 * time.Millisecond)
- lookup.TimeAfter = stopwatch.TimeAfter()
- defer stopwatch.Stop()
-
- // ### 2.- Setup mock storage and write updates sparsely, in bursts:
- // a burst every 5 years, 3 times, starting on Jan 1st 1970, and then silence
- store := NewStore(DefaultStoreConfig)
- readFunc := store.MakeReadFunc()
-
- now := uint64(633799046)
- var epoch lookup.Epoch
-
- var lastData *Data
- for i := uint64(0); i < 3; i++ {
- for j := uint64(0); j < 10; j++ {
- T := uint64(Year*5*i + j) // write a burst of 10 updates every 5 years 3 times starting in Jan 1st 1970 and then silence
- data := Data{
- Payload: T, //our "payload" will be the timestamp itself.
- Time: T,
- }
- epoch = store.Update(epoch, T, &data)
- lastData = &data
- }
- }
-
- // ### 3.- Test all algorithms
- for _, algo := range algorithms {
- t.Run(algo.Name, func(t *testing.T) {
- store.Reset() // reset read counters for next test
-
- // ### 3.1.- Test how long it takes to find the last update without a hint:
- timeElapsedWithoutHint := stopwatch.Measure(func() {
- value, err := algo.Lookup(context.Background(), now, lookup.NoClue, readFunc)
- stopwatch.Stop()
- if err != nil {
- t.Fatal(err)
- }
-
- if value != lastData {
- t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value)
- }
- })
- printMetric("SIMPLE", store, timeElapsedWithoutHint)
-
- // reset the read count for the next test
- store.Reset()
-
- // ### 3.2.- Now test how long it takes to find the last update *with* a hint,
- // it should take less time!
- timeElapsed := stopwatch.Measure(func() {
- value, err := algo.Lookup(context.Background(), now, epoch, readFunc)
- if err != nil {
- t.Fatal(err)
- }
-
- if value != lastData {
- t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value)
- }
- })
- if timeElapsed > timeElapsedWithoutHint {
- t.Fatalf("Expected lookup to complete faster than %s since we provided a hint. Took %s", timeElapsedWithoutHint, timeElapsed)
- }
-
- printMetric("WITH HINT", store, stopwatch.Elapsed())
-
- })
- }
-}
-
-// testG will hold precooked test results
-// fields are abbreviated to reduce the size of the literal below
-type testG struct {
- e lookup.Epoch // last
- n uint64 // next level
- x uint8 // expected result
-}
-
-// test cases
-var testGetNextLevelCases = []testG{{e: lookup.Epoch{Time: 989875233, Level: 12}, n: 989807323, x: 24}, {e: lookup.Epoch{Time: 995807650, Level: 18}, n: 995807649, x: 17}, {e: lookup.Epoch{Time: 969167082, Level: 0}, n: 969111431, x: 18}, {e: lookup.Epoch{Time: 993087628, Level: 14}, n: 993087627, x: 13}, {e: lookup.Epoch{Time: 963364631, Level: 20}, n: 962941578, x: 19}, {e: lookup.Epoch{Time: 963497510, Level: 16}, n: 963497509, x: 15}, {e: lookup.Epoch{Time: 955421349, Level: 22}, n: 929292183, x: 27}, {e: lookup.Epoch{Time: 968220379, Level: 15}, n: 968220378, x: 14}, {e: lookup.Epoch{Time: 939129014, Level: 6}, n: 939126953, x: 11}, {e: lookup.Epoch{Time: 907847903, Level: 6}, n: 907846146, x: 11}, {e: lookup.Epoch{Time: 910835564, Level: 15}, n: 703619757, x: 28}, {e: lookup.Epoch{Time: 913578333, Level: 22}, n: 913578332, x: 21}, {e: lookup.Epoch{Time: 895818460, Level: 3}, n: 895818132, x: 9}, {e: lookup.Epoch{Time: 903843025, Level: 24}, n: 903843025, x: 23}, {e: lookup.Epoch{Time: 877889433, Level: 13}, n: 149120378, x: 29}, {e: lookup.Epoch{Time: 901450396, Level: 10}, n: 858997793, x: 26}, {e: lookup.Epoch{Time: 925179910, Level: 3}, n: 925177237, x: 13}, {e: lookup.Epoch{Time: 913485477, Level: 21}, n: 907146511, x: 22}, {e: lookup.Epoch{Time: 924462991, Level: 18}, n: 924462990, x: 17}, {e: lookup.Epoch{Time: 941175128, Level: 13}, n: 941168924, x: 13}, {e: lookup.Epoch{Time: 920126583, Level: 3}, n: 538054817, x: 28}, {e: lookup.Epoch{Time: 891721312, Level: 18}, n: 890975671, x: 21}, {e: lookup.Epoch{Time: 920397342, Level: 11}, n: 920396960, x: 10}, {e: lookup.Epoch{Time: 953406530, Level: 3}, n: 953406530, x: 2}, {e: lookup.Epoch{Time: 920024527, Level: 23}, n: 920024527, x: 22}, {e: lookup.Epoch{Time: 927050922, Level: 7}, n: 927049632, x: 11}, {e: lookup.Epoch{Time: 894599900, Level: 10}, n: 890021707, x: 22}, {e: lookup.Epoch{Time: 883010150, Level: 3}, n: 882969902, x: 15}, {e: lookup.Epoch{Time: 855561102, Level: 22}, n: 855561102, x: 21}, {e: lookup.Epoch{Time: 828245477, Level: 19}, n: 825245571, x: 22}, {e: lookup.Epoch{Time: 851095026, Level: 4}, n: 851083702, x: 13}, {e: lookup.Epoch{Time: 879209039, Level: 11}, n: 879209039, x: 10}, {e: lookup.Epoch{Time: 859265651, Level: 0}, n: 840582083, x: 24}, {e: lookup.Epoch{Time: 827349870, Level: 24}, n: 827349869, x: 23}, {e: lookup.Epoch{Time: 819602318, Level: 3}, n: 18446744073490860182, x: 31}, {e: lookup.Epoch{Time: 849708538, Level: 7}, n: 849708538, x: 6}, {e: lookup.Epoch{Time: 873885094, Level: 11}, n: 873881798, x: 11}, {e: lookup.Epoch{Time: 852169070, Level: 1}, n: 852049399, x: 17}, {e: lookup.Epoch{Time: 852885343, Level: 8}, n: 852875652, x: 13}, {e: lookup.Epoch{Time: 830957057, Level: 8}, n: 830955867, x: 10}, {e: lookup.Epoch{Time: 807353611, Level: 4}, n: 807325211, x: 16}, {e: lookup.Epoch{Time: 803198793, Level: 8}, n: 696477575, x: 26}, {e: lookup.Epoch{Time: 791356887, Level: 10}, n: 791356003, x: 10}, {e: lookup.Epoch{Time: 817771215, Level: 12}, n: 817708431, x: 17}, {e: lookup.Epoch{Time: 846211146, Level: 14}, n: 846211146, x: 13}, {e: lookup.Epoch{Time: 821849822, Level: 9}, n: 821849229, x: 9}, {e: lookup.Epoch{Time: 789508756, Level: 9}, n: 789508755, x: 8}, {e: lookup.Epoch{Time: 814088521, Level: 12}, n: 814088512, x: 11}, {e: lookup.Epoch{Time: 813665673, Level: 6}, n: 813548257, x: 17}, {e: lookup.Epoch{Time: 791472209, Level: 6}, n: 720857845, x: 26}, {e: lookup.Epoch{Time: 805687744, Level: 2}, n: 805687720, x: 6}, {e: lookup.Epoch{Time: 783153927, Level: 12}, n: 783134053, x: 14}, 
{e: lookup.Epoch{Time: 815033655, Level: 11}, n: 815033654, x: 10}, {e: lookup.Epoch{Time: 821184581, Level: 6}, n: 821184464, x: 11}, {e: lookup.Epoch{Time: 841908114, Level: 2}, n: 841636025, x: 18}, {e: lookup.Epoch{Time: 862969167, Level: 20}, n: 862919955, x: 19}, {e: lookup.Epoch{Time: 887604565, Level: 21}, n: 887604564, x: 20}, {e: lookup.Epoch{Time: 863723789, Level: 10}, n: 858274530, x: 22}, {e: lookup.Epoch{Time: 851533290, Level: 10}, n: 851531385, x: 11}, {e: lookup.Epoch{Time: 826032484, Level: 14}, n: 826032484, x: 13}, {e: lookup.Epoch{Time: 819401505, Level: 7}, n: 818943526, x: 18}, {e: lookup.Epoch{Time: 800886832, Level: 12}, n: 800563106, x: 19}, {e: lookup.Epoch{Time: 780767476, Level: 10}, n: 694450997, x: 26}, {e: lookup.Epoch{Time: 789209418, Level: 15}, n: 789209417, x: 14}, {e: lookup.Epoch{Time: 816086666, Level: 9}, n: 816034646, x: 18}, {e: lookup.Epoch{Time: 835407077, Level: 21}, n: 835407076, x: 20}, {e: lookup.Epoch{Time: 846527322, Level: 20}, n: 846527321, x: 19}, {e: lookup.Epoch{Time: 850131130, Level: 19}, n: 18446744073670013406, x: 31}, {e: lookup.Epoch{Time: 842248607, Level: 24}, n: 783963834, x: 28}, {e: lookup.Epoch{Time: 816181999, Level: 2}, n: 816124867, x: 15}, {e: lookup.Epoch{Time: 806627026, Level: 17}, n: 756013427, x: 28}, {e: lookup.Epoch{Time: 826223084, Level: 4}, n: 826169865, x: 16}, {e: lookup.Epoch{Time: 835380147, Level: 21}, n: 835380147, x: 20}, {e: lookup.Epoch{Time: 860137874, Level: 3}, n: 860137782, x: 7}, {e: lookup.Epoch{Time: 860623757, Level: 8}, n: 860621582, x: 12}, {e: lookup.Epoch{Time: 875464114, Level: 24}, n: 875464114, x: 23}, {e: lookup.Epoch{Time: 853804052, Level: 6}, n: 853804051, x: 5}, {e: lookup.Epoch{Time: 864150903, Level: 14}, n: 854360673, x: 24}, {e: lookup.Epoch{Time: 850104561, Level: 23}, n: 850104561, x: 22}, {e: lookup.Epoch{Time: 878020186, Level: 24}, n: 878020186, x: 23}, {e: lookup.Epoch{Time: 900150940, Level: 8}, n: 899224760, x: 21}, {e: lookup.Epoch{Time: 869566202, Level: 2}, n: 869566199, x: 3}, {e: lookup.Epoch{Time: 851878045, Level: 5}, n: 851878045, x: 4}, {e: lookup.Epoch{Time: 824469671, Level: 12}, n: 824466504, x: 13}, {e: lookup.Epoch{Time: 819830223, Level: 9}, n: 816550241, x: 22}, {e: lookup.Epoch{Time: 813720249, Level: 20}, n: 801351581, x: 28}, {e: lookup.Epoch{Time: 831200185, Level: 20}, n: 830760165, x: 19}, {e: lookup.Epoch{Time: 838915973, Level: 9}, n: 838915972, x: 8}, {e: lookup.Epoch{Time: 812902644, Level: 5}, n: 812902644, x: 4}, {e: lookup.Epoch{Time: 812755887, Level: 3}, n: 812755887, x: 2}, {e: lookup.Epoch{Time: 822497779, Level: 8}, n: 822486000, x: 14}, {e: lookup.Epoch{Time: 832407585, Level: 9}, n: 579450238, x: 28}, {e: lookup.Epoch{Time: 799645403, Level: 23}, n: 799645403, x: 22}, {e: lookup.Epoch{Time: 827279665, Level: 2}, n: 826723872, x: 19}, {e: lookup.Epoch{Time: 846062554, Level: 6}, n: 765881119, x: 28}, {e: lookup.Epoch{Time: 855122998, Level: 6}, n: 855122978, x: 5}, {e: lookup.Epoch{Time: 841905104, Level: 4}, n: 751401236, x: 28}, {e: lookup.Epoch{Time: 857737438, Level: 12}, n: 325468127, x: 29}, {e: lookup.Epoch{Time: 838103691, Level: 18}, n: 779030823, x: 28}, {e: lookup.Epoch{Time: 841581240, Level: 22}, n: 841581239, x: 21}}
-
-// TestGetNextLevel tests the lookup.GetNextLevel function
-func TestGetNextLevel(t *testing.T) {
-
- // First, test well-known cases
- last := lookup.Epoch{
- Time: 1533799046,
- Level: 5,
- }
-
- level := lookup.GetNextLevel(last, last.Time)
- expected := uint8(4)
- if level != expected {
- t.Fatalf("Expected GetNextLevel to return %d for same-time updates at a nonzero level, got %d", expected, level)
- }
-
- level = lookup.GetNextLevel(last, last.Time+(1<<lookup.HighestLevel)+3000)
- expected = lookup.HighestLevel
- if level != expected {
- t.Fatalf("Expected GetNextLevel to return %d for updates set 2^lookup.HighestLevel seconds away, got %d", expected, level)
- }
-
- level = lookup.GetNextLevel(last, last.Time+(1<<last.Level))
- expected = last.Level
- if level != expected {
- t.Fatalf("Expected GetNextLevel to return %d for updates set 2^last.Level seconds away, got %d", expected, level)
- }
-
- last.Level = 0
- level = lookup.GetNextLevel(last, last.Time)
- expected = 0
- if level != expected {
- t.Fatalf("Expected GetNextLevel to return %d for same-time updates at a zero level, got %d", expected, level)
- }
-
- // run a batch of 100 cooked tests
- for _, s := range testGetNextLevelCases {
- level := lookup.GetNextLevel(s.e, s.n)
- if level != s.x {
- t.Fatalf("Expected GetNextLevel to return %d for last=%s when now=%d, got %d", s.x, s.e.String(), s.n, level)
- }
- }
-
-}
-
-// CookGetNextLevelTests is used to generate a deterministic
-// set of cases for TestGetNextLevel and thus "freeze" its current behavior
-func CookGetNextLevelTests(t *testing.T) {
- st := ""
- var last lookup.Epoch
- last.Time = 1000000000
- var now uint64
- var expected uint8
- for i := 0; i < 100; i++ {
- last.Time += uint64(rand.Intn(1<<26)) - (1 << 25)
- last.Level = uint8(rand.Intn(25))
- v := last.Level + uint8(rand.Intn(lookup.HighestLevel))
- if v > lookup.HighestLevel {
- v = 0
- }
- now = last.Time + uint64(rand.Intn(1<<v+1)) - (1 << v)
- expected = lookup.GetNextLevel(last, now)
- st = fmt.Sprintf("%s,testG{e:lookup.Epoch{Time:%d, Level:%d}, n:%d, x:%d}", st, last.Time, last.Level, now, expected)
- }
- fmt.Println(st)
-}
diff --git a/swarm/storage/feed/lookup/store_test.go b/swarm/storage/feed/lookup/store_test.go
deleted file mode 100644
index ed5209319..000000000
--- a/swarm/storage/feed/lookup/store_test.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package lookup_test
-
-/*
-This file contains components to mock a storage for testing
-lookup algorithms and measure the number of reads.
-*/
-
-import (
- "context"
- "fmt"
- "sync"
- "time"
-
- "github.com/ethereum/go-ethereum/swarm/log"
- "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
-)
-
-// Data is a struct to keep a value to store/retrieve during testing
-type Data struct {
- Payload uint64
- Time uint64
-}
-
-// String implements fmt.Stringer
-func (d *Data) String() string {
- return fmt.Sprintf("%d-%d", d.Payload, d.Time)
-}
-
-// DataMap is an internal map to hold the mocked storage
-type DataMap map[lookup.EpochID]*Data
-
-// StoreConfig allows specifying the simulated delays for each type of
-// read operation
-type StoreConfig struct {
- CacheReadTime time.Duration // time it takes to read from the cache
- FailedReadTime time.Duration // time it takes to acknowledge a read as failed
- SuccessfulReadTime time.Duration // time it takes to fetch data
-}
-
-// StoreCounters will track read count metrics
-type StoreCounters struct {
- reads int
- cacheHits int
- failed int
- successful int
- canceled int
- maxSimultaneous int
-}
-
-// Store simulates a store and keeps track of performance counters
-type Store struct {
- StoreConfig
- StoreCounters
- data DataMap
- cache DataMap
- lock sync.RWMutex
- activeReads int
-}
-
-// NewStore returns a new mock store ready for use
-func NewStore(config *StoreConfig) *Store {
- store := &Store{
- StoreConfig: *config,
- data: make(DataMap),
- }
-
- store.Reset()
- return store
-}
-
-// Reset resets performance counters and clears the cache
-func (s *Store) Reset() {
- s.cache = make(DataMap)
- s.StoreCounters = StoreCounters{}
-}
-
-// Put stores a value in the mock store at the given epoch
-func (s *Store) Put(epoch lookup.Epoch, value *Data) {
- log.Debug("write", "base", epoch.Base(), "level", epoch.Level, "value", value.Payload)
- s.data[epoch.ID()] = value
-}
-
-// Update runs GetNextEpoch to place the update in the appropriate epoch
-func (s *Store) Update(last lookup.Epoch, now uint64, value *Data) lookup.Epoch {
- epoch := lookup.GetNextEpoch(last, now)
- s.Put(epoch, value)
- return epoch
-}
-
-// Get retrieves data at the specified epoch, simulating a delay
-func (s *Store) Get(ctx context.Context, epoch lookup.Epoch, now uint64) (value interface{}, err error) {
- epochID := epoch.ID()
- var operationTime time.Duration
-
- defer func() { // simulate a delay according to what has actually happened
- select {
- case <-lookup.TimeAfter(operationTime):
- case <-ctx.Done():
- s.lock.Lock()
- s.canceled++
- s.lock.Unlock()
- value = nil
- err = ctx.Err()
- }
- s.lock.Lock()
- s.activeReads--
- s.lock.Unlock()
- }()
-
- s.lock.Lock()
- defer s.lock.Unlock()
- s.reads++
- s.activeReads++
- if s.activeReads > s.maxSimultaneous {
- s.maxSimultaneous = s.activeReads
- }
-
- // 1.- Simulate a cache read
- item := s.cache[epochID]
- operationTime += s.CacheReadTime
-
- if item != nil {
- s.cacheHits++
- if item.Time <= now {
- s.successful++
- return item, nil
- }
- return nil, nil
- }
-
- // 2.- simulate a full read
-
- item = s.data[epochID]
- if item != nil {
- operationTime += s.SuccessfulReadTime
- s.successful++
- s.cache[epochID] = item
- if item.Time <= now {
- return item, nil
- }
- } else {
- operationTime += s.FailedReadTime
- s.failed++
- }
- return nil, nil
-}
-
-// MakeReadFunc returns a read function suitable for the lookup algorithm, mapped
-// to this mock storage
-func (s *Store) MakeReadFunc() lookup.ReadFunc {
- return func(ctx context.Context, epoch lookup.Epoch, now uint64) (interface{}, error) {
- return s.Get(ctx, epoch, now)
- }
-}
diff --git a/swarm/storage/feed/lookup/timesim_test.go b/swarm/storage/feed/lookup/timesim_test.go
deleted file mode 100644
index 2a254188c..000000000
--- a/swarm/storage/feed/lookup/timesim_test.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package lookup_test
-
-// This file contains simple time simulation tools for testing
-// and measuring time-aware algorithms
-
-import (
- "sync"
- "time"
-)
-
-// Timer tracks information about a simulated timer
-type Timer struct {
- deadline time.Time
- signal chan time.Time
- id int
-}
-
-// Stopwatch measures simulated execution time and manages simulated timers
-type Stopwatch struct {
- t time.Time
- resolution time.Duration
- timers map[int]*Timer
- timerCounter int
- stopSignal chan struct{}
- lock sync.RWMutex
-}
-
-// NewStopwatch returns a simulated clock that ticks on `resolution` intervals
-func NewStopwatch(resolution time.Duration) *Stopwatch {
- s := &Stopwatch{
- resolution: resolution,
- }
- s.Reset()
- return s
-}
-
-// Reset clears all timers and sets the stopwatch to zero
-func (s *Stopwatch) Reset() {
- s.t = time.Time{}
- s.timers = make(map[int]*Timer)
- s.Stop()
-}
-
-// Tick advances simulated time by the stopwatch's resolution and triggers
-// all due timers
-func (s *Stopwatch) Tick() {
- s.t = s.t.Add(s.resolution)
-
- s.lock.Lock()
- defer s.lock.Unlock()
-
- for id, timer := range s.timers {
- if s.t.After(timer.deadline) || s.t.Equal(timer.deadline) {
- timer.signal <- s.t
- close(timer.signal)
- delete(s.timers, id)
- }
- }
-}
-
-// NewTimer returns a new timer that will trigger after `duration` elapses in the
-// simulation
-func (s *Stopwatch) NewTimer(duration time.Duration) <-chan time.Time {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- s.timerCounter++
- timer := &Timer{
- deadline: s.t.Add(duration),
- signal: make(chan time.Time, 1),
- id: s.timerCounter,
- }
-
- s.timers[timer.id] = timer
- return timer.signal
-}
-
-// TimeAfter returns a simulated timer factory that can replace `time.After`
-func (s *Stopwatch) TimeAfter() func(d time.Duration) <-chan time.Time {
- return func(d time.Duration) <-chan time.Time {
- return s.NewTimer(d)
- }
-}
-
-// Elapsed returns the time that has passed in the simulation
-func (s *Stopwatch) Elapsed() time.Duration {
- return s.t.Sub(time.Time{})
-}
-
-// Run starts the time simulation
-func (s *Stopwatch) Run() {
- go func() {
- stopSignal := make(chan struct{})
- s.lock.Lock()
- if s.stopSignal != nil {
- close(s.stopSignal)
- }
- s.stopSignal = stopSignal
- s.lock.Unlock()
- for {
- select {
- case <-time.After(1 * time.Millisecond):
- s.Tick()
- case <-stopSignal:
- return
- }
- }
- }()
-}
-
-// Stop stops the time simulation
-func (s *Stopwatch) Stop() {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- if s.stopSignal != nil {
- close(s.stopSignal)
- s.stopSignal = nil
- }
-}
-
-func (s *Stopwatch) Measure(measuredFunc func()) time.Duration {
- s.Reset()
- s.Run()
- defer s.Stop()
- measuredFunc()
- return s.Elapsed()
-}
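
The simulated clock above is intended to replace time.After inside the lookup package. A minimal, hypothetical test in this same lookup_test package shows the wiring, mirroring the pattern used by TestLookup:

package lookup_test

import (
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
)

// TestStopwatchSketch is a hypothetical test illustrating the time-simulation wiring.
func TestStopwatchSketch(t *testing.T) {
	stopwatch := NewStopwatch(50 * time.Millisecond) // each tick advances 50ms of simulated time
	lookup.TimeAfter = stopwatch.TimeAfter()         // route the algorithm's timers through the simulation
	defer stopwatch.Stop()

	elapsed := stopwatch.Measure(func() {
		// run the code under test here; simulated time advances while it executes
	})
	t.Logf("simulated elapsed time: %s", elapsed)
}
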
diff --git a/swarm/storage/feed/query.go b/swarm/storage/feed/query.go
deleted file mode 100644
index 8be78a952..000000000
--- a/swarm/storage/feed/query.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "fmt"
- "strconv"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
-)
-
-// Query is used to specify constraints when performing an update lookup
-// TimeLimit indicates an upper bound for the search. Set to 0 for "now"
-type Query struct {
- Feed
- Hint lookup.Epoch
- TimeLimit uint64
-}
-
-// FromValues deserializes this instance from a string key-value store,
-// which is useful for parsing query strings
-func (q *Query) FromValues(values Values) error {
- time, _ := strconv.ParseUint(values.Get("time"), 10, 64)
- q.TimeLimit = time
-
- level, _ := strconv.ParseUint(values.Get("hint.level"), 10, 32)
- q.Hint.Level = uint8(level)
- q.Hint.Time, _ = strconv.ParseUint(values.Get("hint.time"), 10, 64)
- if q.Feed.User == (common.Address{}) {
- return q.Feed.FromValues(values)
- }
- return nil
-}
-
-// AppendValues serializes this structure into the provided string key-value store,
-// which is useful for building query strings
-func (q *Query) AppendValues(values Values) {
- if q.TimeLimit != 0 {
- values.Set("time", fmt.Sprintf("%d", q.TimeLimit))
- }
- if q.Hint.Level != 0 {
- values.Set("hint.level", fmt.Sprintf("%d", q.Hint.Level))
- }
- if q.Hint.Time != 0 {
- values.Set("hint.time", fmt.Sprintf("%d", q.Hint.Time))
- }
- q.Feed.AppendValues(values)
-}
-
-// NewQuery constructs a Query structure to find updates on or before `time`.
-// If time == 0, the latest update will be looked up
-func NewQuery(feed *Feed, time uint64, hint lookup.Epoch) *Query {
- return &Query{
- TimeLimit: time,
- Feed: *feed,
- Hint: hint,
- }
-}
-
-// NewQueryLatest generates lookup parameters that look for the latest update to a feed
-func NewQueryLatest(feed *Feed, hint lookup.Epoch) *Query {
- return NewQuery(feed, 0, hint)
-}
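
A short sketch of query-string serialization, assuming net/url's url.Values as the Values implementation (its Get/Set methods match the interface) and a Feed populated with the topic and user that also appear in the query tests:

package main

import (
	"fmt"
	"net/url"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
)

func main() {
	topic, _ := feed.NewTopic("world news report, every hour", nil)
	fd := feed.Feed{
		Topic: topic,
		User:  common.HexToAddress("0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c"),
	}

	query := feed.NewQueryLatest(&fd, lookup.NoClue) // TimeLimit 0 means "latest update"
	values := make(url.Values)
	query.AppendValues(values) // fills topic=0x...&user=0x...
	fmt.Println(values.Encode())

	var decoded feed.Query
	_ = decoded.FromValues(values) // reverse direction: parse a query string back into a Query
}
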
diff --git a/swarm/storage/feed/query_test.go b/swarm/storage/feed/query_test.go
deleted file mode 100644
index 1ec45762e..000000000
--- a/swarm/storage/feed/query_test.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "testing"
-)
-
-func getTestQuery() *Query {
- id := getTestID()
- return &Query{
- TimeLimit: 5000,
- Feed: id.Feed,
- Hint: id.Epoch,
- }
-}
-
-func TestQueryValues(t *testing.T) {
- var expected = KV{"hint.level": "31", "hint.time": "1000", "time": "5000", "topic": "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000", "user": "0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c"}
-
- query := getTestQuery()
- testValueSerializer(t, query, expected)
-
-}
diff --git a/swarm/storage/feed/request.go b/swarm/storage/feed/request.go
deleted file mode 100644
index dd91a7cf4..000000000
--- a/swarm/storage/feed/request.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "bytes"
- "encoding/json"
- "hash"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/swarm/storage"
- "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
-)
-
-// Request represents a feed update message, either a request to be signed or an already signed update
-type Request struct {
- Update // actual content that will be put on the chunk, less signature
- Signature *Signature
- idAddr storage.Address // cached chunk address for the update (not serialized, for internal use)
- binaryData []byte // cached serialized data (does not get serialized again!, for efficiency/internal use)
-}
-
-// updateRequestJSON represents a JSON-serialized UpdateRequest
-type updateRequestJSON struct {
- ID
- ProtocolVersion uint8 `json:"protocolVersion"`
- Data string `json:"data,omitempty"`
- Signature string `json:"signature,omitempty"`
-}
-
-// Request layout
-// Update bytes
-// SignatureLength bytes
-const minimumSignedUpdateLength = minimumUpdateDataLength + signatureLength
-
-// NewFirstRequest returns a ready to sign request to publish a first feed update
-func NewFirstRequest(topic Topic) *Request {
-
- request := new(Request)
-
- // get the current time
- now := TimestampProvider.Now().Time
- request.Epoch = lookup.GetFirstEpoch(now)
- request.Feed.Topic = topic
- request.Header.Version = ProtocolVersion
-
- return request
-}
-
-// SetData stores the payload data the feed update will be updated with
-func (r *Request) SetData(data []byte) {
- r.data = data
- r.Signature = nil
-}
-
-// IsUpdate returns true if this request models a signed update; otherwise it is a request to be signed
-func (r *Request) IsUpdate() bool {
- return r.Signature != nil
-}
-
-// Verify checks that signatures are valid
-func (r *Request) Verify() (err error) {
- if len(r.data) == 0 {
- return NewError(ErrInvalidValue, "Update does not contain data")
- }
- if r.Signature == nil {
- return NewError(ErrInvalidSignature, "Missing signature field")
- }
-
- digest, err := r.GetDigest()
- if err != nil {
- return err
- }
-
- // get the address of the signer (which also checks that it's a valid signature)
- r.Feed.User, err = getUserAddr(digest, *r.Signature)
- if err != nil {
- return err
- }
-
- // check that the lookup information contained in the chunk matches the updateAddr (chunk search key)
- // that was used to retrieve this chunk
- // if this validation fails, someone forged a chunk.
- if !bytes.Equal(r.idAddr, r.Addr()) {
- return NewError(ErrInvalidSignature, "Signature address does not match with update user address")
- }
-
- return nil
-}
-
-// Sign computes and attaches the signature that validates the update message
-func (r *Request) Sign(signer Signer) error {
- r.Feed.User = signer.Address()
- r.binaryData = nil //invalidate serialized data
- digest, err := r.GetDigest() // computes digest and serializes into .binaryData
- if err != nil {
- return err
- }
-
- signature, err := signer.Sign(digest)
- if err != nil {
- return err
- }
-
- // Although the Signer interface returns the public address of the signer,
- // recover it from the signature to see if they match
- userAddr, err := getUserAddr(digest, signature)
- if err != nil {
- return NewError(ErrInvalidSignature, "Error verifying signature")
- }
-
- if userAddr != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign!
- return NewError(ErrInvalidSignature, "Signer address does not match update user address")
- }
-
- r.Signature = &signature
- r.idAddr = r.Addr()
- return nil
-}
-
-// GetDigest creates the feed update digest used in signatures
-// the serialized payload is cached in .binaryData
-func (r *Request) GetDigest() (result common.Hash, err error) {
- hasher := hashPool.Get().(hash.Hash)
- defer hashPool.Put(hasher)
- hasher.Reset()
- dataLength := r.Update.binaryLength()
- if r.binaryData == nil {
- r.binaryData = make([]byte, dataLength+signatureLength)
- if err := r.Update.binaryPut(r.binaryData[:dataLength]); err != nil {
- return result, err
- }
- }
- hasher.Write(r.binaryData[:dataLength]) //everything except the signature.
-
- return common.BytesToHash(hasher.Sum(nil)), nil
-}
-
-// create an update chunk.
-func (r *Request) toChunk() (storage.Chunk, error) {
-
- // Check that the update is signed and serialized
- // For efficiency, data is serialized during signature and cached in
- // the binaryData field when computing the signature digest in .GetDigest()
- if r.Signature == nil || r.binaryData == nil {
- return nil, NewError(ErrInvalidSignature, "toChunk called without a valid signature or payload data. Call .Sign() first.")
- }
-
- updateLength := r.Update.binaryLength()
-
- // signature is the last item in the chunk data
- copy(r.binaryData[updateLength:], r.Signature[:])
-
- chunk := storage.NewChunk(r.idAddr, r.binaryData)
- return chunk, nil
-}
-
-// fromChunk populates this structure from chunk data. It does not verify the signature is valid.
-func (r *Request) fromChunk(chunk storage.Chunk) error {
- // for update chunk layout see Request definition
-
- chunkdata := chunk.Data()
-
- //deserialize the feed update portion
- if err := r.Update.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil {
- return err
- }
-
- // Extract the signature
- var signature *Signature
- cursor := r.Update.binaryLength()
- sigdata := chunkdata[cursor : cursor+signatureLength]
- if len(sigdata) > 0 {
- signature = &Signature{}
- copy(signature[:], sigdata)
- }
-
- r.Signature = signature
- r.idAddr = chunk.Address()
- r.binaryData = chunkdata
-
- return nil
-
-}
-
-// FromValues deserializes this instance from a string key-value store
-// useful to parse query strings
-func (r *Request) FromValues(values Values, data []byte) error {
- signatureBytes, err := hexutil.Decode(values.Get("signature"))
- if err != nil {
- r.Signature = nil
- } else {
- if len(signatureBytes) != signatureLength {
- return NewError(ErrInvalidSignature, "Incorrect signature length")
- }
- r.Signature = new(Signature)
- copy(r.Signature[:], signatureBytes)
- }
- err = r.Update.FromValues(values, data)
- if err != nil {
- return err
- }
- r.idAddr = r.Addr()
- return err
-}
-
-// AppendValues serializes this structure into the provided string key-value store
-// useful to build query strings
-func (r *Request) AppendValues(values Values) []byte {
- if r.Signature != nil {
- values.Set("signature", hexutil.Encode(r.Signature[:]))
- }
- return r.Update.AppendValues(values)
-}
-
-// fromJSON takes an update request JSON and populates an UpdateRequest
-func (r *Request) fromJSON(j *updateRequestJSON) error {
-
- r.ID = j.ID
- r.Header.Version = j.ProtocolVersion
-
- var err error
- if j.Data != "" {
- r.data, err = hexutil.Decode(j.Data)
- if err != nil {
- return NewError(ErrInvalidValue, "Cannot decode data")
- }
- }
-
- if j.Signature != "" {
- sigBytes, err := hexutil.Decode(j.Signature)
- if err != nil || len(sigBytes) != signatureLength {
- return NewError(ErrInvalidSignature, "Cannot decode signature")
- }
- r.Signature = new(Signature)
- r.idAddr = r.Addr()
- copy(r.Signature[:], sigBytes)
- }
- return nil
-}
-
-// UnmarshalJSON takes a JSON structure stored in a byte array and populates the Request object
-// Implements json.Unmarshaler interface
-func (r *Request) UnmarshalJSON(rawData []byte) error {
- var requestJSON updateRequestJSON
- if err := json.Unmarshal(rawData, &requestJSON); err != nil {
- return err
- }
- return r.fromJSON(&requestJSON)
-}
-
-// MarshalJSON takes an update request and encodes it as a JSON structure into a byte array
-// Implements json.Marshaler interface
-func (r *Request) MarshalJSON() (rawData []byte, err error) {
- var signatureString, dataString string
- if r.Signature != nil {
- signatureString = hexutil.Encode(r.Signature[:])
- }
- if r.data != nil {
- dataString = hexutil.Encode(r.data)
- }
-
- requestJSON := &updateRequestJSON{
- ID: r.ID,
- ProtocolVersion: r.Header.Version,
- Data: dataString,
- Signature: signatureString,
- }
-
- return json.Marshal(requestJSON)
-}
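
A compact sketch of the intended publishing flow for the Request type above, assuming the caller already holds a feed.Signer implementation (defined in sign.go) and reusing the topic name from the tests below:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/storage/feed"
)

// publishFirstUpdate builds, signs and serializes the first update of a feed.
func publishFirstUpdate(signer feed.Signer, payload []byte) ([]byte, error) {
	topic, err := feed.NewTopic("a good topic name", nil)
	if err != nil {
		return nil, err
	}

	request := feed.NewFirstRequest(topic) // picks the first epoch from the current time
	request.SetData(payload)               // setting data invalidates any previous signature
	if err := request.Sign(signer); err != nil { // computes the digest and attaches the signature
		return nil, err
	}
	return request.MarshalJSON() // JSON form, e.g. to hand to a Swarm node
}

func main() {
	fmt.Println("see publishFirstUpdate for the flow")
}
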
diff --git a/swarm/storage/feed/request_test.go b/swarm/storage/feed/request_test.go
deleted file mode 100644
index b9c1381c6..000000000
--- a/swarm/storage/feed/request_test.go
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "bytes"
- "encoding/binary"
- "encoding/json"
- "fmt"
- "reflect"
- "testing"
-
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/swarm/storage"
- "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
-)
-
-func areEqualJSON(s1, s2 string) (bool, error) {
- //credit for the trick: turtlemonvh https://gist.github.com/turtlemonvh/e4f7404e28387fadb8ad275a99596f67
- var o1 interface{}
- var o2 interface{}
-
- err := json.Unmarshal([]byte(s1), &o1)
- if err != nil {
- return false, fmt.Errorf("Error unmarshalling string 1 :: %s", err.Error())
- }
- err = json.Unmarshal([]byte(s2), &o2)
- if err != nil {
- return false, fmt.Errorf("Error unmarshalling string 2 :: %s", err.Error())
- }
-
- return reflect.DeepEqual(o1, o2), nil
-}
-
-// TestEncodingDecodingUpdateRequests ensures that requests are serialized properly
-// while also checking cryptographically that only the owner of a feed can update it.
-func TestEncodingDecodingUpdateRequests(t *testing.T) {
-
- charlie := newCharlieSigner() //Charlie
- bob := newBobSigner() //Bob
-
- // Create a feed in our good guy Charlie's name
- topic, _ := NewTopic("a good topic name", nil)
- firstRequest := NewFirstRequest(topic)
- firstRequest.User = charlie.Address()
-
- // We now encode the create message to simulate we send it over the wire
- messageRawData, err := firstRequest.MarshalJSON()
- if err != nil {
- t.Fatalf("Error encoding first feed update request: %s", err)
- }
-
- // ... the message arrives and is decoded...
- var recoveredFirstRequest Request
- if err := recoveredFirstRequest.UnmarshalJSON(messageRawData); err != nil {
- t.Fatalf("Error decoding first feed update request: %s", err)
- }
-
- // ... but verification should fail because it is not signed!
- if err := recoveredFirstRequest.Verify(); err == nil {
- t.Fatal("Expected Verify to fail since the message is not signed")
- }
-
- // We now assume that the feed update was created and propagated.
-
- const expectedSignature = "0x7235b27a68372ddebcf78eba48543fa460864b0b0e99cb533fcd3664820e603312d29426dd00fb39628f5299480a69bf6e462838d78de49ce0704c754c9deb2601"
- const expectedJSON = `{"feed":{"topic":"0x6120676f6f6420746f706963206e616d65000000000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}`
-
- // Put together an unsigned update request that we will serialize and send to the signer.
- data := []byte("This hour's update: Swarm 99.0 has been released!")
- request := &Request{
- Update: Update{
- ID: ID{
- Epoch: lookup.Epoch{
- Time: 1000,
- Level: 1,
- },
- Feed: firstRequest.Update.Feed,
- },
- data: data,
- },
- }
-
- messageRawData, err = request.MarshalJSON()
- if err != nil {
- t.Fatalf("Error encoding update request: %s", err)
- }
-
- equalJSON, err := areEqualJSON(string(messageRawData), expectedJSON)
- if err != nil {
- t.Fatalf("Error decoding update request JSON: %s", err)
- }
- if !equalJSON {
- t.Fatalf("Received a different JSON message. Expected %s, got %s", expectedJSON, string(messageRawData))
- }
-
- // now the encoded message messageRawData is sent over the wire and arrives at the signer
-
- //Attempt to extract an UpdateRequest out of the encoded message
- var recoveredRequest Request
- if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil {
- t.Fatalf("Error decoding update request: %s", err)
- }
-
- //sign the request and see if it matches our predefined signature above.
- if err := recoveredRequest.Sign(charlie); err != nil {
- t.Fatalf("Error signing request: %s", err)
- }
-
- compareByteSliceToExpectedHex(t, "signature", recoveredRequest.Signature[:], expectedSignature)
-
- // mess with the signature and see what happens. To do that, we briefly decode the message as JSON
- // and overwrite the signature field.
- var j updateRequestJSON
- if err := json.Unmarshal([]byte(expectedJSON), &j); err != nil {
- t.Fatal("Error unmarshalling test json, check expectedJSON constant")
- }
- j.Signature = "Certainly not a signature"
- corruptMessage, _ := json.Marshal(j) // encode the message with the bad signature
- var corruptRequest Request
- if err = corruptRequest.UnmarshalJSON(corruptMessage); err == nil {
- t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature")
- }
-
- // Now imagine Bob wants to create an update of his own about the same feed,
- // signing a message with his private key
- if err := request.Sign(bob); err != nil {
- t.Fatalf("Error signing: %s", err)
- }
-
- // Now Bob encodes the message to send it over the wire...
- messageRawData, err = request.MarshalJSON()
- if err != nil {
- t.Fatalf("Error encoding message:%s", err)
- }
-
- // ... the message arrives at our Swarm node and is decoded.
- recoveredRequest = Request{}
- if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil {
- t.Fatalf("Error decoding message:%s", err)
- }
-
- // Before checking what happened with Bob's update, let's corrupt the signature heavily
- // and make sure Verify catches it
- savedSignature := *recoveredRequest.Signature // save the signature for later
- binary.LittleEndian.PutUint64(recoveredRequest.Signature[5:], 556845463424) // write some random data to break the signature
- if err = recoveredRequest.Verify(); err == nil {
- t.Fatal("Expected Verify to fail on corrupt signature")
- }
-
- // restore Bob's signature from the corruption
- *recoveredRequest.Signature = savedSignature
-
- // Now the signature is not corrupt
- if err = recoveredRequest.Verify(); err != nil {
- t.Fatal(err)
- }
-
- // Reuse object and sign with our friend Charlie's private key
- if err := recoveredRequest.Sign(charlie); err != nil {
- t.Fatalf("Error signing with the correct private key: %s", err)
- }
-
- // And now, Verify should work since this update now belongs to Charlie
- if err = recoveredRequest.Verify(); err != nil {
- t.Fatalf("Error verifying that Charlie, can sign a reused request object:%s", err)
- }
-
- // mess with the lookup key to make sure Verify fails:
- recoveredRequest.Time = 77999 // this will alter the lookup key
- if err = recoveredRequest.Verify(); err == nil {
- t.Fatalf("Expected Verify to fail since the lookup key has been altered")
- }
-}
-
-func getTestRequest() *Request {
- return &Request{
- Update: *getTestFeedUpdate(),
- }
-}
-
-func TestUpdateChunkSerializationErrorChecking(t *testing.T) {
-
- // Test that fromChunk fails if the chunk is too small
- var r Request
- if err := r.fromChunk(storage.NewChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1+signatureLength))); err == nil {
- t.Fatalf("Expected request.fromChunk to fail when chunkData contains less than %d bytes", minimumUpdateDataLength)
- }
-
- r = *getTestRequest()
-
- _, err := r.toChunk()
- if err == nil {
- t.Fatal("Expected request.toChunk to fail when there is no data")
- }
- r.data = []byte("Al bien hacer jamás le falta premio") // put some arbitrary length data
- _, err = r.toChunk()
- if err == nil {
- t.Fatal("expected request.toChunk to fail when there is no signature")
- }
-
- charlie := newCharlieSigner()
- if err := r.Sign(charlie); err != nil {
- t.Fatalf("error signing:%s", err)
- }
-
- chunk, err := r.toChunk()
- if err != nil {
- t.Fatalf("error creating update chunk:%s", err)
- }
-
- compareByteSliceToExpectedHex(t, "chunk", chunk.Data(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce80300000000001f416c206269656e206861636572206a616dc3a173206c652066616c7461207072656d696f9896df5937e64e51a7994479ff3fe0ed790d539b9b3e85e93c0014a8a64374f23603c79d16e99b50a757896d3816d7022ac594ad1415679a9b164afb2e5926d801")
-
- var recovered Request
- recovered.fromChunk(chunk)
- if !reflect.DeepEqual(recovered, r) {
- t.Fatal("Expected recovered feed update request to equal the original one")
- }
-}
-
-// check that signature address matches update signer address
-func TestReverse(t *testing.T) {
-
- epoch := lookup.Epoch{
- Time: 7888,
- Level: 6,
- }
-
- // make fake timeProvider
- timeProvider := &fakeTimeProvider{
- currentTime: startTime.Time,
- }
-
- // signer containing private key
- signer := newAliceSigner()
-
- // set up rpc and create feeds handler
- _, _, teardownTest, err := setupTest(timeProvider, signer)
- if err != nil {
- t.Fatal(err)
- }
- defer teardownTest()
-
- topic, _ := NewTopic("Cervantes quotes", nil)
- fd := Feed{
- Topic: topic,
- User: signer.Address(),
- }
-
- data := []byte("Donde una puerta se cierra, otra se abre")
-
- request := new(Request)
- request.Feed = fd
- request.Epoch = epoch
- request.data = data
-
- // generate a chunk key for this request
- key := request.Addr()
-
- if err = request.Sign(signer); err != nil {
- t.Fatal(err)
- }
-
- chunk, err := request.toChunk()
- if err != nil {
- t.Fatal(err)
- }
-
- // check that we can recover the owner account from the update chunk's signature
- var checkUpdate Request
- if err := checkUpdate.fromChunk(chunk); err != nil {
- t.Fatal(err)
- }
- checkdigest, err := checkUpdate.GetDigest()
- if err != nil {
- t.Fatal(err)
- }
- recoveredAddr, err := getUserAddr(checkdigest, *checkUpdate.Signature)
- if err != nil {
- t.Fatalf("Retrieve address from signature fail: %v", err)
- }
- originalAddr := crypto.PubkeyToAddress(signer.PrivKey.PublicKey)
-
- // check that the metadata retrieved from the chunk matches what we gave it
- if recoveredAddr != originalAddr {
- t.Fatalf("addresses dont match: %x != %x", originalAddr, recoveredAddr)
- }
-
- if !bytes.Equal(key[:], chunk.Address()[:]) {
- t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Address())
- }
- if epoch != checkUpdate.Epoch {
- t.Fatalf("Expected epoch to be '%s', was '%s'", epoch.String(), checkUpdate.Epoch.String())
- }
- if !bytes.Equal(data, checkUpdate.data) {
- t.Fatalf("Expected data '%x', was '%x'", data, checkUpdate.data)
- }
-}
diff --git a/swarm/storage/feed/sign.go b/swarm/storage/feed/sign.go
deleted file mode 100644
index 5f0ea0b33..000000000
--- a/swarm/storage/feed/sign.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "crypto/ecdsa"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/crypto"
-)
-
-const signatureLength = 65
-
-// Signature is an alias for a static byte array with the size of a signature
-type Signature [signatureLength]byte
-
-// Signer signs feed update payloads
-type Signer interface {
- Sign(common.Hash) (Signature, error)
- Address() common.Address
-}
-
-// GenericSigner implements the Signer interface
-// It is the vanilla signer that probably should be used in most cases
-type GenericSigner struct {
- PrivKey *ecdsa.PrivateKey
- address common.Address
-}
-
-// NewGenericSigner builds a signer that will sign everything with the provided private key
-func NewGenericSigner(privKey *ecdsa.PrivateKey) *GenericSigner {
- return &GenericSigner{
- PrivKey: privKey,
- address: crypto.PubkeyToAddress(privKey.PublicKey),
- }
-}
-
-// Sign signs the supplied data
-// It wraps the ethereum crypto.Sign() method
-func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error) {
- signaturebytes, err := crypto.Sign(data.Bytes(), s.PrivKey)
- if err != nil {
- return
- }
- copy(signature[:], signaturebytes)
- return
-}
-
- // Address returns the Ethereum address derived from the signer's private key
-func (s *GenericSigner) Address() common.Address {
- return s.address
-}
-
-// getUserAddr extracts the address of the feed update signer
-func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) {
- pub, err := crypto.SigToPub(digest.Bytes(), signature[:])
- if err != nil {
- return common.Address{}, err
- }
- return crypto.PubkeyToAddress(*pub), nil
-}
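
A minimal sketch (hypothetical external caller; the digest value is illustrative) of how GenericSigner is meant to be used, and of recovering the signing address the same way getUserAddr does:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
)

func main() {
	// Create a signer from a fresh private key.
	privKey, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	signer := feed.NewGenericSigner(privKey)

	// Sign a 32-byte digest (feed updates sign the update digest).
	digest := crypto.Keccak256Hash([]byte("some update digest"))
	sig, err := signer.Sign(digest)
	if err != nil {
		panic(err)
	}

	// Recover the signing address from the signature, as getUserAddr does internally.
	pub, err := crypto.SigToPub(digest.Bytes(), sig[:])
	if err != nil {
		panic(err)
	}
	fmt.Println(crypto.PubkeyToAddress(*pub) == signer.Address()) // true
}
```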
diff --git a/swarm/storage/feed/testutil.go b/swarm/storage/feed/testutil.go
deleted file mode 100644
index db2d989e1..000000000
--- a/swarm/storage/feed/testutil.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "context"
- "path/filepath"
- "sync"
-
- "github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/swarm/chunk"
- "github.com/ethereum/go-ethereum/swarm/storage"
- "github.com/ethereum/go-ethereum/swarm/storage/localstore"
-)
-
-const (
- testDbDirName = "feeds"
-)
-
-type TestHandler struct {
- *Handler
-}
-
-func (t *TestHandler) Close() {
- t.chunkStore.Close()
-}
-
-type mockNetFetcher struct{}
-
-func (m *mockNetFetcher) Request(hopCount uint8) {
-}
-func (m *mockNetFetcher) Offer(source *enode.ID) {
-}
-
-func newFakeNetFetcher(context.Context, storage.Address, *sync.Map) storage.NetFetcher {
- return &mockNetFetcher{}
-}
-
- // NewTestHandler creates a Handler object to be used for testing purposes.
-func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) {
- path := filepath.Join(datadir, testDbDirName)
- fh := NewHandler(params)
-
- db, err := localstore.New(filepath.Join(path, "chunks"), make([]byte, 32), nil)
- if err != nil {
- return nil, err
- }
-
- localStore := chunk.NewValidatorStore(db, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm)), fh)
-
- netStore, err := storage.NewNetStore(localStore, nil)
- if err != nil {
- return nil, err
- }
- netStore.NewNetFetcherFunc = newFakeNetFetcher
- fh.SetStore(netStore)
- return &TestHandler{fh}, nil
-}
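
A minimal usage sketch (assumed test code; the zero-valued &feed.HandlerParams{} and the temporary-directory handling are assumptions, since HandlerParams is defined elsewhere in the package):

```go
package feed_test

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/ethereum/go-ethereum/swarm/storage/feed"
)

func TestHandlerSetup(t *testing.T) {
	datadir, err := ioutil.TempDir("", "feed-handler-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(datadir)

	// NewTestHandler wires the handler to a local chunk store backed by datadir.
	fh, err := feed.NewTestHandler(datadir, &feed.HandlerParams{})
	if err != nil {
		t.Fatal(err)
	}
	defer fh.Close()

	// ... exercise fh against the local store here
}
```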
diff --git a/swarm/storage/feed/timestampprovider.go b/swarm/storage/feed/timestampprovider.go
deleted file mode 100644
index fb60cea9c..000000000
--- a/swarm/storage/feed/timestampprovider.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "encoding/json"
- "time"
-)
-
-// TimestampProvider sets the time source of the feeds package
-var TimestampProvider timestampProvider = NewDefaultTimestampProvider()
-
-// Timestamp encodes a point in time as a Unix epoch
-type Timestamp struct {
- Time uint64 `json:"time"` // Unix epoch timestamp, in seconds
-}
-
-// timestampProvider interface describes a source of timestamp information
-type timestampProvider interface {
- Now() Timestamp // returns the current timestamp information
-}
-
- // UnmarshalJSON implements the json.Unmarshaler interface
-func (t *Timestamp) UnmarshalJSON(data []byte) error {
- return json.Unmarshal(data, &t.Time)
-}
-
-// MarshalJSON implements the json.Marshaller interface
-func (t *Timestamp) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.Time)
-}
-
- // DefaultTimestampProvider is a timestamp provider that uses the system clock
- // as its time source
-type DefaultTimestampProvider struct {
-}
-
-// NewDefaultTimestampProvider creates a system clock based timestamp provider
-func NewDefaultTimestampProvider() *DefaultTimestampProvider {
- return &DefaultTimestampProvider{}
-}
-
-// Now returns the current time according to this provider
-func (dtp *DefaultTimestampProvider) Now() Timestamp {
- return Timestamp{
- Time: uint64(time.Now().Unix()),
- }
-}
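
A minimal sketch, assuming an external package, of swapping in a deterministic time source; the fixedClock type is hypothetical, but any value with a Now() Timestamp method can be assigned to TimestampProvider:

```go
package feedtest

import "github.com/ethereum/go-ethereum/swarm/storage/feed"

// fixedClock always reports the same Unix time, making feed epochs deterministic in tests.
type fixedClock struct {
	now uint64 // Unix time, in seconds
}

// Now returns the canned timestamp instead of reading the system clock.
func (c fixedClock) Now() feed.Timestamp {
	return feed.Timestamp{Time: c.now}
}

func init() {
	// Replace the package-level time source with the canned clock.
	feed.TimestampProvider = fixedClock{now: 1000}
}
```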
diff --git a/swarm/storage/feed/topic.go b/swarm/storage/feed/topic.go
deleted file mode 100644
index 43a7b4ba4..000000000
--- a/swarm/storage/feed/topic.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
-
- "github.com/ethereum/go-ethereum/common/bitutil"
- "github.com/ethereum/go-ethereum/common/hexutil"
- "github.com/ethereum/go-ethereum/swarm/storage"
-)
-
-// TopicLength establishes the max length of a topic string
-const TopicLength = storage.AddressLength
-
-// Topic represents what a feed is about
-type Topic [TopicLength]byte
-
-// ErrTopicTooLong is returned when creating a topic with a name/related content too long
-var ErrTopicTooLong = fmt.Errorf("Topic is too long. Max length is %d", TopicLength)
-
-// NewTopic creates a new topic from a provided name and "related content" byte array,
-// merging the two together.
- // If relatedContent or name is longer than TopicLength, it is truncated and an error is returned
-// name can be an empty string
-// relatedContent can be nil
-func NewTopic(name string, relatedContent []byte) (topic Topic, err error) {
- if relatedContent != nil {
- contentLength := len(relatedContent)
- if contentLength > TopicLength {
- contentLength = TopicLength
- err = ErrTopicTooLong
- }
- copy(topic[:], relatedContent[:contentLength])
- }
- nameBytes := []byte(name)
- nameLength := len(nameBytes)
- if nameLength > TopicLength {
- nameLength = TopicLength
- err = ErrTopicTooLong
- }
- bitutil.XORBytes(topic[:], topic[:], nameBytes[:nameLength])
- return topic, err
-}
-
- // Hex will return the topic encoded as a hex string
-func (t *Topic) Hex() string {
- return hexutil.Encode(t[:])
-}
-
-// FromHex will parse a hex string into this Topic instance
-func (t *Topic) FromHex(hex string) error {
- bytes, err := hexutil.Decode(hex)
- if err != nil || len(bytes) != len(t) {
- return NewErrorf(ErrInvalidValue, "Cannot decode topic")
- }
- copy(t[:], bytes)
- return nil
-}
-
-// Name will try to extract the topic name out of the Topic
-func (t *Topic) Name(relatedContent []byte) string {
- nameBytes := *t
- if relatedContent != nil {
- contentLength := len(relatedContent)
- if contentLength > TopicLength {
- contentLength = TopicLength
- }
- bitutil.XORBytes(nameBytes[:], t[:], relatedContent[:contentLength])
- }
- z := bytes.IndexByte(nameBytes[:], 0)
- if z < 0 {
- z = TopicLength
- }
- return string(nameBytes[:z])
-
-}
-
- // UnmarshalJSON implements the json.Unmarshaler interface
-func (t *Topic) UnmarshalJSON(data []byte) error {
- var hex string
- json.Unmarshal(data, &hex)
- return t.FromHex(hex)
-}
-
-// MarshalJSON implements the json.Marshaller interface
-func (t *Topic) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.Hex())
-}
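
A brief sketch (hypothetical caller; the values are illustrative) of the XOR construction above: the original name is only recoverable when Name is given the same relatedContent that NewTopic was built with:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/storage/feed"
)

func main() {
	related := []byte("0123456789abcdef0123456789abcdef") // 32 bytes of related content
	topic, err := feed.NewTopic("weather", related)
	if err != nil {
		panic(err)
	}

	fmt.Println(topic.Name(related)) // "weather"
	fmt.Println(topic.Name(nil))     // not the original name: the XOR mask is missing
}
```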
diff --git a/swarm/storage/feed/topic_test.go b/swarm/storage/feed/topic_test.go
deleted file mode 100644
index 0403204f7..000000000
--- a/swarm/storage/feed/topic_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package feed
-
-import (
- "testing"
-
- "github.com/ethereum/go-ethereum/common/hexutil"
-)
-
-func TestTopic(t *testing.T) {
- related, _ := hexutil.Decode("0xabcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789")
- topicName := "test-topic"
- topic, _ := NewTopic(topicName, related)
- hex := topic.Hex()
- expectedHex := "0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789"
- if hex != expectedHex {
- t.Fatalf("Expected %s, got %s", expectedHex, hex)
- }
-
- var topic2 Topic
- topic2.FromHex(hex)
- if topic2 != topic {
- t.Fatal("Expected recovered topic to be equal to original one")
- }
-
- if topic2.Name(related) != topicName {
- t.Fatal("Retrieved name does not match")
- }
-
- bytes, err := topic2.MarshalJSON()
- if err != nil {
- t.Fatal(err)
- }
- expectedJSON := `"0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789"`
- equal, err := areEqualJSON(expectedJSON, string(bytes))
- if err != nil {
- t.Fatal(err)
- }
- if !equal {
- t.Fatalf("Expected JSON to be %s, got %s", expectedJSON, string(bytes))
- }
-
- err = topic2.UnmarshalJSON(bytes)
- if err != nil {
- t.Fatal(err)
- }
- if topic2 != topic {
- t.Fatal("Expected recovered topic to be equal to original one")
- }
-
-}
diff --git a/swarm/storage/feed/update.go b/swarm/storage/feed/update.go
deleted file mode 100644
index 21c004ca4..000000000
--- a/swarm/storage/feed/update.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "fmt"
- "strconv"
-
- "github.com/ethereum/go-ethereum/swarm/chunk"
-)
-
-// ProtocolVersion defines the current version of the protocol that will be included in each update message
-const ProtocolVersion uint8 = 0
-
-const headerLength = 8
-
- // Header defines an update message header, including a protocol version byte
-type Header struct {
- Version uint8 // Protocol version
- Padding [headerLength - 1]uint8 // reserved for future use
-}
-
-// Update encapsulates the information sent as part of a feed update
-type Update struct {
- Header Header //
- ID // Feed Update identifying information
- data []byte // actual data payload
-}
-
-const minimumUpdateDataLength = idLength + headerLength + 1
-
- // MaxUpdateDataLength indicates the maximum payload size for a feed update
-const MaxUpdateDataLength = chunk.DefaultSize - signatureLength - idLength - headerLength
-
-// binaryPut serializes the feed update information into the given slice
-func (r *Update) binaryPut(serializedData []byte) error {
- datalength := len(r.data)
- if datalength == 0 {
- return NewError(ErrInvalidValue, "a feed update must contain data")
- }
-
- if datalength > MaxUpdateDataLength {
- return NewErrorf(ErrInvalidValue, "feed update data is too big (length=%d). Max length=%d", datalength, MaxUpdateDataLength)
- }
-
- if len(serializedData) != r.binaryLength() {
- return NewErrorf(ErrInvalidValue, "slice passed to putBinary must be of exact size. Expected %d bytes", r.binaryLength())
- }
-
- var cursor int
- // serialize Header
- serializedData[cursor] = r.Header.Version
- copy(serializedData[cursor+1:headerLength], r.Header.Padding[:headerLength-1])
- cursor += headerLength
-
- // serialize ID
- if err := r.ID.binaryPut(serializedData[cursor : cursor+idLength]); err != nil {
- return err
- }
- cursor += idLength
-
- // add the data
- copy(serializedData[cursor:], r.data)
- cursor += datalength
-
- return nil
-}
-
-// binaryLength returns the expected number of bytes this structure will take to encode
-func (r *Update) binaryLength() int {
- return idLength + headerLength + len(r.data)
-}
-
-// binaryGet populates this instance from the information contained in the passed byte slice
-func (r *Update) binaryGet(serializedData []byte) error {
- if len(serializedData) < minimumUpdateDataLength {
- return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a feed update chunk", minimumUpdateDataLength)
- }
- dataLength := len(serializedData) - idLength - headerLength
- // at this point we can be satisfied that we have the correct data length to read
-
- var cursor int
-
- // deserialize Header
- r.Header.Version = serializedData[cursor] // extract the protocol version
- copy(r.Header.Padding[:headerLength-1], serializedData[cursor+1:headerLength]) // extract the padding
- cursor += headerLength
-
- if err := r.ID.binaryGet(serializedData[cursor : cursor+idLength]); err != nil {
- return err
- }
- cursor += idLength
-
- data := serializedData[cursor : cursor+dataLength]
- cursor += dataLength
-
- // now that all checks have passed, copy data into structure
- r.data = make([]byte, dataLength)
- copy(r.data, data)
-
- return nil
-
-}
-
-// FromValues deserializes this instance from a string key-value store
-// useful to parse query strings
-func (r *Update) FromValues(values Values, data []byte) error {
- r.data = data
- version, _ := strconv.ParseUint(values.Get("protocolVersion"), 10, 32)
- r.Header.Version = uint8(version)
- return r.ID.FromValues(values)
-}
-
-// AppendValues serializes this structure into the provided string key-value store
-// useful to build query strings
-func (r *Update) AppendValues(values Values) []byte {
- r.ID.AppendValues(values)
- values.Set("protocolVersion", fmt.Sprintf("%d", r.Header.Version))
- return r.data
-}
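
A minimal in-package sketch (hypothetical helper, since binaryPut/binaryGet are unexported) of the serialization round trip these methods provide; update_test.go below exercises the same path through shared test helpers:

```go
package feed

import "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"

// exampleUpdateRoundTrip serializes an Update and reads it back.
func exampleUpdateRoundTrip() (Update, error) {
	topic, _ := NewTopic("status", nil)
	update := Update{
		ID: ID{
			Feed:  Feed{Topic: topic},
			Epoch: lookup.Epoch{Time: 1000, Level: 1},
		},
		data: []byte("hello"),
	}

	// Header + ID + payload, exactly binaryLength() bytes.
	buf := make([]byte, update.binaryLength())
	if err := update.binaryPut(buf); err != nil {
		return Update{}, err
	}

	var recovered Update
	if err := recovered.binaryGet(buf); err != nil {
		return Update{}, err
	}
	return recovered, nil
}
```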
diff --git a/swarm/storage/feed/update_test.go b/swarm/storage/feed/update_test.go
deleted file mode 100644
index e4e0963e9..000000000
--- a/swarm/storage/feed/update_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package feed
-
-import (
- "testing"
-)
-
-func getTestFeedUpdate() *Update {
- return &Update{
- ID: *getTestID(),
- data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"),
- }
-}
-
-func TestUpdateSerializer(t *testing.T) {
- testBinarySerializerRecovery(t, getTestFeedUpdate(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce80300000000001f456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f")
-}
-
-func TestUpdateLengthCheck(t *testing.T) {
- testBinarySerializerLengthCheck(t, getTestFeedUpdate())
- // Test that binaryPut fails if the update is too big
- update := getTestFeedUpdate()
- update.data = make([]byte, MaxUpdateDataLength+100)
- serialized := make([]byte, update.binaryLength())
- if err := update.binaryPut(serialized); err == nil {
- t.Fatal("Expected update.binaryPut to fail since update is too big")
- }
-
- // Test that binaryPut fails if data is empty or nil
- update.data = nil
- serialized = make([]byte, update.binaryLength())
- if err := update.binaryPut(serialized); err == nil {
- t.Fatal("Expected update.binaryPut to fail since data is empty")
- }
-}