From f1e86ad9cf0470051b7106ee83794d27276b528d Mon Sep 17 00:00:00 2001 From: Javier Peletier Date: Sat, 29 Sep 2018 01:00:28 +0200 Subject: swarm/storage/mru: Renamed all identifiers to Feeds --- swarm/api/api.go | 14 ++--- swarm/api/client/client_test.go | 2 +- swarm/api/http/server.go | 8 +-- swarm/api/http/server_test.go | 4 +- swarm/api/manifest.go | 4 +- swarm/network/README.md | 2 +- swarm/storage/mru/cacheentry.go | 8 +-- swarm/storage/mru/doc.go | 2 +- swarm/storage/mru/handler.go | 112 +++++++++++++++++++------------------- swarm/storage/mru/handler_test.go | 78 +++++++++++++------------- swarm/storage/mru/id.go | 24 ++++---- swarm/storage/mru/id_test.go | 6 +- swarm/storage/mru/query.go | 16 +++--- swarm/storage/mru/query_test.go | 6 +- swarm/storage/mru/request.go | 34 ++++++------ swarm/storage/mru/request_test.go | 38 ++++++------- swarm/storage/mru/testutil.go | 12 ++-- swarm/storage/mru/topic.go | 2 +- swarm/storage/mru/update.go | 20 +++---- swarm/storage/mru/update_test.go | 18 +++--- swarm/storage/mru/view.go | 40 +++++++------- swarm/storage/mru/view_test.go | 12 ++-- 22 files changed, 231 insertions(+), 231 deletions(-) (limited to 'swarm') diff --git a/swarm/api/api.go b/swarm/api/api.go index 7b8f04c13..e6b676dba 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -956,14 +956,14 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver return addr, manifestEntryMap, nil } -// ResourceLookup finds mutable resource updates at specific periods and versions +// ResourceLookup finds Swarm Feeds at specific periods and versions func (a *API) ResourceLookup(ctx context.Context, query *mru.Query) ([]byte, error) { _, err := a.resource.Lookup(ctx, query) if err != nil { return nil, err } var data []byte - _, data, err = a.resource.GetContent(&query.View) + _, data, err = a.resource.GetContent(&query.Feed) if err != nil { return nil, err } @@ -971,7 +971,7 @@ func (a *API) ResourceLookup(ctx context.Context, query *mru.Query) ([]byte, err } // ResourceNewRequest creates a Request object to update a specific mutable resource -func (a *API) ResourceNewRequest(ctx context.Context, view *mru.View) (*mru.Request, error) { +func (a *API) ResourceNewRequest(ctx context.Context, view *mru.Feed) (*mru.Request, error) { return a.resource.NewRequest(ctx, view) } @@ -993,7 +993,7 @@ var ErrCannotLoadResourceManifest = errors.New("Cannot load resource manifest") var ErrNotAResourceManifest = errors.New("Not a resource manifest") // ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the Resource's view ID. -func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (*mru.View, error) { +func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (*mru.Feed, error) { trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt) if err != nil { return nil, ErrCannotLoadResourceManifest @@ -1016,8 +1016,8 @@ var ErrCannotResolveResourceView = errors.New("Cannot resolve resource view") // ResolveResourceView attempts to extract View information out of the manifest, if provided // If not, it attempts to extract the View out of a set of key-value pairs -func (a *API) ResolveResourceView(ctx context.Context, uri *URI, values mru.Values) (*mru.View, error) { - var view *mru.View +func (a *API) ResolveResourceView(ctx context.Context, uri *URI, values mru.Values) (*mru.Feed, error) { + var view *mru.Feed var err error if uri.Addr != "" { // resolve the content key. 
@@ -1036,7 +1036,7 @@ func (a *API) ResolveResourceView(ctx context.Context, uri *URI, values mru.Valu } log.Debug("handle.get.resource: resolved", "manifestkey", manifestAddr, "view", view.Hex()) } else { - var v mru.View + var v mru.Feed if err := v.FromValues(values); err != nil { return nil, ErrCannotResolveResourceView diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go index 02980de1d..c6ad0237b 100644 --- a/swarm/api/client/client_test.go +++ b/swarm/api/client/client_test.go @@ -508,7 +508,7 @@ func TestClientCreateUpdateResource(t *testing.T) { // now try retrieving resource without a manifest - view := &mru.View{ + view := &mru.Feed{ Topic: topic, User: signer.Address(), } diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index 5ec69373d..4c19dd6df 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -517,7 +517,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { } var updateRequest mru.Request - updateRequest.View = *view + updateRequest.Feed = *view query := r.URL.Query() if err := updateRequest.FromValues(query, body); err != nil { // decodes request from query parameters @@ -544,7 +544,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { // we create a manifest so we can retrieve the resource with bzz:// later // this manifest has a special "resource type" manifest, and saves the // resource view ID used to retrieve the resource later - m, err := s.api.NewResourceManifest(r.Context(), &updateRequest.View) + m, err := s.api.NewResourceManifest(r.Context(), &updateRequest.Feed) if err != nil { RespondError(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError) return @@ -563,7 +563,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { } } -// Retrieve mutable resource updates: +// Retrieve Swarm Feeds: // bzz-resource:// - get latest update // bzz-resource:///?period=n - get latest update on period n // bzz-resource:///?period=n&version=m - get update version m of period n @@ -606,7 +606,7 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *http.Request) { return } - lookupParams := &mru.Query{View: *view} + lookupParams := &mru.Query{Feed: *view} if err = lookupParams.FromValues(r.URL.Query()); err != nil { // parse period, version RespondError(w, r, fmt.Sprintf("invalid mutable resource request:%s", err), http.StatusBadRequest) return diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index 817519a30..0855d30a2 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -206,7 +206,7 @@ func TestBzzResourceMultihash(t *testing.T) { } } -// Test resource updates using the raw update methods +// Test Swarm Feeds using the raw update methods func TestBzzResource(t *testing.T) { srv := testutil.NewTestSwarmServer(t, serverFunc, nil) signer, _ := newTestSigner() @@ -406,7 +406,7 @@ func TestBzzResource(t *testing.T) { // test manifest-less queries log.Info("get first update in update1Timestamp via direct query") - query := mru.NewQuery(&updateRequest.View, update1Timestamp, lookup.NoClue) + query := mru.NewQuery(&updateRequest.Feed, update1Timestamp, lookup.NoClue) urlq, err := url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) if err != nil { diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index 06be7323e..9672ca540 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -56,7 +56,7 @@ type ManifestEntry 
struct { ModTime time.Time `json:"mod_time,omitempty"` Status int `json:"status,omitempty"` Access *AccessEntry `json:"access,omitempty"` - ResourceView *mru.View `json:"resourceView,omitempty"` + ResourceView *mru.Feed `json:"resourceView,omitempty"` } // ManifestList represents the result of listing files in a manifest @@ -82,7 +82,7 @@ func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, // Manifest hack for supporting Mutable Resource Updates from the bzz: scheme // see swarm/api/api.go:API.Get() for more information -func (a *API) NewResourceManifest(ctx context.Context, view *mru.View) (storage.Address, error) { +func (a *API) NewResourceManifest(ctx context.Context, view *mru.Feed) (storage.Address, error) { var manifest Manifest entry := ManifestEntry{ ResourceView: view, diff --git a/swarm/network/README.md b/swarm/network/README.md index e9c9d30d0..0955fe94e 100644 --- a/swarm/network/README.md +++ b/swarm/network/README.md @@ -37,7 +37,7 @@ Using the streamer logic, various stream types are easy to implement: * live session syncing * historical syncing * simple retrieve requests and deliveries -* mutable resource updates streams +* Swarm Feeds streams * receipting for finger pointing ## Syncing diff --git a/swarm/storage/mru/cacheentry.go b/swarm/storage/mru/cacheentry.go index 280331f77..024ed61c3 100644 --- a/swarm/storage/mru/cacheentry.go +++ b/swarm/storage/mru/cacheentry.go @@ -26,23 +26,23 @@ import ( const ( hasherCount = 8 - resourceHashAlgorithm = storage.SHA3Hash + feedsHashAlgorithm = storage.SHA3Hash defaultRetrieveTimeout = 100 * time.Millisecond ) // cacheEntry caches resource data and the metadata of its root chunk. type cacheEntry struct { - ResourceUpdate + Update *bytes.Reader lastKey storage.Address } // implements storage.LazySectionReader func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) { - return int64(len(r.ResourceUpdate.data)), nil + return int64(len(r.Update.data)), nil } //returns the resource's topic func (r *cacheEntry) Topic() Topic { - return r.View.Topic + return r.Feed.Topic } diff --git a/swarm/storage/mru/doc.go b/swarm/storage/mru/doc.go index 19330e0c1..a9ea2076c 100644 --- a/swarm/storage/mru/doc.go +++ b/swarm/storage/mru/doc.go @@ -1,5 +1,5 @@ /* -Package mru defines Mutable resource updates. +Package feeds defines Swarm Feeds. A Mutable Resource is an entity which allows updates to a resource without resorting to ENS on each update. 
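
For orientation, here is a minimal round-trip sketch using the identifiers this patch introduces (Feed, NewFirstRequest, Handler.Update, Handler.Lookup, Handler.GetContent, NewQueryLatest). It is not part of the diff: it is written as if it sat next to the package's own tests, and the configured *Handler (e.g. from NewTestHandler), the Signer, the context and the topic string are assumptions for illustration only.

    // Sketch only: fh is a configured *Handler (as produced by NewTestHandler, which
    // also calls SetStore) and signer is a Signer, both set up as in the package tests.
    // lookup refers to the swarm/storage/mru/lookup package used throughout this patch.
    func exampleFeedRoundTrip(ctx context.Context, fh *Handler, signer Signer) ([]byte, error) {
        topic, err := NewTopic("example topic", nil) // arbitrary illustrative topic name
        if err != nil {
            return nil, err
        }
        // A Feed is identified by a Topic plus the publishing user's address.
        feed := Feed{Topic: topic, User: signer.Address()}

        // Publish the first update of this feed.
        request := NewFirstRequest(feed.Topic)
        request.SetData([]byte("hello, Swarm Feeds"))
        if err := request.Sign(signer); err != nil {
            return nil, err
        }
        if _, err := fh.Update(ctx, request); err != nil {
            return nil, err
        }

        // Read it back: Lookup locates the latest update and fills the handler's
        // cache, then GetContent returns the cached payload. Only the Feed
        // (Topic + user address) is needed to find it.
        if _, err := fh.Lookup(ctx, NewQueryLatest(&feed, lookup.NoClue)); err != nil {
            return nil, err
        }
        _, data, err := fh.GetContent(&feed)
        return data, err
    }
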
diff --git a/swarm/storage/mru/handler.go b/swarm/storage/mru/handler.go index 3e7654795..034934d05 100644 --- a/swarm/storage/mru/handler.go +++ b/swarm/storage/mru/handler.go @@ -34,8 +34,8 @@ import ( type Handler struct { chunkStore *storage.NetStore HashSize int - resources map[uint64]*cacheEntry - resourceLock sync.RWMutex + cache map[uint64]*cacheEntry + cacheLock sync.RWMutex storeTimeout time.Duration queryMaxPeriods uint32 } @@ -52,26 +52,26 @@ var hashPool sync.Pool func init() { hashPool = sync.Pool{ New: func() interface{} { - return storage.MakeHashFunc(resourceHashAlgorithm)() + return storage.MakeHashFunc(feedsHashAlgorithm)() }, } } // NewHandler creates a new Mutable Resource API func NewHandler(params *HandlerParams) *Handler { - rh := &Handler{ - resources: make(map[uint64]*cacheEntry), + fh := &Handler{ + cache: make(map[uint64]*cacheEntry), } for i := 0; i < hasherCount; i++ { - hashfunc := storage.MakeHashFunc(resourceHashAlgorithm)() - if rh.HashSize == 0 { - rh.HashSize = hashfunc.Size() + hashfunc := storage.MakeHashFunc(feedsHashAlgorithm)() + if fh.HashSize == 0 { + fh.HashSize = hashfunc.Size() } hashPool.Put(hashfunc) } - return rh + return fh } // SetStore sets the store backend for the Mutable Resource API @@ -95,7 +95,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { // First, deserialize the chunk var r Request if err := r.fromChunk(chunkAddr, data); err != nil { - log.Debug("Invalid resource chunk", "addr", chunkAddr.Hex(), "err", err.Error()) + log.Debug("Invalid feed update chunk", "addr", chunkAddr.Hex(), "err", err.Error()) return false } @@ -103,7 +103,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { // If it fails, it means either the signature is not valid, data is corrupted // or someone is trying to update someone else's resource. if err := r.Verify(); err != nil { - log.Debug("Invalid signature", "err", err) + log.Debug("Invalid feed update signature", "err", err) return false } @@ -111,32 +111,32 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { } // GetContent retrieves the data payload of the last synced update of the Mutable Resource -func (h *Handler) GetContent(view *View) (storage.Address, []byte, error) { - if view == nil { +func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) { + if feed == nil { return nil, nil, NewError(ErrInvalidValue, "view is nil") } - rsrc := h.get(view) - if rsrc == nil { + feedUpdate := h.get(feed) + if feedUpdate == nil { return nil, nil, NewError(ErrNotFound, "resource does not exist") } - return rsrc.lastKey, rsrc.data, nil + return feedUpdate.lastKey, feedUpdate.data, nil } // NewRequest prepares a Request structure with all the necessary information to // just add the desired data and sign it. 
// The resulting structure can then be signed and passed to Handler.Update to be verified and sent -func (h *Handler) NewRequest(ctx context.Context, view *View) (request *Request, err error) { - if view == nil { - return nil, NewError(ErrInvalidValue, "view cannot be nil") +func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request, err error) { + if feed == nil { + return nil, NewError(ErrInvalidValue, "feed cannot be nil") } now := TimestampProvider.Now().Time request = new(Request) request.Header.Version = ProtocolVersion - query := NewQueryLatest(view, lookup.NoClue) + query := NewQueryLatest(feed, lookup.NoClue) - rsrc, err := h.Lookup(ctx, query) + feedUpdate, err := h.Lookup(ctx, query) if err != nil { if err.(*Error).code != ErrNotFound { return nil, err @@ -145,11 +145,11 @@ func (h *Handler) NewRequest(ctx context.Context, view *View) (request *Request, // or that the resource really does not have updates } - request.View = *view + request.Feed = *feed // if we already have an update, then find next epoch - if rsrc != nil { - request.Epoch = lookup.GetNextEpoch(rsrc.Epoch, now) + if feedUpdate != nil { + request.Epoch = lookup.GetNextEpoch(feedUpdate.Epoch, now) } else { request.Epoch = lookup.GetFirstEpoch(now) } @@ -172,7 +172,7 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) } if query.Hint == lookup.NoClue { // try to use our cache - entry := h.get(&query.View) + entry := h.get(&query.Feed) if entry != nil && entry.Epoch.Time <= timeLimit { // avoid bad hints query.Hint = entry.Epoch } @@ -183,19 +183,19 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups") } - var ul ID - ul.View = query.View + var id ID + id.Feed = query.Feed var readCount int // Invoke the lookup engine. 
// The callback will be called every time the lookup algorithm needs to guess requestPtr, err := lookup.Lookup(timeLimit, query.Hint, func(epoch lookup.Epoch, now uint64) (interface{}, error) { readCount++ - ul.Epoch = epoch + id.Epoch = epoch ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout) defer cancel() - chunk, err := h.chunkStore.Get(ctx, ul.Addr()) + chunk, err := h.chunkStore.Get(ctx, id.Addr()) if err != nil { // TODO: check for catastrophic errors other than chunk not found return nil, nil } @@ -227,19 +227,19 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) func (h *Handler) updateCache(request *Request) (*cacheEntry, error) { updateAddr := request.Addr() - log.Trace("resource cache update", "topic", request.Topic.Hex(), "updatekey", updateAddr, "epoch time", request.Epoch.Time, "epoch level", request.Epoch.Level) + log.Trace("feed cache update", "topic", request.Topic.Hex(), "updateaddr", updateAddr, "epoch time", request.Epoch.Time, "epoch level", request.Epoch.Level) - rsrc := h.get(&request.View) - if rsrc == nil { - rsrc = &cacheEntry{} - h.set(&request.View, rsrc) + feedUpdate := h.get(&request.Feed) + if feedUpdate == nil { + feedUpdate = &cacheEntry{} + h.set(&request.Feed, feedUpdate) } // update our rsrcs entry map - rsrc.lastKey = updateAddr - rsrc.ResourceUpdate = request.ResourceUpdate - rsrc.Reader = bytes.NewReader(rsrc.data) - return rsrc, nil + feedUpdate.lastKey = updateAddr + feedUpdate.Update = request.Update + feedUpdate.Reader = bytes.NewReader(feedUpdate.data) + return feedUpdate, nil } // Update adds an actual data update @@ -255,8 +255,8 @@ func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Ad return nil, NewError(ErrInit, "Call Handler.SetStore() before updating") } - rsrc := h.get(&r.View) - if rsrc != nil && rsrc.Epoch.Equals(r.Epoch) { // This is the only cheap check we can do for sure + feedUpdate := h.get(&r.Feed) + if feedUpdate != nil && feedUpdate.Epoch.Equals(r.Epoch) { // This is the only cheap check we can do for sure return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist") } @@ -267,32 +267,32 @@ func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Ad // send the chunk h.chunkStore.Put(ctx, chunk) - log.Trace("resource update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data()) + log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data()) // update our resources map cache entry if the new update is older than the one we have, if we have it. 
- if rsrc != nil && r.Epoch.After(rsrc.Epoch) { - rsrc.Epoch = r.Epoch - rsrc.data = make([]byte, len(r.data)) - rsrc.lastKey = r.idAddr - copy(rsrc.data, r.data) - rsrc.Reader = bytes.NewReader(rsrc.data) + if feedUpdate != nil && r.Epoch.After(feedUpdate.Epoch) { + feedUpdate.Epoch = r.Epoch + feedUpdate.data = make([]byte, len(r.data)) + feedUpdate.lastKey = r.idAddr + copy(feedUpdate.data, r.data) + feedUpdate.Reader = bytes.NewReader(feedUpdate.data) } return r.idAddr, nil } // Retrieves the resource cache value for the given nameHash -func (h *Handler) get(view *View) *cacheEntry { +func (h *Handler) get(view *Feed) *cacheEntry { mapKey := view.mapKey() - h.resourceLock.RLock() - defer h.resourceLock.RUnlock() - rsrc := h.resources[mapKey] - return rsrc + h.cacheLock.RLock() + defer h.cacheLock.RUnlock() + feedUpdate := h.cache[mapKey] + return feedUpdate } // Sets the resource cache value for the given View -func (h *Handler) set(view *View, rsrc *cacheEntry) { +func (h *Handler) set(view *Feed, feedUpdate *cacheEntry) { mapKey := view.mapKey() - h.resourceLock.Lock() - defer h.resourceLock.Unlock() - h.resources[mapKey] = rsrc + h.cacheLock.Lock() + defer h.cacheLock.Unlock() + h.cache[mapKey] = feedUpdate } diff --git a/swarm/storage/mru/handler_test.go b/swarm/storage/mru/handler_test.go index 13eb9e51b..a4c7969a3 100644 --- a/swarm/storage/mru/handler_test.go +++ b/swarm/storage/mru/handler_test.go @@ -40,7 +40,7 @@ var ( Time: uint64(4200), } cleanF func() - resourceName = "føø.bar" + subtopicName = "føø.bar" hashfunc = storage.MakeHashFunc(storage.DefaultHash) ) @@ -73,7 +73,7 @@ func (f *fakeTimeProvider) Now() Timestamp { } // make updates and retrieve them based on periods and versions -func TestResourceHandler(t *testing.T) { +func TestFeedsHandler(t *testing.T) { // make fake timeProvider clock := &fakeTimeProvider{ @@ -83,7 +83,7 @@ func TestResourceHandler(t *testing.T) { // signer containing private key signer := newAliceSigner() - rh, datadir, teardownTest, err := setupTest(clock, signer) + feedsHandler, datadir, teardownTest, err := setupTest(clock, signer) if err != nil { t.Fatal(err) } @@ -93,8 +93,8 @@ func TestResourceHandler(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - topic, _ := NewTopic("Mess with mru code and see what ghost catches you", nil) - view := View{ + topic, _ := NewTopic("Mess with Swarm Feeds code and see what ghost catches you", nil) + view := Feed{ Topic: topic, User: signer.Address(), } @@ -108,13 +108,13 @@ func TestResourceHandler(t *testing.T) { } request := NewFirstRequest(view.Topic) // this timestamps the update at t = 4200 (start time) - resourcekey := make(map[string]storage.Address) + chunkAddress := make(map[string]storage.Address) data := []byte(updates[0]) request.SetData(data) if err := request.Sign(signer); err != nil { t.Fatal(err) } - resourcekey[updates[0]], err = rh.Update(ctx, request) + chunkAddress[updates[0]], err = feedsHandler.Update(ctx, request) if err != nil { t.Fatal(err) } @@ -122,7 +122,7 @@ func TestResourceHandler(t *testing.T) { // move the clock ahead 21 seconds clock.FastForward(21) // t=4221 - request, err = rh.NewRequest(ctx, &request.View) // this timestamps the update at t = 4221 + request, err = feedsHandler.NewRequest(ctx, &request.Feed) // this timestamps the update at t = 4221 if err != nil { t.Fatal(err) } @@ -136,14 +136,14 @@ func TestResourceHandler(t *testing.T) { if err := request.Sign(signer); err != nil { t.Fatal(err) } - resourcekey[updates[1]], err 
= rh.Update(ctx, request) + chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request) if err == nil { t.Fatal("Expected update to fail since an update in this epoch already exists") } // move the clock ahead 21 seconds clock.FastForward(21) // t=4242 - request, err = rh.NewRequest(ctx, &request.View) + request, err = feedsHandler.NewRequest(ctx, &request.Feed) if err != nil { t.Fatal(err) } @@ -151,14 +151,14 @@ func TestResourceHandler(t *testing.T) { if err := request.Sign(signer); err != nil { t.Fatal(err) } - resourcekey[updates[1]], err = rh.Update(ctx, request) + chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request) if err != nil { t.Fatal(err) } // move the clock ahead 42 seconds clock.FastForward(42) // t=4284 - request, err = rh.NewRequest(ctx, &request.View) + request, err = feedsHandler.NewRequest(ctx, &request.Feed) if err != nil { t.Fatal(err) } @@ -167,14 +167,14 @@ func TestResourceHandler(t *testing.T) { if err := request.Sign(signer); err != nil { t.Fatal(err) } - resourcekey[updates[2]], err = rh.Update(ctx, request) + chunkAddress[updates[2]], err = feedsHandler.Update(ctx, request) if err != nil { t.Fatal(err) } // move the clock ahead 1 second clock.FastForward(1) // t=4285 - request, err = rh.NewRequest(ctx, &request.View) + request, err = feedsHandler.NewRequest(ctx, &request.Feed) if err != nil { t.Fatal(err) } @@ -187,25 +187,25 @@ func TestResourceHandler(t *testing.T) { if err := request.Sign(signer); err != nil { t.Fatal(err) } - resourcekey[updates[3]], err = rh.Update(ctx, request) + chunkAddress[updates[3]], err = feedsHandler.Update(ctx, request) if err != nil { t.Fatal(err) } time.Sleep(time.Second) - rh.Close() + feedsHandler.Close() // check we can retrieve the updates after close clock.FastForward(2000) // t=6285 - rhparams := &HandlerParams{} + feedParams := &HandlerParams{} - rh2, err := NewTestHandler(datadir, rhparams) + feedsHandler2, err := NewTestHandler(datadir, feedParams) if err != nil { t.Fatal(err) } - rsrc2, err := rh2.Lookup(ctx, NewQueryLatest(&request.View, lookup.NoClue)) + rsrc2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue)) if err != nil { t.Fatal(err) } @@ -223,7 +223,7 @@ func TestResourceHandler(t *testing.T) { log.Debug("Latest lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data) // specific point in time - rsrc, err := rh2.Lookup(ctx, NewQuery(&request.View, 4284, lookup.NoClue)) + rsrc, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue)) if err != nil { t.Fatal(err) } @@ -234,7 +234,7 @@ func TestResourceHandler(t *testing.T) { log.Debug("Historical lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data) // beyond the first should yield an error - rsrc, err = rh2.Lookup(ctx, NewQuery(&request.View, startTime.Time-1, lookup.NoClue)) + rsrc, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue)) if err == nil { t.Fatalf("expected previous to fail, returned epoch %s data %v", rsrc.Epoch.String(), rsrc.data) } @@ -270,7 +270,7 @@ func TestSparseUpdates(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() topic, _ := NewTopic("Very slow updates", nil) - view := View{ + view := Feed{ Topic: topic, User: signer.Address(), } @@ -349,12 +349,12 @@ func TestValidator(t *testing.T) { defer teardownTest() // create new resource - topic, _ := NewTopic(resourceName, nil) - view := View{ + topic, _ := NewTopic(subtopicName, nil) 
+ feed := Feed{ Topic: topic, User: signer.Address(), } - mr := NewFirstRequest(view.Topic) + mr := NewFirstRequest(feed.Topic) // chunk with address data := []byte("foo") @@ -410,9 +410,9 @@ func TestValidatorInStore(t *testing.T) { } // set up resource handler and add is as a validator to the localstore - rhParams := &HandlerParams{} - rh := NewHandler(rhParams) - store.Validators = append(store.Validators, rh) + fhParams := &HandlerParams{} + fh := NewHandler(fhParams) + store.Validators = append(store.Validators, fh) // create content addressed chunks, one good, one faulty chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2) @@ -420,7 +420,7 @@ func TestValidatorInStore(t *testing.T) { badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data()) topic, _ := NewTopic("xyzzy", nil) - view := View{ + feed := Feed{ Topic: topic, User: signer.Address(), } @@ -430,7 +430,7 @@ func TestValidatorInStore(t *testing.T) { Epoch: lookup.Epoch{Time: 42, Level: 1, }, - View: view, + Feed: feed, } updateAddr := id.Addr() @@ -438,7 +438,7 @@ func TestValidatorInStore(t *testing.T) { r := new(Request) r.idAddr = updateAddr - r.ResourceUpdate.ID = id + r.Update.ID = id r.data = data r.Sign(signer) @@ -451,20 +451,20 @@ func TestValidatorInStore(t *testing.T) { // put the chunks in the store and check their error status err = store.Put(context.Background(), goodChunk) if err == nil { - t.Fatal("expected error on good content address chunk with resource validator only, but got nil") + t.Fatal("expected error on good content address chunk with feed update validator only, but got nil") } err = store.Put(context.Background(), badChunk) if err == nil { - t.Fatal("expected error on bad content address chunk with resource validator only, but got nil") + t.Fatal("expected error on bad content address chunk with feed update validator only, but got nil") } err = store.Put(context.Background(), uglyChunk) if err != nil { - t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err) + t.Fatalf("expected no error on feed update chunk with feed update validator only, but got: %s", err) } } // create rpc and resourcehandler -func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, datadir string, teardown func(), err error) { +func setupTest(timeProvider timestampProvider, signer Signer) (fh *TestHandler, datadir string, teardown func(), err error) { var fsClean func() var rpcClean func() @@ -478,7 +478,7 @@ func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, } // temp datadir - datadir, err = ioutil.TempDir("", "rh") + datadir, err = ioutil.TempDir("", "fh") if err != nil { return nil, "", nil, err } @@ -487,9 +487,9 @@ func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, } TimestampProvider = timeProvider - rhparams := &HandlerParams{} - rh, err = NewTestHandler(datadir, rhparams) - return rh, datadir, cleanF, err + fhParams := &HandlerParams{} + fh, err = NewTestHandler(datadir, fhParams) + return fh, datadir, cleanF, err } func newAliceSigner() *GenericSigner { diff --git a/swarm/storage/mru/id.go b/swarm/storage/mru/id.go index f008169ed..09ef9e450 100644 --- a/swarm/storage/mru/id.go +++ b/swarm/storage/mru/id.go @@ -29,21 +29,21 @@ import ( // ID uniquely identifies an update on the network. 
type ID struct { - View `json:"view"` + Feed `json:"view"` lookup.Epoch `json:"epoch"` } // ID layout: -// View viewLength bytes +// Feed feedLength bytes // Epoch EpochLength -const idLength = viewLength + lookup.EpochLength +const idLength = feedLength + lookup.EpochLength // Addr calculates the resource update chunk address corresponding to this ID func (u *ID) Addr() (updateAddr storage.Address) { serializedData := make([]byte, idLength) var cursor int - u.View.binaryPut(serializedData[cursor : cursor+viewLength]) - cursor += viewLength + u.Feed.binaryPut(serializedData[cursor : cursor+feedLength]) + cursor += feedLength eid := u.Epoch.ID() copy(serializedData[cursor:cursor+lookup.EpochLength], eid[:]) @@ -61,10 +61,10 @@ func (u *ID) binaryPut(serializedData []byte) error { return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize ID. Expected %d, got %d", idLength, len(serializedData)) } var cursor int - if err := u.View.binaryPut(serializedData[cursor : cursor+viewLength]); err != nil { + if err := u.Feed.binaryPut(serializedData[cursor : cursor+feedLength]); err != nil { return err } - cursor += viewLength + cursor += feedLength epochBytes, err := u.Epoch.MarshalBinary() if err != nil { @@ -88,10 +88,10 @@ func (u *ID) binaryGet(serializedData []byte) error { } var cursor int - if err := u.View.binaryGet(serializedData[cursor : cursor+viewLength]); err != nil { + if err := u.Feed.binaryGet(serializedData[cursor : cursor+feedLength]); err != nil { return err } - cursor += viewLength + cursor += feedLength if err := u.Epoch.UnmarshalBinary(serializedData[cursor : cursor+lookup.EpochLength]); err != nil { return err @@ -108,8 +108,8 @@ func (u *ID) FromValues(values Values) error { u.Epoch.Level = uint8(level) u.Epoch.Time, _ = strconv.ParseUint(values.Get("time"), 10, 64) - if u.View.User == (common.Address{}) { - return u.View.FromValues(values) + if u.Feed.User == (common.Address{}) { + return u.Feed.FromValues(values) } return nil } @@ -119,5 +119,5 @@ func (u *ID) FromValues(values Values) error { func (u *ID) AppendValues(values Values) { values.Set("level", fmt.Sprintf("%d", u.Epoch.Level)) values.Set("time", fmt.Sprintf("%d", u.Epoch.Time)) - u.View.AppendValues(values) + u.Feed.AppendValues(values) } diff --git a/swarm/storage/mru/id_test.go b/swarm/storage/mru/id_test.go index eba58fbf3..767b3c159 100644 --- a/swarm/storage/mru/id_test.go +++ b/swarm/storage/mru/id_test.go @@ -8,14 +8,14 @@ import ( func getTestID() *ID { return &ID{ - View: *getTestView(), + Feed: *getTestFeed(), Epoch: lookup.GetFirstEpoch(1000), } } func TestIDAddr(t *testing.T) { - ul := getTestID() - updateAddr := ul.Addr() + id := getTestID() + updateAddr := id.Addr() compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8b24583ec293e085f4c78aaee66d1bc5abfb8b4233304d14a349afa57af2a783") } diff --git a/swarm/storage/mru/query.go b/swarm/storage/mru/query.go index 13a28eaab..c8a7cbe5a 100644 --- a/swarm/storage/mru/query.go +++ b/swarm/storage/mru/query.go @@ -27,7 +27,7 @@ import ( // Query is used to specify constraints when performing an update lookup // TimeLimit indicates an upper bound for the search. 
Set to 0 for "now" type Query struct { - View + Feed Hint lookup.Epoch TimeLimit uint64 } @@ -41,8 +41,8 @@ func (q *Query) FromValues(values Values) error { level, _ := strconv.ParseUint(values.Get("hint.level"), 10, 32) q.Hint.Level = uint8(level) q.Hint.Time, _ = strconv.ParseUint(values.Get("hint.time"), 10, 64) - if q.View.User == (common.Address{}) { - return q.View.FromValues(values) + if q.Feed.User == (common.Address{}) { + return q.Feed.FromValues(values) } return nil } @@ -59,20 +59,20 @@ func (q *Query) AppendValues(values Values) { if q.Hint.Time != 0 { values.Set("hint.time", fmt.Sprintf("%d", q.Hint.Time)) } - q.View.AppendValues(values) + q.Feed.AppendValues(values) } // NewQuery constructs an Query structure to find updates on or before `time` // if time == 0, the latest update will be looked up -func NewQuery(view *View, time uint64, hint lookup.Epoch) *Query { +func NewQuery(feed *Feed, time uint64, hint lookup.Epoch) *Query { return &Query{ TimeLimit: time, - View: *view, + Feed: *feed, Hint: hint, } } // NewQueryLatest generates lookup parameters that look for the latest version of a resource -func NewQueryLatest(view *View, hint lookup.Epoch) *Query { - return NewQuery(view, 0, hint) +func NewQueryLatest(feed *Feed, hint lookup.Epoch) *Query { + return NewQuery(feed, 0, hint) } diff --git a/swarm/storage/mru/query_test.go b/swarm/storage/mru/query_test.go index 189a465d6..4cfc597b2 100644 --- a/swarm/storage/mru/query_test.go +++ b/swarm/storage/mru/query_test.go @@ -21,11 +21,11 @@ import ( ) func getTestQuery() *Query { - ul := getTestID() + id := getTestID() return &Query{ TimeLimit: 5000, - View: ul.View, - Hint: ul.Epoch, + Feed: id.Feed, + Hint: id.Epoch, } } diff --git a/swarm/storage/mru/request.go b/swarm/storage/mru/request.go index f6d0f38ff..03ffbf038 100644 --- a/swarm/storage/mru/request.go +++ b/swarm/storage/mru/request.go @@ -29,10 +29,10 @@ import ( // Request represents an update and/or resource create message type Request struct { - ResourceUpdate // actual content that will be put on the chunk, less signature - Signature *Signature - idAddr storage.Address // cached chunk address for the update (not serialized, for internal use) - binaryData []byte // cached serialized data (does not get serialized again!, for efficiency/internal use) + Update // actual content that will be put on the chunk, less signature + Signature *Signature + idAddr storage.Address // cached chunk address for the update (not serialized, for internal use) + binaryData []byte // cached serialized data (does not get serialized again!, for efficiency/internal use) } // updateRequestJSON represents a JSON-serialized UpdateRequest @@ -44,11 +44,11 @@ type updateRequestJSON struct { } // Request layout -// resourceUpdate bytes +// Update bytes // SignatureLength bytes const minimumSignedUpdateLength = minimumUpdateDataLength + signatureLength -// NewFirstRequest returns a ready to sign request to publish a first update +// NewFirstRequest returns a ready to sign request to publish a first feed update func NewFirstRequest(topic Topic) *Request { request := new(Request) @@ -56,7 +56,7 @@ func NewFirstRequest(topic Topic) *Request { // get the current time now := TimestampProvider.Now().Time request.Epoch = lookup.GetFirstEpoch(now) - request.View.Topic = topic + request.Feed.Topic = topic request.Header.Version = ProtocolVersion return request @@ -88,7 +88,7 @@ func (r *Request) Verify() (err error) { } // get the address of the signer (which also checks that it's a valid signature) - 
r.View.User, err = getUserAddr(digest, *r.Signature) + r.Feed.User, err = getUserAddr(digest, *r.Signature) if err != nil { return err } @@ -105,7 +105,7 @@ func (r *Request) Verify() (err error) { // Sign executes the signature to validate the resource func (r *Request) Sign(signer Signer) error { - r.View.User = signer.Address() + r.Feed.User = signer.Address() r.binaryData = nil //invalidate serialized data digest, err := r.GetDigest() // computes digest and serializes into .binaryData if err != nil { @@ -139,10 +139,10 @@ func (r *Request) GetDigest() (result common.Hash, err error) { hasher := hashPool.Get().(hash.Hash) defer hashPool.Put(hasher) hasher.Reset() - dataLength := r.ResourceUpdate.binaryLength() + dataLength := r.Update.binaryLength() if r.binaryData == nil { r.binaryData = make([]byte, dataLength+signatureLength) - if err := r.ResourceUpdate.binaryPut(r.binaryData[:dataLength]); err != nil { + if err := r.Update.binaryPut(r.binaryData[:dataLength]); err != nil { return result, err } } @@ -161,10 +161,10 @@ func (r *Request) toChunk() (storage.Chunk, error) { return nil, NewError(ErrInvalidSignature, "toChunk called without a valid signature or payload data. Call .Sign() first.") } - resourceUpdateLength := r.ResourceUpdate.binaryLength() + updateLength := r.Update.binaryLength() // signature is the last item in the chunk data - copy(r.binaryData[resourceUpdateLength:], r.Signature[:]) + copy(r.binaryData[updateLength:], r.Signature[:]) chunk := storage.NewChunk(r.idAddr, r.binaryData) return chunk, nil @@ -175,13 +175,13 @@ func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error // for update chunk layout see Request definition //deserialize the resource update portion - if err := r.ResourceUpdate.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil { + if err := r.Update.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil { return err } // Extract the signature var signature *Signature - cursor := r.ResourceUpdate.binaryLength() + cursor := r.Update.binaryLength() sigdata := chunkdata[cursor : cursor+signatureLength] if len(sigdata) > 0 { signature = &Signature{} @@ -209,7 +209,7 @@ func (r *Request) FromValues(values Values, data []byte) error { r.Signature = new(Signature) copy(r.Signature[:], signatureBytes) } - err = r.ResourceUpdate.FromValues(values, data) + err = r.Update.FromValues(values, data) if err != nil { return err } @@ -223,7 +223,7 @@ func (r *Request) AppendValues(values Values) []byte { if r.Signature != nil { values.Set("signature", hexutil.Encode(r.Signature[:])) } - return r.ResourceUpdate.AppendValues(values) + return r.Update.AppendValues(values) } // fromJSON takes an update request JSON and populates an UpdateRequest diff --git a/swarm/storage/mru/request_test.go b/swarm/storage/mru/request_test.go index c32d5ec13..e58bf28aa 100644 --- a/swarm/storage/mru/request_test.go +++ b/swarm/storage/mru/request_test.go @@ -53,25 +53,25 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { charlie := newCharlieSigner() //Charlie bob := newBobSigner() //Bob - // Create a resource to our good guy Charlie's name - topic, _ := NewTopic("a good resource name", nil) - createRequest := NewFirstRequest(topic) - createRequest.User = charlie.Address() + // Create a feed to our good guy Charlie's name + topic, _ := NewTopic("a good topic name", nil) + firstRequest := NewFirstRequest(topic) + firstRequest.User = charlie.Address() // We now encode the create message to simulate we send it over the wire - 
messageRawData, err := createRequest.MarshalJSON() + messageRawData, err := firstRequest.MarshalJSON() if err != nil { - t.Fatalf("Error encoding create resource request: %s", err) + t.Fatalf("Error encoding first feed update request: %s", err) } // ... the message arrives and is decoded... - var recoveredCreateRequest Request - if err := recoveredCreateRequest.UnmarshalJSON(messageRawData); err != nil { - t.Fatalf("Error decoding create resource request: %s", err) + var recoveredFirstRequest Request + if err := recoveredFirstRequest.UnmarshalJSON(messageRawData); err != nil { + t.Fatalf("Error decoding first feed update request: %s", err) } // ... but verification should fail because it is not signed! - if err := recoveredCreateRequest.Verify(); err == nil { + if err := recoveredFirstRequest.Verify(); err == nil { t.Fatal("Expected Verify to fail since the message is not signed") } @@ -85,13 +85,13 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { //Put together an unsigned update request that we will serialize to send it to the signer. data := []byte("This hour's update: Swarm 99.0 has been released!") request := &Request{ - ResourceUpdate: ResourceUpdate{ + Update: Update{ ID: ID{ Epoch: lookup.Epoch{ Time: 1000, Level: 1, }, - View: createRequest.ResourceUpdate.View, + Feed: firstRequest.Update.Feed, }, data: data, }, @@ -191,7 +191,7 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { func getTestRequest() *Request { return &Request{ - ResourceUpdate: *getTestResourceUpdate(), + Update: *getTestFeedUpdate(), } } @@ -258,7 +258,7 @@ func TestReverse(t *testing.T) { defer teardownTest() topic, _ := NewTopic("Cervantes quotes", nil) - view := View{ + view := Feed{ Topic: topic, User: signer.Address(), } @@ -266,7 +266,7 @@ func TestReverse(t *testing.T) { data := []byte("Donde una puerta se cierra, otra se abre") request := new(Request) - request.View = view + request.Feed = view request.Epoch = epoch request.data = data @@ -291,15 +291,15 @@ func TestReverse(t *testing.T) { if err != nil { t.Fatal(err) } - recoveredaddress, err := getUserAddr(checkdigest, *checkUpdate.Signature) + recoveredAddr, err := getUserAddr(checkdigest, *checkUpdate.Signature) if err != nil { t.Fatalf("Retrieve address from signature fail: %v", err) } - originaladdress := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) + originalAddr := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) // check that the metadata retrieved from the chunk matches what we gave it - if recoveredaddress != originaladdress { - t.Fatalf("addresses dont match: %x != %x", originaladdress, recoveredaddress) + if recoveredAddr != originalAddr { + t.Fatalf("addresses dont match: %x != %x", originalAddr, recoveredAddr) } if !bytes.Equal(key[:], chunk.Address()[:]) { diff --git a/swarm/storage/mru/testutil.go b/swarm/storage/mru/testutil.go index 7a5a9e4d9..80e0d4cf0 100644 --- a/swarm/storage/mru/testutil.go +++ b/swarm/storage/mru/testutil.go @@ -27,7 +27,7 @@ import ( ) const ( - testDbDirName = "mru" + testDbDirName = "feeds" ) type TestHandler struct { @@ -52,20 +52,20 @@ func newFakeNetFetcher(context.Context, storage.Address, *sync.Map) storage.NetF // NewTestHandler creates Handler object to be used for testing purposes. 
func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) { path := filepath.Join(datadir, testDbDirName) - rh := NewHandler(params) + fh := NewHandler(params) localstoreparams := storage.NewDefaultLocalStoreParams() localstoreparams.Init(path) localStore, err := storage.NewLocalStore(localstoreparams, nil) if err != nil { return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err) } - localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(resourceHashAlgorithm))) - localStore.Validators = append(localStore.Validators, rh) + localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm))) + localStore.Validators = append(localStore.Validators, fh) netStore, err := storage.NewNetStore(localStore, nil) if err != nil { return nil, err } netStore.NewNetFetcherFunc = newFakeNetFetcher - rh.SetStore(netStore) - return &TestHandler{rh}, nil + fh.SetStore(netStore) + return &TestHandler{fh}, nil } diff --git a/swarm/storage/mru/topic.go b/swarm/storage/mru/topic.go index f318a5593..5f5720ae2 100644 --- a/swarm/storage/mru/topic.go +++ b/swarm/storage/mru/topic.go @@ -29,7 +29,7 @@ import ( // TopicLength establishes the max length of a topic string const TopicLength = storage.AddressLength -// Topic represents what a resource talks about +// Topic represents what a feed is about type Topic [TopicLength]byte // ErrTopicTooLong is returned when creating a topic with a name/related content too long diff --git a/swarm/storage/mru/update.go b/swarm/storage/mru/update.go index 6aa57fce1..892cb9d1b 100644 --- a/swarm/storage/mru/update.go +++ b/swarm/storage/mru/update.go @@ -34,8 +34,8 @@ type Header struct { Padding [headerLength - 1]uint8 // reserved for future use } -// ResourceUpdate encapsulates the information sent as part of a resource update -type ResourceUpdate struct { +// Update encapsulates the information sent as part of a feed update +type Update struct { Header Header // ID // Resource update identifying information data []byte // actual data payload @@ -44,15 +44,15 @@ type ResourceUpdate struct { const minimumUpdateDataLength = idLength + headerLength + 1 const maxUpdateDataLength = chunk.DefaultSize - signatureLength - idLength - headerLength -// binaryPut serializes the resource update information into the given slice -func (r *ResourceUpdate) binaryPut(serializedData []byte) error { +// binaryPut serializes the feed update information into the given slice +func (r *Update) binaryPut(serializedData []byte) error { datalength := len(r.data) if datalength == 0 { - return NewError(ErrInvalidValue, "cannot update a resource with no data") + return NewError(ErrInvalidValue, "a feed update must contain data") } if datalength > maxUpdateDataLength { - return NewErrorf(ErrInvalidValue, "data is too big (length=%d). Max length=%d", datalength, maxUpdateDataLength) + return NewErrorf(ErrInvalidValue, "feed update data is too big (length=%d). 
Max length=%d", datalength, maxUpdateDataLength) } if len(serializedData) != r.binaryLength() { @@ -79,12 +79,12 @@ func (r *ResourceUpdate) binaryPut(serializedData []byte) error { } // binaryLength returns the expected number of bytes this structure will take to encode -func (r *ResourceUpdate) binaryLength() int { +func (r *Update) binaryLength() int { return idLength + headerLength + len(r.data) } // binaryGet populates this instance from the information contained in the passed byte slice -func (r *ResourceUpdate) binaryGet(serializedData []byte) error { +func (r *Update) binaryGet(serializedData []byte) error { if len(serializedData) < minimumUpdateDataLength { return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a resource update chunk", minimumUpdateDataLength) } @@ -116,7 +116,7 @@ func (r *ResourceUpdate) binaryGet(serializedData []byte) error { // FromValues deserializes this instance from a string key-value store // useful to parse query strings -func (r *ResourceUpdate) FromValues(values Values, data []byte) error { +func (r *Update) FromValues(values Values, data []byte) error { r.data = data version, _ := strconv.ParseUint(values.Get("protocolVersion"), 10, 32) r.Header.Version = uint8(version) @@ -125,7 +125,7 @@ func (r *ResourceUpdate) FromValues(values Values, data []byte) error { // AppendValues serializes this structure into the provided string key-value store // useful to build query strings -func (r *ResourceUpdate) AppendValues(values Values) []byte { +func (r *Update) AppendValues(values Values) []byte { r.ID.AppendValues(values) values.Set("protocolVersion", fmt.Sprintf("%d", r.Header.Version)) return r.data diff --git a/swarm/storage/mru/update_test.go b/swarm/storage/mru/update_test.go index bd706d83a..62c401f3f 100644 --- a/swarm/storage/mru/update_test.go +++ b/swarm/storage/mru/update_test.go @@ -20,31 +20,31 @@ import ( "testing" ) -func getTestResourceUpdate() *ResourceUpdate { - return &ResourceUpdate{ +func getTestFeedUpdate() *Update { + return &Update{ ID: *getTestID(), data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"), } } -func TestResourceUpdateSerializer(t *testing.T) { - testBinarySerializerRecovery(t, getTestResourceUpdate(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f") +func TestUpdateSerializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestFeedUpdate(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f") } -func TestResourceUpdateLengthCheck(t *testing.T) { - testBinarySerializerLengthCheck(t, getTestResourceUpdate()) +func TestUpdateLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestFeedUpdate()) // Test fail if update is too big - update := getTestResourceUpdate() + update := getTestFeedUpdate() update.data = make([]byte, maxUpdateDataLength+100) serialized := make([]byte, update.binaryLength()) if err := update.binaryPut(serialized); err == nil { - t.Fatal("Expected resourceUpdate.binaryPut to fail since update is too big") + t.Fatal("Expected update.binaryPut to fail since update is too big") } // test fail if data is empty or nil update.data = nil serialized = make([]byte, 
update.binaryLength()) if err := update.binaryPut(serialized); err == nil { - t.Fatal("Expected resourceUpdate.binaryPut to fail since data is empty") + t.Fatal("Expected update.binaryPut to fail since data is empty") } } diff --git a/swarm/storage/mru/view.go b/swarm/storage/mru/view.go index 2e4ce4a0b..f1a588d44 100644 --- a/swarm/storage/mru/view.go +++ b/swarm/storage/mru/view.go @@ -25,8 +25,8 @@ import ( "github.com/ethereum/go-ethereum/swarm/storage" ) -// View represents a particular user's view of a resource -type View struct { +// Feed represents a particular user's view of a resource +type Feed struct { Topic Topic `json:"topic"` User common.Address `json:"user"` } @@ -34,11 +34,11 @@ type View struct { // View layout: // TopicLength bytes // userAddr common.AddressLength bytes -const viewLength = TopicLength + common.AddressLength +const feedLength = TopicLength + common.AddressLength -// mapKey calculates a unique id for this view for the cache map in `Handler` -func (u *View) mapKey() uint64 { - serializedData := make([]byte, viewLength) +// mapKey calculates a unique id for this feed. Used by the cache map in `Handler` +func (u *Feed) mapKey() uint64 { + serializedData := make([]byte, feedLength) u.binaryPut(serializedData) hasher := hashPool.Get().(hash.Hash) defer hashPool.Put(hasher) @@ -48,10 +48,10 @@ func (u *View) mapKey() uint64 { return *(*uint64)(unsafe.Pointer(&hash[0])) } -// binaryPut serializes this View instance into the provided slice -func (u *View) binaryPut(serializedData []byte) error { - if len(serializedData) != viewLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize View. Expected %d, got %d", viewLength, len(serializedData)) +// binaryPut serializes this Feed instance into the provided slice +func (u *Feed) binaryPut(serializedData []byte) error { + if len(serializedData) != feedLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize View. Expected %d, got %d", feedLength, len(serializedData)) } var cursor int copy(serializedData[cursor:cursor+TopicLength], u.Topic[:TopicLength]) @@ -64,14 +64,14 @@ func (u *View) binaryPut(serializedData []byte) error { } // binaryLength returns the expected size of this structure when serialized -func (u *View) binaryLength() int { - return viewLength +func (u *Feed) binaryLength() int { + return feedLength } // binaryGet restores the current instance from the information contained in the passed slice -func (u *View) binaryGet(serializedData []byte) error { - if len(serializedData) != viewLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to read View. Expected %d, got %d", viewLength, len(serializedData)) +func (u *Feed) binaryGet(serializedData []byte) error { + if len(serializedData) != feedLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to read Feed. 
Expected %d, got %d", feedLength, len(serializedData)) } var cursor int @@ -84,16 +84,16 @@ func (u *View) binaryGet(serializedData []byte) error { return nil } -// Hex serializes the View to a hex string -func (u *View) Hex() string { - serializedData := make([]byte, viewLength) +// Hex serializes the Feed to a hex string +func (u *Feed) Hex() string { + serializedData := make([]byte, feedLength) u.binaryPut(serializedData) return hexutil.Encode(serializedData) } // FromValues deserializes this instance from a string key-value store // useful to parse query strings -func (u *View) FromValues(values Values) (err error) { +func (u *Feed) FromValues(values Values) (err error) { topic := values.Get("topic") if topic != "" { if err := u.Topic.FromHex(values.Get("topic")); err != nil { @@ -119,7 +119,7 @@ func (u *View) FromValues(values Values) (err error) { // AppendValues serializes this structure into the provided string key-value store // useful to build query strings -func (u *View) AppendValues(values Values) { +func (u *Feed) AppendValues(values Values) { values.Set("topic", u.Topic.Hex()) values.Set("user", u.User.Hex()) } diff --git a/swarm/storage/mru/view_test.go b/swarm/storage/mru/view_test.go index 45720ba79..e2f4d6b30 100644 --- a/swarm/storage/mru/view_test.go +++ b/swarm/storage/mru/view_test.go @@ -19,18 +19,18 @@ import ( "testing" ) -func getTestView() *View { +func getTestFeed() *Feed { topic, _ := NewTopic("world news report, every hour", nil) - return &View{ + return &Feed{ Topic: topic, User: newCharlieSigner().Address(), } } -func TestViewSerializerDeserializer(t *testing.T) { - testBinarySerializerRecovery(t, getTestView(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781c") +func TestFeedSerializerDeserializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestFeed(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781c") } -func TestMetadataSerializerLengthCheck(t *testing.T) { - testBinarySerializerLengthCheck(t, getTestView()) +func TestFeedSerializerLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestFeed()) } -- cgit From b35622cf3c758d96874f287d137725946fc6341d Mon Sep 17 00:00:00 2001 From: Javier Peletier Date: Sun, 30 Sep 2018 07:48:49 +0200 Subject: swarm/storage/mru: Renamed all comments to Feeds --- swarm/storage/mru/cacheentry.go | 4 +- swarm/storage/mru/doc.go | 29 +++++++-------- swarm/storage/mru/error.go | 2 +- swarm/storage/mru/handler.go | 57 ++++++++++++++--------------- swarm/storage/mru/handler_test.go | 54 +++++++++++++-------------- swarm/storage/mru/id.go | 4 +- swarm/storage/mru/lookup/lookup.go | 2 +- swarm/storage/mru/query.go | 2 +- swarm/storage/mru/request.go | 12 +++--- swarm/storage/mru/request_test.go | 16 ++++---- swarm/storage/mru/resource_sign.go | 75 -------------------------------------- swarm/storage/mru/sign.go | 75 ++++++++++++++++++++++++++++++++++++++ swarm/storage/mru/topic.go | 2 +- swarm/storage/mru/update.go | 4 +- swarm/storage/mru/view.go | 6 +-- 15 files changed, 169 insertions(+), 175 deletions(-) delete mode 100644 swarm/storage/mru/resource_sign.go create mode 100644 swarm/storage/mru/sign.go (limited to 'swarm') diff --git a/swarm/storage/mru/cacheentry.go b/swarm/storage/mru/cacheentry.go index 024ed61c3..f6ed72818 100644 --- a/swarm/storage/mru/cacheentry.go +++ b/swarm/storage/mru/cacheentry.go @@ -30,7 +30,7 @@ const ( defaultRetrieveTimeout = 100 * time.Millisecond ) -// 
cacheEntry caches resource data and the metadata of its root chunk. +// cacheEntry caches the last known update of a specific Feed. type cacheEntry struct { Update *bytes.Reader @@ -42,7 +42,7 @@ func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) { return int64(len(r.Update.data)), nil } -//returns the resource's topic +//returns the Feed's topic func (r *cacheEntry) Topic() Topic { return r.Feed.Topic } diff --git a/swarm/storage/mru/doc.go b/swarm/storage/mru/doc.go index a9ea2076c..2cf2d3757 100644 --- a/swarm/storage/mru/doc.go +++ b/swarm/storage/mru/doc.go @@ -1,43 +1,42 @@ /* Package feeds defines Swarm Feeds. -A Mutable Resource is an entity which allows updates to a resource +Swarm Feeds allows a user to build an update feed about a particular topic without resorting to ENS on each update. The update scheme is built on swarm chunks with chunk keys following a predictable, versionable pattern. -A Resource is tied to a unique identifier that is deterministically generated out of +A Feed is tied to a unique identifier that is deterministically generated out of the chosen topic. -A Resource View is defined as a specific user's point of view about a particular resource. -Thus, a View is a Topic + the user's address (userAddr) +A Feed is defined as the series of updates of a specific user about a particular topic Actual data updates are also made in the form of swarm chunks. The keys of the updates are the hash of a concatenation of properties as follows: -updateAddr = H(View, Epoch ID) +updateAddr = H(Feed, Epoch ID) where H is the SHA3 hash function -View is the combination of Topic and the user address +Feed is the combination of Topic and the user address Epoch ID is a time slot. See the lookup package for more information. -A user looking up a resource would only need to know the View in order to -another user's updates +A user looking up a the latest update in a Feed only needs to know the Topic +and the other user's address. 
-The resource update data is: -resourcedata = View|Epoch|data +The Feed Update data is: +updatedata = Feed|Epoch|data -the full update data that goes in the chunk payload is: +The full update data that goes in the chunk payload is: resourcedata|sign(resourcedata) Structure Summary: -Request: Resource update with signature - ResourceUpdate: headers + data +Request: Feed Update with signature + Update: headers + data Header: Protocol version and reserved for future use placeholders ID: Information about how to locate a specific update - View: Author of the update and what is updating + Feed: Represents a user's series of publications about a specific Topic Topic: Item that the updates are about - User: User who updates the resource + User: User who updates the Feed Epoch: time slot where the update is stored */ diff --git a/swarm/storage/mru/error.go b/swarm/storage/mru/error.go index 18ab52558..714426449 100644 --- a/swarm/storage/mru/error.go +++ b/swarm/storage/mru/error.go @@ -52,7 +52,7 @@ func (e *Error) Code() int { return e.code } -// NewError creates a new Mutable Resource Error object with the specified code and custom error message +// NewError creates a new Swarm Feeds Error object with the specified code and custom error message func NewError(code int, s string) error { if code < 0 || code >= ErrCnt { panic("no such error code!") diff --git a/swarm/storage/mru/handler.go b/swarm/storage/mru/handler.go index 034934d05..3ddeaafae 100644 --- a/swarm/storage/mru/handler.go +++ b/swarm/storage/mru/handler.go @@ -57,7 +57,7 @@ func init() { } } -// NewHandler creates a new Mutable Resource API +// NewHandler creates a new Swarm Feeds API func NewHandler(params *HandlerParams) *Handler { fh := &Handler{ cache: make(map[uint64]*cacheEntry), @@ -74,13 +74,13 @@ func NewHandler(params *HandlerParams) *Handler { return fh } -// SetStore sets the store backend for the Mutable Resource API +// SetStore sets the store backend for the Swarm Feeds API func (h *Handler) SetStore(store *storage.NetStore) { h.chunkStore = store } // Validate is a chunk validation method -// If it looks like a resource update, the chunk address is checked against the userAddr of the update's signature +// If it looks like a feed update, the chunk address is checked against the userAddr of the update's signature // It implements the storage.ChunkValidator interface func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { dataLength := len(data) @@ -89,7 +89,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { } // check if it is a properly formatted update chunk with - // valid signature and proof of ownership of the resource it is trying + // valid signature and proof of ownership of the feed it is trying // to update // First, deserialize the chunk @@ -99,9 +99,9 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { return false } - // Verify signatures and that the signer actually owns the resource + // Verify signatures and that the signer actually owns the feed // If it fails, it means either the signature is not valid, data is corrupted - // or someone is trying to update someone else's resource. + // or someone is trying to update someone else's feed. 
if err := r.Verify(); err != nil { log.Debug("Invalid feed update signature", "err", err) return false @@ -110,14 +110,14 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { return true } -// GetContent retrieves the data payload of the last synced update of the Mutable Resource +// GetContent retrieves the data payload of the last synced update of the Feed func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) { if feed == nil { - return nil, nil, NewError(ErrInvalidValue, "view is nil") + return nil, nil, NewError(ErrInvalidValue, "feed is nil") } feedUpdate := h.get(feed) if feedUpdate == nil { - return nil, nil, NewError(ErrNotFound, "resource does not exist") + return nil, nil, NewError(ErrNotFound, "feed update not cached") } return feedUpdate.lastKey, feedUpdate.data, nil } @@ -142,7 +142,7 @@ func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request, return nil, err } // not finding updates means that there is a network error - // or that the resource really does not have updates + // or that the feed really does not have updates } request.Feed = *feed @@ -157,13 +157,10 @@ func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request, return request, nil } -// Lookup retrieves a specific or latest version of the resource -// Lookup works differently depending on the configuration of `ID` -// See the `ID` documentation and helper functions: -// `LookupLatest` and `LookupBefore` -// When looking for the latest update, it starts at the next period after the current time. -// upon failure tries the corresponding keys of each previous period until one is found -// (or startTime is reached, in which case there are no updates). +// Lookup retrieves a specific or latest feed update +// Lookup works differently depending on the configuration of `query` +// See the `query` documentation and helper functions: +// `NewQueryLatest` and `NewQuery` func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) { timeLimit := query.TimeLimit @@ -213,17 +210,17 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) return nil, err } - log.Info(fmt.Sprintf("Resource lookup finished in %d lookups", readCount)) + log.Info(fmt.Sprintf("Feed lookup finished in %d lookups", readCount)) request, _ := requestPtr.(*Request) if request == nil { - return nil, NewError(ErrNotFound, "no updates found") + return nil, NewError(ErrNotFound, "no feed updates found") } return h.updateCache(request) } -// update mutable resource cache map with specified content +// update feed updates cache with specified content func (h *Handler) updateCache(request *Request) (*cacheEntry, error) { updateAddr := request.Addr() @@ -242,10 +239,10 @@ func (h *Handler) updateCache(request *Request) (*cacheEntry, error) { return feedUpdate, nil } -// Update adds an actual data update -// Uses the Mutable Resource metadata currently loaded in the resources map entry. -// It is the caller's responsibility to make sure that this data is not stale. -// Note that a Mutable Resource update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. An error will be returned if the total length of the chunk payload will exceed this limit. +// Update publishes a feed update +// Note that a Feed update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. 
+// This results in a max payload of `maxUpdateDataLength` (check update.go for more details) +// An error will be returned if the total length of the chunk payload will exceed this limit. // Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update // on the network. func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) { @@ -280,18 +277,18 @@ func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Ad return r.idAddr, nil } -// Retrieves the resource cache value for the given nameHash -func (h *Handler) get(view *Feed) *cacheEntry { - mapKey := view.mapKey() +// Retrieves the feed update cache value for the given nameHash +func (h *Handler) get(feed *Feed) *cacheEntry { + mapKey := feed.mapKey() h.cacheLock.RLock() defer h.cacheLock.RUnlock() feedUpdate := h.cache[mapKey] return feedUpdate } -// Sets the resource cache value for the given View -func (h *Handler) set(view *Feed, feedUpdate *cacheEntry) { - mapKey := view.mapKey() +// Sets the feed update cache value for the given Feed +func (h *Handler) set(feed *Feed, feedUpdate *cacheEntry) { + mapKey := feed.mapKey() h.cacheLock.Lock() defer h.cacheLock.Unlock() h.cache[mapKey] = feedUpdate diff --git a/swarm/storage/mru/handler_test.go b/swarm/storage/mru/handler_test.go index a4c7969a3..3bf2bda8b 100644 --- a/swarm/storage/mru/handler_test.go +++ b/swarm/storage/mru/handler_test.go @@ -89,12 +89,12 @@ func TestFeedsHandler(t *testing.T) { } defer teardownTest() - // create a new resource + // create a new Feed ctx, cancel := context.WithCancel(context.Background()) defer cancel() topic, _ := NewTopic("Mess with Swarm Feeds code and see what ghost catches you", nil) - view := Feed{ + feed := Feed{ Topic: topic, User: signer.Address(), } @@ -107,7 +107,7 @@ func TestFeedsHandler(t *testing.T) { "clyde", // t=4285 } - request := NewFirstRequest(view.Topic) // this timestamps the update at t = 4200 (start time) + request := NewFirstRequest(feed.Topic) // this timestamps the update at t = 4200 (start time) chunkAddress := make(map[string]storage.Address) data := []byte(updates[0]) request.SetData(data) @@ -205,38 +205,38 @@ func TestFeedsHandler(t *testing.T) { t.Fatal(err) } - rsrc2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue)) + update2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue)) if err != nil { t.Fatal(err) } // last update should be "clyde" - if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) { - t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1]) + if !bytes.Equal(update2.data, []byte(updates[len(updates)-1])) { + t.Fatalf("feed update data was %v, expected %v", string(update2.data), updates[len(updates)-1]) } - if rsrc2.Level != 22 { - t.Fatalf("resource epoch level was %d, expected 22", rsrc2.Level) + if update2.Level != 22 { + t.Fatalf("feed update epoch level was %d, expected 22", update2.Level) } - if rsrc2.Base() != 0 { - t.Fatalf("resource epoch base time was %d, expected 0", rsrc2.Base()) + if update2.Base() != 0 { + t.Fatalf("feed update epoch base time was %d, expected 0", update2.Base()) } - log.Debug("Latest lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data) + log.Debug("Latest lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data) // specific point in time - rsrc, err := feedsHandler2.Lookup(ctx, 
NewQuery(&request.Feed, 4284, lookup.NoClue)) + update, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue)) if err != nil { t.Fatal(err) } // check data - if !bytes.Equal(rsrc.data, []byte(updates[2])) { - t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[2]) + if !bytes.Equal(update.data, []byte(updates[2])) { + t.Fatalf("feed update data (historical) was %v, expected %v", string(update2.data), updates[2]) } - log.Debug("Historical lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data) + log.Debug("Historical lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data) // beyond the first should yield an error - rsrc, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue)) + update, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue)) if err == nil { - t.Fatalf("expected previous to fail, returned epoch %s data %v", rsrc.Epoch.String(), rsrc.data) + t.Fatalf("expected previous to fail, returned epoch %s data %v", update.Epoch.String(), update.data) } } @@ -266,11 +266,11 @@ func TestSparseUpdates(t *testing.T) { defer teardownTest() defer os.RemoveAll(datadir) - // create a new resource + // create a new Feed ctx, cancel := context.WithCancel(context.Background()) defer cancel() topic, _ := NewTopic("Very slow updates", nil) - view := Feed{ + feed := Feed{ Topic: topic, User: signer.Address(), } @@ -280,7 +280,7 @@ func TestSparseUpdates(t *testing.T) { var epoch lookup.Epoch var lastUpdateTime uint64 for T := uint64(0); T < today; T += 5 * Year { - request := NewFirstRequest(view.Topic) + request := NewFirstRequest(feed.Topic) request.Epoch = lookup.GetNextEpoch(epoch, T) request.data = generateData(T) // this generates some data that depends on T, so we can check later request.Sign(signer) @@ -295,14 +295,14 @@ func TestSparseUpdates(t *testing.T) { lastUpdateTime = T } - query := NewQuery(&view, today, lookup.NoClue) + query := NewQuery(&feed, today, lookup.NoClue) _, err = rh.Lookup(ctx, query) if err != nil { t.Fatal(err) } - _, content, err := rh.GetContent(&view) + _, content, err := rh.GetContent(&feed) if err != nil { t.Fatal(err) } @@ -321,7 +321,7 @@ func TestSparseUpdates(t *testing.T) { t.Fatal(err) } - _, content, err = rh.GetContent(&view) + _, content, err = rh.GetContent(&feed) if err != nil { t.Fatal(err) } @@ -348,7 +348,7 @@ func TestValidator(t *testing.T) { } defer teardownTest() - // create new resource + // create new Feed topic, _ := NewTopic(subtopicName, nil) feed := Feed{ Topic: topic, @@ -382,7 +382,7 @@ func TestValidator(t *testing.T) { } // tests that the content address validator correctly checks the data -// tests that resource update chunks are passed through content address validator +// tests that Feed update chunks are passed through content address validator // there is some redundancy in this test as it also tests content addressed chunks, // which should be evaluated as invalid chunks by this validator func TestValidatorInStore(t *testing.T) { @@ -409,7 +409,7 @@ func TestValidatorInStore(t *testing.T) { t.Fatal(err) } - // set up resource handler and add is as a validator to the localstore + // set up Swarm Feeds handler and add is as a validator to the localstore fhParams := &HandlerParams{} fh := NewHandler(fhParams) store.Validators = append(store.Validators, fh) @@ -425,7 +425,7 @@ func TestValidatorInStore(t *testing.T) { User: 
signer.Address(), } - // create a resource update chunk with correct publickey + // create a feed update chunk with correct publickey id := ID{ Epoch: lookup.Epoch{Time: 42, Level: 1, diff --git a/swarm/storage/mru/id.go b/swarm/storage/mru/id.go index 09ef9e450..dcc88ac2a 100644 --- a/swarm/storage/mru/id.go +++ b/swarm/storage/mru/id.go @@ -29,7 +29,7 @@ import ( // ID uniquely identifies an update on the network. type ID struct { - Feed `json:"view"` + Feed `json:"feed"` lookup.Epoch `json:"epoch"` } @@ -38,7 +38,7 @@ type ID struct { // Epoch EpochLength const idLength = feedLength + lookup.EpochLength -// Addr calculates the resource update chunk address corresponding to this ID +// Addr calculates the feed update chunk address corresponding to this ID func (u *ID) Addr() (updateAddr storage.Address) { serializedData := make([]byte, idLength) var cursor int diff --git a/swarm/storage/mru/lookup/lookup.go b/swarm/storage/mru/lookup/lookup.go index c98248d70..a5154d261 100644 --- a/swarm/storage/mru/lookup/lookup.go +++ b/swarm/storage/mru/lookup/lookup.go @@ -15,7 +15,7 @@ // along with the go-ethereum library. If not, see . /* -Package lookup defines resource lookup algorithms and provides tools to place updates +Package lookup defines Feed lookup algorithms and provides tools to place updates so they can be found */ package lookup diff --git a/swarm/storage/mru/query.go b/swarm/storage/mru/query.go index c8a7cbe5a..a0b358bfd 100644 --- a/swarm/storage/mru/query.go +++ b/swarm/storage/mru/query.go @@ -72,7 +72,7 @@ func NewQuery(feed *Feed, time uint64, hint lookup.Epoch) *Query { } } -// NewQueryLatest generates lookup parameters that look for the latest version of a resource +// NewQueryLatest generates lookup parameters that look for the latest update to a feed func NewQueryLatest(feed *Feed, hint lookup.Epoch) *Query { return NewQuery(feed, 0, hint) } diff --git a/swarm/storage/mru/request.go b/swarm/storage/mru/request.go index 03ffbf038..ca88adda1 100644 --- a/swarm/storage/mru/request.go +++ b/swarm/storage/mru/request.go @@ -27,7 +27,7 @@ import ( "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" ) -// Request represents an update and/or resource create message +// Request represents a request to sign or signed Feed Update message type Request struct { Update // actual content that will be put on the chunk, less signature Signature *Signature @@ -62,7 +62,7 @@ func NewFirstRequest(topic Topic) *Request { return request } -// SetData stores the payload data the resource will be updated with +// SetData stores the payload data the feed update will be updated with func (r *Request) SetData(data []byte) { r.data = data r.Signature = nil @@ -73,7 +73,7 @@ func (r *Request) IsUpdate() bool { return r.Signature != nil } -// Verify checks that signatures are valid and that the signer owns the resource to be updated +// Verify checks that signatures are valid func (r *Request) Verify() (err error) { if len(r.data) == 0 { return NewError(ErrInvalidValue, "Update does not contain data") @@ -103,7 +103,7 @@ func (r *Request) Verify() (err error) { return nil } -// Sign executes the signature to validate the resource +// Sign executes the signature to validate the update message func (r *Request) Sign(signer Signer) error { r.Feed.User = signer.Address() r.binaryData = nil //invalidate serialized data @@ -133,7 +133,7 @@ func (r *Request) Sign(signer Signer) error { return nil } -// GetDigest creates the resource update digest used in signatures +// GetDigest creates the feed 
update digest used in signatures // the serialized payload is cached in .binaryData func (r *Request) GetDigest() (result common.Hash, err error) { hasher := hashPool.Get().(hash.Hash) @@ -174,7 +174,7 @@ func (r *Request) toChunk() (storage.Chunk, error) { func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error { // for update chunk layout see Request definition - //deserialize the resource update portion + //deserialize the feed update portion if err := r.Update.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil { return err } diff --git a/swarm/storage/mru/request_test.go b/swarm/storage/mru/request_test.go index e58bf28aa..33e975756 100644 --- a/swarm/storage/mru/request_test.go +++ b/swarm/storage/mru/request_test.go @@ -47,7 +47,7 @@ func areEqualJSON(s1, s2 string) (bool, error) { } // TestEncodingDecodingUpdateRequests ensures that requests are serialized properly -// while also checking cryptographically that only the owner of a resource can update it. +// while also checking cryptographically that only the owner of a Feed can update it. func TestEncodingDecodingUpdateRequests(t *testing.T) { charlie := newCharlieSigner() //Charlie @@ -75,12 +75,10 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { t.Fatal("Expected Verify to fail since the message is not signed") } - // We now assume that the resource was created and propagated. With rootAddr we can retrieve the resource metadata - // and recover the information above. To sign an update, we need the rootAddr and the metaHash to construct - // proof of ownership + // We now assume that the feed ypdate was created and propagated. - const expectedSignature = "0x32c2d2c7224e24e4d3ae6a10595fc6e945f1b3ecdf548a04d8247c240a50c9240076aa7730abad6c8a46dfea00cfb8f43b6211f02db5c4cc5ed8584cb0212a4d00" - const expectedJSON = `{"view":{"topic":"0x6120676f6f64207265736f75726365206e616d65000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}` + const expectedSignature = "0x7235b27a68372ddebcf78eba48543fa460864b0b0e99cb533fcd3664820e603312d29426dd00fb39628f5299480a69bf6e462838d78de49ce0704c754c9deb2601" + const expectedJSON = `{"feed":{"topic":"0x6120676f6f6420746f706963206e616d65000000000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}` //Put together an unsigned update request that we will serialize to send it to the signer. 
data := []byte("This hour's update: Swarm 99.0 has been released!") @@ -138,7 +136,7 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature") } - // Now imagine Bob wants to create an update of his own about the same resource, + // Now imagine Bob wants to create an update of his own about the same Feed, // signing a message with his private key if err := request.Sign(bob); err != nil { t.Fatalf("Error signing: %s", err) @@ -258,7 +256,7 @@ func TestReverse(t *testing.T) { defer teardownTest() topic, _ := NewTopic("Cervantes quotes", nil) - view := Feed{ + feed := Feed{ Topic: topic, User: signer.Address(), } @@ -266,7 +264,7 @@ func TestReverse(t *testing.T) { data := []byte("Donde una puerta se cierra, otra se abre") request := new(Request) - request.Feed = view + request.Feed = feed request.Epoch = epoch request.data = data diff --git a/swarm/storage/mru/resource_sign.go b/swarm/storage/mru/resource_sign.go deleted file mode 100644 index 58196f10e..000000000 --- a/swarm/storage/mru/resource_sign.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package mru - -import ( - "crypto/ecdsa" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -const signatureLength = 65 - -// Signature is an alias for a static byte array with the size of a signature -type Signature [signatureLength]byte - -// Signer signs Mutable Resource update payloads -type Signer interface { - Sign(common.Hash) (Signature, error) - Address() common.Address -} - -// GenericSigner implements the Signer interface -// It is the vanilla signer that probably should be used in most cases -type GenericSigner struct { - PrivKey *ecdsa.PrivateKey - address common.Address -} - -// NewGenericSigner builds a signer that will sign everything with the provided private key -func NewGenericSigner(privKey *ecdsa.PrivateKey) *GenericSigner { - return &GenericSigner{ - PrivKey: privKey, - address: crypto.PubkeyToAddress(privKey.PublicKey), - } -} - -// Sign signs the supplied data -// It wraps the ethereum crypto.Sign() method -func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error) { - signaturebytes, err := crypto.Sign(data.Bytes(), s.PrivKey) - if err != nil { - return - } - copy(signature[:], signaturebytes) - return -} - -// Address returns the public key of the signer's private key -func (s *GenericSigner) Address() common.Address { - return s.address -} - -// getUserAddr extracts the address of the resource update signer -func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) { - pub, err := crypto.SigToPub(digest.Bytes(), signature[:]) - if err != nil { - return common.Address{}, err - } - return crypto.PubkeyToAddress(*pub), nil -} diff --git a/swarm/storage/mru/sign.go b/swarm/storage/mru/sign.go new file mode 100644 index 000000000..03b0fa2b6 --- /dev/null +++ b/swarm/storage/mru/sign.go @@ -0,0 +1,75 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package mru + +import ( + "crypto/ecdsa" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +const signatureLength = 65 + +// Signature is an alias for a static byte array with the size of a signature +type Signature [signatureLength]byte + +// Signer signs Feed update payloads +type Signer interface { + Sign(common.Hash) (Signature, error) + Address() common.Address +} + +// GenericSigner implements the Signer interface +// It is the vanilla signer that probably should be used in most cases +type GenericSigner struct { + PrivKey *ecdsa.PrivateKey + address common.Address +} + +// NewGenericSigner builds a signer that will sign everything with the provided private key +func NewGenericSigner(privKey *ecdsa.PrivateKey) *GenericSigner { + return &GenericSigner{ + PrivKey: privKey, + address: crypto.PubkeyToAddress(privKey.PublicKey), + } +} + +// Sign signs the supplied data +// It wraps the ethereum crypto.Sign() method +func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error) { + signaturebytes, err := crypto.Sign(data.Bytes(), s.PrivKey) + if err != nil { + return + } + copy(signature[:], signaturebytes) + return +} + +// Address returns the public key of the signer's private key +func (s *GenericSigner) Address() common.Address { + return s.address +} + +// getUserAddr extracts the address of the Feed update signer +func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) { + pub, err := crypto.SigToPub(digest.Bytes(), signature[:]) + if err != nil { + return common.Address{}, err + } + return crypto.PubkeyToAddress(*pub), nil +} diff --git a/swarm/storage/mru/topic.go b/swarm/storage/mru/topic.go index 5f5720ae2..b6adb4cd7 100644 --- a/swarm/storage/mru/topic.go +++ b/swarm/storage/mru/topic.go @@ -74,7 +74,7 @@ func (t *Topic) FromHex(hex string) error { return nil } -// Name will try to extract the resource name out of the topic +// Name will try to extract the topic name out of the Topic func (t *Topic) Name(relatedContent []byte) string { nameBytes := *t if relatedContent != nil { diff --git a/swarm/storage/mru/update.go b/swarm/storage/mru/update.go index 892cb9d1b..f6e70b4d8 100644 --- a/swarm/storage/mru/update.go +++ b/swarm/storage/mru/update.go @@ -37,7 +37,7 @@ type Header struct { // Update encapsulates the information sent as part of a feed update type Update struct { Header Header // - ID // Resource update identifying information + ID // Feed Update identifying information data []byte // actual data payload } @@ -86,7 +86,7 @@ func (r *Update) binaryLength() int { // binaryGet populates this instance from the information contained in the passed byte slice func (r *Update) binaryGet(serializedData []byte) error { if len(serializedData) < minimumUpdateDataLength { - return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a resource update chunk", minimumUpdateDataLength) + return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a feed update chunk", minimumUpdateDataLength) } dataLength := len(serializedData) - idLength - headerLength // at this point we can be satisfied that we have the correct data length to read diff --git a/swarm/storage/mru/view.go b/swarm/storage/mru/view.go index f1a588d44..8d21ef7a4 100644 --- a/swarm/storage/mru/view.go +++ b/swarm/storage/mru/view.go @@ -25,13 +25,13 @@ import ( "github.com/ethereum/go-ethereum/swarm/storage" ) -// Feed represents a particular user's view of a resource +// Feed represents a particular 
user's stream of updates on a Topic type Feed struct { Topic Topic `json:"topic"` User common.Address `json:"user"` } -// View layout: +// Feed layout: // TopicLength bytes // userAddr common.AddressLength bytes const feedLength = TopicLength + common.AddressLength @@ -51,7 +51,7 @@ func (u *Feed) mapKey() uint64 { // binaryPut serializes this Feed instance into the provided slice func (u *Feed) binaryPut(serializedData []byte) error { if len(serializedData) != feedLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize View. Expected %d, got %d", feedLength, len(serializedData)) + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize Feed. Expected %d, got %d", feedLength, len(serializedData)) } var cursor int copy(serializedData[cursor:cursor+TopicLength], u.Topic[:TopicLength]) -- cgit From 83705ef6aa3645a6305a400fa175e44904a929f7 Mon Sep 17 00:00:00 2001 From: Javier Peletier Date: Sun, 30 Sep 2018 09:43:10 +0200 Subject: swarm/storage/mru: Renamed rest of MRU references --- swarm/api/api.go | 113 +++++++++++++++++----------------- swarm/api/client/client.go | 57 +++++++++--------- swarm/api/client/client_test.go | 70 ++++++++++----------- swarm/api/http/server.go | 124 +++++++++++++++----------------------- swarm/api/http/server_test.go | 101 +++++++++---------------------- swarm/api/manifest.go | 30 ++++----- swarm/api/uri.go | 6 +- swarm/network/README.md | 2 +- swarm/storage/localstore_test.go | 4 +- swarm/storage/mru/doc.go | 2 +- swarm/storage/mru/error.go | 2 +- swarm/storage/mru/handler.go | 6 +- swarm/storage/mru/handler_test.go | 4 +- swarm/storage/mru/request_test.go | 4 +- swarm/swarm.go | 12 ++-- swarm/testutil/http.go | 8 +-- 16 files changed, 239 insertions(+), 306 deletions(-) (limited to 'swarm') diff --git a/swarm/api/api.go b/swarm/api/api.go index e6b676dba..9b4571bee 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -235,18 +235,18 @@ on top of the FileStore it is the public interface of the FileStore which is included in the ethereum stack */ type API struct { - resource *mru.Handler + feeds *mru.Handler fileStore *storage.FileStore dns Resolver Decryptor func(context.Context, string) DecryptFunc } // NewAPI the api constructor initialises a new API instance. 
-func NewAPI(fileStore *storage.FileStore, dns Resolver, resourceHandler *mru.Handler, pk *ecdsa.PrivateKey) (self *API) { +func NewAPI(fileStore *storage.FileStore, dns Resolver, feedsHandler *mru.Handler, pk *ecdsa.PrivateKey) (self *API) { self = &API{ fileStore: fileStore, dns: dns, - resource: resourceHandler, + feeds: feedsHandler, Decryptor: func(ctx context.Context, credentials string) DecryptFunc { return self.doDecrypt(ctx, credentials, pk) }, @@ -403,24 +403,24 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage return a.Get(ctx, decrypt, adr, entry.Path) } - // we need to do some extra work if this is a mutable resource manifest - if entry.ContentType == ResourceContentType { - if entry.ResourceView == nil { - return reader, mimeType, status, nil, fmt.Errorf("Cannot decode ResourceView in manifest") + // we need to do some extra work if this is a Feed manifest + if entry.ContentType == FeedContentType { + if entry.Feed == nil { + return reader, mimeType, status, nil, fmt.Errorf("Cannot decode Feed in manifest") } - _, err := a.resource.Lookup(ctx, mru.NewQueryLatest(entry.ResourceView, lookup.NoClue)) + _, err := a.feeds.Lookup(ctx, mru.NewQueryLatest(entry.Feed, lookup.NoClue)) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound - log.Debug(fmt.Sprintf("get resource content error: %v", err)) + log.Debug(fmt.Sprintf("get feed update content error: %v", err)) return reader, mimeType, status, nil, err } // get the data of the update - _, rsrcData, err := a.resource.GetContent(entry.ResourceView) + _, rsrcData, err := a.feeds.GetContent(entry.Feed) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound - log.Warn(fmt.Sprintf("get resource content error: %v", err)) + log.Warn(fmt.Sprintf("get feed update content error: %v", err)) return reader, mimeType, status, nil, err } @@ -429,18 +429,18 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage if err != nil { apiGetInvalid.Inc(1) status = http.StatusUnprocessableEntity - log.Warn("invalid resource multihash", "err", err) + log.Warn("invalid multihash in feed update", "err", err) return reader, mimeType, status, nil, err } manifestAddr = storage.Address(decodedMultihash) - log.Trace("resource is multihash", "key", manifestAddr) + log.Trace("feed update contains multihash", "key", manifestAddr) // get the manifest the multihash digest points to trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, NOOPDecrypt) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound - log.Warn(fmt.Sprintf("loadManifestTrie (resource multihash) error: %v", err)) + log.Warn(fmt.Sprintf("loadManifestTrie (feed update multihash) error: %v", err)) return reader, mimeType, status, nil, err } @@ -450,13 +450,13 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage if entry == nil { status = http.StatusNotFound apiGetNotFound.Inc(1) - err = fmt.Errorf("manifest (resource multihash) entry for '%s' not found", path) - log.Trace("manifest (resource multihash) entry not found", "key", manifestAddr, "path", path) + err = fmt.Errorf("manifest (feed update multihash) entry for '%s' not found", path) + log.Trace("manifest (feed update multihash) entry not found", "key", manifestAddr, "path", path) return reader, mimeType, status, nil, err } } - // regardless of resource update manifests or normal manifests we will converge at this point + // regardless of feed update manifests or normal manifests we will converge at this point // 
get the key the manifest entry points to and serve it if it's unambiguous contentAddr = common.Hex2Bytes(entry.Hash) status = entry.Status @@ -956,68 +956,67 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver return addr, manifestEntryMap, nil } -// ResourceLookup finds Swarm Feeds at specific periods and versions -func (a *API) ResourceLookup(ctx context.Context, query *mru.Query) ([]byte, error) { - _, err := a.resource.Lookup(ctx, query) +// FeedsLookup finds Swarm Feeds Updates at specific points in time, or the latest update +func (a *API) FeedsLookup(ctx context.Context, query *mru.Query) ([]byte, error) { + _, err := a.feeds.Lookup(ctx, query) if err != nil { return nil, err } var data []byte - _, data, err = a.resource.GetContent(&query.Feed) + _, data, err = a.feeds.GetContent(&query.Feed) if err != nil { return nil, err } return data, nil } -// ResourceNewRequest creates a Request object to update a specific mutable resource -func (a *API) ResourceNewRequest(ctx context.Context, view *mru.Feed) (*mru.Request, error) { - return a.resource.NewRequest(ctx, view) +// FeedsNewRequest creates a Request object to update a specific Feed +func (a *API) FeedsNewRequest(ctx context.Context, feed *mru.Feed) (*mru.Request, error) { + return a.feeds.NewRequest(ctx, feed) } -// ResourceUpdate updates a Mutable Resource with arbitrary data. -// Upon retrieval the update will be retrieved verbatim as bytes. -func (a *API) ResourceUpdate(ctx context.Context, request *mru.Request) (storage.Address, error) { - return a.resource.Update(ctx, request) +// FeedsUpdate publishes a new update on the given Feed +func (a *API) FeedsUpdate(ctx context.Context, request *mru.Request) (storage.Address, error) { + return a.feeds.Update(ctx, request) } -// ResourceHashSize returned the size of the digest produced by the Mutable Resource hashing function -func (a *API) ResourceHashSize() int { - return a.resource.HashSize +// FeedsHashSize returns the size of the digest produced by Swarm Feeds' hashing function +func (a *API) FeedsHashSize() int { + return a.feeds.HashSize } -// ErrCannotLoadResourceManifest is returned when looking up a resource manifest fails -var ErrCannotLoadResourceManifest = errors.New("Cannot load resource manifest") +// ErrCannotLoadFeedManifest is returned when looking up a feed manifest fails +var ErrCannotLoadFeedManifest = errors.New("Cannot load feed manifest") -// ErrNotAResourceManifest is returned when the address provided returned something other than a valid manifest +// ErrNotAFeedManifest is returned when the address provided returned something other than a valid manifest -var ErrNotAResourceManifest = errors.New("Not a resource manifest") +var ErrNotAFeedManifest = errors.New("Not a feed manifest") -// ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the Resource's view ID. +// ResolveFeedManifest retrieves the Feed manifest for the given address, and returns the referenced Feed.
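At the swarm/api level the renamed methods compose into the same flow. The sketch below is an editor's illustration rather than part of the patch; it relies only on the signatures visible in this diff (FeedsNewRequest, FeedsUpdate, FeedsLookup) and assumes an *api.API obtained from a running node.

package example

import (
	"context"

	swarmapi "github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

// publishThroughAPI publishes one signed update on the given feed and returns
// the latest payload as FeedsLookup sees it.
func publishThroughAPI(ctx context.Context, a *swarmapi.API, feed *mru.Feed, signer mru.Signer, data []byte) ([]byte, error) {
	// FeedsNewRequest pre-fills the epoch (and any hints from previous updates),
	// so the caller only sets the payload and signs.
	request, err := a.FeedsNewRequest(ctx, feed)
	if err != nil {
		return nil, err
	}
	request.SetData(data)
	if err := request.Sign(signer); err != nil {
		return nil, err
	}
	if _, err := a.FeedsUpdate(ctx, request); err != nil {
		return nil, err
	}

	// FeedsLookup finds the latest update and returns its raw data.
	return a.FeedsLookup(ctx, mru.NewQueryLatest(feed, lookup.NoClue))
}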
+func (a *API) ResolveFeedManifest(ctx context.Context, addr storage.Address) (*mru.Feed, error) { trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt) if err != nil { - return nil, ErrCannotLoadResourceManifest + return nil, ErrCannotLoadFeedManifest } entry, _ := trie.getEntry("") - if entry.ContentType != ResourceContentType { - return nil, ErrNotAResourceManifest + if entry.ContentType != FeedContentType { + return nil, ErrNotAFeedManifest } - return entry.ResourceView, nil + return entry.Feed, nil } -// ErrCannotResolveResourceURI is returned when the ENS resolver is not able to translate a name to a resource -var ErrCannotResolveResourceURI = errors.New("Cannot resolve Resource URI") +// ErrCannotResolveFeedURI is returned when the ENS resolver is not able to translate a name to a Feed +var ErrCannotResolveFeedURI = errors.New("Cannot resolve Feed URI") -// ErrCannotResolveResourceView is returned when values provided are not enough or invalid to recreate a -// resource view out of them. -var ErrCannotResolveResourceView = errors.New("Cannot resolve resource view") +// ErrCannotResolveFeed is returned when values provided are not enough or invalid to recreate a +// Feed out of them. +var ErrCannotResolveFeed = errors.New("Cannot resolve Feed") -// ResolveResourceView attempts to extract View information out of the manifest, if provided -// If not, it attempts to extract the View out of a set of key-value pairs -func (a *API) ResolveResourceView(ctx context.Context, uri *URI, values mru.Values) (*mru.Feed, error) { - var view *mru.Feed +// ResolveFeed attempts to extract Feed information out of the manifest, if provided +// If not, it attempts to extract the Feed out of a set of key-value pairs +func (a *API) ResolveFeed(ctx context.Context, uri *URI, values mru.Values) (*mru.Feed, error) { + var feed *mru.Feed var err error if uri.Addr != "" { // resolve the content key. 
@@ -1025,25 +1024,25 @@ func (a *API) ResolveResourceView(ctx context.Context, uri *URI, values mru.Valu if manifestAddr == nil { manifestAddr, err = a.Resolve(ctx, uri.Addr) if err != nil { - return nil, ErrCannotResolveResourceURI + return nil, ErrCannotResolveFeedURI } } - // get the resource view from the manifest - view, err = a.ResolveResourceManifest(ctx, manifestAddr) + // get the Feed from the manifest + feed, err = a.ResolveFeedManifest(ctx, manifestAddr) if err != nil { return nil, err } - log.Debug("handle.get.resource: resolved", "manifestkey", manifestAddr, "view", view.Hex()) + log.Debug("handle.get.feed: resolved", "manifestkey", manifestAddr, "feed", feed.Hex()) } else { var v mru.Feed if err := v.FromValues(values); err != nil { - return nil, ErrCannotResolveResourceView + return nil, ErrCannotResolveFeed } - view = &v + feed = &v } - return view, nil + return feed, nil } // MimeOctetStream default value of http Content-Type header diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go index 47a6980de..76ada1297 100644 --- a/swarm/api/client/client.go +++ b/swarm/api/client/client.go @@ -601,16 +601,15 @@ func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error) return string(data), nil } -// ErrNoResourceUpdatesFound is returned when Swarm cannot find updates of the given resource -var ErrNoResourceUpdatesFound = errors.New("No updates found for this resource") +// ErrNoFeedUpdatesFound is returned when Swarm cannot find updates of the given feed +var ErrNoFeedUpdatesFound = errors.New("No updates found for this feed") -// CreateResource creates a Mutable Resource with the given name and frequency, initializing it with the provided -// data. Data is interpreted as multihash or not depending on the multihash parameter. 
-// startTime=0 means "now" -// Returns the resulting Mutable Resource manifest address that you can use to include in an ENS Resolver (setContent) -// or reference future updates (Client.UpdateResource) -func (c *Client) CreateResource(request *mru.Request) (string, error) { - responseStream, err := c.updateResource(request, true) +// CreateFeedWithManifest creates a Feed Manifest, initializing it with the provided +// data +// Returns the resulting Feed Manifest address that you can use to include in an ENS Resolver (setContent) +// or reference future updates (Client.UpdateFeed) +func (c *Client) CreateFeedWithManifest(request *mru.Request) (string, error) { + responseStream, err := c.updateFeed(request, true) if err != nil { return "", err } @@ -628,18 +627,18 @@ func (c *Client) CreateResource(request *mru.Request) (string, error) { return manifestAddress, nil } -// UpdateResource allows you to set a new version of your content -func (c *Client) UpdateResource(request *mru.Request) error { - _, err := c.updateResource(request, false) +// UpdateFeed allows you to set a new version of your content +func (c *Client) UpdateFeed(request *mru.Request) error { + _, err := c.updateFeed(request, false) return err } -func (c *Client) updateResource(request *mru.Request, createManifest bool) (io.ReadCloser, error) { +func (c *Client) updateFeed(request *mru.Request, createManifest bool) (io.ReadCloser, error) { URL, err := url.Parse(c.Gateway) if err != nil { return nil, err } - URL.Path = "/bzz-resource:/" + URL.Path = "/bzz-feed:/" values := URL.Query() body := request.AppendValues(values) if createManifest { @@ -660,23 +659,23 @@ func (c *Client) updateResource(request *mru.Request, createManifest bool) (io.R return res.Body, nil } -// GetResource returns a byte stream with the raw content of the resource -// manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver +// QueryFeed returns a byte stream with the raw content of the feed update +// manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver // points to that address -func (c *Client) GetResource(query *mru.Query, manifestAddressOrDomain string) (io.ReadCloser, error) { - return c.getResource(query, manifestAddressOrDomain, false) +func (c *Client) QueryFeed(query *mru.Query, manifestAddressOrDomain string) (io.ReadCloser, error) { + return c.queryFeed(query, manifestAddressOrDomain, false) } -// getResource returns a byte stream with the raw content of the resource -// manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver +// queryFeed returns a byte stream with the raw content of the feed update +// manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver // points to that address -// meta set to true will instruct the node return resource metainformation instead -func (c *Client) getResource(query *mru.Query, manifestAddressOrDomain string, meta bool) (io.ReadCloser, error) { +// meta set to true will instruct the node return Feed metainformation instead +func (c *Client) queryFeed(query *mru.Query, manifestAddressOrDomain string, meta bool) (io.ReadCloser, error) { URL, err := url.Parse(c.Gateway) if err != nil { return nil, err } - URL.Path = "/bzz-resource:/" + manifestAddressOrDomain + URL.Path = "/bzz-feed:/" + manifestAddressOrDomain values := URL.Query() if query != nil { query.AppendValues(values) //adds query parameters @@ 
-692,7 +691,7 @@ func (c *Client) getResource(query *mru.Query, manifestAddressOrDomain string, m if res.StatusCode != http.StatusOK { if res.StatusCode == http.StatusNotFound { - return nil, ErrNoResourceUpdatesFound + return nil, ErrNoFeedUpdatesFound } errorMessageBytes, err := ioutil.ReadAll(res.Body) var errorMessage string @@ -701,18 +700,18 @@ func (c *Client) getResource(query *mru.Query, manifestAddressOrDomain string, m } else { errorMessage = string(errorMessageBytes) } - return nil, fmt.Errorf("Error retrieving resource: %s", errorMessage) + return nil, fmt.Errorf("Error retrieving feed updates: %s", errorMessage) } return res.Body, nil } -// GetResourceMetadata returns a structure that describes the Mutable Resource -// manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver +// GetFeedMetadata returns a structure that describes the referenced Feed status +// manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver // points to that address -func (c *Client) GetResourceMetadata(query *mru.Query, manifestAddressOrDomain string) (*mru.Request, error) { +func (c *Client) GetFeedMetadata(query *mru.Query, manifestAddressOrDomain string) (*mru.Request, error) { - responseStream, err := c.getResource(query, manifestAddressOrDomain, true) + responseStream, err := c.queryFeed(query, manifestAddressOrDomain, true) if err != nil { return nil, err } diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go index c6ad0237b..fbc49a6e1 100644 --- a/swarm/api/client/client_test.go +++ b/swarm/api/client/client_test.go @@ -369,12 +369,12 @@ func newTestSigner() (*mru.GenericSigner, error) { return mru.NewGenericSigner(privKey), nil } -// test the transparent resolving of multihash resource types with bzz:// scheme +// test the transparent resolving of multihash feed updates with bzz:// scheme // -// first upload data, and store the multihash to the resulting manifest in a resource update +// first upload data, and store the multihash to the resulting manifest in a feed update // retrieving the update with the multihash should return the manifest pointing directly to the data // and raw retrieve of that hash should return the data -func TestClientCreateResourceMultihash(t *testing.T) { +func TestClientCreateFeedMultihash(t *testing.T) { signer, _ := newTestSigner() @@ -393,7 +393,7 @@ func TestClientCreateResourceMultihash(t *testing.T) { s := common.FromHex(swarmHash) mh := multihash.ToMultihash(s) - // our mutable resource topic + // our feed topic topic, _ := mru.NewTopic("foo.eth", nil) createRequest := mru.NewFirstRequest(topic) @@ -403,26 +403,26 @@ func TestClientCreateResourceMultihash(t *testing.T) { t.Fatalf("Error signing update: %s", err) } - resourceManifestHash, err := client.CreateResource(createRequest) + feedManifestHash, err := client.CreateFeedWithManifest(createRequest) if err != nil { - t.Fatalf("Error creating resource: %s", err) + t.Fatalf("Error creating feed manifest: %s", err) } - correctManifestAddrHex := "6ef40ba1492cf2a029dc9a8b5896c822cf689d3cd010842f4f1744e6db8824bd" - if resourceManifestHash != correctManifestAddrHex { - t.Fatalf("Response resource manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash) + correctManifestAddrHex := "bb056a5264c295c2b0f613c8409b9c87ce9d71576ace02458160df4cc894210b" + if feedManifestHash != correctManifestAddrHex { + t.Fatalf("Response feed manifest mismatch, expected '%s', got 
'%s'", correctManifestAddrHex, feedManifestHash) } - // Check we get a not found error when trying to get the resource with a made-up manifest - _, err = client.GetResource(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") - if err != ErrNoResourceUpdatesFound { - t.Fatalf("Expected to receive ErrNoResourceUpdatesFound error. Got: %s", err) + // Check we get a not found error when trying to get feed updates with a made-up manifest + _, err = client.QueryFeed(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + if err != ErrNoFeedUpdatesFound { + t.Fatalf("Expected to receive ErrNoFeedUpdatesFound error. Got: %s", err) } - reader, err := client.GetResource(nil, correctManifestAddrHex) + reader, err := client.QueryFeed(nil, correctManifestAddrHex) if err != nil { - t.Fatalf("Error retrieving resource: %s", err) + t.Fatalf("Error retrieving feed updates: %s", err) } defer reader.Close() gotData, err := ioutil.ReadAll(reader) @@ -435,8 +435,8 @@ func TestClientCreateResourceMultihash(t *testing.T) { } -// TestClientCreateUpdateResource will check that mutable resources can be created and updated via the HTTP client. -func TestClientCreateUpdateResource(t *testing.T) { +// TestClientCreateUpdateFeed will check that feeds can be created and updated via the HTTP client. +func TestClientCreateUpdateFeed(t *testing.T) { signer, _ := newTestSigner() @@ -444,10 +444,10 @@ func TestClientCreateUpdateResource(t *testing.T) { client := NewClient(srv.URL) defer srv.Close() - // set raw data for the resource + // set raw data for the feed update databytes := []byte("En un lugar de La Mancha, de cuyo nombre no quiero acordarme...") - // our mutable resource name + // our feed topic name topic, _ := mru.NewTopic("El Quijote", nil) createRequest := mru.NewFirstRequest(topic) @@ -456,16 +456,16 @@ func TestClientCreateUpdateResource(t *testing.T) { t.Fatalf("Error signing update: %s", err) } - resourceManifestHash, err := client.CreateResource(createRequest) + feedManifestHash, err := client.CreateFeedWithManifest(createRequest) - correctManifestAddrHex := "fcb8e75f53e480e197c083ad1976d265674d0ce776f2bf359c09c413fb5230b8" - if resourceManifestHash != correctManifestAddrHex { - t.Fatalf("Response resource manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash) + correctManifestAddrHex := "0e9b645ebc3da167b1d56399adc3276f7a08229301b72a03336be0e7d4b71882" + if feedManifestHash != correctManifestAddrHex { + t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, feedManifestHash) } - reader, err := client.GetResource(nil, correctManifestAddrHex) + reader, err := client.QueryFeed(nil, correctManifestAddrHex) if err != nil { - t.Fatalf("Error retrieving resource: %s", err) + t.Fatalf("Error retrieving feed updates: %s", err) } defer reader.Close() gotData, err := ioutil.ReadAll(reader) @@ -479,7 +479,7 @@ func TestClientCreateUpdateResource(t *testing.T) { // define different data databytes = []byte("... 
no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero ...") - updateRequest, err := client.GetResourceMetadata(nil, correctManifestAddrHex) + updateRequest, err := client.GetFeedMetadata(nil, correctManifestAddrHex) if err != nil { t.Fatalf("Error retrieving update request template: %s", err) } @@ -489,13 +489,13 @@ func TestClientCreateUpdateResource(t *testing.T) { t.Fatalf("Error signing update: %s", err) } - if err = client.UpdateResource(updateRequest); err != nil { - t.Fatalf("Error updating resource: %s", err) + if err = client.UpdateFeed(updateRequest); err != nil { + t.Fatalf("Error updating feed: %s", err) } - reader, err = client.GetResource(nil, correctManifestAddrHex) + reader, err = client.QueryFeed(nil, correctManifestAddrHex) if err != nil { - t.Fatalf("Error retrieving resource: %s", err) + t.Fatalf("Error retrieving feed updates: %s", err) } defer reader.Close() gotData, err = ioutil.ReadAll(reader) @@ -506,17 +506,17 @@ func TestClientCreateUpdateResource(t *testing.T) { t.Fatalf("Expected: %v, got %v", databytes, gotData) } - // now try retrieving resource without a manifest + // now try retrieving feed updates without a manifest - view := &mru.Feed{ + feed := &mru.Feed{ Topic: topic, User: signer.Address(), } - lookupParams := mru.NewQueryLatest(view, lookup.NoClue) - reader, err = client.GetResource(lookupParams, "") + lookupParams := mru.NewQueryLatest(feed, lookup.NoClue) + reader, err = client.QueryFeed(lookupParams, "") if err != nil { - t.Fatalf("Error retrieving resource: %s", err) + t.Fatalf("Error retrieving feed updates: %s", err) } defer reader.Close() gotData, err = ioutil.ReadAll(reader) diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index 4c19dd6df..c5b3b564c 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -31,7 +31,6 @@ import ( "net/http" "os" "path" - "regexp" "strconv" "strings" "time" @@ -145,13 +144,13 @@ func NewServer(api *api.API, corsString string) *Server { defaultMiddlewares..., ), }) - mux.Handle("/bzz-resource:/", methodHandler{ + mux.Handle("/bzz-feed:/", methodHandler{ "GET": Adapt( - http.HandlerFunc(server.HandleGetResource), + http.HandlerFunc(server.HandleGetFeed), defaultMiddlewares..., ), "POST": Adapt( - http.HandlerFunc(server.HandlePostResource), + http.HandlerFunc(server.HandlePostFeed), defaultMiddlewares..., ), }) @@ -458,44 +457,13 @@ func (s *Server) HandleDelete(w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, newKey) } -// Parses a resource update post url to corresponding action -// possible combinations: -// / add multihash update to existing hash -// /raw add raw update to existing hash -// /# create new resource with first update as mulitihash -// /raw/# create new resource with first update raw -func resourcePostMode(path string) (isRaw bool, frequency uint64, err error) { - re, err := regexp.Compile("^(raw)?/?([0-9]+)?$") - if err != nil { - return isRaw, frequency, err - } - m := re.FindAllStringSubmatch(path, 2) - var freqstr = "0" - if len(m) > 0 { - if m[0][1] != "" { - isRaw = true - } - if m[0][2] != "" { - freqstr = m[0][2] - } - } else if len(path) > 0 { - return isRaw, frequency, fmt.Errorf("invalid path") - } - frequency, err = strconv.ParseUint(freqstr, 10, 64) - return isRaw, frequency, err -} - -// Handles creation of new mutable resources and adding updates to existing mutable resources -// There are two types of updates available, "raw" and "multihash." 
-// If the latter is used, a subsequent bzz:// GET call to the manifest of the resource will return -// the page that the multihash is pointing to, as if it held a normal swarm content manifest -// -// The POST request admits a JSON structure as defined in the mru package: `mru.updateRequestJSON` -// The requests can be to a) create a resource, b) update a resource or c) both a+b: create a resource and set the initial content -func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { +// Handles feed manifest creation and feed updates +// The POST request admits a JSON structure as defined in the feeds package: `feeds.updateRequestJSON` +// The requests can be to a) create a feed manifest, b) update a feed or c) both a+b: create a feed manifest and publish a first update +func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) { ruid := GetRUID(r.Context()) uri := GetURI(r.Context()) - log.Debug("handle.post.resource", "ruid", ruid) + log.Debug("handle.post.feed", "ruid", ruid) var err error // Creation and update must send mru.updateRequestJSON JSON structure @@ -505,19 +473,19 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { return } - view, err := s.api.ResolveResourceView(r.Context(), uri, r.URL.Query()) + feed, err := s.api.ResolveFeed(r.Context(), uri, r.URL.Query()) if err != nil { // couldn't parse query string or retrieve manifest getFail.Inc(1) httpStatus := http.StatusBadRequest - if err == api.ErrCannotLoadResourceManifest || err == api.ErrCannotResolveResourceURI { + if err == api.ErrCannotLoadFeedManifest || err == api.ErrCannotResolveFeedURI { httpStatus = http.StatusNotFound } - RespondError(w, r, fmt.Sprintf("cannot retrieve resource view: %s", err), httpStatus) + RespondError(w, r, fmt.Sprintf("cannot retrieve feed from manifest: %s", err), httpStatus) return } var updateRequest mru.Request - updateRequest.Feed = *view + updateRequest.Feed = *feed query := r.URL.Query() if err := updateRequest.FromValues(query, body); err != nil { // decodes request from query parameters @@ -527,13 +495,13 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { if updateRequest.IsUpdate() { // Verify that the signature is intact and that the signer is authorized - // to update this resource - // Check this early, to avoid creating a resource and then not being able to set its first update. + // to update this feed + // Check this early, to avoid creating a feed and then not being able to set its first update. 
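For the HTTP surface that HandlePostFeed and HandleGetFeed expose, the renamed client methods in this patch drive the same endpoints. The sketch below is an editor's illustration, not part of the patch; the gateway URL and topic name are assumptions, and only client calls that appear elsewhere in this diff are used.

package example

import (
	"io/ioutil"

	"github.com/ethereum/go-ethereum/swarm/api/client"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

// feedOverHTTP creates a feed manifest, publishes two updates and returns the
// latest payload as served through bzz-feed://.
func feedOverHTTP(signer mru.Signer, first, second []byte) ([]byte, error) {
	c := client.NewClient("http://localhost:8500") // assumed local gateway

	// Create the feed manifest and publish the first update
	// (POST bzz-feed:/ with manifest=1).
	topic, _ := mru.NewTopic("example topic", nil) // hypothetical topic name
	request := mru.NewFirstRequest(topic)
	request.SetData(first)
	if err := request.Sign(signer); err != nil {
		return nil, err
	}
	manifestHash, err := c.CreateFeedWithManifest(request)
	if err != nil {
		return nil, err
	}

	// Publish a further update: fetch the current metadata (the meta=1 query
	// handled by HandleGetFeed below), set new data, sign and POST it back.
	update, err := c.GetFeedMetadata(nil, manifestHash)
	if err != nil {
		return nil, err
	}
	update.SetData(second)
	if err := update.Sign(signer); err != nil {
		return nil, err
	}
	if err := c.UpdateFeed(update); err != nil {
		return nil, err
	}

	// Read the latest update back through the manifest (GET bzz-feed:/<hash>).
	reader, err := c.QueryFeed(nil, manifestHash)
	if err != nil {
		return nil, err
	}
	defer reader.Close()
	return ioutil.ReadAll(reader)
}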
if err = updateRequest.Verify(); err != nil { RespondError(w, r, err.Error(), http.StatusForbidden) return } - _, err = s.api.ResourceUpdate(r.Context(), &updateRequest) + _, err = s.api.FeedsUpdate(r.Context(), &updateRequest) if err != nil { RespondError(w, r, err.Error(), http.StatusInternalServerError) return @@ -541,16 +509,16 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { } if query.Get("manifest") == "1" { - // we create a manifest so we can retrieve the resource with bzz:// later - // this manifest has a special "resource type" manifest, and saves the - // resource view ID used to retrieve the resource later - m, err := s.api.NewResourceManifest(r.Context(), &updateRequest.Feed) + // we create a manifest so we can retrieve feed updates with bzz:// later + // this manifest has a special "feed type" manifest, and saves the + // feed identification used to retrieve feed updates later + m, err := s.api.NewFeedManifest(r.Context(), &updateRequest.Feed) if err != nil { - RespondError(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError) + RespondError(w, r, fmt.Sprintf("failed to create feed manifest: %v", err), http.StatusInternalServerError) return } // the key to the manifest will be passed back to the client - // the client can access the view directly through its resourceView member + // the client can access the Feed directly through its Feed member // the manifest key can be set as content in the resolver of the ENS name outdata, err := json.Marshal(m) if err != nil { @@ -563,41 +531,49 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { } } -// Retrieve Swarm Feeds: -// bzz-resource:// - get latest update -// bzz-resource:///?period=n - get latest update on period n -// bzz-resource:///?period=n&version=m - get update version m of period n -// bzz-resource:///meta - get metadata and next version information -// = ens name or hash -// TODO: Enable pass maxPeriod parameter -func (s *Server) HandleGetResource(w http.ResponseWriter, r *http.Request) { +// HandleGetFeed retrieves Swarm Feeds updates: +// bzz-feed:// - get latest feed update, given a manifest address +// - or - +// specify user + topic (optional), subtopic name (optional) directly, without manifest: +// bzz-feed://?user=0x...&topic=0x...&name=subtopic name +// topic defaults to 0x000... if not specified. +// name defaults to empty string if not specified. +// thus, empty name and topic refers to the user's default feed. 
+// +// Optional parameters: +// time=xx - get the latest update before time (in epoch seconds) +// hint.time=xx - hint the lookup algorithm looking for updates at around that time +// hint.level=xx - hint the lookup algorithm looking for updates at around this frequency level +// meta=1 - get feed metadata and status information instead of performing a feed query +// NOTE: meta=1 will be deprecated in the near future +func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) { ruid := GetRUID(r.Context()) uri := GetURI(r.Context()) - log.Debug("handle.get.resource", "ruid", ruid) + log.Debug("handle.get.feed", "ruid", ruid) var err error - view, err := s.api.ResolveResourceView(r.Context(), uri, r.URL.Query()) + feed, err := s.api.ResolveFeed(r.Context(), uri, r.URL.Query()) if err != nil { // couldn't parse query string or retrieve manifest getFail.Inc(1) httpStatus := http.StatusBadRequest - if err == api.ErrCannotLoadResourceManifest || err == api.ErrCannotResolveResourceURI { + if err == api.ErrCannotLoadFeedManifest || err == api.ErrCannotResolveFeedURI { httpStatus = http.StatusNotFound } - RespondError(w, r, fmt.Sprintf("cannot retrieve resource view: %s", err), httpStatus) + RespondError(w, r, fmt.Sprintf("cannot retrieve feed information from manifest: %s", err), httpStatus) return } // determine if the query specifies period and version or it is a metadata query if r.URL.Query().Get("meta") == "1" { - unsignedUpdateRequest, err := s.api.ResourceNewRequest(r.Context(), view) + unsignedUpdateRequest, err := s.api.FeedsNewRequest(r.Context(), feed) if err != nil { getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("cannot retrieve resource metadata for view=%s: %s", view.Hex(), err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("cannot retrieve feed metadata for feed=%s: %s", feed.Hex(), err), http.StatusNotFound) return } rawResponse, err := unsignedUpdateRequest.MarshalJSON() if err != nil { - RespondError(w, r, fmt.Sprintf("cannot encode unsigned UpdateRequest: %v", err), http.StatusInternalServerError) + RespondError(w, r, fmt.Sprintf("cannot encode unsigned feed update request: %v", err), http.StatusInternalServerError) return } w.Header().Add("Content-type", "application/json") @@ -606,28 +582,28 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *http.Request) { return } - lookupParams := &mru.Query{Feed: *view} + lookupParams := &mru.Query{Feed: *feed} if err = lookupParams.FromValues(r.URL.Query()); err != nil { // parse period, version - RespondError(w, r, fmt.Sprintf("invalid mutable resource request:%s", err), http.StatusBadRequest) + RespondError(w, r, fmt.Sprintf("invalid feed update request:%s", err), http.StatusBadRequest) return } - data, err := s.api.ResourceLookup(r.Context(), lookupParams) + data, err := s.api.FeedsLookup(r.Context(), lookupParams) // any error from the switch statement will end up here if err != nil { - code, err2 := s.translateResourceError(w, r, "mutable resource lookup fail", err) + code, err2 := s.translateFeedError(w, r, "feed lookup fail", err) RespondError(w, r, err2.Error(), code) return } // All ok, serve the retrieved update - log.Debug("Found update", "view", view.Hex(), "ruid", ruid) + log.Debug("Found update", "feed", feed.Hex(), "ruid", ruid) w.Header().Set("Content-Type", api.MimeOctetStream) http.ServeContent(w, r, "", time.Now(), bytes.NewReader(data)) } -func (s *Server) translateResourceError(w http.ResponseWriter, r *http.Request, supErr string, err error) (int, error) { +func (s *Server) 
translateFeedError(w http.ResponseWriter, r *http.Request, supErr string, err error) (int, error) { code := 0 defaultErr := fmt.Errorf("%s: %v", supErr, err) rsrcErr, ok := err.(*mru.Error) diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index 0855d30a2..a7c7e3003 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -58,47 +58,6 @@ func init() { log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) } -func TestResourcePostMode(t *testing.T) { - path := "" - errstr := "resourcePostMode for '%s' should be raw %v frequency %d, was raw %v, frequency %d" - r, f, err := resourcePostMode(path) - if err != nil { - t.Fatal(err) - } else if r || f != 0 { - t.Fatalf(errstr, path, false, 0, r, f) - } - - path = "raw" - r, f, err = resourcePostMode(path) - if err != nil { - t.Fatal(err) - } else if !r || f != 0 { - t.Fatalf(errstr, path, true, 0, r, f) - } - - path = "13" - r, f, err = resourcePostMode(path) - if err != nil { - t.Fatal(err) - } else if r || f == 0 { - t.Fatalf(errstr, path, false, 13, r, f) - } - - path = "raw/13" - r, f, err = resourcePostMode(path) - if err != nil { - t.Fatal(err) - } else if !r || f == 0 { - t.Fatalf(errstr, path, true, 13, r, f) - } - - path = "foo/13" - r, f, err = resourcePostMode(path) - if err == nil { - t.Fatal("resourcePostMode for 'foo/13' should fail, returned error nil") - } -} - func serverFunc(api *api.API) testutil.TestServer { return NewServer(api, "") } @@ -111,12 +70,12 @@ func newTestSigner() (*mru.GenericSigner, error) { return mru.NewGenericSigner(privKey), nil } -// test the transparent resolving of multihash resource types with bzz:// scheme +// test the transparent resolving of multihash-containing feed updates with bzz:// scheme // -// first upload data, and store the multihash to the resulting manifest in a resource update +// first upload data, and store the multihash to the resulting manifest in a feed update // retrieving the update with the multihash should return the manifest pointing directly to the data // and raw retrieve of that hash should return the data -func TestBzzResourceMultihash(t *testing.T) { +func TestBzzFeedMultihash(t *testing.T) { signer, _ := newTestSigner() @@ -154,7 +113,7 @@ func TestBzzResourceMultihash(t *testing.T) { } log.Info("added data", "manifest", string(b), "data", common.ToHex(mh)) - testUrl, err := url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) + testUrl, err := url.Parse(fmt.Sprintf("%s/bzz-feed:/", srv.URL)) if err != nil { t.Fatal(err) } @@ -182,12 +141,12 @@ func TestBzzResourceMultihash(t *testing.T) { t.Fatalf("data %s could not be unmarshaled: %v", b, err) } - correctManifestAddrHex := "6ef40ba1492cf2a029dc9a8b5896c822cf689d3cd010842f4f1744e6db8824bd" + correctManifestAddrHex := "bb056a5264c295c2b0f613c8409b9c87ce9d71576ace02458160df4cc894210b" if rsrcResp.Hex() != correctManifestAddrHex { - t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex()) + t.Fatalf("Response feed manifest address mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex()) } - // get bzz manifest transparent resource resolve + // get bzz manifest transparent feed update resolve testBzzUrl = fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp) resp, err = http.Get(testBzzUrl) if err != nil { @@ -207,7 +166,7 @@ func TestBzzResourceMultihash(t *testing.T) { } // Test Swarm Feeds using the raw update methods -func 
TestBzzResource(t *testing.T) { +func TestBzzFeed(t *testing.T) { srv := testutil.NewTestSwarmServer(t, serverFunc, nil) signer, _ := newTestSigner() @@ -234,8 +193,8 @@ func TestBzzResource(t *testing.T) { t.Fatal(err) } - // creates resource and sets update 1 - testUrl, err := url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) + // creates feed and sets update 1 + testUrl, err := url.Parse(fmt.Sprintf("%s/bzz-feed:/", srv.URL)) if err != nil { t.Fatal(err) } @@ -262,9 +221,9 @@ func TestBzzResource(t *testing.T) { t.Fatalf("data %s could not be unmarshaled: %v", b, err) } - correctManifestAddrHex := "6ef40ba1492cf2a029dc9a8b5896c822cf689d3cd010842f4f1744e6db8824bd" + correctManifestAddrHex := "bb056a5264c295c2b0f613c8409b9c87ce9d71576ace02458160df4cc894210b" if rsrcResp.Hex() != correctManifestAddrHex { - t.Fatalf("Response resource manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex()) + t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex()) } // get the manifest @@ -289,12 +248,12 @@ func TestBzzResource(t *testing.T) { if len(manifest.Entries) != 1 { t.Fatalf("Manifest has %d entries", len(manifest.Entries)) } - correctViewHex := "0x666f6f2e65746800000000000000000000000000000000000000000000000000c96aaa54e2d44c299564da76e1cd3184a2386b8d" - if manifest.Entries[0].ResourceView.Hex() != correctViewHex { - t.Fatalf("Expected manifest Resource View '%s', got '%s'", correctViewHex, manifest.Entries[0].ResourceView.Hex()) + correctFeedHex := "0x666f6f2e65746800000000000000000000000000000000000000000000000000c96aaa54e2d44c299564da76e1cd3184a2386b8d" + if manifest.Entries[0].Feed.Hex() != correctFeedHex { + t.Fatalf("Expected manifest Feed '%s', got '%s'", correctFeedHex, manifest.Entries[0].Feed.Hex()) } - // get bzz manifest transparent resource resolve + // get bzz manifest transparent feed update resolve testBzzUrl := fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp) resp, err = http.Get(testBzzUrl) if err != nil { @@ -302,7 +261,7 @@ func TestBzzResource(t *testing.T) { } defer resp.Body.Close() if resp.StatusCode == http.StatusOK { - t.Fatal("Expected error status since resource is not multihash. Received 200 OK") + t.Fatal("Expected error status since feed update does not contain multihash. 
Received 200 OK") } b, err = ioutil.ReadAll(resp.Body) if err != nil { @@ -310,21 +269,21 @@ func TestBzzResource(t *testing.T) { } // get non-existent name, should fail - testBzzResUrl := fmt.Sprintf("%s/bzz-resource:/bar", srv.URL) + testBzzResUrl := fmt.Sprintf("%s/bzz-feed:/bar", srv.URL) resp, err = http.Get(testBzzResUrl) if err != nil { t.Fatal(err) } if resp.StatusCode != http.StatusNotFound { - t.Fatalf("Expected get non-existent resource to fail with StatusNotFound (404), got %d", resp.StatusCode) + t.Fatalf("Expected get non-existent feed manifest to fail with StatusNotFound (404), got %d", resp.StatusCode) } resp.Body.Close() - // get latest update (1.1) through resource directly + // get latest update through bzz-feed directly log.Info("get update latest = 1.1", "addr", correctManifestAddrHex) - testBzzResUrl = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex) + testBzzResUrl = fmt.Sprintf("%s/bzz-feed:/%s", srv.URL, correctManifestAddrHex) resp, err = http.Get(testBzzResUrl) if err != nil { t.Fatal(err) @@ -346,15 +305,15 @@ func TestBzzResource(t *testing.T) { srv.CurrentTime++ log.Info("update 2") - // 1.- get metadata about this resource - testBzzResUrl = fmt.Sprintf("%s/bzz-resource:/%s/", srv.URL, correctManifestAddrHex) + // 1.- get metadata about this Feed + testBzzResUrl = fmt.Sprintf("%s/bzz-feed:/%s/", srv.URL, correctManifestAddrHex) resp, err = http.Get(testBzzResUrl + "?meta=1") if err != nil { t.Fatal(err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - t.Fatalf("Get resource metadata returned %s", resp.Status) + t.Fatalf("Get feed metadata returned %s", resp.Status) } b, err = ioutil.ReadAll(resp.Body) if err != nil { @@ -362,13 +321,13 @@ func TestBzzResource(t *testing.T) { } updateRequest = &mru.Request{} if err = updateRequest.UnmarshalJSON(b); err != nil { - t.Fatalf("Error decoding resource metadata: %s", err) + t.Fatalf("Error decoding feed metadata: %s", err) } updateRequest.SetData(update2Data) if err = updateRequest.Sign(signer); err != nil { t.Fatal(err) } - testUrl, err = url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) + testUrl, err = url.Parse(fmt.Sprintf("%s/bzz-feed:/", srv.URL)) if err != nil { t.Fatal(err) } @@ -385,9 +344,9 @@ func TestBzzResource(t *testing.T) { t.Fatalf("Update returned %s", resp.Status) } - // get latest update (1.2) through resource directly + // get latest update through bzz-feed directly log.Info("get update 1.2") - testBzzResUrl = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex) + testBzzResUrl = fmt.Sprintf("%s/bzz-feed:/%s", srv.URL, correctManifestAddrHex) resp, err = http.Get(testBzzResUrl) if err != nil { t.Fatal(err) @@ -408,13 +367,13 @@ func TestBzzResource(t *testing.T) { log.Info("get first update in update1Timestamp via direct query") query := mru.NewQuery(&updateRequest.Feed, update1Timestamp, lookup.NoClue) - urlq, err := url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) + urlq, err := url.Parse(fmt.Sprintf("%s/bzz-feed:/", srv.URL)) if err != nil { t.Fatal(err) } values := urlq.Query() - query.AppendValues(values) // this adds view query parameters + query.AppendValues(values) // this adds feed query parameters urlq.RawQuery = values.Encode() resp, err = http.Get(urlq.String()) if err != nil { diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index 9672ca540..f41a823bd 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -35,8 +35,8 @@ import ( ) const ( - ManifestType = "application/bzz-manifest+json" - 
ResourceContentType = "application/bzz-resource" + ManifestType = "application/bzz-manifest+json" + FeedContentType = "application/bzz-feed" manifestSizeLimit = 5 * 1024 * 1024 ) @@ -48,15 +48,15 @@ type Manifest struct { // ManifestEntry represents an entry in a swarm manifest type ManifestEntry struct { - Hash string `json:"hash,omitempty"` - Path string `json:"path,omitempty"` - ContentType string `json:"contentType,omitempty"` - Mode int64 `json:"mode,omitempty"` - Size int64 `json:"size,omitempty"` - ModTime time.Time `json:"mod_time,omitempty"` - Status int `json:"status,omitempty"` - Access *AccessEntry `json:"access,omitempty"` - ResourceView *mru.Feed `json:"resourceView,omitempty"` + Hash string `json:"hash,omitempty"` + Path string `json:"path,omitempty"` + ContentType string `json:"contentType,omitempty"` + Mode int64 `json:"mode,omitempty"` + Size int64 `json:"size,omitempty"` + ModTime time.Time `json:"mod_time,omitempty"` + Status int `json:"status,omitempty"` + Access *AccessEntry `json:"access,omitempty"` + Feed *mru.Feed `json:"feed,omitempty"` } // ManifestList represents the result of listing files in a manifest @@ -80,13 +80,13 @@ func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, return addr, err } -// Manifest hack for supporting Mutable Resource Updates from the bzz: scheme +// Manifest hack for supporting Feeds from the bzz: scheme // see swarm/api/api.go:API.Get() for more information -func (a *API) NewResourceManifest(ctx context.Context, view *mru.Feed) (storage.Address, error) { +func (a *API) NewFeedManifest(ctx context.Context, feed *mru.Feed) (storage.Address, error) { var manifest Manifest entry := ManifestEntry{ - ResourceView: view, - ContentType: ResourceContentType, + Feed: feed, + ContentType: FeedContentType, } manifest.Entries = append(manifest.Entries, entry) data, err := json.Marshal(&manifest) diff --git a/swarm/api/uri.go b/swarm/api/uri.go index 808517088..09cfa4502 100644 --- a/swarm/api/uri.go +++ b/swarm/api/uri.go @@ -86,7 +86,7 @@ func Parse(rawuri string) (*URI, error) { // check the scheme is valid switch uri.Scheme { - case "bzz", "bzz-raw", "bzz-immutable", "bzz-list", "bzz-hash", "bzz-resource": + case "bzz", "bzz-raw", "bzz-immutable", "bzz-list", "bzz-hash", "bzz-feed": default: return nil, fmt.Errorf("unknown scheme %q", u.Scheme) } @@ -108,8 +108,8 @@ func Parse(rawuri string) (*URI, error) { } return uri, nil } -func (u *URI) Resource() bool { - return u.Scheme == "bzz-resource" +func (u *URI) Feed() bool { + return u.Scheme == "bzz-feed" } func (u *URI) Raw() bool { diff --git a/swarm/network/README.md b/swarm/network/README.md index 0955fe94e..33d215c4b 100644 --- a/swarm/network/README.md +++ b/swarm/network/README.md @@ -57,7 +57,7 @@ receipts for a deleted chunk easily to refute their challenge. - syncing should be resilient to cut connections, metadata should be persisted that keep track of syncing state across sessions, historical syncing state should survive restart - extra data structures to support syncing should be kept at minimum -- syncing is organized separately for chunk types (resource update v content chunk) +- syncing is organized separately for chunk types (Swarm Feed Updates v regular content chunk) - various types of streams should have common logic abstracted Syncing is now entirely mediated by the localstore, ie., no processes or memory leaks due to network contention. 
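
For reference, the manifest-less retrieval path documented in HandleGetFeed above can be exercised with a short client-side sketch. The snippet below is a minimal illustration, not part of the patch series: it assumes a local gateway at http://localhost:8500 and uses a placeholder topic name and user address, mirroring the direct-query flow used by TestBzzFeed further down (mru.NewQueryLatest plus Query.AppendValues on the bzz-feed:/ endpoint), so treat it as an approximation of usage at this point of the rename (the mru package is moved to storage/feeds in the final commit below).

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

func main() {
	// Identify the feed by topic + user, the same values HandleGetFeed
	// accepts as ?topic=...&user=... query parameters.
	topic, err := mru.NewTopic("foo.eth", nil) // placeholder topic name
	if err != nil {
		log.Fatal(err)
	}
	feed := &mru.Feed{
		Topic: topic,
		User:  common.HexToAddress("0xc96aaa54e2d44c299564da76e1cd3184a2386b8d"), // placeholder user address
	}

	// Ask for the latest update, with no lookup hint.
	query := mru.NewQueryLatest(feed, lookup.NoClue)

	// Build a manifest-less bzz-feed:/ request against an assumed local gateway.
	u, err := url.Parse("http://localhost:8500/bzz-feed:/")
	if err != nil {
		log.Fatal(err)
	}
	values := u.Query()
	query.AppendValues(values) // serializes topic, user and lookup hints into the query string
	u.RawQuery = values.Encode()

	resp, err := http.Get(u.String())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("latest feed update payload: %x\n", data)
}

Retrieving through a feed manifest instead only requires substituting the manifest address (or an ENS name resolving to it) for the query parameters, i.e. GET /bzz-feed:/<manifest address>, as described in the HandleGetFeed comment above.
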
diff --git a/swarm/storage/localstore_test.go b/swarm/storage/localstore_test.go index 814d270d3..b8eea4350 100644 --- a/swarm/storage/localstore_test.go +++ b/swarm/storage/localstore_test.go @@ -30,8 +30,8 @@ var ( ) // tests that the content address validator correctly checks the data -// tests that resource update chunks are passed through content address validator -// the test checking the resouce update validator internal correctness is found in resource_test.go +// tests that Feed update chunks are passed through content address validator +// the test checking the resouce update validator internal correctness is found in storage/feeds/handler_test.go func TestValidator(t *testing.T) { // set up localstore datadir, err := ioutil.TempDir("", "storage-testvalidator") diff --git a/swarm/storage/mru/doc.go b/swarm/storage/mru/doc.go index 2cf2d3757..7ffd4a3c6 100644 --- a/swarm/storage/mru/doc.go +++ b/swarm/storage/mru/doc.go @@ -26,7 +26,7 @@ The Feed Update data is: updatedata = Feed|Epoch|data The full update data that goes in the chunk payload is: -resourcedata|sign(resourcedata) +updatedata|sign(updatedata) Structure Summary: diff --git a/swarm/storage/mru/error.go b/swarm/storage/mru/error.go index 714426449..452cc80f0 100644 --- a/swarm/storage/mru/error.go +++ b/swarm/storage/mru/error.go @@ -35,7 +35,7 @@ const ( ErrCnt ) -// Error is a the typed error object used for Mutable Resources +// Error is a the typed error object used for Swarm Feeds type Error struct { code int err string diff --git a/swarm/storage/mru/handler.go b/swarm/storage/mru/handler.go index 3ddeaafae..cc0da7df9 100644 --- a/swarm/storage/mru/handler.go +++ b/swarm/storage/mru/handler.go @@ -14,8 +14,8 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// Handler is the API for Mutable Resources -// It enables creating, updating, syncing and retrieving resources and their update data +// Handler is the API for Feeds +// It enables creating, updating, syncing and retrieving feed updates and their data package mru import ( @@ -265,7 +265,7 @@ func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Ad // send the chunk h.chunkStore.Put(ctx, chunk) log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data()) - // update our resources map cache entry if the new update is older than the one we have, if we have it. + // update our feed updates map cache entry if the new update is older than the one we have, if we have it. 
if feedUpdate != nil && r.Epoch.After(feedUpdate.Epoch) { feedUpdate.Epoch = r.Epoch feedUpdate.data = make([]byte, len(r.data)) diff --git a/swarm/storage/mru/handler_test.go b/swarm/storage/mru/handler_test.go index 3bf2bda8b..b66ff0c80 100644 --- a/swarm/storage/mru/handler_test.go +++ b/swarm/storage/mru/handler_test.go @@ -396,7 +396,7 @@ func TestValidatorInStore(t *testing.T) { signer := newAliceSigner() // set up localstore - datadir, err := ioutil.TempDir("", "storage-testresourcevalidator") + datadir, err := ioutil.TempDir("", "storage-testfeedsvalidator") if err != nil { t.Fatal(err) } @@ -463,7 +463,7 @@ func TestValidatorInStore(t *testing.T) { } } -// create rpc and resourcehandler +// create rpc and Feeds Handler func setupTest(timeProvider timestampProvider, signer Signer) (fh *TestHandler, datadir string, teardown func(), err error) { var fsClean func() diff --git a/swarm/storage/mru/request_test.go b/swarm/storage/mru/request_test.go index 33e975756..515de651c 100644 --- a/swarm/storage/mru/request_test.go +++ b/swarm/storage/mru/request_test.go @@ -228,7 +228,7 @@ func TestUpdateChunkSerializationErrorChecking(t *testing.T) { var recovered Request recovered.fromChunk(chunk.Address(), chunk.Data()) if !reflect.DeepEqual(recovered, r) { - t.Fatal("Expected recovered SignedResource update to equal the original one") + t.Fatal("Expected recovered Request update to equal the original one") } } @@ -248,7 +248,7 @@ func TestReverse(t *testing.T) { // signer containing private key signer := newAliceSigner() - // set up rpc and create resourcehandler + // set up rpc and create Feeds handler _, _, teardownTest, err := setupTest(timeProvider, signer) if err != nil { t.Fatal(err) diff --git a/swarm/swarm.go b/swarm/swarm.go index 0cd56d4eb..4d9bf724f 100644 --- a/swarm/swarm.go +++ b/swarm/swarm.go @@ -186,15 +186,15 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e // Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage self.fileStore = storage.NewFileStore(self.netStore, self.config.FileStoreParams) - var resourceHandler *mru.Handler - rhparams := &mru.HandlerParams{} + var feedsHandler *mru.Handler + fhParams := &mru.HandlerParams{} - resourceHandler = mru.NewHandler(rhparams) - resourceHandler.SetStore(self.netStore) + feedsHandler = mru.NewHandler(fhParams) + feedsHandler.SetStore(self.netStore) lstore.Validators = []storage.ChunkValidator{ storage.NewContentAddressValidator(storage.MakeHashFunc(storage.DefaultHash)), - resourceHandler, + feedsHandler, } log.Debug("Setup local storage") @@ -210,7 +210,7 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e pss.SetHandshakeController(self.ps, pss.NewHandshakeParams()) } - self.api = api.NewAPI(self.fileStore, self.dns, resourceHandler, self.privateKey) + self.api = api.NewAPI(self.fileStore, self.dns, feedsHandler, self.privateKey) self.sfs = fuse.NewSwarmFS(self.api) log.Debug("Initialized FUSE filesystem") diff --git a/swarm/testutil/http.go b/swarm/testutil/http.go index 2309c39f0..2162da880 100644 --- a/swarm/testutil/http.go +++ b/swarm/testutil/http.go @@ -48,14 +48,14 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso } fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams()) - // mutable resources test setup - resourceDir, err := ioutil.TempDir("", "swarm-resource-test") + // Swarm Feeds test setup + feedsDir, err := ioutil.TempDir("", "swarm-feeds-test") if err != nil { t.Fatal(err) } 
rhparams := &mru.HandlerParams{} - rh, err := mru.NewTestHandler(resourceDir, rhparams) + rh, err := mru.NewTestHandler(feedsDir, rhparams) if err != nil { t.Fatal(err) } @@ -71,7 +71,7 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso srv.Close() rh.Close() os.RemoveAll(dir) - os.RemoveAll(resourceDir) + os.RemoveAll(feedsDir) }, CurrentTime: 42, } -- cgit From b6ccc06cdaac80d09da17c25b2f450cd1f1b7914 Mon Sep 17 00:00:00 2001 From: Javier Peletier Date: Sun, 30 Sep 2018 09:51:11 +0200 Subject: swarm/storage/feeds: Final package rename and moved files --- swarm/OWNERS | 2 +- swarm/api/api.go | 27 +- swarm/api/client/client.go | 18 +- swarm/api/client/client_test.go | 22 +- swarm/api/http/server.go | 10 +- swarm/api/http/server_test.go | 20 +- swarm/api/manifest.go | 6 +- swarm/pss/notify/notify.go | 2 +- swarm/storage/feeds/binaryserializer.go | 44 +++ swarm/storage/feeds/binaryserializer_test.go | 98 +++++ swarm/storage/feeds/cacheentry.go | 48 +++ swarm/storage/feeds/doc.go | 43 +++ swarm/storage/feeds/error.go | 73 ++++ swarm/storage/feeds/feed.go | 125 +++++++ swarm/storage/feeds/feed_test.go | 36 ++ swarm/storage/feeds/handler.go | 295 +++++++++++++++ swarm/storage/feeds/handler_test.go | 520 +++++++++++++++++++++++++++ swarm/storage/feeds/id.go | 123 +++++++ swarm/storage/feeds/id_test.go | 28 ++ swarm/storage/feeds/lookup/epoch.go | 91 +++++ swarm/storage/feeds/lookup/epoch_test.go | 57 +++ swarm/storage/feeds/lookup/lookup.go | 180 ++++++++++ swarm/storage/feeds/lookup/lookup_test.go | 414 +++++++++++++++++++++ swarm/storage/feeds/query.go | 78 ++++ swarm/storage/feeds/query_test.go | 38 ++ swarm/storage/feeds/request.go | 284 +++++++++++++++ swarm/storage/feeds/request_test.go | 312 ++++++++++++++++ swarm/storage/feeds/sign.go | 75 ++++ swarm/storage/feeds/testutil.go | 71 ++++ swarm/storage/feeds/timestampprovider.go | 84 +++++ swarm/storage/feeds/topic.go | 105 ++++++ swarm/storage/feeds/topic_test.go | 50 +++ swarm/storage/feeds/update.go | 132 +++++++ swarm/storage/feeds/update_test.go | 50 +++ swarm/storage/mru/binaryserializer.go | 44 --- swarm/storage/mru/binaryserializer_test.go | 98 ----- swarm/storage/mru/cacheentry.go | 48 --- swarm/storage/mru/doc.go | 43 --- swarm/storage/mru/error.go | 73 ---- swarm/storage/mru/handler.go | 295 --------------- swarm/storage/mru/handler_test.go | 520 --------------------------- swarm/storage/mru/id.go | 123 ------- swarm/storage/mru/id_test.go | 28 -- swarm/storage/mru/lookup/epoch.go | 91 ----- swarm/storage/mru/lookup/epoch_test.go | 57 --- swarm/storage/mru/lookup/lookup.go | 180 ---------- swarm/storage/mru/lookup/lookup_test.go | 414 --------------------- swarm/storage/mru/query.go | 78 ---- swarm/storage/mru/query_test.go | 38 -- swarm/storage/mru/request.go | 284 --------------- swarm/storage/mru/request_test.go | 312 ---------------- swarm/storage/mru/sign.go | 75 ---- swarm/storage/mru/testutil.go | 71 ---- swarm/storage/mru/timestampprovider.go | 84 ----- swarm/storage/mru/topic.go | 105 ------ swarm/storage/mru/topic_test.go | 50 --- swarm/storage/mru/update.go | 132 ------- swarm/storage/mru/update_test.go | 50 --- swarm/storage/mru/view.go | 125 ------- swarm/storage/mru/view_test.go | 36 -- swarm/swarm.go | 8 +- swarm/testutil/http.go | 12 +- 62 files changed, 3518 insertions(+), 3517 deletions(-) create mode 100644 swarm/storage/feeds/binaryserializer.go create mode 100644 swarm/storage/feeds/binaryserializer_test.go create mode 100644 swarm/storage/feeds/cacheentry.go create mode 
100644 swarm/storage/feeds/doc.go create mode 100644 swarm/storage/feeds/error.go create mode 100644 swarm/storage/feeds/feed.go create mode 100644 swarm/storage/feeds/feed_test.go create mode 100644 swarm/storage/feeds/handler.go create mode 100644 swarm/storage/feeds/handler_test.go create mode 100644 swarm/storage/feeds/id.go create mode 100644 swarm/storage/feeds/id_test.go create mode 100644 swarm/storage/feeds/lookup/epoch.go create mode 100644 swarm/storage/feeds/lookup/epoch_test.go create mode 100644 swarm/storage/feeds/lookup/lookup.go create mode 100644 swarm/storage/feeds/lookup/lookup_test.go create mode 100644 swarm/storage/feeds/query.go create mode 100644 swarm/storage/feeds/query_test.go create mode 100644 swarm/storage/feeds/request.go create mode 100644 swarm/storage/feeds/request_test.go create mode 100644 swarm/storage/feeds/sign.go create mode 100644 swarm/storage/feeds/testutil.go create mode 100644 swarm/storage/feeds/timestampprovider.go create mode 100644 swarm/storage/feeds/topic.go create mode 100644 swarm/storage/feeds/topic_test.go create mode 100644 swarm/storage/feeds/update.go create mode 100644 swarm/storage/feeds/update_test.go delete mode 100644 swarm/storage/mru/binaryserializer.go delete mode 100644 swarm/storage/mru/binaryserializer_test.go delete mode 100644 swarm/storage/mru/cacheentry.go delete mode 100644 swarm/storage/mru/doc.go delete mode 100644 swarm/storage/mru/error.go delete mode 100644 swarm/storage/mru/handler.go delete mode 100644 swarm/storage/mru/handler_test.go delete mode 100644 swarm/storage/mru/id.go delete mode 100644 swarm/storage/mru/id_test.go delete mode 100644 swarm/storage/mru/lookup/epoch.go delete mode 100644 swarm/storage/mru/lookup/epoch_test.go delete mode 100644 swarm/storage/mru/lookup/lookup.go delete mode 100644 swarm/storage/mru/lookup/lookup_test.go delete mode 100644 swarm/storage/mru/query.go delete mode 100644 swarm/storage/mru/query_test.go delete mode 100644 swarm/storage/mru/request.go delete mode 100644 swarm/storage/mru/request_test.go delete mode 100644 swarm/storage/mru/sign.go delete mode 100644 swarm/storage/mru/testutil.go delete mode 100644 swarm/storage/mru/timestampprovider.go delete mode 100644 swarm/storage/mru/topic.go delete mode 100644 swarm/storage/mru/topic_test.go delete mode 100644 swarm/storage/mru/update.go delete mode 100644 swarm/storage/mru/update_test.go delete mode 100644 swarm/storage/mru/view.go delete mode 100644 swarm/storage/mru/view_test.go (limited to 'swarm') diff --git a/swarm/OWNERS b/swarm/OWNERS index 774cd7db9..4681ed5ef 100644 --- a/swarm/OWNERS +++ b/swarm/OWNERS @@ -22,5 +22,5 @@ swarm ├── storage ─────────────── ethersphere │ ├── encryption ──────── @gbalint, @zelig, @nagydani │ ├── mock ────────────── @janos -│ └── mru ─────────────── @nolash +│ └── feeds ───────────── @nolash └── testutil ────────────── @lmars \ No newline at end of file diff --git a/swarm/api/api.go b/swarm/api/api.go index 9b4571bee..c61bd8064 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -45,9 +45,10 @@ import ( "github.com/ethereum/go-ethereum/swarm/multihash" "github.com/ethereum/go-ethereum/swarm/spancontext" "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/mru" - "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" - "github.com/opentracing/opentracing-go" + "github.com/ethereum/go-ethereum/swarm/storage/feeds" + "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" + + opentracing "github.com/opentracing/opentracing-go" 
) var ( @@ -235,14 +236,14 @@ on top of the FileStore it is the public interface of the FileStore which is included in the ethereum stack */ type API struct { - feeds *mru.Handler + feeds *feeds.Handler fileStore *storage.FileStore dns Resolver Decryptor func(context.Context, string) DecryptFunc } // NewAPI the api constructor initialises a new API instance. -func NewAPI(fileStore *storage.FileStore, dns Resolver, feedsHandler *mru.Handler, pk *ecdsa.PrivateKey) (self *API) { +func NewAPI(fileStore *storage.FileStore, dns Resolver, feedsHandler *feeds.Handler, pk *ecdsa.PrivateKey) (self *API) { self = &API{ fileStore: fileStore, dns: dns, @@ -408,7 +409,7 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage if entry.Feed == nil { return reader, mimeType, status, nil, fmt.Errorf("Cannot decode Feed in manifest") } - _, err := a.feeds.Lookup(ctx, mru.NewQueryLatest(entry.Feed, lookup.NoClue)) + _, err := a.feeds.Lookup(ctx, feeds.NewQueryLatest(entry.Feed, lookup.NoClue)) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound @@ -957,7 +958,7 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver } // FeedsLookup finds Swarm Feeds Updates at specific points in time, or the latest update -func (a *API) FeedsLookup(ctx context.Context, query *mru.Query) ([]byte, error) { +func (a *API) FeedsLookup(ctx context.Context, query *feeds.Query) ([]byte, error) { _, err := a.feeds.Lookup(ctx, query) if err != nil { return nil, err @@ -971,12 +972,12 @@ func (a *API) FeedsLookup(ctx context.Context, query *mru.Query) ([]byte, error) } // FeedsNewRequest creates a Request object to update a specific Feed -func (a *API) FeedsNewRequest(ctx context.Context, feed *mru.Feed) (*mru.Request, error) { +func (a *API) FeedsNewRequest(ctx context.Context, feed *feeds.Feed) (*feeds.Request, error) { return a.feeds.NewRequest(ctx, feed) } // FeedsUpdate publishes a new update on the given Feed -func (a *API) FeedsUpdate(ctx context.Context, request *mru.Request) (storage.Address, error) { +func (a *API) FeedsUpdate(ctx context.Context, request *feeds.Request) (storage.Address, error) { return a.feeds.Update(ctx, request) } @@ -992,7 +993,7 @@ var ErrCannotLoadFeedManifest = errors.New("Cannot load feed manifest") var ErrNotAFeedManifest = errors.New("Not a feed manifest") // ResolveFeedManifest retrieves the Feed manifest for the given address, and returns the referenced Feed. -func (a *API) ResolveFeedManifest(ctx context.Context, addr storage.Address) (*mru.Feed, error) { +func (a *API) ResolveFeedManifest(ctx context.Context, addr storage.Address) (*feeds.Feed, error) { trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt) if err != nil { return nil, ErrCannotLoadFeedManifest @@ -1015,8 +1016,8 @@ var ErrCannotResolveFeed = errors.New("Cannot resolve Feed") // ResolveFeed attempts to extract Feed information out of the manifest, if provided // If not, it attempts to extract the Feed out of a set of key-value pairs -func (a *API) ResolveFeed(ctx context.Context, uri *URI, values mru.Values) (*mru.Feed, error) { - var feed *mru.Feed +func (a *API) ResolveFeed(ctx context.Context, uri *URI, values feeds.Values) (*feeds.Feed, error) { + var feed *feeds.Feed var err error if uri.Addr != "" { // resolve the content key. 
@@ -1035,7 +1036,7 @@ func (a *API) ResolveFeed(ctx context.Context, uri *URI, values mru.Values) (*mr } log.Debug("handle.get.feed: resolved", "manifestkey", manifestAddr, "feed", feed.Hex()) } else { - var v mru.Feed + var v feeds.Feed if err := v.FromValues(values); err != nil { return nil, ErrCannotResolveFeed diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go index 76ada1297..6b4614581 100644 --- a/swarm/api/client/client.go +++ b/swarm/api/client/client.go @@ -35,7 +35,7 @@ import ( "strings" "github.com/ethereum/go-ethereum/swarm/api" - "github.com/ethereum/go-ethereum/swarm/storage/mru" + "github.com/ethereum/go-ethereum/swarm/storage/feeds" ) var ( @@ -608,7 +608,7 @@ var ErrNoFeedUpdatesFound = errors.New("No updates found for this feed") // data // Returns the resulting Feed Manifest address that you can use to include in an ENS Resolver (setContent) // or reference future updates (Client.UpdateFeed) -func (c *Client) CreateFeedWithManifest(request *mru.Request) (string, error) { +func (c *Client) CreateFeedWithManifest(request *feeds.Request) (string, error) { responseStream, err := c.updateFeed(request, true) if err != nil { return "", err @@ -628,12 +628,12 @@ func (c *Client) CreateFeedWithManifest(request *mru.Request) (string, error) { } // UpdateFeed allows you to set a new version of your content -func (c *Client) UpdateFeed(request *mru.Request) error { +func (c *Client) UpdateFeed(request *feeds.Request) error { _, err := c.updateFeed(request, false) return err } -func (c *Client) updateFeed(request *mru.Request, createManifest bool) (io.ReadCloser, error) { +func (c *Client) updateFeed(request *feeds.Request, createManifest bool) (io.ReadCloser, error) { URL, err := url.Parse(c.Gateway) if err != nil { return nil, err @@ -662,7 +662,7 @@ func (c *Client) updateFeed(request *mru.Request, createManifest bool) (io.ReadC // QueryFeed returns a byte stream with the raw content of the feed update // manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver // points to that address -func (c *Client) QueryFeed(query *mru.Query, manifestAddressOrDomain string) (io.ReadCloser, error) { +func (c *Client) QueryFeed(query *feeds.Query, manifestAddressOrDomain string) (io.ReadCloser, error) { return c.queryFeed(query, manifestAddressOrDomain, false) } @@ -670,7 +670,7 @@ func (c *Client) QueryFeed(query *mru.Query, manifestAddressOrDomain string) (io // manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver // points to that address // meta set to true will instruct the node return Feed metainformation instead -func (c *Client) queryFeed(query *mru.Query, manifestAddressOrDomain string, meta bool) (io.ReadCloser, error) { +func (c *Client) queryFeed(query *feeds.Query, manifestAddressOrDomain string, meta bool) (io.ReadCloser, error) { URL, err := url.Parse(c.Gateway) if err != nil { return nil, err @@ -706,10 +706,10 @@ func (c *Client) queryFeed(query *mru.Query, manifestAddressOrDomain string, met return res.Body, nil } -// GetFeedMetadata returns a structure that describes the referenced Feed status +// GetFeedRequest returns a structure that describes the referenced Feed status // manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver // points to that address -func (c *Client) GetFeedMetadata(query *mru.Query, manifestAddressOrDomain string) (*mru.Request, error) { +func (c *Client) 
GetFeedRequest(query *feeds.Query, manifestAddressOrDomain string) (*feeds.Request, error) { responseStream, err := c.queryFeed(query, manifestAddressOrDomain, true) if err != nil { @@ -722,7 +722,7 @@ func (c *Client) GetFeedMetadata(query *mru.Query, manifestAddressOrDomain strin return nil, err } - var metadata mru.Request + var metadata feeds.Request if err := metadata.UnmarshalJSON(body); err != nil { return nil, err } diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go index fbc49a6e1..4fad7fbc7 100644 --- a/swarm/api/client/client_test.go +++ b/swarm/api/client/client_test.go @@ -25,14 +25,14 @@ import ( "sort" "testing" - "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" + "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/swarm/api" swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http" "github.com/ethereum/go-ethereum/swarm/multihash" - "github.com/ethereum/go-ethereum/swarm/storage/mru" + "github.com/ethereum/go-ethereum/swarm/storage/feeds" "github.com/ethereum/go-ethereum/swarm/testutil" ) @@ -361,12 +361,12 @@ func TestClientMultipartUpload(t *testing.T) { } } -func newTestSigner() (*mru.GenericSigner, error) { +func newTestSigner() (*feeds.GenericSigner, error) { privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") if err != nil { return nil, err } - return mru.NewGenericSigner(privKey), nil + return feeds.NewGenericSigner(privKey), nil } // test the transparent resolving of multihash feed updates with bzz:// scheme @@ -394,9 +394,9 @@ func TestClientCreateFeedMultihash(t *testing.T) { mh := multihash.ToMultihash(s) // our feed topic - topic, _ := mru.NewTopic("foo.eth", nil) + topic, _ := feeds.NewTopic("foo.eth", nil) - createRequest := mru.NewFirstRequest(topic) + createRequest := feeds.NewFirstRequest(topic) createRequest.SetData(mh) if err := createRequest.Sign(signer); err != nil { @@ -448,8 +448,8 @@ func TestClientCreateUpdateFeed(t *testing.T) { databytes := []byte("En un lugar de La Mancha, de cuyo nombre no quiero acordarme...") // our feed topic name - topic, _ := mru.NewTopic("El Quijote", nil) - createRequest := mru.NewFirstRequest(topic) + topic, _ := feeds.NewTopic("El Quijote", nil) + createRequest := feeds.NewFirstRequest(topic) createRequest.SetData(databytes) if err := createRequest.Sign(signer); err != nil { @@ -479,7 +479,7 @@ func TestClientCreateUpdateFeed(t *testing.T) { // define different data databytes = []byte("... 
no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero ...") - updateRequest, err := client.GetFeedMetadata(nil, correctManifestAddrHex) + updateRequest, err := client.GetFeedRequest(nil, correctManifestAddrHex) if err != nil { t.Fatalf("Error retrieving update request template: %s", err) } @@ -508,12 +508,12 @@ func TestClientCreateUpdateFeed(t *testing.T) { // now try retrieving feed updates without a manifest - feed := &mru.Feed{ + feed := &feeds.Feed{ Topic: topic, User: signer.Address(), } - lookupParams := mru.NewQueryLatest(feed, lookup.NoClue) + lookupParams := feeds.NewQueryLatest(feed, lookup.NoClue) reader, err = client.QueryFeed(lookupParams, "") if err != nil { t.Fatalf("Error retrieving feed updates: %s", err) diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index c5b3b564c..84e06d09f 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -40,7 +40,7 @@ import ( "github.com/ethereum/go-ethereum/swarm/api" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/mru" + "github.com/ethereum/go-ethereum/swarm/storage/feeds" "github.com/rs/cors" ) @@ -466,7 +466,7 @@ func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) { log.Debug("handle.post.feed", "ruid", ruid) var err error - // Creation and update must send mru.updateRequestJSON JSON structure + // Creation and update must send feeds.updateRequestJSON JSON structure body, err := ioutil.ReadAll(r.Body) if err != nil { RespondError(w, r, err.Error(), http.StatusInternalServerError) @@ -484,7 +484,7 @@ func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) { return } - var updateRequest mru.Request + var updateRequest feeds.Request updateRequest.Feed = *feed query := r.URL.Query() @@ -582,7 +582,7 @@ func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) { return } - lookupParams := &mru.Query{Feed: *feed} + lookupParams := &feeds.Query{Feed: *feed} if err = lookupParams.FromValues(r.URL.Query()); err != nil { // parse period, version RespondError(w, r, fmt.Sprintf("invalid feed update request:%s", err), http.StatusBadRequest) return @@ -606,7 +606,7 @@ func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) { func (s *Server) translateFeedError(w http.ResponseWriter, r *http.Request, supErr string, err error) (int, error) { code := 0 defaultErr := fmt.Errorf("%s: %v", supErr, err) - rsrcErr, ok := err.(*mru.Error) + rsrcErr, ok := err.(*feeds.Error) if !ok && rsrcErr != nil { code = rsrcErr.Code() } diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index a7c7e3003..16813cab5 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -38,7 +38,7 @@ import ( "testing" "time" - "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" + "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -48,7 +48,7 @@ import ( swarm "github.com/ethereum/go-ethereum/swarm/api/client" "github.com/ethereum/go-ethereum/swarm/multihash" "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/mru" + "github.com/ethereum/go-ethereum/swarm/storage/feeds" "github.com/ethereum/go-ethereum/swarm/testutil" ) @@ -62,12 +62,12 @@ func serverFunc(api *api.API) testutil.TestServer { return NewServer(api, "") } -func newTestSigner() (*mru.GenericSigner, error) { +func newTestSigner() 
(*feeds.GenericSigner, error) { privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") if err != nil { return nil, err } - return mru.NewGenericSigner(privKey), nil + return feeds.NewGenericSigner(privKey), nil } // test the transparent resolving of multihash-containing feed updates with bzz:// scheme @@ -103,8 +103,8 @@ func TestBzzFeedMultihash(t *testing.T) { log.Info("added data", "manifest", string(b), "data", common.ToHex(mh)) - topic, _ := mru.NewTopic("foo.eth", nil) - updateRequest := mru.NewFirstRequest(topic) + topic, _ := feeds.NewTopic("foo.eth", nil) + updateRequest := feeds.NewFirstRequest(topic) updateRequest.SetData(mh) @@ -182,8 +182,8 @@ func TestBzzFeed(t *testing.T) { //data for update 2 update2Data := []byte("foo") - topic, _ := mru.NewTopic("foo.eth", nil) - updateRequest := mru.NewFirstRequest(topic) + topic, _ := feeds.NewTopic("foo.eth", nil) + updateRequest := feeds.NewFirstRequest(topic) if err != nil { t.Fatal(err) } @@ -319,7 +319,7 @@ func TestBzzFeed(t *testing.T) { if err != nil { t.Fatal(err) } - updateRequest = &mru.Request{} + updateRequest = &feeds.Request{} if err = updateRequest.UnmarshalJSON(b); err != nil { t.Fatalf("Error decoding feed metadata: %s", err) } @@ -365,7 +365,7 @@ func TestBzzFeed(t *testing.T) { // test manifest-less queries log.Info("get first update in update1Timestamp via direct query") - query := mru.NewQuery(&updateRequest.Feed, update1Timestamp, lookup.NoClue) + query := feeds.NewQuery(&updateRequest.Feed, update1Timestamp, lookup.NoClue) urlq, err := url.Parse(fmt.Sprintf("%s/bzz-feed:/", srv.URL)) if err != nil { diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index f41a823bd..9ac3214a5 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -27,7 +27,7 @@ import ( "strings" "time" - "github.com/ethereum/go-ethereum/swarm/storage/mru" + "github.com/ethereum/go-ethereum/swarm/storage/feeds" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/swarm/log" @@ -56,7 +56,7 @@ type ManifestEntry struct { ModTime time.Time `json:"mod_time,omitempty"` Status int `json:"status,omitempty"` Access *AccessEntry `json:"access,omitempty"` - Feed *mru.Feed `json:"feed,omitempty"` + Feed *feeds.Feed `json:"feed,omitempty"` } // ManifestList represents the result of listing files in a manifest @@ -82,7 +82,7 @@ func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, // Manifest hack for supporting Feeds from the bzz: scheme // see swarm/api/api.go:API.Get() for more information -func (a *API) NewFeedManifest(ctx context.Context, feed *mru.Feed) (storage.Address, error) { +func (a *API) NewFeedManifest(ctx context.Context, feed *feeds.Feed) (storage.Address, error) { var manifest Manifest entry := ManifestEntry{ Feed: feed, diff --git a/swarm/pss/notify/notify.go b/swarm/pss/notify/notify.go index 723092c32..01ecb4ff9 100644 --- a/swarm/pss/notify/notify.go +++ b/swarm/pss/notify/notify.go @@ -317,7 +317,7 @@ func (c *Controller) handleStartMsg(msg *Msg, keyid string) (err error) { return err } - // TODO this is set to zero-length byte pending decision on protocol for initial message, whether it should include message or not, and how to trigger the initial message so that current state of MRU is sent upon subscription + // TODO this is set to zero-length byte pending decision on protocol for initial message, whether it should include message or not, and how to trigger the initial message so that current state of Swarm Feed is sent 
upon subscription notify := []byte{} replyMsg := NewMsg(MsgCodeNotifyWithKey, msg.namestring, make([]byte, len(notify)+symKeyLength)) copy(replyMsg.Payload, notify) diff --git a/swarm/storage/feeds/binaryserializer.go b/swarm/storage/feeds/binaryserializer.go new file mode 100644 index 000000000..ce146571b --- /dev/null +++ b/swarm/storage/feeds/binaryserializer.go @@ -0,0 +1,44 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feeds + +import "github.com/ethereum/go-ethereum/common/hexutil" + +type binarySerializer interface { + binaryPut(serializedData []byte) error + binaryLength() int + binaryGet(serializedData []byte) error +} + +// Values interface represents a string key-value store +// useful for building query strings +type Values interface { + Get(key string) string + Set(key, value string) +} + +type valueSerializer interface { + FromValues(values Values) error + AppendValues(values Values) +} + +// Hex serializes the structure and converts it to a hex string +func Hex(bin binarySerializer) string { + b := make([]byte, bin.binaryLength()) + bin.binaryPut(b) + return hexutil.Encode(b) +} diff --git a/swarm/storage/feeds/binaryserializer_test.go b/swarm/storage/feeds/binaryserializer_test.go new file mode 100644 index 000000000..0c81e7f18 --- /dev/null +++ b/swarm/storage/feeds/binaryserializer_test.go @@ -0,0 +1,98 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package feeds + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +// KV mocks a key value store +type KV map[string]string + +func (kv KV) Get(key string) string { + return kv[key] +} +func (kv KV) Set(key, value string) { + kv[key] = value +} + +func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) { + if hexutil.Encode(actualValue) != expectedHex { + t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue)) + } +} + +func testBinarySerializerRecovery(t *testing.T, bin binarySerializer, expectedHex string) { + name := reflect.TypeOf(bin).Elem().Name() + serialized := make([]byte, bin.binaryLength()) + if err := bin.binaryPut(serialized); err != nil { + t.Fatalf("%s.binaryPut error when trying to serialize structure: %s", name, err) + } + + compareByteSliceToExpectedHex(t, name, serialized, expectedHex) + + recovered := reflect.New(reflect.TypeOf(bin).Elem()).Interface().(binarySerializer) + if err := recovered.binaryGet(serialized); err != nil { + t.Fatalf("%s.binaryGet error when trying to deserialize structure: %s", name, err) + } + + if !reflect.DeepEqual(bin, recovered) { + t.Fatalf("Expected that the recovered %s equals the marshalled %s", name, name) + } + + serializedWrongLength := make([]byte, 1) + copy(serializedWrongLength[:], serialized) + if err := recovered.binaryGet(serializedWrongLength); err == nil { + t.Fatalf("Expected %s.binaryGet to fail since data is too small", name) + } +} + +func testBinarySerializerLengthCheck(t *testing.T, bin binarySerializer) { + name := reflect.TypeOf(bin).Elem().Name() + // make a slice that is too small to contain the metadata + serialized := make([]byte, bin.binaryLength()-1) + + if err := bin.binaryPut(serialized); err == nil { + t.Fatalf("Expected %s.binaryPut to fail, since target slice is too small", name) + } +} + +func testValueSerializer(t *testing.T, v valueSerializer, expected KV) { + name := reflect.TypeOf(v).Elem().Name() + kv := make(KV) + + v.AppendValues(kv) + if !reflect.DeepEqual(expected, kv) { + expj, _ := json.Marshal(expected) + gotj, _ := json.Marshal(kv) + t.Fatalf("Expected %s.AppendValues to return %s, got %s", name, string(expj), string(gotj)) + } + + recovered := reflect.New(reflect.TypeOf(v).Elem()).Interface().(valueSerializer) + err := recovered.FromValues(kv) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(recovered, v) { + t.Fatalf("Expected recovered %s to be the same", name) + } +} diff --git a/swarm/storage/feeds/cacheentry.go b/swarm/storage/feeds/cacheentry.go new file mode 100644 index 000000000..7a2f87b56 --- /dev/null +++ b/swarm/storage/feeds/cacheentry.go @@ -0,0 +1,48 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feeds + +import ( + "bytes" + "context" + "time" + + "github.com/ethereum/go-ethereum/swarm/storage" +) + +const ( + hasherCount = 8 + feedsHashAlgorithm = storage.SHA3Hash + defaultRetrieveTimeout = 100 * time.Millisecond +) + +// cacheEntry caches the last known update of a specific Feed. +type cacheEntry struct { + Update + *bytes.Reader + lastKey storage.Address +} + +// implements storage.LazySectionReader +func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) { + return int64(len(r.Update.data)), nil +} + +//returns the Feed's topic +func (r *cacheEntry) Topic() Topic { + return r.Feed.Topic +} diff --git a/swarm/storage/feeds/doc.go b/swarm/storage/feeds/doc.go new file mode 100644 index 000000000..d1edf5d6d --- /dev/null +++ b/swarm/storage/feeds/doc.go @@ -0,0 +1,43 @@ +/* +Package feeds defines Swarm Feeds. + +Swarm Feeds allows a user to build an update feed about a particular topic +without resorting to ENS on each update. +The update scheme is built on swarm chunks with chunk keys following +a predictable, versionable pattern. + +A Feed is tied to a unique identifier that is deterministically generated out of +the chosen topic. + +A Feed is defined as the series of updates of a specific user about a particular topic + +Actual data updates are also made in the form of swarm chunks. The keys +of the updates are the hash of a concatenation of properties as follows: + +updateAddr = H(Feed, Epoch ID) +where H is the SHA3 hash function +Feed is the combination of Topic and the user address +Epoch ID is a time slot. See the lookup package for more information. + +A user looking up a the latest update in a Feed only needs to know the Topic +and the other user's address. + +The Feed Update data is: +updatedata = Feed|Epoch|data + +The full update data that goes in the chunk payload is: +updatedata|sign(updatedata) + +Structure Summary: + +Request: Feed Update with signature + Update: headers + data + Header: Protocol version and reserved for future use placeholders + ID: Information about how to locate a specific update + Feed: Represents a user's series of publications about a specific Topic + Topic: Item that the updates are about + User: User who updates the Feed + Epoch: time slot where the update is stored + +*/ +package feeds diff --git a/swarm/storage/feeds/error.go b/swarm/storage/feeds/error.go new file mode 100644 index 000000000..13266b900 --- /dev/null +++ b/swarm/storage/feeds/error.go @@ -0,0 +1,73 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package feeds + +import ( + "fmt" +) + +const ( + ErrInit = iota + ErrNotFound + ErrIO + ErrUnauthorized + ErrInvalidValue + ErrDataOverflow + ErrNothingToReturn + ErrCorruptData + ErrInvalidSignature + ErrNotSynced + ErrPeriodDepth + ErrCnt +) + +// Error is a the typed error object used for Swarm Feeds +type Error struct { + code int + err string +} + +// Error implements the error interface +func (e *Error) Error() string { + return e.err +} + +// Code returns the error code +// Error codes are enumerated in the error.go file within the feeds package +func (e *Error) Code() int { + return e.code +} + +// NewError creates a new Swarm Feeds Error object with the specified code and custom error message +func NewError(code int, s string) error { + if code < 0 || code >= ErrCnt { + panic("no such error code!") + } + r := &Error{ + err: s, + } + switch code { + case ErrNotFound, ErrIO, ErrUnauthorized, ErrInvalidValue, ErrDataOverflow, ErrNothingToReturn, ErrInvalidSignature, ErrNotSynced, ErrPeriodDepth, ErrCorruptData: + r.code = code + } + return r +} + +// NewErrorf is a convenience version of NewError that incorporates printf-style formatting +func NewErrorf(code int, format string, args ...interface{}) error { + return NewError(code, fmt.Sprintf(format, args...)) +} diff --git a/swarm/storage/feeds/feed.go b/swarm/storage/feeds/feed.go new file mode 100644 index 000000000..8a807d506 --- /dev/null +++ b/swarm/storage/feeds/feed.go @@ -0,0 +1,125 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feeds + +import ( + "hash" + "unsafe" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// Feed represents a particular user's stream of updates on a Topic +type Feed struct { + Topic Topic `json:"topic"` + User common.Address `json:"user"` +} + +// Feed layout: +// TopicLength bytes +// userAddr common.AddressLength bytes +const feedLength = TopicLength + common.AddressLength + +// mapKey calculates a unique id for this feed. Used by the cache map in `Handler` +func (f *Feed) mapKey() uint64 { + serializedData := make([]byte, feedLength) + f.binaryPut(serializedData) + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + hasher.Write(serializedData) + hash := hasher.Sum(nil) + return *(*uint64)(unsafe.Pointer(&hash[0])) +} + +// binaryPut serializes this Feed instance into the provided slice +func (f *Feed) binaryPut(serializedData []byte) error { + if len(serializedData) != feedLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize Feed. 
Expected %d, got %d", feedLength, len(serializedData)) + } + var cursor int + copy(serializedData[cursor:cursor+TopicLength], f.Topic[:TopicLength]) + cursor += TopicLength + + copy(serializedData[cursor:cursor+common.AddressLength], f.User[:]) + cursor += common.AddressLength + + return nil +} + +// binaryLength returns the expected size of this structure when serialized +func (f *Feed) binaryLength() int { + return feedLength +} + +// binaryGet restores the current instance from the information contained in the passed slice +func (f *Feed) binaryGet(serializedData []byte) error { + if len(serializedData) != feedLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to read Feed. Expected %d, got %d", feedLength, len(serializedData)) + } + + var cursor int + copy(f.Topic[:], serializedData[cursor:cursor+TopicLength]) + cursor += TopicLength + + copy(f.User[:], serializedData[cursor:cursor+common.AddressLength]) + cursor += common.AddressLength + + return nil +} + +// Hex serializes the Feed to a hex string +func (f *Feed) Hex() string { + serializedData := make([]byte, feedLength) + f.binaryPut(serializedData) + return hexutil.Encode(serializedData) +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (f *Feed) FromValues(values Values) (err error) { + topic := values.Get("topic") + if topic != "" { + if err := f.Topic.FromHex(values.Get("topic")); err != nil { + return err + } + } else { // see if the user set name and relatedcontent + name := values.Get("name") + relatedContent, _ := hexutil.Decode(values.Get("relatedcontent")) + if len(relatedContent) > 0 { + if len(relatedContent) < storage.AddressLength { + return NewErrorf(ErrInvalidValue, "relatedcontent field must be a hex-encoded byte array exactly %d bytes long", storage.AddressLength) + } + relatedContent = relatedContent[:storage.AddressLength] + } + f.Topic, err = NewTopic(name, relatedContent) + if err != nil { + return err + } + } + f.User = common.HexToAddress(values.Get("user")) + return nil +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (f *Feed) AppendValues(values Values) { + values.Set("topic", f.Topic.Hex()) + values.Set("user", f.User.Hex()) +} diff --git a/swarm/storage/feeds/feed_test.go b/swarm/storage/feeds/feed_test.go new file mode 100644 index 000000000..7806e0ad7 --- /dev/null +++ b/swarm/storage/feeds/feed_test.go @@ -0,0 +1,36 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+package feeds + +import ( + "testing" +) + +func getTestFeed() *Feed { + topic, _ := NewTopic("world news report, every hour", nil) + return &Feed{ + Topic: topic, + User: newCharlieSigner().Address(), + } +} + +func TestFeedSerializerDeserializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestFeed(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781c") +} + +func TestFeedSerializerLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestFeed()) +} diff --git a/swarm/storage/feeds/handler.go b/swarm/storage/feeds/handler.go new file mode 100644 index 000000000..9c69fd1b4 --- /dev/null +++ b/swarm/storage/feeds/handler.go @@ -0,0 +1,295 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Handler is the API for Feeds +// It enables creating, updating, syncing and retrieving feed updates and their data +package feeds + +import ( + "bytes" + "context" + "fmt" + "sync" + "time" + + "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" + + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +type Handler struct { + chunkStore *storage.NetStore + HashSize int + cache map[uint64]*cacheEntry + cacheLock sync.RWMutex + storeTimeout time.Duration + queryMaxPeriods uint32 +} + +// HandlerParams pass parameters to the Handler constructor NewHandler +// Signer and TimestampProvider are mandatory parameters +type HandlerParams struct { +} + +// hashPool contains a pool of ready hashers +var hashPool sync.Pool + +// init initializes the package and hashPool +func init() { + hashPool = sync.Pool{ + New: func() interface{} { + return storage.MakeHashFunc(feedsHashAlgorithm)() + }, + } +} + +// NewHandler creates a new Swarm Feeds API +func NewHandler(params *HandlerParams) *Handler { + fh := &Handler{ + cache: make(map[uint64]*cacheEntry), + } + + for i := 0; i < hasherCount; i++ { + hashfunc := storage.MakeHashFunc(feedsHashAlgorithm)() + if fh.HashSize == 0 { + fh.HashSize = hashfunc.Size() + } + hashPool.Put(hashfunc) + } + + return fh +} + +// SetStore sets the store backend for the Swarm Feeds API +func (h *Handler) SetStore(store *storage.NetStore) { + h.chunkStore = store +} + +// Validate is a chunk validation method +// If it looks like a feed update, the chunk address is checked against the userAddr of the update's signature +// It implements the storage.ChunkValidator interface +func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { + dataLength := len(data) + if dataLength < minimumSignedUpdateLength { + return false + } + + // check if it is a properly formatted update chunk with + // valid signature and proof of ownership of the feed it is trying + // to update + + // First, deserialize the chunk + var r Request + if err 
:= r.fromChunk(chunkAddr, data); err != nil { + log.Debug("Invalid feed update chunk", "addr", chunkAddr.Hex(), "err", err.Error()) + return false + } + + // Verify signatures and that the signer actually owns the feed + // If it fails, it means either the signature is not valid, data is corrupted + // or someone is trying to update someone else's feed. + if err := r.Verify(); err != nil { + log.Debug("Invalid feed update signature", "err", err) + return false + } + + return true +} + +// GetContent retrieves the data payload of the last synced update of the Feed +func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) { + if feed == nil { + return nil, nil, NewError(ErrInvalidValue, "feed is nil") + } + feedUpdate := h.get(feed) + if feedUpdate == nil { + return nil, nil, NewError(ErrNotFound, "feed update not cached") + } + return feedUpdate.lastKey, feedUpdate.data, nil +} + +// NewRequest prepares a Request structure with all the necessary information to +// just add the desired data and sign it. +// The resulting structure can then be signed and passed to Handler.Update to be verified and sent +func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request, err error) { + if feed == nil { + return nil, NewError(ErrInvalidValue, "feed cannot be nil") + } + + now := TimestampProvider.Now().Time + request = new(Request) + request.Header.Version = ProtocolVersion + + query := NewQueryLatest(feed, lookup.NoClue) + + feedUpdate, err := h.Lookup(ctx, query) + if err != nil { + if err.(*Error).code != ErrNotFound { + return nil, err + } + // not finding updates means that there is a network error + // or that the feed really does not have updates + } + + request.Feed = *feed + + // if we already have an update, then find next epoch + if feedUpdate != nil { + request.Epoch = lookup.GetNextEpoch(feedUpdate.Epoch, now) + } else { + request.Epoch = lookup.GetFirstEpoch(now) + } + + return request, nil +} + +// Lookup retrieves a specific or latest feed update +// Lookup works differently depending on the configuration of `query` +// See the `query` documentation and helper functions: +// `NewQueryLatest` and `NewQuery` +func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) { + + timeLimit := query.TimeLimit + if timeLimit == 0 { // if time limit is set to zero, the user wants to get the latest update + timeLimit = TimestampProvider.Now().Time + } + + if query.Hint == lookup.NoClue { // try to use our cache + entry := h.get(&query.Feed) + if entry != nil && entry.Epoch.Time <= timeLimit { // avoid bad hints + query.Hint = entry.Epoch + } + } + + // we can't look for anything without a store + if h.chunkStore == nil { + return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups") + } + + var id ID + id.Feed = query.Feed + var readCount int + + // Invoke the lookup engine. 
+ // The callback will be called every time the lookup algorithm needs to guess + requestPtr, err := lookup.Lookup(timeLimit, query.Hint, func(epoch lookup.Epoch, now uint64) (interface{}, error) { + readCount++ + id.Epoch = epoch + ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout) + defer cancel() + + chunk, err := h.chunkStore.Get(ctx, id.Addr()) + if err != nil { // TODO: check for catastrophic errors other than chunk not found + return nil, nil + } + + var request Request + if err := request.fromChunk(chunk.Address(), chunk.Data()); err != nil { + return nil, nil + } + if request.Time <= timeLimit { + return &request, nil + } + return nil, nil + }) + if err != nil { + return nil, err + } + + log.Info(fmt.Sprintf("Feed lookup finished in %d lookups", readCount)) + + request, _ := requestPtr.(*Request) + if request == nil { + return nil, NewError(ErrNotFound, "no feed updates found") + } + return h.updateCache(request) + +} + +// update feed updates cache with specified content +func (h *Handler) updateCache(request *Request) (*cacheEntry, error) { + + updateAddr := request.Addr() + log.Trace("feed cache update", "topic", request.Topic.Hex(), "updateaddr", updateAddr, "epoch time", request.Epoch.Time, "epoch level", request.Epoch.Level) + + feedUpdate := h.get(&request.Feed) + if feedUpdate == nil { + feedUpdate = &cacheEntry{} + h.set(&request.Feed, feedUpdate) + } + + // update our rsrcs entry map + feedUpdate.lastKey = updateAddr + feedUpdate.Update = request.Update + feedUpdate.Reader = bytes.NewReader(feedUpdate.data) + return feedUpdate, nil +} + +// Update publishes a feed update +// Note that a Feed update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. +// This results in a max payload of `maxUpdateDataLength` (check update.go for more details) +// An error will be returned if the total length of the chunk payload will exceed this limit. +// Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update +// on the network. +func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) { + + // we can't update anything without a store + if h.chunkStore == nil { + return nil, NewError(ErrInit, "Call Handler.SetStore() before updating") + } + + feedUpdate := h.get(&r.Feed) + if feedUpdate != nil && feedUpdate.Epoch.Equals(r.Epoch) { // This is the only cheap check we can do for sure + return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist") + } + + chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big + if err != nil { + return nil, err + } + + // send the chunk + h.chunkStore.Put(ctx, chunk) + log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data()) + // update our feed updates map cache entry if the new update is older than the one we have, if we have it. 
+ if feedUpdate != nil && r.Epoch.After(feedUpdate.Epoch) { + feedUpdate.Epoch = r.Epoch + feedUpdate.data = make([]byte, len(r.data)) + feedUpdate.lastKey = r.idAddr + copy(feedUpdate.data, r.data) + feedUpdate.Reader = bytes.NewReader(feedUpdate.data) + } + + return r.idAddr, nil +} + +// Retrieves the feed update cache value for the given nameHash +func (h *Handler) get(feed *Feed) *cacheEntry { + mapKey := feed.mapKey() + h.cacheLock.RLock() + defer h.cacheLock.RUnlock() + feedUpdate := h.cache[mapKey] + return feedUpdate +} + +// Sets the feed update cache value for the given Feed +func (h *Handler) set(feed *Feed, feedUpdate *cacheEntry) { + mapKey := feed.mapKey() + h.cacheLock.Lock() + defer h.cacheLock.Unlock() + h.cache[mapKey] = feedUpdate +} diff --git a/swarm/storage/feeds/handler_test.go b/swarm/storage/feeds/handler_test.go new file mode 100644 index 000000000..8331980ca --- /dev/null +++ b/swarm/storage/feeds/handler_test.go @@ -0,0 +1,520 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feeds + +import ( + "bytes" + "context" + "flag" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/ethereum/go-ethereum/crypto" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/swarm/chunk" + "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" +) + +var ( + loglevel = flag.Int("loglevel", 3, "loglevel") + startTime = Timestamp{ + Time: uint64(4200), + } + cleanF func() + subtopicName = "føø.bar" + hashfunc = storage.MakeHashFunc(storage.DefaultHash) +) + +func init() { + flag.Parse() + log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) +} + +// simulated timeProvider +type fakeTimeProvider struct { + currentTime uint64 +} + +func (f *fakeTimeProvider) Tick() { + f.currentTime++ +} + +func (f *fakeTimeProvider) Set(time uint64) { + f.currentTime = time +} + +func (f *fakeTimeProvider) FastForward(offset uint64) { + f.currentTime += offset +} + +func (f *fakeTimeProvider) Now() Timestamp { + return Timestamp{ + Time: f.currentTime, + } +} + +// make updates and retrieve them based on periods and versions +func TestFeedsHandler(t *testing.T) { + + // make fake timeProvider + clock := &fakeTimeProvider{ + currentTime: startTime.Time, // clock starts at t=4200 + } + + // signer containing private key + signer := newAliceSigner() + + feedsHandler, datadir, teardownTest, err := setupTest(clock, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + + // create a new Feed + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + topic, _ := NewTopic("Mess with Swarm Feeds code and see what ghost catches you", nil) + feed := Feed{ + Topic: 
topic, + User: signer.Address(), + } + + // data for updates: + updates := []string{ + "blinky", // t=4200 + "pinky", // t=4242 + "inky", // t=4284 + "clyde", // t=4285 + } + + request := NewFirstRequest(feed.Topic) // this timestamps the update at t = 4200 (start time) + chunkAddress := make(map[string]storage.Address) + data := []byte(updates[0]) + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + chunkAddress[updates[0]], err = feedsHandler.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + // move the clock ahead 21 seconds + clock.FastForward(21) // t=4221 + + request, err = feedsHandler.NewRequest(ctx, &request.Feed) // this timestamps the update at t = 4221 + if err != nil { + t.Fatal(err) + } + if request.Epoch.Base() != 0 || request.Epoch.Level != lookup.HighestLevel-1 { + t.Fatalf("Suggested epoch BaseTime should be 0 and Epoch level should be %d", lookup.HighestLevel-1) + } + + request.Epoch.Level = lookup.HighestLevel // force level 25 instead of 24 to make it fail + data = []byte(updates[1]) + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request) + if err == nil { + t.Fatal("Expected update to fail since an update in this epoch already exists") + } + + // move the clock ahead 21 seconds + clock.FastForward(21) // t=4242 + request, err = feedsHandler.NewRequest(ctx, &request.Feed) + if err != nil { + t.Fatal(err) + } + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + // move the clock ahead 42 seconds + clock.FastForward(42) // t=4284 + request, err = feedsHandler.NewRequest(ctx, &request.Feed) + if err != nil { + t.Fatal(err) + } + data = []byte(updates[2]) + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + chunkAddress[updates[2]], err = feedsHandler.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + // move the clock ahead 1 second + clock.FastForward(1) // t=4285 + request, err = feedsHandler.NewRequest(ctx, &request.Feed) + if err != nil { + t.Fatal(err) + } + if request.Epoch.Base() != 0 || request.Epoch.Level != 22 { + t.Fatalf("Expected epoch base time to be %d, got %d. 
Expected epoch level to be %d, got %d", 0, request.Epoch.Base(), 22, request.Epoch.Level) + } + data = []byte(updates[3]) + request.SetData(data) + + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + chunkAddress[updates[3]], err = feedsHandler.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Second) + feedsHandler.Close() + + // check we can retrieve the updates after close + clock.FastForward(2000) // t=6285 + + feedParams := &HandlerParams{} + + feedsHandler2, err := NewTestHandler(datadir, feedParams) + if err != nil { + t.Fatal(err) + } + + update2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue)) + if err != nil { + t.Fatal(err) + } + + // last update should be "clyde" + if !bytes.Equal(update2.data, []byte(updates[len(updates)-1])) { + t.Fatalf("feed update data was %v, expected %v", string(update2.data), updates[len(updates)-1]) + } + if update2.Level != 22 { + t.Fatalf("feed update epoch level was %d, expected 22", update2.Level) + } + if update2.Base() != 0 { + t.Fatalf("feed update epoch base time was %d, expected 0", update2.Base()) + } + log.Debug("Latest lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data) + + // specific point in time + update, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue)) + if err != nil { + t.Fatal(err) + } + // check data + if !bytes.Equal(update.data, []byte(updates[2])) { + t.Fatalf("feed update data (historical) was %v, expected %v", string(update2.data), updates[2]) + } + log.Debug("Historical lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data) + + // beyond the first should yield an error + update, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue)) + if err == nil { + t.Fatalf("expected previous to fail, returned epoch %s data %v", update.Epoch.String(), update.data) + } + +} + +const Day = 60 * 60 * 24 +const Year = Day * 365 +const Month = Day * 30 + +func generateData(x uint64) []byte { + return []byte(fmt.Sprintf("%d", x)) +} + +func TestSparseUpdates(t *testing.T) { + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key + signer := newAliceSigner() + + rh, datadir, teardownTest, err := setupTest(timeProvider, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + defer os.RemoveAll(datadir) + + // create a new Feed + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + topic, _ := NewTopic("Very slow updates", nil) + feed := Feed{ + Topic: topic, + User: signer.Address(), + } + + // publish one update every 5 years since Unix 0 until today + today := uint64(1533799046) + var epoch lookup.Epoch + var lastUpdateTime uint64 + for T := uint64(0); T < today; T += 5 * Year { + request := NewFirstRequest(feed.Topic) + request.Epoch = lookup.GetNextEpoch(epoch, T) + request.data = generateData(T) // this generates some data that depends on T, so we can check later + request.Sign(signer) + if err != nil { + t.Fatal(err) + } + + if _, err := rh.Update(ctx, request); err != nil { + t.Fatal(err) + } + epoch = request.Epoch + lastUpdateTime = T + } + + query := NewQuery(&feed, today, lookup.NoClue) + + _, err = rh.Lookup(ctx, query) + if err != nil { + t.Fatal(err) + } + + _, content, err := rh.GetContent(&feed) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(generateData(lastUpdateTime), content) { + 
t.Fatalf("Expected to recover last written value %d, got %s", lastUpdateTime, string(content)) + } + + // lookup the closest update to 35*Year + 6* Month (~ June 2005): + // it should find the update we put on 35*Year, since we were updating every 5 years. + + query.TimeLimit = 35*Year + 6*Month + + _, err = rh.Lookup(ctx, query) + if err != nil { + t.Fatal(err) + } + + _, content, err = rh.GetContent(&feed) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(generateData(35*Year), content) { + t.Fatalf("Expected to recover %d, got %s", 35*Year, string(content)) + } +} + +func TestValidator(t *testing.T) { + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key. Alice will be the good girl + signer := newAliceSigner() + + // set up sim timeProvider + rh, _, teardownTest, err := setupTest(timeProvider, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + + // create new Feed + topic, _ := NewTopic(subtopicName, nil) + feed := Feed{ + Topic: topic, + User: signer.Address(), + } + mr := NewFirstRequest(feed.Topic) + + // chunk with address + data := []byte("foo") + mr.SetData(data) + if err := mr.Sign(signer); err != nil { + t.Fatalf("sign fail: %v", err) + } + + chunk, err := mr.toChunk() + if err != nil { + t.Fatal(err) + } + if !rh.Validate(chunk.Address(), chunk.Data()) { + t.Fatal("Chunk validator fail on update chunk") + } + + address := chunk.Address() + // mess with the address + address[0] = 11 + address[15] = 99 + + if rh.Validate(address, chunk.Data()) { + t.Fatal("Expected Validate to fail with false chunk address") + } +} + +// tests that the content address validator correctly checks the data +// tests that Feed update chunks are passed through content address validator +// there is some redundancy in this test as it also tests content addressed chunks, +// which should be evaluated as invalid chunks by this validator +func TestValidatorInStore(t *testing.T) { + + // make fake timeProvider + TimestampProvider = &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key + signer := newAliceSigner() + + // set up localstore + datadir, err := ioutil.TempDir("", "storage-testfeedsvalidator") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(datadir) + + handlerParams := storage.NewDefaultLocalStoreParams() + handlerParams.Init(datadir) + store, err := storage.NewLocalStore(handlerParams, nil) + if err != nil { + t.Fatal(err) + } + + // set up Swarm Feeds handler and add is as a validator to the localstore + fhParams := &HandlerParams{} + fh := NewHandler(fhParams) + store.Validators = append(store.Validators, fh) + + // create content addressed chunks, one good, one faulty + chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2) + goodChunk := chunks[0] + badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data()) + + topic, _ := NewTopic("xyzzy", nil) + feed := Feed{ + Topic: topic, + User: signer.Address(), + } + + // create a feed update chunk with correct publickey + id := ID{ + Epoch: lookup.Epoch{Time: 42, + Level: 1, + }, + Feed: feed, + } + + updateAddr := id.Addr() + data := []byte("bar") + + r := new(Request) + r.idAddr = updateAddr + r.Update.ID = id + r.data = data + + r.Sign(signer) + + uglyChunk, err := r.toChunk() + if err != nil { + t.Fatal(err) + } + + // put the chunks in the store and check their error status + err = store.Put(context.Background(), goodChunk) + if err == nil { + t.Fatal("expected error on 
good content address chunk with feed update validator only, but got nil") + } + err = store.Put(context.Background(), badChunk) + if err == nil { + t.Fatal("expected error on bad content address chunk with feed update validator only, but got nil") + } + err = store.Put(context.Background(), uglyChunk) + if err != nil { + t.Fatalf("expected no error on feed update chunk with feed update validator only, but got: %s", err) + } +} + +// create rpc and Feeds Handler +func setupTest(timeProvider timestampProvider, signer Signer) (fh *TestHandler, datadir string, teardown func(), err error) { + + var fsClean func() + var rpcClean func() + cleanF = func() { + if fsClean != nil { + fsClean() + } + if rpcClean != nil { + rpcClean() + } + } + + // temp datadir + datadir, err = ioutil.TempDir("", "fh") + if err != nil { + return nil, "", nil, err + } + fsClean = func() { + os.RemoveAll(datadir) + } + + TimestampProvider = timeProvider + fhParams := &HandlerParams{} + fh, err = NewTestHandler(datadir, fhParams) + return fh, datadir, cleanF, err +} + +func newAliceSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + return NewGenericSigner(privKey) +} + +func newBobSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca") + return NewGenericSigner(privKey) +} + +func newCharlieSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca") + return NewGenericSigner(privKey) +} + +func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) { + chunk, err := rh.chunkStore.Get(context.TODO(), addr) + if err != nil { + return nil, err + } + var r Request + if err := r.fromChunk(addr, chunk.Data()); err != nil { + return nil, err + } + return r.data, nil +} diff --git a/swarm/storage/feeds/id.go b/swarm/storage/feeds/id.go new file mode 100644 index 000000000..dd813ae89 --- /dev/null +++ b/swarm/storage/feeds/id.go @@ -0,0 +1,123 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feeds + +import ( + "fmt" + "hash" + "strconv" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" + + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// ID uniquely identifies an update on the network. 
+type ID struct { + Feed `json:"feed"` + lookup.Epoch `json:"epoch"` +} + +// ID layout: +// Feed feedLength bytes +// Epoch EpochLength +const idLength = feedLength + lookup.EpochLength + +// Addr calculates the feed update chunk address corresponding to this ID +func (u *ID) Addr() (updateAddr storage.Address) { + serializedData := make([]byte, idLength) + var cursor int + u.Feed.binaryPut(serializedData[cursor : cursor+feedLength]) + cursor += feedLength + + eid := u.Epoch.ID() + copy(serializedData[cursor:cursor+lookup.EpochLength], eid[:]) + + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + hasher.Write(serializedData) + return hasher.Sum(nil) +} + +// binaryPut serializes this instance into the provided slice +func (u *ID) binaryPut(serializedData []byte) error { + if len(serializedData) != idLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize ID. Expected %d, got %d", idLength, len(serializedData)) + } + var cursor int + if err := u.Feed.binaryPut(serializedData[cursor : cursor+feedLength]); err != nil { + return err + } + cursor += feedLength + + epochBytes, err := u.Epoch.MarshalBinary() + if err != nil { + return err + } + copy(serializedData[cursor:cursor+lookup.EpochLength], epochBytes[:]) + cursor += lookup.EpochLength + + return nil +} + +// binaryLength returns the expected size of this structure when serialized +func (u *ID) binaryLength() int { + return idLength +} + +// binaryGet restores the current instance from the information contained in the passed slice +func (u *ID) binaryGet(serializedData []byte) error { + if len(serializedData) != idLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to read ID. Expected %d, got %d", idLength, len(serializedData)) + } + + var cursor int + if err := u.Feed.binaryGet(serializedData[cursor : cursor+feedLength]); err != nil { + return err + } + cursor += feedLength + + if err := u.Epoch.UnmarshalBinary(serializedData[cursor : cursor+lookup.EpochLength]); err != nil { + return err + } + cursor += lookup.EpochLength + + return nil +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (u *ID) FromValues(values Values) error { + level, _ := strconv.ParseUint(values.Get("level"), 10, 32) + u.Epoch.Level = uint8(level) + u.Epoch.Time, _ = strconv.ParseUint(values.Get("time"), 10, 64) + + if u.Feed.User == (common.Address{}) { + return u.Feed.FromValues(values) + } + return nil +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (u *ID) AppendValues(values Values) { + values.Set("level", fmt.Sprintf("%d", u.Epoch.Level)) + values.Set("time", fmt.Sprintf("%d", u.Epoch.Time)) + u.Feed.AppendValues(values) +} diff --git a/swarm/storage/feeds/id_test.go b/swarm/storage/feeds/id_test.go new file mode 100644 index 000000000..2ef12e891 --- /dev/null +++ b/swarm/storage/feeds/id_test.go @@ -0,0 +1,28 @@ +package feeds + +import ( + "testing" + + "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" +) + +func getTestID() *ID { + return &ID{ + Feed: *getTestFeed(), + Epoch: lookup.GetFirstEpoch(1000), + } +} + +func TestIDAddr(t *testing.T) { + id := getTestID() + updateAddr := id.Addr() + compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8b24583ec293e085f4c78aaee66d1bc5abfb8b4233304d14a349afa57af2a783") +} + +func TestIDSerializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestID(), 
"0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019") +} + +func TestIDLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestID()) +} diff --git a/swarm/storage/feeds/lookup/epoch.go b/swarm/storage/feeds/lookup/epoch.go new file mode 100644 index 000000000..bafe95477 --- /dev/null +++ b/swarm/storage/feeds/lookup/epoch.go @@ -0,0 +1,91 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package lookup + +import ( + "encoding/binary" + "errors" + "fmt" +) + +// Epoch represents a time slot at a particular frequency level +type Epoch struct { + Time uint64 `json:"time"` // Time stores the time at which the update or lookup takes place + Level uint8 `json:"level"` // Level indicates the frequency level as the exponent of a power of 2 +} + +// EpochID is a unique identifier for an Epoch, based on its level and base time. +type EpochID [8]byte + +// EpochLength stores the serialized binary length of an Epoch +const EpochLength = 8 + +// MaxTime contains the highest possible time value an Epoch can handle +const MaxTime uint64 = (1 << 56) - 1 + +// Base returns the base time of the Epoch +func (e *Epoch) Base() uint64 { + return getBaseTime(e.Time, e.Level) +} + +// ID Returns the unique identifier of this epoch +func (e *Epoch) ID() EpochID { + base := e.Base() + var id EpochID + binary.LittleEndian.PutUint64(id[:], base) + id[7] = e.Level + return id +} + +// MarshalBinary implements the encoding.BinaryMarshaller interface +func (e *Epoch) MarshalBinary() (data []byte, err error) { + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b[:], e.Time) + b[7] = e.Level + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaller interface +func (e *Epoch) UnmarshalBinary(data []byte) error { + if len(data) != EpochLength { + return errors.New("Invalid data unmarshalling Epoch") + } + b := make([]byte, 8) + copy(b, data) + e.Level = b[7] + b[7] = 0 + e.Time = binary.LittleEndian.Uint64(b) + return nil +} + +// After returns true if this epoch occurs later or exactly at the other epoch. +func (e *Epoch) After(epoch Epoch) bool { + if e.Time == epoch.Time { + return e.Level < epoch.Level + } + return e.Time >= epoch.Time +} + +// Equals compares two epochs and returns true if they refer to the same time period. +func (e *Epoch) Equals(epoch Epoch) bool { + return e.Level == epoch.Level && e.Base() == epoch.Base() +} + +// String implements the Stringer interface. 
+func (e *Epoch) String() string { + return fmt.Sprintf("Epoch{Time:%d, Level:%d}", e.Time, e.Level) +} diff --git a/swarm/storage/feeds/lookup/epoch_test.go b/swarm/storage/feeds/lookup/epoch_test.go new file mode 100644 index 000000000..70bfd836a --- /dev/null +++ b/swarm/storage/feeds/lookup/epoch_test.go @@ -0,0 +1,57 @@ +package lookup_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" +) + +func TestMarshallers(t *testing.T) { + + for i := uint64(1); i < lookup.MaxTime; i *= 3 { + e := lookup.Epoch{ + Time: i, + Level: uint8(i % 20), + } + b, err := e.MarshalBinary() + if err != nil { + t.Fatal(err) + } + var e2 lookup.Epoch + if err := e2.UnmarshalBinary(b); err != nil { + t.Fatal(err) + } + if e != e2 { + t.Fatal("Expected unmarshalled epoch to be equal to marshalled onet.Fatal(err)") + } + } + +} + +func TestAfter(t *testing.T) { + a := lookup.Epoch{ + Time: 5, + Level: 3, + } + b := lookup.Epoch{ + Time: 6, + Level: 3, + } + c := lookup.Epoch{ + Time: 6, + Level: 4, + } + + if !b.After(a) { + t.Fatal("Expected 'after' to be true, got false") + } + + if b.After(b) { + t.Fatal("Expected 'after' to be false when both epochs are identical, got true") + } + + if !b.After(c) { + t.Fatal("Expected 'after' to be true when both epochs have the same time but the level is lower in the first one, but got false") + } + +} diff --git a/swarm/storage/feeds/lookup/lookup.go b/swarm/storage/feeds/lookup/lookup.go new file mode 100644 index 000000000..a5154d261 --- /dev/null +++ b/swarm/storage/feeds/lookup/lookup.go @@ -0,0 +1,180 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +/* +Package lookup defines Feed lookup algorithms and provides tools to place updates +so they can be found +*/ +package lookup + +const maxuint64 = ^uint64(0) + +// LowestLevel establishes the frequency resolution of the lookup algorithm as a power of 2. +const LowestLevel uint8 = 0 // default is 0 (1 second) + +// HighestLevel sets the lowest frequency the algorithm will operate at, as a power of 2. +// 25 -> 2^25 equals to roughly one year. 
+const HighestLevel = 25 // default is 25 (~1 year) + +// DefaultLevel sets what level will be chosen to search when there is no hint +const DefaultLevel = HighestLevel + +//Algorithm is the function signature of a lookup algorithm +type Algorithm func(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) + +// Lookup finds the update with the highest timestamp that is smaller or equal than 'now' +// It takes a hint which should be the epoch where the last known update was +// If you don't know in what epoch the last update happened, simply submit lookup.NoClue +// read() will be called on each lookup attempt +// Returns an error only if read() returns an error +// Returns nil if an update was not found +var Lookup Algorithm = FluzCapacitorAlgorithm + +// ReadFunc is a handler called by Lookup each time it attempts to find a value +// It should return if a value is not found +// It should return if a value is found, but its timestamp is higher than "now" +// It should only return an error in case the handler wants to stop the +// lookup process entirely. +type ReadFunc func(epoch Epoch, now uint64) (interface{}, error) + +// NoClue is a hint that can be provided when the Lookup caller does not have +// a clue about where the last update may be +var NoClue = Epoch{} + +// getBaseTime returns the epoch base time of the given +// time and level +func getBaseTime(t uint64, level uint8) uint64 { + return t & (maxuint64 << level) +} + +// Hint creates a hint based only on the last known update time +func Hint(last uint64) Epoch { + return Epoch{ + Time: last, + Level: DefaultLevel, + } +} + +// GetNextLevel returns the frequency level a next update should be placed at, provided where +// the last update was and what time it is now. +// This is the first nonzero bit of the XOR of 'last' and 'now', counting from the highest significant bit +// but limited to not return a level that is smaller than the last-1 +func GetNextLevel(last Epoch, now uint64) uint8 { + // First XOR the last epoch base time with the current clock. + // This will set all the common most significant bits to zero. + mix := (last.Base() ^ now) + + // Then, make sure we stop the below loop before one level below the current, by setting + // that level's bit to 1. + // If the next level is lower than the current one, it must be exactly level-1 and not lower. + mix |= (1 << (last.Level - 1)) + + // if the last update was more than 2^highestLevel seconds ago, choose the highest level + if mix > (maxuint64 >> (64 - HighestLevel - 1)) { + return HighestLevel + } + + // set up a mask to scan for nonzero bits, starting at the highest level + mask := uint64(1 << (HighestLevel)) + + for i := uint8(HighestLevel); i > LowestLevel; i-- { + if mix&mask != 0 { // if we find a nonzero bit, this is the level the next update should be at. + return i + } + mask = mask >> 1 // move our bit one position to the right + } + return 0 +} + +// GetNextEpoch returns the epoch where the next update should be located +// according to where the previous update was +// and what time it is now. +func GetNextEpoch(last Epoch, now uint64) Epoch { + if last == NoClue { + return GetFirstEpoch(now) + } + level := GetNextLevel(last, now) + return Epoch{ + Level: level, + Time: now, + } +} + +// GetFirstEpoch returns the epoch where the first update should be located +// based on what time it is now. 
+func GetFirstEpoch(now uint64) Epoch { + return Epoch{Level: HighestLevel, Time: now} +} + +var worstHint = Epoch{Time: 0, Level: 63} + +// FluzCapacitorAlgorithm works by narrowing the epoch search area if an update is found +// going back and forth in time +// First, it will attempt to find an update where it should be now if the hint was +// really the last update. If that lookup fails, then the last update must be either the hint itself +// or the epochs right below. If however, that lookup succeeds, then the update must be +// that one or within the epochs right below. +// see the guide for a more graphical representation +func FluzCapacitorAlgorithm(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) { + var lastFound interface{} + var epoch Epoch + if hint == NoClue { + hint = worstHint + } + + t := now + + for { + epoch = GetNextEpoch(hint, t) + value, err = read(epoch, now) + if err != nil { + return nil, err + } + if value != nil { + lastFound = value + if epoch.Level == LowestLevel || epoch.Equals(hint) { + return value, nil + } + hint = epoch + continue + } + if epoch.Base() == hint.Base() { + if lastFound != nil { + return lastFound, nil + } + // we have reached the hint itself + if hint == worstHint { + return nil, nil + } + // check it out + value, err = read(hint, now) + if err != nil { + return nil, err + } + if value != nil { + return value, nil + } + // bad hint. + epoch = hint + hint = worstHint + } + base := epoch.Base() + if base == 0 { + return nil, nil + } + t = base - 1 + } +} diff --git a/swarm/storage/feeds/lookup/lookup_test.go b/swarm/storage/feeds/lookup/lookup_test.go new file mode 100644 index 000000000..7d5014608 --- /dev/null +++ b/swarm/storage/feeds/lookup/lookup_test.go @@ -0,0 +1,414 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
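To make the writer/reader contract of the lookup package concrete, here is a minimal, self-contained sketch in which an in-memory map stands in for Swarm chunk storage: the writer places each update in the epoch suggested by GetNextEpoch, and the reader recovers the newest update at or before a given time via Lookup and a ReadFunc.

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup"
	)

	func main() {
		// In-memory stand-in for chunk storage, keyed by epoch ID.
		store := make(map[lookup.EpochID]uint64)

		// Writer: place updates at t=1000, 1010 and 1020; each goes into the
		// epoch suggested by GetNextEpoch given the previous update's epoch.
		var last lookup.Epoch
		for _, t := range []uint64{1000, 1010, 1020} {
			epoch := lookup.GetNextEpoch(last, t)
			store[epoch.ID()] = t
			last = epoch
		}

		// Reader: find the newest update at or before t=1015.
		value, err := lookup.Lookup(1015, lookup.NoClue, func(epoch lookup.Epoch, now uint64) (interface{}, error) {
			t, ok := store[epoch.ID()]
			if !ok || t > now {
				return nil, nil // nothing here, or newer than requested: keep looking
			}
			return t, nil
		})
		if err != nil {
			panic(err)
		}
		fmt.Println(value) // 1010
	}
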
+ +package lookup_test + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" +) + +type Data struct { + Payload uint64 + Time uint64 +} + +type Store map[lookup.EpochID]*Data + +func write(store Store, epoch lookup.Epoch, value *Data) { + log.Debug("Write: %d-%d, value='%d'\n", epoch.Base(), epoch.Level, value.Payload) + store[epoch.ID()] = value +} + +func update(store Store, last lookup.Epoch, now uint64, value *Data) lookup.Epoch { + epoch := lookup.GetNextEpoch(last, now) + + write(store, epoch, value) + + return epoch +} + +const Day = 60 * 60 * 24 +const Year = Day * 365 +const Month = Day * 30 + +func makeReadFunc(store Store, counter *int) lookup.ReadFunc { + return func(epoch lookup.Epoch, now uint64) (interface{}, error) { + *counter++ + data := store[epoch.ID()] + var valueStr string + if data != nil { + valueStr = fmt.Sprintf("%d", data.Payload) + } + log.Debug("Read: %d-%d, value='%s'\n", epoch.Base(), epoch.Level, valueStr) + if data != nil && data.Time <= now { + return data, nil + } + return nil, nil + } +} + +func TestLookup(t *testing.T) { + + store := make(Store) + readCount := 0 + readFunc := makeReadFunc(store, &readCount) + + // write an update every month for 12 months 3 years ago and then silence for two years + now := uint64(1533799046) + var epoch lookup.Epoch + + var lastData *Data + for i := uint64(0); i < 12; i++ { + t := uint64(now - Year*3 + i*Month) + data := Data{ + Payload: t, //our "payload" will be the timestamp itself. + Time: t, + } + epoch = update(store, epoch, t, &data) + lastData = &data + } + + // try to get the last value + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + readCountWithoutHint := readCount + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + // reset the read count for the next test + readCount = 0 + // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update + value, err = lookup.Lookup(now, epoch, readFunc) + if err != nil { + t.Fatal(err) + } + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + if readCount > readCountWithoutHint { + t.Fatalf("Expected lookup to complete with fewer or same reads than %d since we provided a hint. Did %d reads.", readCountWithoutHint, readCount) + } + + // try to get an intermediate value + // if we look for a value in now - Year*3 + 6*Month, we should get that value + // Since the "payload" is the timestamp itself, we can check this. + + expectedTime := now - Year*3 + 6*Month + + value, err = lookup.Lookup(expectedTime, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + data, ok := value.(*Data) + + if !ok { + t.Fatal("Expected value to contain data") + } + + if data.Time != expectedTime { + t.Fatalf("Expected value timestamp to be %d, got %d", data.Time, expectedTime) + } + +} + +func TestOneUpdateAt0(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + var epoch lookup.Epoch + data := Data{ + Payload: 79, + Time: 0, + } + update(store, epoch, 0, &data) + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + if value != &data { + t.Fatalf("Expected lookup to return the last written value: %v. 
Got %v", data, value) + } +} + +// Tests the update is found even when a bad hint is given +func TestBadHint(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + var epoch lookup.Epoch + data := Data{ + Payload: 79, + Time: 0, + } + + // place an update for t=1200 + update(store, epoch, 1200, &data) + + // come up with some evil hint + badHint := lookup.Epoch{ + Level: 18, + Time: 1200000000, + } + + value, err := lookup.Lookup(now, badHint, readFunc) + if err != nil { + t.Fatal(err) + } + if value != &data { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", data, value) + } +} + +func TestLookupFail(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + // don't write anything and try to look up. + // we're testing we don't get stuck in a loop + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + if value != nil { + t.Fatal("Expected value to be nil, since the update should've failed") + } + + expectedReads := now/(1< readCountWithoutHint { + t.Fatalf("Expected lookup to complete with fewer or equal reads than %d since we provided a hint. Did %d reads.", readCountWithoutHint, readCount) + } + + for i := uint64(0); i <= 994; i++ { + T := uint64(now - 1000 + i) // update every second for the last 1000 seconds + value, err := lookup.Lookup(T, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + data, _ := value.(*Data) + if data == nil { + t.Fatalf("Expected lookup to return %d, got nil", T) + } + if data.Payload != T { + t.Fatalf("Expected lookup to return %d, got %d", T, data.Time) + } + } +} + +func TestSparseUpdates(t *testing.T) { + + store := make(Store) + readCount := 0 + readFunc := makeReadFunc(store, &readCount) + + // write an update every 5 years 3 times starting in Jan 1st 1970 and then silence + + now := uint64(1533799046) + var epoch lookup.Epoch + + var lastData *Data + for i := uint64(0); i < 5; i++ { + T := uint64(Year * 5 * i) // write an update every 5 years 3 times starting in Jan 1st 1970 and then silence + data := Data{ + Payload: T, //our "payload" will be the timestamp itself. + Time: T, + } + epoch = update(store, epoch, T, &data) + lastData = &data + } + + // try to get the last value + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + readCountWithoutHint := readCount + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + // reset the read count for the next test + readCount = 0 + // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update + value, err = lookup.Lookup(now, epoch, readFunc) + if err != nil { + t.Fatal(err) + } + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + if readCount > readCountWithoutHint { + t.Fatalf("Expected lookup to complete with fewer reads than %d since we provided a hint. 
Did %d reads.", readCountWithoutHint, readCount) + } + +} + +// testG will hold precooked test results +// fields are abbreviated to reduce the size of the literal below +type testG struct { + e lookup.Epoch // last + n uint64 // next level + x uint8 // expected result +} + +// test cases +var testGetNextLevelCases []testG = []testG{{e: lookup.Epoch{Time: 989875233, Level: 12}, n: 989875233, x: 11}, {e: lookup.Epoch{Time: 995807650, Level: 18}, n: 995598156, x: 19}, {e: lookup.Epoch{Time: 969167082, Level: 0}, n: 968990357, x: 18}, {e: lookup.Epoch{Time: 993087628, Level: 14}, n: 992987044, x: 20}, {e: lookup.Epoch{Time: 963364631, Level: 20}, n: 963364630, x: 19}, {e: lookup.Epoch{Time: 963497510, Level: 16}, n: 963370732, x: 18}, {e: lookup.Epoch{Time: 955421349, Level: 22}, n: 955421348, x: 21}, {e: lookup.Epoch{Time: 968220379, Level: 15}, n: 968220378, x: 14}, {e: lookup.Epoch{Time: 939129014, Level: 6}, n: 939128771, x: 11}, {e: lookup.Epoch{Time: 907847903, Level: 6}, n: 907791833, x: 18}, {e: lookup.Epoch{Time: 910835564, Level: 15}, n: 910835564, x: 14}, {e: lookup.Epoch{Time: 913578333, Level: 22}, n: 881808431, x: 25}, {e: lookup.Epoch{Time: 895818460, Level: 3}, n: 895818132, x: 9}, {e: lookup.Epoch{Time: 903843025, Level: 24}, n: 895609561, x: 23}, {e: lookup.Epoch{Time: 877889433, Level: 13}, n: 877877093, x: 15}, {e: lookup.Epoch{Time: 901450396, Level: 10}, n: 901450058, x: 9}, {e: lookup.Epoch{Time: 925179910, Level: 3}, n: 925168393, x: 16}, {e: lookup.Epoch{Time: 913485477, Level: 21}, n: 913485476, x: 20}, {e: lookup.Epoch{Time: 924462991, Level: 18}, n: 924462990, x: 17}, {e: lookup.Epoch{Time: 941175128, Level: 13}, n: 941175127, x: 12}, {e: lookup.Epoch{Time: 920126583, Level: 3}, n: 920100782, x: 19}, {e: lookup.Epoch{Time: 932403200, Level: 9}, n: 932279891, x: 17}, {e: lookup.Epoch{Time: 948284931, Level: 2}, n: 948284921, x: 9}, {e: lookup.Epoch{Time: 953540997, Level: 7}, n: 950547986, x: 22}, {e: lookup.Epoch{Time: 926639837, Level: 18}, n: 918608882, x: 24}, {e: lookup.Epoch{Time: 954637598, Level: 1}, n: 954578761, x: 17}, {e: lookup.Epoch{Time: 943482981, Level: 10}, n: 942924151, x: 19}, {e: lookup.Epoch{Time: 963580771, Level: 7}, n: 963580771, x: 6}, {e: lookup.Epoch{Time: 993744930, Level: 7}, n: 993690858, x: 16}, {e: lookup.Epoch{Time: 1018890213, Level: 12}, n: 1018890212, x: 11}, {e: lookup.Epoch{Time: 1030309411, Level: 2}, n: 1030309227, x: 9}, {e: lookup.Epoch{Time: 1063204997, Level: 20}, n: 1063204996, x: 19}, {e: lookup.Epoch{Time: 1094340832, Level: 6}, n: 1094340633, x: 7}, {e: lookup.Epoch{Time: 1077880597, Level: 10}, n: 1075914292, x: 20}, {e: lookup.Epoch{Time: 1051114957, Level: 18}, n: 1051114957, x: 17}, {e: lookup.Epoch{Time: 1045649701, Level: 22}, n: 1045649700, x: 21}, {e: lookup.Epoch{Time: 1066198885, Level: 14}, n: 1066198884, x: 13}, {e: lookup.Epoch{Time: 1053231952, Level: 1}, n: 1053210845, x: 16}, {e: lookup.Epoch{Time: 1068763404, Level: 14}, n: 1068675428, x: 18}, {e: lookup.Epoch{Time: 1039042173, Level: 15}, n: 1038973110, x: 17}, {e: lookup.Epoch{Time: 1050747636, Level: 6}, n: 1050747364, x: 9}, {e: lookup.Epoch{Time: 1030034434, Level: 23}, n: 1030034433, x: 22}, {e: lookup.Epoch{Time: 1003783425, Level: 18}, n: 1003783424, x: 17}, {e: lookup.Epoch{Time: 988163976, Level: 15}, n: 988084064, x: 17}, {e: lookup.Epoch{Time: 1007222377, Level: 15}, n: 1007222377, x: 14}, {e: lookup.Epoch{Time: 1001211375, Level: 13}, n: 1001208178, x: 14}, {e: lookup.Epoch{Time: 997623199, Level: 8}, n: 997623198, x: 7}, {e: 
lookup.Epoch{Time: 1026283830, Level: 10}, n: 1006681704, x: 24}, {e: lookup.Epoch{Time: 1019421907, Level: 20}, n: 1019421906, x: 19}, {e: lookup.Epoch{Time: 1043154306, Level: 16}, n: 1043108343, x: 16}, {e: lookup.Epoch{Time: 1075643767, Level: 17}, n: 1075325898, x: 18}, {e: lookup.Epoch{Time: 1043726309, Level: 20}, n: 1043726308, x: 19}, {e: lookup.Epoch{Time: 1056415324, Level: 17}, n: 1056415324, x: 16}, {e: lookup.Epoch{Time: 1088650219, Level: 13}, n: 1088650218, x: 12}, {e: lookup.Epoch{Time: 1088551662, Level: 7}, n: 1088543355, x: 13}, {e: lookup.Epoch{Time: 1069667265, Level: 6}, n: 1069667075, x: 7}, {e: lookup.Epoch{Time: 1079145970, Level: 18}, n: 1079145969, x: 17}, {e: lookup.Epoch{Time: 1083338876, Level: 7}, n: 1083338875, x: 6}, {e: lookup.Epoch{Time: 1051581086, Level: 4}, n: 1051568869, x: 14}, {e: lookup.Epoch{Time: 1028430882, Level: 4}, n: 1028430864, x: 5}, {e: lookup.Epoch{Time: 1057356462, Level: 1}, n: 1057356417, x: 5}, {e: lookup.Epoch{Time: 1033104266, Level: 0}, n: 1033097479, x: 13}, {e: lookup.Epoch{Time: 1031391367, Level: 11}, n: 1031387304, x: 14}, {e: lookup.Epoch{Time: 1049781164, Level: 15}, n: 1049781163, x: 14}, {e: lookup.Epoch{Time: 1027271628, Level: 12}, n: 1027271627, x: 11}, {e: lookup.Epoch{Time: 1057270560, Level: 23}, n: 1057270560, x: 22}, {e: lookup.Epoch{Time: 1047501317, Level: 15}, n: 1047501317, x: 14}, {e: lookup.Epoch{Time: 1058349035, Level: 11}, n: 1045175573, x: 24}, {e: lookup.Epoch{Time: 1057396147, Level: 20}, n: 1057396147, x: 19}, {e: lookup.Epoch{Time: 1048906375, Level: 18}, n: 1039616919, x: 25}, {e: lookup.Epoch{Time: 1074294831, Level: 20}, n: 1074294831, x: 19}, {e: lookup.Epoch{Time: 1088946052, Level: 1}, n: 1088917364, x: 14}, {e: lookup.Epoch{Time: 1112337595, Level: 17}, n: 1111008110, x: 22}, {e: lookup.Epoch{Time: 1099990284, Level: 5}, n: 1099968370, x: 15}, {e: lookup.Epoch{Time: 1087036441, Level: 16}, n: 1053967855, x: 25}, {e: lookup.Epoch{Time: 1069225185, Level: 8}, n: 1069224660, x: 10}, {e: lookup.Epoch{Time: 1057505479, Level: 9}, n: 1057505170, x: 14}, {e: lookup.Epoch{Time: 1072381377, Level: 12}, n: 1065950959, x: 22}, {e: lookup.Epoch{Time: 1093887139, Level: 8}, n: 1093863305, x: 14}, {e: lookup.Epoch{Time: 1082366510, Level: 24}, n: 1082366510, x: 23}, {e: lookup.Epoch{Time: 1103231132, Level: 14}, n: 1102292201, x: 22}, {e: lookup.Epoch{Time: 1094502355, Level: 3}, n: 1094324652, x: 18}, {e: lookup.Epoch{Time: 1068488344, Level: 12}, n: 1067577330, x: 19}, {e: lookup.Epoch{Time: 1050278233, Level: 12}, n: 1050278232, x: 11}, {e: lookup.Epoch{Time: 1047660768, Level: 5}, n: 1047652137, x: 17}, {e: lookup.Epoch{Time: 1060116167, Level: 11}, n: 1060114091, x: 12}, {e: lookup.Epoch{Time: 1068149392, Level: 21}, n: 1052074801, x: 24}, {e: lookup.Epoch{Time: 1081934120, Level: 6}, n: 1081933847, x: 8}, {e: lookup.Epoch{Time: 1107943693, Level: 16}, n: 1107096139, x: 25}, {e: lookup.Epoch{Time: 1131571649, Level: 9}, n: 1131570428, x: 11}, {e: lookup.Epoch{Time: 1123139367, Level: 0}, n: 1122912198, x: 20}, {e: lookup.Epoch{Time: 1121144423, Level: 6}, n: 1120568289, x: 20}, {e: lookup.Epoch{Time: 1089932411, Level: 17}, n: 1089932410, x: 16}, {e: lookup.Epoch{Time: 1104899012, Level: 22}, n: 1098978789, x: 22}, {e: lookup.Epoch{Time: 1094588059, Level: 21}, n: 1094588059, x: 20}, {e: lookup.Epoch{Time: 1114987438, Level: 24}, n: 1114987437, x: 23}, {e: lookup.Epoch{Time: 1084186305, Level: 7}, n: 1084186241, x: 6}, {e: lookup.Epoch{Time: 1058827111, Level: 8}, n: 1058826504, x: 9}, {e: 
lookup.Epoch{Time: 1090679810, Level: 12}, n: 1090616539, x: 17}, {e: lookup.Epoch{Time: 1084299475, Level: 23}, n: 1084299475, x: 22}} + +func TestGetNextLevel(t *testing.T) { + + // First, test well-known cases + last := lookup.Epoch{ + Time: 1533799046, + Level: 5, + } + + level := lookup.GetNextLevel(last, last.Time) + expected := uint8(4) + if level != expected { + t.Fatalf("Expected GetNextLevel to return %d for same-time updates at a nonzero level, got %d", expected, level) + } + + level = lookup.GetNextLevel(last, last.Time+(1< lookup.HighestLevel { + v = 0 + } + now = last.Time + uint64(rand.Intn(1<. + +package feeds + +import ( + "fmt" + "strconv" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" +) + +// Query is used to specify constraints when performing an update lookup +// TimeLimit indicates an upper bound for the search. Set to 0 for "now" +type Query struct { + Feed + Hint lookup.Epoch + TimeLimit uint64 +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (q *Query) FromValues(values Values) error { + time, _ := strconv.ParseUint(values.Get("time"), 10, 64) + q.TimeLimit = time + + level, _ := strconv.ParseUint(values.Get("hint.level"), 10, 32) + q.Hint.Level = uint8(level) + q.Hint.Time, _ = strconv.ParseUint(values.Get("hint.time"), 10, 64) + if q.Feed.User == (common.Address{}) { + return q.Feed.FromValues(values) + } + return nil +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (q *Query) AppendValues(values Values) { + if q.TimeLimit != 0 { + values.Set("time", fmt.Sprintf("%d", q.TimeLimit)) + } + if q.Hint.Level != 0 { + values.Set("hint.level", fmt.Sprintf("%d", q.Hint.Level)) + } + if q.Hint.Time != 0 { + values.Set("hint.time", fmt.Sprintf("%d", q.Hint.Time)) + } + q.Feed.AppendValues(values) +} + +// NewQuery constructs an Query structure to find updates on or before `time` +// if time == 0, the latest update will be looked up +func NewQuery(feed *Feed, time uint64, hint lookup.Epoch) *Query { + return &Query{ + TimeLimit: time, + Feed: *feed, + Hint: hint, + } +} + +// NewQueryLatest generates lookup parameters that look for the latest update to a feed +func NewQueryLatest(feed *Feed, hint lookup.Epoch) *Query { + return NewQuery(feed, 0, hint) +} diff --git a/swarm/storage/feeds/query_test.go b/swarm/storage/feeds/query_test.go new file mode 100644 index 000000000..1420c69ae --- /dev/null +++ b/swarm/storage/feeds/query_test.go @@ -0,0 +1,38 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
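A short sketch of how the Query type above might be assembled and flattened into URL parameters, again assuming url.Values satisfies the Values interface; the keys it produces are the ones checked by the test below.

	package main

	import (
		"fmt"
		"net/url"

		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/swarm/storage/feeds"
		"github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup"
	)

	func main() {
		topic, _ := feeds.NewTopic("world news report, every hour", nil)
		feed := feeds.Feed{
			Topic: topic,
			User:  common.HexToAddress("0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c"),
		}

		// Look up the feed as it was at Unix time 5000, with no lookup hint;
		// a TimeLimit of 0 would mean "latest update" (see NewQueryLatest).
		query := feeds.NewQuery(&feed, 5000, lookup.NoClue)

		// Flatten into query-string parameters: "time", "topic", "user",
		// plus "hint.level"/"hint.time" when a nonzero hint is set.
		values := make(url.Values)
		query.AppendValues(values)
		fmt.Println(values.Encode())
	}
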
+ +package feeds + +import ( + "testing" +) + +func getTestQuery() *Query { + id := getTestID() + return &Query{ + TimeLimit: 5000, + Feed: id.Feed, + Hint: id.Epoch, + } +} + +func TestQueryValues(t *testing.T) { + var expected = KV{"hint.level": "25", "hint.time": "1000", "time": "5000", "topic": "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000", "user": "0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c"} + + query := getTestQuery() + testValueSerializer(t, query, expected) + +} diff --git a/swarm/storage/feeds/request.go b/swarm/storage/feeds/request.go new file mode 100644 index 000000000..719d8fba8 --- /dev/null +++ b/swarm/storage/feeds/request.go @@ -0,0 +1,284 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package feeds + +import ( + "bytes" + "encoding/json" + "hash" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" +) + +// Request represents a feed update message to be signed, or one that has already been signed +type Request struct { + Update // actual content that will be put on the chunk, less signature + Signature *Signature + idAddr storage.Address // cached chunk address for the update (not serialized, for internal use) + binaryData []byte // cached serialized data (does not get serialized again!, for efficiency/internal use) +} + +// updateRequestJSON represents a JSON-serialized UpdateRequest +type updateRequestJSON struct { + ID + ProtocolVersion uint8 `json:"protocolVersion"` + Data string `json:"data,omitempty"` + Signature string `json:"signature,omitempty"` +} + +// Request layout +// Update bytes +// SignatureLength bytes +const minimumSignedUpdateLength = minimumUpdateDataLength + signatureLength + +// NewFirstRequest returns a ready-to-sign request to publish the first update of a feed +func NewFirstRequest(topic Topic) *Request { + + request := new(Request) + + // get the current time + now := TimestampProvider.Now().Time + request.Epoch = lookup.GetFirstEpoch(now) + request.Feed.Topic = topic + request.Header.Version = ProtocolVersion + + return request +} + +// SetData stores the payload data that the feed update will carry +func (r *Request) SetData(data []byte) { + r.data = data + r.Signature = nil +} + +// IsUpdate returns true if this request models a signed update; otherwise it is merely a request to sign +func (r *Request) IsUpdate() bool { + return r.Signature != nil +} + +// Verify checks that the signature is valid +func (r *Request) Verify() (err error) { + if len(r.data) == 0 { + return NewError(ErrInvalidValue, "Update does not contain data") + } + if r.Signature == nil { + return NewError(ErrInvalidSignature, "Missing signature field") + } + + digest, err := r.GetDigest() + if err != nil
{ + return err + } + + // get the address of the signer (which also checks that it's a valid signature) + r.Feed.User, err = getUserAddr(digest, *r.Signature) + if err != nil { + return err + } + + // check that the lookup information contained in the chunk matches the updateAddr (chunk search key) + // that was used to retrieve this chunk + // if this validation fails, someone forged a chunk. + if !bytes.Equal(r.idAddr, r.Addr()) { + return NewError(ErrInvalidSignature, "Signature address does not match with update user address") + } + + return nil +} + +// Sign signs the feed update message and stores the resulting signature in the request +func (r *Request) Sign(signer Signer) error { + r.Feed.User = signer.Address() + r.binaryData = nil //invalidate serialized data + digest, err := r.GetDigest() // computes digest and serializes into .binaryData + if err != nil { + return err + } + + signature, err := signer.Sign(digest) + if err != nil { + return err + } + + // Although the Signer interface returns the public address of the signer, + // recover it from the signature to see if they match + userAddr, err := getUserAddr(digest, signature) + if err != nil { + return NewError(ErrInvalidSignature, "Error verifying signature") + } + + if userAddr != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign! + return NewError(ErrInvalidSignature, "Signer address does not match update user address") + } + + r.Signature = &signature + r.idAddr = r.Addr() + return nil +} + +// GetDigest creates the feed update digest used in signatures +// the serialized payload is cached in .binaryData +func (r *Request) GetDigest() (result common.Hash, err error) { + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + dataLength := r.Update.binaryLength() + if r.binaryData == nil { + r.binaryData = make([]byte, dataLength+signatureLength) + if err := r.Update.binaryPut(r.binaryData[:dataLength]); err != nil { + return result, err + } + } + hasher.Write(r.binaryData[:dataLength]) //everything except the signature. + + return common.BytesToHash(hasher.Sum(nil)), nil +} + +// toChunk creates an update chunk. +func (r *Request) toChunk() (storage.Chunk, error) { + + // Check that the update is signed and serialized + // For efficiency, data is serialized during signature and cached in + // the binaryData field when computing the signature digest in .GetDigest() + if r.Signature == nil || r.binaryData == nil { + return nil, NewError(ErrInvalidSignature, "toChunk called without a valid signature or payload data. Call .Sign() first.") + } + + updateLength := r.Update.binaryLength() + + // signature is the last item in the chunk data + copy(r.binaryData[updateLength:], r.Signature[:]) + + chunk := storage.NewChunk(r.idAddr, r.binaryData) + return chunk, nil +} + +// fromChunk populates this structure from chunk data. It does not verify the signature is valid.
+func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error { + // for update chunk layout see Request definition + + //deserialize the feed update portion + if err := r.Update.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil { + return err + } + + // Extract the signature + var signature *Signature + cursor := r.Update.binaryLength() + sigdata := chunkdata[cursor : cursor+signatureLength] + if len(sigdata) > 0 { + signature = &Signature{} + copy(signature[:], sigdata) + } + + r.Signature = signature + r.idAddr = updateAddr + r.binaryData = chunkdata + + return nil + +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (r *Request) FromValues(values Values, data []byte) error { + signatureBytes, err := hexutil.Decode(values.Get("signature")) + if err != nil { + r.Signature = nil + } else { + if len(signatureBytes) != signatureLength { + return NewError(ErrInvalidSignature, "Incorrect signature length") + } + r.Signature = new(Signature) + copy(r.Signature[:], signatureBytes) + } + err = r.Update.FromValues(values, data) + if err != nil { + return err + } + r.idAddr = r.Addr() + return err +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (r *Request) AppendValues(values Values) []byte { + if r.Signature != nil { + values.Set("signature", hexutil.Encode(r.Signature[:])) + } + return r.Update.AppendValues(values) +} + +// fromJSON takes an update request JSON and populates an UpdateRequest +func (r *Request) fromJSON(j *updateRequestJSON) error { + + r.ID = j.ID + r.Header.Version = j.ProtocolVersion + + var err error + if j.Data != "" { + r.data, err = hexutil.Decode(j.Data) + if err != nil { + return NewError(ErrInvalidValue, "Cannot decode data") + } + } + + if j.Signature != "" { + sigBytes, err := hexutil.Decode(j.Signature) + if err != nil || len(sigBytes) != signatureLength { + return NewError(ErrInvalidSignature, "Cannot decode signature") + } + r.Signature = new(Signature) + r.idAddr = r.Addr() + copy(r.Signature[:], sigBytes) + } + return nil +} + +// UnmarshalJSON takes a JSON structure stored in a byte array and populates the Request object +// Implements json.Unmarshaler interface +func (r *Request) UnmarshalJSON(rawData []byte) error { + var requestJSON updateRequestJSON + if err := json.Unmarshal(rawData, &requestJSON); err != nil { + return err + } + return r.fromJSON(&requestJSON) +} + +// MarshalJSON takes an update request and encodes it as a JSON structure into a byte array +// Implements json.Marshaler interface +func (r *Request) MarshalJSON() (rawData []byte, err error) { + var signatureString, dataString string + if r.Signature != nil { + signatureString = hexutil.Encode(r.Signature[:]) + } + if r.data != nil { + dataString = hexutil.Encode(r.data) + } + + requestJSON := &updateRequestJSON{ + ID: r.ID, + ProtocolVersion: r.Header.Version, + Data: dataString, + Signature: signatureString, + } + + return json.Marshal(requestJSON) +} diff --git a/swarm/storage/feeds/request_test.go b/swarm/storage/feeds/request_test.go new file mode 100644 index 000000000..2e3783834 --- /dev/null +++ b/swarm/storage/feeds/request_test.go @@ -0,0 +1,312 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. 
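// Illustrative sketch (not part of the patched sources): the intended life cycle of a
// Request as defined above — create the first request for a topic, attach data, sign
// it with a GenericSigner (introduced further down in this patch), and verify it
// before serializing it to send over the wire. The topic name and payload are
// placeholders; the key is a throwaway key generated just for the sketch.
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/storage/feeds"
)

func main() {
	privKey, err := crypto.GenerateKey() // throwaway key for the sketch
	if err != nil {
		log.Fatal(err)
	}
	signer := feeds.NewGenericSigner(privKey)

	topic, _ := feeds.NewTopic("weather-reports", nil) // placeholder topic name
	request := feeds.NewFirstRequest(topic)            // epoch taken from TimestampProvider.Now()
	request.SetData([]byte("sunny, 25C"))              // placeholder payload

	if err := request.Sign(signer); err != nil { // fills in Feed.User and the signature
		log.Fatal(err)
	}
	if err := request.Verify(); err != nil { // recovers the signer and checks the update address
		log.Fatal(err)
	}

	payload, err := request.MarshalJSON() // ready to be sent over the wire
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("signed feed update request: %s", payload)
}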
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feeds + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" +) + +func areEqualJSON(s1, s2 string) (bool, error) { + //credit for the trick: turtlemonvh https://gist.github.com/turtlemonvh/e4f7404e28387fadb8ad275a99596f67 + var o1 interface{} + var o2 interface{} + + err := json.Unmarshal([]byte(s1), &o1) + if err != nil { + return false, fmt.Errorf("Error mashalling string 1 :: %s", err.Error()) + } + err = json.Unmarshal([]byte(s2), &o2) + if err != nil { + return false, fmt.Errorf("Error mashalling string 2 :: %s", err.Error()) + } + + return reflect.DeepEqual(o1, o2), nil +} + +// TestEncodingDecodingUpdateRequests ensures that requests are serialized properly +// while also checking cryptographically that only the owner of a Feed can update it. +func TestEncodingDecodingUpdateRequests(t *testing.T) { + + charlie := newCharlieSigner() //Charlie + bob := newBobSigner() //Bob + + // Create a feed to our good guy Charlie's name + topic, _ := NewTopic("a good topic name", nil) + firstRequest := NewFirstRequest(topic) + firstRequest.User = charlie.Address() + + // We now encode the create message to simulate we send it over the wire + messageRawData, err := firstRequest.MarshalJSON() + if err != nil { + t.Fatalf("Error encoding first feed update request: %s", err) + } + + // ... the message arrives and is decoded... + var recoveredFirstRequest Request + if err := recoveredFirstRequest.UnmarshalJSON(messageRawData); err != nil { + t.Fatalf("Error decoding first feed update request: %s", err) + } + + // ... but verification should fail because it is not signed! + if err := recoveredFirstRequest.Verify(); err == nil { + t.Fatal("Expected Verify to fail since the message is not signed") + } + + // We now assume that the feed ypdate was created and propagated. + + const expectedSignature = "0x7235b27a68372ddebcf78eba48543fa460864b0b0e99cb533fcd3664820e603312d29426dd00fb39628f5299480a69bf6e462838d78de49ce0704c754c9deb2601" + const expectedJSON = `{"feed":{"topic":"0x6120676f6f6420746f706963206e616d65000000000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}` + + //Put together an unsigned update request that we will serialize to send it to the signer. 
+ data := []byte("This hour's update: Swarm 99.0 has been released!") + request := &Request{ + Update: Update{ + ID: ID{ + Epoch: lookup.Epoch{ + Time: 1000, + Level: 1, + }, + Feed: firstRequest.Update.Feed, + }, + data: data, + }, + } + + messageRawData, err = request.MarshalJSON() + if err != nil { + t.Fatalf("Error encoding update request: %s", err) + } + + equalJSON, err := areEqualJSON(string(messageRawData), expectedJSON) + if err != nil { + t.Fatalf("Error decoding update request JSON: %s", err) + } + if !equalJSON { + t.Fatalf("Received a different JSON message. Expected %s, got %s", expectedJSON, string(messageRawData)) + } + + // now the encoded message messageRawData is sent over the wire and arrives to the signer + + //Attempt to extract an UpdateRequest out of the encoded message + var recoveredRequest Request + if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil { + t.Fatalf("Error decoding update request: %s", err) + } + + //sign the request and see if it matches our predefined signature above. + if err := recoveredRequest.Sign(charlie); err != nil { + t.Fatalf("Error signing request: %s", err) + } + + compareByteSliceToExpectedHex(t, "signature", recoveredRequest.Signature[:], expectedSignature) + + // mess with the signature and see what happens. To alter the signature, we briefly decode it as JSON + // to alter the signature field. + var j updateRequestJSON + if err := json.Unmarshal([]byte(expectedJSON), &j); err != nil { + t.Fatal("Error unmarshalling test json, check expectedJSON constant") + } + j.Signature = "Certainly not a signature" + corruptMessage, _ := json.Marshal(j) // encode the message with the bad signature + var corruptRequest Request + if err = corruptRequest.UnmarshalJSON(corruptMessage); err == nil { + t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature") + } + + // Now imagine Bob wants to create an update of his own about the same Feed, + // signing a message with his private key + if err := request.Sign(bob); err != nil { + t.Fatalf("Error signing: %s", err) + } + + // Now Bob encodes the message to send it over the wire... + messageRawData, err = request.MarshalJSON() + if err != nil { + t.Fatalf("Error encoding message:%s", err) + } + + // ... the message arrives to our Swarm node and it is decoded. 
+ recoveredRequest = Request{} + if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil { + t.Fatalf("Error decoding message:%s", err) + } + + // Before checking what happened with Bob's update, let's see what would happen if we mess + // with the signature big time to see if Verify catches it + savedSignature := *recoveredRequest.Signature // save the signature for later + binary.LittleEndian.PutUint64(recoveredRequest.Signature[5:], 556845463424) // write some random data to break the signature + if err = recoveredRequest.Verify(); err == nil { + t.Fatal("Expected Verify to fail on corrupt signature") + } + + // restore the Bob's signature from corruption + *recoveredRequest.Signature = savedSignature + + // Now the signature is not corrupt + if err = recoveredRequest.Verify(); err != nil { + t.Fatal(err) + } + + // Reuse object and sign with our friend Charlie's private key + if err := recoveredRequest.Sign(charlie); err != nil { + t.Fatalf("Error signing with the correct private key: %s", err) + } + + // And now, Verify should work since this update now belongs to Charlie + if err = recoveredRequest.Verify(); err != nil { + t.Fatalf("Error verifying that Charlie, can sign a reused request object:%s", err) + } + + // mess with the lookup key to make sure Verify fails: + recoveredRequest.Time = 77999 // this will alter the lookup key + if err = recoveredRequest.Verify(); err == nil { + t.Fatalf("Expected Verify to fail since the lookup key has been altered") + } +} + +func getTestRequest() *Request { + return &Request{ + Update: *getTestFeedUpdate(), + } +} + +func TestUpdateChunkSerializationErrorChecking(t *testing.T) { + + // Test that parseUpdate fails if the chunk is too small + var r Request + if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1+signatureLength)); err == nil { + t.Fatalf("Expected request.fromChunk to fail when chunkData contains less than %d bytes", minimumUpdateDataLength) + } + + r = *getTestRequest() + + _, err := r.toChunk() + if err == nil { + t.Fatal("Expected request.toChunk to fail when there is no data") + } + r.data = []byte("Al bien hacer jamás le falta premio") // put some arbitrary length data + _, err = r.toChunk() + if err == nil { + t.Fatal("expected request.toChunk to fail when there is no signature") + } + + charlie := newCharlieSigner() + if err := r.Sign(charlie); err != nil { + t.Fatalf("error signing:%s", err) + } + + chunk, err := r.toChunk() + if err != nil { + t.Fatalf("error creating update chunk:%s", err) + } + + compareByteSliceToExpectedHex(t, "chunk", chunk.Data(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019416c206269656e206861636572206a616dc3a173206c652066616c7461207072656d696f5a0ffe0bc27f207cd5b00944c8b9cee93e08b89b5ada777f123ac535189333f174a6a4ca2f43a92c4a477a49d774813c36ce8288552c58e6205b0ac35d0507eb00") + + var recovered Request + recovered.fromChunk(chunk.Address(), chunk.Data()) + if !reflect.DeepEqual(recovered, r) { + t.Fatal("Expected recovered Request update to equal the original one") + } +} + +// check that signature address matches update signer address +func TestReverse(t *testing.T) { + + epoch := lookup.Epoch{ + Time: 7888, + Level: 6, + } + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key + signer := newAliceSigner() + + // set up rpc and create Feeds handler + _, _, teardownTest, err := 
setupTest(timeProvider, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + + topic, _ := NewTopic("Cervantes quotes", nil) + feed := Feed{ + Topic: topic, + User: signer.Address(), + } + + data := []byte("Donde una puerta se cierra, otra se abre") + + request := new(Request) + request.Feed = feed + request.Epoch = epoch + request.data = data + + // generate a chunk key for this request + key := request.Addr() + + if err = request.Sign(signer); err != nil { + t.Fatal(err) + } + + chunk, err := request.toChunk() + if err != nil { + t.Fatal(err) + } + + // check that we can recover the owner account from the update chunk's signature + var checkUpdate Request + if err := checkUpdate.fromChunk(chunk.Address(), chunk.Data()); err != nil { + t.Fatal(err) + } + checkdigest, err := checkUpdate.GetDigest() + if err != nil { + t.Fatal(err) + } + recoveredAddr, err := getUserAddr(checkdigest, *checkUpdate.Signature) + if err != nil { + t.Fatalf("Retrieve address from signature fail: %v", err) + } + originalAddr := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) + + // check that the metadata retrieved from the chunk matches what we gave it + if recoveredAddr != originalAddr { + t.Fatalf("addresses dont match: %x != %x", originalAddr, recoveredAddr) + } + + if !bytes.Equal(key[:], chunk.Address()[:]) { + t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Address()) + } + if epoch != checkUpdate.Epoch { + t.Fatalf("Expected epoch to be '%s', was '%s'", epoch.String(), checkUpdate.Epoch.String()) + } + if !bytes.Equal(data, checkUpdate.data) { + t.Fatalf("Expected data '%x', was '%x'", data, checkUpdate.data) + } +} diff --git a/swarm/storage/feeds/sign.go b/swarm/storage/feeds/sign.go new file mode 100644 index 000000000..a69942f2b --- /dev/null +++ b/swarm/storage/feeds/sign.go @@ -0,0 +1,75 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package feeds + +import ( + "crypto/ecdsa" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +const signatureLength = 65 + +// Signature is an alias for a static byte array with the size of a signature +type Signature [signatureLength]byte + +// Signer signs Feed update payloads +type Signer interface { + Sign(common.Hash) (Signature, error) + Address() common.Address +} + +// GenericSigner implements the Signer interface +// It is the vanilla signer that probably should be used in most cases +type GenericSigner struct { + PrivKey *ecdsa.PrivateKey + address common.Address +} + +// NewGenericSigner builds a signer that will sign everything with the provided private key +func NewGenericSigner(privKey *ecdsa.PrivateKey) *GenericSigner { + return &GenericSigner{ + PrivKey: privKey, + address: crypto.PubkeyToAddress(privKey.PublicKey), + } +} + +// Sign signs the supplied data +// It wraps the ethereum crypto.Sign() method +func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error) { + signaturebytes, err := crypto.Sign(data.Bytes(), s.PrivKey) + if err != nil { + return + } + copy(signature[:], signaturebytes) + return +} + +// Address returns the public address derived from the signer's private key +func (s *GenericSigner) Address() common.Address { + return s.address +} + +// getUserAddr extracts the address of the Feed update signer +func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) { + pub, err := crypto.SigToPub(digest.Bytes(), signature[:]) + if err != nil { + return common.Address{}, err + } + return crypto.PubkeyToAddress(*pub), nil +} diff --git a/swarm/storage/feeds/testutil.go b/swarm/storage/feeds/testutil.go new file mode 100644 index 000000000..879f73348 --- /dev/null +++ b/swarm/storage/feeds/testutil.go @@ -0,0 +1,71 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package feeds + +import ( + "context" + "fmt" + "path/filepath" + "sync" + + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +const ( + testDbDirName = "feeds" +) + +type TestHandler struct { + *Handler +} + +func (t *TestHandler) Close() { + t.chunkStore.Close() +} + +type mockNetFetcher struct{} + +func (m *mockNetFetcher) Request(ctx context.Context, hopCount uint8) { +} +func (m *mockNetFetcher) Offer(ctx context.Context, source *enode.ID) { +} + +func newFakeNetFetcher(context.Context, storage.Address, *sync.Map) storage.NetFetcher { + return &mockNetFetcher{} +} + +// NewTestHandler creates a Handler object to be used for testing purposes.
+func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) { + path := filepath.Join(datadir, testDbDirName) + fh := NewHandler(params) + localstoreparams := storage.NewDefaultLocalStoreParams() + localstoreparams.Init(path) + localStore, err := storage.NewLocalStore(localstoreparams, nil) + if err != nil { + return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err) + } + localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm))) + localStore.Validators = append(localStore.Validators, fh) + netStore, err := storage.NewNetStore(localStore, nil) + if err != nil { + return nil, err + } + netStore.NewNetFetcherFunc = newFakeNetFetcher + fh.SetStore(netStore) + return &TestHandler{fh}, nil +} diff --git a/swarm/storage/feeds/timestampprovider.go b/swarm/storage/feeds/timestampprovider.go new file mode 100644 index 000000000..f6aa0775c --- /dev/null +++ b/swarm/storage/feeds/timestampprovider.go @@ -0,0 +1,84 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
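// Illustrative sketch (not part of the patched sources): publishing a feed update and
// reading it back through the test handler above. It assumes the feeds Handler keeps
// the NewRequest/Update/Lookup/GetContent methods of the mru handler that this patch
// removes further down; the topic name and payload are placeholders.
package main

import (
	"context"
	"io/ioutil"
	"log"
	"os"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/storage/feeds"
	"github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup"
)

func main() {
	datadir, err := ioutil.TempDir("", "feeds-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(datadir)

	handler, err := feeds.NewTestHandler(datadir, &feeds.HandlerParams{})
	if err != nil {
		log.Fatal(err)
	}
	defer handler.Close()

	privKey, _ := crypto.GenerateKey() // throwaway key
	signer := feeds.NewGenericSigner(privKey)

	topic, _ := feeds.NewTopic("weather-reports", nil) // placeholder topic name
	feed := feeds.Feed{Topic: topic, User: signer.Address()}

	ctx := context.Background()

	// Ask the handler for a request pre-filled with the right epoch,
	// attach the payload, sign it and publish it.
	request, err := handler.NewRequest(ctx, &feed)
	if err != nil {
		log.Fatal(err)
	}
	request.SetData([]byte("sunny, 25C"))
	if err := request.Sign(signer); err != nil {
		log.Fatal(err)
	}
	if _, err := handler.Update(ctx, request); err != nil {
		log.Fatal(err)
	}

	// Look the update up again (this also fills the handler's cache) and read it back.
	if _, err := handler.Lookup(ctx, feeds.NewQueryLatest(&feed, lookup.NoClue)); err != nil {
		log.Fatal(err)
	}
	_, data, err := handler.GetContent(&feed)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("latest update: %s", data)
}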
+ +package feeds + +import ( + "encoding/binary" + "encoding/json" + "time" +) + +// TimestampProvider sets the time source of the feeds package +var TimestampProvider timestampProvider = NewDefaultTimestampProvider() + +// Timestamp encodes a point in time as a Unix epoch +type Timestamp struct { + Time uint64 `json:"time"` // Unix epoch timestamp, in seconds +} + +// 8 bytes uint64 Time +const timestampLength = 8 + +// timestampProvider interface describes a source of timestamp information +type timestampProvider interface { + Now() Timestamp // returns the current timestamp information +} + +// binaryGet populates the timestamp structure from the given byte slice +func (t *Timestamp) binaryGet(data []byte) error { + if len(data) != timestampLength { + return NewError(ErrCorruptData, "timestamp data has the wrong size") + } + t.Time = binary.LittleEndian.Uint64(data[:8]) + return nil +} + +// binaryPut Serializes a Timestamp to a byte slice +func (t *Timestamp) binaryPut(data []byte) error { + if len(data) != timestampLength { + return NewError(ErrCorruptData, "timestamp data has the wrong size") + } + binary.LittleEndian.PutUint64(data, t.Time) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface +func (t *Timestamp) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &t.Time) +} + +// MarshalJSON implements the json.Marshaller interface +func (t *Timestamp) MarshalJSON() ([]byte, error) { + return json.Marshal(t.Time) +} + +// DefaultTimestampProvider is a TimestampProvider that uses system time +// as time source +type DefaultTimestampProvider struct { +} + +// NewDefaultTimestampProvider creates a system clock based timestamp provider +func NewDefaultTimestampProvider() *DefaultTimestampProvider { + return &DefaultTimestampProvider{} +} + +// Now returns the current time according to this provider +func (dtp *DefaultTimestampProvider) Now() Timestamp { + return Timestamp{ + Time: uint64(time.Now().Unix()), + } +} diff --git a/swarm/storage/feeds/topic.go b/swarm/storage/feeds/topic.go new file mode 100644 index 000000000..2dc8c18cd --- /dev/null +++ b/swarm/storage/feeds/topic.go @@ -0,0 +1,105 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feeds + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/ethereum/go-ethereum/common/bitutil" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// TopicLength establishes the max length of a topic string +const TopicLength = storage.AddressLength + +// Topic represents what a feed is about +type Topic [TopicLength]byte + +// ErrTopicTooLong is returned when creating a topic with a name/related content too long +var ErrTopicTooLong = fmt.Errorf("Topic is too long. 
Max length is %d", TopicLength) + +// NewTopic creates a new topic from a provided name and "related content" byte array, +// merging the two together. +// If relatedContent or name are longer than TopicLength, they will be truncated and an error returned +// name can be an empty string +// relatedContent can be nil +func NewTopic(name string, relatedContent []byte) (topic Topic, err error) { + if relatedContent != nil { + contentLength := len(relatedContent) + if contentLength > TopicLength { + contentLength = TopicLength + err = ErrTopicTooLong + } + copy(topic[:], relatedContent[:contentLength]) + } + nameBytes := []byte(name) + nameLength := len(nameBytes) + if nameLength > TopicLength { + nameLength = TopicLength + err = ErrTopicTooLong + } + bitutil.XORBytes(topic[:], topic[:], nameBytes[:nameLength]) + return topic, err +} + +// Hex will return the topic encoded as an hex string +func (t *Topic) Hex() string { + return hexutil.Encode(t[:]) +} + +// FromHex will parse a hex string into this Topic instance +func (t *Topic) FromHex(hex string) error { + bytes, err := hexutil.Decode(hex) + if err != nil || len(bytes) != len(t) { + return NewErrorf(ErrInvalidValue, "Cannot decode topic") + } + copy(t[:], bytes) + return nil +} + +// Name will try to extract the topic name out of the Topic +func (t *Topic) Name(relatedContent []byte) string { + nameBytes := *t + if relatedContent != nil { + contentLength := len(relatedContent) + if contentLength > TopicLength { + contentLength = TopicLength + } + bitutil.XORBytes(nameBytes[:], t[:], relatedContent[:contentLength]) + } + z := bytes.IndexByte(nameBytes[:], 0) + if z < 0 { + z = TopicLength + } + return string(nameBytes[:z]) + +} + +// UnmarshalJSON implements the json.Unmarshaller interface +func (t *Topic) UnmarshalJSON(data []byte) error { + var hex string + json.Unmarshal(data, &hex) + return t.FromHex(hex) +} + +// MarshalJSON implements the json.Marshaller interface +func (t *Topic) MarshalJSON() ([]byte, error) { + return json.Marshal(t.Hex()) +} diff --git a/swarm/storage/feeds/topic_test.go b/swarm/storage/feeds/topic_test.go new file mode 100644 index 000000000..8994002d7 --- /dev/null +++ b/swarm/storage/feeds/topic_test.go @@ -0,0 +1,50 @@ +package feeds + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +func TestTopic(t *testing.T) { + related, _ := hexutil.Decode("0xabcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789") + topicName := "test-topic" + topic, _ := NewTopic(topicName, related) + hex := topic.Hex() + expectedHex := "0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789" + if hex != expectedHex { + t.Fatalf("Expected %s, got %s", expectedHex, hex) + } + + var topic2 Topic + topic2.FromHex(hex) + if topic2 != topic { + t.Fatal("Expected recovered topic to be equal to original one") + } + + if topic2.Name(related) != topicName { + t.Fatal("Retrieved name does not match") + } + + bytes, err := topic2.MarshalJSON() + if err != nil { + t.Fatal(err) + } + expectedJSON := `"0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789"` + equal, err := areEqualJSON(expectedJSON, string(bytes)) + if err != nil { + t.Fatal(err) + } + if !equal { + t.Fatalf("Expected JSON to be %s, got %s", expectedJSON, string(bytes)) + } + + err = topic2.UnmarshalJSON(bytes) + if err != nil { + t.Fatal(err) + } + if topic2 != topic { + t.Fatal("Expected recovered topic to be equal to original one") + } + +} diff --git a/swarm/storage/feeds/update.go b/swarm/storage/feeds/update.go 
new file mode 100644 index 000000000..02bd37522 --- /dev/null +++ b/swarm/storage/feeds/update.go @@ -0,0 +1,132 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feeds + +import ( + "fmt" + "strconv" + + "github.com/ethereum/go-ethereum/swarm/chunk" +) + +// ProtocolVersion defines the current version of the protocol that will be included in each update message +const ProtocolVersion uint8 = 0 + +const headerLength = 8 + +// Header defines a update message header including a protocol version byte +type Header struct { + Version uint8 // Protocol version + Padding [headerLength - 1]uint8 // reserved for future use +} + +// Update encapsulates the information sent as part of a feed update +type Update struct { + Header Header // + ID // Feed Update identifying information + data []byte // actual data payload +} + +const minimumUpdateDataLength = idLength + headerLength + 1 +const maxUpdateDataLength = chunk.DefaultSize - signatureLength - idLength - headerLength + +// binaryPut serializes the feed update information into the given slice +func (r *Update) binaryPut(serializedData []byte) error { + datalength := len(r.data) + if datalength == 0 { + return NewError(ErrInvalidValue, "a feed update must contain data") + } + + if datalength > maxUpdateDataLength { + return NewErrorf(ErrInvalidValue, "feed update data is too big (length=%d). Max length=%d", datalength, maxUpdateDataLength) + } + + if len(serializedData) != r.binaryLength() { + return NewErrorf(ErrInvalidValue, "slice passed to putBinary must be of exact size. 
Expected %d bytes", r.binaryLength()) + } + + var cursor int + // serialize Header + serializedData[cursor] = r.Header.Version + copy(serializedData[cursor+1:headerLength], r.Header.Padding[:headerLength-1]) + cursor += headerLength + + // serialize ID + if err := r.ID.binaryPut(serializedData[cursor : cursor+idLength]); err != nil { + return err + } + cursor += idLength + + // add the data + copy(serializedData[cursor:], r.data) + cursor += datalength + + return nil +} + +// binaryLength returns the expected number of bytes this structure will take to encode +func (r *Update) binaryLength() int { + return idLength + headerLength + len(r.data) +} + +// binaryGet populates this instance from the information contained in the passed byte slice +func (r *Update) binaryGet(serializedData []byte) error { + if len(serializedData) < minimumUpdateDataLength { + return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a feed update chunk", minimumUpdateDataLength) + } + dataLength := len(serializedData) - idLength - headerLength + // at this point we can be satisfied that we have the correct data length to read + + var cursor int + + // deserialize Header + r.Header.Version = serializedData[cursor] // extract the protocol version + copy(r.Header.Padding[:headerLength-1], serializedData[cursor+1:headerLength]) // extract the padding + cursor += headerLength + + if err := r.ID.binaryGet(serializedData[cursor : cursor+idLength]); err != nil { + return err + } + cursor += idLength + + data := serializedData[cursor : cursor+dataLength] + cursor += dataLength + + // now that all checks have passed, copy data into structure + r.data = make([]byte, dataLength) + copy(r.data, data) + + return nil + +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (r *Update) FromValues(values Values, data []byte) error { + r.data = data + version, _ := strconv.ParseUint(values.Get("protocolVersion"), 10, 32) + r.Header.Version = uint8(version) + return r.ID.FromValues(values) +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (r *Update) AppendValues(values Values) []byte { + r.ID.AppendValues(values) + values.Set("protocolVersion", fmt.Sprintf("%d", r.Header.Version)) + return r.data +} diff --git a/swarm/storage/feeds/update_test.go b/swarm/storage/feeds/update_test.go new file mode 100644 index 000000000..7763da0c8 --- /dev/null +++ b/swarm/storage/feeds/update_test.go @@ -0,0 +1,50 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package feeds + +import ( + "testing" +) + +func getTestFeedUpdate() *Update { + return &Update{ + ID: *getTestID(), + data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"), + } +} + +func TestUpdateSerializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestFeedUpdate(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f") +} + +func TestUpdateLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestFeedUpdate()) + // Test fail if update is too big + update := getTestFeedUpdate() + update.data = make([]byte, maxUpdateDataLength+100) + serialized := make([]byte, update.binaryLength()) + if err := update.binaryPut(serialized); err == nil { + t.Fatal("Expected update.binaryPut to fail since update is too big") + } + + // test fail if data is empty or nil + update.data = nil + serialized = make([]byte, update.binaryLength()) + if err := update.binaryPut(serialized); err == nil { + t.Fatal("Expected update.binaryPut to fail since data is empty") + } +} diff --git a/swarm/storage/mru/binaryserializer.go b/swarm/storage/mru/binaryserializer.go deleted file mode 100644 index 3123a82ee..000000000 --- a/swarm/storage/mru/binaryserializer.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package mru - -import "github.com/ethereum/go-ethereum/common/hexutil" - -type binarySerializer interface { - binaryPut(serializedData []byte) error - binaryLength() int - binaryGet(serializedData []byte) error -} - -// Values interface represents a string key-value store -// useful for building query strings -type Values interface { - Get(key string) string - Set(key, value string) -} - -type valueSerializer interface { - FromValues(values Values) error - AppendValues(values Values) -} - -// Hex serializes the structure and converts it to a hex string -func Hex(bin binarySerializer) string { - b := make([]byte, bin.binaryLength()) - bin.binaryPut(b) - return hexutil.Encode(b) -} diff --git a/swarm/storage/mru/binaryserializer_test.go b/swarm/storage/mru/binaryserializer_test.go deleted file mode 100644 index f524157d6..000000000 --- a/swarm/storage/mru/binaryserializer_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package mru - -import ( - "encoding/json" - "reflect" - "testing" - - "github.com/ethereum/go-ethereum/common/hexutil" -) - -// KV mocks a key value store -type KV map[string]string - -func (kv KV) Get(key string) string { - return kv[key] -} -func (kv KV) Set(key, value string) { - kv[key] = value -} - -func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) { - if hexutil.Encode(actualValue) != expectedHex { - t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue)) - } -} - -func testBinarySerializerRecovery(t *testing.T, bin binarySerializer, expectedHex string) { - name := reflect.TypeOf(bin).Elem().Name() - serialized := make([]byte, bin.binaryLength()) - if err := bin.binaryPut(serialized); err != nil { - t.Fatalf("%s.binaryPut error when trying to serialize structure: %s", name, err) - } - - compareByteSliceToExpectedHex(t, name, serialized, expectedHex) - - recovered := reflect.New(reflect.TypeOf(bin).Elem()).Interface().(binarySerializer) - if err := recovered.binaryGet(serialized); err != nil { - t.Fatalf("%s.binaryGet error when trying to deserialize structure: %s", name, err) - } - - if !reflect.DeepEqual(bin, recovered) { - t.Fatalf("Expected that the recovered %s equals the marshalled %s", name, name) - } - - serializedWrongLength := make([]byte, 1) - copy(serializedWrongLength[:], serialized) - if err := recovered.binaryGet(serializedWrongLength); err == nil { - t.Fatalf("Expected %s.binaryGet to fail since data is too small", name) - } -} - -func testBinarySerializerLengthCheck(t *testing.T, bin binarySerializer) { - name := reflect.TypeOf(bin).Elem().Name() - // make a slice that is too small to contain the metadata - serialized := make([]byte, bin.binaryLength()-1) - - if err := bin.binaryPut(serialized); err == nil { - t.Fatalf("Expected %s.binaryPut to fail, since target slice is too small", name) - } -} - -func testValueSerializer(t *testing.T, v valueSerializer, expected KV) { - name := reflect.TypeOf(v).Elem().Name() - kv := make(KV) - - v.AppendValues(kv) - if !reflect.DeepEqual(expected, kv) { - expj, _ := json.Marshal(expected) - gotj, _ := json.Marshal(kv) - t.Fatalf("Expected %s.AppendValues to return %s, got %s", name, string(expj), string(gotj)) - } - - recovered := reflect.New(reflect.TypeOf(v).Elem()).Interface().(valueSerializer) - err := recovered.FromValues(kv) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(recovered, v) { - t.Fatalf("Expected recovered %s to be the same", name) - } -} diff --git a/swarm/storage/mru/cacheentry.go b/swarm/storage/mru/cacheentry.go deleted file mode 100644 index f6ed72818..000000000 --- a/swarm/storage/mru/cacheentry.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package mru - -import ( - "bytes" - "context" - "time" - - "github.com/ethereum/go-ethereum/swarm/storage" -) - -const ( - hasherCount = 8 - feedsHashAlgorithm = storage.SHA3Hash - defaultRetrieveTimeout = 100 * time.Millisecond -) - -// cacheEntry caches the last known update of a specific Feed. -type cacheEntry struct { - Update - *bytes.Reader - lastKey storage.Address -} - -// implements storage.LazySectionReader -func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) { - return int64(len(r.Update.data)), nil -} - -//returns the Feed's topic -func (r *cacheEntry) Topic() Topic { - return r.Feed.Topic -} diff --git a/swarm/storage/mru/doc.go b/swarm/storage/mru/doc.go deleted file mode 100644 index 7ffd4a3c6..000000000 --- a/swarm/storage/mru/doc.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Package feeds defines Swarm Feeds. - -Swarm Feeds allows a user to build an update feed about a particular topic -without resorting to ENS on each update. -The update scheme is built on swarm chunks with chunk keys following -a predictable, versionable pattern. - -A Feed is tied to a unique identifier that is deterministically generated out of -the chosen topic. - -A Feed is defined as the series of updates of a specific user about a particular topic - -Actual data updates are also made in the form of swarm chunks. The keys -of the updates are the hash of a concatenation of properties as follows: - -updateAddr = H(Feed, Epoch ID) -where H is the SHA3 hash function -Feed is the combination of Topic and the user address -Epoch ID is a time slot. See the lookup package for more information. - -A user looking up a the latest update in a Feed only needs to know the Topic -and the other user's address. - -The Feed Update data is: -updatedata = Feed|Epoch|data - -The full update data that goes in the chunk payload is: -updatedata|sign(updatedata) - -Structure Summary: - -Request: Feed Update with signature - Update: headers + data - Header: Protocol version and reserved for future use placeholders - ID: Information about how to locate a specific update - Feed: Represents a user's series of publications about a specific Topic - Topic: Item that the updates are about - User: User who updates the Feed - Epoch: time slot where the update is stored - -*/ -package mru diff --git a/swarm/storage/mru/error.go b/swarm/storage/mru/error.go deleted file mode 100644 index 452cc80f0..000000000 --- a/swarm/storage/mru/error.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package mru - -import ( - "fmt" -) - -const ( - ErrInit = iota - ErrNotFound - ErrIO - ErrUnauthorized - ErrInvalidValue - ErrDataOverflow - ErrNothingToReturn - ErrCorruptData - ErrInvalidSignature - ErrNotSynced - ErrPeriodDepth - ErrCnt -) - -// Error is a the typed error object used for Swarm Feeds -type Error struct { - code int - err string -} - -// Error implements the error interface -func (e *Error) Error() string { - return e.err -} - -// Code returns the error code -// Error codes are enumerated in the error.go file within the mru package -func (e *Error) Code() int { - return e.code -} - -// NewError creates a new Swarm Feeds Error object with the specified code and custom error message -func NewError(code int, s string) error { - if code < 0 || code >= ErrCnt { - panic("no such error code!") - } - r := &Error{ - err: s, - } - switch code { - case ErrNotFound, ErrIO, ErrUnauthorized, ErrInvalidValue, ErrDataOverflow, ErrNothingToReturn, ErrInvalidSignature, ErrNotSynced, ErrPeriodDepth, ErrCorruptData: - r.code = code - } - return r -} - -// NewErrorf is a convenience version of NewError that incorporates printf-style formatting -func NewErrorf(code int, format string, args ...interface{}) error { - return NewError(code, fmt.Sprintf(format, args...)) -} diff --git a/swarm/storage/mru/handler.go b/swarm/storage/mru/handler.go deleted file mode 100644 index cc0da7df9..000000000 --- a/swarm/storage/mru/handler.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -// Handler is the API for Feeds -// It enables creating, updating, syncing and retrieving feed updates and their data -package mru - -import ( - "bytes" - "context" - "fmt" - "sync" - "time" - - "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" - - "github.com/ethereum/go-ethereum/swarm/log" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -type Handler struct { - chunkStore *storage.NetStore - HashSize int - cache map[uint64]*cacheEntry - cacheLock sync.RWMutex - storeTimeout time.Duration - queryMaxPeriods uint32 -} - -// HandlerParams pass parameters to the Handler constructor NewHandler -// Signer and TimestampProvider are mandatory parameters -type HandlerParams struct { -} - -// hashPool contains a pool of ready hashers -var hashPool sync.Pool - -// init initializes the package and hashPool -func init() { - hashPool = sync.Pool{ - New: func() interface{} { - return storage.MakeHashFunc(feedsHashAlgorithm)() - }, - } -} - -// NewHandler creates a new Swarm Feeds API -func NewHandler(params *HandlerParams) *Handler { - fh := &Handler{ - cache: make(map[uint64]*cacheEntry), - } - - for i := 0; i < hasherCount; i++ { - hashfunc := storage.MakeHashFunc(feedsHashAlgorithm)() - if fh.HashSize == 0 { - fh.HashSize = hashfunc.Size() - } - hashPool.Put(hashfunc) - } - - return fh -} - -// SetStore sets the store backend for the Swarm Feeds API -func (h *Handler) SetStore(store *storage.NetStore) { - h.chunkStore = store -} - -// Validate is a chunk validation method -// If it looks like a feed update, the chunk address is checked against the userAddr of the update's signature -// It implements the storage.ChunkValidator interface -func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { - dataLength := len(data) - if dataLength < minimumSignedUpdateLength { - return false - } - - // check if it is a properly formatted update chunk with - // valid signature and proof of ownership of the feed it is trying - // to update - - // First, deserialize the chunk - var r Request - if err := r.fromChunk(chunkAddr, data); err != nil { - log.Debug("Invalid feed update chunk", "addr", chunkAddr.Hex(), "err", err.Error()) - return false - } - - // Verify signatures and that the signer actually owns the feed - // If it fails, it means either the signature is not valid, data is corrupted - // or someone is trying to update someone else's feed. - if err := r.Verify(); err != nil { - log.Debug("Invalid feed update signature", "err", err) - return false - } - - return true -} - -// GetContent retrieves the data payload of the last synced update of the Feed -func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) { - if feed == nil { - return nil, nil, NewError(ErrInvalidValue, "feed is nil") - } - feedUpdate := h.get(feed) - if feedUpdate == nil { - return nil, nil, NewError(ErrNotFound, "feed update not cached") - } - return feedUpdate.lastKey, feedUpdate.data, nil -} - -// NewRequest prepares a Request structure with all the necessary information to -// just add the desired data and sign it. 
-// The resulting structure can then be signed and passed to Handler.Update to be verified and sent -func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request, err error) { - if feed == nil { - return nil, NewError(ErrInvalidValue, "feed cannot be nil") - } - - now := TimestampProvider.Now().Time - request = new(Request) - request.Header.Version = ProtocolVersion - - query := NewQueryLatest(feed, lookup.NoClue) - - feedUpdate, err := h.Lookup(ctx, query) - if err != nil { - if err.(*Error).code != ErrNotFound { - return nil, err - } - // not finding updates means that there is a network error - // or that the feed really does not have updates - } - - request.Feed = *feed - - // if we already have an update, then find next epoch - if feedUpdate != nil { - request.Epoch = lookup.GetNextEpoch(feedUpdate.Epoch, now) - } else { - request.Epoch = lookup.GetFirstEpoch(now) - } - - return request, nil -} - -// Lookup retrieves a specific or latest feed update -// Lookup works differently depending on the configuration of `query` -// See the `query` documentation and helper functions: -// `NewQueryLatest` and `NewQuery` -func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) { - - timeLimit := query.TimeLimit - if timeLimit == 0 { // if time limit is set to zero, the user wants to get the latest update - timeLimit = TimestampProvider.Now().Time - } - - if query.Hint == lookup.NoClue { // try to use our cache - entry := h.get(&query.Feed) - if entry != nil && entry.Epoch.Time <= timeLimit { // avoid bad hints - query.Hint = entry.Epoch - } - } - - // we can't look for anything without a store - if h.chunkStore == nil { - return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups") - } - - var id ID - id.Feed = query.Feed - var readCount int - - // Invoke the lookup engine. 
- // The callback will be called every time the lookup algorithm needs to guess - requestPtr, err := lookup.Lookup(timeLimit, query.Hint, func(epoch lookup.Epoch, now uint64) (interface{}, error) { - readCount++ - id.Epoch = epoch - ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout) - defer cancel() - - chunk, err := h.chunkStore.Get(ctx, id.Addr()) - if err != nil { // TODO: check for catastrophic errors other than chunk not found - return nil, nil - } - - var request Request - if err := request.fromChunk(chunk.Address(), chunk.Data()); err != nil { - return nil, nil - } - if request.Time <= timeLimit { - return &request, nil - } - return nil, nil - }) - if err != nil { - return nil, err - } - - log.Info(fmt.Sprintf("Feed lookup finished in %d lookups", readCount)) - - request, _ := requestPtr.(*Request) - if request == nil { - return nil, NewError(ErrNotFound, "no feed updates found") - } - return h.updateCache(request) - -} - -// update feed updates cache with specified content -func (h *Handler) updateCache(request *Request) (*cacheEntry, error) { - - updateAddr := request.Addr() - log.Trace("feed cache update", "topic", request.Topic.Hex(), "updateaddr", updateAddr, "epoch time", request.Epoch.Time, "epoch level", request.Epoch.Level) - - feedUpdate := h.get(&request.Feed) - if feedUpdate == nil { - feedUpdate = &cacheEntry{} - h.set(&request.Feed, feedUpdate) - } - - // update our rsrcs entry map - feedUpdate.lastKey = updateAddr - feedUpdate.Update = request.Update - feedUpdate.Reader = bytes.NewReader(feedUpdate.data) - return feedUpdate, nil -} - -// Update publishes a feed update -// Note that a Feed update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. -// This results in a max payload of `maxUpdateDataLength` (check update.go for more details) -// An error will be returned if the total length of the chunk payload will exceed this limit. -// Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update -// on the network. -func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) { - - // we can't update anything without a store - if h.chunkStore == nil { - return nil, NewError(ErrInit, "Call Handler.SetStore() before updating") - } - - feedUpdate := h.get(&r.Feed) - if feedUpdate != nil && feedUpdate.Epoch.Equals(r.Epoch) { // This is the only cheap check we can do for sure - return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist") - } - - chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big - if err != nil { - return nil, err - } - - // send the chunk - h.chunkStore.Put(ctx, chunk) - log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data()) - // update our feed updates map cache entry if the new update is older than the one we have, if we have it. 
- if feedUpdate != nil && r.Epoch.After(feedUpdate.Epoch) { - feedUpdate.Epoch = r.Epoch - feedUpdate.data = make([]byte, len(r.data)) - feedUpdate.lastKey = r.idAddr - copy(feedUpdate.data, r.data) - feedUpdate.Reader = bytes.NewReader(feedUpdate.data) - } - - return r.idAddr, nil -} - -// Retrieves the feed update cache value for the given nameHash -func (h *Handler) get(feed *Feed) *cacheEntry { - mapKey := feed.mapKey() - h.cacheLock.RLock() - defer h.cacheLock.RUnlock() - feedUpdate := h.cache[mapKey] - return feedUpdate -} - -// Sets the feed update cache value for the given Feed -func (h *Handler) set(feed *Feed, feedUpdate *cacheEntry) { - mapKey := feed.mapKey() - h.cacheLock.Lock() - defer h.cacheLock.Unlock() - h.cache[mapKey] = feedUpdate -} diff --git a/swarm/storage/mru/handler_test.go b/swarm/storage/mru/handler_test.go deleted file mode 100644 index b66ff0c80..000000000 --- a/swarm/storage/mru/handler_test.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package mru - -import ( - "bytes" - "context" - "flag" - "fmt" - "io/ioutil" - "os" - "testing" - "time" - - "github.com/ethereum/go-ethereum/crypto" - - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/swarm/chunk" - "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" -) - -var ( - loglevel = flag.Int("loglevel", 3, "loglevel") - startTime = Timestamp{ - Time: uint64(4200), - } - cleanF func() - subtopicName = "føø.bar" - hashfunc = storage.MakeHashFunc(storage.DefaultHash) -) - -func init() { - flag.Parse() - log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) -} - -// simulated timeProvider -type fakeTimeProvider struct { - currentTime uint64 -} - -func (f *fakeTimeProvider) Tick() { - f.currentTime++ -} - -func (f *fakeTimeProvider) Set(time uint64) { - f.currentTime = time -} - -func (f *fakeTimeProvider) FastForward(offset uint64) { - f.currentTime += offset -} - -func (f *fakeTimeProvider) Now() Timestamp { - return Timestamp{ - Time: f.currentTime, - } -} - -// make updates and retrieve them based on periods and versions -func TestFeedsHandler(t *testing.T) { - - // make fake timeProvider - clock := &fakeTimeProvider{ - currentTime: startTime.Time, // clock starts at t=4200 - } - - // signer containing private key - signer := newAliceSigner() - - feedsHandler, datadir, teardownTest, err := setupTest(clock, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - // create a new Feed - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - topic, _ := NewTopic("Mess with Swarm Feeds code and see what ghost catches you", nil) - feed := Feed{ - Topic: topic, - 
User: signer.Address(), - } - - // data for updates: - updates := []string{ - "blinky", // t=4200 - "pinky", // t=4242 - "inky", // t=4284 - "clyde", // t=4285 - } - - request := NewFirstRequest(feed.Topic) // this timestamps the update at t = 4200 (start time) - chunkAddress := make(map[string]storage.Address) - data := []byte(updates[0]) - request.SetData(data) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - chunkAddress[updates[0]], err = feedsHandler.Update(ctx, request) - if err != nil { - t.Fatal(err) - } - - // move the clock ahead 21 seconds - clock.FastForward(21) // t=4221 - - request, err = feedsHandler.NewRequest(ctx, &request.Feed) // this timestamps the update at t = 4221 - if err != nil { - t.Fatal(err) - } - if request.Epoch.Base() != 0 || request.Epoch.Level != lookup.HighestLevel-1 { - t.Fatalf("Suggested epoch BaseTime should be 0 and Epoch level should be %d", lookup.HighestLevel-1) - } - - request.Epoch.Level = lookup.HighestLevel // force level 25 instead of 24 to make it fail - data = []byte(updates[1]) - request.SetData(data) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request) - if err == nil { - t.Fatal("Expected update to fail since an update in this epoch already exists") - } - - // move the clock ahead 21 seconds - clock.FastForward(21) // t=4242 - request, err = feedsHandler.NewRequest(ctx, &request.Feed) - if err != nil { - t.Fatal(err) - } - request.SetData(data) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request) - if err != nil { - t.Fatal(err) - } - - // move the clock ahead 42 seconds - clock.FastForward(42) // t=4284 - request, err = feedsHandler.NewRequest(ctx, &request.Feed) - if err != nil { - t.Fatal(err) - } - data = []byte(updates[2]) - request.SetData(data) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - chunkAddress[updates[2]], err = feedsHandler.Update(ctx, request) - if err != nil { - t.Fatal(err) - } - - // move the clock ahead 1 second - clock.FastForward(1) // t=4285 - request, err = feedsHandler.NewRequest(ctx, &request.Feed) - if err != nil { - t.Fatal(err) - } - if request.Epoch.Base() != 0 || request.Epoch.Level != 22 { - t.Fatalf("Expected epoch base time to be %d, got %d. 
Expected epoch level to be %d, got %d", 0, request.Epoch.Base(), 22, request.Epoch.Level) - } - data = []byte(updates[3]) - request.SetData(data) - - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - chunkAddress[updates[3]], err = feedsHandler.Update(ctx, request) - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second) - feedsHandler.Close() - - // check we can retrieve the updates after close - clock.FastForward(2000) // t=6285 - - feedParams := &HandlerParams{} - - feedsHandler2, err := NewTestHandler(datadir, feedParams) - if err != nil { - t.Fatal(err) - } - - update2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue)) - if err != nil { - t.Fatal(err) - } - - // last update should be "clyde" - if !bytes.Equal(update2.data, []byte(updates[len(updates)-1])) { - t.Fatalf("feed update data was %v, expected %v", string(update2.data), updates[len(updates)-1]) - } - if update2.Level != 22 { - t.Fatalf("feed update epoch level was %d, expected 22", update2.Level) - } - if update2.Base() != 0 { - t.Fatalf("feed update epoch base time was %d, expected 0", update2.Base()) - } - log.Debug("Latest lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data) - - // specific point in time - update, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue)) - if err != nil { - t.Fatal(err) - } - // check data - if !bytes.Equal(update.data, []byte(updates[2])) { - t.Fatalf("feed update data (historical) was %v, expected %v", string(update2.data), updates[2]) - } - log.Debug("Historical lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data) - - // beyond the first should yield an error - update, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue)) - if err == nil { - t.Fatalf("expected previous to fail, returned epoch %s data %v", update.Epoch.String(), update.data) - } - -} - -const Day = 60 * 60 * 24 -const Year = Day * 365 -const Month = Day * 30 - -func generateData(x uint64) []byte { - return []byte(fmt.Sprintf("%d", x)) -} - -func TestSparseUpdates(t *testing.T) { - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - rh, datadir, teardownTest, err := setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - defer os.RemoveAll(datadir) - - // create a new Feed - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - topic, _ := NewTopic("Very slow updates", nil) - feed := Feed{ - Topic: topic, - User: signer.Address(), - } - - // publish one update every 5 years since Unix 0 until today - today := uint64(1533799046) - var epoch lookup.Epoch - var lastUpdateTime uint64 - for T := uint64(0); T < today; T += 5 * Year { - request := NewFirstRequest(feed.Topic) - request.Epoch = lookup.GetNextEpoch(epoch, T) - request.data = generateData(T) // this generates some data that depends on T, so we can check later - request.Sign(signer) - if err != nil { - t.Fatal(err) - } - - if _, err := rh.Update(ctx, request); err != nil { - t.Fatal(err) - } - epoch = request.Epoch - lastUpdateTime = T - } - - query := NewQuery(&feed, today, lookup.NoClue) - - _, err = rh.Lookup(ctx, query) - if err != nil { - t.Fatal(err) - } - - _, content, err := rh.GetContent(&feed) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(generateData(lastUpdateTime), content) { - 
t.Fatalf("Expected to recover last written value %d, got %s", lastUpdateTime, string(content)) - } - - // lookup the closest update to 35*Year + 6* Month (~ June 2005): - // it should find the update we put on 35*Year, since we were updating every 5 years. - - query.TimeLimit = 35*Year + 6*Month - - _, err = rh.Lookup(ctx, query) - if err != nil { - t.Fatal(err) - } - - _, content, err = rh.GetContent(&feed) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(generateData(35*Year), content) { - t.Fatalf("Expected to recover %d, got %s", 35*Year, string(content)) - } -} - -func TestValidator(t *testing.T) { - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key. Alice will be the good girl - signer := newAliceSigner() - - // set up sim timeProvider - rh, _, teardownTest, err := setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - // create new Feed - topic, _ := NewTopic(subtopicName, nil) - feed := Feed{ - Topic: topic, - User: signer.Address(), - } - mr := NewFirstRequest(feed.Topic) - - // chunk with address - data := []byte("foo") - mr.SetData(data) - if err := mr.Sign(signer); err != nil { - t.Fatalf("sign fail: %v", err) - } - - chunk, err := mr.toChunk() - if err != nil { - t.Fatal(err) - } - if !rh.Validate(chunk.Address(), chunk.Data()) { - t.Fatal("Chunk validator fail on update chunk") - } - - address := chunk.Address() - // mess with the address - address[0] = 11 - address[15] = 99 - - if rh.Validate(address, chunk.Data()) { - t.Fatal("Expected Validate to fail with false chunk address") - } -} - -// tests that the content address validator correctly checks the data -// tests that Feed update chunks are passed through content address validator -// there is some redundancy in this test as it also tests content addressed chunks, -// which should be evaluated as invalid chunks by this validator -func TestValidatorInStore(t *testing.T) { - - // make fake timeProvider - TimestampProvider = &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - // set up localstore - datadir, err := ioutil.TempDir("", "storage-testfeedsvalidator") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(datadir) - - handlerParams := storage.NewDefaultLocalStoreParams() - handlerParams.Init(datadir) - store, err := storage.NewLocalStore(handlerParams, nil) - if err != nil { - t.Fatal(err) - } - - // set up Swarm Feeds handler and add is as a validator to the localstore - fhParams := &HandlerParams{} - fh := NewHandler(fhParams) - store.Validators = append(store.Validators, fh) - - // create content addressed chunks, one good, one faulty - chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2) - goodChunk := chunks[0] - badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data()) - - topic, _ := NewTopic("xyzzy", nil) - feed := Feed{ - Topic: topic, - User: signer.Address(), - } - - // create a feed update chunk with correct publickey - id := ID{ - Epoch: lookup.Epoch{Time: 42, - Level: 1, - }, - Feed: feed, - } - - updateAddr := id.Addr() - data := []byte("bar") - - r := new(Request) - r.idAddr = updateAddr - r.Update.ID = id - r.data = data - - r.Sign(signer) - - uglyChunk, err := r.toChunk() - if err != nil { - t.Fatal(err) - } - - // put the chunks in the store and check their error status - err = store.Put(context.Background(), goodChunk) - if err == nil { - t.Fatal("expected error on 
good content address chunk with feed update validator only, but got nil") - } - err = store.Put(context.Background(), badChunk) - if err == nil { - t.Fatal("expected error on bad content address chunk with feed update validator only, but got nil") - } - err = store.Put(context.Background(), uglyChunk) - if err != nil { - t.Fatalf("expected no error on feed update chunk with feed update validator only, but got: %s", err) - } -} - -// create rpc and Feeds Handler -func setupTest(timeProvider timestampProvider, signer Signer) (fh *TestHandler, datadir string, teardown func(), err error) { - - var fsClean func() - var rpcClean func() - cleanF = func() { - if fsClean != nil { - fsClean() - } - if rpcClean != nil { - rpcClean() - } - } - - // temp datadir - datadir, err = ioutil.TempDir("", "fh") - if err != nil { - return nil, "", nil, err - } - fsClean = func() { - os.RemoveAll(datadir) - } - - TimestampProvider = timeProvider - fhParams := &HandlerParams{} - fh, err = NewTestHandler(datadir, fhParams) - return fh, datadir, cleanF, err -} - -func newAliceSigner() *GenericSigner { - privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") - return NewGenericSigner(privKey) -} - -func newBobSigner() *GenericSigner { - privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca") - return NewGenericSigner(privKey) -} - -func newCharlieSigner() *GenericSigner { - privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca") - return NewGenericSigner(privKey) -} - -func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) { - chunk, err := rh.chunkStore.Get(context.TODO(), addr) - if err != nil { - return nil, err - } - var r Request - if err := r.fromChunk(addr, chunk.Data()); err != nil { - return nil, err - } - return r.data, nil -} diff --git a/swarm/storage/mru/id.go b/swarm/storage/mru/id.go deleted file mode 100644 index dcc88ac2a..000000000 --- a/swarm/storage/mru/id.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package mru - -import ( - "fmt" - "hash" - "strconv" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" - - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// ID uniquely identifies an update on the network. 
-type ID struct { - Feed `json:"feed"` - lookup.Epoch `json:"epoch"` -} - -// ID layout: -// Feed feedLength bytes -// Epoch EpochLength -const idLength = feedLength + lookup.EpochLength - -// Addr calculates the feed update chunk address corresponding to this ID -func (u *ID) Addr() (updateAddr storage.Address) { - serializedData := make([]byte, idLength) - var cursor int - u.Feed.binaryPut(serializedData[cursor : cursor+feedLength]) - cursor += feedLength - - eid := u.Epoch.ID() - copy(serializedData[cursor:cursor+lookup.EpochLength], eid[:]) - - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - hasher.Write(serializedData) - return hasher.Sum(nil) -} - -// binaryPut serializes this instance into the provided slice -func (u *ID) binaryPut(serializedData []byte) error { - if len(serializedData) != idLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize ID. Expected %d, got %d", idLength, len(serializedData)) - } - var cursor int - if err := u.Feed.binaryPut(serializedData[cursor : cursor+feedLength]); err != nil { - return err - } - cursor += feedLength - - epochBytes, err := u.Epoch.MarshalBinary() - if err != nil { - return err - } - copy(serializedData[cursor:cursor+lookup.EpochLength], epochBytes[:]) - cursor += lookup.EpochLength - - return nil -} - -// binaryLength returns the expected size of this structure when serialized -func (u *ID) binaryLength() int { - return idLength -} - -// binaryGet restores the current instance from the information contained in the passed slice -func (u *ID) binaryGet(serializedData []byte) error { - if len(serializedData) != idLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to read ID. Expected %d, got %d", idLength, len(serializedData)) - } - - var cursor int - if err := u.Feed.binaryGet(serializedData[cursor : cursor+feedLength]); err != nil { - return err - } - cursor += feedLength - - if err := u.Epoch.UnmarshalBinary(serializedData[cursor : cursor+lookup.EpochLength]); err != nil { - return err - } - cursor += lookup.EpochLength - - return nil -} - -// FromValues deserializes this instance from a string key-value store -// useful to parse query strings -func (u *ID) FromValues(values Values) error { - level, _ := strconv.ParseUint(values.Get("level"), 10, 32) - u.Epoch.Level = uint8(level) - u.Epoch.Time, _ = strconv.ParseUint(values.Get("time"), 10, 64) - - if u.Feed.User == (common.Address{}) { - return u.Feed.FromValues(values) - } - return nil -} - -// AppendValues serializes this structure into the provided string key-value store -// useful to build query strings -func (u *ID) AppendValues(values Values) { - values.Set("level", fmt.Sprintf("%d", u.Epoch.Level)) - values.Set("time", fmt.Sprintf("%d", u.Epoch.Time)) - u.Feed.AppendValues(values) -} diff --git a/swarm/storage/mru/id_test.go b/swarm/storage/mru/id_test.go deleted file mode 100644 index 767b3c159..000000000 --- a/swarm/storage/mru/id_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package mru - -import ( - "testing" - - "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" -) - -func getTestID() *ID { - return &ID{ - Feed: *getTestFeed(), - Epoch: lookup.GetFirstEpoch(1000), - } -} - -func TestIDAddr(t *testing.T) { - id := getTestID() - updateAddr := id.Addr() - compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8b24583ec293e085f4c78aaee66d1bc5abfb8b4233304d14a349afa57af2a783") -} - -func TestIDSerializer(t *testing.T) { - testBinarySerializerRecovery(t, getTestID(), 
"0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019") -} - -func TestIDLengthCheck(t *testing.T) { - testBinarySerializerLengthCheck(t, getTestID()) -} diff --git a/swarm/storage/mru/lookup/epoch.go b/swarm/storage/mru/lookup/epoch.go deleted file mode 100644 index bafe95477..000000000 --- a/swarm/storage/mru/lookup/epoch.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package lookup - -import ( - "encoding/binary" - "errors" - "fmt" -) - -// Epoch represents a time slot at a particular frequency level -type Epoch struct { - Time uint64 `json:"time"` // Time stores the time at which the update or lookup takes place - Level uint8 `json:"level"` // Level indicates the frequency level as the exponent of a power of 2 -} - -// EpochID is a unique identifier for an Epoch, based on its level and base time. -type EpochID [8]byte - -// EpochLength stores the serialized binary length of an Epoch -const EpochLength = 8 - -// MaxTime contains the highest possible time value an Epoch can handle -const MaxTime uint64 = (1 << 56) - 1 - -// Base returns the base time of the Epoch -func (e *Epoch) Base() uint64 { - return getBaseTime(e.Time, e.Level) -} - -// ID Returns the unique identifier of this epoch -func (e *Epoch) ID() EpochID { - base := e.Base() - var id EpochID - binary.LittleEndian.PutUint64(id[:], base) - id[7] = e.Level - return id -} - -// MarshalBinary implements the encoding.BinaryMarshaller interface -func (e *Epoch) MarshalBinary() (data []byte, err error) { - b := make([]byte, 8) - binary.LittleEndian.PutUint64(b[:], e.Time) - b[7] = e.Level - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaller interface -func (e *Epoch) UnmarshalBinary(data []byte) error { - if len(data) != EpochLength { - return errors.New("Invalid data unmarshalling Epoch") - } - b := make([]byte, 8) - copy(b, data) - e.Level = b[7] - b[7] = 0 - e.Time = binary.LittleEndian.Uint64(b) - return nil -} - -// After returns true if this epoch occurs later or exactly at the other epoch. -func (e *Epoch) After(epoch Epoch) bool { - if e.Time == epoch.Time { - return e.Level < epoch.Level - } - return e.Time >= epoch.Time -} - -// Equals compares two epochs and returns true if they refer to the same time period. -func (e *Epoch) Equals(epoch Epoch) bool { - return e.Level == epoch.Level && e.Base() == epoch.Base() -} - -// String implements the Stringer interface. 
-func (e *Epoch) String() string { - return fmt.Sprintf("Epoch{Time:%d, Level:%d}", e.Time, e.Level) -} diff --git a/swarm/storage/mru/lookup/epoch_test.go b/swarm/storage/mru/lookup/epoch_test.go deleted file mode 100644 index 62cf5523d..000000000 --- a/swarm/storage/mru/lookup/epoch_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package lookup_test - -import ( - "testing" - - "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" -) - -func TestMarshallers(t *testing.T) { - - for i := uint64(1); i < lookup.MaxTime; i *= 3 { - e := lookup.Epoch{ - Time: i, - Level: uint8(i % 20), - } - b, err := e.MarshalBinary() - if err != nil { - t.Fatal(err) - } - var e2 lookup.Epoch - if err := e2.UnmarshalBinary(b); err != nil { - t.Fatal(err) - } - if e != e2 { - t.Fatal("Expected unmarshalled epoch to be equal to marshalled onet.Fatal(err)") - } - } - -} - -func TestAfter(t *testing.T) { - a := lookup.Epoch{ - Time: 5, - Level: 3, - } - b := lookup.Epoch{ - Time: 6, - Level: 3, - } - c := lookup.Epoch{ - Time: 6, - Level: 4, - } - - if !b.After(a) { - t.Fatal("Expected 'after' to be true, got false") - } - - if b.After(b) { - t.Fatal("Expected 'after' to be false when both epochs are identical, got true") - } - - if !b.After(c) { - t.Fatal("Expected 'after' to be true when both epochs have the same time but the level is lower in the first one, but got false") - } - -} diff --git a/swarm/storage/mru/lookup/lookup.go b/swarm/storage/mru/lookup/lookup.go deleted file mode 100644 index a5154d261..000000000 --- a/swarm/storage/mru/lookup/lookup.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -/* -Package lookup defines Feed lookup algorithms and provides tools to place updates -so they can be found -*/ -package lookup - -const maxuint64 = ^uint64(0) - -// LowestLevel establishes the frequency resolution of the lookup algorithm as a power of 2. -const LowestLevel uint8 = 0 // default is 0 (1 second) - -// HighestLevel sets the lowest frequency the algorithm will operate at, as a power of 2. -// 25 -> 2^25 equals to roughly one year. 
-const HighestLevel = 25 // default is 25 (~1 year) - -// DefaultLevel sets what level will be chosen to search when there is no hint -const DefaultLevel = HighestLevel - -//Algorithm is the function signature of a lookup algorithm -type Algorithm func(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) - -// Lookup finds the update with the highest timestamp that is smaller or equal than 'now' -// It takes a hint which should be the epoch where the last known update was -// If you don't know in what epoch the last update happened, simply submit lookup.NoClue -// read() will be called on each lookup attempt -// Returns an error only if read() returns an error -// Returns nil if an update was not found -var Lookup Algorithm = FluzCapacitorAlgorithm - -// ReadFunc is a handler called by Lookup each time it attempts to find a value -// It should return if a value is not found -// It should return if a value is found, but its timestamp is higher than "now" -// It should only return an error in case the handler wants to stop the -// lookup process entirely. -type ReadFunc func(epoch Epoch, now uint64) (interface{}, error) - -// NoClue is a hint that can be provided when the Lookup caller does not have -// a clue about where the last update may be -var NoClue = Epoch{} - -// getBaseTime returns the epoch base time of the given -// time and level -func getBaseTime(t uint64, level uint8) uint64 { - return t & (maxuint64 << level) -} - -// Hint creates a hint based only on the last known update time -func Hint(last uint64) Epoch { - return Epoch{ - Time: last, - Level: DefaultLevel, - } -} - -// GetNextLevel returns the frequency level a next update should be placed at, provided where -// the last update was and what time it is now. -// This is the first nonzero bit of the XOR of 'last' and 'now', counting from the highest significant bit -// but limited to not return a level that is smaller than the last-1 -func GetNextLevel(last Epoch, now uint64) uint8 { - // First XOR the last epoch base time with the current clock. - // This will set all the common most significant bits to zero. - mix := (last.Base() ^ now) - - // Then, make sure we stop the below loop before one level below the current, by setting - // that level's bit to 1. - // If the next level is lower than the current one, it must be exactly level-1 and not lower. - mix |= (1 << (last.Level - 1)) - - // if the last update was more than 2^highestLevel seconds ago, choose the highest level - if mix > (maxuint64 >> (64 - HighestLevel - 1)) { - return HighestLevel - } - - // set up a mask to scan for nonzero bits, starting at the highest level - mask := uint64(1 << (HighestLevel)) - - for i := uint8(HighestLevel); i > LowestLevel; i-- { - if mix&mask != 0 { // if we find a nonzero bit, this is the level the next update should be at. - return i - } - mask = mask >> 1 // move our bit one position to the right - } - return 0 -} - -// GetNextEpoch returns the epoch where the next update should be located -// according to where the previous update was -// and what time it is now. -func GetNextEpoch(last Epoch, now uint64) Epoch { - if last == NoClue { - return GetFirstEpoch(now) - } - level := GetNextLevel(last, now) - return Epoch{ - Level: level, - Time: now, - } -} - -// GetFirstEpoch returns the epoch where the first update should be located -// based on what time it is now. 
-func GetFirstEpoch(now uint64) Epoch { - return Epoch{Level: HighestLevel, Time: now} -} - -var worstHint = Epoch{Time: 0, Level: 63} - -// FluzCapacitorAlgorithm works by narrowing the epoch search area if an update is found -// going back and forth in time -// First, it will attempt to find an update where it should be now if the hint was -// really the last update. If that lookup fails, then the last update must be either the hint itself -// or the epochs right below. If however, that lookup succeeds, then the update must be -// that one or within the epochs right below. -// see the guide for a more graphical representation -func FluzCapacitorAlgorithm(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) { - var lastFound interface{} - var epoch Epoch - if hint == NoClue { - hint = worstHint - } - - t := now - - for { - epoch = GetNextEpoch(hint, t) - value, err = read(epoch, now) - if err != nil { - return nil, err - } - if value != nil { - lastFound = value - if epoch.Level == LowestLevel || epoch.Equals(hint) { - return value, nil - } - hint = epoch - continue - } - if epoch.Base() == hint.Base() { - if lastFound != nil { - return lastFound, nil - } - // we have reached the hint itself - if hint == worstHint { - return nil, nil - } - // check it out - value, err = read(hint, now) - if err != nil { - return nil, err - } - if value != nil { - return value, nil - } - // bad hint. - epoch = hint - hint = worstHint - } - base := epoch.Base() - if base == 0 { - return nil, nil - } - t = base - 1 - } -} diff --git a/swarm/storage/mru/lookup/lookup_test.go b/swarm/storage/mru/lookup/lookup_test.go deleted file mode 100644 index 34bcb61f0..000000000 --- a/swarm/storage/mru/lookup/lookup_test.go +++ /dev/null @@ -1,414 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
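A usage sketch of the contract described above, separate from the patch: the caller only supplies a read function that maps an epoch to a stored value (returning nil when nothing is there, or when what is there is newer than `now`), and the algorithm does the narrowing. The in-memory map below stands in for the chunk store:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

func main() {
	// toy store: epoch ID -> timestamp of the value written in that epoch
	store := make(map[lookup.EpochID]uint64)

	// place three updates, each epoch derived from the previous one;
	// with these particular timestamps the chosen levels are 25, 24 and 23
	var last lookup.Epoch
	for _, t := range []uint64{1000, 1500, 4000} {
		epoch := lookup.GetNextEpoch(last, t)
		store[epoch.ID()] = t
		last = epoch
	}

	// read function: return the stored value unless it lies in the future
	read := func(epoch lookup.Epoch, now uint64) (interface{}, error) {
		if t, ok := store[epoch.ID()]; ok && t <= now {
			return t, nil
		}
		return nil, nil
	}

	// find the latest update known at time 5000, with no hint
	value, err := lookup.Lookup(5000, lookup.NoClue, read)
	if err != nil {
		panic(err)
	}
	fmt.Println(value) // prints 4000, the most recent update before t=5000
}
```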
- -package lookup_test - -import ( - "fmt" - "math/rand" - "testing" - - "github.com/ethereum/go-ethereum/swarm/log" - "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" -) - -type Data struct { - Payload uint64 - Time uint64 -} - -type Store map[lookup.EpochID]*Data - -func write(store Store, epoch lookup.Epoch, value *Data) { - log.Debug("Write: %d-%d, value='%d'\n", epoch.Base(), epoch.Level, value.Payload) - store[epoch.ID()] = value -} - -func update(store Store, last lookup.Epoch, now uint64, value *Data) lookup.Epoch { - epoch := lookup.GetNextEpoch(last, now) - - write(store, epoch, value) - - return epoch -} - -const Day = 60 * 60 * 24 -const Year = Day * 365 -const Month = Day * 30 - -func makeReadFunc(store Store, counter *int) lookup.ReadFunc { - return func(epoch lookup.Epoch, now uint64) (interface{}, error) { - *counter++ - data := store[epoch.ID()] - var valueStr string - if data != nil { - valueStr = fmt.Sprintf("%d", data.Payload) - } - log.Debug("Read: %d-%d, value='%s'\n", epoch.Base(), epoch.Level, valueStr) - if data != nil && data.Time <= now { - return data, nil - } - return nil, nil - } -} - -func TestLookup(t *testing.T) { - - store := make(Store) - readCount := 0 - readFunc := makeReadFunc(store, &readCount) - - // write an update every month for 12 months 3 years ago and then silence for two years - now := uint64(1533799046) - var epoch lookup.Epoch - - var lastData *Data - for i := uint64(0); i < 12; i++ { - t := uint64(now - Year*3 + i*Month) - data := Data{ - Payload: t, //our "payload" will be the timestamp itself. - Time: t, - } - epoch = update(store, epoch, t, &data) - lastData = &data - } - - // try to get the last value - - value, err := lookup.Lookup(now, lookup.NoClue, readFunc) - if err != nil { - t.Fatal(err) - } - - readCountWithoutHint := readCount - - if value != lastData { - t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) - } - - // reset the read count for the next test - readCount = 0 - // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update - value, err = lookup.Lookup(now, epoch, readFunc) - if err != nil { - t.Fatal(err) - } - - if value != lastData { - t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) - } - - if readCount > readCountWithoutHint { - t.Fatalf("Expected lookup to complete with fewer or same reads than %d since we provided a hint. Did %d reads.", readCountWithoutHint, readCount) - } - - // try to get an intermediate value - // if we look for a value in now - Year*3 + 6*Month, we should get that value - // Since the "payload" is the timestamp itself, we can check this. - - expectedTime := now - Year*3 + 6*Month - - value, err = lookup.Lookup(expectedTime, lookup.NoClue, readFunc) - if err != nil { - t.Fatal(err) - } - - data, ok := value.(*Data) - - if !ok { - t.Fatal("Expected value to contain data") - } - - if data.Time != expectedTime { - t.Fatalf("Expected value timestamp to be %d, got %d", data.Time, expectedTime) - } - -} - -func TestOneUpdateAt0(t *testing.T) { - - store := make(Store) - readCount := 0 - - readFunc := makeReadFunc(store, &readCount) - now := uint64(1533903729) - - var epoch lookup.Epoch - data := Data{ - Payload: 79, - Time: 0, - } - update(store, epoch, 0, &data) - - value, err := lookup.Lookup(now, lookup.NoClue, readFunc) - if err != nil { - t.Fatal(err) - } - if value != &data { - t.Fatalf("Expected lookup to return the last written value: %v. 
Got %v", data, value) - } -} - -// Tests the update is found even when a bad hint is given -func TestBadHint(t *testing.T) { - - store := make(Store) - readCount := 0 - - readFunc := makeReadFunc(store, &readCount) - now := uint64(1533903729) - - var epoch lookup.Epoch - data := Data{ - Payload: 79, - Time: 0, - } - - // place an update for t=1200 - update(store, epoch, 1200, &data) - - // come up with some evil hint - badHint := lookup.Epoch{ - Level: 18, - Time: 1200000000, - } - - value, err := lookup.Lookup(now, badHint, readFunc) - if err != nil { - t.Fatal(err) - } - if value != &data { - t.Fatalf("Expected lookup to return the last written value: %v. Got %v", data, value) - } -} - -func TestLookupFail(t *testing.T) { - - store := make(Store) - readCount := 0 - - readFunc := makeReadFunc(store, &readCount) - now := uint64(1533903729) - - // don't write anything and try to look up. - // we're testing we don't get stuck in a loop - - value, err := lookup.Lookup(now, lookup.NoClue, readFunc) - if err != nil { - t.Fatal(err) - } - if value != nil { - t.Fatal("Expected value to be nil, since the update should've failed") - } - - expectedReads := now/(1< readCountWithoutHint { - t.Fatalf("Expected lookup to complete with fewer or equal reads than %d since we provided a hint. Did %d reads.", readCountWithoutHint, readCount) - } - - for i := uint64(0); i <= 994; i++ { - T := uint64(now - 1000 + i) // update every second for the last 1000 seconds - value, err := lookup.Lookup(T, lookup.NoClue, readFunc) - if err != nil { - t.Fatal(err) - } - data, _ := value.(*Data) - if data == nil { - t.Fatalf("Expected lookup to return %d, got nil", T) - } - if data.Payload != T { - t.Fatalf("Expected lookup to return %d, got %d", T, data.Time) - } - } -} - -func TestSparseUpdates(t *testing.T) { - - store := make(Store) - readCount := 0 - readFunc := makeReadFunc(store, &readCount) - - // write an update every 5 years 3 times starting in Jan 1st 1970 and then silence - - now := uint64(1533799046) - var epoch lookup.Epoch - - var lastData *Data - for i := uint64(0); i < 5; i++ { - T := uint64(Year * 5 * i) // write an update every 5 years 3 times starting in Jan 1st 1970 and then silence - data := Data{ - Payload: T, //our "payload" will be the timestamp itself. - Time: T, - } - epoch = update(store, epoch, T, &data) - lastData = &data - } - - // try to get the last value - - value, err := lookup.Lookup(now, lookup.NoClue, readFunc) - if err != nil { - t.Fatal(err) - } - - readCountWithoutHint := readCount - - if value != lastData { - t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) - } - - // reset the read count for the next test - readCount = 0 - // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update - value, err = lookup.Lookup(now, epoch, readFunc) - if err != nil { - t.Fatal(err) - } - - if value != lastData { - t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) - } - - if readCount > readCountWithoutHint { - t.Fatalf("Expected lookup to complete with fewer reads than %d since we provided a hint. 
Did %d reads.", readCountWithoutHint, readCount) - } - -} - -// testG will hold precooked test results -// fields are abbreviated to reduce the size of the literal below -type testG struct { - e lookup.Epoch // last - n uint64 // next level - x uint8 // expected result -} - -// test cases -var testGetNextLevelCases []testG = []testG{{e: lookup.Epoch{Time: 989875233, Level: 12}, n: 989875233, x: 11}, {e: lookup.Epoch{Time: 995807650, Level: 18}, n: 995598156, x: 19}, {e: lookup.Epoch{Time: 969167082, Level: 0}, n: 968990357, x: 18}, {e: lookup.Epoch{Time: 993087628, Level: 14}, n: 992987044, x: 20}, {e: lookup.Epoch{Time: 963364631, Level: 20}, n: 963364630, x: 19}, {e: lookup.Epoch{Time: 963497510, Level: 16}, n: 963370732, x: 18}, {e: lookup.Epoch{Time: 955421349, Level: 22}, n: 955421348, x: 21}, {e: lookup.Epoch{Time: 968220379, Level: 15}, n: 968220378, x: 14}, {e: lookup.Epoch{Time: 939129014, Level: 6}, n: 939128771, x: 11}, {e: lookup.Epoch{Time: 907847903, Level: 6}, n: 907791833, x: 18}, {e: lookup.Epoch{Time: 910835564, Level: 15}, n: 910835564, x: 14}, {e: lookup.Epoch{Time: 913578333, Level: 22}, n: 881808431, x: 25}, {e: lookup.Epoch{Time: 895818460, Level: 3}, n: 895818132, x: 9}, {e: lookup.Epoch{Time: 903843025, Level: 24}, n: 895609561, x: 23}, {e: lookup.Epoch{Time: 877889433, Level: 13}, n: 877877093, x: 15}, {e: lookup.Epoch{Time: 901450396, Level: 10}, n: 901450058, x: 9}, {e: lookup.Epoch{Time: 925179910, Level: 3}, n: 925168393, x: 16}, {e: lookup.Epoch{Time: 913485477, Level: 21}, n: 913485476, x: 20}, {e: lookup.Epoch{Time: 924462991, Level: 18}, n: 924462990, x: 17}, {e: lookup.Epoch{Time: 941175128, Level: 13}, n: 941175127, x: 12}, {e: lookup.Epoch{Time: 920126583, Level: 3}, n: 920100782, x: 19}, {e: lookup.Epoch{Time: 932403200, Level: 9}, n: 932279891, x: 17}, {e: lookup.Epoch{Time: 948284931, Level: 2}, n: 948284921, x: 9}, {e: lookup.Epoch{Time: 953540997, Level: 7}, n: 950547986, x: 22}, {e: lookup.Epoch{Time: 926639837, Level: 18}, n: 918608882, x: 24}, {e: lookup.Epoch{Time: 954637598, Level: 1}, n: 954578761, x: 17}, {e: lookup.Epoch{Time: 943482981, Level: 10}, n: 942924151, x: 19}, {e: lookup.Epoch{Time: 963580771, Level: 7}, n: 963580771, x: 6}, {e: lookup.Epoch{Time: 993744930, Level: 7}, n: 993690858, x: 16}, {e: lookup.Epoch{Time: 1018890213, Level: 12}, n: 1018890212, x: 11}, {e: lookup.Epoch{Time: 1030309411, Level: 2}, n: 1030309227, x: 9}, {e: lookup.Epoch{Time: 1063204997, Level: 20}, n: 1063204996, x: 19}, {e: lookup.Epoch{Time: 1094340832, Level: 6}, n: 1094340633, x: 7}, {e: lookup.Epoch{Time: 1077880597, Level: 10}, n: 1075914292, x: 20}, {e: lookup.Epoch{Time: 1051114957, Level: 18}, n: 1051114957, x: 17}, {e: lookup.Epoch{Time: 1045649701, Level: 22}, n: 1045649700, x: 21}, {e: lookup.Epoch{Time: 1066198885, Level: 14}, n: 1066198884, x: 13}, {e: lookup.Epoch{Time: 1053231952, Level: 1}, n: 1053210845, x: 16}, {e: lookup.Epoch{Time: 1068763404, Level: 14}, n: 1068675428, x: 18}, {e: lookup.Epoch{Time: 1039042173, Level: 15}, n: 1038973110, x: 17}, {e: lookup.Epoch{Time: 1050747636, Level: 6}, n: 1050747364, x: 9}, {e: lookup.Epoch{Time: 1030034434, Level: 23}, n: 1030034433, x: 22}, {e: lookup.Epoch{Time: 1003783425, Level: 18}, n: 1003783424, x: 17}, {e: lookup.Epoch{Time: 988163976, Level: 15}, n: 988084064, x: 17}, {e: lookup.Epoch{Time: 1007222377, Level: 15}, n: 1007222377, x: 14}, {e: lookup.Epoch{Time: 1001211375, Level: 13}, n: 1001208178, x: 14}, {e: lookup.Epoch{Time: 997623199, Level: 8}, n: 997623198, x: 7}, {e: 
lookup.Epoch{Time: 1026283830, Level: 10}, n: 1006681704, x: 24}, {e: lookup.Epoch{Time: 1019421907, Level: 20}, n: 1019421906, x: 19}, {e: lookup.Epoch{Time: 1043154306, Level: 16}, n: 1043108343, x: 16}, {e: lookup.Epoch{Time: 1075643767, Level: 17}, n: 1075325898, x: 18}, {e: lookup.Epoch{Time: 1043726309, Level: 20}, n: 1043726308, x: 19}, {e: lookup.Epoch{Time: 1056415324, Level: 17}, n: 1056415324, x: 16}, {e: lookup.Epoch{Time: 1088650219, Level: 13}, n: 1088650218, x: 12}, {e: lookup.Epoch{Time: 1088551662, Level: 7}, n: 1088543355, x: 13}, {e: lookup.Epoch{Time: 1069667265, Level: 6}, n: 1069667075, x: 7}, {e: lookup.Epoch{Time: 1079145970, Level: 18}, n: 1079145969, x: 17}, {e: lookup.Epoch{Time: 1083338876, Level: 7}, n: 1083338875, x: 6}, {e: lookup.Epoch{Time: 1051581086, Level: 4}, n: 1051568869, x: 14}, {e: lookup.Epoch{Time: 1028430882, Level: 4}, n: 1028430864, x: 5}, {e: lookup.Epoch{Time: 1057356462, Level: 1}, n: 1057356417, x: 5}, {e: lookup.Epoch{Time: 1033104266, Level: 0}, n: 1033097479, x: 13}, {e: lookup.Epoch{Time: 1031391367, Level: 11}, n: 1031387304, x: 14}, {e: lookup.Epoch{Time: 1049781164, Level: 15}, n: 1049781163, x: 14}, {e: lookup.Epoch{Time: 1027271628, Level: 12}, n: 1027271627, x: 11}, {e: lookup.Epoch{Time: 1057270560, Level: 23}, n: 1057270560, x: 22}, {e: lookup.Epoch{Time: 1047501317, Level: 15}, n: 1047501317, x: 14}, {e: lookup.Epoch{Time: 1058349035, Level: 11}, n: 1045175573, x: 24}, {e: lookup.Epoch{Time: 1057396147, Level: 20}, n: 1057396147, x: 19}, {e: lookup.Epoch{Time: 1048906375, Level: 18}, n: 1039616919, x: 25}, {e: lookup.Epoch{Time: 1074294831, Level: 20}, n: 1074294831, x: 19}, {e: lookup.Epoch{Time: 1088946052, Level: 1}, n: 1088917364, x: 14}, {e: lookup.Epoch{Time: 1112337595, Level: 17}, n: 1111008110, x: 22}, {e: lookup.Epoch{Time: 1099990284, Level: 5}, n: 1099968370, x: 15}, {e: lookup.Epoch{Time: 1087036441, Level: 16}, n: 1053967855, x: 25}, {e: lookup.Epoch{Time: 1069225185, Level: 8}, n: 1069224660, x: 10}, {e: lookup.Epoch{Time: 1057505479, Level: 9}, n: 1057505170, x: 14}, {e: lookup.Epoch{Time: 1072381377, Level: 12}, n: 1065950959, x: 22}, {e: lookup.Epoch{Time: 1093887139, Level: 8}, n: 1093863305, x: 14}, {e: lookup.Epoch{Time: 1082366510, Level: 24}, n: 1082366510, x: 23}, {e: lookup.Epoch{Time: 1103231132, Level: 14}, n: 1102292201, x: 22}, {e: lookup.Epoch{Time: 1094502355, Level: 3}, n: 1094324652, x: 18}, {e: lookup.Epoch{Time: 1068488344, Level: 12}, n: 1067577330, x: 19}, {e: lookup.Epoch{Time: 1050278233, Level: 12}, n: 1050278232, x: 11}, {e: lookup.Epoch{Time: 1047660768, Level: 5}, n: 1047652137, x: 17}, {e: lookup.Epoch{Time: 1060116167, Level: 11}, n: 1060114091, x: 12}, {e: lookup.Epoch{Time: 1068149392, Level: 21}, n: 1052074801, x: 24}, {e: lookup.Epoch{Time: 1081934120, Level: 6}, n: 1081933847, x: 8}, {e: lookup.Epoch{Time: 1107943693, Level: 16}, n: 1107096139, x: 25}, {e: lookup.Epoch{Time: 1131571649, Level: 9}, n: 1131570428, x: 11}, {e: lookup.Epoch{Time: 1123139367, Level: 0}, n: 1122912198, x: 20}, {e: lookup.Epoch{Time: 1121144423, Level: 6}, n: 1120568289, x: 20}, {e: lookup.Epoch{Time: 1089932411, Level: 17}, n: 1089932410, x: 16}, {e: lookup.Epoch{Time: 1104899012, Level: 22}, n: 1098978789, x: 22}, {e: lookup.Epoch{Time: 1094588059, Level: 21}, n: 1094588059, x: 20}, {e: lookup.Epoch{Time: 1114987438, Level: 24}, n: 1114987437, x: 23}, {e: lookup.Epoch{Time: 1084186305, Level: 7}, n: 1084186241, x: 6}, {e: lookup.Epoch{Time: 1058827111, Level: 8}, n: 1058826504, x: 9}, {e: 
lookup.Epoch{Time: 1090679810, Level: 12}, n: 1090616539, x: 17}, {e: lookup.Epoch{Time: 1084299475, Level: 23}, n: 1084299475, x: 22}} - -func TestGetNextLevel(t *testing.T) { - - // First, test well-known cases - last := lookup.Epoch{ - Time: 1533799046, - Level: 5, - } - - level := lookup.GetNextLevel(last, last.Time) - expected := uint8(4) - if level != expected { - t.Fatalf("Expected GetNextLevel to return %d for same-time updates at a nonzero level, got %d", expected, level) - } - - level = lookup.GetNextLevel(last, last.Time+(1< lookup.HighestLevel { - v = 0 - } - now = last.Time + uint64(rand.Intn(1<. - -package mru - -import ( - "fmt" - "strconv" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" -) - -// Query is used to specify constraints when performing an update lookup -// TimeLimit indicates an upper bound for the search. Set to 0 for "now" -type Query struct { - Feed - Hint lookup.Epoch - TimeLimit uint64 -} - -// FromValues deserializes this instance from a string key-value store -// useful to parse query strings -func (q *Query) FromValues(values Values) error { - time, _ := strconv.ParseUint(values.Get("time"), 10, 64) - q.TimeLimit = time - - level, _ := strconv.ParseUint(values.Get("hint.level"), 10, 32) - q.Hint.Level = uint8(level) - q.Hint.Time, _ = strconv.ParseUint(values.Get("hint.time"), 10, 64) - if q.Feed.User == (common.Address{}) { - return q.Feed.FromValues(values) - } - return nil -} - -// AppendValues serializes this structure into the provided string key-value store -// useful to build query strings -func (q *Query) AppendValues(values Values) { - if q.TimeLimit != 0 { - values.Set("time", fmt.Sprintf("%d", q.TimeLimit)) - } - if q.Hint.Level != 0 { - values.Set("hint.level", fmt.Sprintf("%d", q.Hint.Level)) - } - if q.Hint.Time != 0 { - values.Set("hint.time", fmt.Sprintf("%d", q.Hint.Time)) - } - q.Feed.AppendValues(values) -} - -// NewQuery constructs an Query structure to find updates on or before `time` -// if time == 0, the latest update will be looked up -func NewQuery(feed *Feed, time uint64, hint lookup.Epoch) *Query { - return &Query{ - TimeLimit: time, - Feed: *feed, - Hint: hint, - } -} - -// NewQueryLatest generates lookup parameters that look for the latest update to a feed -func NewQueryLatest(feed *Feed, hint lookup.Epoch) *Query { - return NewQuery(feed, 0, hint) -} diff --git a/swarm/storage/mru/query_test.go b/swarm/storage/mru/query_test.go deleted file mode 100644 index 4cfc597b2..000000000 --- a/swarm/storage/mru/query_test.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
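Because url.Values provides the Get and Set methods expected by the Values parameter above, a Query can be carried back and forth in an HTTP query string, which is how the bzz-resource handlers use it. A round-trip sketch, not part of the patch, with an illustrative topic and user:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

func main() {
	topic, _ := mru.NewTopic("example topic", nil)
	feed := mru.Feed{
		Topic: topic,
		User:  common.HexToAddress("0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c"),
	}

	// ask for the latest update at or before time 5000, hinting at a known epoch
	query := mru.NewQuery(&feed, 5000, lookup.Epoch{Time: 1000, Level: 25})

	// serialize into query string parameters, as an HTTP client would
	values := make(url.Values)
	query.AppendValues(values)
	fmt.Println(values.Encode())

	// ...and parse them back, as the server side does
	var decoded mru.Query
	if err := decoded.FromValues(values); err != nil {
		panic(err)
	}
	fmt.Printf("time=%d hint=%s\n", decoded.TimeLimit, decoded.Hint.String())
}
```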
- -package mru - -import ( - "testing" -) - -func getTestQuery() *Query { - id := getTestID() - return &Query{ - TimeLimit: 5000, - Feed: id.Feed, - Hint: id.Epoch, - } -} - -func TestQueryValues(t *testing.T) { - var expected = KV{"hint.level": "25", "hint.time": "1000", "time": "5000", "topic": "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000", "user": "0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c"} - - query := getTestQuery() - testValueSerializer(t, query, expected) - -} diff --git a/swarm/storage/mru/request.go b/swarm/storage/mru/request.go deleted file mode 100644 index ca88adda1..000000000 --- a/swarm/storage/mru/request.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package mru - -import ( - "bytes" - "encoding/json" - "hash" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" -) - -// Request represents a request to sign or signed Feed Update message -type Request struct { - Update // actual content that will be put on the chunk, less signature - Signature *Signature - idAddr storage.Address // cached chunk address for the update (not serialized, for internal use) - binaryData []byte // cached serialized data (does not get serialized again!, for efficiency/internal use) -} - -// updateRequestJSON represents a JSON-serialized UpdateRequest -type updateRequestJSON struct { - ID - ProtocolVersion uint8 `json:"protocolVersion"` - Data string `json:"data,omitempty"` - Signature string `json:"signature,omitempty"` -} - -// Request layout -// Update bytes -// SignatureLength bytes -const minimumSignedUpdateLength = minimumUpdateDataLength + signatureLength - -// NewFirstRequest returns a ready to sign request to publish a first feed update -func NewFirstRequest(topic Topic) *Request { - - request := new(Request) - - // get the current time - now := TimestampProvider.Now().Time - request.Epoch = lookup.GetFirstEpoch(now) - request.Feed.Topic = topic - request.Header.Version = ProtocolVersion - - return request -} - -// SetData stores the payload data the feed update will be updated with -func (r *Request) SetData(data []byte) { - r.data = data - r.Signature = nil -} - -// IsUpdate returns true if this request models a signed update or otherwise it is a signature request -func (r *Request) IsUpdate() bool { - return r.Signature != nil -} - -// Verify checks that signatures are valid -func (r *Request) Verify() (err error) { - if len(r.data) == 0 { - return NewError(ErrInvalidValue, "Update does not contain data") - } - if r.Signature == nil { - return NewError(ErrInvalidSignature, "Missing signature field") - } - - digest, err := r.GetDigest() - if err != nil { - 
return err - } - - // get the address of the signer (which also checks that it's a valid signature) - r.Feed.User, err = getUserAddr(digest, *r.Signature) - if err != nil { - return err - } - - // check that the lookup information contained in the chunk matches the updateAddr (chunk search key) - // that was used to retrieve this chunk - // if this validation fails, someone forged a chunk. - if !bytes.Equal(r.idAddr, r.Addr()) { - return NewError(ErrInvalidSignature, "Signature address does not match with update user address") - } - - return nil -} - -// Sign executes the signature to validate the update message -func (r *Request) Sign(signer Signer) error { - r.Feed.User = signer.Address() - r.binaryData = nil //invalidate serialized data - digest, err := r.GetDigest() // computes digest and serializes into .binaryData - if err != nil { - return err - } - - signature, err := signer.Sign(digest) - if err != nil { - return err - } - - // Although the Signer interface returns the public address of the signer, - // recover it from the signature to see if they match - userAddr, err := getUserAddr(digest, signature) - if err != nil { - return NewError(ErrInvalidSignature, "Error verifying signature") - } - - if userAddr != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign! - return NewError(ErrInvalidSignature, "Signer address does not match update user address") - } - - r.Signature = &signature - r.idAddr = r.Addr() - return nil -} - -// GetDigest creates the feed update digest used in signatures -// the serialized payload is cached in .binaryData -func (r *Request) GetDigest() (result common.Hash, err error) { - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - dataLength := r.Update.binaryLength() - if r.binaryData == nil { - r.binaryData = make([]byte, dataLength+signatureLength) - if err := r.Update.binaryPut(r.binaryData[:dataLength]); err != nil { - return result, err - } - } - hasher.Write(r.binaryData[:dataLength]) //everything except the signature. - - return common.BytesToHash(hasher.Sum(nil)), nil -} - -// create an update chunk. -func (r *Request) toChunk() (storage.Chunk, error) { - - // Check that the update is signed and serialized - // For efficiency, data is serialized during signature and cached in - // the binaryData field when computing the signature digest in .getDigest() - if r.Signature == nil || r.binaryData == nil { - return nil, NewError(ErrInvalidSignature, "toChunk called without a valid signature or payload data. Call .Sign() first.") - } - - updateLength := r.Update.binaryLength() - - // signature is the last item in the chunk data - copy(r.binaryData[updateLength:], r.Signature[:]) - - chunk := storage.NewChunk(r.idAddr, r.binaryData) - return chunk, nil -} - -// fromChunk populates this structure from chunk data. It does not verify the signature is valid. 
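Tying Sign and Verify together: a publisher creates a request, attaches data, signs it, and the same structure can then be verified before being turned into a chunk. A minimal sketch, not part of the patch, using a throwaway key; in the normal flow, subsequent updates would come from Handler.NewRequest rather than NewFirstRequest:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func main() {
	// throwaway key, for illustration only
	privKey, _ := crypto.GenerateKey()
	signer := mru.NewGenericSigner(privKey)

	// first update of a brand new feed
	topic, _ := mru.NewTopic("example topic", nil)
	request := mru.NewFirstRequest(topic)
	request.SetData([]byte("hello, feeds"))

	// Sign serializes the update, computes its digest and attaches the signature
	if err := request.Sign(signer); err != nil {
		panic(err)
	}

	// Verify recovers the signer from the signature and checks feed ownership
	if err := request.Verify(); err != nil {
		panic(err)
	}
	fmt.Println("signed update for user", request.Feed.User.Hex())
}
```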
-func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error { - // for update chunk layout see Request definition - - //deserialize the feed update portion - if err := r.Update.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil { - return err - } - - // Extract the signature - var signature *Signature - cursor := r.Update.binaryLength() - sigdata := chunkdata[cursor : cursor+signatureLength] - if len(sigdata) > 0 { - signature = &Signature{} - copy(signature[:], sigdata) - } - - r.Signature = signature - r.idAddr = updateAddr - r.binaryData = chunkdata - - return nil - -} - -// FromValues deserializes this instance from a string key-value store -// useful to parse query strings -func (r *Request) FromValues(values Values, data []byte) error { - signatureBytes, err := hexutil.Decode(values.Get("signature")) - if err != nil { - r.Signature = nil - } else { - if len(signatureBytes) != signatureLength { - return NewError(ErrInvalidSignature, "Incorrect signature length") - } - r.Signature = new(Signature) - copy(r.Signature[:], signatureBytes) - } - err = r.Update.FromValues(values, data) - if err != nil { - return err - } - r.idAddr = r.Addr() - return err -} - -// AppendValues serializes this structure into the provided string key-value store -// useful to build query strings -func (r *Request) AppendValues(values Values) []byte { - if r.Signature != nil { - values.Set("signature", hexutil.Encode(r.Signature[:])) - } - return r.Update.AppendValues(values) -} - -// fromJSON takes an update request JSON and populates an UpdateRequest -func (r *Request) fromJSON(j *updateRequestJSON) error { - - r.ID = j.ID - r.Header.Version = j.ProtocolVersion - - var err error - if j.Data != "" { - r.data, err = hexutil.Decode(j.Data) - if err != nil { - return NewError(ErrInvalidValue, "Cannot decode data") - } - } - - if j.Signature != "" { - sigBytes, err := hexutil.Decode(j.Signature) - if err != nil || len(sigBytes) != signatureLength { - return NewError(ErrInvalidSignature, "Cannot decode signature") - } - r.Signature = new(Signature) - r.idAddr = r.Addr() - copy(r.Signature[:], sigBytes) - } - return nil -} - -// UnmarshalJSON takes a JSON structure stored in a byte array and populates the Request object -// Implements json.Unmarshaler interface -func (r *Request) UnmarshalJSON(rawData []byte) error { - var requestJSON updateRequestJSON - if err := json.Unmarshal(rawData, &requestJSON); err != nil { - return err - } - return r.fromJSON(&requestJSON) -} - -// MarshalJSON takes an update request and encodes it as a JSON structure into a byte array -// Implements json.Marshaler interface -func (r *Request) MarshalJSON() (rawData []byte, err error) { - var signatureString, dataString string - if r.Signature != nil { - signatureString = hexutil.Encode(r.Signature[:]) - } - if r.data != nil { - dataString = hexutil.Encode(r.data) - } - - requestJSON := &updateRequestJSON{ - ID: r.ID, - ProtocolVersion: r.Header.Version, - Data: dataString, - Signature: signatureString, - } - - return json.Marshal(requestJSON) -} diff --git a/swarm/storage/mru/request_test.go b/swarm/storage/mru/request_test.go deleted file mode 100644 index 515de651c..000000000 --- a/swarm/storage/mru/request_test.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package mru - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "fmt" - "reflect" - "testing" - - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" -) - -func areEqualJSON(s1, s2 string) (bool, error) { - //credit for the trick: turtlemonvh https://gist.github.com/turtlemonvh/e4f7404e28387fadb8ad275a99596f67 - var o1 interface{} - var o2 interface{} - - err := json.Unmarshal([]byte(s1), &o1) - if err != nil { - return false, fmt.Errorf("Error mashalling string 1 :: %s", err.Error()) - } - err = json.Unmarshal([]byte(s2), &o2) - if err != nil { - return false, fmt.Errorf("Error mashalling string 2 :: %s", err.Error()) - } - - return reflect.DeepEqual(o1, o2), nil -} - -// TestEncodingDecodingUpdateRequests ensures that requests are serialized properly -// while also checking cryptographically that only the owner of a Feed can update it. -func TestEncodingDecodingUpdateRequests(t *testing.T) { - - charlie := newCharlieSigner() //Charlie - bob := newBobSigner() //Bob - - // Create a feed to our good guy Charlie's name - topic, _ := NewTopic("a good topic name", nil) - firstRequest := NewFirstRequest(topic) - firstRequest.User = charlie.Address() - - // We now encode the create message to simulate we send it over the wire - messageRawData, err := firstRequest.MarshalJSON() - if err != nil { - t.Fatalf("Error encoding first feed update request: %s", err) - } - - // ... the message arrives and is decoded... - var recoveredFirstRequest Request - if err := recoveredFirstRequest.UnmarshalJSON(messageRawData); err != nil { - t.Fatalf("Error decoding first feed update request: %s", err) - } - - // ... but verification should fail because it is not signed! - if err := recoveredFirstRequest.Verify(); err == nil { - t.Fatal("Expected Verify to fail since the message is not signed") - } - - // We now assume that the feed ypdate was created and propagated. - - const expectedSignature = "0x7235b27a68372ddebcf78eba48543fa460864b0b0e99cb533fcd3664820e603312d29426dd00fb39628f5299480a69bf6e462838d78de49ce0704c754c9deb2601" - const expectedJSON = `{"feed":{"topic":"0x6120676f6f6420746f706963206e616d65000000000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}` - - //Put together an unsigned update request that we will serialize to send it to the signer. 
- data := []byte("This hour's update: Swarm 99.0 has been released!") - request := &Request{ - Update: Update{ - ID: ID{ - Epoch: lookup.Epoch{ - Time: 1000, - Level: 1, - }, - Feed: firstRequest.Update.Feed, - }, - data: data, - }, - } - - messageRawData, err = request.MarshalJSON() - if err != nil { - t.Fatalf("Error encoding update request: %s", err) - } - - equalJSON, err := areEqualJSON(string(messageRawData), expectedJSON) - if err != nil { - t.Fatalf("Error decoding update request JSON: %s", err) - } - if !equalJSON { - t.Fatalf("Received a different JSON message. Expected %s, got %s", expectedJSON, string(messageRawData)) - } - - // now the encoded message messageRawData is sent over the wire and arrives to the signer - - //Attempt to extract an UpdateRequest out of the encoded message - var recoveredRequest Request - if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil { - t.Fatalf("Error decoding update request: %s", err) - } - - //sign the request and see if it matches our predefined signature above. - if err := recoveredRequest.Sign(charlie); err != nil { - t.Fatalf("Error signing request: %s", err) - } - - compareByteSliceToExpectedHex(t, "signature", recoveredRequest.Signature[:], expectedSignature) - - // mess with the signature and see what happens. To alter the signature, we briefly decode it as JSON - // to alter the signature field. - var j updateRequestJSON - if err := json.Unmarshal([]byte(expectedJSON), &j); err != nil { - t.Fatal("Error unmarshalling test json, check expectedJSON constant") - } - j.Signature = "Certainly not a signature" - corruptMessage, _ := json.Marshal(j) // encode the message with the bad signature - var corruptRequest Request - if err = corruptRequest.UnmarshalJSON(corruptMessage); err == nil { - t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature") - } - - // Now imagine Bob wants to create an update of his own about the same Feed, - // signing a message with his private key - if err := request.Sign(bob); err != nil { - t.Fatalf("Error signing: %s", err) - } - - // Now Bob encodes the message to send it over the wire... - messageRawData, err = request.MarshalJSON() - if err != nil { - t.Fatalf("Error encoding message:%s", err) - } - - // ... the message arrives to our Swarm node and it is decoded. 
- recoveredRequest = Request{} - if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil { - t.Fatalf("Error decoding message:%s", err) - } - - // Before checking what happened with Bob's update, let's see what would happen if we mess - // with the signature big time to see if Verify catches it - savedSignature := *recoveredRequest.Signature // save the signature for later - binary.LittleEndian.PutUint64(recoveredRequest.Signature[5:], 556845463424) // write some random data to break the signature - if err = recoveredRequest.Verify(); err == nil { - t.Fatal("Expected Verify to fail on corrupt signature") - } - - // restore the Bob's signature from corruption - *recoveredRequest.Signature = savedSignature - - // Now the signature is not corrupt - if err = recoveredRequest.Verify(); err != nil { - t.Fatal(err) - } - - // Reuse object and sign with our friend Charlie's private key - if err := recoveredRequest.Sign(charlie); err != nil { - t.Fatalf("Error signing with the correct private key: %s", err) - } - - // And now, Verify should work since this update now belongs to Charlie - if err = recoveredRequest.Verify(); err != nil { - t.Fatalf("Error verifying that Charlie, can sign a reused request object:%s", err) - } - - // mess with the lookup key to make sure Verify fails: - recoveredRequest.Time = 77999 // this will alter the lookup key - if err = recoveredRequest.Verify(); err == nil { - t.Fatalf("Expected Verify to fail since the lookup key has been altered") - } -} - -func getTestRequest() *Request { - return &Request{ - Update: *getTestFeedUpdate(), - } -} - -func TestUpdateChunkSerializationErrorChecking(t *testing.T) { - - // Test that parseUpdate fails if the chunk is too small - var r Request - if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1+signatureLength)); err == nil { - t.Fatalf("Expected request.fromChunk to fail when chunkData contains less than %d bytes", minimumUpdateDataLength) - } - - r = *getTestRequest() - - _, err := r.toChunk() - if err == nil { - t.Fatal("Expected request.toChunk to fail when there is no data") - } - r.data = []byte("Al bien hacer jamás le falta premio") // put some arbitrary length data - _, err = r.toChunk() - if err == nil { - t.Fatal("expected request.toChunk to fail when there is no signature") - } - - charlie := newCharlieSigner() - if err := r.Sign(charlie); err != nil { - t.Fatalf("error signing:%s", err) - } - - chunk, err := r.toChunk() - if err != nil { - t.Fatalf("error creating update chunk:%s", err) - } - - compareByteSliceToExpectedHex(t, "chunk", chunk.Data(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019416c206269656e206861636572206a616dc3a173206c652066616c7461207072656d696f5a0ffe0bc27f207cd5b00944c8b9cee93e08b89b5ada777f123ac535189333f174a6a4ca2f43a92c4a477a49d774813c36ce8288552c58e6205b0ac35d0507eb00") - - var recovered Request - recovered.fromChunk(chunk.Address(), chunk.Data()) - if !reflect.DeepEqual(recovered, r) { - t.Fatal("Expected recovered Request update to equal the original one") - } -} - -// check that signature address matches update signer address -func TestReverse(t *testing.T) { - - epoch := lookup.Epoch{ - Time: 7888, - Level: 6, - } - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - // set up rpc and create Feeds handler - _, _, teardownTest, err := 
setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - topic, _ := NewTopic("Cervantes quotes", nil) - feed := Feed{ - Topic: topic, - User: signer.Address(), - } - - data := []byte("Donde una puerta se cierra, otra se abre") - - request := new(Request) - request.Feed = feed - request.Epoch = epoch - request.data = data - - // generate a chunk key for this request - key := request.Addr() - - if err = request.Sign(signer); err != nil { - t.Fatal(err) - } - - chunk, err := request.toChunk() - if err != nil { - t.Fatal(err) - } - - // check that we can recover the owner account from the update chunk's signature - var checkUpdate Request - if err := checkUpdate.fromChunk(chunk.Address(), chunk.Data()); err != nil { - t.Fatal(err) - } - checkdigest, err := checkUpdate.GetDigest() - if err != nil { - t.Fatal(err) - } - recoveredAddr, err := getUserAddr(checkdigest, *checkUpdate.Signature) - if err != nil { - t.Fatalf("Retrieve address from signature fail: %v", err) - } - originalAddr := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) - - // check that the metadata retrieved from the chunk matches what we gave it - if recoveredAddr != originalAddr { - t.Fatalf("addresses dont match: %x != %x", originalAddr, recoveredAddr) - } - - if !bytes.Equal(key[:], chunk.Address()[:]) { - t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Address()) - } - if epoch != checkUpdate.Epoch { - t.Fatalf("Expected epoch to be '%s', was '%s'", epoch.String(), checkUpdate.Epoch.String()) - } - if !bytes.Equal(data, checkUpdate.data) { - t.Fatalf("Expected data '%x', was '%x'", data, checkUpdate.data) - } -} diff --git a/swarm/storage/mru/sign.go b/swarm/storage/mru/sign.go deleted file mode 100644 index 03b0fa2b6..000000000 --- a/swarm/storage/mru/sign.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
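For orientation, a minimal sketch of the request signing flow exercised by the tests above, using only the exported API and assuming the post-rename import path swarm/storage/feeds; the topic name and payload are made-up examples.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/storage/feeds"
)

func main() {
	// Hypothetical example: create, sign and verify a first feed update request.
	privKey, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	signer := feeds.NewGenericSigner(privKey)

	topic, _ := feeds.NewTopic("example topic", nil) // assumed topic name
	request := feeds.NewFirstRequest(topic)          // epoch taken from feeds.TimestampProvider

	request.SetData([]byte("first update payload"))
	if err := request.Sign(signer); err != nil { // fills in Signature and caches the chunk address
		panic(err)
	}
	if err := request.Verify(); err != nil { // recovers the signer and re-checks the address
		panic(err)
	}
	fmt.Printf("update signed by %s\n", request.Feed.User.Hex())
}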
- -package mru - -import ( - "crypto/ecdsa" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -const signatureLength = 65 - -// Signature is an alias for a static byte array with the size of a signature -type Signature [signatureLength]byte - -// Signer signs Feed update payloads -type Signer interface { - Sign(common.Hash) (Signature, error) - Address() common.Address -} - -// GenericSigner implements the Signer interface -// It is the vanilla signer that probably should be used in most cases -type GenericSigner struct { - PrivKey *ecdsa.PrivateKey - address common.Address -} - -// NewGenericSigner builds a signer that will sign everything with the provided private key -func NewGenericSigner(privKey *ecdsa.PrivateKey) *GenericSigner { - return &GenericSigner{ - PrivKey: privKey, - address: crypto.PubkeyToAddress(privKey.PublicKey), - } -} - -// Sign signs the supplied data -// It wraps the ethereum crypto.Sign() method -func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error) { - signaturebytes, err := crypto.Sign(data.Bytes(), s.PrivKey) - if err != nil { - return - } - copy(signature[:], signaturebytes) - return -} - -// Address returns the public key of the signer's private key -func (s *GenericSigner) Address() common.Address { - return s.address -} - -// getUserAddr extracts the address of the Feed update signer -func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) { - pub, err := crypto.SigToPub(digest.Bytes(), signature[:]) - if err != nil { - return common.Address{}, err - } - return crypto.PubkeyToAddress(*pub), nil -} diff --git a/swarm/storage/mru/testutil.go b/swarm/storage/mru/testutil.go deleted file mode 100644 index 80e0d4cf0..000000000 --- a/swarm/storage/mru/testutil.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package mru - -import ( - "context" - "fmt" - "path/filepath" - "sync" - - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -const ( - testDbDirName = "feeds" -) - -type TestHandler struct { - *Handler -} - -func (t *TestHandler) Close() { - t.chunkStore.Close() -} - -type mockNetFetcher struct{} - -func (m *mockNetFetcher) Request(ctx context.Context, hopCount uint8) { -} -func (m *mockNetFetcher) Offer(ctx context.Context, source *enode.ID) { -} - -func newFakeNetFetcher(context.Context, storage.Address, *sync.Map) storage.NetFetcher { - return &mockNetFetcher{} -} - -// NewTestHandler creates Handler object to be used for testing purposes. 
-func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) { - path := filepath.Join(datadir, testDbDirName) - fh := NewHandler(params) - localstoreparams := storage.NewDefaultLocalStoreParams() - localstoreparams.Init(path) - localStore, err := storage.NewLocalStore(localstoreparams, nil) - if err != nil { - return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err) - } - localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm))) - localStore.Validators = append(localStore.Validators, fh) - netStore, err := storage.NewNetStore(localStore, nil) - if err != nil { - return nil, err - } - netStore.NewNetFetcherFunc = newFakeNetFetcher - fh.SetStore(netStore) - return &TestHandler{fh}, nil -} diff --git a/swarm/storage/mru/timestampprovider.go b/swarm/storage/mru/timestampprovider.go deleted file mode 100644 index 6ac153213..000000000 --- a/swarm/storage/mru/timestampprovider.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
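A short sketch of how a test outside this package might use the helper above; the temporary-directory handling and test name are illustrative assumptions.

package feeds_test

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/ethereum/go-ethereum/swarm/storage/feeds"
)

func TestHandlerSetupSketch(t *testing.T) {
	// Hypothetical test: back a feeds handler with a throwaway local store.
	datadir, err := ioutil.TempDir("", "feeds-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(datadir)

	fh, err := feeds.NewTestHandler(datadir, &feeds.HandlerParams{})
	if err != nil {
		t.Fatal(err)
	}
	defer fh.Close()
	// fh embeds *feeds.Handler, ready to validate, store and retrieve feed update chunks.
}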
- -package mru - -import ( - "encoding/binary" - "encoding/json" - "time" -) - -// TimestampProvider sets the time source of the mru package -var TimestampProvider timestampProvider = NewDefaultTimestampProvider() - -// Timestamp encodes a point in time as a Unix epoch -type Timestamp struct { - Time uint64 `json:"time"` // Unix epoch timestamp, in seconds -} - -// 8 bytes uint64 Time -const timestampLength = 8 - -// timestampProvider interface describes a source of timestamp information -type timestampProvider interface { - Now() Timestamp // returns the current timestamp information -} - -// binaryGet populates the timestamp structure from the given byte slice -func (t *Timestamp) binaryGet(data []byte) error { - if len(data) != timestampLength { - return NewError(ErrCorruptData, "timestamp data has the wrong size") - } - t.Time = binary.LittleEndian.Uint64(data[:8]) - return nil -} - -// binaryPut Serializes a Timestamp to a byte slice -func (t *Timestamp) binaryPut(data []byte) error { - if len(data) != timestampLength { - return NewError(ErrCorruptData, "timestamp data has the wrong size") - } - binary.LittleEndian.PutUint64(data, t.Time) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaller interface -func (t *Timestamp) UnmarshalJSON(data []byte) error { - return json.Unmarshal(data, &t.Time) -} - -// MarshalJSON implements the json.Marshaller interface -func (t *Timestamp) MarshalJSON() ([]byte, error) { - return json.Marshal(t.Time) -} - -// DefaultTimestampProvider is a TimestampProvider that uses system time -// as time source -type DefaultTimestampProvider struct { -} - -// NewDefaultTimestampProvider creates a system clock based timestamp provider -func NewDefaultTimestampProvider() *DefaultTimestampProvider { - return &DefaultTimestampProvider{} -} - -// Now returns the current time according to this provider -func (dtp *DefaultTimestampProvider) Now() Timestamp { - return Timestamp{ - Time: uint64(time.Now().Unix()), - } -} diff --git a/swarm/storage/mru/topic.go b/swarm/storage/mru/topic.go deleted file mode 100644 index b6adb4cd7..000000000 --- a/swarm/storage/mru/topic.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package mru - -import ( - "bytes" - "encoding/json" - "fmt" - - "github.com/ethereum/go-ethereum/common/bitutil" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// TopicLength establishes the max length of a topic string -const TopicLength = storage.AddressLength - -// Topic represents what a feed is about -type Topic [TopicLength]byte - -// ErrTopicTooLong is returned when creating a topic with a name/related content too long -var ErrTopicTooLong = fmt.Errorf("Topic is too long. 
Max length is %d", TopicLength) - -// NewTopic creates a new topic from a provided name and "related content" byte array, -// merging the two together. -// If relatedContent or name are longer than TopicLength, they will be truncated and an error returned -// name can be an empty string -// relatedContent can be nil -func NewTopic(name string, relatedContent []byte) (topic Topic, err error) { - if relatedContent != nil { - contentLength := len(relatedContent) - if contentLength > TopicLength { - contentLength = TopicLength - err = ErrTopicTooLong - } - copy(topic[:], relatedContent[:contentLength]) - } - nameBytes := []byte(name) - nameLength := len(nameBytes) - if nameLength > TopicLength { - nameLength = TopicLength - err = ErrTopicTooLong - } - bitutil.XORBytes(topic[:], topic[:], nameBytes[:nameLength]) - return topic, err -} - -// Hex will return the topic encoded as an hex string -func (t *Topic) Hex() string { - return hexutil.Encode(t[:]) -} - -// FromHex will parse a hex string into this Topic instance -func (t *Topic) FromHex(hex string) error { - bytes, err := hexutil.Decode(hex) - if err != nil || len(bytes) != len(t) { - return NewErrorf(ErrInvalidValue, "Cannot decode topic") - } - copy(t[:], bytes) - return nil -} - -// Name will try to extract the topic name out of the Topic -func (t *Topic) Name(relatedContent []byte) string { - nameBytes := *t - if relatedContent != nil { - contentLength := len(relatedContent) - if contentLength > TopicLength { - contentLength = TopicLength - } - bitutil.XORBytes(nameBytes[:], t[:], relatedContent[:contentLength]) - } - z := bytes.IndexByte(nameBytes[:], 0) - if z < 0 { - z = TopicLength - } - return string(nameBytes[:z]) - -} - -// UnmarshalJSON implements the json.Unmarshaller interface -func (t *Topic) UnmarshalJSON(data []byte) error { - var hex string - json.Unmarshal(data, &hex) - return t.FromHex(hex) -} - -// MarshalJSON implements the json.Marshaller interface -func (t *Topic) MarshalJSON() ([]byte, error) { - return json.Marshal(t.Hex()) -} diff --git a/swarm/storage/mru/topic_test.go b/swarm/storage/mru/topic_test.go deleted file mode 100644 index dad7c7ddc..000000000 --- a/swarm/storage/mru/topic_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package mru - -import ( - "testing" - - "github.com/ethereum/go-ethereum/common/hexutil" -) - -func TestTopic(t *testing.T) { - related, _ := hexutil.Decode("0xabcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789") - topicName := "test-topic" - topic, _ := NewTopic(topicName, related) - hex := topic.Hex() - expectedHex := "0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789" - if hex != expectedHex { - t.Fatalf("Expected %s, got %s", expectedHex, hex) - } - - var topic2 Topic - topic2.FromHex(hex) - if topic2 != topic { - t.Fatal("Expected recovered topic to be equal to original one") - } - - if topic2.Name(related) != topicName { - t.Fatal("Retrieved name does not match") - } - - bytes, err := topic2.MarshalJSON() - if err != nil { - t.Fatal(err) - } - expectedJSON := `"0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789"` - equal, err := areEqualJSON(expectedJSON, string(bytes)) - if err != nil { - t.Fatal(err) - } - if !equal { - t.Fatalf("Expected JSON to be %s, got %s", expectedJSON, string(bytes)) - } - - err = topic2.UnmarshalJSON(bytes) - if err != nil { - t.Fatal(err) - } - if topic2 != topic { - t.Fatal("Expected recovered topic to be equal to original one") - } - -} diff --git a/swarm/storage/mru/update.go b/swarm/storage/mru/update.go deleted 
file mode 100644 index f6e70b4d8..000000000 --- a/swarm/storage/mru/update.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package mru - -import ( - "fmt" - "strconv" - - "github.com/ethereum/go-ethereum/swarm/chunk" -) - -// ProtocolVersion defines the current version of the protocol that will be included in each update message -const ProtocolVersion uint8 = 0 - -const headerLength = 8 - -// Header defines a update message header including a protocol version byte -type Header struct { - Version uint8 // Protocol version - Padding [headerLength - 1]uint8 // reserved for future use -} - -// Update encapsulates the information sent as part of a feed update -type Update struct { - Header Header // - ID // Feed Update identifying information - data []byte // actual data payload -} - -const minimumUpdateDataLength = idLength + headerLength + 1 -const maxUpdateDataLength = chunk.DefaultSize - signatureLength - idLength - headerLength - -// binaryPut serializes the feed update information into the given slice -func (r *Update) binaryPut(serializedData []byte) error { - datalength := len(r.data) - if datalength == 0 { - return NewError(ErrInvalidValue, "a feed update must contain data") - } - - if datalength > maxUpdateDataLength { - return NewErrorf(ErrInvalidValue, "feed update data is too big (length=%d). Max length=%d", datalength, maxUpdateDataLength) - } - - if len(serializedData) != r.binaryLength() { - return NewErrorf(ErrInvalidValue, "slice passed to putBinary must be of exact size. 
Expected %d bytes", r.binaryLength()) - } - - var cursor int - // serialize Header - serializedData[cursor] = r.Header.Version - copy(serializedData[cursor+1:headerLength], r.Header.Padding[:headerLength-1]) - cursor += headerLength - - // serialize ID - if err := r.ID.binaryPut(serializedData[cursor : cursor+idLength]); err != nil { - return err - } - cursor += idLength - - // add the data - copy(serializedData[cursor:], r.data) - cursor += datalength - - return nil -} - -// binaryLength returns the expected number of bytes this structure will take to encode -func (r *Update) binaryLength() int { - return idLength + headerLength + len(r.data) -} - -// binaryGet populates this instance from the information contained in the passed byte slice -func (r *Update) binaryGet(serializedData []byte) error { - if len(serializedData) < minimumUpdateDataLength { - return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a feed update chunk", minimumUpdateDataLength) - } - dataLength := len(serializedData) - idLength - headerLength - // at this point we can be satisfied that we have the correct data length to read - - var cursor int - - // deserialize Header - r.Header.Version = serializedData[cursor] // extract the protocol version - copy(r.Header.Padding[:headerLength-1], serializedData[cursor+1:headerLength]) // extract the padding - cursor += headerLength - - if err := r.ID.binaryGet(serializedData[cursor : cursor+idLength]); err != nil { - return err - } - cursor += idLength - - data := serializedData[cursor : cursor+dataLength] - cursor += dataLength - - // now that all checks have passed, copy data into structure - r.data = make([]byte, dataLength) - copy(r.data, data) - - return nil - -} - -// FromValues deserializes this instance from a string key-value store -// useful to parse query strings -func (r *Update) FromValues(values Values, data []byte) error { - r.data = data - version, _ := strconv.ParseUint(values.Get("protocolVersion"), 10, 32) - r.Header.Version = uint8(version) - return r.ID.FromValues(values) -} - -// AppendValues serializes this structure into the provided string key-value store -// useful to build query strings -func (r *Update) AppendValues(values Values) []byte { - r.ID.AppendValues(values) - values.Set("protocolVersion", fmt.Sprintf("%d", r.Header.Version)) - return r.data -} diff --git a/swarm/storage/mru/update_test.go b/swarm/storage/mru/update_test.go deleted file mode 100644 index 62c401f3f..000000000 --- a/swarm/storage/mru/update_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package mru - -import ( - "testing" -) - -func getTestFeedUpdate() *Update { - return &Update{ - ID: *getTestID(), - data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"), - } -} - -func TestUpdateSerializer(t *testing.T) { - testBinarySerializerRecovery(t, getTestFeedUpdate(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f") -} - -func TestUpdateLengthCheck(t *testing.T) { - testBinarySerializerLengthCheck(t, getTestFeedUpdate()) - // Test fail if update is too big - update := getTestFeedUpdate() - update.data = make([]byte, maxUpdateDataLength+100) - serialized := make([]byte, update.binaryLength()) - if err := update.binaryPut(serialized); err == nil { - t.Fatal("Expected update.binaryPut to fail since update is too big") - } - - // test fail if data is empty or nil - update.data = nil - serialized = make([]byte, update.binaryLength()) - if err := update.binaryPut(serialized); err == nil { - t.Fatal("Expected update.binaryPut to fail since data is empty") - } -} diff --git a/swarm/storage/mru/view.go b/swarm/storage/mru/view.go deleted file mode 100644 index 8d21ef7a4..000000000 --- a/swarm/storage/mru/view.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package mru - -import ( - "hash" - "unsafe" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// Feed represents a particular user's stream of updates on a Topic -type Feed struct { - Topic Topic `json:"topic"` - User common.Address `json:"user"` -} - -// Feed layout: -// TopicLength bytes -// userAddr common.AddressLength bytes -const feedLength = TopicLength + common.AddressLength - -// mapKey calculates a unique id for this feed. Used by the cache map in `Handler` -func (u *Feed) mapKey() uint64 { - serializedData := make([]byte, feedLength) - u.binaryPut(serializedData) - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - hasher.Write(serializedData) - hash := hasher.Sum(nil) - return *(*uint64)(unsafe.Pointer(&hash[0])) -} - -// binaryPut serializes this Feed instance into the provided slice -func (u *Feed) binaryPut(serializedData []byte) error { - if len(serializedData) != feedLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize Feed. 
Expected %d, got %d", feedLength, len(serializedData)) - } - var cursor int - copy(serializedData[cursor:cursor+TopicLength], u.Topic[:TopicLength]) - cursor += TopicLength - - copy(serializedData[cursor:cursor+common.AddressLength], u.User[:]) - cursor += common.AddressLength - - return nil -} - -// binaryLength returns the expected size of this structure when serialized -func (u *Feed) binaryLength() int { - return feedLength -} - -// binaryGet restores the current instance from the information contained in the passed slice -func (u *Feed) binaryGet(serializedData []byte) error { - if len(serializedData) != feedLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to read Feed. Expected %d, got %d", feedLength, len(serializedData)) - } - - var cursor int - copy(u.Topic[:], serializedData[cursor:cursor+TopicLength]) - cursor += TopicLength - - copy(u.User[:], serializedData[cursor:cursor+common.AddressLength]) - cursor += common.AddressLength - - return nil -} - -// Hex serializes the Feed to a hex string -func (u *Feed) Hex() string { - serializedData := make([]byte, feedLength) - u.binaryPut(serializedData) - return hexutil.Encode(serializedData) -} - -// FromValues deserializes this instance from a string key-value store -// useful to parse query strings -func (u *Feed) FromValues(values Values) (err error) { - topic := values.Get("topic") - if topic != "" { - if err := u.Topic.FromHex(values.Get("topic")); err != nil { - return err - } - } else { // see if the user set name and relatedcontent - name := values.Get("name") - relatedContent, _ := hexutil.Decode(values.Get("relatedcontent")) - if len(relatedContent) > 0 { - if len(relatedContent) < storage.AddressLength { - return NewErrorf(ErrInvalidValue, "relatedcontent field must be a hex-encoded byte array exactly %d bytes long", storage.AddressLength) - } - relatedContent = relatedContent[:storage.AddressLength] - } - u.Topic, err = NewTopic(name, relatedContent) - if err != nil { - return err - } - } - u.User = common.HexToAddress(values.Get("user")) - return nil -} - -// AppendValues serializes this structure into the provided string key-value store -// useful to build query strings -func (u *Feed) AppendValues(values Values) { - values.Set("topic", u.Topic.Hex()) - values.Set("user", u.User.Hex()) -} diff --git a/swarm/storage/mru/view_test.go b/swarm/storage/mru/view_test.go deleted file mode 100644 index e2f4d6b30..000000000 --- a/swarm/storage/mru/view_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
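As a quick illustration of the Feed identifier serialized above, a hedged sketch that builds a feed from a topic and a user address and prints its hex form; the user address is the test value used elsewhere in this package, and the import path assumes the post-rename location.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/feeds"
)

func main() {
	topic, err := feeds.NewTopic("world news report, every hour", nil)
	if err != nil {
		panic(err)
	}
	f := feeds.Feed{
		Topic: topic,
		User:  common.HexToAddress("0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c"), // example user address
	}
	// Hex() concatenates the topic bytes and the 20-byte user address.
	fmt.Println(f.Hex())
}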
-package mru - -import ( - "testing" -) - -func getTestFeed() *Feed { - topic, _ := NewTopic("world news report, every hour", nil) - return &Feed{ - Topic: topic, - User: newCharlieSigner().Address(), - } -} - -func TestFeedSerializerDeserializer(t *testing.T) { - testBinarySerializerRecovery(t, getTestFeed(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781c") -} - -func TestFeedSerializerLengthCheck(t *testing.T) { - testBinarySerializerLengthCheck(t, getTestFeed()) -} diff --git a/swarm/swarm.go b/swarm/swarm.go index 4d9bf724f..74c53dece 100644 --- a/swarm/swarm.go +++ b/swarm/swarm.go @@ -50,7 +50,7 @@ import ( "github.com/ethereum/go-ethereum/swarm/state" "github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage/mock" - "github.com/ethereum/go-ethereum/swarm/storage/mru" + "github.com/ethereum/go-ethereum/swarm/storage/feeds" "github.com/ethereum/go-ethereum/swarm/tracing" ) @@ -186,10 +186,10 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e // Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage self.fileStore = storage.NewFileStore(self.netStore, self.config.FileStoreParams) - var feedsHandler *mru.Handler - fhParams := &mru.HandlerParams{} + var feedsHandler *feeds.Handler + fhParams := &feeds.HandlerParams{} - feedsHandler = mru.NewHandler(fhParams) + feedsHandler = feeds.NewHandler(fhParams) feedsHandler.SetStore(self.netStore) lstore.Validators = []storage.ChunkValidator{ diff --git a/swarm/testutil/http.go b/swarm/testutil/http.go index 2162da880..05b7c52e0 100644 --- a/swarm/testutil/http.go +++ b/swarm/testutil/http.go @@ -25,7 +25,7 @@ import ( "github.com/ethereum/go-ethereum/swarm/api" "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/mru" + "github.com/ethereum/go-ethereum/swarm/storage/feeds" ) type TestServer interface { @@ -54,8 +54,8 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso t.Fatal(err) } - rhparams := &mru.HandlerParams{} - rh, err := mru.NewTestHandler(feedsDir, rhparams) + rhparams := &feeds.HandlerParams{} + rh, err := feeds.NewTestHandler(feedsDir, rhparams) if err != nil { t.Fatal(err) } @@ -75,7 +75,7 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso }, CurrentTime: 42, } - mru.TimestampProvider = tss + feeds.TimestampProvider = tss return tss } @@ -92,6 +92,6 @@ func (t *TestSwarmServer) Close() { t.cleanup() } -func (t *TestSwarmServer) Now() mru.Timestamp { - return mru.Timestamp{Time: t.CurrentTime} +func (t *TestSwarmServer) Now() feeds.Timestamp { + return feeds.Timestamp{Time: t.CurrentTime} } -- cgit From 68b8088cb9730bfe0ee2395dd5506dc744076fc2 Mon Sep 17 00:00:00 2001 From: Javier Peletier Date: Mon, 1 Oct 2018 18:36:05 +0200 Subject: swarm: Changed owners. 
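The testutil change above swaps in a fixed clock by assigning to feeds.TimestampProvider; a hedged sketch of the same idea, with fakeClock as an invented type name:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/storage/feeds"
)

// fakeClock is a hypothetical timestamp source with a manually controlled time.
type fakeClock struct {
	current uint64
}

// Now satisfies the timestamp provider contract expected by the feeds package.
func (c *fakeClock) Now() feeds.Timestamp {
	return feeds.Timestamp{Time: c.current}
}

func main() {
	clock := &fakeClock{current: 42}
	feeds.TimestampProvider = clock // new requests will now use the fixed time
	fmt.Println(feeds.TimestampProvider.Now().Time)
}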
--- swarm/OWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'swarm') diff --git a/swarm/OWNERS b/swarm/OWNERS index 4681ed5ef..cea47766f 100644 --- a/swarm/OWNERS +++ b/swarm/OWNERS @@ -22,5 +22,5 @@ swarm ├── storage ─────────────── ethersphere │ ├── encryption ──────── @gbalint, @zelig, @nagydani │ ├── mock ────────────── @janos -│ └── feeds ───────────── @nolash +│ └── feeds ───────────── @nolash, @jpeletier └── testutil ────────────── @lmars \ No newline at end of file -- cgit From 58c0879c2fbc9f88f35ba503674088da23a8a5a7 Mon Sep 17 00:00:00 2001 From: Javier Peletier Date: Tue, 2 Oct 2018 09:32:46 +0200 Subject: swarm/storage/feeds: removed capital Feed throughout --- swarm/api/api.go | 22 +++++++++++----------- swarm/api/client/client.go | 8 ++++---- swarm/api/http/server.go | 4 ++-- swarm/api/http/server_test.go | 4 ++-- swarm/api/manifest.go | 2 +- swarm/network/README.md | 4 ++-- swarm/storage/feeds/cacheentry.go | 4 ++-- swarm/storage/feeds/error.go | 4 ++-- swarm/storage/feeds/feed.go | 10 +++++----- swarm/storage/feeds/handler.go | 12 ++++++------ swarm/storage/feeds/handler_test.go | 14 +++++++------- swarm/storage/feeds/lookup/lookup.go | 2 +- swarm/storage/feeds/request.go | 2 +- swarm/storage/feeds/request_test.go | 8 ++++---- swarm/storage/feeds/sign.go | 4 ++-- swarm/storage/localstore_test.go | 2 +- swarm/testutil/http.go | 2 +- 17 files changed, 54 insertions(+), 54 deletions(-) (limited to 'swarm') diff --git a/swarm/api/api.go b/swarm/api/api.go index c61bd8064..a23e09e68 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -404,7 +404,7 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage return a.Get(ctx, decrypt, adr, entry.Path) } - // we need to do some extra work if this is a Feed manifest + // we need to do some extra work if this is a Swarm feed manifest if entry.ContentType == FeedContentType { if entry.Feed == nil { return reader, mimeType, status, nil, fmt.Errorf("Cannot decode Feed in manifest") @@ -957,7 +957,7 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver return addr, manifestEntryMap, nil } -// FeedsLookup finds Swarm Feeds Updates at specific points in time, or the latest update +// FeedsLookup finds Swarm feeds updates at specific points in time, or the latest update func (a *API) FeedsLookup(ctx context.Context, query *feeds.Query) ([]byte, error) { _, err := a.feeds.Lookup(ctx, query) if err != nil { @@ -971,17 +971,17 @@ func (a *API) FeedsLookup(ctx context.Context, query *feeds.Query) ([]byte, erro return data, nil } -// FeedsNewRequest creates a Request object to update a specific Feed +// FeedsNewRequest creates a Request object to update a specific feed func (a *API) FeedsNewRequest(ctx context.Context, feed *feeds.Feed) (*feeds.Request, error) { return a.feeds.NewRequest(ctx, feed) } -// FeedsUpdate publishes a new update on the given Feed +// FeedsUpdate publishes a new update on the given feed func (a *API) FeedsUpdate(ctx context.Context, request *feeds.Request) (storage.Address, error) { return a.feeds.Update(ctx, request) } -// FeedsHashSize returned the size of the digest produced by Swarm Feeds' hashing function +// FeedsHashSize returned the size of the digest produced by Swarm feeds' hashing function func (a *API) FeedsHashSize() int { return a.feeds.HashSize } @@ -992,7 +992,7 @@ var ErrCannotLoadFeedManifest = errors.New("Cannot load feed manifest") // ErrNotAFeedManifest is returned when the address provided returned something other than 
a valid manifest var ErrNotAFeedManifest = errors.New("Not a feed manifest") -// ResolveFeedManifest retrieves the Feed manifest for the given address, and returns the referenced Feed. +// ResolveFeedManifest retrieves the Swarm feed manifest for the given address, and returns the referenced Feed. func (a *API) ResolveFeedManifest(ctx context.Context, addr storage.Address) (*feeds.Feed, error) { trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt) if err != nil { @@ -1007,15 +1007,15 @@ func (a *API) ResolveFeedManifest(ctx context.Context, addr storage.Address) (*f return entry.Feed, nil } -// ErrCannotResolveFeedURI is returned when the ENS resolver is not able to translate a name to a Feed +// ErrCannotResolveFeedURI is returned when the ENS resolver is not able to translate a name to a Swarm feed var ErrCannotResolveFeedURI = errors.New("Cannot resolve Feed URI") // ErrCannotResolveFeed is returned when values provided are not enough or invalid to recreate a -// Feed out of them. +// feed out of them. var ErrCannotResolveFeed = errors.New("Cannot resolve Feed") -// ResolveFeed attempts to extract Feed information out of the manifest, if provided -// If not, it attempts to extract the Feed out of a set of key-value pairs +// ResolveFeed attempts to extract feed information out of the manifest, if provided +// If not, it attempts to extract the feed out of a set of key-value pairs func (a *API) ResolveFeed(ctx context.Context, uri *URI, values feeds.Values) (*feeds.Feed, error) { var feed *feeds.Feed var err error @@ -1029,7 +1029,7 @@ func (a *API) ResolveFeed(ctx context.Context, uri *URI, values feeds.Values) (* } } - // get the Feed from the manifest + // get the Swarm feed from the manifest feed, err = a.ResolveFeedManifest(ctx, manifestAddr) if err != nil { return nil, err diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go index 6b4614581..ac218e6a7 100644 --- a/swarm/api/client/client.go +++ b/swarm/api/client/client.go @@ -604,9 +604,9 @@ func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error) // ErrNoFeedUpdatesFound is returned when Swarm cannot find updates of the given feed var ErrNoFeedUpdatesFound = errors.New("No updates found for this feed") -// CreateFeedWithManifest creates a Feed Manifest, initializing it with the provided +// CreateFeedWithManifest creates a feed manifest, initializing it with the provided // data -// Returns the resulting Feed Manifest address that you can use to include in an ENS Resolver (setContent) +// Returns the resulting feed manifest address that you can use to include in an ENS Resolver (setContent) // or reference future updates (Client.UpdateFeed) func (c *Client) CreateFeedWithManifest(request *feeds.Request) (string, error) { responseStream, err := c.updateFeed(request, true) @@ -669,7 +669,7 @@ func (c *Client) QueryFeed(query *feeds.Query, manifestAddressOrDomain string) ( // queryFeed returns a byte stream with the raw content of the feed update // manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver // points to that address -// meta set to true will instruct the node return Feed metainformation instead +// meta set to true will instruct the node return feed metainformation instead func (c *Client) queryFeed(query *feeds.Query, manifestAddressOrDomain string, meta bool) (io.ReadCloser, error) { URL, err := url.Parse(c.Gateway) if err != nil { @@ -706,7 +706,7 @@ func (c *Client) queryFeed(query *feeds.Query, 
manifestAddressOrDomain string, m return res.Body, nil } -// GetFeedRequest returns a structure that describes the referenced Feed status +// GetFeedRequest returns a structure that describes the referenced feed status // manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver // points to that address func (c *Client) GetFeedRequest(query *feeds.Query, manifestAddressOrDomain string) (*feeds.Request, error) { diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index 84e06d09f..0cfeb1f22 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -518,7 +518,7 @@ func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) { return } // the key to the manifest will be passed back to the client - // the client can access the Feed directly through its Feed member + // the client can access the feed directly through its Feed member // the manifest key can be set as content in the resolver of the ENS name outdata, err := json.Marshal(m) if err != nil { @@ -531,7 +531,7 @@ func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) { } } -// HandleGetFeed retrieves Swarm Feeds updates: +// HandleGetFeed retrieves Swarm feeds updates: // bzz-feed:// - get latest feed update, given a manifest address // - or - // specify user + topic (optional), subtopic name (optional) directly, without manifest: diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index 16813cab5..4ab99b209 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -165,7 +165,7 @@ func TestBzzFeedMultihash(t *testing.T) { } } -// Test Swarm Feeds using the raw update methods +// Test Swarm feeds using the raw update methods func TestBzzFeed(t *testing.T) { srv := testutil.NewTestSwarmServer(t, serverFunc, nil) signer, _ := newTestSigner() @@ -305,7 +305,7 @@ func TestBzzFeed(t *testing.T) { srv.CurrentTime++ log.Info("update 2") - // 1.- get metadata about this Feed + // 1.- get metadata about this feed testBzzResUrl = fmt.Sprintf("%s/bzz-feed:/%s/", srv.URL, correctManifestAddrHex) resp, err = http.Get(testBzzResUrl + "?meta=1") if err != nil { diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index 9ac3214a5..e45ae85c4 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -80,7 +80,7 @@ func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, return addr, err } -// Manifest hack for supporting Feeds from the bzz: scheme +// Manifest hack for supporting Swarm feeds from the bzz: scheme // see swarm/api/api.go:API.Get() for more information func (a *API) NewFeedManifest(ctx context.Context, feed *feeds.Feed) (storage.Address, error) { var manifest Manifest diff --git a/swarm/network/README.md b/swarm/network/README.md index 33d215c4b..684ad0c8c 100644 --- a/swarm/network/README.md +++ b/swarm/network/README.md @@ -37,7 +37,7 @@ Using the streamer logic, various stream types are easy to implement: * live session syncing * historical syncing * simple retrieve requests and deliveries -* Swarm Feeds streams +* swarm feeds streams * receipting for finger pointing ## Syncing @@ -57,7 +57,7 @@ receipts for a deleted chunk easily to refute their challenge. 
- syncing should be resilient to cut connections, metadata should be persisted that keep track of syncing state across sessions, historical syncing state should survive restart - extra data structures to support syncing should be kept at minimum -- syncing is organized separately for chunk types (Swarm Feed Updates v regular content chunk) +- syncing is not organized separately for chunk types (Swarm feed updates v regular content chunk) - various types of streams should have common logic abstracted Syncing is now entirely mediated by the localstore, ie., no processes or memory leaks due to network contention. diff --git a/swarm/storage/feeds/cacheentry.go b/swarm/storage/feeds/cacheentry.go index 7a2f87b56..e40037688 100644 --- a/swarm/storage/feeds/cacheentry.go +++ b/swarm/storage/feeds/cacheentry.go @@ -30,7 +30,7 @@ const ( defaultRetrieveTimeout = 100 * time.Millisecond ) -// cacheEntry caches the last known update of a specific Feed. +// cacheEntry caches the last known update of a specific Swarm feed. type cacheEntry struct { Update *bytes.Reader @@ -42,7 +42,7 @@ func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) { return int64(len(r.Update.data)), nil } -//returns the Feed's topic +//returns the feed's topic func (r *cacheEntry) Topic() Topic { return r.Feed.Topic } diff --git a/swarm/storage/feeds/error.go b/swarm/storage/feeds/error.go index 13266b900..7751a9f19 100644 --- a/swarm/storage/feeds/error.go +++ b/swarm/storage/feeds/error.go @@ -35,7 +35,7 @@ const ( ErrCnt ) -// Error is a the typed error object used for Swarm Feeds +// Error is a the typed error object used for Swarm feeds type Error struct { code int err string @@ -52,7 +52,7 @@ func (e *Error) Code() int { return e.code } -// NewError creates a new Swarm Feeds Error object with the specified code and custom error message +// NewError creates a new Swarm feeds Error object with the specified code and custom error message func NewError(code int, s string) error { if code < 0 || code >= ErrCnt { panic("no such error code!") diff --git a/swarm/storage/feeds/feed.go b/swarm/storage/feeds/feed.go index 8a807d506..d5b262555 100644 --- a/swarm/storage/feeds/feed.go +++ b/swarm/storage/feeds/feed.go @@ -25,7 +25,7 @@ import ( "github.com/ethereum/go-ethereum/swarm/storage" ) -// Feed represents a particular user's stream of updates on a Topic +// Feed represents a particular user's stream of updates on a topic type Feed struct { Topic Topic `json:"topic"` User common.Address `json:"user"` @@ -48,10 +48,10 @@ func (f *Feed) mapKey() uint64 { return *(*uint64)(unsafe.Pointer(&hash[0])) } -// binaryPut serializes this Feed instance into the provided slice +// binaryPut serializes this feed instance into the provided slice func (f *Feed) binaryPut(serializedData []byte) error { if len(serializedData) != feedLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize Feed. Expected %d, got %d", feedLength, len(serializedData)) + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize feed. Expected %d, got %d", feedLength, len(serializedData)) } var cursor int copy(serializedData[cursor:cursor+TopicLength], f.Topic[:TopicLength]) @@ -71,7 +71,7 @@ func (f *Feed) binaryLength() int { // binaryGet restores the current instance from the information contained in the passed slice func (f *Feed) binaryGet(serializedData []byte) error { if len(serializedData) != feedLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to read Feed. 
Expected %d, got %d", feedLength, len(serializedData)) + return NewErrorf(ErrInvalidValue, "Incorrect slice size to read feed. Expected %d, got %d", feedLength, len(serializedData)) } var cursor int @@ -84,7 +84,7 @@ func (f *Feed) binaryGet(serializedData []byte) error { return nil } -// Hex serializes the Feed to a hex string +// Hex serializes the feed to a hex string func (f *Feed) Hex() string { serializedData := make([]byte, feedLength) f.binaryPut(serializedData) diff --git a/swarm/storage/feeds/handler.go b/swarm/storage/feeds/handler.go index 9c69fd1b4..2c5261614 100644 --- a/swarm/storage/feeds/handler.go +++ b/swarm/storage/feeds/handler.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// Handler is the API for Feeds +// Handler is the API for feeds // It enables creating, updating, syncing and retrieving feed updates and their data package feeds @@ -57,7 +57,7 @@ func init() { } } -// NewHandler creates a new Swarm Feeds API +// NewHandler creates a new Swarm feeds API func NewHandler(params *HandlerParams) *Handler { fh := &Handler{ cache: make(map[uint64]*cacheEntry), @@ -74,7 +74,7 @@ func NewHandler(params *HandlerParams) *Handler { return fh } -// SetStore sets the store backend for the Swarm Feeds API +// SetStore sets the store backend for the Swarm feeds API func (h *Handler) SetStore(store *storage.NetStore) { h.chunkStore = store } @@ -110,7 +110,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { return true } -// GetContent retrieves the data payload of the last synced update of the Feed +// GetContent retrieves the data payload of the last synced update of the feed func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) { if feed == nil { return nil, nil, NewError(ErrInvalidValue, "feed is nil") @@ -240,7 +240,7 @@ func (h *Handler) updateCache(request *Request) (*cacheEntry, error) { } // Update publishes a feed update -// Note that a Feed update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. +// Note that a feed update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. // This results in a max payload of `maxUpdateDataLength` (check update.go for more details) // An error will be returned if the total length of the chunk payload will exceed this limit. 
// Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update @@ -286,7 +286,7 @@ func (h *Handler) get(feed *Feed) *cacheEntry { return feedUpdate } -// Sets the feed update cache value for the given Feed +// Sets the feed update cache value for the given feed func (h *Handler) set(feed *Feed, feedUpdate *cacheEntry) { mapKey := feed.mapKey() h.cacheLock.Lock() diff --git a/swarm/storage/feeds/handler_test.go b/swarm/storage/feeds/handler_test.go index 8331980ca..967bc6d4b 100644 --- a/swarm/storage/feeds/handler_test.go +++ b/swarm/storage/feeds/handler_test.go @@ -89,11 +89,11 @@ func TestFeedsHandler(t *testing.T) { } defer teardownTest() - // create a new Feed + // create a new feed ctx, cancel := context.WithCancel(context.Background()) defer cancel() - topic, _ := NewTopic("Mess with Swarm Feeds code and see what ghost catches you", nil) + topic, _ := NewTopic("Mess with Swarm feeds code and see what ghost catches you", nil) feed := Feed{ Topic: topic, User: signer.Address(), @@ -266,7 +266,7 @@ func TestSparseUpdates(t *testing.T) { defer teardownTest() defer os.RemoveAll(datadir) - // create a new Feed + // create a new feed ctx, cancel := context.WithCancel(context.Background()) defer cancel() topic, _ := NewTopic("Very slow updates", nil) @@ -348,7 +348,7 @@ func TestValidator(t *testing.T) { } defer teardownTest() - // create new Feed + // create new feed topic, _ := NewTopic(subtopicName, nil) feed := Feed{ Topic: topic, @@ -382,7 +382,7 @@ func TestValidator(t *testing.T) { } // tests that the content address validator correctly checks the data -// tests that Feed update chunks are passed through content address validator +// tests that feed update chunks are passed through content address validator // there is some redundancy in this test as it also tests content addressed chunks, // which should be evaluated as invalid chunks by this validator func TestValidatorInStore(t *testing.T) { @@ -409,7 +409,7 @@ func TestValidatorInStore(t *testing.T) { t.Fatal(err) } - // set up Swarm Feeds handler and add is as a validator to the localstore + // set up Swarm feeds handler and add is as a validator to the localstore fhParams := &HandlerParams{} fh := NewHandler(fhParams) store.Validators = append(store.Validators, fh) @@ -463,7 +463,7 @@ func TestValidatorInStore(t *testing.T) { } } -// create rpc and Feeds Handler +// create rpc and feeds Handler func setupTest(timeProvider timestampProvider, signer Signer) (fh *TestHandler, datadir string, teardown func(), err error) { var fsClean func() diff --git a/swarm/storage/feeds/lookup/lookup.go b/swarm/storage/feeds/lookup/lookup.go index a5154d261..2f862d81c 100644 --- a/swarm/storage/feeds/lookup/lookup.go +++ b/swarm/storage/feeds/lookup/lookup.go @@ -15,7 +15,7 @@ // along with the go-ethereum library. If not, see . 
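The handler and tests above lean on the lookup package whose diff follows. The sketch below is illustrative and non-authoritative: it only uses helpers visible in this patch (GetFirstEpoch, GetNextEpoch, Lookup, NoClue), an in-memory map stands in for the chunk store, and the package name, types and timestamps are invented.

package feedexample

import (
	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
)

type fakeUpdate struct {
	time uint64
	data string
}

// latestBefore stores a few fake updates keyed by epoch (assuming lookup.Epoch is a
// small comparable struct) and asks the lookup algorithm for the newest update at or
// before limit, mimicking what Handler.Lookup does with real chunks.
func latestBefore(times []uint64, limit uint64) (interface{}, error) {
	store := make(map[lookup.Epoch]fakeUpdate)

	// publish: each update gets the epoch following the previous one, as in TestSparseUpdates
	var epoch lookup.Epoch
	for i, t := range times { // times must be increasing
		if i == 0 {
			epoch = lookup.GetFirstEpoch(t)
		} else {
			epoch = lookup.GetNextEpoch(epoch, t)
		}
		store[epoch] = fakeUpdate{time: t, data: "..."}
	}

	// retrieve: the callback is invoked for every epoch the algorithm wants to probe
	return lookup.Lookup(limit, lookup.NoClue, func(e lookup.Epoch, now uint64) (interface{}, error) {
		u, ok := store[e]
		if !ok || u.time > limit {
			return nil, nil // nothing usable at this epoch; let the algorithm keep guessing
		}
		return u, nil
	})
}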
/* -Package lookup defines Feed lookup algorithms and provides tools to place updates +Package lookup defines feed lookup algorithms and provides tools to place updates so they can be found */ package lookup diff --git a/swarm/storage/feeds/request.go b/swarm/storage/feeds/request.go index 719d8fba8..81e1b10fe 100644 --- a/swarm/storage/feeds/request.go +++ b/swarm/storage/feeds/request.go @@ -27,7 +27,7 @@ import ( "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" ) -// Request represents a request to sign or signed Feed Update message +// Request represents a request to sign or signed feed update message type Request struct { Update // actual content that will be put on the chunk, less signature Signature *Signature diff --git a/swarm/storage/feeds/request_test.go b/swarm/storage/feeds/request_test.go index 2e3783834..a9a4d1505 100644 --- a/swarm/storage/feeds/request_test.go +++ b/swarm/storage/feeds/request_test.go @@ -47,7 +47,7 @@ func areEqualJSON(s1, s2 string) (bool, error) { } // TestEncodingDecodingUpdateRequests ensures that requests are serialized properly -// while also checking cryptographically that only the owner of a Feed can update it. +// while also checking cryptographically that only the owner of a feed can update it. func TestEncodingDecodingUpdateRequests(t *testing.T) { charlie := newCharlieSigner() //Charlie @@ -136,7 +136,7 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature") } - // Now imagine Bob wants to create an update of his own about the same Feed, + // Now imagine Bob wants to create an update of his own about the same feed, // signing a message with his private key if err := request.Sign(bob); err != nil { t.Fatalf("Error signing: %s", err) @@ -228,7 +228,7 @@ func TestUpdateChunkSerializationErrorChecking(t *testing.T) { var recovered Request recovered.fromChunk(chunk.Address(), chunk.Data()) if !reflect.DeepEqual(recovered, r) { - t.Fatal("Expected recovered Request update to equal the original one") + t.Fatal("Expected recovered feed update request to equal the original one") } } @@ -248,7 +248,7 @@ func TestReverse(t *testing.T) { // signer containing private key signer := newAliceSigner() - // set up rpc and create Feeds handler + // set up rpc and create feeds handler _, _, teardownTest, err := setupTest(timeProvider, signer) if err != nil { t.Fatal(err) diff --git a/swarm/storage/feeds/sign.go b/swarm/storage/feeds/sign.go index a69942f2b..f5760c8f2 100644 --- a/swarm/storage/feeds/sign.go +++ b/swarm/storage/feeds/sign.go @@ -28,7 +28,7 @@ const signatureLength = 65 // Signature is an alias for a static byte array with the size of a signature type Signature [signatureLength]byte -// Signer signs Feed update payloads +// Signer signs feed update payloads type Signer interface { Sign(common.Hash) (Signature, error) Address() common.Address @@ -65,7 +65,7 @@ func (s *GenericSigner) Address() common.Address { return s.address } -// getUserAddr extracts the address of the Feed update signer +// getUserAddr extracts the address of the feed update signer func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) { pub, err := crypto.SigToPub(digest.Bytes(), signature[:]) if err != nil { diff --git a/swarm/storage/localstore_test.go b/swarm/storage/localstore_test.go index b8eea4350..10f43f30f 100644 --- a/swarm/storage/localstore_test.go +++ b/swarm/storage/localstore_test.go @@ -30,7 +30,7 @@ var ( 
) // tests that the content address validator correctly checks the data -// tests that Feed update chunks are passed through content address validator +// tests that feed update chunks are passed through content address validator // the test checking the resouce update validator internal correctness is found in storage/feeds/handler_test.go func TestValidator(t *testing.T) { // set up localstore diff --git a/swarm/testutil/http.go b/swarm/testutil/http.go index 05b7c52e0..2e5735ece 100644 --- a/swarm/testutil/http.go +++ b/swarm/testutil/http.go @@ -48,7 +48,7 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso } fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams()) - // Swarm Feeds test setup + // Swarm feeds test setup feedsDir, err := ioutil.TempDir("", "swarm-feeds-test") if err != nil { t.Fatal(err) -- cgit From 696bc9b01ce0ed3347fa1bd64460ccc08091e90a Mon Sep 17 00:00:00 2001 From: Javier Peletier Date: Tue, 2 Oct 2018 09:36:11 +0200 Subject: swarm/storage/feeds: renamed vars that can conflict with package name --- swarm/api/client/client_test.go | 4 ++-- swarm/storage/feeds/handler_test.go | 22 +++++++++++----------- swarm/storage/feeds/request_test.go | 4 ++-- 3 files changed, 15 insertions(+), 15 deletions(-) (limited to 'swarm') diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go index 4fad7fbc7..2aecdb299 100644 --- a/swarm/api/client/client_test.go +++ b/swarm/api/client/client_test.go @@ -508,12 +508,12 @@ func TestClientCreateUpdateFeed(t *testing.T) { // now try retrieving feed updates without a manifest - feed := &feeds.Feed{ + fd := &feeds.Feed{ Topic: topic, User: signer.Address(), } - lookupParams := feeds.NewQueryLatest(feed, lookup.NoClue) + lookupParams := feeds.NewQueryLatest(fd, lookup.NoClue) reader, err = client.QueryFeed(lookupParams, "") if err != nil { t.Fatalf("Error retrieving feed updates: %s", err) diff --git a/swarm/storage/feeds/handler_test.go b/swarm/storage/feeds/handler_test.go index 967bc6d4b..b35a7c1c1 100644 --- a/swarm/storage/feeds/handler_test.go +++ b/swarm/storage/feeds/handler_test.go @@ -94,7 +94,7 @@ func TestFeedsHandler(t *testing.T) { defer cancel() topic, _ := NewTopic("Mess with Swarm feeds code and see what ghost catches you", nil) - feed := Feed{ + fd := Feed{ Topic: topic, User: signer.Address(), } @@ -107,7 +107,7 @@ func TestFeedsHandler(t *testing.T) { "clyde", // t=4285 } - request := NewFirstRequest(feed.Topic) // this timestamps the update at t = 4200 (start time) + request := NewFirstRequest(fd.Topic) // this timestamps the update at t = 4200 (start time) chunkAddress := make(map[string]storage.Address) data := []byte(updates[0]) request.SetData(data) @@ -270,7 +270,7 @@ func TestSparseUpdates(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() topic, _ := NewTopic("Very slow updates", nil) - feed := Feed{ + fd := Feed{ Topic: topic, User: signer.Address(), } @@ -280,7 +280,7 @@ func TestSparseUpdates(t *testing.T) { var epoch lookup.Epoch var lastUpdateTime uint64 for T := uint64(0); T < today; T += 5 * Year { - request := NewFirstRequest(feed.Topic) + request := NewFirstRequest(fd.Topic) request.Epoch = lookup.GetNextEpoch(epoch, T) request.data = generateData(T) // this generates some data that depends on T, so we can check later request.Sign(signer) @@ -295,14 +295,14 @@ func TestSparseUpdates(t *testing.T) { lastUpdateTime = T } - query := NewQuery(&feed, today, lookup.NoClue) + query := NewQuery(&fd, 
today, lookup.NoClue) _, err = rh.Lookup(ctx, query) if err != nil { t.Fatal(err) } - _, content, err := rh.GetContent(&feed) + _, content, err := rh.GetContent(&fd) if err != nil { t.Fatal(err) } @@ -321,7 +321,7 @@ func TestSparseUpdates(t *testing.T) { t.Fatal(err) } - _, content, err = rh.GetContent(&feed) + _, content, err = rh.GetContent(&fd) if err != nil { t.Fatal(err) } @@ -350,11 +350,11 @@ func TestValidator(t *testing.T) { // create new feed topic, _ := NewTopic(subtopicName, nil) - feed := Feed{ + fd := Feed{ Topic: topic, User: signer.Address(), } - mr := NewFirstRequest(feed.Topic) + mr := NewFirstRequest(fd.Topic) // chunk with address data := []byte("foo") @@ -420,7 +420,7 @@ func TestValidatorInStore(t *testing.T) { badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data()) topic, _ := NewTopic("xyzzy", nil) - feed := Feed{ + fd := Feed{ Topic: topic, User: signer.Address(), } @@ -430,7 +430,7 @@ func TestValidatorInStore(t *testing.T) { Epoch: lookup.Epoch{Time: 42, Level: 1, }, - Feed: feed, + Feed: fd, } updateAddr := id.Addr() diff --git a/swarm/storage/feeds/request_test.go b/swarm/storage/feeds/request_test.go index a9a4d1505..8a15815a4 100644 --- a/swarm/storage/feeds/request_test.go +++ b/swarm/storage/feeds/request_test.go @@ -256,7 +256,7 @@ func TestReverse(t *testing.T) { defer teardownTest() topic, _ := NewTopic("Cervantes quotes", nil) - feed := Feed{ + fd := Feed{ Topic: topic, User: signer.Address(), } @@ -264,7 +264,7 @@ func TestReverse(t *testing.T) { data := []byte("Donde una puerta se cierra, otra se abre") request := new(Request) - request.Feed = feed + request.Feed = fd request.Epoch = epoch request.data = data -- cgit From de01178c18766b9f744acc94fe2b96804f998e40 Mon Sep 17 00:00:00 2001 From: Javier Peletier Date: Wed, 3 Oct 2018 09:15:17 +0200 Subject: swarm/storage/feed: Renamed package --- swarm/OWNERS | 2 +- swarm/api/api.go | 48 +-- swarm/api/client/client.go | 16 +- swarm/api/client/client_test.go | 20 +- swarm/api/http/server.go | 24 +- swarm/api/http/server_test.go | 20 +- swarm/api/manifest.go | 6 +- swarm/pss/notify/notify.go | 2 +- swarm/storage/feed/binaryserializer.go | 44 +++ swarm/storage/feed/binaryserializer_test.go | 98 +++++ swarm/storage/feed/cacheentry.go | 48 +++ swarm/storage/feed/doc.go | 43 +++ swarm/storage/feed/error.go | 73 ++++ swarm/storage/feed/feed.go | 125 +++++++ swarm/storage/feed/feed_test.go | 36 ++ swarm/storage/feed/handler.go | 295 +++++++++++++++ swarm/storage/feed/handler_test.go | 520 +++++++++++++++++++++++++++ swarm/storage/feed/id.go | 123 +++++++ swarm/storage/feed/id_test.go | 28 ++ swarm/storage/feed/lookup/epoch.go | 91 +++++ swarm/storage/feed/lookup/epoch_test.go | 57 +++ swarm/storage/feed/lookup/lookup.go | 180 ++++++++++ swarm/storage/feed/lookup/lookup_test.go | 414 +++++++++++++++++++++ swarm/storage/feed/query.go | 78 ++++ swarm/storage/feed/query_test.go | 38 ++ swarm/storage/feed/request.go | 284 +++++++++++++++ swarm/storage/feed/request_test.go | 312 ++++++++++++++++ swarm/storage/feed/sign.go | 75 ++++ swarm/storage/feed/testutil.go | 71 ++++ swarm/storage/feed/timestampprovider.go | 84 +++++ swarm/storage/feed/topic.go | 105 ++++++ swarm/storage/feed/topic_test.go | 50 +++ swarm/storage/feed/update.go | 132 +++++++ swarm/storage/feed/update_test.go | 50 +++ swarm/storage/feeds/binaryserializer.go | 44 --- swarm/storage/feeds/binaryserializer_test.go | 98 ----- swarm/storage/feeds/cacheentry.go | 48 --- swarm/storage/feeds/doc.go | 43 --- swarm/storage/feeds/error.go | 73 ---- 
swarm/storage/feeds/feed.go | 125 ------- swarm/storage/feeds/feed_test.go | 36 -- swarm/storage/feeds/handler.go | 295 --------------- swarm/storage/feeds/handler_test.go | 520 --------------------------- swarm/storage/feeds/id.go | 123 ------- swarm/storage/feeds/id_test.go | 28 -- swarm/storage/feeds/lookup/epoch.go | 91 ----- swarm/storage/feeds/lookup/epoch_test.go | 57 --- swarm/storage/feeds/lookup/lookup.go | 180 ---------- swarm/storage/feeds/lookup/lookup_test.go | 414 --------------------- swarm/storage/feeds/query.go | 78 ---- swarm/storage/feeds/query_test.go | 38 -- swarm/storage/feeds/request.go | 284 --------------- swarm/storage/feeds/request_test.go | 312 ---------------- swarm/storage/feeds/sign.go | 75 ---- swarm/storage/feeds/testutil.go | 71 ---- swarm/storage/feeds/timestampprovider.go | 84 ----- swarm/storage/feeds/topic.go | 105 ------ swarm/storage/feeds/topic_test.go | 50 --- swarm/storage/feeds/update.go | 132 ------- swarm/storage/feeds/update_test.go | 50 --- swarm/swarm.go | 8 +- swarm/testutil/http.go | 12 +- 62 files changed, 3533 insertions(+), 3533 deletions(-) create mode 100644 swarm/storage/feed/binaryserializer.go create mode 100644 swarm/storage/feed/binaryserializer_test.go create mode 100644 swarm/storage/feed/cacheentry.go create mode 100644 swarm/storage/feed/doc.go create mode 100644 swarm/storage/feed/error.go create mode 100644 swarm/storage/feed/feed.go create mode 100644 swarm/storage/feed/feed_test.go create mode 100644 swarm/storage/feed/handler.go create mode 100644 swarm/storage/feed/handler_test.go create mode 100644 swarm/storage/feed/id.go create mode 100644 swarm/storage/feed/id_test.go create mode 100644 swarm/storage/feed/lookup/epoch.go create mode 100644 swarm/storage/feed/lookup/epoch_test.go create mode 100644 swarm/storage/feed/lookup/lookup.go create mode 100644 swarm/storage/feed/lookup/lookup_test.go create mode 100644 swarm/storage/feed/query.go create mode 100644 swarm/storage/feed/query_test.go create mode 100644 swarm/storage/feed/request.go create mode 100644 swarm/storage/feed/request_test.go create mode 100644 swarm/storage/feed/sign.go create mode 100644 swarm/storage/feed/testutil.go create mode 100644 swarm/storage/feed/timestampprovider.go create mode 100644 swarm/storage/feed/topic.go create mode 100644 swarm/storage/feed/topic_test.go create mode 100644 swarm/storage/feed/update.go create mode 100644 swarm/storage/feed/update_test.go delete mode 100644 swarm/storage/feeds/binaryserializer.go delete mode 100644 swarm/storage/feeds/binaryserializer_test.go delete mode 100644 swarm/storage/feeds/cacheentry.go delete mode 100644 swarm/storage/feeds/doc.go delete mode 100644 swarm/storage/feeds/error.go delete mode 100644 swarm/storage/feeds/feed.go delete mode 100644 swarm/storage/feeds/feed_test.go delete mode 100644 swarm/storage/feeds/handler.go delete mode 100644 swarm/storage/feeds/handler_test.go delete mode 100644 swarm/storage/feeds/id.go delete mode 100644 swarm/storage/feeds/id_test.go delete mode 100644 swarm/storage/feeds/lookup/epoch.go delete mode 100644 swarm/storage/feeds/lookup/epoch_test.go delete mode 100644 swarm/storage/feeds/lookup/lookup.go delete mode 100644 swarm/storage/feeds/lookup/lookup_test.go delete mode 100644 swarm/storage/feeds/query.go delete mode 100644 swarm/storage/feeds/query_test.go delete mode 100644 swarm/storage/feeds/request.go delete mode 100644 swarm/storage/feeds/request_test.go delete mode 100644 swarm/storage/feeds/sign.go delete mode 100644 
swarm/storage/feeds/testutil.go delete mode 100644 swarm/storage/feeds/timestampprovider.go delete mode 100644 swarm/storage/feeds/topic.go delete mode 100644 swarm/storage/feeds/topic_test.go delete mode 100644 swarm/storage/feeds/update.go delete mode 100644 swarm/storage/feeds/update_test.go (limited to 'swarm') diff --git a/swarm/OWNERS b/swarm/OWNERS index cea47766f..d4204e08c 100644 --- a/swarm/OWNERS +++ b/swarm/OWNERS @@ -22,5 +22,5 @@ swarm ├── storage ─────────────── ethersphere │ ├── encryption ──────── @gbalint, @zelig, @nagydani │ ├── mock ────────────── @janos -│ └── feeds ───────────── @nolash, @jpeletier +│ └── feed ────────────── @nolash, @jpeletier └── testutil ────────────── @lmars \ No newline at end of file diff --git a/swarm/api/api.go b/swarm/api/api.go index a23e09e68..7bb631967 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -45,8 +45,8 @@ import ( "github.com/ethereum/go-ethereum/swarm/multihash" "github.com/ethereum/go-ethereum/swarm/spancontext" "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/feeds" - "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" + "github.com/ethereum/go-ethereum/swarm/storage/feed" + "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" opentracing "github.com/opentracing/opentracing-go" ) @@ -236,18 +236,18 @@ on top of the FileStore it is the public interface of the FileStore which is included in the ethereum stack */ type API struct { - feeds *feeds.Handler + feed *feed.Handler fileStore *storage.FileStore dns Resolver Decryptor func(context.Context, string) DecryptFunc } // NewAPI the api constructor initialises a new API instance. -func NewAPI(fileStore *storage.FileStore, dns Resolver, feedsHandler *feeds.Handler, pk *ecdsa.PrivateKey) (self *API) { +func NewAPI(fileStore *storage.FileStore, dns Resolver, feedHandler *feed.Handler, pk *ecdsa.PrivateKey) (self *API) { self = &API{ fileStore: fileStore, dns: dns, - feeds: feedsHandler, + feed: feedHandler, Decryptor: func(ctx context.Context, credentials string) DecryptFunc { return self.doDecrypt(ctx, credentials, pk) }, @@ -409,7 +409,7 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage if entry.Feed == nil { return reader, mimeType, status, nil, fmt.Errorf("Cannot decode Feed in manifest") } - _, err := a.feeds.Lookup(ctx, feeds.NewQueryLatest(entry.Feed, lookup.NoClue)) + _, err := a.feed.Lookup(ctx, feed.NewQueryLatest(entry.Feed, lookup.NoClue)) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound @@ -417,7 +417,7 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage return reader, mimeType, status, nil, err } // get the data of the update - _, rsrcData, err := a.feeds.GetContent(entry.Feed) + _, rsrcData, err := a.feed.GetContent(entry.Feed) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound @@ -958,13 +958,13 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver } // FeedsLookup finds Swarm feeds updates at specific points in time, or the latest update -func (a *API) FeedsLookup(ctx context.Context, query *feeds.Query) ([]byte, error) { - _, err := a.feeds.Lookup(ctx, query) +func (a *API) FeedsLookup(ctx context.Context, query *feed.Query) ([]byte, error) { + _, err := a.feed.Lookup(ctx, query) if err != nil { return nil, err } var data []byte - _, data, err = a.feeds.GetContent(&query.Feed) + _, data, err = a.feed.GetContent(&query.Feed) if err != nil { return nil, err } @@ -972,18 
+972,18 @@ func (a *API) FeedsLookup(ctx context.Context, query *feeds.Query) ([]byte, erro } // FeedsNewRequest creates a Request object to update a specific feed -func (a *API) FeedsNewRequest(ctx context.Context, feed *feeds.Feed) (*feeds.Request, error) { - return a.feeds.NewRequest(ctx, feed) +func (a *API) FeedsNewRequest(ctx context.Context, feed *feed.Feed) (*feed.Request, error) { + return a.feed.NewRequest(ctx, feed) } // FeedsUpdate publishes a new update on the given feed -func (a *API) FeedsUpdate(ctx context.Context, request *feeds.Request) (storage.Address, error) { - return a.feeds.Update(ctx, request) +func (a *API) FeedsUpdate(ctx context.Context, request *feed.Request) (storage.Address, error) { + return a.feed.Update(ctx, request) } // FeedsHashSize returned the size of the digest produced by Swarm feeds' hashing function func (a *API) FeedsHashSize() int { - return a.feeds.HashSize + return a.feed.HashSize } // ErrCannotLoadFeedManifest is returned when looking up a feeds manifest fails @@ -993,7 +993,7 @@ var ErrCannotLoadFeedManifest = errors.New("Cannot load feed manifest") var ErrNotAFeedManifest = errors.New("Not a feed manifest") // ResolveFeedManifest retrieves the Swarm feed manifest for the given address, and returns the referenced Feed. -func (a *API) ResolveFeedManifest(ctx context.Context, addr storage.Address) (*feeds.Feed, error) { +func (a *API) ResolveFeedManifest(ctx context.Context, addr storage.Address) (*feed.Feed, error) { trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt) if err != nil { return nil, ErrCannotLoadFeedManifest @@ -1016,8 +1016,8 @@ var ErrCannotResolveFeed = errors.New("Cannot resolve Feed") // ResolveFeed attempts to extract feed information out of the manifest, if provided // If not, it attempts to extract the feed out of a set of key-value pairs -func (a *API) ResolveFeed(ctx context.Context, uri *URI, values feeds.Values) (*feeds.Feed, error) { - var feed *feeds.Feed +func (a *API) ResolveFeed(ctx context.Context, uri *URI, values feed.Values) (*feed.Feed, error) { + var fd *feed.Feed var err error if uri.Addr != "" { // resolve the content key. 
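For orientation, the renamed api.go surface above (FeedsNewRequest, FeedsUpdate, FeedsLookup) composes roughly as sketched below. This is not part of the patch: the package name, topic string and helper name are invented, and an already-constructed *api.API plus a feed.Signer are assumed.

package feedexample

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
)

// publishAndRead publishes one update on a feed owned by signer and reads the
// latest update back, using only functions that appear in this patch.
func publishAndRead(ctx context.Context, a *api.API, signer feed.Signer, payload []byte) ([]byte, error) {
	topic, err := feed.NewTopic("illustrative topic", nil)
	if err != nil {
		return nil, err
	}
	fd := &feed.Feed{Topic: topic, User: signer.Address()}

	// the handler pre-fills the request with the next epoch for this feed
	request, err := a.FeedsNewRequest(ctx, fd)
	if err != nil {
		return nil, err
	}
	request.SetData(payload)
	if err := request.Sign(signer); err != nil {
		return nil, err
	}
	if _, err := a.FeedsUpdate(ctx, request); err != nil {
		return nil, err
	}

	// fetch the latest update of the same feed, starting with no lookup hint
	return a.FeedsLookup(ctx, feed.NewQueryLatest(fd, lookup.NoClue))
}

The client tests in this patch (TestClientCreateUpdateFeed) exercise the same flow through the HTTP client.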
@@ -1030,20 +1030,20 @@ func (a *API) ResolveFeed(ctx context.Context, uri *URI, values feeds.Values) (* } // get the Swarm feed from the manifest - feed, err = a.ResolveFeedManifest(ctx, manifestAddr) + fd, err = a.ResolveFeedManifest(ctx, manifestAddr) if err != nil { return nil, err } - log.Debug("handle.get.feed: resolved", "manifestkey", manifestAddr, "feed", feed.Hex()) + log.Debug("handle.get.feed: resolved", "manifestkey", manifestAddr, "feed", fd.Hex()) } else { - var v feeds.Feed - if err := v.FromValues(values); err != nil { + var f feed.Feed + if err := f.FromValues(values); err != nil { return nil, ErrCannotResolveFeed } - feed = &v + fd = &f } - return feed, nil + return fd, nil } // MimeOctetStream default value of http Content-Type header diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go index ac218e6a7..d9837ca73 100644 --- a/swarm/api/client/client.go +++ b/swarm/api/client/client.go @@ -35,7 +35,7 @@ import ( "strings" "github.com/ethereum/go-ethereum/swarm/api" - "github.com/ethereum/go-ethereum/swarm/storage/feeds" + "github.com/ethereum/go-ethereum/swarm/storage/feed" ) var ( @@ -608,7 +608,7 @@ var ErrNoFeedUpdatesFound = errors.New("No updates found for this feed") // data // Returns the resulting feed manifest address that you can use to include in an ENS Resolver (setContent) // or reference future updates (Client.UpdateFeed) -func (c *Client) CreateFeedWithManifest(request *feeds.Request) (string, error) { +func (c *Client) CreateFeedWithManifest(request *feed.Request) (string, error) { responseStream, err := c.updateFeed(request, true) if err != nil { return "", err @@ -628,12 +628,12 @@ func (c *Client) CreateFeedWithManifest(request *feeds.Request) (string, error) } // UpdateFeed allows you to set a new version of your content -func (c *Client) UpdateFeed(request *feeds.Request) error { +func (c *Client) UpdateFeed(request *feed.Request) error { _, err := c.updateFeed(request, false) return err } -func (c *Client) updateFeed(request *feeds.Request, createManifest bool) (io.ReadCloser, error) { +func (c *Client) updateFeed(request *feed.Request, createManifest bool) (io.ReadCloser, error) { URL, err := url.Parse(c.Gateway) if err != nil { return nil, err @@ -662,7 +662,7 @@ func (c *Client) updateFeed(request *feeds.Request, createManifest bool) (io.Rea // QueryFeed returns a byte stream with the raw content of the feed update // manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver // points to that address -func (c *Client) QueryFeed(query *feeds.Query, manifestAddressOrDomain string) (io.ReadCloser, error) { +func (c *Client) QueryFeed(query *feed.Query, manifestAddressOrDomain string) (io.ReadCloser, error) { return c.queryFeed(query, manifestAddressOrDomain, false) } @@ -670,7 +670,7 @@ func (c *Client) QueryFeed(query *feeds.Query, manifestAddressOrDomain string) ( // manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver // points to that address // meta set to true will instruct the node return feed metainformation instead -func (c *Client) queryFeed(query *feeds.Query, manifestAddressOrDomain string, meta bool) (io.ReadCloser, error) { +func (c *Client) queryFeed(query *feed.Query, manifestAddressOrDomain string, meta bool) (io.ReadCloser, error) { URL, err := url.Parse(c.Gateway) if err != nil { return nil, err @@ -709,7 +709,7 @@ func (c *Client) queryFeed(query *feeds.Query, manifestAddressOrDomain string, m // 
GetFeedRequest returns a structure that describes the referenced feed status // manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver // points to that address -func (c *Client) GetFeedRequest(query *feeds.Query, manifestAddressOrDomain string) (*feeds.Request, error) { +func (c *Client) GetFeedRequest(query *feed.Query, manifestAddressOrDomain string) (*feed.Request, error) { responseStream, err := c.queryFeed(query, manifestAddressOrDomain, true) if err != nil { @@ -722,7 +722,7 @@ func (c *Client) GetFeedRequest(query *feeds.Query, manifestAddressOrDomain stri return nil, err } - var metadata feeds.Request + var metadata feed.Request if err := metadata.UnmarshalJSON(body); err != nil { return nil, err } diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go index 2aecdb299..03c6cbb28 100644 --- a/swarm/api/client/client_test.go +++ b/swarm/api/client/client_test.go @@ -25,14 +25,14 @@ import ( "sort" "testing" - "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" + "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/swarm/api" swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http" "github.com/ethereum/go-ethereum/swarm/multihash" - "github.com/ethereum/go-ethereum/swarm/storage/feeds" + "github.com/ethereum/go-ethereum/swarm/storage/feed" "github.com/ethereum/go-ethereum/swarm/testutil" ) @@ -361,12 +361,12 @@ func TestClientMultipartUpload(t *testing.T) { } } -func newTestSigner() (*feeds.GenericSigner, error) { +func newTestSigner() (*feed.GenericSigner, error) { privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") if err != nil { return nil, err } - return feeds.NewGenericSigner(privKey), nil + return feed.NewGenericSigner(privKey), nil } // test the transparent resolving of multihash feed updates with bzz:// scheme @@ -394,9 +394,9 @@ func TestClientCreateFeedMultihash(t *testing.T) { mh := multihash.ToMultihash(s) // our feed topic - topic, _ := feeds.NewTopic("foo.eth", nil) + topic, _ := feed.NewTopic("foo.eth", nil) - createRequest := feeds.NewFirstRequest(topic) + createRequest := feed.NewFirstRequest(topic) createRequest.SetData(mh) if err := createRequest.Sign(signer); err != nil { @@ -448,8 +448,8 @@ func TestClientCreateUpdateFeed(t *testing.T) { databytes := []byte("En un lugar de La Mancha, de cuyo nombre no quiero acordarme...") // our feed topic name - topic, _ := feeds.NewTopic("El Quijote", nil) - createRequest := feeds.NewFirstRequest(topic) + topic, _ := feed.NewTopic("El Quijote", nil) + createRequest := feed.NewFirstRequest(topic) createRequest.SetData(databytes) if err := createRequest.Sign(signer); err != nil { @@ -508,12 +508,12 @@ func TestClientCreateUpdateFeed(t *testing.T) { // now try retrieving feed updates without a manifest - fd := &feeds.Feed{ + fd := &feed.Feed{ Topic: topic, User: signer.Address(), } - lookupParams := feeds.NewQueryLatest(fd, lookup.NoClue) + lookupParams := feed.NewQueryLatest(fd, lookup.NoClue) reader, err = client.QueryFeed(lookupParams, "") if err != nil { t.Fatalf("Error retrieving feed updates: %s", err) diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index 0cfeb1f22..370aca5a7 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -40,7 +40,7 @@ import ( "github.com/ethereum/go-ethereum/swarm/api" 
"github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/feeds" + "github.com/ethereum/go-ethereum/swarm/storage/feed" "github.com/rs/cors" ) @@ -458,7 +458,7 @@ func (s *Server) HandleDelete(w http.ResponseWriter, r *http.Request) { } // Handles feed manifest creation and feed updates -// The POST request admits a JSON structure as defined in the feeds package: `feeds.updateRequestJSON` +// The POST request admits a JSON structure as defined in the feeds package: `feed.updateRequestJSON` // The requests can be to a) create a feed manifest, b) update a feed or c) both a+b: create a feed manifest and publish a first update func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) { ruid := GetRUID(r.Context()) @@ -466,14 +466,14 @@ func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) { log.Debug("handle.post.feed", "ruid", ruid) var err error - // Creation and update must send feeds.updateRequestJSON JSON structure + // Creation and update must send feed.updateRequestJSON JSON structure body, err := ioutil.ReadAll(r.Body) if err != nil { RespondError(w, r, err.Error(), http.StatusInternalServerError) return } - feed, err := s.api.ResolveFeed(r.Context(), uri, r.URL.Query()) + fd, err := s.api.ResolveFeed(r.Context(), uri, r.URL.Query()) if err != nil { // couldn't parse query string or retrieve manifest getFail.Inc(1) httpStatus := http.StatusBadRequest @@ -484,8 +484,8 @@ func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) { return } - var updateRequest feeds.Request - updateRequest.Feed = *feed + var updateRequest feed.Request + updateRequest.Feed = *fd query := r.URL.Query() if err := updateRequest.FromValues(query, body); err != nil { // decodes request from query parameters @@ -552,7 +552,7 @@ func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) { log.Debug("handle.get.feed", "ruid", ruid) var err error - feed, err := s.api.ResolveFeed(r.Context(), uri, r.URL.Query()) + fd, err := s.api.ResolveFeed(r.Context(), uri, r.URL.Query()) if err != nil { // couldn't parse query string or retrieve manifest getFail.Inc(1) httpStatus := http.StatusBadRequest @@ -565,10 +565,10 @@ func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) { // determine if the query specifies period and version or it is a metadata query if r.URL.Query().Get("meta") == "1" { - unsignedUpdateRequest, err := s.api.FeedsNewRequest(r.Context(), feed) + unsignedUpdateRequest, err := s.api.FeedsNewRequest(r.Context(), fd) if err != nil { getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("cannot retrieve feed metadata for feed=%s: %s", feed.Hex(), err), http.StatusNotFound) + RespondError(w, r, fmt.Sprintf("cannot retrieve feed metadata for feed=%s: %s", fd.Hex(), err), http.StatusNotFound) return } rawResponse, err := unsignedUpdateRequest.MarshalJSON() @@ -582,7 +582,7 @@ func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) { return } - lookupParams := &feeds.Query{Feed: *feed} + lookupParams := &feed.Query{Feed: *fd} if err = lookupParams.FromValues(r.URL.Query()); err != nil { // parse period, version RespondError(w, r, fmt.Sprintf("invalid feed update request:%s", err), http.StatusBadRequest) return @@ -598,7 +598,7 @@ func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) { } // All ok, serve the retrieved update - log.Debug("Found update", "feed", feed.Hex(), "ruid", ruid) + log.Debug("Found update", "feed", fd.Hex(), 
"ruid", ruid) w.Header().Set("Content-Type", api.MimeOctetStream) http.ServeContent(w, r, "", time.Now(), bytes.NewReader(data)) } @@ -606,7 +606,7 @@ func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) { func (s *Server) translateFeedError(w http.ResponseWriter, r *http.Request, supErr string, err error) (int, error) { code := 0 defaultErr := fmt.Errorf("%s: %v", supErr, err) - rsrcErr, ok := err.(*feeds.Error) + rsrcErr, ok := err.(*feed.Error) if !ok && rsrcErr != nil { code = rsrcErr.Code() } diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index 4ab99b209..1cf7ff577 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -38,7 +38,7 @@ import ( "testing" "time" - "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" + "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -48,7 +48,7 @@ import ( swarm "github.com/ethereum/go-ethereum/swarm/api/client" "github.com/ethereum/go-ethereum/swarm/multihash" "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/feeds" + "github.com/ethereum/go-ethereum/swarm/storage/feed" "github.com/ethereum/go-ethereum/swarm/testutil" ) @@ -62,12 +62,12 @@ func serverFunc(api *api.API) testutil.TestServer { return NewServer(api, "") } -func newTestSigner() (*feeds.GenericSigner, error) { +func newTestSigner() (*feed.GenericSigner, error) { privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") if err != nil { return nil, err } - return feeds.NewGenericSigner(privKey), nil + return feed.NewGenericSigner(privKey), nil } // test the transparent resolving of multihash-containing feed updates with bzz:// scheme @@ -103,8 +103,8 @@ func TestBzzFeedMultihash(t *testing.T) { log.Info("added data", "manifest", string(b), "data", common.ToHex(mh)) - topic, _ := feeds.NewTopic("foo.eth", nil) - updateRequest := feeds.NewFirstRequest(topic) + topic, _ := feed.NewTopic("foo.eth", nil) + updateRequest := feed.NewFirstRequest(topic) updateRequest.SetData(mh) @@ -182,8 +182,8 @@ func TestBzzFeed(t *testing.T) { //data for update 2 update2Data := []byte("foo") - topic, _ := feeds.NewTopic("foo.eth", nil) - updateRequest := feeds.NewFirstRequest(topic) + topic, _ := feed.NewTopic("foo.eth", nil) + updateRequest := feed.NewFirstRequest(topic) if err != nil { t.Fatal(err) } @@ -319,7 +319,7 @@ func TestBzzFeed(t *testing.T) { if err != nil { t.Fatal(err) } - updateRequest = &feeds.Request{} + updateRequest = &feed.Request{} if err = updateRequest.UnmarshalJSON(b); err != nil { t.Fatalf("Error decoding feed metadata: %s", err) } @@ -365,7 +365,7 @@ func TestBzzFeed(t *testing.T) { // test manifest-less queries log.Info("get first update in update1Timestamp via direct query") - query := feeds.NewQuery(&updateRequest.Feed, update1Timestamp, lookup.NoClue) + query := feed.NewQuery(&updateRequest.Feed, update1Timestamp, lookup.NoClue) urlq, err := url.Parse(fmt.Sprintf("%s/bzz-feed:/", srv.URL)) if err != nil { diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index e45ae85c4..7c4cc88e4 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -27,7 +27,7 @@ import ( "strings" "time" - "github.com/ethereum/go-ethereum/swarm/storage/feeds" + "github.com/ethereum/go-ethereum/swarm/storage/feed" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/swarm/log" @@ -56,7 +56,7 @@ type 
ManifestEntry struct { ModTime time.Time `json:"mod_time,omitempty"` Status int `json:"status,omitempty"` Access *AccessEntry `json:"access,omitempty"` - Feed *feeds.Feed `json:"feed,omitempty"` + Feed *feed.Feed `json:"feed,omitempty"` } // ManifestList represents the result of listing files in a manifest @@ -82,7 +82,7 @@ func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, // Manifest hack for supporting Swarm feeds from the bzz: scheme // see swarm/api/api.go:API.Get() for more information -func (a *API) NewFeedManifest(ctx context.Context, feed *feeds.Feed) (storage.Address, error) { +func (a *API) NewFeedManifest(ctx context.Context, feed *feed.Feed) (storage.Address, error) { var manifest Manifest entry := ManifestEntry{ Feed: feed, diff --git a/swarm/pss/notify/notify.go b/swarm/pss/notify/notify.go index 01ecb4ff9..3731fb9db 100644 --- a/swarm/pss/notify/notify.go +++ b/swarm/pss/notify/notify.go @@ -317,7 +317,7 @@ func (c *Controller) handleStartMsg(msg *Msg, keyid string) (err error) { return err } - // TODO this is set to zero-length byte pending decision on protocol for initial message, whether it should include message or not, and how to trigger the initial message so that current state of Swarm Feed is sent upon subscription + // TODO this is set to zero-length byte pending decision on protocol for initial message, whether it should include message or not, and how to trigger the initial message so that current state of Swarm feed is sent upon subscription notify := []byte{} replyMsg := NewMsg(MsgCodeNotifyWithKey, msg.namestring, make([]byte, len(notify)+symKeyLength)) copy(replyMsg.Payload, notify) diff --git a/swarm/storage/feed/binaryserializer.go b/swarm/storage/feed/binaryserializer.go new file mode 100644 index 000000000..4e4f67a09 --- /dev/null +++ b/swarm/storage/feed/binaryserializer.go @@ -0,0 +1,44 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
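The manifest changes above (ManifestEntry gaining a Feed field, NewFeedManifest) pair with ResolveFeedManifest from api.go. A hedged sketch of that round trip, with an invented package and function name, assuming an already-constructed *api.API:

package feedexample

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
)

// roundTripManifest stores a feed-type manifest for fd and then recovers the feed
// descriptor from it, which is what bzz:// retrieval of a feed relies on.
func roundTripManifest(ctx context.Context, a *api.API, fd *feed.Feed) (*feed.Feed, error) {
	manifestAddr, err := a.NewFeedManifest(ctx, fd)
	if err != nil {
		return nil, err
	}
	// the address can be published (e.g. via ENS); readers resolve the feed back out
	return a.ResolveFeedManifest(ctx, manifestAddr)
}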
+ +package feed + +import "github.com/ethereum/go-ethereum/common/hexutil" + +type binarySerializer interface { + binaryPut(serializedData []byte) error + binaryLength() int + binaryGet(serializedData []byte) error +} + +// Values interface represents a string key-value store +// useful for building query strings +type Values interface { + Get(key string) string + Set(key, value string) +} + +type valueSerializer interface { + FromValues(values Values) error + AppendValues(values Values) +} + +// Hex serializes the structure and converts it to a hex string +func Hex(bin binarySerializer) string { + b := make([]byte, bin.binaryLength()) + bin.binaryPut(b) + return hexutil.Encode(b) +} diff --git a/swarm/storage/feed/binaryserializer_test.go b/swarm/storage/feed/binaryserializer_test.go new file mode 100644 index 000000000..37828d1c9 --- /dev/null +++ b/swarm/storage/feed/binaryserializer_test.go @@ -0,0 +1,98 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feed + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +// KV mocks a key value store +type KV map[string]string + +func (kv KV) Get(key string) string { + return kv[key] +} +func (kv KV) Set(key, value string) { + kv[key] = value +} + +func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) { + if hexutil.Encode(actualValue) != expectedHex { + t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue)) + } +} + +func testBinarySerializerRecovery(t *testing.T, bin binarySerializer, expectedHex string) { + name := reflect.TypeOf(bin).Elem().Name() + serialized := make([]byte, bin.binaryLength()) + if err := bin.binaryPut(serialized); err != nil { + t.Fatalf("%s.binaryPut error when trying to serialize structure: %s", name, err) + } + + compareByteSliceToExpectedHex(t, name, serialized, expectedHex) + + recovered := reflect.New(reflect.TypeOf(bin).Elem()).Interface().(binarySerializer) + if err := recovered.binaryGet(serialized); err != nil { + t.Fatalf("%s.binaryGet error when trying to deserialize structure: %s", name, err) + } + + if !reflect.DeepEqual(bin, recovered) { + t.Fatalf("Expected that the recovered %s equals the marshalled %s", name, name) + } + + serializedWrongLength := make([]byte, 1) + copy(serializedWrongLength[:], serialized) + if err := recovered.binaryGet(serializedWrongLength); err == nil { + t.Fatalf("Expected %s.binaryGet to fail since data is too small", name) + } +} + +func testBinarySerializerLengthCheck(t *testing.T, bin binarySerializer) { + name := reflect.TypeOf(bin).Elem().Name() + // make a slice that is too small to contain the metadata + serialized := make([]byte, bin.binaryLength()-1) + + if err := 
bin.binaryPut(serialized); err == nil { + t.Fatalf("Expected %s.binaryPut to fail, since target slice is too small", name) + } +} + +func testValueSerializer(t *testing.T, v valueSerializer, expected KV) { + name := reflect.TypeOf(v).Elem().Name() + kv := make(KV) + + v.AppendValues(kv) + if !reflect.DeepEqual(expected, kv) { + expj, _ := json.Marshal(expected) + gotj, _ := json.Marshal(kv) + t.Fatalf("Expected %s.AppendValues to return %s, got %s", name, string(expj), string(gotj)) + } + + recovered := reflect.New(reflect.TypeOf(v).Elem()).Interface().(valueSerializer) + err := recovered.FromValues(kv) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(recovered, v) { + t.Fatalf("Expected recovered %s to be the same", name) + } +} diff --git a/swarm/storage/feed/cacheentry.go b/swarm/storage/feed/cacheentry.go new file mode 100644 index 000000000..be42008e9 --- /dev/null +++ b/swarm/storage/feed/cacheentry.go @@ -0,0 +1,48 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feed + +import ( + "bytes" + "context" + "time" + + "github.com/ethereum/go-ethereum/swarm/storage" +) + +const ( + hasherCount = 8 + feedsHashAlgorithm = storage.SHA3Hash + defaultRetrieveTimeout = 100 * time.Millisecond +) + +// cacheEntry caches the last known update of a specific Swarm feed. +type cacheEntry struct { + Update + *bytes.Reader + lastKey storage.Address +} + +// implements storage.LazySectionReader +func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) { + return int64(len(r.Update.data)), nil +} + +//returns the feed's topic +func (r *cacheEntry) Topic() Topic { + return r.Feed.Topic +} diff --git a/swarm/storage/feed/doc.go b/swarm/storage/feed/doc.go new file mode 100644 index 000000000..1f07948f2 --- /dev/null +++ b/swarm/storage/feed/doc.go @@ -0,0 +1,43 @@ +/* +Package feeds defines Swarm Feeds. + +Swarm Feeds allows a user to build an update feed about a particular topic +without resorting to ENS on each update. +The update scheme is built on swarm chunks with chunk keys following +a predictable, versionable pattern. + +A Feed is tied to a unique identifier that is deterministically generated out of +the chosen topic. + +A Feed is defined as the series of updates of a specific user about a particular topic + +Actual data updates are also made in the form of swarm chunks. The keys +of the updates are the hash of a concatenation of properties as follows: + +updateAddr = H(Feed, Epoch ID) +where H is the SHA3 hash function +Feed is the combination of Topic and the user address +Epoch ID is a time slot. See the lookup package for more information. + +A user looking up a the latest update in a Feed only needs to know the Topic +and the other user's address. 
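As a hedged illustration of the updateAddr formula just described (this is not part of doc.go): the ID type in this package carries exactly the Feed plus Epoch pair and exposes the address derivation. The package name, function and arguments below are invented.

package feedexample

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
)

// updateAddrFor derives the chunk address at which one specific update of a feed
// lives: the feed is the (topic, user) pair, the epoch is the time slot.
func updateAddrFor(topicName string, user common.Address, e lookup.Epoch) (storage.Address, error) {
	topic, err := feed.NewTopic(topicName, nil)
	if err != nil {
		return nil, err
	}
	id := feed.ID{
		Feed:  feed.Feed{Topic: topic, User: user},
		Epoch: e,
	}
	return id.Addr(), nil // this is the updateAddr = H(Feed, Epoch ID) described above
}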
+ +The Feed Update data is: +updatedata = Feed|Epoch|data + +The full update data that goes in the chunk payload is: +updatedata|sign(updatedata) + +Structure Summary: + +Request: Feed Update with signature + Update: headers + data + Header: Protocol version and reserved for future use placeholders + ID: Information about how to locate a specific update + Feed: Represents a user's series of publications about a specific Topic + Topic: Item that the updates are about + User: User who updates the Feed + Epoch: time slot where the update is stored + +*/ +package feed diff --git a/swarm/storage/feed/error.go b/swarm/storage/feed/error.go new file mode 100644 index 000000000..206ba3316 --- /dev/null +++ b/swarm/storage/feed/error.go @@ -0,0 +1,73 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feed + +import ( + "fmt" +) + +const ( + ErrInit = iota + ErrNotFound + ErrIO + ErrUnauthorized + ErrInvalidValue + ErrDataOverflow + ErrNothingToReturn + ErrCorruptData + ErrInvalidSignature + ErrNotSynced + ErrPeriodDepth + ErrCnt +) + +// Error is a the typed error object used for Swarm feeds +type Error struct { + code int + err string +} + +// Error implements the error interface +func (e *Error) Error() string { + return e.err +} + +// Code returns the error code +// Error codes are enumerated in the error.go file within the feeds package +func (e *Error) Code() int { + return e.code +} + +// NewError creates a new Swarm feeds Error object with the specified code and custom error message +func NewError(code int, s string) error { + if code < 0 || code >= ErrCnt { + panic("no such error code!") + } + r := &Error{ + err: s, + } + switch code { + case ErrNotFound, ErrIO, ErrUnauthorized, ErrInvalidValue, ErrDataOverflow, ErrNothingToReturn, ErrInvalidSignature, ErrNotSynced, ErrPeriodDepth, ErrCorruptData: + r.code = code + } + return r +} + +// NewErrorf is a convenience version of NewError that incorporates printf-style formatting +func NewErrorf(code int, format string, args ...interface{}) error { + return NewError(code, fmt.Sprintf(format, args...)) +} diff --git a/swarm/storage/feed/feed.go b/swarm/storage/feed/feed.go new file mode 100644 index 000000000..b6ea665a6 --- /dev/null +++ b/swarm/storage/feed/feed.go @@ -0,0 +1,125 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feed + +import ( + "hash" + "unsafe" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// Feed represents a particular user's stream of updates on a topic +type Feed struct { + Topic Topic `json:"topic"` + User common.Address `json:"user"` +} + +// Feed layout: +// TopicLength bytes +// userAddr common.AddressLength bytes +const feedLength = TopicLength + common.AddressLength + +// mapKey calculates a unique id for this feed. Used by the cache map in `Handler` +func (f *Feed) mapKey() uint64 { + serializedData := make([]byte, feedLength) + f.binaryPut(serializedData) + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + hasher.Write(serializedData) + hash := hasher.Sum(nil) + return *(*uint64)(unsafe.Pointer(&hash[0])) +} + +// binaryPut serializes this feed instance into the provided slice +func (f *Feed) binaryPut(serializedData []byte) error { + if len(serializedData) != feedLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize feed. Expected %d, got %d", feedLength, len(serializedData)) + } + var cursor int + copy(serializedData[cursor:cursor+TopicLength], f.Topic[:TopicLength]) + cursor += TopicLength + + copy(serializedData[cursor:cursor+common.AddressLength], f.User[:]) + cursor += common.AddressLength + + return nil +} + +// binaryLength returns the expected size of this structure when serialized +func (f *Feed) binaryLength() int { + return feedLength +} + +// binaryGet restores the current instance from the information contained in the passed slice +func (f *Feed) binaryGet(serializedData []byte) error { + if len(serializedData) != feedLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to read feed. 
Expected %d, got %d", feedLength, len(serializedData)) + } + + var cursor int + copy(f.Topic[:], serializedData[cursor:cursor+TopicLength]) + cursor += TopicLength + + copy(f.User[:], serializedData[cursor:cursor+common.AddressLength]) + cursor += common.AddressLength + + return nil +} + +// Hex serializes the feed to a hex string +func (f *Feed) Hex() string { + serializedData := make([]byte, feedLength) + f.binaryPut(serializedData) + return hexutil.Encode(serializedData) +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (f *Feed) FromValues(values Values) (err error) { + topic := values.Get("topic") + if topic != "" { + if err := f.Topic.FromHex(values.Get("topic")); err != nil { + return err + } + } else { // see if the user set name and relatedcontent + name := values.Get("name") + relatedContent, _ := hexutil.Decode(values.Get("relatedcontent")) + if len(relatedContent) > 0 { + if len(relatedContent) < storage.AddressLength { + return NewErrorf(ErrInvalidValue, "relatedcontent field must be a hex-encoded byte array exactly %d bytes long", storage.AddressLength) + } + relatedContent = relatedContent[:storage.AddressLength] + } + f.Topic, err = NewTopic(name, relatedContent) + if err != nil { + return err + } + } + f.User = common.HexToAddress(values.Get("user")) + return nil +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (f *Feed) AppendValues(values Values) { + values.Set("topic", f.Topic.Hex()) + values.Set("user", f.User.Hex()) +} diff --git a/swarm/storage/feed/feed_test.go b/swarm/storage/feed/feed_test.go new file mode 100644 index 000000000..6a575594f --- /dev/null +++ b/swarm/storage/feed/feed_test.go @@ -0,0 +1,36 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . +package feed + +import ( + "testing" +) + +func getTestFeed() *Feed { + topic, _ := NewTopic("world news report, every hour", nil) + return &Feed{ + Topic: topic, + User: newCharlieSigner().Address(), + } +} + +func TestFeedSerializerDeserializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestFeed(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781c") +} + +func TestFeedSerializerLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestFeed()) +} diff --git a/swarm/storage/feed/handler.go b/swarm/storage/feed/handler.go new file mode 100644 index 000000000..9e2640282 --- /dev/null +++ b/swarm/storage/feed/handler.go @@ -0,0 +1,295 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Handler is the API for feeds +// It enables creating, updating, syncing and retrieving feed updates and their data +package feed + +import ( + "bytes" + "context" + "fmt" + "sync" + "time" + + "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" + + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +type Handler struct { + chunkStore *storage.NetStore + HashSize int + cache map[uint64]*cacheEntry + cacheLock sync.RWMutex + storeTimeout time.Duration + queryMaxPeriods uint32 +} + +// HandlerParams pass parameters to the Handler constructor NewHandler +// Signer and TimestampProvider are mandatory parameters +type HandlerParams struct { +} + +// hashPool contains a pool of ready hashers +var hashPool sync.Pool + +// init initializes the package and hashPool +func init() { + hashPool = sync.Pool{ + New: func() interface{} { + return storage.MakeHashFunc(feedsHashAlgorithm)() + }, + } +} + +// NewHandler creates a new Swarm feeds API +func NewHandler(params *HandlerParams) *Handler { + fh := &Handler{ + cache: make(map[uint64]*cacheEntry), + } + + for i := 0; i < hasherCount; i++ { + hashfunc := storage.MakeHashFunc(feedsHashAlgorithm)() + if fh.HashSize == 0 { + fh.HashSize = hashfunc.Size() + } + hashPool.Put(hashfunc) + } + + return fh +} + +// SetStore sets the store backend for the Swarm feeds API +func (h *Handler) SetStore(store *storage.NetStore) { + h.chunkStore = store +} + +// Validate is a chunk validation method +// If it looks like a feed update, the chunk address is checked against the userAddr of the update's signature +// It implements the storage.ChunkValidator interface +func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { + dataLength := len(data) + if dataLength < minimumSignedUpdateLength { + return false + } + + // check if it is a properly formatted update chunk with + // valid signature and proof of ownership of the feed it is trying + // to update + + // First, deserialize the chunk + var r Request + if err := r.fromChunk(chunkAddr, data); err != nil { + log.Debug("Invalid feed update chunk", "addr", chunkAddr.Hex(), "err", err.Error()) + return false + } + + // Verify signatures and that the signer actually owns the feed + // If it fails, it means either the signature is not valid, data is corrupted + // or someone is trying to update someone else's feed. 
+ if err := r.Verify(); err != nil { + log.Debug("Invalid feed update signature", "err", err) + return false + } + + return true +} + +// GetContent retrieves the data payload of the last synced update of the feed +func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) { + if feed == nil { + return nil, nil, NewError(ErrInvalidValue, "feed is nil") + } + feedUpdate := h.get(feed) + if feedUpdate == nil { + return nil, nil, NewError(ErrNotFound, "feed update not cached") + } + return feedUpdate.lastKey, feedUpdate.data, nil +} + +// NewRequest prepares a Request structure with all the necessary information to +// just add the desired data and sign it. +// The resulting structure can then be signed and passed to Handler.Update to be verified and sent +func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request, err error) { + if feed == nil { + return nil, NewError(ErrInvalidValue, "feed cannot be nil") + } + + now := TimestampProvider.Now().Time + request = new(Request) + request.Header.Version = ProtocolVersion + + query := NewQueryLatest(feed, lookup.NoClue) + + feedUpdate, err := h.Lookup(ctx, query) + if err != nil { + if err.(*Error).code != ErrNotFound { + return nil, err + } + // not finding updates means that there is a network error + // or that the feed really does not have updates + } + + request.Feed = *feed + + // if we already have an update, then find next epoch + if feedUpdate != nil { + request.Epoch = lookup.GetNextEpoch(feedUpdate.Epoch, now) + } else { + request.Epoch = lookup.GetFirstEpoch(now) + } + + return request, nil +} + +// Lookup retrieves a specific or latest feed update +// Lookup works differently depending on the configuration of `query` +// See the `query` documentation and helper functions: +// `NewQueryLatest` and `NewQuery` +func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) { + + timeLimit := query.TimeLimit + if timeLimit == 0 { // if time limit is set to zero, the user wants to get the latest update + timeLimit = TimestampProvider.Now().Time + } + + if query.Hint == lookup.NoClue { // try to use our cache + entry := h.get(&query.Feed) + if entry != nil && entry.Epoch.Time <= timeLimit { // avoid bad hints + query.Hint = entry.Epoch + } + } + + // we can't look for anything without a store + if h.chunkStore == nil { + return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups") + } + + var id ID + id.Feed = query.Feed + var readCount int + + // Invoke the lookup engine. 
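+	// By default the engine is lookup.FluzCapacitorAlgorithm; returning
+	// (nil, nil) from the callback signals that nothing was found at that
+	// epoch and lets the engine keep narrowing its search window.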
+ // The callback will be called every time the lookup algorithm needs to guess + requestPtr, err := lookup.Lookup(timeLimit, query.Hint, func(epoch lookup.Epoch, now uint64) (interface{}, error) { + readCount++ + id.Epoch = epoch + ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout) + defer cancel() + + chunk, err := h.chunkStore.Get(ctx, id.Addr()) + if err != nil { // TODO: check for catastrophic errors other than chunk not found + return nil, nil + } + + var request Request + if err := request.fromChunk(chunk.Address(), chunk.Data()); err != nil { + return nil, nil + } + if request.Time <= timeLimit { + return &request, nil + } + return nil, nil + }) + if err != nil { + return nil, err + } + + log.Info(fmt.Sprintf("Feed lookup finished in %d lookups", readCount)) + + request, _ := requestPtr.(*Request) + if request == nil { + return nil, NewError(ErrNotFound, "no feed updates found") + } + return h.updateCache(request) + +} + +// update feed updates cache with specified content +func (h *Handler) updateCache(request *Request) (*cacheEntry, error) { + + updateAddr := request.Addr() + log.Trace("feed cache update", "topic", request.Topic.Hex(), "updateaddr", updateAddr, "epoch time", request.Epoch.Time, "epoch level", request.Epoch.Level) + + feedUpdate := h.get(&request.Feed) + if feedUpdate == nil { + feedUpdate = &cacheEntry{} + h.set(&request.Feed, feedUpdate) + } + + // update our rsrcs entry map + feedUpdate.lastKey = updateAddr + feedUpdate.Update = request.Update + feedUpdate.Reader = bytes.NewReader(feedUpdate.data) + return feedUpdate, nil +} + +// Update publishes a feed update +// Note that a feed update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. +// This results in a max payload of `maxUpdateDataLength` (check update.go for more details) +// An error will be returned if the total length of the chunk payload will exceed this limit. +// Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update +// on the network. +func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) { + + // we can't update anything without a store + if h.chunkStore == nil { + return nil, NewError(ErrInit, "Call Handler.SetStore() before updating") + } + + feedUpdate := h.get(&r.Feed) + if feedUpdate != nil && feedUpdate.Epoch.Equals(r.Epoch) { // This is the only cheap check we can do for sure + return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist") + } + + chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big + if err != nil { + return nil, err + } + + // send the chunk + h.chunkStore.Put(ctx, chunk) + log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data()) + // update our feed updates map cache entry if the new update is older than the one we have, if we have it. 
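+	// (that is, the cached entry is only refreshed when the incoming update's
+	// epoch comes after the epoch currently held in the cache)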
+ if feedUpdate != nil && r.Epoch.After(feedUpdate.Epoch) { + feedUpdate.Epoch = r.Epoch + feedUpdate.data = make([]byte, len(r.data)) + feedUpdate.lastKey = r.idAddr + copy(feedUpdate.data, r.data) + feedUpdate.Reader = bytes.NewReader(feedUpdate.data) + } + + return r.idAddr, nil +} + +// Retrieves the feed update cache value for the given nameHash +func (h *Handler) get(feed *Feed) *cacheEntry { + mapKey := feed.mapKey() + h.cacheLock.RLock() + defer h.cacheLock.RUnlock() + feedUpdate := h.cache[mapKey] + return feedUpdate +} + +// Sets the feed update cache value for the given feed +func (h *Handler) set(feed *Feed, feedUpdate *cacheEntry) { + mapKey := feed.mapKey() + h.cacheLock.Lock() + defer h.cacheLock.Unlock() + h.cache[mapKey] = feedUpdate +} diff --git a/swarm/storage/feed/handler_test.go b/swarm/storage/feed/handler_test.go new file mode 100644 index 000000000..cf95bc1f5 --- /dev/null +++ b/swarm/storage/feed/handler_test.go @@ -0,0 +1,520 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feed + +import ( + "bytes" + "context" + "flag" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/ethereum/go-ethereum/crypto" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/swarm/chunk" + "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" +) + +var ( + loglevel = flag.Int("loglevel", 3, "loglevel") + startTime = Timestamp{ + Time: uint64(4200), + } + cleanF func() + subtopicName = "føø.bar" + hashfunc = storage.MakeHashFunc(storage.DefaultHash) +) + +func init() { + flag.Parse() + log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) +} + +// simulated timeProvider +type fakeTimeProvider struct { + currentTime uint64 +} + +func (f *fakeTimeProvider) Tick() { + f.currentTime++ +} + +func (f *fakeTimeProvider) Set(time uint64) { + f.currentTime = time +} + +func (f *fakeTimeProvider) FastForward(offset uint64) { + f.currentTime += offset +} + +func (f *fakeTimeProvider) Now() Timestamp { + return Timestamp{ + Time: f.currentTime, + } +} + +// make updates and retrieve them based on periods and versions +func TestFeedsHandler(t *testing.T) { + + // make fake timeProvider + clock := &fakeTimeProvider{ + currentTime: startTime.Time, // clock starts at t=4200 + } + + // signer containing private key + signer := newAliceSigner() + + feedsHandler, datadir, teardownTest, err := setupTest(clock, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + + // create a new feed + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + topic, _ := NewTopic("Mess with Swarm feeds code and see what ghost catches you", nil) + fd := Feed{ + Topic: topic, + 
User: signer.Address(), + } + + // data for updates: + updates := []string{ + "blinky", // t=4200 + "pinky", // t=4242 + "inky", // t=4284 + "clyde", // t=4285 + } + + request := NewFirstRequest(fd.Topic) // this timestamps the update at t = 4200 (start time) + chunkAddress := make(map[string]storage.Address) + data := []byte(updates[0]) + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + chunkAddress[updates[0]], err = feedsHandler.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + // move the clock ahead 21 seconds + clock.FastForward(21) // t=4221 + + request, err = feedsHandler.NewRequest(ctx, &request.Feed) // this timestamps the update at t = 4221 + if err != nil { + t.Fatal(err) + } + if request.Epoch.Base() != 0 || request.Epoch.Level != lookup.HighestLevel-1 { + t.Fatalf("Suggested epoch BaseTime should be 0 and Epoch level should be %d", lookup.HighestLevel-1) + } + + request.Epoch.Level = lookup.HighestLevel // force level 25 instead of 24 to make it fail + data = []byte(updates[1]) + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request) + if err == nil { + t.Fatal("Expected update to fail since an update in this epoch already exists") + } + + // move the clock ahead 21 seconds + clock.FastForward(21) // t=4242 + request, err = feedsHandler.NewRequest(ctx, &request.Feed) + if err != nil { + t.Fatal(err) + } + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + // move the clock ahead 42 seconds + clock.FastForward(42) // t=4284 + request, err = feedsHandler.NewRequest(ctx, &request.Feed) + if err != nil { + t.Fatal(err) + } + data = []byte(updates[2]) + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + chunkAddress[updates[2]], err = feedsHandler.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + // move the clock ahead 1 second + clock.FastForward(1) // t=4285 + request, err = feedsHandler.NewRequest(ctx, &request.Feed) + if err != nil { + t.Fatal(err) + } + if request.Epoch.Base() != 0 || request.Epoch.Level != 22 { + t.Fatalf("Expected epoch base time to be %d, got %d. 
Expected epoch level to be %d, got %d", 0, request.Epoch.Base(), 22, request.Epoch.Level) + } + data = []byte(updates[3]) + request.SetData(data) + + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + chunkAddress[updates[3]], err = feedsHandler.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Second) + feedsHandler.Close() + + // check we can retrieve the updates after close + clock.FastForward(2000) // t=6285 + + feedParams := &HandlerParams{} + + feedsHandler2, err := NewTestHandler(datadir, feedParams) + if err != nil { + t.Fatal(err) + } + + update2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue)) + if err != nil { + t.Fatal(err) + } + + // last update should be "clyde" + if !bytes.Equal(update2.data, []byte(updates[len(updates)-1])) { + t.Fatalf("feed update data was %v, expected %v", string(update2.data), updates[len(updates)-1]) + } + if update2.Level != 22 { + t.Fatalf("feed update epoch level was %d, expected 22", update2.Level) + } + if update2.Base() != 0 { + t.Fatalf("feed update epoch base time was %d, expected 0", update2.Base()) + } + log.Debug("Latest lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data) + + // specific point in time + update, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue)) + if err != nil { + t.Fatal(err) + } + // check data + if !bytes.Equal(update.data, []byte(updates[2])) { + t.Fatalf("feed update data (historical) was %v, expected %v", string(update2.data), updates[2]) + } + log.Debug("Historical lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data) + + // beyond the first should yield an error + update, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue)) + if err == nil { + t.Fatalf("expected previous to fail, returned epoch %s data %v", update.Epoch.String(), update.data) + } + +} + +const Day = 60 * 60 * 24 +const Year = Day * 365 +const Month = Day * 30 + +func generateData(x uint64) []byte { + return []byte(fmt.Sprintf("%d", x)) +} + +func TestSparseUpdates(t *testing.T) { + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key + signer := newAliceSigner() + + rh, datadir, teardownTest, err := setupTest(timeProvider, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + defer os.RemoveAll(datadir) + + // create a new feed + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + topic, _ := NewTopic("Very slow updates", nil) + fd := Feed{ + Topic: topic, + User: signer.Address(), + } + + // publish one update every 5 years since Unix 0 until today + today := uint64(1533799046) + var epoch lookup.Epoch + var lastUpdateTime uint64 + for T := uint64(0); T < today; T += 5 * Year { + request := NewFirstRequest(fd.Topic) + request.Epoch = lookup.GetNextEpoch(epoch, T) + request.data = generateData(T) // this generates some data that depends on T, so we can check later + request.Sign(signer) + if err != nil { + t.Fatal(err) + } + + if _, err := rh.Update(ctx, request); err != nil { + t.Fatal(err) + } + epoch = request.Epoch + lastUpdateTime = T + } + + query := NewQuery(&fd, today, lookup.NoClue) + + _, err = rh.Lookup(ctx, query) + if err != nil { + t.Fatal(err) + } + + _, content, err := rh.GetContent(&fd) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(generateData(lastUpdateTime), content) { + 
t.Fatalf("Expected to recover last written value %d, got %s", lastUpdateTime, string(content)) + } + + // lookup the closest update to 35*Year + 6* Month (~ June 2005): + // it should find the update we put on 35*Year, since we were updating every 5 years. + + query.TimeLimit = 35*Year + 6*Month + + _, err = rh.Lookup(ctx, query) + if err != nil { + t.Fatal(err) + } + + _, content, err = rh.GetContent(&fd) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(generateData(35*Year), content) { + t.Fatalf("Expected to recover %d, got %s", 35*Year, string(content)) + } +} + +func TestValidator(t *testing.T) { + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key. Alice will be the good girl + signer := newAliceSigner() + + // set up sim timeProvider + rh, _, teardownTest, err := setupTest(timeProvider, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + + // create new feed + topic, _ := NewTopic(subtopicName, nil) + fd := Feed{ + Topic: topic, + User: signer.Address(), + } + mr := NewFirstRequest(fd.Topic) + + // chunk with address + data := []byte("foo") + mr.SetData(data) + if err := mr.Sign(signer); err != nil { + t.Fatalf("sign fail: %v", err) + } + + chunk, err := mr.toChunk() + if err != nil { + t.Fatal(err) + } + if !rh.Validate(chunk.Address(), chunk.Data()) { + t.Fatal("Chunk validator fail on update chunk") + } + + address := chunk.Address() + // mess with the address + address[0] = 11 + address[15] = 99 + + if rh.Validate(address, chunk.Data()) { + t.Fatal("Expected Validate to fail with false chunk address") + } +} + +// tests that the content address validator correctly checks the data +// tests that feed update chunks are passed through content address validator +// there is some redundancy in this test as it also tests content addressed chunks, +// which should be evaluated as invalid chunks by this validator +func TestValidatorInStore(t *testing.T) { + + // make fake timeProvider + TimestampProvider = &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key + signer := newAliceSigner() + + // set up localstore + datadir, err := ioutil.TempDir("", "storage-testfeedsvalidator") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(datadir) + + handlerParams := storage.NewDefaultLocalStoreParams() + handlerParams.Init(datadir) + store, err := storage.NewLocalStore(handlerParams, nil) + if err != nil { + t.Fatal(err) + } + + // set up Swarm feeds handler and add is as a validator to the localstore + fhParams := &HandlerParams{} + fh := NewHandler(fhParams) + store.Validators = append(store.Validators, fh) + + // create content addressed chunks, one good, one faulty + chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2) + goodChunk := chunks[0] + badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data()) + + topic, _ := NewTopic("xyzzy", nil) + fd := Feed{ + Topic: topic, + User: signer.Address(), + } + + // create a feed update chunk with correct publickey + id := ID{ + Epoch: lookup.Epoch{Time: 42, + Level: 1, + }, + Feed: fd, + } + + updateAddr := id.Addr() + data := []byte("bar") + + r := new(Request) + r.idAddr = updateAddr + r.Update.ID = id + r.data = data + + r.Sign(signer) + + uglyChunk, err := r.toChunk() + if err != nil { + t.Fatal(err) + } + + // put the chunks in the store and check their error status + err = store.Put(context.Background(), goodChunk) + if err == nil { + t.Fatal("expected error on good content 
address chunk with feed update validator only, but got nil") + } + err = store.Put(context.Background(), badChunk) + if err == nil { + t.Fatal("expected error on bad content address chunk with feed update validator only, but got nil") + } + err = store.Put(context.Background(), uglyChunk) + if err != nil { + t.Fatalf("expected no error on feed update chunk with feed update validator only, but got: %s", err) + } +} + +// create rpc and feeds Handler +func setupTest(timeProvider timestampProvider, signer Signer) (fh *TestHandler, datadir string, teardown func(), err error) { + + var fsClean func() + var rpcClean func() + cleanF = func() { + if fsClean != nil { + fsClean() + } + if rpcClean != nil { + rpcClean() + } + } + + // temp datadir + datadir, err = ioutil.TempDir("", "fh") + if err != nil { + return nil, "", nil, err + } + fsClean = func() { + os.RemoveAll(datadir) + } + + TimestampProvider = timeProvider + fhParams := &HandlerParams{} + fh, err = NewTestHandler(datadir, fhParams) + return fh, datadir, cleanF, err +} + +func newAliceSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + return NewGenericSigner(privKey) +} + +func newBobSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca") + return NewGenericSigner(privKey) +} + +func newCharlieSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca") + return NewGenericSigner(privKey) +} + +func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) { + chunk, err := rh.chunkStore.Get(context.TODO(), addr) + if err != nil { + return nil, err + } + var r Request + if err := r.fromChunk(addr, chunk.Data()); err != nil { + return nil, err + } + return r.data, nil +} diff --git a/swarm/storage/feed/id.go b/swarm/storage/feed/id.go new file mode 100644 index 000000000..7e17743c1 --- /dev/null +++ b/swarm/storage/feed/id.go @@ -0,0 +1,123 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feed + +import ( + "fmt" + "hash" + "strconv" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" + + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// ID uniquely identifies an update on the network. 
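+// To locate the chunk that holds a specific update, an ID can be built from
+// a feed and an epoch and asked for its address. Illustrative sketch, where
+// fd is some previously constructed Feed:
+//
+//	id := ID{Feed: fd, Epoch: lookup.Epoch{Time: 42, Level: 1}}
+//	addr := id.Addr()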
+type ID struct { + Feed `json:"feed"` + lookup.Epoch `json:"epoch"` +} + +// ID layout: +// Feed feedLength bytes +// Epoch EpochLength +const idLength = feedLength + lookup.EpochLength + +// Addr calculates the feed update chunk address corresponding to this ID +func (u *ID) Addr() (updateAddr storage.Address) { + serializedData := make([]byte, idLength) + var cursor int + u.Feed.binaryPut(serializedData[cursor : cursor+feedLength]) + cursor += feedLength + + eid := u.Epoch.ID() + copy(serializedData[cursor:cursor+lookup.EpochLength], eid[:]) + + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + hasher.Write(serializedData) + return hasher.Sum(nil) +} + +// binaryPut serializes this instance into the provided slice +func (u *ID) binaryPut(serializedData []byte) error { + if len(serializedData) != idLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize ID. Expected %d, got %d", idLength, len(serializedData)) + } + var cursor int + if err := u.Feed.binaryPut(serializedData[cursor : cursor+feedLength]); err != nil { + return err + } + cursor += feedLength + + epochBytes, err := u.Epoch.MarshalBinary() + if err != nil { + return err + } + copy(serializedData[cursor:cursor+lookup.EpochLength], epochBytes[:]) + cursor += lookup.EpochLength + + return nil +} + +// binaryLength returns the expected size of this structure when serialized +func (u *ID) binaryLength() int { + return idLength +} + +// binaryGet restores the current instance from the information contained in the passed slice +func (u *ID) binaryGet(serializedData []byte) error { + if len(serializedData) != idLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to read ID. Expected %d, got %d", idLength, len(serializedData)) + } + + var cursor int + if err := u.Feed.binaryGet(serializedData[cursor : cursor+feedLength]); err != nil { + return err + } + cursor += feedLength + + if err := u.Epoch.UnmarshalBinary(serializedData[cursor : cursor+lookup.EpochLength]); err != nil { + return err + } + cursor += lookup.EpochLength + + return nil +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (u *ID) FromValues(values Values) error { + level, _ := strconv.ParseUint(values.Get("level"), 10, 32) + u.Epoch.Level = uint8(level) + u.Epoch.Time, _ = strconv.ParseUint(values.Get("time"), 10, 64) + + if u.Feed.User == (common.Address{}) { + return u.Feed.FromValues(values) + } + return nil +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (u *ID) AppendValues(values Values) { + values.Set("level", fmt.Sprintf("%d", u.Epoch.Level)) + values.Set("time", fmt.Sprintf("%d", u.Epoch.Time)) + u.Feed.AppendValues(values) +} diff --git a/swarm/storage/feed/id_test.go b/swarm/storage/feed/id_test.go new file mode 100644 index 000000000..e561ff9b4 --- /dev/null +++ b/swarm/storage/feed/id_test.go @@ -0,0 +1,28 @@ +package feed + +import ( + "testing" + + "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" +) + +func getTestID() *ID { + return &ID{ + Feed: *getTestFeed(), + Epoch: lookup.GetFirstEpoch(1000), + } +} + +func TestIDAddr(t *testing.T) { + id := getTestID() + updateAddr := id.Addr() + compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8b24583ec293e085f4c78aaee66d1bc5abfb8b4233304d14a349afa57af2a783") +} + +func TestIDSerializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestID(), 
"0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019") +} + +func TestIDLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestID()) +} diff --git a/swarm/storage/feed/lookup/epoch.go b/swarm/storage/feed/lookup/epoch.go new file mode 100644 index 000000000..bafe95477 --- /dev/null +++ b/swarm/storage/feed/lookup/epoch.go @@ -0,0 +1,91 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package lookup + +import ( + "encoding/binary" + "errors" + "fmt" +) + +// Epoch represents a time slot at a particular frequency level +type Epoch struct { + Time uint64 `json:"time"` // Time stores the time at which the update or lookup takes place + Level uint8 `json:"level"` // Level indicates the frequency level as the exponent of a power of 2 +} + +// EpochID is a unique identifier for an Epoch, based on its level and base time. +type EpochID [8]byte + +// EpochLength stores the serialized binary length of an Epoch +const EpochLength = 8 + +// MaxTime contains the highest possible time value an Epoch can handle +const MaxTime uint64 = (1 << 56) - 1 + +// Base returns the base time of the Epoch +func (e *Epoch) Base() uint64 { + return getBaseTime(e.Time, e.Level) +} + +// ID Returns the unique identifier of this epoch +func (e *Epoch) ID() EpochID { + base := e.Base() + var id EpochID + binary.LittleEndian.PutUint64(id[:], base) + id[7] = e.Level + return id +} + +// MarshalBinary implements the encoding.BinaryMarshaller interface +func (e *Epoch) MarshalBinary() (data []byte, err error) { + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b[:], e.Time) + b[7] = e.Level + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaller interface +func (e *Epoch) UnmarshalBinary(data []byte) error { + if len(data) != EpochLength { + return errors.New("Invalid data unmarshalling Epoch") + } + b := make([]byte, 8) + copy(b, data) + e.Level = b[7] + b[7] = 0 + e.Time = binary.LittleEndian.Uint64(b) + return nil +} + +// After returns true if this epoch occurs later or exactly at the other epoch. +func (e *Epoch) After(epoch Epoch) bool { + if e.Time == epoch.Time { + return e.Level < epoch.Level + } + return e.Time >= epoch.Time +} + +// Equals compares two epochs and returns true if they refer to the same time period. +func (e *Epoch) Equals(epoch Epoch) bool { + return e.Level == epoch.Level && e.Base() == epoch.Base() +} + +// String implements the Stringer interface. 
+func (e *Epoch) String() string { + return fmt.Sprintf("Epoch{Time:%d, Level:%d}", e.Time, e.Level) +} diff --git a/swarm/storage/feed/lookup/epoch_test.go b/swarm/storage/feed/lookup/epoch_test.go new file mode 100644 index 000000000..0629f3d1d --- /dev/null +++ b/swarm/storage/feed/lookup/epoch_test.go @@ -0,0 +1,57 @@ +package lookup_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" +) + +func TestMarshallers(t *testing.T) { + + for i := uint64(1); i < lookup.MaxTime; i *= 3 { + e := lookup.Epoch{ + Time: i, + Level: uint8(i % 20), + } + b, err := e.MarshalBinary() + if err != nil { + t.Fatal(err) + } + var e2 lookup.Epoch + if err := e2.UnmarshalBinary(b); err != nil { + t.Fatal(err) + } + if e != e2 { + t.Fatal("Expected unmarshalled epoch to be equal to marshalled onet.Fatal(err)") + } + } + +} + +func TestAfter(t *testing.T) { + a := lookup.Epoch{ + Time: 5, + Level: 3, + } + b := lookup.Epoch{ + Time: 6, + Level: 3, + } + c := lookup.Epoch{ + Time: 6, + Level: 4, + } + + if !b.After(a) { + t.Fatal("Expected 'after' to be true, got false") + } + + if b.After(b) { + t.Fatal("Expected 'after' to be false when both epochs are identical, got true") + } + + if !b.After(c) { + t.Fatal("Expected 'after' to be true when both epochs have the same time but the level is lower in the first one, but got false") + } + +} diff --git a/swarm/storage/feed/lookup/lookup.go b/swarm/storage/feed/lookup/lookup.go new file mode 100644 index 000000000..2f862d81c --- /dev/null +++ b/swarm/storage/feed/lookup/lookup.go @@ -0,0 +1,180 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +/* +Package lookup defines feed lookup algorithms and provides tools to place updates +so they can be found +*/ +package lookup + +const maxuint64 = ^uint64(0) + +// LowestLevel establishes the frequency resolution of the lookup algorithm as a power of 2. +const LowestLevel uint8 = 0 // default is 0 (1 second) + +// HighestLevel sets the lowest frequency the algorithm will operate at, as a power of 2. +// 25 -> 2^25 equals to roughly one year. 
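+// (2^25 seconds is 33554432 seconds, i.e. a little over one year)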
+const HighestLevel = 25 // default is 25 (~1 year) + +// DefaultLevel sets what level will be chosen to search when there is no hint +const DefaultLevel = HighestLevel + +//Algorithm is the function signature of a lookup algorithm +type Algorithm func(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) + +// Lookup finds the update with the highest timestamp that is smaller or equal than 'now' +// It takes a hint which should be the epoch where the last known update was +// If you don't know in what epoch the last update happened, simply submit lookup.NoClue +// read() will be called on each lookup attempt +// Returns an error only if read() returns an error +// Returns nil if an update was not found +var Lookup Algorithm = FluzCapacitorAlgorithm + +// ReadFunc is a handler called by Lookup each time it attempts to find a value +// It should return if a value is not found +// It should return if a value is found, but its timestamp is higher than "now" +// It should only return an error in case the handler wants to stop the +// lookup process entirely. +type ReadFunc func(epoch Epoch, now uint64) (interface{}, error) + +// NoClue is a hint that can be provided when the Lookup caller does not have +// a clue about where the last update may be +var NoClue = Epoch{} + +// getBaseTime returns the epoch base time of the given +// time and level +func getBaseTime(t uint64, level uint8) uint64 { + return t & (maxuint64 << level) +} + +// Hint creates a hint based only on the last known update time +func Hint(last uint64) Epoch { + return Epoch{ + Time: last, + Level: DefaultLevel, + } +} + +// GetNextLevel returns the frequency level a next update should be placed at, provided where +// the last update was and what time it is now. +// This is the first nonzero bit of the XOR of 'last' and 'now', counting from the highest significant bit +// but limited to not return a level that is smaller than the last-1 +func GetNextLevel(last Epoch, now uint64) uint8 { + // First XOR the last epoch base time with the current clock. + // This will set all the common most significant bits to zero. + mix := (last.Base() ^ now) + + // Then, make sure we stop the below loop before one level below the current, by setting + // that level's bit to 1. + // If the next level is lower than the current one, it must be exactly level-1 and not lower. + mix |= (1 << (last.Level - 1)) + + // if the last update was more than 2^highestLevel seconds ago, choose the highest level + if mix > (maxuint64 >> (64 - HighestLevel - 1)) { + return HighestLevel + } + + // set up a mask to scan for nonzero bits, starting at the highest level + mask := uint64(1 << (HighestLevel)) + + for i := uint8(HighestLevel); i > LowestLevel; i-- { + if mix&mask != 0 { // if we find a nonzero bit, this is the level the next update should be at. + return i + } + mask = mask >> 1 // move our bit one position to the right + } + return 0 +} + +// GetNextEpoch returns the epoch where the next update should be located +// according to where the previous update was +// and what time it is now. +func GetNextEpoch(last Epoch, now uint64) Epoch { + if last == NoClue { + return GetFirstEpoch(now) + } + level := GetNextLevel(last, now) + return Epoch{ + Level: level, + Time: now, + } +} + +// GetFirstEpoch returns the epoch where the first update should be located +// based on what time it is now. 
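+// The first update always goes to the highest level, which is also where a
+// lookup that starts with no hint begins searching. Illustrative sketch:
+//
+//	e := GetFirstEpoch(1533799046) // Epoch{Time: 1533799046, Level: HighestLevel}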
+func GetFirstEpoch(now uint64) Epoch { + return Epoch{Level: HighestLevel, Time: now} +} + +var worstHint = Epoch{Time: 0, Level: 63} + +// FluzCapacitorAlgorithm works by narrowing the epoch search area if an update is found +// going back and forth in time +// First, it will attempt to find an update where it should be now if the hint was +// really the last update. If that lookup fails, then the last update must be either the hint itself +// or the epochs right below. If however, that lookup succeeds, then the update must be +// that one or within the epochs right below. +// see the guide for a more graphical representation +func FluzCapacitorAlgorithm(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) { + var lastFound interface{} + var epoch Epoch + if hint == NoClue { + hint = worstHint + } + + t := now + + for { + epoch = GetNextEpoch(hint, t) + value, err = read(epoch, now) + if err != nil { + return nil, err + } + if value != nil { + lastFound = value + if epoch.Level == LowestLevel || epoch.Equals(hint) { + return value, nil + } + hint = epoch + continue + } + if epoch.Base() == hint.Base() { + if lastFound != nil { + return lastFound, nil + } + // we have reached the hint itself + if hint == worstHint { + return nil, nil + } + // check it out + value, err = read(hint, now) + if err != nil { + return nil, err + } + if value != nil { + return value, nil + } + // bad hint. + epoch = hint + hint = worstHint + } + base := epoch.Base() + if base == 0 { + return nil, nil + } + t = base - 1 + } +} diff --git a/swarm/storage/feed/lookup/lookup_test.go b/swarm/storage/feed/lookup/lookup_test.go new file mode 100644 index 000000000..d71e81e95 --- /dev/null +++ b/swarm/storage/feed/lookup/lookup_test.go @@ -0,0 +1,414 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package lookup_test + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" +) + +type Data struct { + Payload uint64 + Time uint64 +} + +type Store map[lookup.EpochID]*Data + +func write(store Store, epoch lookup.Epoch, value *Data) { + log.Debug("Write: %d-%d, value='%d'\n", epoch.Base(), epoch.Level, value.Payload) + store[epoch.ID()] = value +} + +func update(store Store, last lookup.Epoch, now uint64, value *Data) lookup.Epoch { + epoch := lookup.GetNextEpoch(last, now) + + write(store, epoch, value) + + return epoch +} + +const Day = 60 * 60 * 24 +const Year = Day * 365 +const Month = Day * 30 + +func makeReadFunc(store Store, counter *int) lookup.ReadFunc { + return func(epoch lookup.Epoch, now uint64) (interface{}, error) { + *counter++ + data := store[epoch.ID()] + var valueStr string + if data != nil { + valueStr = fmt.Sprintf("%d", data.Payload) + } + log.Debug("Read: %d-%d, value='%s'\n", epoch.Base(), epoch.Level, valueStr) + if data != nil && data.Time <= now { + return data, nil + } + return nil, nil + } +} + +func TestLookup(t *testing.T) { + + store := make(Store) + readCount := 0 + readFunc := makeReadFunc(store, &readCount) + + // write an update every month for 12 months 3 years ago and then silence for two years + now := uint64(1533799046) + var epoch lookup.Epoch + + var lastData *Data + for i := uint64(0); i < 12; i++ { + t := uint64(now - Year*3 + i*Month) + data := Data{ + Payload: t, //our "payload" will be the timestamp itself. + Time: t, + } + epoch = update(store, epoch, t, &data) + lastData = &data + } + + // try to get the last value + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + readCountWithoutHint := readCount + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + // reset the read count for the next test + readCount = 0 + // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update + value, err = lookup.Lookup(now, epoch, readFunc) + if err != nil { + t.Fatal(err) + } + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + if readCount > readCountWithoutHint { + t.Fatalf("Expected lookup to complete with fewer or same reads than %d since we provided a hint. Did %d reads.", readCountWithoutHint, readCount) + } + + // try to get an intermediate value + // if we look for a value in now - Year*3 + 6*Month, we should get that value + // Since the "payload" is the timestamp itself, we can check this. + + expectedTime := now - Year*3 + 6*Month + + value, err = lookup.Lookup(expectedTime, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + data, ok := value.(*Data) + + if !ok { + t.Fatal("Expected value to contain data") + } + + if data.Time != expectedTime { + t.Fatalf("Expected value timestamp to be %d, got %d", data.Time, expectedTime) + } + +} + +func TestOneUpdateAt0(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + var epoch lookup.Epoch + data := Data{ + Payload: 79, + Time: 0, + } + update(store, epoch, 0, &data) + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + if value != &data { + t.Fatalf("Expected lookup to return the last written value: %v. 
Got %v", data, value) + } +} + +// Tests the update is found even when a bad hint is given +func TestBadHint(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + var epoch lookup.Epoch + data := Data{ + Payload: 79, + Time: 0, + } + + // place an update for t=1200 + update(store, epoch, 1200, &data) + + // come up with some evil hint + badHint := lookup.Epoch{ + Level: 18, + Time: 1200000000, + } + + value, err := lookup.Lookup(now, badHint, readFunc) + if err != nil { + t.Fatal(err) + } + if value != &data { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", data, value) + } +} + +func TestLookupFail(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + // don't write anything and try to look up. + // we're testing we don't get stuck in a loop + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + if value != nil { + t.Fatal("Expected value to be nil, since the update should've failed") + } + + expectedReads := now/(1< readCountWithoutHint { + t.Fatalf("Expected lookup to complete with fewer or equal reads than %d since we provided a hint. Did %d reads.", readCountWithoutHint, readCount) + } + + for i := uint64(0); i <= 994; i++ { + T := uint64(now - 1000 + i) // update every second for the last 1000 seconds + value, err := lookup.Lookup(T, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + data, _ := value.(*Data) + if data == nil { + t.Fatalf("Expected lookup to return %d, got nil", T) + } + if data.Payload != T { + t.Fatalf("Expected lookup to return %d, got %d", T, data.Time) + } + } +} + +func TestSparseUpdates(t *testing.T) { + + store := make(Store) + readCount := 0 + readFunc := makeReadFunc(store, &readCount) + + // write an update every 5 years 3 times starting in Jan 1st 1970 and then silence + + now := uint64(1533799046) + var epoch lookup.Epoch + + var lastData *Data + for i := uint64(0); i < 5; i++ { + T := uint64(Year * 5 * i) // write an update every 5 years 3 times starting in Jan 1st 1970 and then silence + data := Data{ + Payload: T, //our "payload" will be the timestamp itself. + Time: T, + } + epoch = update(store, epoch, T, &data) + lastData = &data + } + + // try to get the last value + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + readCountWithoutHint := readCount + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + // reset the read count for the next test + readCount = 0 + // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update + value, err = lookup.Lookup(now, epoch, readFunc) + if err != nil { + t.Fatal(err) + } + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + if readCount > readCountWithoutHint { + t.Fatalf("Expected lookup to complete with fewer reads than %d since we provided a hint. 
Did %d reads.", readCountWithoutHint, readCount) + } + +} + +// testG will hold precooked test results +// fields are abbreviated to reduce the size of the literal below +type testG struct { + e lookup.Epoch // last + n uint64 // next level + x uint8 // expected result +} + +// test cases +var testGetNextLevelCases []testG = []testG{{e: lookup.Epoch{Time: 989875233, Level: 12}, n: 989875233, x: 11}, {e: lookup.Epoch{Time: 995807650, Level: 18}, n: 995598156, x: 19}, {e: lookup.Epoch{Time: 969167082, Level: 0}, n: 968990357, x: 18}, {e: lookup.Epoch{Time: 993087628, Level: 14}, n: 992987044, x: 20}, {e: lookup.Epoch{Time: 963364631, Level: 20}, n: 963364630, x: 19}, {e: lookup.Epoch{Time: 963497510, Level: 16}, n: 963370732, x: 18}, {e: lookup.Epoch{Time: 955421349, Level: 22}, n: 955421348, x: 21}, {e: lookup.Epoch{Time: 968220379, Level: 15}, n: 968220378, x: 14}, {e: lookup.Epoch{Time: 939129014, Level: 6}, n: 939128771, x: 11}, {e: lookup.Epoch{Time: 907847903, Level: 6}, n: 907791833, x: 18}, {e: lookup.Epoch{Time: 910835564, Level: 15}, n: 910835564, x: 14}, {e: lookup.Epoch{Time: 913578333, Level: 22}, n: 881808431, x: 25}, {e: lookup.Epoch{Time: 895818460, Level: 3}, n: 895818132, x: 9}, {e: lookup.Epoch{Time: 903843025, Level: 24}, n: 895609561, x: 23}, {e: lookup.Epoch{Time: 877889433, Level: 13}, n: 877877093, x: 15}, {e: lookup.Epoch{Time: 901450396, Level: 10}, n: 901450058, x: 9}, {e: lookup.Epoch{Time: 925179910, Level: 3}, n: 925168393, x: 16}, {e: lookup.Epoch{Time: 913485477, Level: 21}, n: 913485476, x: 20}, {e: lookup.Epoch{Time: 924462991, Level: 18}, n: 924462990, x: 17}, {e: lookup.Epoch{Time: 941175128, Level: 13}, n: 941175127, x: 12}, {e: lookup.Epoch{Time: 920126583, Level: 3}, n: 920100782, x: 19}, {e: lookup.Epoch{Time: 932403200, Level: 9}, n: 932279891, x: 17}, {e: lookup.Epoch{Time: 948284931, Level: 2}, n: 948284921, x: 9}, {e: lookup.Epoch{Time: 953540997, Level: 7}, n: 950547986, x: 22}, {e: lookup.Epoch{Time: 926639837, Level: 18}, n: 918608882, x: 24}, {e: lookup.Epoch{Time: 954637598, Level: 1}, n: 954578761, x: 17}, {e: lookup.Epoch{Time: 943482981, Level: 10}, n: 942924151, x: 19}, {e: lookup.Epoch{Time: 963580771, Level: 7}, n: 963580771, x: 6}, {e: lookup.Epoch{Time: 993744930, Level: 7}, n: 993690858, x: 16}, {e: lookup.Epoch{Time: 1018890213, Level: 12}, n: 1018890212, x: 11}, {e: lookup.Epoch{Time: 1030309411, Level: 2}, n: 1030309227, x: 9}, {e: lookup.Epoch{Time: 1063204997, Level: 20}, n: 1063204996, x: 19}, {e: lookup.Epoch{Time: 1094340832, Level: 6}, n: 1094340633, x: 7}, {e: lookup.Epoch{Time: 1077880597, Level: 10}, n: 1075914292, x: 20}, {e: lookup.Epoch{Time: 1051114957, Level: 18}, n: 1051114957, x: 17}, {e: lookup.Epoch{Time: 1045649701, Level: 22}, n: 1045649700, x: 21}, {e: lookup.Epoch{Time: 1066198885, Level: 14}, n: 1066198884, x: 13}, {e: lookup.Epoch{Time: 1053231952, Level: 1}, n: 1053210845, x: 16}, {e: lookup.Epoch{Time: 1068763404, Level: 14}, n: 1068675428, x: 18}, {e: lookup.Epoch{Time: 1039042173, Level: 15}, n: 1038973110, x: 17}, {e: lookup.Epoch{Time: 1050747636, Level: 6}, n: 1050747364, x: 9}, {e: lookup.Epoch{Time: 1030034434, Level: 23}, n: 1030034433, x: 22}, {e: lookup.Epoch{Time: 1003783425, Level: 18}, n: 1003783424, x: 17}, {e: lookup.Epoch{Time: 988163976, Level: 15}, n: 988084064, x: 17}, {e: lookup.Epoch{Time: 1007222377, Level: 15}, n: 1007222377, x: 14}, {e: lookup.Epoch{Time: 1001211375, Level: 13}, n: 1001208178, x: 14}, {e: lookup.Epoch{Time: 997623199, Level: 8}, n: 997623198, x: 7}, {e: 
lookup.Epoch{Time: 1026283830, Level: 10}, n: 1006681704, x: 24}, {e: lookup.Epoch{Time: 1019421907, Level: 20}, n: 1019421906, x: 19}, {e: lookup.Epoch{Time: 1043154306, Level: 16}, n: 1043108343, x: 16}, {e: lookup.Epoch{Time: 1075643767, Level: 17}, n: 1075325898, x: 18}, {e: lookup.Epoch{Time: 1043726309, Level: 20}, n: 1043726308, x: 19}, {e: lookup.Epoch{Time: 1056415324, Level: 17}, n: 1056415324, x: 16}, {e: lookup.Epoch{Time: 1088650219, Level: 13}, n: 1088650218, x: 12}, {e: lookup.Epoch{Time: 1088551662, Level: 7}, n: 1088543355, x: 13}, {e: lookup.Epoch{Time: 1069667265, Level: 6}, n: 1069667075, x: 7}, {e: lookup.Epoch{Time: 1079145970, Level: 18}, n: 1079145969, x: 17}, {e: lookup.Epoch{Time: 1083338876, Level: 7}, n: 1083338875, x: 6}, {e: lookup.Epoch{Time: 1051581086, Level: 4}, n: 1051568869, x: 14}, {e: lookup.Epoch{Time: 1028430882, Level: 4}, n: 1028430864, x: 5}, {e: lookup.Epoch{Time: 1057356462, Level: 1}, n: 1057356417, x: 5}, {e: lookup.Epoch{Time: 1033104266, Level: 0}, n: 1033097479, x: 13}, {e: lookup.Epoch{Time: 1031391367, Level: 11}, n: 1031387304, x: 14}, {e: lookup.Epoch{Time: 1049781164, Level: 15}, n: 1049781163, x: 14}, {e: lookup.Epoch{Time: 1027271628, Level: 12}, n: 1027271627, x: 11}, {e: lookup.Epoch{Time: 1057270560, Level: 23}, n: 1057270560, x: 22}, {e: lookup.Epoch{Time: 1047501317, Level: 15}, n: 1047501317, x: 14}, {e: lookup.Epoch{Time: 1058349035, Level: 11}, n: 1045175573, x: 24}, {e: lookup.Epoch{Time: 1057396147, Level: 20}, n: 1057396147, x: 19}, {e: lookup.Epoch{Time: 1048906375, Level: 18}, n: 1039616919, x: 25}, {e: lookup.Epoch{Time: 1074294831, Level: 20}, n: 1074294831, x: 19}, {e: lookup.Epoch{Time: 1088946052, Level: 1}, n: 1088917364, x: 14}, {e: lookup.Epoch{Time: 1112337595, Level: 17}, n: 1111008110, x: 22}, {e: lookup.Epoch{Time: 1099990284, Level: 5}, n: 1099968370, x: 15}, {e: lookup.Epoch{Time: 1087036441, Level: 16}, n: 1053967855, x: 25}, {e: lookup.Epoch{Time: 1069225185, Level: 8}, n: 1069224660, x: 10}, {e: lookup.Epoch{Time: 1057505479, Level: 9}, n: 1057505170, x: 14}, {e: lookup.Epoch{Time: 1072381377, Level: 12}, n: 1065950959, x: 22}, {e: lookup.Epoch{Time: 1093887139, Level: 8}, n: 1093863305, x: 14}, {e: lookup.Epoch{Time: 1082366510, Level: 24}, n: 1082366510, x: 23}, {e: lookup.Epoch{Time: 1103231132, Level: 14}, n: 1102292201, x: 22}, {e: lookup.Epoch{Time: 1094502355, Level: 3}, n: 1094324652, x: 18}, {e: lookup.Epoch{Time: 1068488344, Level: 12}, n: 1067577330, x: 19}, {e: lookup.Epoch{Time: 1050278233, Level: 12}, n: 1050278232, x: 11}, {e: lookup.Epoch{Time: 1047660768, Level: 5}, n: 1047652137, x: 17}, {e: lookup.Epoch{Time: 1060116167, Level: 11}, n: 1060114091, x: 12}, {e: lookup.Epoch{Time: 1068149392, Level: 21}, n: 1052074801, x: 24}, {e: lookup.Epoch{Time: 1081934120, Level: 6}, n: 1081933847, x: 8}, {e: lookup.Epoch{Time: 1107943693, Level: 16}, n: 1107096139, x: 25}, {e: lookup.Epoch{Time: 1131571649, Level: 9}, n: 1131570428, x: 11}, {e: lookup.Epoch{Time: 1123139367, Level: 0}, n: 1122912198, x: 20}, {e: lookup.Epoch{Time: 1121144423, Level: 6}, n: 1120568289, x: 20}, {e: lookup.Epoch{Time: 1089932411, Level: 17}, n: 1089932410, x: 16}, {e: lookup.Epoch{Time: 1104899012, Level: 22}, n: 1098978789, x: 22}, {e: lookup.Epoch{Time: 1094588059, Level: 21}, n: 1094588059, x: 20}, {e: lookup.Epoch{Time: 1114987438, Level: 24}, n: 1114987437, x: 23}, {e: lookup.Epoch{Time: 1084186305, Level: 7}, n: 1084186241, x: 6}, {e: lookup.Epoch{Time: 1058827111, Level: 8}, n: 1058826504, x: 9}, {e: 
lookup.Epoch{Time: 1090679810, Level: 12}, n: 1090616539, x: 17}, {e: lookup.Epoch{Time: 1084299475, Level: 23}, n: 1084299475, x: 22}} + +func TestGetNextLevel(t *testing.T) { + + // First, test well-known cases + last := lookup.Epoch{ + Time: 1533799046, + Level: 5, + } + + level := lookup.GetNextLevel(last, last.Time) + expected := uint8(4) + if level != expected { + t.Fatalf("Expected GetNextLevel to return %d for same-time updates at a nonzero level, got %d", expected, level) + } + + level = lookup.GetNextLevel(last, last.Time+(1< lookup.HighestLevel { + v = 0 + } + now = last.Time + uint64(rand.Intn(1<. + +package feed + +import ( + "fmt" + "strconv" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" +) + +// Query is used to specify constraints when performing an update lookup +// TimeLimit indicates an upper bound for the search. Set to 0 for "now" +type Query struct { + Feed + Hint lookup.Epoch + TimeLimit uint64 +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (q *Query) FromValues(values Values) error { + time, _ := strconv.ParseUint(values.Get("time"), 10, 64) + q.TimeLimit = time + + level, _ := strconv.ParseUint(values.Get("hint.level"), 10, 32) + q.Hint.Level = uint8(level) + q.Hint.Time, _ = strconv.ParseUint(values.Get("hint.time"), 10, 64) + if q.Feed.User == (common.Address{}) { + return q.Feed.FromValues(values) + } + return nil +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (q *Query) AppendValues(values Values) { + if q.TimeLimit != 0 { + values.Set("time", fmt.Sprintf("%d", q.TimeLimit)) + } + if q.Hint.Level != 0 { + values.Set("hint.level", fmt.Sprintf("%d", q.Hint.Level)) + } + if q.Hint.Time != 0 { + values.Set("hint.time", fmt.Sprintf("%d", q.Hint.Time)) + } + q.Feed.AppendValues(values) +} + +// NewQuery constructs an Query structure to find updates on or before `time` +// if time == 0, the latest update will be looked up +func NewQuery(feed *Feed, time uint64, hint lookup.Epoch) *Query { + return &Query{ + TimeLimit: time, + Feed: *feed, + Hint: hint, + } +} + +// NewQueryLatest generates lookup parameters that look for the latest update to a feed +func NewQueryLatest(feed *Feed, hint lookup.Epoch) *Query { + return NewQuery(feed, 0, hint) +} diff --git a/swarm/storage/feed/query_test.go b/swarm/storage/feed/query_test.go new file mode 100644 index 000000000..9fa5e2980 --- /dev/null +++ b/swarm/storage/feed/query_test.go @@ -0,0 +1,38 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package feed + +import ( + "testing" +) + +func getTestQuery() *Query { + id := getTestID() + return &Query{ + TimeLimit: 5000, + Feed: id.Feed, + Hint: id.Epoch, + } +} + +func TestQueryValues(t *testing.T) { + var expected = KV{"hint.level": "25", "hint.time": "1000", "time": "5000", "topic": "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000", "user": "0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c"} + + query := getTestQuery() + testValueSerializer(t, query, expected) + +} diff --git a/swarm/storage/feed/request.go b/swarm/storage/feed/request.go new file mode 100644 index 000000000..6968d8b9a --- /dev/null +++ b/swarm/storage/feed/request.go @@ -0,0 +1,284 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feed + +import ( + "bytes" + "encoding/json" + "hash" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" +) + +// Request represents a request to sign or signed feed update message +type Request struct { + Update // actual content that will be put on the chunk, less signature + Signature *Signature + idAddr storage.Address // cached chunk address for the update (not serialized, for internal use) + binaryData []byte // cached serialized data (does not get serialized again!, for efficiency/internal use) +} + +// updateRequestJSON represents a JSON-serialized UpdateRequest +type updateRequestJSON struct { + ID + ProtocolVersion uint8 `json:"protocolVersion"` + Data string `json:"data,omitempty"` + Signature string `json:"signature,omitempty"` +} + +// Request layout +// Update bytes +// SignatureLength bytes +const minimumSignedUpdateLength = minimumUpdateDataLength + signatureLength + +// NewFirstRequest returns a ready to sign request to publish a first feed update +func NewFirstRequest(topic Topic) *Request { + + request := new(Request) + + // get the current time + now := TimestampProvider.Now().Time + request.Epoch = lookup.GetFirstEpoch(now) + request.Feed.Topic = topic + request.Header.Version = ProtocolVersion + + return request +} + +// SetData stores the payload data the feed update will be updated with +func (r *Request) SetData(data []byte) { + r.data = data + r.Signature = nil +} + +// IsUpdate returns true if this request models a signed update or otherwise it is a signature request +func (r *Request) IsUpdate() bool { + return r.Signature != nil +} + +// Verify checks that signatures are valid +func (r *Request) Verify() (err error) { + if len(r.data) == 0 { + return NewError(ErrInvalidValue, "Update does not contain data") + } + if r.Signature == nil { + return NewError(ErrInvalidSignature, "Missing signature field") + } + + digest, err := r.GetDigest() + if err != nil { + 
return err + } + + // get the address of the signer (which also checks that it's a valid signature) + r.Feed.User, err = getUserAddr(digest, *r.Signature) + if err != nil { + return err + } + + // check that the lookup information contained in the chunk matches the updateAddr (chunk search key) + // that was used to retrieve this chunk + // if this validation fails, someone forged a chunk. + if !bytes.Equal(r.idAddr, r.Addr()) { + return NewError(ErrInvalidSignature, "Signature address does not match with update user address") + } + + return nil +} + +// Sign executes the signature to validate the update message +func (r *Request) Sign(signer Signer) error { + r.Feed.User = signer.Address() + r.binaryData = nil //invalidate serialized data + digest, err := r.GetDigest() // computes digest and serializes into .binaryData + if err != nil { + return err + } + + signature, err := signer.Sign(digest) + if err != nil { + return err + } + + // Although the Signer interface returns the public address of the signer, + // recover it from the signature to see if they match + userAddr, err := getUserAddr(digest, signature) + if err != nil { + return NewError(ErrInvalidSignature, "Error verifying signature") + } + + if userAddr != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign! + return NewError(ErrInvalidSignature, "Signer address does not match update user address") + } + + r.Signature = &signature + r.idAddr = r.Addr() + return nil +} + +// GetDigest creates the feed update digest used in signatures +// the serialized payload is cached in .binaryData +func (r *Request) GetDigest() (result common.Hash, err error) { + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + dataLength := r.Update.binaryLength() + if r.binaryData == nil { + r.binaryData = make([]byte, dataLength+signatureLength) + if err := r.Update.binaryPut(r.binaryData[:dataLength]); err != nil { + return result, err + } + } + hasher.Write(r.binaryData[:dataLength]) //everything except the signature. + + return common.BytesToHash(hasher.Sum(nil)), nil +} + +// create an update chunk. +func (r *Request) toChunk() (storage.Chunk, error) { + + // Check that the update is signed and serialized + // For efficiency, data is serialized during signature and cached in + // the binaryData field when computing the signature digest in .getDigest() + if r.Signature == nil || r.binaryData == nil { + return nil, NewError(ErrInvalidSignature, "toChunk called without a valid signature or payload data. Call .Sign() first.") + } + + updateLength := r.Update.binaryLength() + + // signature is the last item in the chunk data + copy(r.binaryData[updateLength:], r.Signature[:]) + + chunk := storage.NewChunk(r.idAddr, r.binaryData) + return chunk, nil +} + +// fromChunk populates this structure from chunk data. It does not verify the signature is valid. 
+func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error { + // for update chunk layout see Request definition + + //deserialize the feed update portion + if err := r.Update.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil { + return err + } + + // Extract the signature + var signature *Signature + cursor := r.Update.binaryLength() + sigdata := chunkdata[cursor : cursor+signatureLength] + if len(sigdata) > 0 { + signature = &Signature{} + copy(signature[:], sigdata) + } + + r.Signature = signature + r.idAddr = updateAddr + r.binaryData = chunkdata + + return nil + +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (r *Request) FromValues(values Values, data []byte) error { + signatureBytes, err := hexutil.Decode(values.Get("signature")) + if err != nil { + r.Signature = nil + } else { + if len(signatureBytes) != signatureLength { + return NewError(ErrInvalidSignature, "Incorrect signature length") + } + r.Signature = new(Signature) + copy(r.Signature[:], signatureBytes) + } + err = r.Update.FromValues(values, data) + if err != nil { + return err + } + r.idAddr = r.Addr() + return err +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (r *Request) AppendValues(values Values) []byte { + if r.Signature != nil { + values.Set("signature", hexutil.Encode(r.Signature[:])) + } + return r.Update.AppendValues(values) +} + +// fromJSON takes an update request JSON and populates an UpdateRequest +func (r *Request) fromJSON(j *updateRequestJSON) error { + + r.ID = j.ID + r.Header.Version = j.ProtocolVersion + + var err error + if j.Data != "" { + r.data, err = hexutil.Decode(j.Data) + if err != nil { + return NewError(ErrInvalidValue, "Cannot decode data") + } + } + + if j.Signature != "" { + sigBytes, err := hexutil.Decode(j.Signature) + if err != nil || len(sigBytes) != signatureLength { + return NewError(ErrInvalidSignature, "Cannot decode signature") + } + r.Signature = new(Signature) + r.idAddr = r.Addr() + copy(r.Signature[:], sigBytes) + } + return nil +} + +// UnmarshalJSON takes a JSON structure stored in a byte array and populates the Request object +// Implements json.Unmarshaler interface +func (r *Request) UnmarshalJSON(rawData []byte) error { + var requestJSON updateRequestJSON + if err := json.Unmarshal(rawData, &requestJSON); err != nil { + return err + } + return r.fromJSON(&requestJSON) +} + +// MarshalJSON takes an update request and encodes it as a JSON structure into a byte array +// Implements json.Marshaler interface +func (r *Request) MarshalJSON() (rawData []byte, err error) { + var signatureString, dataString string + if r.Signature != nil { + signatureString = hexutil.Encode(r.Signature[:]) + } + if r.data != nil { + dataString = hexutil.Encode(r.data) + } + + requestJSON := &updateRequestJSON{ + ID: r.ID, + ProtocolVersion: r.Header.Version, + Data: dataString, + Signature: signatureString, + } + + return json.Marshal(requestJSON) +} diff --git a/swarm/storage/feed/request_test.go b/swarm/storage/feed/request_test.go new file mode 100644 index 000000000..f5de32b74 --- /dev/null +++ b/swarm/storage/feed/request_test.go @@ -0,0 +1,312 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feed + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/feed/lookup" +) + +func areEqualJSON(s1, s2 string) (bool, error) { + //credit for the trick: turtlemonvh https://gist.github.com/turtlemonvh/e4f7404e28387fadb8ad275a99596f67 + var o1 interface{} + var o2 interface{} + + err := json.Unmarshal([]byte(s1), &o1) + if err != nil { + return false, fmt.Errorf("Error mashalling string 1 :: %s", err.Error()) + } + err = json.Unmarshal([]byte(s2), &o2) + if err != nil { + return false, fmt.Errorf("Error mashalling string 2 :: %s", err.Error()) + } + + return reflect.DeepEqual(o1, o2), nil +} + +// TestEncodingDecodingUpdateRequests ensures that requests are serialized properly +// while also checking cryptographically that only the owner of a feed can update it. +func TestEncodingDecodingUpdateRequests(t *testing.T) { + + charlie := newCharlieSigner() //Charlie + bob := newBobSigner() //Bob + + // Create a feed to our good guy Charlie's name + topic, _ := NewTopic("a good topic name", nil) + firstRequest := NewFirstRequest(topic) + firstRequest.User = charlie.Address() + + // We now encode the create message to simulate we send it over the wire + messageRawData, err := firstRequest.MarshalJSON() + if err != nil { + t.Fatalf("Error encoding first feed update request: %s", err) + } + + // ... the message arrives and is decoded... + var recoveredFirstRequest Request + if err := recoveredFirstRequest.UnmarshalJSON(messageRawData); err != nil { + t.Fatalf("Error decoding first feed update request: %s", err) + } + + // ... but verification should fail because it is not signed! + if err := recoveredFirstRequest.Verify(); err == nil { + t.Fatal("Expected Verify to fail since the message is not signed") + } + + // We now assume that the feed ypdate was created and propagated. + + const expectedSignature = "0x7235b27a68372ddebcf78eba48543fa460864b0b0e99cb533fcd3664820e603312d29426dd00fb39628f5299480a69bf6e462838d78de49ce0704c754c9deb2601" + const expectedJSON = `{"feed":{"topic":"0x6120676f6f6420746f706963206e616d65000000000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}` + + //Put together an unsigned update request that we will serialize to send it to the signer. 
+ data := []byte("This hour's update: Swarm 99.0 has been released!") + request := &Request{ + Update: Update{ + ID: ID{ + Epoch: lookup.Epoch{ + Time: 1000, + Level: 1, + }, + Feed: firstRequest.Update.Feed, + }, + data: data, + }, + } + + messageRawData, err = request.MarshalJSON() + if err != nil { + t.Fatalf("Error encoding update request: %s", err) + } + + equalJSON, err := areEqualJSON(string(messageRawData), expectedJSON) + if err != nil { + t.Fatalf("Error decoding update request JSON: %s", err) + } + if !equalJSON { + t.Fatalf("Received a different JSON message. Expected %s, got %s", expectedJSON, string(messageRawData)) + } + + // now the encoded message messageRawData is sent over the wire and arrives to the signer + + //Attempt to extract an UpdateRequest out of the encoded message + var recoveredRequest Request + if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil { + t.Fatalf("Error decoding update request: %s", err) + } + + //sign the request and see if it matches our predefined signature above. + if err := recoveredRequest.Sign(charlie); err != nil { + t.Fatalf("Error signing request: %s", err) + } + + compareByteSliceToExpectedHex(t, "signature", recoveredRequest.Signature[:], expectedSignature) + + // mess with the signature and see what happens. To alter the signature, we briefly decode it as JSON + // to alter the signature field. + var j updateRequestJSON + if err := json.Unmarshal([]byte(expectedJSON), &j); err != nil { + t.Fatal("Error unmarshalling test json, check expectedJSON constant") + } + j.Signature = "Certainly not a signature" + corruptMessage, _ := json.Marshal(j) // encode the message with the bad signature + var corruptRequest Request + if err = corruptRequest.UnmarshalJSON(corruptMessage); err == nil { + t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature") + } + + // Now imagine Bob wants to create an update of his own about the same feed, + // signing a message with his private key + if err := request.Sign(bob); err != nil { + t.Fatalf("Error signing: %s", err) + } + + // Now Bob encodes the message to send it over the wire... + messageRawData, err = request.MarshalJSON() + if err != nil { + t.Fatalf("Error encoding message:%s", err) + } + + // ... the message arrives to our Swarm node and it is decoded. 
+ recoveredRequest = Request{} + if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil { + t.Fatalf("Error decoding message:%s", err) + } + + // Before checking what happened with Bob's update, let's see what would happen if we mess + // with the signature big time to see if Verify catches it + savedSignature := *recoveredRequest.Signature // save the signature for later + binary.LittleEndian.PutUint64(recoveredRequest.Signature[5:], 556845463424) // write some random data to break the signature + if err = recoveredRequest.Verify(); err == nil { + t.Fatal("Expected Verify to fail on corrupt signature") + } + + // restore the Bob's signature from corruption + *recoveredRequest.Signature = savedSignature + + // Now the signature is not corrupt + if err = recoveredRequest.Verify(); err != nil { + t.Fatal(err) + } + + // Reuse object and sign with our friend Charlie's private key + if err := recoveredRequest.Sign(charlie); err != nil { + t.Fatalf("Error signing with the correct private key: %s", err) + } + + // And now, Verify should work since this update now belongs to Charlie + if err = recoveredRequest.Verify(); err != nil { + t.Fatalf("Error verifying that Charlie, can sign a reused request object:%s", err) + } + + // mess with the lookup key to make sure Verify fails: + recoveredRequest.Time = 77999 // this will alter the lookup key + if err = recoveredRequest.Verify(); err == nil { + t.Fatalf("Expected Verify to fail since the lookup key has been altered") + } +} + +func getTestRequest() *Request { + return &Request{ + Update: *getTestFeedUpdate(), + } +} + +func TestUpdateChunkSerializationErrorChecking(t *testing.T) { + + // Test that parseUpdate fails if the chunk is too small + var r Request + if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1+signatureLength)); err == nil { + t.Fatalf("Expected request.fromChunk to fail when chunkData contains less than %d bytes", minimumUpdateDataLength) + } + + r = *getTestRequest() + + _, err := r.toChunk() + if err == nil { + t.Fatal("Expected request.toChunk to fail when there is no data") + } + r.data = []byte("Al bien hacer jamás le falta premio") // put some arbitrary length data + _, err = r.toChunk() + if err == nil { + t.Fatal("expected request.toChunk to fail when there is no signature") + } + + charlie := newCharlieSigner() + if err := r.Sign(charlie); err != nil { + t.Fatalf("error signing:%s", err) + } + + chunk, err := r.toChunk() + if err != nil { + t.Fatalf("error creating update chunk:%s", err) + } + + compareByteSliceToExpectedHex(t, "chunk", chunk.Data(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019416c206269656e206861636572206a616dc3a173206c652066616c7461207072656d696f5a0ffe0bc27f207cd5b00944c8b9cee93e08b89b5ada777f123ac535189333f174a6a4ca2f43a92c4a477a49d774813c36ce8288552c58e6205b0ac35d0507eb00") + + var recovered Request + recovered.fromChunk(chunk.Address(), chunk.Data()) + if !reflect.DeepEqual(recovered, r) { + t.Fatal("Expected recovered feed update request to equal the original one") + } +} + +// check that signature address matches update signer address +func TestReverse(t *testing.T) { + + epoch := lookup.Epoch{ + Time: 7888, + Level: 6, + } + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key + signer := newAliceSigner() + + // set up rpc and create feeds handler + _, _, teardownTest, err := 
setupTest(timeProvider, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + + topic, _ := NewTopic("Cervantes quotes", nil) + fd := Feed{ + Topic: topic, + User: signer.Address(), + } + + data := []byte("Donde una puerta se cierra, otra se abre") + + request := new(Request) + request.Feed = fd + request.Epoch = epoch + request.data = data + + // generate a chunk key for this request + key := request.Addr() + + if err = request.Sign(signer); err != nil { + t.Fatal(err) + } + + chunk, err := request.toChunk() + if err != nil { + t.Fatal(err) + } + + // check that we can recover the owner account from the update chunk's signature + var checkUpdate Request + if err := checkUpdate.fromChunk(chunk.Address(), chunk.Data()); err != nil { + t.Fatal(err) + } + checkdigest, err := checkUpdate.GetDigest() + if err != nil { + t.Fatal(err) + } + recoveredAddr, err := getUserAddr(checkdigest, *checkUpdate.Signature) + if err != nil { + t.Fatalf("Retrieve address from signature fail: %v", err) + } + originalAddr := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) + + // check that the metadata retrieved from the chunk matches what we gave it + if recoveredAddr != originalAddr { + t.Fatalf("addresses dont match: %x != %x", originalAddr, recoveredAddr) + } + + if !bytes.Equal(key[:], chunk.Address()[:]) { + t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Address()) + } + if epoch != checkUpdate.Epoch { + t.Fatalf("Expected epoch to be '%s', was '%s'", epoch.String(), checkUpdate.Epoch.String()) + } + if !bytes.Equal(data, checkUpdate.data) { + t.Fatalf("Expected data '%x', was '%x'", data, checkUpdate.data) + } +} diff --git a/swarm/storage/feed/sign.go b/swarm/storage/feed/sign.go new file mode 100644 index 000000000..5f0ea0b33 --- /dev/null +++ b/swarm/storage/feed/sign.go @@ -0,0 +1,75 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
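To complement the Request API above, here is a hypothetical end-to-end sketch (not part of the patch) of creating, signing and verifying a feed update through the exported API only; the topic name and payload are placeholders, and a throwaway key stands in for a real account.

// Hypothetical example, assuming the import paths introduced by this patch.
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
)

func main() {
	key, err := crypto.GenerateKey() // throwaway key, placeholder for a real account
	if err != nil {
		log.Fatal(err)
	}
	signer := feed.NewGenericSigner(key)

	topic, _ := feed.NewTopic("example-topic", nil) // placeholder topic
	request := feed.NewFirstRequest(topic)          // first epoch, derived from TimestampProvider.Now()

	request.SetData([]byte("hello, feeds"))      // placeholder payload
	if err := request.Sign(signer); err != nil { // serializes the update, signs the digest, sets Feed.User
		log.Fatal(err)
	}
	if err := request.Verify(); err != nil { // recovers the signer address and checks the update address
		log.Fatal(err)
	}

	j, _ := request.MarshalJSON() // JSON form of the signed request
	fmt.Println(string(j))
}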
+ +package feed + +import ( + "crypto/ecdsa" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +const signatureLength = 65 + +// Signature is an alias for a static byte array with the size of a signature +type Signature [signatureLength]byte + +// Signer signs feed update payloads +type Signer interface { + Sign(common.Hash) (Signature, error) + Address() common.Address +} + +// GenericSigner implements the Signer interface +// It is the vanilla signer that probably should be used in most cases +type GenericSigner struct { + PrivKey *ecdsa.PrivateKey + address common.Address +} + +// NewGenericSigner builds a signer that will sign everything with the provided private key +func NewGenericSigner(privKey *ecdsa.PrivateKey) *GenericSigner { + return &GenericSigner{ + PrivKey: privKey, + address: crypto.PubkeyToAddress(privKey.PublicKey), + } +} + +// Sign signs the supplied data +// It wraps the ethereum crypto.Sign() method +func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error) { + signaturebytes, err := crypto.Sign(data.Bytes(), s.PrivKey) + if err != nil { + return + } + copy(signature[:], signaturebytes) + return +} + +// Address returns the public key of the signer's private key +func (s *GenericSigner) Address() common.Address { + return s.address +} + +// getUserAddr extracts the address of the feed update signer +func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) { + pub, err := crypto.SigToPub(digest.Bytes(), signature[:]) + if err != nil { + return common.Address{}, err + } + return crypto.PubkeyToAddress(*pub), nil +} diff --git a/swarm/storage/feed/testutil.go b/swarm/storage/feed/testutil.go new file mode 100644 index 000000000..b513fa1f2 --- /dev/null +++ b/swarm/storage/feed/testutil.go @@ -0,0 +1,71 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feed + +import ( + "context" + "fmt" + "path/filepath" + "sync" + + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +const ( + testDbDirName = "feeds" +) + +type TestHandler struct { + *Handler +} + +func (t *TestHandler) Close() { + t.chunkStore.Close() +} + +type mockNetFetcher struct{} + +func (m *mockNetFetcher) Request(ctx context.Context, hopCount uint8) { +} +func (m *mockNetFetcher) Offer(ctx context.Context, source *enode.ID) { +} + +func newFakeNetFetcher(context.Context, storage.Address, *sync.Map) storage.NetFetcher { + return &mockNetFetcher{} +} + +// NewTestHandler creates Handler object to be used for testing purposes. 
+func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) { + path := filepath.Join(datadir, testDbDirName) + fh := NewHandler(params) + localstoreparams := storage.NewDefaultLocalStoreParams() + localstoreparams.Init(path) + localStore, err := storage.NewLocalStore(localstoreparams, nil) + if err != nil { + return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err) + } + localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm))) + localStore.Validators = append(localStore.Validators, fh) + netStore, err := storage.NewNetStore(localStore, nil) + if err != nil { + return nil, err + } + netStore.NewNetFetcherFunc = newFakeNetFetcher + fh.SetStore(netStore) + return &TestHandler{fh}, nil +} diff --git a/swarm/storage/feed/timestampprovider.go b/swarm/storage/feed/timestampprovider.go new file mode 100644 index 000000000..072dc3a48 --- /dev/null +++ b/swarm/storage/feed/timestampprovider.go @@ -0,0 +1,84 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
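The test helper above suggests the following hypothetical test setup (not part of the patch): it pins the package clock so epochs are deterministic and builds a Handler over a throwaway local store. The fixedClock type and directory handling are illustrative assumptions.

// Hypothetical test sketch, assuming the import paths introduced by this patch.
package feed_test

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/ethereum/go-ethereum/swarm/storage/feed"
)

// fixedClock is an assumed helper: Now() always returns the same instant,
// which makes epoch calculations deterministic in tests.
type fixedClock struct{ t uint64 }

func (c fixedClock) Now() feed.Timestamp { return feed.Timestamp{Time: c.t} }

func TestHandlerSetup(t *testing.T) {
	feed.TimestampProvider = fixedClock{t: 1533799046} // pin the package clock

	datadir, err := ioutil.TempDir("", "feed-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(datadir)

	fh, err := feed.NewTestHandler(datadir, &feed.HandlerParams{})
	if err != nil {
		t.Fatal(err)
	}
	defer fh.Close()
	// fh.Handler is now backed by a local store and ready for Update/Lookup calls.
}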
+ +package feed + +import ( + "encoding/binary" + "encoding/json" + "time" +) + +// TimestampProvider sets the time source of the feeds package +var TimestampProvider timestampProvider = NewDefaultTimestampProvider() + +// Timestamp encodes a point in time as a Unix epoch +type Timestamp struct { + Time uint64 `json:"time"` // Unix epoch timestamp, in seconds +} + +// 8 bytes uint64 Time +const timestampLength = 8 + +// timestampProvider interface describes a source of timestamp information +type timestampProvider interface { + Now() Timestamp // returns the current timestamp information +} + +// binaryGet populates the timestamp structure from the given byte slice +func (t *Timestamp) binaryGet(data []byte) error { + if len(data) != timestampLength { + return NewError(ErrCorruptData, "timestamp data has the wrong size") + } + t.Time = binary.LittleEndian.Uint64(data[:8]) + return nil +} + +// binaryPut Serializes a Timestamp to a byte slice +func (t *Timestamp) binaryPut(data []byte) error { + if len(data) != timestampLength { + return NewError(ErrCorruptData, "timestamp data has the wrong size") + } + binary.LittleEndian.PutUint64(data, t.Time) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface +func (t *Timestamp) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &t.Time) +} + +// MarshalJSON implements the json.Marshaller interface +func (t *Timestamp) MarshalJSON() ([]byte, error) { + return json.Marshal(t.Time) +} + +// DefaultTimestampProvider is a TimestampProvider that uses system time +// as time source +type DefaultTimestampProvider struct { +} + +// NewDefaultTimestampProvider creates a system clock based timestamp provider +func NewDefaultTimestampProvider() *DefaultTimestampProvider { + return &DefaultTimestampProvider{} +} + +// Now returns the current time according to this provider +func (dtp *DefaultTimestampProvider) Now() Timestamp { + return Timestamp{ + Time: uint64(time.Now().Unix()), + } +} diff --git a/swarm/storage/feed/topic.go b/swarm/storage/feed/topic.go new file mode 100644 index 000000000..43a7b4ba4 --- /dev/null +++ b/swarm/storage/feed/topic.go @@ -0,0 +1,105 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feed + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/ethereum/go-ethereum/common/bitutil" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// TopicLength establishes the max length of a topic string +const TopicLength = storage.AddressLength + +// Topic represents what a feed is about +type Topic [TopicLength]byte + +// ErrTopicTooLong is returned when creating a topic with a name/related content too long +var ErrTopicTooLong = fmt.Errorf("Topic is too long. 
Max length is %d", TopicLength) + +// NewTopic creates a new topic from a provided name and "related content" byte array, +// merging the two together. +// If relatedContent or name are longer than TopicLength, they will be truncated and an error returned +// name can be an empty string +// relatedContent can be nil +func NewTopic(name string, relatedContent []byte) (topic Topic, err error) { + if relatedContent != nil { + contentLength := len(relatedContent) + if contentLength > TopicLength { + contentLength = TopicLength + err = ErrTopicTooLong + } + copy(topic[:], relatedContent[:contentLength]) + } + nameBytes := []byte(name) + nameLength := len(nameBytes) + if nameLength > TopicLength { + nameLength = TopicLength + err = ErrTopicTooLong + } + bitutil.XORBytes(topic[:], topic[:], nameBytes[:nameLength]) + return topic, err +} + +// Hex will return the topic encoded as an hex string +func (t *Topic) Hex() string { + return hexutil.Encode(t[:]) +} + +// FromHex will parse a hex string into this Topic instance +func (t *Topic) FromHex(hex string) error { + bytes, err := hexutil.Decode(hex) + if err != nil || len(bytes) != len(t) { + return NewErrorf(ErrInvalidValue, "Cannot decode topic") + } + copy(t[:], bytes) + return nil +} + +// Name will try to extract the topic name out of the Topic +func (t *Topic) Name(relatedContent []byte) string { + nameBytes := *t + if relatedContent != nil { + contentLength := len(relatedContent) + if contentLength > TopicLength { + contentLength = TopicLength + } + bitutil.XORBytes(nameBytes[:], t[:], relatedContent[:contentLength]) + } + z := bytes.IndexByte(nameBytes[:], 0) + if z < 0 { + z = TopicLength + } + return string(nameBytes[:z]) + +} + +// UnmarshalJSON implements the json.Unmarshaller interface +func (t *Topic) UnmarshalJSON(data []byte) error { + var hex string + json.Unmarshal(data, &hex) + return t.FromHex(hex) +} + +// MarshalJSON implements the json.Marshaller interface +func (t *Topic) MarshalJSON() ([]byte, error) { + return json.Marshal(t.Hex()) +} diff --git a/swarm/storage/feed/topic_test.go b/swarm/storage/feed/topic_test.go new file mode 100644 index 000000000..0403204f7 --- /dev/null +++ b/swarm/storage/feed/topic_test.go @@ -0,0 +1,50 @@ +package feed + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +func TestTopic(t *testing.T) { + related, _ := hexutil.Decode("0xabcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789") + topicName := "test-topic" + topic, _ := NewTopic(topicName, related) + hex := topic.Hex() + expectedHex := "0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789" + if hex != expectedHex { + t.Fatalf("Expected %s, got %s", expectedHex, hex) + } + + var topic2 Topic + topic2.FromHex(hex) + if topic2 != topic { + t.Fatal("Expected recovered topic to be equal to original one") + } + + if topic2.Name(related) != topicName { + t.Fatal("Retrieved name does not match") + } + + bytes, err := topic2.MarshalJSON() + if err != nil { + t.Fatal(err) + } + expectedJSON := `"0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789"` + equal, err := areEqualJSON(expectedJSON, string(bytes)) + if err != nil { + t.Fatal(err) + } + if !equal { + t.Fatalf("Expected JSON to be %s, got %s", expectedJSON, string(bytes)) + } + + err = topic2.UnmarshalJSON(bytes) + if err != nil { + t.Fatal(err) + } + if topic2 != topic { + t.Fatal("Expected recovered topic to be equal to original one") + } + +} diff --git a/swarm/storage/feed/update.go b/swarm/storage/feed/update.go new 
file mode 100644 index 000000000..627a537d1 --- /dev/null +++ b/swarm/storage/feed/update.go @@ -0,0 +1,132 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package feed + +import ( + "fmt" + "strconv" + + "github.com/ethereum/go-ethereum/swarm/chunk" +) + +// ProtocolVersion defines the current version of the protocol that will be included in each update message +const ProtocolVersion uint8 = 0 + +const headerLength = 8 + +// Header defines a update message header including a protocol version byte +type Header struct { + Version uint8 // Protocol version + Padding [headerLength - 1]uint8 // reserved for future use +} + +// Update encapsulates the information sent as part of a feed update +type Update struct { + Header Header // + ID // Feed Update identifying information + data []byte // actual data payload +} + +const minimumUpdateDataLength = idLength + headerLength + 1 +const maxUpdateDataLength = chunk.DefaultSize - signatureLength - idLength - headerLength + +// binaryPut serializes the feed update information into the given slice +func (r *Update) binaryPut(serializedData []byte) error { + datalength := len(r.data) + if datalength == 0 { + return NewError(ErrInvalidValue, "a feed update must contain data") + } + + if datalength > maxUpdateDataLength { + return NewErrorf(ErrInvalidValue, "feed update data is too big (length=%d). Max length=%d", datalength, maxUpdateDataLength) + } + + if len(serializedData) != r.binaryLength() { + return NewErrorf(ErrInvalidValue, "slice passed to putBinary must be of exact size. 
Expected %d bytes", r.binaryLength()) + } + + var cursor int + // serialize Header + serializedData[cursor] = r.Header.Version + copy(serializedData[cursor+1:headerLength], r.Header.Padding[:headerLength-1]) + cursor += headerLength + + // serialize ID + if err := r.ID.binaryPut(serializedData[cursor : cursor+idLength]); err != nil { + return err + } + cursor += idLength + + // add the data + copy(serializedData[cursor:], r.data) + cursor += datalength + + return nil +} + +// binaryLength returns the expected number of bytes this structure will take to encode +func (r *Update) binaryLength() int { + return idLength + headerLength + len(r.data) +} + +// binaryGet populates this instance from the information contained in the passed byte slice +func (r *Update) binaryGet(serializedData []byte) error { + if len(serializedData) < minimumUpdateDataLength { + return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a feed update chunk", minimumUpdateDataLength) + } + dataLength := len(serializedData) - idLength - headerLength + // at this point we can be satisfied that we have the correct data length to read + + var cursor int + + // deserialize Header + r.Header.Version = serializedData[cursor] // extract the protocol version + copy(r.Header.Padding[:headerLength-1], serializedData[cursor+1:headerLength]) // extract the padding + cursor += headerLength + + if err := r.ID.binaryGet(serializedData[cursor : cursor+idLength]); err != nil { + return err + } + cursor += idLength + + data := serializedData[cursor : cursor+dataLength] + cursor += dataLength + + // now that all checks have passed, copy data into structure + r.data = make([]byte, dataLength) + copy(r.data, data) + + return nil + +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (r *Update) FromValues(values Values, data []byte) error { + r.data = data + version, _ := strconv.ParseUint(values.Get("protocolVersion"), 10, 32) + r.Header.Version = uint8(version) + return r.ID.FromValues(values) +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (r *Update) AppendValues(values Values) []byte { + r.ID.AppendValues(values) + values.Set("protocolVersion", fmt.Sprintf("%d", r.Header.Version)) + return r.data +} diff --git a/swarm/storage/feed/update_test.go b/swarm/storage/feed/update_test.go new file mode 100644 index 000000000..4007223c6 --- /dev/null +++ b/swarm/storage/feed/update_test.go @@ -0,0 +1,50 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
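As a quick illustration of the Topic helpers introduced earlier in this patch (NewTopic, Name), here is a hypothetical sketch, not part of the patch; the related-content hash is a made-up placeholder.

// Hypothetical example, assuming the import paths introduced by this patch.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
)

func main() {
	related, _ := hexutil.Decode("0xabcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789") // placeholder

	// NewTopic XORs the name into the related-content prefix, so both pieces
	// of information fit into a single 32-byte Topic.
	topic, err := feed.NewTopic("weather", related)
	if err != nil {
		fmt.Println("name or related content truncated:", err)
	}
	fmt.Println("topic:", topic.Hex())

	// Knowing the related content, the readable name can be recovered.
	fmt.Println("name:", topic.Name(related)) // "weather"
}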
+ +package feed + +import ( + "testing" +) + +func getTestFeedUpdate() *Update { + return &Update{ + ID: *getTestID(), + data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"), + } +} + +func TestUpdateSerializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestFeedUpdate(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f") +} + +func TestUpdateLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestFeedUpdate()) + // Test fail if update is too big + update := getTestFeedUpdate() + update.data = make([]byte, maxUpdateDataLength+100) + serialized := make([]byte, update.binaryLength()) + if err := update.binaryPut(serialized); err == nil { + t.Fatal("Expected update.binaryPut to fail since update is too big") + } + + // test fail if data is empty or nil + update.data = nil + serialized = make([]byte, update.binaryLength()) + if err := update.binaryPut(serialized); err == nil { + t.Fatal("Expected update.binaryPut to fail since data is empty") + } +} diff --git a/swarm/storage/feeds/binaryserializer.go b/swarm/storage/feeds/binaryserializer.go deleted file mode 100644 index ce146571b..000000000 --- a/swarm/storage/feeds/binaryserializer.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package feeds - -import "github.com/ethereum/go-ethereum/common/hexutil" - -type binarySerializer interface { - binaryPut(serializedData []byte) error - binaryLength() int - binaryGet(serializedData []byte) error -} - -// Values interface represents a string key-value store -// useful for building query strings -type Values interface { - Get(key string) string - Set(key, value string) -} - -type valueSerializer interface { - FromValues(values Values) error - AppendValues(values Values) -} - -// Hex serializes the structure and converts it to a hex string -func Hex(bin binarySerializer) string { - b := make([]byte, bin.binaryLength()) - bin.binaryPut(b) - return hexutil.Encode(b) -} diff --git a/swarm/storage/feeds/binaryserializer_test.go b/swarm/storage/feeds/binaryserializer_test.go deleted file mode 100644 index 0c81e7f18..000000000 --- a/swarm/storage/feeds/binaryserializer_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package feeds - -import ( - "encoding/json" - "reflect" - "testing" - - "github.com/ethereum/go-ethereum/common/hexutil" -) - -// KV mocks a key value store -type KV map[string]string - -func (kv KV) Get(key string) string { - return kv[key] -} -func (kv KV) Set(key, value string) { - kv[key] = value -} - -func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) { - if hexutil.Encode(actualValue) != expectedHex { - t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue)) - } -} - -func testBinarySerializerRecovery(t *testing.T, bin binarySerializer, expectedHex string) { - name := reflect.TypeOf(bin).Elem().Name() - serialized := make([]byte, bin.binaryLength()) - if err := bin.binaryPut(serialized); err != nil { - t.Fatalf("%s.binaryPut error when trying to serialize structure: %s", name, err) - } - - compareByteSliceToExpectedHex(t, name, serialized, expectedHex) - - recovered := reflect.New(reflect.TypeOf(bin).Elem()).Interface().(binarySerializer) - if err := recovered.binaryGet(serialized); err != nil { - t.Fatalf("%s.binaryGet error when trying to deserialize structure: %s", name, err) - } - - if !reflect.DeepEqual(bin, recovered) { - t.Fatalf("Expected that the recovered %s equals the marshalled %s", name, name) - } - - serializedWrongLength := make([]byte, 1) - copy(serializedWrongLength[:], serialized) - if err := recovered.binaryGet(serializedWrongLength); err == nil { - t.Fatalf("Expected %s.binaryGet to fail since data is too small", name) - } -} - -func testBinarySerializerLengthCheck(t *testing.T, bin binarySerializer) { - name := reflect.TypeOf(bin).Elem().Name() - // make a slice that is too small to contain the metadata - serialized := make([]byte, bin.binaryLength()-1) - - if err := bin.binaryPut(serialized); err == nil { - t.Fatalf("Expected %s.binaryPut to fail, since target slice is too small", name) - } -} - -func testValueSerializer(t *testing.T, v valueSerializer, expected KV) { - name := reflect.TypeOf(v).Elem().Name() - kv := make(KV) - - v.AppendValues(kv) - if !reflect.DeepEqual(expected, kv) { - expj, _ := json.Marshal(expected) - gotj, _ := json.Marshal(kv) - t.Fatalf("Expected %s.AppendValues to return %s, got %s", name, string(expj), string(gotj)) - } - - recovered := reflect.New(reflect.TypeOf(v).Elem()).Interface().(valueSerializer) - err := recovered.FromValues(kv) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(recovered, v) { - t.Fatalf("Expected recovered %s to be the same", name) - } -} diff --git a/swarm/storage/feeds/cacheentry.go b/swarm/storage/feeds/cacheentry.go deleted file mode 100644 index e40037688..000000000 --- a/swarm/storage/feeds/cacheentry.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package feeds - -import ( - "bytes" - "context" - "time" - - "github.com/ethereum/go-ethereum/swarm/storage" -) - -const ( - hasherCount = 8 - feedsHashAlgorithm = storage.SHA3Hash - defaultRetrieveTimeout = 100 * time.Millisecond -) - -// cacheEntry caches the last known update of a specific Swarm feed. -type cacheEntry struct { - Update - *bytes.Reader - lastKey storage.Address -} - -// implements storage.LazySectionReader -func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) { - return int64(len(r.Update.data)), nil -} - -//returns the feed's topic -func (r *cacheEntry) Topic() Topic { - return r.Feed.Topic -} diff --git a/swarm/storage/feeds/doc.go b/swarm/storage/feeds/doc.go deleted file mode 100644 index d1edf5d6d..000000000 --- a/swarm/storage/feeds/doc.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Package feeds defines Swarm Feeds. - -Swarm Feeds allows a user to build an update feed about a particular topic -without resorting to ENS on each update. -The update scheme is built on swarm chunks with chunk keys following -a predictable, versionable pattern. - -A Feed is tied to a unique identifier that is deterministically generated out of -the chosen topic. - -A Feed is defined as the series of updates of a specific user about a particular topic - -Actual data updates are also made in the form of swarm chunks. The keys -of the updates are the hash of a concatenation of properties as follows: - -updateAddr = H(Feed, Epoch ID) -where H is the SHA3 hash function -Feed is the combination of Topic and the user address -Epoch ID is a time slot. See the lookup package for more information. - -A user looking up a the latest update in a Feed only needs to know the Topic -and the other user's address. - -The Feed Update data is: -updatedata = Feed|Epoch|data - -The full update data that goes in the chunk payload is: -updatedata|sign(updatedata) - -Structure Summary: - -Request: Feed Update with signature - Update: headers + data - Header: Protocol version and reserved for future use placeholders - ID: Information about how to locate a specific update - Feed: Represents a user's series of publications about a specific Topic - Topic: Item that the updates are about - User: User who updates the Feed - Epoch: time slot where the update is stored - -*/ -package feeds diff --git a/swarm/storage/feeds/error.go b/swarm/storage/feeds/error.go deleted file mode 100644 index 7751a9f19..000000000 --- a/swarm/storage/feeds/error.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. 
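The chunk addressing scheme described in doc.go above (updateAddr = H(Feed, Epoch ID)) presumably applies unchanged to the renamed feed package; the following hypothetical sketch (not part of the patch) computes that address through the exported ID type. Topic name, user address and epoch are placeholders.

// Hypothetical example, assuming the import paths introduced by this patch.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
)

func main() {
	topic, _ := feed.NewTopic("world news report, every hour", nil)
	id := feed.ID{
		Feed: feed.Feed{
			Topic: topic,
			User:  common.HexToAddress("0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c"), // placeholder
		},
		Epoch: lookup.Epoch{Time: 1000, Level: 1}, // placeholder time slot
	}
	// Addr hashes the serialized Feed and Epoch, yielding the chunk lookup key.
	fmt.Println("updateAddr:", id.Addr().Hex())
}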
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package feeds - -import ( - "fmt" -) - -const ( - ErrInit = iota - ErrNotFound - ErrIO - ErrUnauthorized - ErrInvalidValue - ErrDataOverflow - ErrNothingToReturn - ErrCorruptData - ErrInvalidSignature - ErrNotSynced - ErrPeriodDepth - ErrCnt -) - -// Error is a the typed error object used for Swarm feeds -type Error struct { - code int - err string -} - -// Error implements the error interface -func (e *Error) Error() string { - return e.err -} - -// Code returns the error code -// Error codes are enumerated in the error.go file within the feeds package -func (e *Error) Code() int { - return e.code -} - -// NewError creates a new Swarm feeds Error object with the specified code and custom error message -func NewError(code int, s string) error { - if code < 0 || code >= ErrCnt { - panic("no such error code!") - } - r := &Error{ - err: s, - } - switch code { - case ErrNotFound, ErrIO, ErrUnauthorized, ErrInvalidValue, ErrDataOverflow, ErrNothingToReturn, ErrInvalidSignature, ErrNotSynced, ErrPeriodDepth, ErrCorruptData: - r.code = code - } - return r -} - -// NewErrorf is a convenience version of NewError that incorporates printf-style formatting -func NewErrorf(code int, format string, args ...interface{}) error { - return NewError(code, fmt.Sprintf(format, args...)) -} diff --git a/swarm/storage/feeds/feed.go b/swarm/storage/feeds/feed.go deleted file mode 100644 index d5b262555..000000000 --- a/swarm/storage/feeds/feed.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package feeds - -import ( - "hash" - "unsafe" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// Feed represents a particular user's stream of updates on a topic -type Feed struct { - Topic Topic `json:"topic"` - User common.Address `json:"user"` -} - -// Feed layout: -// TopicLength bytes -// userAddr common.AddressLength bytes -const feedLength = TopicLength + common.AddressLength - -// mapKey calculates a unique id for this feed. 
Used by the cache map in `Handler` -func (f *Feed) mapKey() uint64 { - serializedData := make([]byte, feedLength) - f.binaryPut(serializedData) - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - hasher.Write(serializedData) - hash := hasher.Sum(nil) - return *(*uint64)(unsafe.Pointer(&hash[0])) -} - -// binaryPut serializes this feed instance into the provided slice -func (f *Feed) binaryPut(serializedData []byte) error { - if len(serializedData) != feedLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize feed. Expected %d, got %d", feedLength, len(serializedData)) - } - var cursor int - copy(serializedData[cursor:cursor+TopicLength], f.Topic[:TopicLength]) - cursor += TopicLength - - copy(serializedData[cursor:cursor+common.AddressLength], f.User[:]) - cursor += common.AddressLength - - return nil -} - -// binaryLength returns the expected size of this structure when serialized -func (f *Feed) binaryLength() int { - return feedLength -} - -// binaryGet restores the current instance from the information contained in the passed slice -func (f *Feed) binaryGet(serializedData []byte) error { - if len(serializedData) != feedLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to read feed. Expected %d, got %d", feedLength, len(serializedData)) - } - - var cursor int - copy(f.Topic[:], serializedData[cursor:cursor+TopicLength]) - cursor += TopicLength - - copy(f.User[:], serializedData[cursor:cursor+common.AddressLength]) - cursor += common.AddressLength - - return nil -} - -// Hex serializes the feed to a hex string -func (f *Feed) Hex() string { - serializedData := make([]byte, feedLength) - f.binaryPut(serializedData) - return hexutil.Encode(serializedData) -} - -// FromValues deserializes this instance from a string key-value store -// useful to parse query strings -func (f *Feed) FromValues(values Values) (err error) { - topic := values.Get("topic") - if topic != "" { - if err := f.Topic.FromHex(values.Get("topic")); err != nil { - return err - } - } else { // see if the user set name and relatedcontent - name := values.Get("name") - relatedContent, _ := hexutil.Decode(values.Get("relatedcontent")) - if len(relatedContent) > 0 { - if len(relatedContent) < storage.AddressLength { - return NewErrorf(ErrInvalidValue, "relatedcontent field must be a hex-encoded byte array exactly %d bytes long", storage.AddressLength) - } - relatedContent = relatedContent[:storage.AddressLength] - } - f.Topic, err = NewTopic(name, relatedContent) - if err != nil { - return err - } - } - f.User = common.HexToAddress(values.Get("user")) - return nil -} - -// AppendValues serializes this structure into the provided string key-value store -// useful to build query strings -func (f *Feed) AppendValues(values Values) { - values.Set("topic", f.Topic.Hex()) - values.Set("user", f.User.Hex()) -} diff --git a/swarm/storage/feeds/feed_test.go b/swarm/storage/feeds/feed_test.go deleted file mode 100644 index 7806e0ad7..000000000 --- a/swarm/storage/feeds/feed_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
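Feed.FromValues above accepts either an explicit topic or a name/relatedcontent pair; the following hypothetical sketch (not part of the patch; all parameter values are placeholders) shows the latter path, roughly as an HTTP handler would exercise it.

// Hypothetical example, assuming the import paths introduced by this patch.
package main

import (
	"fmt"
	"net/url"

	"github.com/ethereum/go-ethereum/swarm/storage/feed"
)

func main() {
	values := make(url.Values)
	values.Set("name", "weather")
	values.Set("relatedcontent", "0xabcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789")
	values.Set("user", "0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c")

	var fd feed.Feed
	if err := fd.FromValues(values); err != nil { // derives the topic from name + relatedcontent
		fmt.Println("bad parameters:", err)
		return
	}
	fmt.Println("topic:", fd.Topic.Hex(), "user:", fd.User.Hex())
}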
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . -package feeds - -import ( - "testing" -) - -func getTestFeed() *Feed { - topic, _ := NewTopic("world news report, every hour", nil) - return &Feed{ - Topic: topic, - User: newCharlieSigner().Address(), - } -} - -func TestFeedSerializerDeserializer(t *testing.T) { - testBinarySerializerRecovery(t, getTestFeed(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781c") -} - -func TestFeedSerializerLengthCheck(t *testing.T) { - testBinarySerializerLengthCheck(t, getTestFeed()) -} diff --git a/swarm/storage/feeds/handler.go b/swarm/storage/feeds/handler.go deleted file mode 100644 index 2c5261614..000000000 --- a/swarm/storage/feeds/handler.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -// Handler is the API for feeds -// It enables creating, updating, syncing and retrieving feed updates and their data -package feeds - -import ( - "bytes" - "context" - "fmt" - "sync" - "time" - - "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" - - "github.com/ethereum/go-ethereum/swarm/log" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -type Handler struct { - chunkStore *storage.NetStore - HashSize int - cache map[uint64]*cacheEntry - cacheLock sync.RWMutex - storeTimeout time.Duration - queryMaxPeriods uint32 -} - -// HandlerParams pass parameters to the Handler constructor NewHandler -// Signer and TimestampProvider are mandatory parameters -type HandlerParams struct { -} - -// hashPool contains a pool of ready hashers -var hashPool sync.Pool - -// init initializes the package and hashPool -func init() { - hashPool = sync.Pool{ - New: func() interface{} { - return storage.MakeHashFunc(feedsHashAlgorithm)() - }, - } -} - -// NewHandler creates a new Swarm feeds API -func NewHandler(params *HandlerParams) *Handler { - fh := &Handler{ - cache: make(map[uint64]*cacheEntry), - } - - for i := 0; i < hasherCount; i++ { - hashfunc := storage.MakeHashFunc(feedsHashAlgorithm)() - if fh.HashSize == 0 { - fh.HashSize = hashfunc.Size() - } - hashPool.Put(hashfunc) - } - - return fh -} - -// SetStore sets the store backend for the Swarm feeds API -func (h *Handler) SetStore(store *storage.NetStore) { - h.chunkStore = store -} - -// Validate is a chunk validation method -// If it looks like a feed update, the chunk address is checked against the userAddr of the update's signature -// It implements the storage.ChunkValidator interface -func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { - dataLength := len(data) - if dataLength < minimumSignedUpdateLength { - return false - } - - // check if it is a properly formatted update chunk with - // valid signature and proof of ownership of the feed it is trying - // to update - - // First, deserialize the chunk - var r Request - if err := r.fromChunk(chunkAddr, data); err != nil { - log.Debug("Invalid feed update chunk", "addr", chunkAddr.Hex(), "err", err.Error()) - return false - } - - // Verify signatures and that the signer actually owns the feed - // If it fails, it means either the signature is not valid, data is corrupted - // or someone is trying to update someone else's feed. - if err := r.Verify(); err != nil { - log.Debug("Invalid feed update signature", "err", err) - return false - } - - return true -} - -// GetContent retrieves the data payload of the last synced update of the feed -func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) { - if feed == nil { - return nil, nil, NewError(ErrInvalidValue, "feed is nil") - } - feedUpdate := h.get(feed) - if feedUpdate == nil { - return nil, nil, NewError(ErrNotFound, "feed update not cached") - } - return feedUpdate.lastKey, feedUpdate.data, nil -} - -// NewRequest prepares a Request structure with all the necessary information to -// just add the desired data and sign it. 
-// The resulting structure can then be signed and passed to Handler.Update to be verified and sent -func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request, err error) { - if feed == nil { - return nil, NewError(ErrInvalidValue, "feed cannot be nil") - } - - now := TimestampProvider.Now().Time - request = new(Request) - request.Header.Version = ProtocolVersion - - query := NewQueryLatest(feed, lookup.NoClue) - - feedUpdate, err := h.Lookup(ctx, query) - if err != nil { - if err.(*Error).code != ErrNotFound { - return nil, err - } - // not finding updates means that there is a network error - // or that the feed really does not have updates - } - - request.Feed = *feed - - // if we already have an update, then find next epoch - if feedUpdate != nil { - request.Epoch = lookup.GetNextEpoch(feedUpdate.Epoch, now) - } else { - request.Epoch = lookup.GetFirstEpoch(now) - } - - return request, nil -} - -// Lookup retrieves a specific or latest feed update -// Lookup works differently depending on the configuration of `query` -// See the `query` documentation and helper functions: -// `NewQueryLatest` and `NewQuery` -func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) { - - timeLimit := query.TimeLimit - if timeLimit == 0 { // if time limit is set to zero, the user wants to get the latest update - timeLimit = TimestampProvider.Now().Time - } - - if query.Hint == lookup.NoClue { // try to use our cache - entry := h.get(&query.Feed) - if entry != nil && entry.Epoch.Time <= timeLimit { // avoid bad hints - query.Hint = entry.Epoch - } - } - - // we can't look for anything without a store - if h.chunkStore == nil { - return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups") - } - - var id ID - id.Feed = query.Feed - var readCount int - - // Invoke the lookup engine. 
- // The callback will be called every time the lookup algorithm needs to guess - requestPtr, err := lookup.Lookup(timeLimit, query.Hint, func(epoch lookup.Epoch, now uint64) (interface{}, error) { - readCount++ - id.Epoch = epoch - ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout) - defer cancel() - - chunk, err := h.chunkStore.Get(ctx, id.Addr()) - if err != nil { // TODO: check for catastrophic errors other than chunk not found - return nil, nil - } - - var request Request - if err := request.fromChunk(chunk.Address(), chunk.Data()); err != nil { - return nil, nil - } - if request.Time <= timeLimit { - return &request, nil - } - return nil, nil - }) - if err != nil { - return nil, err - } - - log.Info(fmt.Sprintf("Feed lookup finished in %d lookups", readCount)) - - request, _ := requestPtr.(*Request) - if request == nil { - return nil, NewError(ErrNotFound, "no feed updates found") - } - return h.updateCache(request) - -} - -// update feed updates cache with specified content -func (h *Handler) updateCache(request *Request) (*cacheEntry, error) { - - updateAddr := request.Addr() - log.Trace("feed cache update", "topic", request.Topic.Hex(), "updateaddr", updateAddr, "epoch time", request.Epoch.Time, "epoch level", request.Epoch.Level) - - feedUpdate := h.get(&request.Feed) - if feedUpdate == nil { - feedUpdate = &cacheEntry{} - h.set(&request.Feed, feedUpdate) - } - - // update our rsrcs entry map - feedUpdate.lastKey = updateAddr - feedUpdate.Update = request.Update - feedUpdate.Reader = bytes.NewReader(feedUpdate.data) - return feedUpdate, nil -} - -// Update publishes a feed update -// Note that a feed update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. -// This results in a max payload of `maxUpdateDataLength` (check update.go for more details) -// An error will be returned if the total length of the chunk payload will exceed this limit. -// Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update -// on the network. -func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) { - - // we can't update anything without a store - if h.chunkStore == nil { - return nil, NewError(ErrInit, "Call Handler.SetStore() before updating") - } - - feedUpdate := h.get(&r.Feed) - if feedUpdate != nil && feedUpdate.Epoch.Equals(r.Epoch) { // This is the only cheap check we can do for sure - return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist") - } - - chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big - if err != nil { - return nil, err - } - - // send the chunk - h.chunkStore.Put(ctx, chunk) - log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data()) - // update our feed updates map cache entry if the new update is older than the one we have, if we have it. 
- if feedUpdate != nil && r.Epoch.After(feedUpdate.Epoch) { - feedUpdate.Epoch = r.Epoch - feedUpdate.data = make([]byte, len(r.data)) - feedUpdate.lastKey = r.idAddr - copy(feedUpdate.data, r.data) - feedUpdate.Reader = bytes.NewReader(feedUpdate.data) - } - - return r.idAddr, nil -} - -// Retrieves the feed update cache value for the given nameHash -func (h *Handler) get(feed *Feed) *cacheEntry { - mapKey := feed.mapKey() - h.cacheLock.RLock() - defer h.cacheLock.RUnlock() - feedUpdate := h.cache[mapKey] - return feedUpdate -} - -// Sets the feed update cache value for the given feed -func (h *Handler) set(feed *Feed, feedUpdate *cacheEntry) { - mapKey := feed.mapKey() - h.cacheLock.Lock() - defer h.cacheLock.Unlock() - h.cache[mapKey] = feedUpdate -} diff --git a/swarm/storage/feeds/handler_test.go b/swarm/storage/feeds/handler_test.go deleted file mode 100644 index b35a7c1c1..000000000 --- a/swarm/storage/feeds/handler_test.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package feeds - -import ( - "bytes" - "context" - "flag" - "fmt" - "io/ioutil" - "os" - "testing" - "time" - - "github.com/ethereum/go-ethereum/crypto" - - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/swarm/chunk" - "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" -) - -var ( - loglevel = flag.Int("loglevel", 3, "loglevel") - startTime = Timestamp{ - Time: uint64(4200), - } - cleanF func() - subtopicName = "føø.bar" - hashfunc = storage.MakeHashFunc(storage.DefaultHash) -) - -func init() { - flag.Parse() - log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) -} - -// simulated timeProvider -type fakeTimeProvider struct { - currentTime uint64 -} - -func (f *fakeTimeProvider) Tick() { - f.currentTime++ -} - -func (f *fakeTimeProvider) Set(time uint64) { - f.currentTime = time -} - -func (f *fakeTimeProvider) FastForward(offset uint64) { - f.currentTime += offset -} - -func (f *fakeTimeProvider) Now() Timestamp { - return Timestamp{ - Time: f.currentTime, - } -} - -// make updates and retrieve them based on periods and versions -func TestFeedsHandler(t *testing.T) { - - // make fake timeProvider - clock := &fakeTimeProvider{ - currentTime: startTime.Time, // clock starts at t=4200 - } - - // signer containing private key - signer := newAliceSigner() - - feedsHandler, datadir, teardownTest, err := setupTest(clock, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - // create a new feed - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - topic, _ := NewTopic("Mess with Swarm feeds code and see what ghost catches you", nil) - fd := Feed{ - Topic: 
topic, - User: signer.Address(), - } - - // data for updates: - updates := []string{ - "blinky", // t=4200 - "pinky", // t=4242 - "inky", // t=4284 - "clyde", // t=4285 - } - - request := NewFirstRequest(fd.Topic) // this timestamps the update at t = 4200 (start time) - chunkAddress := make(map[string]storage.Address) - data := []byte(updates[0]) - request.SetData(data) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - chunkAddress[updates[0]], err = feedsHandler.Update(ctx, request) - if err != nil { - t.Fatal(err) - } - - // move the clock ahead 21 seconds - clock.FastForward(21) // t=4221 - - request, err = feedsHandler.NewRequest(ctx, &request.Feed) // this timestamps the update at t = 4221 - if err != nil { - t.Fatal(err) - } - if request.Epoch.Base() != 0 || request.Epoch.Level != lookup.HighestLevel-1 { - t.Fatalf("Suggested epoch BaseTime should be 0 and Epoch level should be %d", lookup.HighestLevel-1) - } - - request.Epoch.Level = lookup.HighestLevel // force level 25 instead of 24 to make it fail - data = []byte(updates[1]) - request.SetData(data) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request) - if err == nil { - t.Fatal("Expected update to fail since an update in this epoch already exists") - } - - // move the clock ahead 21 seconds - clock.FastForward(21) // t=4242 - request, err = feedsHandler.NewRequest(ctx, &request.Feed) - if err != nil { - t.Fatal(err) - } - request.SetData(data) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request) - if err != nil { - t.Fatal(err) - } - - // move the clock ahead 42 seconds - clock.FastForward(42) // t=4284 - request, err = feedsHandler.NewRequest(ctx, &request.Feed) - if err != nil { - t.Fatal(err) - } - data = []byte(updates[2]) - request.SetData(data) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - chunkAddress[updates[2]], err = feedsHandler.Update(ctx, request) - if err != nil { - t.Fatal(err) - } - - // move the clock ahead 1 second - clock.FastForward(1) // t=4285 - request, err = feedsHandler.NewRequest(ctx, &request.Feed) - if err != nil { - t.Fatal(err) - } - if request.Epoch.Base() != 0 || request.Epoch.Level != 22 { - t.Fatalf("Expected epoch base time to be %d, got %d. 
Expected epoch level to be %d, got %d", 0, request.Epoch.Base(), 22, request.Epoch.Level) - } - data = []byte(updates[3]) - request.SetData(data) - - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - chunkAddress[updates[3]], err = feedsHandler.Update(ctx, request) - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second) - feedsHandler.Close() - - // check we can retrieve the updates after close - clock.FastForward(2000) // t=6285 - - feedParams := &HandlerParams{} - - feedsHandler2, err := NewTestHandler(datadir, feedParams) - if err != nil { - t.Fatal(err) - } - - update2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue)) - if err != nil { - t.Fatal(err) - } - - // last update should be "clyde" - if !bytes.Equal(update2.data, []byte(updates[len(updates)-1])) { - t.Fatalf("feed update data was %v, expected %v", string(update2.data), updates[len(updates)-1]) - } - if update2.Level != 22 { - t.Fatalf("feed update epoch level was %d, expected 22", update2.Level) - } - if update2.Base() != 0 { - t.Fatalf("feed update epoch base time was %d, expected 0", update2.Base()) - } - log.Debug("Latest lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data) - - // specific point in time - update, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue)) - if err != nil { - t.Fatal(err) - } - // check data - if !bytes.Equal(update.data, []byte(updates[2])) { - t.Fatalf("feed update data (historical) was %v, expected %v", string(update2.data), updates[2]) - } - log.Debug("Historical lookup", "epoch base time", update2.Base(), "epoch level", update2.Level, "data", update2.data) - - // beyond the first should yield an error - update, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue)) - if err == nil { - t.Fatalf("expected previous to fail, returned epoch %s data %v", update.Epoch.String(), update.data) - } - -} - -const Day = 60 * 60 * 24 -const Year = Day * 365 -const Month = Day * 30 - -func generateData(x uint64) []byte { - return []byte(fmt.Sprintf("%d", x)) -} - -func TestSparseUpdates(t *testing.T) { - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - rh, datadir, teardownTest, err := setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - defer os.RemoveAll(datadir) - - // create a new feed - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - topic, _ := NewTopic("Very slow updates", nil) - fd := Feed{ - Topic: topic, - User: signer.Address(), - } - - // publish one update every 5 years since Unix 0 until today - today := uint64(1533799046) - var epoch lookup.Epoch - var lastUpdateTime uint64 - for T := uint64(0); T < today; T += 5 * Year { - request := NewFirstRequest(fd.Topic) - request.Epoch = lookup.GetNextEpoch(epoch, T) - request.data = generateData(T) // this generates some data that depends on T, so we can check later - request.Sign(signer) - if err != nil { - t.Fatal(err) - } - - if _, err := rh.Update(ctx, request); err != nil { - t.Fatal(err) - } - epoch = request.Epoch - lastUpdateTime = T - } - - query := NewQuery(&fd, today, lookup.NoClue) - - _, err = rh.Lookup(ctx, query) - if err != nil { - t.Fatal(err) - } - - _, content, err := rh.GetContent(&fd) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(generateData(lastUpdateTime), content) { - 
t.Fatalf("Expected to recover last written value %d, got %s", lastUpdateTime, string(content)) - } - - // lookup the closest update to 35*Year + 6* Month (~ June 2005): - // it should find the update we put on 35*Year, since we were updating every 5 years. - - query.TimeLimit = 35*Year + 6*Month - - _, err = rh.Lookup(ctx, query) - if err != nil { - t.Fatal(err) - } - - _, content, err = rh.GetContent(&fd) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(generateData(35*Year), content) { - t.Fatalf("Expected to recover %d, got %s", 35*Year, string(content)) - } -} - -func TestValidator(t *testing.T) { - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key. Alice will be the good girl - signer := newAliceSigner() - - // set up sim timeProvider - rh, _, teardownTest, err := setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - // create new feed - topic, _ := NewTopic(subtopicName, nil) - fd := Feed{ - Topic: topic, - User: signer.Address(), - } - mr := NewFirstRequest(fd.Topic) - - // chunk with address - data := []byte("foo") - mr.SetData(data) - if err := mr.Sign(signer); err != nil { - t.Fatalf("sign fail: %v", err) - } - - chunk, err := mr.toChunk() - if err != nil { - t.Fatal(err) - } - if !rh.Validate(chunk.Address(), chunk.Data()) { - t.Fatal("Chunk validator fail on update chunk") - } - - address := chunk.Address() - // mess with the address - address[0] = 11 - address[15] = 99 - - if rh.Validate(address, chunk.Data()) { - t.Fatal("Expected Validate to fail with false chunk address") - } -} - -// tests that the content address validator correctly checks the data -// tests that feed update chunks are passed through content address validator -// there is some redundancy in this test as it also tests content addressed chunks, -// which should be evaluated as invalid chunks by this validator -func TestValidatorInStore(t *testing.T) { - - // make fake timeProvider - TimestampProvider = &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - // set up localstore - datadir, err := ioutil.TempDir("", "storage-testfeedsvalidator") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(datadir) - - handlerParams := storage.NewDefaultLocalStoreParams() - handlerParams.Init(datadir) - store, err := storage.NewLocalStore(handlerParams, nil) - if err != nil { - t.Fatal(err) - } - - // set up Swarm feeds handler and add is as a validator to the localstore - fhParams := &HandlerParams{} - fh := NewHandler(fhParams) - store.Validators = append(store.Validators, fh) - - // create content addressed chunks, one good, one faulty - chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2) - goodChunk := chunks[0] - badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data()) - - topic, _ := NewTopic("xyzzy", nil) - fd := Feed{ - Topic: topic, - User: signer.Address(), - } - - // create a feed update chunk with correct publickey - id := ID{ - Epoch: lookup.Epoch{Time: 42, - Level: 1, - }, - Feed: fd, - } - - updateAddr := id.Addr() - data := []byte("bar") - - r := new(Request) - r.idAddr = updateAddr - r.Update.ID = id - r.data = data - - r.Sign(signer) - - uglyChunk, err := r.toChunk() - if err != nil { - t.Fatal(err) - } - - // put the chunks in the store and check their error status - err = store.Put(context.Background(), goodChunk) - if err == nil { - t.Fatal("expected error on good content 
address chunk with feed update validator only, but got nil") - } - err = store.Put(context.Background(), badChunk) - if err == nil { - t.Fatal("expected error on bad content address chunk with feed update validator only, but got nil") - } - err = store.Put(context.Background(), uglyChunk) - if err != nil { - t.Fatalf("expected no error on feed update chunk with feed update validator only, but got: %s", err) - } -} - -// create rpc and feeds Handler -func setupTest(timeProvider timestampProvider, signer Signer) (fh *TestHandler, datadir string, teardown func(), err error) { - - var fsClean func() - var rpcClean func() - cleanF = func() { - if fsClean != nil { - fsClean() - } - if rpcClean != nil { - rpcClean() - } - } - - // temp datadir - datadir, err = ioutil.TempDir("", "fh") - if err != nil { - return nil, "", nil, err - } - fsClean = func() { - os.RemoveAll(datadir) - } - - TimestampProvider = timeProvider - fhParams := &HandlerParams{} - fh, err = NewTestHandler(datadir, fhParams) - return fh, datadir, cleanF, err -} - -func newAliceSigner() *GenericSigner { - privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") - return NewGenericSigner(privKey) -} - -func newBobSigner() *GenericSigner { - privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca") - return NewGenericSigner(privKey) -} - -func newCharlieSigner() *GenericSigner { - privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca") - return NewGenericSigner(privKey) -} - -func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) { - chunk, err := rh.chunkStore.Get(context.TODO(), addr) - if err != nil { - return nil, err - } - var r Request - if err := r.fromChunk(addr, chunk.Data()); err != nil { - return nil, err - } - return r.data, nil -} diff --git a/swarm/storage/feeds/id.go b/swarm/storage/feeds/id.go deleted file mode 100644 index dd813ae89..000000000 --- a/swarm/storage/feeds/id.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package feeds - -import ( - "fmt" - "hash" - "strconv" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" - - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// ID uniquely identifies an update on the network. 
-type ID struct { - Feed `json:"feed"` - lookup.Epoch `json:"epoch"` -} - -// ID layout: -// Feed feedLength bytes -// Epoch EpochLength -const idLength = feedLength + lookup.EpochLength - -// Addr calculates the feed update chunk address corresponding to this ID -func (u *ID) Addr() (updateAddr storage.Address) { - serializedData := make([]byte, idLength) - var cursor int - u.Feed.binaryPut(serializedData[cursor : cursor+feedLength]) - cursor += feedLength - - eid := u.Epoch.ID() - copy(serializedData[cursor:cursor+lookup.EpochLength], eid[:]) - - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - hasher.Write(serializedData) - return hasher.Sum(nil) -} - -// binaryPut serializes this instance into the provided slice -func (u *ID) binaryPut(serializedData []byte) error { - if len(serializedData) != idLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize ID. Expected %d, got %d", idLength, len(serializedData)) - } - var cursor int - if err := u.Feed.binaryPut(serializedData[cursor : cursor+feedLength]); err != nil { - return err - } - cursor += feedLength - - epochBytes, err := u.Epoch.MarshalBinary() - if err != nil { - return err - } - copy(serializedData[cursor:cursor+lookup.EpochLength], epochBytes[:]) - cursor += lookup.EpochLength - - return nil -} - -// binaryLength returns the expected size of this structure when serialized -func (u *ID) binaryLength() int { - return idLength -} - -// binaryGet restores the current instance from the information contained in the passed slice -func (u *ID) binaryGet(serializedData []byte) error { - if len(serializedData) != idLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to read ID. Expected %d, got %d", idLength, len(serializedData)) - } - - var cursor int - if err := u.Feed.binaryGet(serializedData[cursor : cursor+feedLength]); err != nil { - return err - } - cursor += feedLength - - if err := u.Epoch.UnmarshalBinary(serializedData[cursor : cursor+lookup.EpochLength]); err != nil { - return err - } - cursor += lookup.EpochLength - - return nil -} - -// FromValues deserializes this instance from a string key-value store -// useful to parse query strings -func (u *ID) FromValues(values Values) error { - level, _ := strconv.ParseUint(values.Get("level"), 10, 32) - u.Epoch.Level = uint8(level) - u.Epoch.Time, _ = strconv.ParseUint(values.Get("time"), 10, 64) - - if u.Feed.User == (common.Address{}) { - return u.Feed.FromValues(values) - } - return nil -} - -// AppendValues serializes this structure into the provided string key-value store -// useful to build query strings -func (u *ID) AppendValues(values Values) { - values.Set("level", fmt.Sprintf("%d", u.Epoch.Level)) - values.Set("time", fmt.Sprintf("%d", u.Epoch.Time)) - u.Feed.AppendValues(values) -} diff --git a/swarm/storage/feeds/id_test.go b/swarm/storage/feeds/id_test.go deleted file mode 100644 index 2ef12e891..000000000 --- a/swarm/storage/feeds/id_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package feeds - -import ( - "testing" - - "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" -) - -func getTestID() *ID { - return &ID{ - Feed: *getTestFeed(), - Epoch: lookup.GetFirstEpoch(1000), - } -} - -func TestIDAddr(t *testing.T) { - id := getTestID() - updateAddr := id.Addr() - compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8b24583ec293e085f4c78aaee66d1bc5abfb8b4233304d14a349afa57af2a783") -} - -func TestIDSerializer(t *testing.T) { - testBinarySerializerRecovery(t, getTestID(), 
"0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019") -} - -func TestIDLengthCheck(t *testing.T) { - testBinarySerializerLengthCheck(t, getTestID()) -} diff --git a/swarm/storage/feeds/lookup/epoch.go b/swarm/storage/feeds/lookup/epoch.go deleted file mode 100644 index bafe95477..000000000 --- a/swarm/storage/feeds/lookup/epoch.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package lookup - -import ( - "encoding/binary" - "errors" - "fmt" -) - -// Epoch represents a time slot at a particular frequency level -type Epoch struct { - Time uint64 `json:"time"` // Time stores the time at which the update or lookup takes place - Level uint8 `json:"level"` // Level indicates the frequency level as the exponent of a power of 2 -} - -// EpochID is a unique identifier for an Epoch, based on its level and base time. -type EpochID [8]byte - -// EpochLength stores the serialized binary length of an Epoch -const EpochLength = 8 - -// MaxTime contains the highest possible time value an Epoch can handle -const MaxTime uint64 = (1 << 56) - 1 - -// Base returns the base time of the Epoch -func (e *Epoch) Base() uint64 { - return getBaseTime(e.Time, e.Level) -} - -// ID Returns the unique identifier of this epoch -func (e *Epoch) ID() EpochID { - base := e.Base() - var id EpochID - binary.LittleEndian.PutUint64(id[:], base) - id[7] = e.Level - return id -} - -// MarshalBinary implements the encoding.BinaryMarshaller interface -func (e *Epoch) MarshalBinary() (data []byte, err error) { - b := make([]byte, 8) - binary.LittleEndian.PutUint64(b[:], e.Time) - b[7] = e.Level - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaller interface -func (e *Epoch) UnmarshalBinary(data []byte) error { - if len(data) != EpochLength { - return errors.New("Invalid data unmarshalling Epoch") - } - b := make([]byte, 8) - copy(b, data) - e.Level = b[7] - b[7] = 0 - e.Time = binary.LittleEndian.Uint64(b) - return nil -} - -// After returns true if this epoch occurs later or exactly at the other epoch. -func (e *Epoch) After(epoch Epoch) bool { - if e.Time == epoch.Time { - return e.Level < epoch.Level - } - return e.Time >= epoch.Time -} - -// Equals compares two epochs and returns true if they refer to the same time period. -func (e *Epoch) Equals(epoch Epoch) bool { - return e.Level == epoch.Level && e.Base() == epoch.Base() -} - -// String implements the Stringer interface. 
-func (e *Epoch) String() string { - return fmt.Sprintf("Epoch{Time:%d, Level:%d}", e.Time, e.Level) -} diff --git a/swarm/storage/feeds/lookup/epoch_test.go b/swarm/storage/feeds/lookup/epoch_test.go deleted file mode 100644 index 70bfd836a..000000000 --- a/swarm/storage/feeds/lookup/epoch_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package lookup_test - -import ( - "testing" - - "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" -) - -func TestMarshallers(t *testing.T) { - - for i := uint64(1); i < lookup.MaxTime; i *= 3 { - e := lookup.Epoch{ - Time: i, - Level: uint8(i % 20), - } - b, err := e.MarshalBinary() - if err != nil { - t.Fatal(err) - } - var e2 lookup.Epoch - if err := e2.UnmarshalBinary(b); err != nil { - t.Fatal(err) - } - if e != e2 { - t.Fatal("Expected unmarshalled epoch to be equal to marshalled onet.Fatal(err)") - } - } - -} - -func TestAfter(t *testing.T) { - a := lookup.Epoch{ - Time: 5, - Level: 3, - } - b := lookup.Epoch{ - Time: 6, - Level: 3, - } - c := lookup.Epoch{ - Time: 6, - Level: 4, - } - - if !b.After(a) { - t.Fatal("Expected 'after' to be true, got false") - } - - if b.After(b) { - t.Fatal("Expected 'after' to be false when both epochs are identical, got true") - } - - if !b.After(c) { - t.Fatal("Expected 'after' to be true when both epochs have the same time but the level is lower in the first one, but got false") - } - -} diff --git a/swarm/storage/feeds/lookup/lookup.go b/swarm/storage/feeds/lookup/lookup.go deleted file mode 100644 index 2f862d81c..000000000 --- a/swarm/storage/feeds/lookup/lookup.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -/* -Package lookup defines feed lookup algorithms and provides tools to place updates -so they can be found -*/ -package lookup - -const maxuint64 = ^uint64(0) - -// LowestLevel establishes the frequency resolution of the lookup algorithm as a power of 2. -const LowestLevel uint8 = 0 // default is 0 (1 second) - -// HighestLevel sets the lowest frequency the algorithm will operate at, as a power of 2. -// 25 -> 2^25 equals to roughly one year. 
-const HighestLevel = 25 // default is 25 (~1 year) - -// DefaultLevel sets what level will be chosen to search when there is no hint -const DefaultLevel = HighestLevel - -//Algorithm is the function signature of a lookup algorithm -type Algorithm func(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) - -// Lookup finds the update with the highest timestamp that is smaller or equal than 'now' -// It takes a hint which should be the epoch where the last known update was -// If you don't know in what epoch the last update happened, simply submit lookup.NoClue -// read() will be called on each lookup attempt -// Returns an error only if read() returns an error -// Returns nil if an update was not found -var Lookup Algorithm = FluzCapacitorAlgorithm - -// ReadFunc is a handler called by Lookup each time it attempts to find a value -// It should return if a value is not found -// It should return if a value is found, but its timestamp is higher than "now" -// It should only return an error in case the handler wants to stop the -// lookup process entirely. -type ReadFunc func(epoch Epoch, now uint64) (interface{}, error) - -// NoClue is a hint that can be provided when the Lookup caller does not have -// a clue about where the last update may be -var NoClue = Epoch{} - -// getBaseTime returns the epoch base time of the given -// time and level -func getBaseTime(t uint64, level uint8) uint64 { - return t & (maxuint64 << level) -} - -// Hint creates a hint based only on the last known update time -func Hint(last uint64) Epoch { - return Epoch{ - Time: last, - Level: DefaultLevel, - } -} - -// GetNextLevel returns the frequency level a next update should be placed at, provided where -// the last update was and what time it is now. -// This is the first nonzero bit of the XOR of 'last' and 'now', counting from the highest significant bit -// but limited to not return a level that is smaller than the last-1 -func GetNextLevel(last Epoch, now uint64) uint8 { - // First XOR the last epoch base time with the current clock. - // This will set all the common most significant bits to zero. - mix := (last.Base() ^ now) - - // Then, make sure we stop the below loop before one level below the current, by setting - // that level's bit to 1. - // If the next level is lower than the current one, it must be exactly level-1 and not lower. - mix |= (1 << (last.Level - 1)) - - // if the last update was more than 2^highestLevel seconds ago, choose the highest level - if mix > (maxuint64 >> (64 - HighestLevel - 1)) { - return HighestLevel - } - - // set up a mask to scan for nonzero bits, starting at the highest level - mask := uint64(1 << (HighestLevel)) - - for i := uint8(HighestLevel); i > LowestLevel; i-- { - if mix&mask != 0 { // if we find a nonzero bit, this is the level the next update should be at. - return i - } - mask = mask >> 1 // move our bit one position to the right - } - return 0 -} - -// GetNextEpoch returns the epoch where the next update should be located -// according to where the previous update was -// and what time it is now. -func GetNextEpoch(last Epoch, now uint64) Epoch { - if last == NoClue { - return GetFirstEpoch(now) - } - level := GetNextLevel(last, now) - return Epoch{ - Level: level, - Time: now, - } -} - -// GetFirstEpoch returns the epoch where the first update should be located -// based on what time it is now. 
-func GetFirstEpoch(now uint64) Epoch { - return Epoch{Level: HighestLevel, Time: now} -} - -var worstHint = Epoch{Time: 0, Level: 63} - -// FluzCapacitorAlgorithm works by narrowing the epoch search area if an update is found -// going back and forth in time -// First, it will attempt to find an update where it should be now if the hint was -// really the last update. If that lookup fails, then the last update must be either the hint itself -// or the epochs right below. If however, that lookup succeeds, then the update must be -// that one or within the epochs right below. -// see the guide for a more graphical representation -func FluzCapacitorAlgorithm(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) { - var lastFound interface{} - var epoch Epoch - if hint == NoClue { - hint = worstHint - } - - t := now - - for { - epoch = GetNextEpoch(hint, t) - value, err = read(epoch, now) - if err != nil { - return nil, err - } - if value != nil { - lastFound = value - if epoch.Level == LowestLevel || epoch.Equals(hint) { - return value, nil - } - hint = epoch - continue - } - if epoch.Base() == hint.Base() { - if lastFound != nil { - return lastFound, nil - } - // we have reached the hint itself - if hint == worstHint { - return nil, nil - } - // check it out - value, err = read(hint, now) - if err != nil { - return nil, err - } - if value != nil { - return value, nil - } - // bad hint. - epoch = hint - hint = worstHint - } - base := epoch.Base() - if base == 0 { - return nil, nil - } - t = base - 1 - } -} diff --git a/swarm/storage/feeds/lookup/lookup_test.go b/swarm/storage/feeds/lookup/lookup_test.go deleted file mode 100644 index 7d5014608..000000000 --- a/swarm/storage/feeds/lookup/lookup_test.go +++ /dev/null @@ -1,414 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
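The lookup package defined above is deliberately storage-agnostic: Lookup only ever reaches data through the ReadFunc callback, and publishers place updates by chaining GetNextEpoch. Below is a minimal sketch against a toy in-memory map, much like the Store type in the tests that follow; the timestamps and integer payloads are arbitrary, and a real ReadFunc would also discard updates whose own timestamp exceeds now, as the feeds handler does.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup"
)

func main() {
	// toy in-memory "store", keyed by the epoch's unique ID
	store := make(map[lookup.EpochID]uint64)

	// place three updates one hour apart, chaining epochs with GetNextEpoch
	// exactly as a publisher would
	var last lookup.Epoch
	times := []uint64{1533799046, 1533802646, 1533806246}
	for i, t := range times {
		epoch := lookup.GetNextEpoch(last, t)
		store[epoch.ID()] = uint64(i) // the payload here is just the update index
		last = epoch
	}

	// ReadFunc: return (nil, nil) when nothing is stored at the probed epoch,
	// so the algorithm keeps narrowing the search instead of aborting
	read := func(epoch lookup.Epoch, now uint64) (interface{}, error) {
		if payload, ok := store[epoch.ID()]; ok {
			return payload, nil
		}
		return nil, nil
	}

	// find the most recent update as of the last timestamp, with no hint
	value, err := lookup.Lookup(times[len(times)-1], lookup.NoClue, read)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("latest payload:", value) // should print the last index written, 2
}

Returning an error from the callback is reserved for conditions that must abort the whole search, which is why a simple miss is reported as (nil, nil).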
- -package lookup_test - -import ( - "fmt" - "math/rand" - "testing" - - "github.com/ethereum/go-ethereum/swarm/log" - "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" -) - -type Data struct { - Payload uint64 - Time uint64 -} - -type Store map[lookup.EpochID]*Data - -func write(store Store, epoch lookup.Epoch, value *Data) { - log.Debug("Write: %d-%d, value='%d'\n", epoch.Base(), epoch.Level, value.Payload) - store[epoch.ID()] = value -} - -func update(store Store, last lookup.Epoch, now uint64, value *Data) lookup.Epoch { - epoch := lookup.GetNextEpoch(last, now) - - write(store, epoch, value) - - return epoch -} - -const Day = 60 * 60 * 24 -const Year = Day * 365 -const Month = Day * 30 - -func makeReadFunc(store Store, counter *int) lookup.ReadFunc { - return func(epoch lookup.Epoch, now uint64) (interface{}, error) { - *counter++ - data := store[epoch.ID()] - var valueStr string - if data != nil { - valueStr = fmt.Sprintf("%d", data.Payload) - } - log.Debug("Read: %d-%d, value='%s'\n", epoch.Base(), epoch.Level, valueStr) - if data != nil && data.Time <= now { - return data, nil - } - return nil, nil - } -} - -func TestLookup(t *testing.T) { - - store := make(Store) - readCount := 0 - readFunc := makeReadFunc(store, &readCount) - - // write an update every month for 12 months 3 years ago and then silence for two years - now := uint64(1533799046) - var epoch lookup.Epoch - - var lastData *Data - for i := uint64(0); i < 12; i++ { - t := uint64(now - Year*3 + i*Month) - data := Data{ - Payload: t, //our "payload" will be the timestamp itself. - Time: t, - } - epoch = update(store, epoch, t, &data) - lastData = &data - } - - // try to get the last value - - value, err := lookup.Lookup(now, lookup.NoClue, readFunc) - if err != nil { - t.Fatal(err) - } - - readCountWithoutHint := readCount - - if value != lastData { - t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) - } - - // reset the read count for the next test - readCount = 0 - // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update - value, err = lookup.Lookup(now, epoch, readFunc) - if err != nil { - t.Fatal(err) - } - - if value != lastData { - t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) - } - - if readCount > readCountWithoutHint { - t.Fatalf("Expected lookup to complete with fewer or same reads than %d since we provided a hint. Did %d reads.", readCountWithoutHint, readCount) - } - - // try to get an intermediate value - // if we look for a value in now - Year*3 + 6*Month, we should get that value - // Since the "payload" is the timestamp itself, we can check this. - - expectedTime := now - Year*3 + 6*Month - - value, err = lookup.Lookup(expectedTime, lookup.NoClue, readFunc) - if err != nil { - t.Fatal(err) - } - - data, ok := value.(*Data) - - if !ok { - t.Fatal("Expected value to contain data") - } - - if data.Time != expectedTime { - t.Fatalf("Expected value timestamp to be %d, got %d", data.Time, expectedTime) - } - -} - -func TestOneUpdateAt0(t *testing.T) { - - store := make(Store) - readCount := 0 - - readFunc := makeReadFunc(store, &readCount) - now := uint64(1533903729) - - var epoch lookup.Epoch - data := Data{ - Payload: 79, - Time: 0, - } - update(store, epoch, 0, &data) - - value, err := lookup.Lookup(now, lookup.NoClue, readFunc) - if err != nil { - t.Fatal(err) - } - if value != &data { - t.Fatalf("Expected lookup to return the last written value: %v. 
Got %v", data, value) - } -} - -// Tests the update is found even when a bad hint is given -func TestBadHint(t *testing.T) { - - store := make(Store) - readCount := 0 - - readFunc := makeReadFunc(store, &readCount) - now := uint64(1533903729) - - var epoch lookup.Epoch - data := Data{ - Payload: 79, - Time: 0, - } - - // place an update for t=1200 - update(store, epoch, 1200, &data) - - // come up with some evil hint - badHint := lookup.Epoch{ - Level: 18, - Time: 1200000000, - } - - value, err := lookup.Lookup(now, badHint, readFunc) - if err != nil { - t.Fatal(err) - } - if value != &data { - t.Fatalf("Expected lookup to return the last written value: %v. Got %v", data, value) - } -} - -func TestLookupFail(t *testing.T) { - - store := make(Store) - readCount := 0 - - readFunc := makeReadFunc(store, &readCount) - now := uint64(1533903729) - - // don't write anything and try to look up. - // we're testing we don't get stuck in a loop - - value, err := lookup.Lookup(now, lookup.NoClue, readFunc) - if err != nil { - t.Fatal(err) - } - if value != nil { - t.Fatal("Expected value to be nil, since the update should've failed") - } - - expectedReads := now/(1< readCountWithoutHint { - t.Fatalf("Expected lookup to complete with fewer or equal reads than %d since we provided a hint. Did %d reads.", readCountWithoutHint, readCount) - } - - for i := uint64(0); i <= 994; i++ { - T := uint64(now - 1000 + i) // update every second for the last 1000 seconds - value, err := lookup.Lookup(T, lookup.NoClue, readFunc) - if err != nil { - t.Fatal(err) - } - data, _ := value.(*Data) - if data == nil { - t.Fatalf("Expected lookup to return %d, got nil", T) - } - if data.Payload != T { - t.Fatalf("Expected lookup to return %d, got %d", T, data.Time) - } - } -} - -func TestSparseUpdates(t *testing.T) { - - store := make(Store) - readCount := 0 - readFunc := makeReadFunc(store, &readCount) - - // write an update every 5 years 3 times starting in Jan 1st 1970 and then silence - - now := uint64(1533799046) - var epoch lookup.Epoch - - var lastData *Data - for i := uint64(0); i < 5; i++ { - T := uint64(Year * 5 * i) // write an update every 5 years 3 times starting in Jan 1st 1970 and then silence - data := Data{ - Payload: T, //our "payload" will be the timestamp itself. - Time: T, - } - epoch = update(store, epoch, T, &data) - lastData = &data - } - - // try to get the last value - - value, err := lookup.Lookup(now, lookup.NoClue, readFunc) - if err != nil { - t.Fatal(err) - } - - readCountWithoutHint := readCount - - if value != lastData { - t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) - } - - // reset the read count for the next test - readCount = 0 - // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update - value, err = lookup.Lookup(now, epoch, readFunc) - if err != nil { - t.Fatal(err) - } - - if value != lastData { - t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) - } - - if readCount > readCountWithoutHint { - t.Fatalf("Expected lookup to complete with fewer reads than %d since we provided a hint. 
Did %d reads.", readCountWithoutHint, readCount) - } - -} - -// testG will hold precooked test results -// fields are abbreviated to reduce the size of the literal below -type testG struct { - e lookup.Epoch // last - n uint64 // next level - x uint8 // expected result -} - -// test cases -var testGetNextLevelCases []testG = []testG{{e: lookup.Epoch{Time: 989875233, Level: 12}, n: 989875233, x: 11}, {e: lookup.Epoch{Time: 995807650, Level: 18}, n: 995598156, x: 19}, {e: lookup.Epoch{Time: 969167082, Level: 0}, n: 968990357, x: 18}, {e: lookup.Epoch{Time: 993087628, Level: 14}, n: 992987044, x: 20}, {e: lookup.Epoch{Time: 963364631, Level: 20}, n: 963364630, x: 19}, {e: lookup.Epoch{Time: 963497510, Level: 16}, n: 963370732, x: 18}, {e: lookup.Epoch{Time: 955421349, Level: 22}, n: 955421348, x: 21}, {e: lookup.Epoch{Time: 968220379, Level: 15}, n: 968220378, x: 14}, {e: lookup.Epoch{Time: 939129014, Level: 6}, n: 939128771, x: 11}, {e: lookup.Epoch{Time: 907847903, Level: 6}, n: 907791833, x: 18}, {e: lookup.Epoch{Time: 910835564, Level: 15}, n: 910835564, x: 14}, {e: lookup.Epoch{Time: 913578333, Level: 22}, n: 881808431, x: 25}, {e: lookup.Epoch{Time: 895818460, Level: 3}, n: 895818132, x: 9}, {e: lookup.Epoch{Time: 903843025, Level: 24}, n: 895609561, x: 23}, {e: lookup.Epoch{Time: 877889433, Level: 13}, n: 877877093, x: 15}, {e: lookup.Epoch{Time: 901450396, Level: 10}, n: 901450058, x: 9}, {e: lookup.Epoch{Time: 925179910, Level: 3}, n: 925168393, x: 16}, {e: lookup.Epoch{Time: 913485477, Level: 21}, n: 913485476, x: 20}, {e: lookup.Epoch{Time: 924462991, Level: 18}, n: 924462990, x: 17}, {e: lookup.Epoch{Time: 941175128, Level: 13}, n: 941175127, x: 12}, {e: lookup.Epoch{Time: 920126583, Level: 3}, n: 920100782, x: 19}, {e: lookup.Epoch{Time: 932403200, Level: 9}, n: 932279891, x: 17}, {e: lookup.Epoch{Time: 948284931, Level: 2}, n: 948284921, x: 9}, {e: lookup.Epoch{Time: 953540997, Level: 7}, n: 950547986, x: 22}, {e: lookup.Epoch{Time: 926639837, Level: 18}, n: 918608882, x: 24}, {e: lookup.Epoch{Time: 954637598, Level: 1}, n: 954578761, x: 17}, {e: lookup.Epoch{Time: 943482981, Level: 10}, n: 942924151, x: 19}, {e: lookup.Epoch{Time: 963580771, Level: 7}, n: 963580771, x: 6}, {e: lookup.Epoch{Time: 993744930, Level: 7}, n: 993690858, x: 16}, {e: lookup.Epoch{Time: 1018890213, Level: 12}, n: 1018890212, x: 11}, {e: lookup.Epoch{Time: 1030309411, Level: 2}, n: 1030309227, x: 9}, {e: lookup.Epoch{Time: 1063204997, Level: 20}, n: 1063204996, x: 19}, {e: lookup.Epoch{Time: 1094340832, Level: 6}, n: 1094340633, x: 7}, {e: lookup.Epoch{Time: 1077880597, Level: 10}, n: 1075914292, x: 20}, {e: lookup.Epoch{Time: 1051114957, Level: 18}, n: 1051114957, x: 17}, {e: lookup.Epoch{Time: 1045649701, Level: 22}, n: 1045649700, x: 21}, {e: lookup.Epoch{Time: 1066198885, Level: 14}, n: 1066198884, x: 13}, {e: lookup.Epoch{Time: 1053231952, Level: 1}, n: 1053210845, x: 16}, {e: lookup.Epoch{Time: 1068763404, Level: 14}, n: 1068675428, x: 18}, {e: lookup.Epoch{Time: 1039042173, Level: 15}, n: 1038973110, x: 17}, {e: lookup.Epoch{Time: 1050747636, Level: 6}, n: 1050747364, x: 9}, {e: lookup.Epoch{Time: 1030034434, Level: 23}, n: 1030034433, x: 22}, {e: lookup.Epoch{Time: 1003783425, Level: 18}, n: 1003783424, x: 17}, {e: lookup.Epoch{Time: 988163976, Level: 15}, n: 988084064, x: 17}, {e: lookup.Epoch{Time: 1007222377, Level: 15}, n: 1007222377, x: 14}, {e: lookup.Epoch{Time: 1001211375, Level: 13}, n: 1001208178, x: 14}, {e: lookup.Epoch{Time: 997623199, Level: 8}, n: 997623198, x: 7}, {e: 
lookup.Epoch{Time: 1026283830, Level: 10}, n: 1006681704, x: 24}, {e: lookup.Epoch{Time: 1019421907, Level: 20}, n: 1019421906, x: 19}, {e: lookup.Epoch{Time: 1043154306, Level: 16}, n: 1043108343, x: 16}, {e: lookup.Epoch{Time: 1075643767, Level: 17}, n: 1075325898, x: 18}, {e: lookup.Epoch{Time: 1043726309, Level: 20}, n: 1043726308, x: 19}, {e: lookup.Epoch{Time: 1056415324, Level: 17}, n: 1056415324, x: 16}, {e: lookup.Epoch{Time: 1088650219, Level: 13}, n: 1088650218, x: 12}, {e: lookup.Epoch{Time: 1088551662, Level: 7}, n: 1088543355, x: 13}, {e: lookup.Epoch{Time: 1069667265, Level: 6}, n: 1069667075, x: 7}, {e: lookup.Epoch{Time: 1079145970, Level: 18}, n: 1079145969, x: 17}, {e: lookup.Epoch{Time: 1083338876, Level: 7}, n: 1083338875, x: 6}, {e: lookup.Epoch{Time: 1051581086, Level: 4}, n: 1051568869, x: 14}, {e: lookup.Epoch{Time: 1028430882, Level: 4}, n: 1028430864, x: 5}, {e: lookup.Epoch{Time: 1057356462, Level: 1}, n: 1057356417, x: 5}, {e: lookup.Epoch{Time: 1033104266, Level: 0}, n: 1033097479, x: 13}, {e: lookup.Epoch{Time: 1031391367, Level: 11}, n: 1031387304, x: 14}, {e: lookup.Epoch{Time: 1049781164, Level: 15}, n: 1049781163, x: 14}, {e: lookup.Epoch{Time: 1027271628, Level: 12}, n: 1027271627, x: 11}, {e: lookup.Epoch{Time: 1057270560, Level: 23}, n: 1057270560, x: 22}, {e: lookup.Epoch{Time: 1047501317, Level: 15}, n: 1047501317, x: 14}, {e: lookup.Epoch{Time: 1058349035, Level: 11}, n: 1045175573, x: 24}, {e: lookup.Epoch{Time: 1057396147, Level: 20}, n: 1057396147, x: 19}, {e: lookup.Epoch{Time: 1048906375, Level: 18}, n: 1039616919, x: 25}, {e: lookup.Epoch{Time: 1074294831, Level: 20}, n: 1074294831, x: 19}, {e: lookup.Epoch{Time: 1088946052, Level: 1}, n: 1088917364, x: 14}, {e: lookup.Epoch{Time: 1112337595, Level: 17}, n: 1111008110, x: 22}, {e: lookup.Epoch{Time: 1099990284, Level: 5}, n: 1099968370, x: 15}, {e: lookup.Epoch{Time: 1087036441, Level: 16}, n: 1053967855, x: 25}, {e: lookup.Epoch{Time: 1069225185, Level: 8}, n: 1069224660, x: 10}, {e: lookup.Epoch{Time: 1057505479, Level: 9}, n: 1057505170, x: 14}, {e: lookup.Epoch{Time: 1072381377, Level: 12}, n: 1065950959, x: 22}, {e: lookup.Epoch{Time: 1093887139, Level: 8}, n: 1093863305, x: 14}, {e: lookup.Epoch{Time: 1082366510, Level: 24}, n: 1082366510, x: 23}, {e: lookup.Epoch{Time: 1103231132, Level: 14}, n: 1102292201, x: 22}, {e: lookup.Epoch{Time: 1094502355, Level: 3}, n: 1094324652, x: 18}, {e: lookup.Epoch{Time: 1068488344, Level: 12}, n: 1067577330, x: 19}, {e: lookup.Epoch{Time: 1050278233, Level: 12}, n: 1050278232, x: 11}, {e: lookup.Epoch{Time: 1047660768, Level: 5}, n: 1047652137, x: 17}, {e: lookup.Epoch{Time: 1060116167, Level: 11}, n: 1060114091, x: 12}, {e: lookup.Epoch{Time: 1068149392, Level: 21}, n: 1052074801, x: 24}, {e: lookup.Epoch{Time: 1081934120, Level: 6}, n: 1081933847, x: 8}, {e: lookup.Epoch{Time: 1107943693, Level: 16}, n: 1107096139, x: 25}, {e: lookup.Epoch{Time: 1131571649, Level: 9}, n: 1131570428, x: 11}, {e: lookup.Epoch{Time: 1123139367, Level: 0}, n: 1122912198, x: 20}, {e: lookup.Epoch{Time: 1121144423, Level: 6}, n: 1120568289, x: 20}, {e: lookup.Epoch{Time: 1089932411, Level: 17}, n: 1089932410, x: 16}, {e: lookup.Epoch{Time: 1104899012, Level: 22}, n: 1098978789, x: 22}, {e: lookup.Epoch{Time: 1094588059, Level: 21}, n: 1094588059, x: 20}, {e: lookup.Epoch{Time: 1114987438, Level: 24}, n: 1114987437, x: 23}, {e: lookup.Epoch{Time: 1084186305, Level: 7}, n: 1084186241, x: 6}, {e: lookup.Epoch{Time: 1058827111, Level: 8}, n: 1058826504, x: 9}, {e: 
lookup.Epoch{Time: 1090679810, Level: 12}, n: 1090616539, x: 17}, {e: lookup.Epoch{Time: 1084299475, Level: 23}, n: 1084299475, x: 22}} - -func TestGetNextLevel(t *testing.T) { - - // First, test well-known cases - last := lookup.Epoch{ - Time: 1533799046, - Level: 5, - } - - level := lookup.GetNextLevel(last, last.Time) - expected := uint8(4) - if level != expected { - t.Fatalf("Expected GetNextLevel to return %d for same-time updates at a nonzero level, got %d", expected, level) - } - - level = lookup.GetNextLevel(last, last.Time+(1< lookup.HighestLevel { - v = 0 - } - now = last.Time + uint64(rand.Intn(1<. - -package feeds - -import ( - "fmt" - "strconv" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" -) - -// Query is used to specify constraints when performing an update lookup -// TimeLimit indicates an upper bound for the search. Set to 0 for "now" -type Query struct { - Feed - Hint lookup.Epoch - TimeLimit uint64 -} - -// FromValues deserializes this instance from a string key-value store -// useful to parse query strings -func (q *Query) FromValues(values Values) error { - time, _ := strconv.ParseUint(values.Get("time"), 10, 64) - q.TimeLimit = time - - level, _ := strconv.ParseUint(values.Get("hint.level"), 10, 32) - q.Hint.Level = uint8(level) - q.Hint.Time, _ = strconv.ParseUint(values.Get("hint.time"), 10, 64) - if q.Feed.User == (common.Address{}) { - return q.Feed.FromValues(values) - } - return nil -} - -// AppendValues serializes this structure into the provided string key-value store -// useful to build query strings -func (q *Query) AppendValues(values Values) { - if q.TimeLimit != 0 { - values.Set("time", fmt.Sprintf("%d", q.TimeLimit)) - } - if q.Hint.Level != 0 { - values.Set("hint.level", fmt.Sprintf("%d", q.Hint.Level)) - } - if q.Hint.Time != 0 { - values.Set("hint.time", fmt.Sprintf("%d", q.Hint.Time)) - } - q.Feed.AppendValues(values) -} - -// NewQuery constructs an Query structure to find updates on or before `time` -// if time == 0, the latest update will be looked up -func NewQuery(feed *Feed, time uint64, hint lookup.Epoch) *Query { - return &Query{ - TimeLimit: time, - Feed: *feed, - Hint: hint, - } -} - -// NewQueryLatest generates lookup parameters that look for the latest update to a feed -func NewQueryLatest(feed *Feed, hint lookup.Epoch) *Query { - return NewQuery(feed, 0, hint) -} diff --git a/swarm/storage/feeds/query_test.go b/swarm/storage/feeds/query_test.go deleted file mode 100644 index 1420c69ae..000000000 --- a/swarm/storage/feeds/query_test.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
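Pulling the handler API together: the publish/read cycle exercised piecemeal by handler.go and its tests above reduces to a handful of calls. Below is a hypothetical sketch (the feedsdemo package and publishAndRead name are illustrative, as are the payload strings) against a Handler that is already wired to a chunk store, for instance one produced by the NewTestHandler helper the tests use, and a Signer that owns the feed.

package feedsdemo

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/storage/feeds"
	"github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup"
)

// publishAndRead publishes two updates to a feed owned by signer and returns
// the payload of the latest one.
func publishAndRead(ctx context.Context, h *feeds.Handler, signer feeds.Signer) ([]byte, error) {
	topic, err := feeds.NewTopic("world news report, every hour", nil)
	if err != nil {
		return nil, err
	}

	// first update: NewFirstRequest stamps it with the first epoch for "now"
	first := feeds.NewFirstRequest(topic)
	first.SetData([]byte("breaking news"))
	if err := first.Sign(signer); err != nil {
		return nil, err
	}
	if _, err := h.Update(ctx, first); err != nil {
		return nil, err
	}

	// follow-up update: let the handler find the previous one and suggest the next epoch
	next, err := h.NewRequest(ctx, &first.Feed)
	if err != nil {
		return nil, err
	}
	next.SetData([]byte("more news"))
	if err := next.Sign(signer); err != nil {
		return nil, err
	}
	if _, err := h.Update(ctx, next); err != nil {
		return nil, err
	}

	// read back the latest update: Lookup syncs the handler's cache, GetContent returns the data
	if _, err := h.Lookup(ctx, feeds.NewQueryLatest(&first.Feed, lookup.NoClue)); err != nil {
		return nil, err
	}
	_, data, err := h.GetContent(&first.Feed)
	return data, err
}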
- -package feeds - -import ( - "testing" -) - -func getTestQuery() *Query { - id := getTestID() - return &Query{ - TimeLimit: 5000, - Feed: id.Feed, - Hint: id.Epoch, - } -} - -func TestQueryValues(t *testing.T) { - var expected = KV{"hint.level": "25", "hint.time": "1000", "time": "5000", "topic": "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000", "user": "0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c"} - - query := getTestQuery() - testValueSerializer(t, query, expected) - -} diff --git a/swarm/storage/feeds/request.go b/swarm/storage/feeds/request.go deleted file mode 100644 index 81e1b10fe..000000000 --- a/swarm/storage/feeds/request.go +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package feeds - -import ( - "bytes" - "encoding/json" - "hash" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" -) - -// Request represents a request to sign or signed feed update message -type Request struct { - Update // actual content that will be put on the chunk, less signature - Signature *Signature - idAddr storage.Address // cached chunk address for the update (not serialized, for internal use) - binaryData []byte // cached serialized data (does not get serialized again!, for efficiency/internal use) -} - -// updateRequestJSON represents a JSON-serialized UpdateRequest -type updateRequestJSON struct { - ID - ProtocolVersion uint8 `json:"protocolVersion"` - Data string `json:"data,omitempty"` - Signature string `json:"signature,omitempty"` -} - -// Request layout -// Update bytes -// SignatureLength bytes -const minimumSignedUpdateLength = minimumUpdateDataLength + signatureLength - -// NewFirstRequest returns a ready to sign request to publish a first feed update -func NewFirstRequest(topic Topic) *Request { - - request := new(Request) - - // get the current time - now := TimestampProvider.Now().Time - request.Epoch = lookup.GetFirstEpoch(now) - request.Feed.Topic = topic - request.Header.Version = ProtocolVersion - - return request -} - -// SetData stores the payload data the feed update will be updated with -func (r *Request) SetData(data []byte) { - r.data = data - r.Signature = nil -} - -// IsUpdate returns true if this request models a signed update or otherwise it is a signature request -func (r *Request) IsUpdate() bool { - return r.Signature != nil -} - -// Verify checks that signatures are valid -func (r *Request) Verify() (err error) { - if len(r.data) == 0 { - return NewError(ErrInvalidValue, "Update does not contain data") - } - if r.Signature == nil { - return NewError(ErrInvalidSignature, "Missing signature field") - } - - digest, err := r.GetDigest() - if err != 
nil { - return err - } - - // get the address of the signer (which also checks that it's a valid signature) - r.Feed.User, err = getUserAddr(digest, *r.Signature) - if err != nil { - return err - } - - // check that the lookup information contained in the chunk matches the updateAddr (chunk search key) - // that was used to retrieve this chunk - // if this validation fails, someone forged a chunk. - if !bytes.Equal(r.idAddr, r.Addr()) { - return NewError(ErrInvalidSignature, "Signature address does not match with update user address") - } - - return nil -} - -// Sign executes the signature to validate the update message -func (r *Request) Sign(signer Signer) error { - r.Feed.User = signer.Address() - r.binaryData = nil //invalidate serialized data - digest, err := r.GetDigest() // computes digest and serializes into .binaryData - if err != nil { - return err - } - - signature, err := signer.Sign(digest) - if err != nil { - return err - } - - // Although the Signer interface returns the public address of the signer, - // recover it from the signature to see if they match - userAddr, err := getUserAddr(digest, signature) - if err != nil { - return NewError(ErrInvalidSignature, "Error verifying signature") - } - - if userAddr != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign! - return NewError(ErrInvalidSignature, "Signer address does not match update user address") - } - - r.Signature = &signature - r.idAddr = r.Addr() - return nil -} - -// GetDigest creates the feed update digest used in signatures -// the serialized payload is cached in .binaryData -func (r *Request) GetDigest() (result common.Hash, err error) { - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - dataLength := r.Update.binaryLength() - if r.binaryData == nil { - r.binaryData = make([]byte, dataLength+signatureLength) - if err := r.Update.binaryPut(r.binaryData[:dataLength]); err != nil { - return result, err - } - } - hasher.Write(r.binaryData[:dataLength]) //everything except the signature. - - return common.BytesToHash(hasher.Sum(nil)), nil -} - -// create an update chunk. -func (r *Request) toChunk() (storage.Chunk, error) { - - // Check that the update is signed and serialized - // For efficiency, data is serialized during signature and cached in - // the binaryData field when computing the signature digest in .getDigest() - if r.Signature == nil || r.binaryData == nil { - return nil, NewError(ErrInvalidSignature, "toChunk called without a valid signature or payload data. Call .Sign() first.") - } - - updateLength := r.Update.binaryLength() - - // signature is the last item in the chunk data - copy(r.binaryData[updateLength:], r.Signature[:]) - - chunk := storage.NewChunk(r.idAddr, r.binaryData) - return chunk, nil -} - -// fromChunk populates this structure from chunk data. It does not verify the signature is valid. 
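Usage note (not part of this patch): a minimal sketch of the Request flow defined above, creating, signing and verifying a first feed update. It assumes the storage/feed import path this patch converges on; the topic name, payload, key and error handling are illustrative only.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
)

func main() {
	// NewFirstRequest stamps the request with the current epoch via TimestampProvider.Now()
	topic, _ := feed.NewTopic("weather", nil) // hypothetical topic name
	request := feed.NewFirstRequest(topic)
	request.SetData([]byte("sunny, 25C")) // SetData clears any previous signature

	key, _ := crypto.GenerateKey() // throwaway key, for the sketch only
	signer := feed.NewGenericSigner(key)
	if err := request.Sign(signer); err != nil { // serializes the update, signs it and caches the chunk address
		fmt.Println("sign failed:", err)
		return
	}
	if err := request.Verify(); err != nil { // recovers the signer and checks it against the chunk address
		fmt.Println("verify failed:", err)
		return
	}
	fmt.Println("update signed by", request.Feed.User.Hex())
}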
-func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error { - // for update chunk layout see Request definition - - //deserialize the feed update portion - if err := r.Update.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil { - return err - } - - // Extract the signature - var signature *Signature - cursor := r.Update.binaryLength() - sigdata := chunkdata[cursor : cursor+signatureLength] - if len(sigdata) > 0 { - signature = &Signature{} - copy(signature[:], sigdata) - } - - r.Signature = signature - r.idAddr = updateAddr - r.binaryData = chunkdata - - return nil - -} - -// FromValues deserializes this instance from a string key-value store -// useful to parse query strings -func (r *Request) FromValues(values Values, data []byte) error { - signatureBytes, err := hexutil.Decode(values.Get("signature")) - if err != nil { - r.Signature = nil - } else { - if len(signatureBytes) != signatureLength { - return NewError(ErrInvalidSignature, "Incorrect signature length") - } - r.Signature = new(Signature) - copy(r.Signature[:], signatureBytes) - } - err = r.Update.FromValues(values, data) - if err != nil { - return err - } - r.idAddr = r.Addr() - return err -} - -// AppendValues serializes this structure into the provided string key-value store -// useful to build query strings -func (r *Request) AppendValues(values Values) []byte { - if r.Signature != nil { - values.Set("signature", hexutil.Encode(r.Signature[:])) - } - return r.Update.AppendValues(values) -} - -// fromJSON takes an update request JSON and populates an UpdateRequest -func (r *Request) fromJSON(j *updateRequestJSON) error { - - r.ID = j.ID - r.Header.Version = j.ProtocolVersion - - var err error - if j.Data != "" { - r.data, err = hexutil.Decode(j.Data) - if err != nil { - return NewError(ErrInvalidValue, "Cannot decode data") - } - } - - if j.Signature != "" { - sigBytes, err := hexutil.Decode(j.Signature) - if err != nil || len(sigBytes) != signatureLength { - return NewError(ErrInvalidSignature, "Cannot decode signature") - } - r.Signature = new(Signature) - r.idAddr = r.Addr() - copy(r.Signature[:], sigBytes) - } - return nil -} - -// UnmarshalJSON takes a JSON structure stored in a byte array and populates the Request object -// Implements json.Unmarshaler interface -func (r *Request) UnmarshalJSON(rawData []byte) error { - var requestJSON updateRequestJSON - if err := json.Unmarshal(rawData, &requestJSON); err != nil { - return err - } - return r.fromJSON(&requestJSON) -} - -// MarshalJSON takes an update request and encodes it as a JSON structure into a byte array -// Implements json.Marshaler interface -func (r *Request) MarshalJSON() (rawData []byte, err error) { - var signatureString, dataString string - if r.Signature != nil { - signatureString = hexutil.Encode(r.Signature[:]) - } - if r.data != nil { - dataString = hexutil.Encode(r.data) - } - - requestJSON := &updateRequestJSON{ - ID: r.ID, - ProtocolVersion: r.Header.Version, - Data: dataString, - Signature: signatureString, - } - - return json.Marshal(requestJSON) -} diff --git a/swarm/storage/feeds/request_test.go b/swarm/storage/feeds/request_test.go deleted file mode 100644 index 8a15815a4..000000000 --- a/swarm/storage/feeds/request_test.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package feeds - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "fmt" - "reflect" - "testing" - - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/feeds/lookup" -) - -func areEqualJSON(s1, s2 string) (bool, error) { - //credit for the trick: turtlemonvh https://gist.github.com/turtlemonvh/e4f7404e28387fadb8ad275a99596f67 - var o1 interface{} - var o2 interface{} - - err := json.Unmarshal([]byte(s1), &o1) - if err != nil { - return false, fmt.Errorf("Error mashalling string 1 :: %s", err.Error()) - } - err = json.Unmarshal([]byte(s2), &o2) - if err != nil { - return false, fmt.Errorf("Error mashalling string 2 :: %s", err.Error()) - } - - return reflect.DeepEqual(o1, o2), nil -} - -// TestEncodingDecodingUpdateRequests ensures that requests are serialized properly -// while also checking cryptographically that only the owner of a feed can update it. -func TestEncodingDecodingUpdateRequests(t *testing.T) { - - charlie := newCharlieSigner() //Charlie - bob := newBobSigner() //Bob - - // Create a feed to our good guy Charlie's name - topic, _ := NewTopic("a good topic name", nil) - firstRequest := NewFirstRequest(topic) - firstRequest.User = charlie.Address() - - // We now encode the create message to simulate we send it over the wire - messageRawData, err := firstRequest.MarshalJSON() - if err != nil { - t.Fatalf("Error encoding first feed update request: %s", err) - } - - // ... the message arrives and is decoded... - var recoveredFirstRequest Request - if err := recoveredFirstRequest.UnmarshalJSON(messageRawData); err != nil { - t.Fatalf("Error decoding first feed update request: %s", err) - } - - // ... but verification should fail because it is not signed! - if err := recoveredFirstRequest.Verify(); err == nil { - t.Fatal("Expected Verify to fail since the message is not signed") - } - - // We now assume that the feed ypdate was created and propagated. - - const expectedSignature = "0x7235b27a68372ddebcf78eba48543fa460864b0b0e99cb533fcd3664820e603312d29426dd00fb39628f5299480a69bf6e462838d78de49ce0704c754c9deb2601" - const expectedJSON = `{"feed":{"topic":"0x6120676f6f6420746f706963206e616d65000000000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}` - - //Put together an unsigned update request that we will serialize to send it to the signer. 
- data := []byte("This hour's update: Swarm 99.0 has been released!") - request := &Request{ - Update: Update{ - ID: ID{ - Epoch: lookup.Epoch{ - Time: 1000, - Level: 1, - }, - Feed: firstRequest.Update.Feed, - }, - data: data, - }, - } - - messageRawData, err = request.MarshalJSON() - if err != nil { - t.Fatalf("Error encoding update request: %s", err) - } - - equalJSON, err := areEqualJSON(string(messageRawData), expectedJSON) - if err != nil { - t.Fatalf("Error decoding update request JSON: %s", err) - } - if !equalJSON { - t.Fatalf("Received a different JSON message. Expected %s, got %s", expectedJSON, string(messageRawData)) - } - - // now the encoded message messageRawData is sent over the wire and arrives to the signer - - //Attempt to extract an UpdateRequest out of the encoded message - var recoveredRequest Request - if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil { - t.Fatalf("Error decoding update request: %s", err) - } - - //sign the request and see if it matches our predefined signature above. - if err := recoveredRequest.Sign(charlie); err != nil { - t.Fatalf("Error signing request: %s", err) - } - - compareByteSliceToExpectedHex(t, "signature", recoveredRequest.Signature[:], expectedSignature) - - // mess with the signature and see what happens. To alter the signature, we briefly decode it as JSON - // to alter the signature field. - var j updateRequestJSON - if err := json.Unmarshal([]byte(expectedJSON), &j); err != nil { - t.Fatal("Error unmarshalling test json, check expectedJSON constant") - } - j.Signature = "Certainly not a signature" - corruptMessage, _ := json.Marshal(j) // encode the message with the bad signature - var corruptRequest Request - if err = corruptRequest.UnmarshalJSON(corruptMessage); err == nil { - t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature") - } - - // Now imagine Bob wants to create an update of his own about the same feed, - // signing a message with his private key - if err := request.Sign(bob); err != nil { - t.Fatalf("Error signing: %s", err) - } - - // Now Bob encodes the message to send it over the wire... - messageRawData, err = request.MarshalJSON() - if err != nil { - t.Fatalf("Error encoding message:%s", err) - } - - // ... the message arrives to our Swarm node and it is decoded. 
- recoveredRequest = Request{} - if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil { - t.Fatalf("Error decoding message:%s", err) - } - - // Before checking what happened with Bob's update, let's see what would happen if we mess - // with the signature big time to see if Verify catches it - savedSignature := *recoveredRequest.Signature // save the signature for later - binary.LittleEndian.PutUint64(recoveredRequest.Signature[5:], 556845463424) // write some random data to break the signature - if err = recoveredRequest.Verify(); err == nil { - t.Fatal("Expected Verify to fail on corrupt signature") - } - - // restore the Bob's signature from corruption - *recoveredRequest.Signature = savedSignature - - // Now the signature is not corrupt - if err = recoveredRequest.Verify(); err != nil { - t.Fatal(err) - } - - // Reuse object and sign with our friend Charlie's private key - if err := recoveredRequest.Sign(charlie); err != nil { - t.Fatalf("Error signing with the correct private key: %s", err) - } - - // And now, Verify should work since this update now belongs to Charlie - if err = recoveredRequest.Verify(); err != nil { - t.Fatalf("Error verifying that Charlie, can sign a reused request object:%s", err) - } - - // mess with the lookup key to make sure Verify fails: - recoveredRequest.Time = 77999 // this will alter the lookup key - if err = recoveredRequest.Verify(); err == nil { - t.Fatalf("Expected Verify to fail since the lookup key has been altered") - } -} - -func getTestRequest() *Request { - return &Request{ - Update: *getTestFeedUpdate(), - } -} - -func TestUpdateChunkSerializationErrorChecking(t *testing.T) { - - // Test that parseUpdate fails if the chunk is too small - var r Request - if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1+signatureLength)); err == nil { - t.Fatalf("Expected request.fromChunk to fail when chunkData contains less than %d bytes", minimumUpdateDataLength) - } - - r = *getTestRequest() - - _, err := r.toChunk() - if err == nil { - t.Fatal("Expected request.toChunk to fail when there is no data") - } - r.data = []byte("Al bien hacer jamás le falta premio") // put some arbitrary length data - _, err = r.toChunk() - if err == nil { - t.Fatal("expected request.toChunk to fail when there is no signature") - } - - charlie := newCharlieSigner() - if err := r.Sign(charlie); err != nil { - t.Fatalf("error signing:%s", err) - } - - chunk, err := r.toChunk() - if err != nil { - t.Fatalf("error creating update chunk:%s", err) - } - - compareByteSliceToExpectedHex(t, "chunk", chunk.Data(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019416c206269656e206861636572206a616dc3a173206c652066616c7461207072656d696f5a0ffe0bc27f207cd5b00944c8b9cee93e08b89b5ada777f123ac535189333f174a6a4ca2f43a92c4a477a49d774813c36ce8288552c58e6205b0ac35d0507eb00") - - var recovered Request - recovered.fromChunk(chunk.Address(), chunk.Data()) - if !reflect.DeepEqual(recovered, r) { - t.Fatal("Expected recovered feed update request to equal the original one") - } -} - -// check that signature address matches update signer address -func TestReverse(t *testing.T) { - - epoch := lookup.Epoch{ - Time: 7888, - Level: 6, - } - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - // set up rpc and create feeds handler - _, _, teardownTest, err := 
setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - topic, _ := NewTopic("Cervantes quotes", nil) - fd := Feed{ - Topic: topic, - User: signer.Address(), - } - - data := []byte("Donde una puerta se cierra, otra se abre") - - request := new(Request) - request.Feed = fd - request.Epoch = epoch - request.data = data - - // generate a chunk key for this request - key := request.Addr() - - if err = request.Sign(signer); err != nil { - t.Fatal(err) - } - - chunk, err := request.toChunk() - if err != nil { - t.Fatal(err) - } - - // check that we can recover the owner account from the update chunk's signature - var checkUpdate Request - if err := checkUpdate.fromChunk(chunk.Address(), chunk.Data()); err != nil { - t.Fatal(err) - } - checkdigest, err := checkUpdate.GetDigest() - if err != nil { - t.Fatal(err) - } - recoveredAddr, err := getUserAddr(checkdigest, *checkUpdate.Signature) - if err != nil { - t.Fatalf("Retrieve address from signature fail: %v", err) - } - originalAddr := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) - - // check that the metadata retrieved from the chunk matches what we gave it - if recoveredAddr != originalAddr { - t.Fatalf("addresses dont match: %x != %x", originalAddr, recoveredAddr) - } - - if !bytes.Equal(key[:], chunk.Address()[:]) { - t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Address()) - } - if epoch != checkUpdate.Epoch { - t.Fatalf("Expected epoch to be '%s', was '%s'", epoch.String(), checkUpdate.Epoch.String()) - } - if !bytes.Equal(data, checkUpdate.data) { - t.Fatalf("Expected data '%x', was '%x'", data, checkUpdate.data) - } -} diff --git a/swarm/storage/feeds/sign.go b/swarm/storage/feeds/sign.go deleted file mode 100644 index f5760c8f2..000000000 --- a/swarm/storage/feeds/sign.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package feeds - -import ( - "crypto/ecdsa" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -const signatureLength = 65 - -// Signature is an alias for a static byte array with the size of a signature -type Signature [signatureLength]byte - -// Signer signs feed update payloads -type Signer interface { - Sign(common.Hash) (Signature, error) - Address() common.Address -} - -// GenericSigner implements the Signer interface -// It is the vanilla signer that probably should be used in most cases -type GenericSigner struct { - PrivKey *ecdsa.PrivateKey - address common.Address -} - -// NewGenericSigner builds a signer that will sign everything with the provided private key -func NewGenericSigner(privKey *ecdsa.PrivateKey) *GenericSigner { - return &GenericSigner{ - PrivKey: privKey, - address: crypto.PubkeyToAddress(privKey.PublicKey), - } -} - -// Sign signs the supplied data -// It wraps the ethereum crypto.Sign() method -func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error) { - signaturebytes, err := crypto.Sign(data.Bytes(), s.PrivKey) - if err != nil { - return - } - copy(signature[:], signaturebytes) - return -} - -// Address returns the public key of the signer's private key -func (s *GenericSigner) Address() common.Address { - return s.address -} - -// getUserAddr extracts the address of the feed update signer -func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) { - pub, err := crypto.SigToPub(digest.Bytes(), signature[:]) - if err != nil { - return common.Address{}, err - } - return crypto.PubkeyToAddress(*pub), nil -} diff --git a/swarm/storage/feeds/testutil.go b/swarm/storage/feeds/testutil.go deleted file mode 100644 index 879f73348..000000000 --- a/swarm/storage/feeds/testutil.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package feeds - -import ( - "context" - "fmt" - "path/filepath" - "sync" - - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -const ( - testDbDirName = "feeds" -) - -type TestHandler struct { - *Handler -} - -func (t *TestHandler) Close() { - t.chunkStore.Close() -} - -type mockNetFetcher struct{} - -func (m *mockNetFetcher) Request(ctx context.Context, hopCount uint8) { -} -func (m *mockNetFetcher) Offer(ctx context.Context, source *enode.ID) { -} - -func newFakeNetFetcher(context.Context, storage.Address, *sync.Map) storage.NetFetcher { - return &mockNetFetcher{} -} - -// NewTestHandler creates Handler object to be used for testing purposes. 
-func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) { - path := filepath.Join(datadir, testDbDirName) - fh := NewHandler(params) - localstoreparams := storage.NewDefaultLocalStoreParams() - localstoreparams.Init(path) - localStore, err := storage.NewLocalStore(localstoreparams, nil) - if err != nil { - return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err) - } - localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm))) - localStore.Validators = append(localStore.Validators, fh) - netStore, err := storage.NewNetStore(localStore, nil) - if err != nil { - return nil, err - } - netStore.NewNetFetcherFunc = newFakeNetFetcher - fh.SetStore(netStore) - return &TestHandler{fh}, nil -} diff --git a/swarm/storage/feeds/timestampprovider.go b/swarm/storage/feeds/timestampprovider.go deleted file mode 100644 index f6aa0775c..000000000 --- a/swarm/storage/feeds/timestampprovider.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
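Note on the Signer interface in the sign.go hunk above: any type providing Sign and Address satisfies it, so GenericSigner is not the only option. A hypothetical stub like the following (not in this patch; package name and identifiers are made up) can stand in wherever the produced signature is never verified.

package feedstub // illustrative only

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
)

// fixedSigner returns a canned signature and address. Request.Verify would
// reject its output, since the signature is not derived from any key, so it
// is only useful in code paths that never check signatures.
type fixedSigner struct {
	addr common.Address
	sig  feed.Signature
}

func (s *fixedSigner) Sign(common.Hash) (feed.Signature, error) { return s.sig, nil }
func (s *fixedSigner) Address() common.Address                  { return s.addr }

var _ feed.Signer = (*fixedSigner)(nil) // compile-time check that the interface is satisfied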
- -package feeds - -import ( - "encoding/binary" - "encoding/json" - "time" -) - -// TimestampProvider sets the time source of the feeds package -var TimestampProvider timestampProvider = NewDefaultTimestampProvider() - -// Timestamp encodes a point in time as a Unix epoch -type Timestamp struct { - Time uint64 `json:"time"` // Unix epoch timestamp, in seconds -} - -// 8 bytes uint64 Time -const timestampLength = 8 - -// timestampProvider interface describes a source of timestamp information -type timestampProvider interface { - Now() Timestamp // returns the current timestamp information -} - -// binaryGet populates the timestamp structure from the given byte slice -func (t *Timestamp) binaryGet(data []byte) error { - if len(data) != timestampLength { - return NewError(ErrCorruptData, "timestamp data has the wrong size") - } - t.Time = binary.LittleEndian.Uint64(data[:8]) - return nil -} - -// binaryPut Serializes a Timestamp to a byte slice -func (t *Timestamp) binaryPut(data []byte) error { - if len(data) != timestampLength { - return NewError(ErrCorruptData, "timestamp data has the wrong size") - } - binary.LittleEndian.PutUint64(data, t.Time) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaller interface -func (t *Timestamp) UnmarshalJSON(data []byte) error { - return json.Unmarshal(data, &t.Time) -} - -// MarshalJSON implements the json.Marshaller interface -func (t *Timestamp) MarshalJSON() ([]byte, error) { - return json.Marshal(t.Time) -} - -// DefaultTimestampProvider is a TimestampProvider that uses system time -// as time source -type DefaultTimestampProvider struct { -} - -// NewDefaultTimestampProvider creates a system clock based timestamp provider -func NewDefaultTimestampProvider() *DefaultTimestampProvider { - return &DefaultTimestampProvider{} -} - -// Now returns the current time according to this provider -func (dtp *DefaultTimestampProvider) Now() Timestamp { - return Timestamp{ - Time: uint64(time.Now().Unix()), - } -} diff --git a/swarm/storage/feeds/topic.go b/swarm/storage/feeds/topic.go deleted file mode 100644 index 2dc8c18cd..000000000 --- a/swarm/storage/feeds/topic.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package feeds - -import ( - "bytes" - "encoding/json" - "fmt" - - "github.com/ethereum/go-ethereum/common/bitutil" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// TopicLength establishes the max length of a topic string -const TopicLength = storage.AddressLength - -// Topic represents what a feed is about -type Topic [TopicLength]byte - -// ErrTopicTooLong is returned when creating a topic with a name/related content too long -var ErrTopicTooLong = fmt.Errorf("Topic is too long. 
Max length is %d", TopicLength) - -// NewTopic creates a new topic from a provided name and "related content" byte array, -// merging the two together. -// If relatedContent or name are longer than TopicLength, they will be truncated and an error returned -// name can be an empty string -// relatedContent can be nil -func NewTopic(name string, relatedContent []byte) (topic Topic, err error) { - if relatedContent != nil { - contentLength := len(relatedContent) - if contentLength > TopicLength { - contentLength = TopicLength - err = ErrTopicTooLong - } - copy(topic[:], relatedContent[:contentLength]) - } - nameBytes := []byte(name) - nameLength := len(nameBytes) - if nameLength > TopicLength { - nameLength = TopicLength - err = ErrTopicTooLong - } - bitutil.XORBytes(topic[:], topic[:], nameBytes[:nameLength]) - return topic, err -} - -// Hex will return the topic encoded as an hex string -func (t *Topic) Hex() string { - return hexutil.Encode(t[:]) -} - -// FromHex will parse a hex string into this Topic instance -func (t *Topic) FromHex(hex string) error { - bytes, err := hexutil.Decode(hex) - if err != nil || len(bytes) != len(t) { - return NewErrorf(ErrInvalidValue, "Cannot decode topic") - } - copy(t[:], bytes) - return nil -} - -// Name will try to extract the topic name out of the Topic -func (t *Topic) Name(relatedContent []byte) string { - nameBytes := *t - if relatedContent != nil { - contentLength := len(relatedContent) - if contentLength > TopicLength { - contentLength = TopicLength - } - bitutil.XORBytes(nameBytes[:], t[:], relatedContent[:contentLength]) - } - z := bytes.IndexByte(nameBytes[:], 0) - if z < 0 { - z = TopicLength - } - return string(nameBytes[:z]) - -} - -// UnmarshalJSON implements the json.Unmarshaller interface -func (t *Topic) UnmarshalJSON(data []byte) error { - var hex string - json.Unmarshal(data, &hex) - return t.FromHex(hex) -} - -// MarshalJSON implements the json.Marshaller interface -func (t *Topic) MarshalJSON() ([]byte, error) { - return json.Marshal(t.Hex()) -} diff --git a/swarm/storage/feeds/topic_test.go b/swarm/storage/feeds/topic_test.go deleted file mode 100644 index 8994002d7..000000000 --- a/swarm/storage/feeds/topic_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package feeds - -import ( - "testing" - - "github.com/ethereum/go-ethereum/common/hexutil" -) - -func TestTopic(t *testing.T) { - related, _ := hexutil.Decode("0xabcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789") - topicName := "test-topic" - topic, _ := NewTopic(topicName, related) - hex := topic.Hex() - expectedHex := "0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789" - if hex != expectedHex { - t.Fatalf("Expected %s, got %s", expectedHex, hex) - } - - var topic2 Topic - topic2.FromHex(hex) - if topic2 != topic { - t.Fatal("Expected recovered topic to be equal to original one") - } - - if topic2.Name(related) != topicName { - t.Fatal("Retrieved name does not match") - } - - bytes, err := topic2.MarshalJSON() - if err != nil { - t.Fatal(err) - } - expectedJSON := `"0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789"` - equal, err := areEqualJSON(expectedJSON, string(bytes)) - if err != nil { - t.Fatal(err) - } - if !equal { - t.Fatalf("Expected JSON to be %s, got %s", expectedJSON, string(bytes)) - } - - err = topic2.UnmarshalJSON(bytes) - if err != nil { - t.Fatal(err) - } - if topic2 != topic { - t.Fatal("Expected recovered topic to be equal to original one") - } - -} diff --git a/swarm/storage/feeds/update.go 
b/swarm/storage/feeds/update.go deleted file mode 100644 index 02bd37522..000000000 --- a/swarm/storage/feeds/update.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package feeds - -import ( - "fmt" - "strconv" - - "github.com/ethereum/go-ethereum/swarm/chunk" -) - -// ProtocolVersion defines the current version of the protocol that will be included in each update message -const ProtocolVersion uint8 = 0 - -const headerLength = 8 - -// Header defines a update message header including a protocol version byte -type Header struct { - Version uint8 // Protocol version - Padding [headerLength - 1]uint8 // reserved for future use -} - -// Update encapsulates the information sent as part of a feed update -type Update struct { - Header Header // - ID // Feed Update identifying information - data []byte // actual data payload -} - -const minimumUpdateDataLength = idLength + headerLength + 1 -const maxUpdateDataLength = chunk.DefaultSize - signatureLength - idLength - headerLength - -// binaryPut serializes the feed update information into the given slice -func (r *Update) binaryPut(serializedData []byte) error { - datalength := len(r.data) - if datalength == 0 { - return NewError(ErrInvalidValue, "a feed update must contain data") - } - - if datalength > maxUpdateDataLength { - return NewErrorf(ErrInvalidValue, "feed update data is too big (length=%d). Max length=%d", datalength, maxUpdateDataLength) - } - - if len(serializedData) != r.binaryLength() { - return NewErrorf(ErrInvalidValue, "slice passed to putBinary must be of exact size. 
Expected %d bytes", r.binaryLength()) - } - - var cursor int - // serialize Header - serializedData[cursor] = r.Header.Version - copy(serializedData[cursor+1:headerLength], r.Header.Padding[:headerLength-1]) - cursor += headerLength - - // serialize ID - if err := r.ID.binaryPut(serializedData[cursor : cursor+idLength]); err != nil { - return err - } - cursor += idLength - - // add the data - copy(serializedData[cursor:], r.data) - cursor += datalength - - return nil -} - -// binaryLength returns the expected number of bytes this structure will take to encode -func (r *Update) binaryLength() int { - return idLength + headerLength + len(r.data) -} - -// binaryGet populates this instance from the information contained in the passed byte slice -func (r *Update) binaryGet(serializedData []byte) error { - if len(serializedData) < minimumUpdateDataLength { - return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a feed update chunk", minimumUpdateDataLength) - } - dataLength := len(serializedData) - idLength - headerLength - // at this point we can be satisfied that we have the correct data length to read - - var cursor int - - // deserialize Header - r.Header.Version = serializedData[cursor] // extract the protocol version - copy(r.Header.Padding[:headerLength-1], serializedData[cursor+1:headerLength]) // extract the padding - cursor += headerLength - - if err := r.ID.binaryGet(serializedData[cursor : cursor+idLength]); err != nil { - return err - } - cursor += idLength - - data := serializedData[cursor : cursor+dataLength] - cursor += dataLength - - // now that all checks have passed, copy data into structure - r.data = make([]byte, dataLength) - copy(r.data, data) - - return nil - -} - -// FromValues deserializes this instance from a string key-value store -// useful to parse query strings -func (r *Update) FromValues(values Values, data []byte) error { - r.data = data - version, _ := strconv.ParseUint(values.Get("protocolVersion"), 10, 32) - r.Header.Version = uint8(version) - return r.ID.FromValues(values) -} - -// AppendValues serializes this structure into the provided string key-value store -// useful to build query strings -func (r *Update) AppendValues(values Values) []byte { - r.ID.AppendValues(values) - values.Set("protocolVersion", fmt.Sprintf("%d", r.Header.Version)) - return r.data -} diff --git a/swarm/storage/feeds/update_test.go b/swarm/storage/feeds/update_test.go deleted file mode 100644 index 7763da0c8..000000000 --- a/swarm/storage/feeds/update_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package feeds - -import ( - "testing" -) - -func getTestFeedUpdate() *Update { - return &Update{ - ID: *getTestID(), - data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"), - } -} - -func TestUpdateSerializer(t *testing.T) { - testBinarySerializerRecovery(t, getTestFeedUpdate(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f") -} - -func TestUpdateLengthCheck(t *testing.T) { - testBinarySerializerLengthCheck(t, getTestFeedUpdate()) - // Test fail if update is too big - update := getTestFeedUpdate() - update.data = make([]byte, maxUpdateDataLength+100) - serialized := make([]byte, update.binaryLength()) - if err := update.binaryPut(serialized); err == nil { - t.Fatal("Expected update.binaryPut to fail since update is too big") - } - - // test fail if data is empty or nil - update.data = nil - serialized = make([]byte, update.binaryLength()) - if err := update.binaryPut(serialized); err == nil { - t.Fatal("Expected update.binaryPut to fail since data is empty") - } -} diff --git a/swarm/swarm.go b/swarm/swarm.go index 74c53dece..6aa15edd9 100644 --- a/swarm/swarm.go +++ b/swarm/swarm.go @@ -49,8 +49,8 @@ import ( "github.com/ethereum/go-ethereum/swarm/pss" "github.com/ethereum/go-ethereum/swarm/state" "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/feed" "github.com/ethereum/go-ethereum/swarm/storage/mock" - "github.com/ethereum/go-ethereum/swarm/storage/feeds" "github.com/ethereum/go-ethereum/swarm/tracing" ) @@ -186,10 +186,10 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e // Swarm Hash Merklised Chunking for Arbitrary-length Document/File storage self.fileStore = storage.NewFileStore(self.netStore, self.config.FileStoreParams) - var feedsHandler *feeds.Handler - fhParams := &feeds.HandlerParams{} + var feedsHandler *feed.Handler + fhParams := &feed.HandlerParams{} - feedsHandler = feeds.NewHandler(fhParams) + feedsHandler = feed.NewHandler(fhParams) feedsHandler.SetStore(self.netStore) lstore.Validators = []storage.ChunkValidator{ diff --git a/swarm/testutil/http.go b/swarm/testutil/http.go index 2e5735ece..cdf9239d0 100644 --- a/swarm/testutil/http.go +++ b/swarm/testutil/http.go @@ -25,7 +25,7 @@ import ( "github.com/ethereum/go-ethereum/swarm/api" "github.com/ethereum/go-ethereum/swarm/storage" - "github.com/ethereum/go-ethereum/swarm/storage/feeds" + "github.com/ethereum/go-ethereum/swarm/storage/feed" ) type TestServer interface { @@ -54,8 +54,8 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso t.Fatal(err) } - rhparams := &feeds.HandlerParams{} - rh, err := feeds.NewTestHandler(feedsDir, rhparams) + rhparams := &feed.HandlerParams{} + rh, err := feed.NewTestHandler(feedsDir, rhparams) if err != nil { t.Fatal(err) } @@ -75,7 +75,7 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso }, CurrentTime: 42, } - feeds.TimestampProvider = tss + feed.TimestampProvider = tss return tss } @@ -92,6 +92,6 @@ func (t *TestSwarmServer) Close() { t.cleanup() } -func (t *TestSwarmServer) Now() feeds.Timestamp { - return feeds.Timestamp{Time: t.CurrentTime} +func (t *TestSwarmServer) Now() feed.Timestamp { + return feed.Timestamp{Time: t.CurrentTime} } -- cgit
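For reference, the handler wiring performed by the swarm.go and testutil hunks above condenses into a sketch like this (assumptions: the storage/feed import path, a caller that already owns a *storage.NetStore, and a fixed clock used purely for illustration; package and function names are hypothetical):

package feedwiring // illustrative only

import (
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
)

// fixedClock satisfies the package's timestamp-provider interface with a
// constant time, the same trick the test server uses for determinism.
type fixedClock struct{ t uint64 }

func (c *fixedClock) Now() feed.Timestamp { return feed.Timestamp{Time: c.t} }

// newFeedHandler builds a feed.Handler and points it at an existing NetStore.
func newFeedHandler(netStore *storage.NetStore) *feed.Handler {
	feed.TimestampProvider = &fixedClock{t: 42} // override the package-level time source
	h := feed.NewHandler(&feed.HandlerParams{})
	h.SetStore(netStore)
	return h
}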