From f1e86ad9cf0470051b7106ee83794d27276b528d Mon Sep 17 00:00:00 2001 From: Javier Peletier Date: Sat, 29 Sep 2018 01:00:28 +0200 Subject: swarm/storage/mru: Renamed all identifiers to Feeds --- cmd/swarm/mru.go | 4 +- cmd/swarm/mru_test.go | 6 +- swarm/api/api.go | 14 ++--- swarm/api/client/client_test.go | 2 +- swarm/api/http/server.go | 8 +-- swarm/api/http/server_test.go | 4 +- swarm/api/manifest.go | 4 +- swarm/network/README.md | 2 +- swarm/storage/mru/cacheentry.go | 8 +-- swarm/storage/mru/doc.go | 2 +- swarm/storage/mru/handler.go | 112 +++++++++++++++++++------------------- swarm/storage/mru/handler_test.go | 78 +++++++++++++------------- swarm/storage/mru/id.go | 24 ++++---- swarm/storage/mru/id_test.go | 6 +- swarm/storage/mru/query.go | 16 +++--- swarm/storage/mru/query_test.go | 6 +- swarm/storage/mru/request.go | 34 ++++++------ swarm/storage/mru/request_test.go | 38 ++++++------- swarm/storage/mru/testutil.go | 12 ++-- swarm/storage/mru/topic.go | 2 +- swarm/storage/mru/update.go | 20 +++---- swarm/storage/mru/update_test.go | 18 +++--- swarm/storage/mru/view.go | 40 +++++++------- swarm/storage/mru/view_test.go | 12 ++-- 24 files changed, 236 insertions(+), 236 deletions(-) diff --git a/cmd/swarm/mru.go b/cmd/swarm/mru.go index cc7f634cb..afa73820f 100644 --- a/cmd/swarm/mru.go +++ b/cmd/swarm/mru.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with go-ethereum. If not, see . -// Command resource allows the user to create and update signed mutable resource updates +// Command resource allows the user to create and update signed Swarm Feeds package main import ( @@ -66,7 +66,7 @@ func resourceCreate(ctx *cli.Context) { ) newResourceRequest := mru.NewFirstRequest(getTopic(ctx)) - newResourceRequest.View.User = resourceGetUser(ctx) + newResourceRequest.Feed.User = resourceGetUser(ctx) manifestAddress, err := client.CreateResource(newResourceRequest) if err != nil { diff --git a/cmd/swarm/mru_test.go b/cmd/swarm/mru_test.go index 142cf9cfd..c52097a6e 100644 --- a/cmd/swarm/mru_test.go +++ b/cmd/swarm/mru_test.go @@ -101,7 +101,7 @@ func TestCLIResourceUpdate(t *testing.T) { } // View configures whose updates we will be looking up. 
- view := mru.View{ + view := mru.Feed{ Topic: topic, User: address, } @@ -146,8 +146,8 @@ func TestCLIResourceUpdate(t *testing.T) { } // make sure the retrieved view is the same - if request.View != view { - t.Fatalf("Expected view to be: %s, got %s", view, request.View) + if request.Feed != view { + t.Fatalf("Expected view to be: %s, got %s", view, request.Feed) } // test publishing a manifest diff --git a/swarm/api/api.go b/swarm/api/api.go index 7b8f04c13..e6b676dba 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -956,14 +956,14 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver return addr, manifestEntryMap, nil } -// ResourceLookup finds mutable resource updates at specific periods and versions +// ResourceLookup finds Swarm Feeds at specific periods and versions func (a *API) ResourceLookup(ctx context.Context, query *mru.Query) ([]byte, error) { _, err := a.resource.Lookup(ctx, query) if err != nil { return nil, err } var data []byte - _, data, err = a.resource.GetContent(&query.View) + _, data, err = a.resource.GetContent(&query.Feed) if err != nil { return nil, err } @@ -971,7 +971,7 @@ func (a *API) ResourceLookup(ctx context.Context, query *mru.Query) ([]byte, err } // ResourceNewRequest creates a Request object to update a specific mutable resource -func (a *API) ResourceNewRequest(ctx context.Context, view *mru.View) (*mru.Request, error) { +func (a *API) ResourceNewRequest(ctx context.Context, view *mru.Feed) (*mru.Request, error) { return a.resource.NewRequest(ctx, view) } @@ -993,7 +993,7 @@ var ErrCannotLoadResourceManifest = errors.New("Cannot load resource manifest") var ErrNotAResourceManifest = errors.New("Not a resource manifest") // ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the Resource's view ID. -func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (*mru.View, error) { +func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (*mru.Feed, error) { trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt) if err != nil { return nil, ErrCannotLoadResourceManifest @@ -1016,8 +1016,8 @@ var ErrCannotResolveResourceView = errors.New("Cannot resolve resource view") // ResolveResourceView attempts to extract View information out of the manifest, if provided // If not, it attempts to extract the View out of a set of key-value pairs -func (a *API) ResolveResourceView(ctx context.Context, uri *URI, values mru.Values) (*mru.View, error) { - var view *mru.View +func (a *API) ResolveResourceView(ctx context.Context, uri *URI, values mru.Values) (*mru.Feed, error) { + var view *mru.Feed var err error if uri.Addr != "" { // resolve the content key. 
@@ -1036,7 +1036,7 @@ func (a *API) ResolveResourceView(ctx context.Context, uri *URI, values mru.Valu } log.Debug("handle.get.resource: resolved", "manifestkey", manifestAddr, "view", view.Hex()) } else { - var v mru.View + var v mru.Feed if err := v.FromValues(values); err != nil { return nil, ErrCannotResolveResourceView diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go index 02980de1d..c6ad0237b 100644 --- a/swarm/api/client/client_test.go +++ b/swarm/api/client/client_test.go @@ -508,7 +508,7 @@ func TestClientCreateUpdateResource(t *testing.T) { // now try retrieving resource without a manifest - view := &mru.View{ + view := &mru.Feed{ Topic: topic, User: signer.Address(), } diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index 5ec69373d..4c19dd6df 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -517,7 +517,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { } var updateRequest mru.Request - updateRequest.View = *view + updateRequest.Feed = *view query := r.URL.Query() if err := updateRequest.FromValues(query, body); err != nil { // decodes request from query parameters @@ -544,7 +544,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { // we create a manifest so we can retrieve the resource with bzz:// later // this manifest has a special "resource type" manifest, and saves the // resource view ID used to retrieve the resource later - m, err := s.api.NewResourceManifest(r.Context(), &updateRequest.View) + m, err := s.api.NewResourceManifest(r.Context(), &updateRequest.Feed) if err != nil { RespondError(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError) return @@ -563,7 +563,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { } } -// Retrieve mutable resource updates: +// Retrieve Swarm Feeds: // bzz-resource:// - get latest update // bzz-resource:///?period=n - get latest update on period n // bzz-resource:///?period=n&version=m - get update version m of period n @@ -606,7 +606,7 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *http.Request) { return } - lookupParams := &mru.Query{View: *view} + lookupParams := &mru.Query{Feed: *view} if err = lookupParams.FromValues(r.URL.Query()); err != nil { // parse period, version RespondError(w, r, fmt.Sprintf("invalid mutable resource request:%s", err), http.StatusBadRequest) return diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index 817519a30..0855d30a2 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -206,7 +206,7 @@ func TestBzzResourceMultihash(t *testing.T) { } } -// Test resource updates using the raw update methods +// Test Swarm Feeds using the raw update methods func TestBzzResource(t *testing.T) { srv := testutil.NewTestSwarmServer(t, serverFunc, nil) signer, _ := newTestSigner() @@ -406,7 +406,7 @@ func TestBzzResource(t *testing.T) { // test manifest-less queries log.Info("get first update in update1Timestamp via direct query") - query := mru.NewQuery(&updateRequest.View, update1Timestamp, lookup.NoClue) + query := mru.NewQuery(&updateRequest.Feed, update1Timestamp, lookup.NoClue) urlq, err := url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) if err != nil { diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index 06be7323e..9672ca540 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -56,7 +56,7 @@ type ManifestEntry 
struct { ModTime time.Time `json:"mod_time,omitempty"` Status int `json:"status,omitempty"` Access *AccessEntry `json:"access,omitempty"` - ResourceView *mru.View `json:"resourceView,omitempty"` + ResourceView *mru.Feed `json:"resourceView,omitempty"` } // ManifestList represents the result of listing files in a manifest @@ -82,7 +82,7 @@ func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, // Manifest hack for supporting Mutable Resource Updates from the bzz: scheme // see swarm/api/api.go:API.Get() for more information -func (a *API) NewResourceManifest(ctx context.Context, view *mru.View) (storage.Address, error) { +func (a *API) NewResourceManifest(ctx context.Context, view *mru.Feed) (storage.Address, error) { var manifest Manifest entry := ManifestEntry{ ResourceView: view, diff --git a/swarm/network/README.md b/swarm/network/README.md index e9c9d30d0..0955fe94e 100644 --- a/swarm/network/README.md +++ b/swarm/network/README.md @@ -37,7 +37,7 @@ Using the streamer logic, various stream types are easy to implement: * live session syncing * historical syncing * simple retrieve requests and deliveries -* mutable resource updates streams +* Swarm Feeds streams * receipting for finger pointing ## Syncing diff --git a/swarm/storage/mru/cacheentry.go b/swarm/storage/mru/cacheentry.go index 280331f77..024ed61c3 100644 --- a/swarm/storage/mru/cacheentry.go +++ b/swarm/storage/mru/cacheentry.go @@ -26,23 +26,23 @@ import ( const ( hasherCount = 8 - resourceHashAlgorithm = storage.SHA3Hash + feedsHashAlgorithm = storage.SHA3Hash defaultRetrieveTimeout = 100 * time.Millisecond ) // cacheEntry caches resource data and the metadata of its root chunk. type cacheEntry struct { - ResourceUpdate + Update *bytes.Reader lastKey storage.Address } // implements storage.LazySectionReader func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) { - return int64(len(r.ResourceUpdate.data)), nil + return int64(len(r.Update.data)), nil } //returns the resource's topic func (r *cacheEntry) Topic() Topic { - return r.View.Topic + return r.Feed.Topic } diff --git a/swarm/storage/mru/doc.go b/swarm/storage/mru/doc.go index 19330e0c1..a9ea2076c 100644 --- a/swarm/storage/mru/doc.go +++ b/swarm/storage/mru/doc.go @@ -1,5 +1,5 @@ /* -Package mru defines Mutable resource updates. +Package feeds defines Swarm Feeds. A Mutable Resource is an entity which allows updates to a resource without resorting to ENS on each update. 
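
[Editor's note, not part of the patch: the doc.go comment above now introduces the package as Swarm Feeds. Below is a minimal, hypothetical Go sketch of the renamed client-facing types (mru.Feed, NewFirstRequest, request.Feed.User) as they are used in the cmd/swarm and client changes earlier in this diff. The topic name and user address are illustrative values borrowed from the test fixtures, and the import path still being swarm/storage/mru at this commit is an assumption based on the file paths in the diff.]

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func main() {
	// NewTopic derives a fixed-size Topic from a human-readable name and
	// optional related content (nil here).
	topic, err := mru.NewTopic("føø.bar", nil)
	if err != nil {
		panic(err)
	}

	// After the rename, a feed is identified by (Topic, User) via mru.Feed,
	// formerly mru.View.
	feed := mru.Feed{
		Topic: topic,
		User:  common.HexToAddress("0x876a8936a7cd0b79ef0735ad0896c1afe278781c"), // placeholder publisher address
	}

	// NewFirstRequest prepares the first, unsigned update request for the
	// topic; the publisher goes into request.Feed.User (was request.View.User).
	request := mru.NewFirstRequest(feed.Topic)
	request.Feed.User = feed.User
	request.SetData([]byte("hello, Swarm Feeds"))

	fmt.Println("feed:", feed.Hex())
}
```

[The binary layout of a Feed (Topic followed by the user address, feedLength = TopicLength + common.AddressLength) is unchanged by the rename; only identifiers move from View to Feed.]
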
diff --git a/swarm/storage/mru/handler.go b/swarm/storage/mru/handler.go index 3e7654795..034934d05 100644 --- a/swarm/storage/mru/handler.go +++ b/swarm/storage/mru/handler.go @@ -34,8 +34,8 @@ import ( type Handler struct { chunkStore *storage.NetStore HashSize int - resources map[uint64]*cacheEntry - resourceLock sync.RWMutex + cache map[uint64]*cacheEntry + cacheLock sync.RWMutex storeTimeout time.Duration queryMaxPeriods uint32 } @@ -52,26 +52,26 @@ var hashPool sync.Pool func init() { hashPool = sync.Pool{ New: func() interface{} { - return storage.MakeHashFunc(resourceHashAlgorithm)() + return storage.MakeHashFunc(feedsHashAlgorithm)() }, } } // NewHandler creates a new Mutable Resource API func NewHandler(params *HandlerParams) *Handler { - rh := &Handler{ - resources: make(map[uint64]*cacheEntry), + fh := &Handler{ + cache: make(map[uint64]*cacheEntry), } for i := 0; i < hasherCount; i++ { - hashfunc := storage.MakeHashFunc(resourceHashAlgorithm)() - if rh.HashSize == 0 { - rh.HashSize = hashfunc.Size() + hashfunc := storage.MakeHashFunc(feedsHashAlgorithm)() + if fh.HashSize == 0 { + fh.HashSize = hashfunc.Size() } hashPool.Put(hashfunc) } - return rh + return fh } // SetStore sets the store backend for the Mutable Resource API @@ -95,7 +95,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { // First, deserialize the chunk var r Request if err := r.fromChunk(chunkAddr, data); err != nil { - log.Debug("Invalid resource chunk", "addr", chunkAddr.Hex(), "err", err.Error()) + log.Debug("Invalid feed update chunk", "addr", chunkAddr.Hex(), "err", err.Error()) return false } @@ -103,7 +103,7 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { // If it fails, it means either the signature is not valid, data is corrupted // or someone is trying to update someone else's resource. if err := r.Verify(); err != nil { - log.Debug("Invalid signature", "err", err) + log.Debug("Invalid feed update signature", "err", err) return false } @@ -111,32 +111,32 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { } // GetContent retrieves the data payload of the last synced update of the Mutable Resource -func (h *Handler) GetContent(view *View) (storage.Address, []byte, error) { - if view == nil { +func (h *Handler) GetContent(feed *Feed) (storage.Address, []byte, error) { + if feed == nil { return nil, nil, NewError(ErrInvalidValue, "view is nil") } - rsrc := h.get(view) - if rsrc == nil { + feedUpdate := h.get(feed) + if feedUpdate == nil { return nil, nil, NewError(ErrNotFound, "resource does not exist") } - return rsrc.lastKey, rsrc.data, nil + return feedUpdate.lastKey, feedUpdate.data, nil } // NewRequest prepares a Request structure with all the necessary information to // just add the desired data and sign it. 
// The resulting structure can then be signed and passed to Handler.Update to be verified and sent -func (h *Handler) NewRequest(ctx context.Context, view *View) (request *Request, err error) { - if view == nil { - return nil, NewError(ErrInvalidValue, "view cannot be nil") +func (h *Handler) NewRequest(ctx context.Context, feed *Feed) (request *Request, err error) { + if feed == nil { + return nil, NewError(ErrInvalidValue, "feed cannot be nil") } now := TimestampProvider.Now().Time request = new(Request) request.Header.Version = ProtocolVersion - query := NewQueryLatest(view, lookup.NoClue) + query := NewQueryLatest(feed, lookup.NoClue) - rsrc, err := h.Lookup(ctx, query) + feedUpdate, err := h.Lookup(ctx, query) if err != nil { if err.(*Error).code != ErrNotFound { return nil, err @@ -145,11 +145,11 @@ func (h *Handler) NewRequest(ctx context.Context, view *View) (request *Request, // or that the resource really does not have updates } - request.View = *view + request.Feed = *feed // if we already have an update, then find next epoch - if rsrc != nil { - request.Epoch = lookup.GetNextEpoch(rsrc.Epoch, now) + if feedUpdate != nil { + request.Epoch = lookup.GetNextEpoch(feedUpdate.Epoch, now) } else { request.Epoch = lookup.GetFirstEpoch(now) } @@ -172,7 +172,7 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) } if query.Hint == lookup.NoClue { // try to use our cache - entry := h.get(&query.View) + entry := h.get(&query.Feed) if entry != nil && entry.Epoch.Time <= timeLimit { // avoid bad hints query.Hint = entry.Epoch } @@ -183,19 +183,19 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups") } - var ul ID - ul.View = query.View + var id ID + id.Feed = query.Feed var readCount int // Invoke the lookup engine. 
// The callback will be called every time the lookup algorithm needs to guess requestPtr, err := lookup.Lookup(timeLimit, query.Hint, func(epoch lookup.Epoch, now uint64) (interface{}, error) { readCount++ - ul.Epoch = epoch + id.Epoch = epoch ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout) defer cancel() - chunk, err := h.chunkStore.Get(ctx, ul.Addr()) + chunk, err := h.chunkStore.Get(ctx, id.Addr()) if err != nil { // TODO: check for catastrophic errors other than chunk not found return nil, nil } @@ -227,19 +227,19 @@ func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) func (h *Handler) updateCache(request *Request) (*cacheEntry, error) { updateAddr := request.Addr() - log.Trace("resource cache update", "topic", request.Topic.Hex(), "updatekey", updateAddr, "epoch time", request.Epoch.Time, "epoch level", request.Epoch.Level) + log.Trace("feed cache update", "topic", request.Topic.Hex(), "updateaddr", updateAddr, "epoch time", request.Epoch.Time, "epoch level", request.Epoch.Level) - rsrc := h.get(&request.View) - if rsrc == nil { - rsrc = &cacheEntry{} - h.set(&request.View, rsrc) + feedUpdate := h.get(&request.Feed) + if feedUpdate == nil { + feedUpdate = &cacheEntry{} + h.set(&request.Feed, feedUpdate) } // update our rsrcs entry map - rsrc.lastKey = updateAddr - rsrc.ResourceUpdate = request.ResourceUpdate - rsrc.Reader = bytes.NewReader(rsrc.data) - return rsrc, nil + feedUpdate.lastKey = updateAddr + feedUpdate.Update = request.Update + feedUpdate.Reader = bytes.NewReader(feedUpdate.data) + return feedUpdate, nil } // Update adds an actual data update @@ -255,8 +255,8 @@ func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Ad return nil, NewError(ErrInit, "Call Handler.SetStore() before updating") } - rsrc := h.get(&r.View) - if rsrc != nil && rsrc.Epoch.Equals(r.Epoch) { // This is the only cheap check we can do for sure + feedUpdate := h.get(&r.Feed) + if feedUpdate != nil && feedUpdate.Epoch.Equals(r.Epoch) { // This is the only cheap check we can do for sure return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist") } @@ -267,32 +267,32 @@ func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Ad // send the chunk h.chunkStore.Put(ctx, chunk) - log.Trace("resource update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data()) + log.Trace("feed update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data()) // update our resources map cache entry if the new update is older than the one we have, if we have it. 
- if rsrc != nil && r.Epoch.After(rsrc.Epoch) { - rsrc.Epoch = r.Epoch - rsrc.data = make([]byte, len(r.data)) - rsrc.lastKey = r.idAddr - copy(rsrc.data, r.data) - rsrc.Reader = bytes.NewReader(rsrc.data) + if feedUpdate != nil && r.Epoch.After(feedUpdate.Epoch) { + feedUpdate.Epoch = r.Epoch + feedUpdate.data = make([]byte, len(r.data)) + feedUpdate.lastKey = r.idAddr + copy(feedUpdate.data, r.data) + feedUpdate.Reader = bytes.NewReader(feedUpdate.data) } return r.idAddr, nil } // Retrieves the resource cache value for the given nameHash -func (h *Handler) get(view *View) *cacheEntry { +func (h *Handler) get(view *Feed) *cacheEntry { mapKey := view.mapKey() - h.resourceLock.RLock() - defer h.resourceLock.RUnlock() - rsrc := h.resources[mapKey] - return rsrc + h.cacheLock.RLock() + defer h.cacheLock.RUnlock() + feedUpdate := h.cache[mapKey] + return feedUpdate } // Sets the resource cache value for the given View -func (h *Handler) set(view *View, rsrc *cacheEntry) { +func (h *Handler) set(view *Feed, feedUpdate *cacheEntry) { mapKey := view.mapKey() - h.resourceLock.Lock() - defer h.resourceLock.Unlock() - h.resources[mapKey] = rsrc + h.cacheLock.Lock() + defer h.cacheLock.Unlock() + h.cache[mapKey] = feedUpdate } diff --git a/swarm/storage/mru/handler_test.go b/swarm/storage/mru/handler_test.go index 13eb9e51b..a4c7969a3 100644 --- a/swarm/storage/mru/handler_test.go +++ b/swarm/storage/mru/handler_test.go @@ -40,7 +40,7 @@ var ( Time: uint64(4200), } cleanF func() - resourceName = "føø.bar" + subtopicName = "føø.bar" hashfunc = storage.MakeHashFunc(storage.DefaultHash) ) @@ -73,7 +73,7 @@ func (f *fakeTimeProvider) Now() Timestamp { } // make updates and retrieve them based on periods and versions -func TestResourceHandler(t *testing.T) { +func TestFeedsHandler(t *testing.T) { // make fake timeProvider clock := &fakeTimeProvider{ @@ -83,7 +83,7 @@ func TestResourceHandler(t *testing.T) { // signer containing private key signer := newAliceSigner() - rh, datadir, teardownTest, err := setupTest(clock, signer) + feedsHandler, datadir, teardownTest, err := setupTest(clock, signer) if err != nil { t.Fatal(err) } @@ -93,8 +93,8 @@ func TestResourceHandler(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - topic, _ := NewTopic("Mess with mru code and see what ghost catches you", nil) - view := View{ + topic, _ := NewTopic("Mess with Swarm Feeds code and see what ghost catches you", nil) + view := Feed{ Topic: topic, User: signer.Address(), } @@ -108,13 +108,13 @@ func TestResourceHandler(t *testing.T) { } request := NewFirstRequest(view.Topic) // this timestamps the update at t = 4200 (start time) - resourcekey := make(map[string]storage.Address) + chunkAddress := make(map[string]storage.Address) data := []byte(updates[0]) request.SetData(data) if err := request.Sign(signer); err != nil { t.Fatal(err) } - resourcekey[updates[0]], err = rh.Update(ctx, request) + chunkAddress[updates[0]], err = feedsHandler.Update(ctx, request) if err != nil { t.Fatal(err) } @@ -122,7 +122,7 @@ func TestResourceHandler(t *testing.T) { // move the clock ahead 21 seconds clock.FastForward(21) // t=4221 - request, err = rh.NewRequest(ctx, &request.View) // this timestamps the update at t = 4221 + request, err = feedsHandler.NewRequest(ctx, &request.Feed) // this timestamps the update at t = 4221 if err != nil { t.Fatal(err) } @@ -136,14 +136,14 @@ func TestResourceHandler(t *testing.T) { if err := request.Sign(signer); err != nil { t.Fatal(err) } - resourcekey[updates[1]], err 
= rh.Update(ctx, request) + chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request) if err == nil { t.Fatal("Expected update to fail since an update in this epoch already exists") } // move the clock ahead 21 seconds clock.FastForward(21) // t=4242 - request, err = rh.NewRequest(ctx, &request.View) + request, err = feedsHandler.NewRequest(ctx, &request.Feed) if err != nil { t.Fatal(err) } @@ -151,14 +151,14 @@ func TestResourceHandler(t *testing.T) { if err := request.Sign(signer); err != nil { t.Fatal(err) } - resourcekey[updates[1]], err = rh.Update(ctx, request) + chunkAddress[updates[1]], err = feedsHandler.Update(ctx, request) if err != nil { t.Fatal(err) } // move the clock ahead 42 seconds clock.FastForward(42) // t=4284 - request, err = rh.NewRequest(ctx, &request.View) + request, err = feedsHandler.NewRequest(ctx, &request.Feed) if err != nil { t.Fatal(err) } @@ -167,14 +167,14 @@ func TestResourceHandler(t *testing.T) { if err := request.Sign(signer); err != nil { t.Fatal(err) } - resourcekey[updates[2]], err = rh.Update(ctx, request) + chunkAddress[updates[2]], err = feedsHandler.Update(ctx, request) if err != nil { t.Fatal(err) } // move the clock ahead 1 second clock.FastForward(1) // t=4285 - request, err = rh.NewRequest(ctx, &request.View) + request, err = feedsHandler.NewRequest(ctx, &request.Feed) if err != nil { t.Fatal(err) } @@ -187,25 +187,25 @@ func TestResourceHandler(t *testing.T) { if err := request.Sign(signer); err != nil { t.Fatal(err) } - resourcekey[updates[3]], err = rh.Update(ctx, request) + chunkAddress[updates[3]], err = feedsHandler.Update(ctx, request) if err != nil { t.Fatal(err) } time.Sleep(time.Second) - rh.Close() + feedsHandler.Close() // check we can retrieve the updates after close clock.FastForward(2000) // t=6285 - rhparams := &HandlerParams{} + feedParams := &HandlerParams{} - rh2, err := NewTestHandler(datadir, rhparams) + feedsHandler2, err := NewTestHandler(datadir, feedParams) if err != nil { t.Fatal(err) } - rsrc2, err := rh2.Lookup(ctx, NewQueryLatest(&request.View, lookup.NoClue)) + rsrc2, err := feedsHandler2.Lookup(ctx, NewQueryLatest(&request.Feed, lookup.NoClue)) if err != nil { t.Fatal(err) } @@ -223,7 +223,7 @@ func TestResourceHandler(t *testing.T) { log.Debug("Latest lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data) // specific point in time - rsrc, err := rh2.Lookup(ctx, NewQuery(&request.View, 4284, lookup.NoClue)) + rsrc, err := feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, 4284, lookup.NoClue)) if err != nil { t.Fatal(err) } @@ -234,7 +234,7 @@ func TestResourceHandler(t *testing.T) { log.Debug("Historical lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data) // beyond the first should yield an error - rsrc, err = rh2.Lookup(ctx, NewQuery(&request.View, startTime.Time-1, lookup.NoClue)) + rsrc, err = feedsHandler2.Lookup(ctx, NewQuery(&request.Feed, startTime.Time-1, lookup.NoClue)) if err == nil { t.Fatalf("expected previous to fail, returned epoch %s data %v", rsrc.Epoch.String(), rsrc.data) } @@ -270,7 +270,7 @@ func TestSparseUpdates(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() topic, _ := NewTopic("Very slow updates", nil) - view := View{ + view := Feed{ Topic: topic, User: signer.Address(), } @@ -349,12 +349,12 @@ func TestValidator(t *testing.T) { defer teardownTest() // create new resource - topic, _ := NewTopic(resourceName, nil) - view := View{ + topic, _ := NewTopic(subtopicName, nil) 
+ feed := Feed{ Topic: topic, User: signer.Address(), } - mr := NewFirstRequest(view.Topic) + mr := NewFirstRequest(feed.Topic) // chunk with address data := []byte("foo") @@ -410,9 +410,9 @@ func TestValidatorInStore(t *testing.T) { } // set up resource handler and add is as a validator to the localstore - rhParams := &HandlerParams{} - rh := NewHandler(rhParams) - store.Validators = append(store.Validators, rh) + fhParams := &HandlerParams{} + fh := NewHandler(fhParams) + store.Validators = append(store.Validators, fh) // create content addressed chunks, one good, one faulty chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2) @@ -420,7 +420,7 @@ func TestValidatorInStore(t *testing.T) { badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data()) topic, _ := NewTopic("xyzzy", nil) - view := View{ + feed := Feed{ Topic: topic, User: signer.Address(), } @@ -430,7 +430,7 @@ func TestValidatorInStore(t *testing.T) { Epoch: lookup.Epoch{Time: 42, Level: 1, }, - View: view, + Feed: feed, } updateAddr := id.Addr() @@ -438,7 +438,7 @@ func TestValidatorInStore(t *testing.T) { r := new(Request) r.idAddr = updateAddr - r.ResourceUpdate.ID = id + r.Update.ID = id r.data = data r.Sign(signer) @@ -451,20 +451,20 @@ func TestValidatorInStore(t *testing.T) { // put the chunks in the store and check their error status err = store.Put(context.Background(), goodChunk) if err == nil { - t.Fatal("expected error on good content address chunk with resource validator only, but got nil") + t.Fatal("expected error on good content address chunk with feed update validator only, but got nil") } err = store.Put(context.Background(), badChunk) if err == nil { - t.Fatal("expected error on bad content address chunk with resource validator only, but got nil") + t.Fatal("expected error on bad content address chunk with feed update validator only, but got nil") } err = store.Put(context.Background(), uglyChunk) if err != nil { - t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err) + t.Fatalf("expected no error on feed update chunk with feed update validator only, but got: %s", err) } } // create rpc and resourcehandler -func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, datadir string, teardown func(), err error) { +func setupTest(timeProvider timestampProvider, signer Signer) (fh *TestHandler, datadir string, teardown func(), err error) { var fsClean func() var rpcClean func() @@ -478,7 +478,7 @@ func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, } // temp datadir - datadir, err = ioutil.TempDir("", "rh") + datadir, err = ioutil.TempDir("", "fh") if err != nil { return nil, "", nil, err } @@ -487,9 +487,9 @@ func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, } TimestampProvider = timeProvider - rhparams := &HandlerParams{} - rh, err = NewTestHandler(datadir, rhparams) - return rh, datadir, cleanF, err + fhParams := &HandlerParams{} + fh, err = NewTestHandler(datadir, fhParams) + return fh, datadir, cleanF, err } func newAliceSigner() *GenericSigner { diff --git a/swarm/storage/mru/id.go b/swarm/storage/mru/id.go index f008169ed..09ef9e450 100644 --- a/swarm/storage/mru/id.go +++ b/swarm/storage/mru/id.go @@ -29,21 +29,21 @@ import ( // ID uniquely identifies an update on the network. 
type ID struct { - View `json:"view"` + Feed `json:"view"` lookup.Epoch `json:"epoch"` } // ID layout: -// View viewLength bytes +// Feed feedLength bytes // Epoch EpochLength -const idLength = viewLength + lookup.EpochLength +const idLength = feedLength + lookup.EpochLength // Addr calculates the resource update chunk address corresponding to this ID func (u *ID) Addr() (updateAddr storage.Address) { serializedData := make([]byte, idLength) var cursor int - u.View.binaryPut(serializedData[cursor : cursor+viewLength]) - cursor += viewLength + u.Feed.binaryPut(serializedData[cursor : cursor+feedLength]) + cursor += feedLength eid := u.Epoch.ID() copy(serializedData[cursor:cursor+lookup.EpochLength], eid[:]) @@ -61,10 +61,10 @@ func (u *ID) binaryPut(serializedData []byte) error { return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize ID. Expected %d, got %d", idLength, len(serializedData)) } var cursor int - if err := u.View.binaryPut(serializedData[cursor : cursor+viewLength]); err != nil { + if err := u.Feed.binaryPut(serializedData[cursor : cursor+feedLength]); err != nil { return err } - cursor += viewLength + cursor += feedLength epochBytes, err := u.Epoch.MarshalBinary() if err != nil { @@ -88,10 +88,10 @@ func (u *ID) binaryGet(serializedData []byte) error { } var cursor int - if err := u.View.binaryGet(serializedData[cursor : cursor+viewLength]); err != nil { + if err := u.Feed.binaryGet(serializedData[cursor : cursor+feedLength]); err != nil { return err } - cursor += viewLength + cursor += feedLength if err := u.Epoch.UnmarshalBinary(serializedData[cursor : cursor+lookup.EpochLength]); err != nil { return err @@ -108,8 +108,8 @@ func (u *ID) FromValues(values Values) error { u.Epoch.Level = uint8(level) u.Epoch.Time, _ = strconv.ParseUint(values.Get("time"), 10, 64) - if u.View.User == (common.Address{}) { - return u.View.FromValues(values) + if u.Feed.User == (common.Address{}) { + return u.Feed.FromValues(values) } return nil } @@ -119,5 +119,5 @@ func (u *ID) FromValues(values Values) error { func (u *ID) AppendValues(values Values) { values.Set("level", fmt.Sprintf("%d", u.Epoch.Level)) values.Set("time", fmt.Sprintf("%d", u.Epoch.Time)) - u.View.AppendValues(values) + u.Feed.AppendValues(values) } diff --git a/swarm/storage/mru/id_test.go b/swarm/storage/mru/id_test.go index eba58fbf3..767b3c159 100644 --- a/swarm/storage/mru/id_test.go +++ b/swarm/storage/mru/id_test.go @@ -8,14 +8,14 @@ import ( func getTestID() *ID { return &ID{ - View: *getTestView(), + Feed: *getTestFeed(), Epoch: lookup.GetFirstEpoch(1000), } } func TestIDAddr(t *testing.T) { - ul := getTestID() - updateAddr := ul.Addr() + id := getTestID() + updateAddr := id.Addr() compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8b24583ec293e085f4c78aaee66d1bc5abfb8b4233304d14a349afa57af2a783") } diff --git a/swarm/storage/mru/query.go b/swarm/storage/mru/query.go index 13a28eaab..c8a7cbe5a 100644 --- a/swarm/storage/mru/query.go +++ b/swarm/storage/mru/query.go @@ -27,7 +27,7 @@ import ( // Query is used to specify constraints when performing an update lookup // TimeLimit indicates an upper bound for the search. 
Set to 0 for "now" type Query struct { - View + Feed Hint lookup.Epoch TimeLimit uint64 } @@ -41,8 +41,8 @@ func (q *Query) FromValues(values Values) error { level, _ := strconv.ParseUint(values.Get("hint.level"), 10, 32) q.Hint.Level = uint8(level) q.Hint.Time, _ = strconv.ParseUint(values.Get("hint.time"), 10, 64) - if q.View.User == (common.Address{}) { - return q.View.FromValues(values) + if q.Feed.User == (common.Address{}) { + return q.Feed.FromValues(values) } return nil } @@ -59,20 +59,20 @@ func (q *Query) AppendValues(values Values) { if q.Hint.Time != 0 { values.Set("hint.time", fmt.Sprintf("%d", q.Hint.Time)) } - q.View.AppendValues(values) + q.Feed.AppendValues(values) } // NewQuery constructs an Query structure to find updates on or before `time` // if time == 0, the latest update will be looked up -func NewQuery(view *View, time uint64, hint lookup.Epoch) *Query { +func NewQuery(feed *Feed, time uint64, hint lookup.Epoch) *Query { return &Query{ TimeLimit: time, - View: *view, + Feed: *feed, Hint: hint, } } // NewQueryLatest generates lookup parameters that look for the latest version of a resource -func NewQueryLatest(view *View, hint lookup.Epoch) *Query { - return NewQuery(view, 0, hint) +func NewQueryLatest(feed *Feed, hint lookup.Epoch) *Query { + return NewQuery(feed, 0, hint) } diff --git a/swarm/storage/mru/query_test.go b/swarm/storage/mru/query_test.go index 189a465d6..4cfc597b2 100644 --- a/swarm/storage/mru/query_test.go +++ b/swarm/storage/mru/query_test.go @@ -21,11 +21,11 @@ import ( ) func getTestQuery() *Query { - ul := getTestID() + id := getTestID() return &Query{ TimeLimit: 5000, - View: ul.View, - Hint: ul.Epoch, + Feed: id.Feed, + Hint: id.Epoch, } } diff --git a/swarm/storage/mru/request.go b/swarm/storage/mru/request.go index f6d0f38ff..03ffbf038 100644 --- a/swarm/storage/mru/request.go +++ b/swarm/storage/mru/request.go @@ -29,10 +29,10 @@ import ( // Request represents an update and/or resource create message type Request struct { - ResourceUpdate // actual content that will be put on the chunk, less signature - Signature *Signature - idAddr storage.Address // cached chunk address for the update (not serialized, for internal use) - binaryData []byte // cached serialized data (does not get serialized again!, for efficiency/internal use) + Update // actual content that will be put on the chunk, less signature + Signature *Signature + idAddr storage.Address // cached chunk address for the update (not serialized, for internal use) + binaryData []byte // cached serialized data (does not get serialized again!, for efficiency/internal use) } // updateRequestJSON represents a JSON-serialized UpdateRequest @@ -44,11 +44,11 @@ type updateRequestJSON struct { } // Request layout -// resourceUpdate bytes +// Update bytes // SignatureLength bytes const minimumSignedUpdateLength = minimumUpdateDataLength + signatureLength -// NewFirstRequest returns a ready to sign request to publish a first update +// NewFirstRequest returns a ready to sign request to publish a first feed update func NewFirstRequest(topic Topic) *Request { request := new(Request) @@ -56,7 +56,7 @@ func NewFirstRequest(topic Topic) *Request { // get the current time now := TimestampProvider.Now().Time request.Epoch = lookup.GetFirstEpoch(now) - request.View.Topic = topic + request.Feed.Topic = topic request.Header.Version = ProtocolVersion return request @@ -88,7 +88,7 @@ func (r *Request) Verify() (err error) { } // get the address of the signer (which also checks that it's a valid signature) - 
r.View.User, err = getUserAddr(digest, *r.Signature) + r.Feed.User, err = getUserAddr(digest, *r.Signature) if err != nil { return err } @@ -105,7 +105,7 @@ func (r *Request) Verify() (err error) { // Sign executes the signature to validate the resource func (r *Request) Sign(signer Signer) error { - r.View.User = signer.Address() + r.Feed.User = signer.Address() r.binaryData = nil //invalidate serialized data digest, err := r.GetDigest() // computes digest and serializes into .binaryData if err != nil { @@ -139,10 +139,10 @@ func (r *Request) GetDigest() (result common.Hash, err error) { hasher := hashPool.Get().(hash.Hash) defer hashPool.Put(hasher) hasher.Reset() - dataLength := r.ResourceUpdate.binaryLength() + dataLength := r.Update.binaryLength() if r.binaryData == nil { r.binaryData = make([]byte, dataLength+signatureLength) - if err := r.ResourceUpdate.binaryPut(r.binaryData[:dataLength]); err != nil { + if err := r.Update.binaryPut(r.binaryData[:dataLength]); err != nil { return result, err } } @@ -161,10 +161,10 @@ func (r *Request) toChunk() (storage.Chunk, error) { return nil, NewError(ErrInvalidSignature, "toChunk called without a valid signature or payload data. Call .Sign() first.") } - resourceUpdateLength := r.ResourceUpdate.binaryLength() + updateLength := r.Update.binaryLength() // signature is the last item in the chunk data - copy(r.binaryData[resourceUpdateLength:], r.Signature[:]) + copy(r.binaryData[updateLength:], r.Signature[:]) chunk := storage.NewChunk(r.idAddr, r.binaryData) return chunk, nil @@ -175,13 +175,13 @@ func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error // for update chunk layout see Request definition //deserialize the resource update portion - if err := r.ResourceUpdate.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil { + if err := r.Update.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil { return err } // Extract the signature var signature *Signature - cursor := r.ResourceUpdate.binaryLength() + cursor := r.Update.binaryLength() sigdata := chunkdata[cursor : cursor+signatureLength] if len(sigdata) > 0 { signature = &Signature{} @@ -209,7 +209,7 @@ func (r *Request) FromValues(values Values, data []byte) error { r.Signature = new(Signature) copy(r.Signature[:], signatureBytes) } - err = r.ResourceUpdate.FromValues(values, data) + err = r.Update.FromValues(values, data) if err != nil { return err } @@ -223,7 +223,7 @@ func (r *Request) AppendValues(values Values) []byte { if r.Signature != nil { values.Set("signature", hexutil.Encode(r.Signature[:])) } - return r.ResourceUpdate.AppendValues(values) + return r.Update.AppendValues(values) } // fromJSON takes an update request JSON and populates an UpdateRequest diff --git a/swarm/storage/mru/request_test.go b/swarm/storage/mru/request_test.go index c32d5ec13..e58bf28aa 100644 --- a/swarm/storage/mru/request_test.go +++ b/swarm/storage/mru/request_test.go @@ -53,25 +53,25 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { charlie := newCharlieSigner() //Charlie bob := newBobSigner() //Bob - // Create a resource to our good guy Charlie's name - topic, _ := NewTopic("a good resource name", nil) - createRequest := NewFirstRequest(topic) - createRequest.User = charlie.Address() + // Create a feed to our good guy Charlie's name + topic, _ := NewTopic("a good topic name", nil) + firstRequest := NewFirstRequest(topic) + firstRequest.User = charlie.Address() // We now encode the create message to simulate we send it over the wire - 
messageRawData, err := createRequest.MarshalJSON() + messageRawData, err := firstRequest.MarshalJSON() if err != nil { - t.Fatalf("Error encoding create resource request: %s", err) + t.Fatalf("Error encoding first feed update request: %s", err) } // ... the message arrives and is decoded... - var recoveredCreateRequest Request - if err := recoveredCreateRequest.UnmarshalJSON(messageRawData); err != nil { - t.Fatalf("Error decoding create resource request: %s", err) + var recoveredFirstRequest Request + if err := recoveredFirstRequest.UnmarshalJSON(messageRawData); err != nil { + t.Fatalf("Error decoding first feed update request: %s", err) } // ... but verification should fail because it is not signed! - if err := recoveredCreateRequest.Verify(); err == nil { + if err := recoveredFirstRequest.Verify(); err == nil { t.Fatal("Expected Verify to fail since the message is not signed") } @@ -85,13 +85,13 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { //Put together an unsigned update request that we will serialize to send it to the signer. data := []byte("This hour's update: Swarm 99.0 has been released!") request := &Request{ - ResourceUpdate: ResourceUpdate{ + Update: Update{ ID: ID{ Epoch: lookup.Epoch{ Time: 1000, Level: 1, }, - View: createRequest.ResourceUpdate.View, + Feed: firstRequest.Update.Feed, }, data: data, }, @@ -191,7 +191,7 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { func getTestRequest() *Request { return &Request{ - ResourceUpdate: *getTestResourceUpdate(), + Update: *getTestFeedUpdate(), } } @@ -258,7 +258,7 @@ func TestReverse(t *testing.T) { defer teardownTest() topic, _ := NewTopic("Cervantes quotes", nil) - view := View{ + view := Feed{ Topic: topic, User: signer.Address(), } @@ -266,7 +266,7 @@ func TestReverse(t *testing.T) { data := []byte("Donde una puerta se cierra, otra se abre") request := new(Request) - request.View = view + request.Feed = view request.Epoch = epoch request.data = data @@ -291,15 +291,15 @@ func TestReverse(t *testing.T) { if err != nil { t.Fatal(err) } - recoveredaddress, err := getUserAddr(checkdigest, *checkUpdate.Signature) + recoveredAddr, err := getUserAddr(checkdigest, *checkUpdate.Signature) if err != nil { t.Fatalf("Retrieve address from signature fail: %v", err) } - originaladdress := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) + originalAddr := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) // check that the metadata retrieved from the chunk matches what we gave it - if recoveredaddress != originaladdress { - t.Fatalf("addresses dont match: %x != %x", originaladdress, recoveredaddress) + if recoveredAddr != originalAddr { + t.Fatalf("addresses dont match: %x != %x", originalAddr, recoveredAddr) } if !bytes.Equal(key[:], chunk.Address()[:]) { diff --git a/swarm/storage/mru/testutil.go b/swarm/storage/mru/testutil.go index 7a5a9e4d9..80e0d4cf0 100644 --- a/swarm/storage/mru/testutil.go +++ b/swarm/storage/mru/testutil.go @@ -27,7 +27,7 @@ import ( ) const ( - testDbDirName = "mru" + testDbDirName = "feeds" ) type TestHandler struct { @@ -52,20 +52,20 @@ func newFakeNetFetcher(context.Context, storage.Address, *sync.Map) storage.NetF // NewTestHandler creates Handler object to be used for testing purposes. 
func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) { path := filepath.Join(datadir, testDbDirName) - rh := NewHandler(params) + fh := NewHandler(params) localstoreparams := storage.NewDefaultLocalStoreParams() localstoreparams.Init(path) localStore, err := storage.NewLocalStore(localstoreparams, nil) if err != nil { return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err) } - localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(resourceHashAlgorithm))) - localStore.Validators = append(localStore.Validators, rh) + localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(feedsHashAlgorithm))) + localStore.Validators = append(localStore.Validators, fh) netStore, err := storage.NewNetStore(localStore, nil) if err != nil { return nil, err } netStore.NewNetFetcherFunc = newFakeNetFetcher - rh.SetStore(netStore) - return &TestHandler{rh}, nil + fh.SetStore(netStore) + return &TestHandler{fh}, nil } diff --git a/swarm/storage/mru/topic.go b/swarm/storage/mru/topic.go index f318a5593..5f5720ae2 100644 --- a/swarm/storage/mru/topic.go +++ b/swarm/storage/mru/topic.go @@ -29,7 +29,7 @@ import ( // TopicLength establishes the max length of a topic string const TopicLength = storage.AddressLength -// Topic represents what a resource talks about +// Topic represents what a feed is about type Topic [TopicLength]byte // ErrTopicTooLong is returned when creating a topic with a name/related content too long diff --git a/swarm/storage/mru/update.go b/swarm/storage/mru/update.go index 6aa57fce1..892cb9d1b 100644 --- a/swarm/storage/mru/update.go +++ b/swarm/storage/mru/update.go @@ -34,8 +34,8 @@ type Header struct { Padding [headerLength - 1]uint8 // reserved for future use } -// ResourceUpdate encapsulates the information sent as part of a resource update -type ResourceUpdate struct { +// Update encapsulates the information sent as part of a feed update +type Update struct { Header Header // ID // Resource update identifying information data []byte // actual data payload @@ -44,15 +44,15 @@ type ResourceUpdate struct { const minimumUpdateDataLength = idLength + headerLength + 1 const maxUpdateDataLength = chunk.DefaultSize - signatureLength - idLength - headerLength -// binaryPut serializes the resource update information into the given slice -func (r *ResourceUpdate) binaryPut(serializedData []byte) error { +// binaryPut serializes the feed update information into the given slice +func (r *Update) binaryPut(serializedData []byte) error { datalength := len(r.data) if datalength == 0 { - return NewError(ErrInvalidValue, "cannot update a resource with no data") + return NewError(ErrInvalidValue, "a feed update must contain data") } if datalength > maxUpdateDataLength { - return NewErrorf(ErrInvalidValue, "data is too big (length=%d). Max length=%d", datalength, maxUpdateDataLength) + return NewErrorf(ErrInvalidValue, "feed update data is too big (length=%d). 
Max length=%d", datalength, maxUpdateDataLength) } if len(serializedData) != r.binaryLength() { @@ -79,12 +79,12 @@ func (r *ResourceUpdate) binaryPut(serializedData []byte) error { } // binaryLength returns the expected number of bytes this structure will take to encode -func (r *ResourceUpdate) binaryLength() int { +func (r *Update) binaryLength() int { return idLength + headerLength + len(r.data) } // binaryGet populates this instance from the information contained in the passed byte slice -func (r *ResourceUpdate) binaryGet(serializedData []byte) error { +func (r *Update) binaryGet(serializedData []byte) error { if len(serializedData) < minimumUpdateDataLength { return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a resource update chunk", minimumUpdateDataLength) } @@ -116,7 +116,7 @@ func (r *ResourceUpdate) binaryGet(serializedData []byte) error { // FromValues deserializes this instance from a string key-value store // useful to parse query strings -func (r *ResourceUpdate) FromValues(values Values, data []byte) error { +func (r *Update) FromValues(values Values, data []byte) error { r.data = data version, _ := strconv.ParseUint(values.Get("protocolVersion"), 10, 32) r.Header.Version = uint8(version) @@ -125,7 +125,7 @@ func (r *ResourceUpdate) FromValues(values Values, data []byte) error { // AppendValues serializes this structure into the provided string key-value store // useful to build query strings -func (r *ResourceUpdate) AppendValues(values Values) []byte { +func (r *Update) AppendValues(values Values) []byte { r.ID.AppendValues(values) values.Set("protocolVersion", fmt.Sprintf("%d", r.Header.Version)) return r.data diff --git a/swarm/storage/mru/update_test.go b/swarm/storage/mru/update_test.go index bd706d83a..62c401f3f 100644 --- a/swarm/storage/mru/update_test.go +++ b/swarm/storage/mru/update_test.go @@ -20,31 +20,31 @@ import ( "testing" ) -func getTestResourceUpdate() *ResourceUpdate { - return &ResourceUpdate{ +func getTestFeedUpdate() *Update { + return &Update{ ID: *getTestID(), data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"), } } -func TestResourceUpdateSerializer(t *testing.T) { - testBinarySerializerRecovery(t, getTestResourceUpdate(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f") +func TestUpdateSerializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestFeedUpdate(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f") } -func TestResourceUpdateLengthCheck(t *testing.T) { - testBinarySerializerLengthCheck(t, getTestResourceUpdate()) +func TestUpdateLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestFeedUpdate()) // Test fail if update is too big - update := getTestResourceUpdate() + update := getTestFeedUpdate() update.data = make([]byte, maxUpdateDataLength+100) serialized := make([]byte, update.binaryLength()) if err := update.binaryPut(serialized); err == nil { - t.Fatal("Expected resourceUpdate.binaryPut to fail since update is too big") + t.Fatal("Expected update.binaryPut to fail since update is too big") } // test fail if data is empty or nil update.data = nil serialized = make([]byte, 
update.binaryLength()) if err := update.binaryPut(serialized); err == nil { - t.Fatal("Expected resourceUpdate.binaryPut to fail since data is empty") + t.Fatal("Expected update.binaryPut to fail since data is empty") } } diff --git a/swarm/storage/mru/view.go b/swarm/storage/mru/view.go index 2e4ce4a0b..f1a588d44 100644 --- a/swarm/storage/mru/view.go +++ b/swarm/storage/mru/view.go @@ -25,8 +25,8 @@ import ( "github.com/ethereum/go-ethereum/swarm/storage" ) -// View represents a particular user's view of a resource -type View struct { +// Feed represents a particular user's view of a resource +type Feed struct { Topic Topic `json:"topic"` User common.Address `json:"user"` } @@ -34,11 +34,11 @@ type View struct { // View layout: // TopicLength bytes // userAddr common.AddressLength bytes -const viewLength = TopicLength + common.AddressLength +const feedLength = TopicLength + common.AddressLength -// mapKey calculates a unique id for this view for the cache map in `Handler` -func (u *View) mapKey() uint64 { - serializedData := make([]byte, viewLength) +// mapKey calculates a unique id for this feed. Used by the cache map in `Handler` +func (u *Feed) mapKey() uint64 { + serializedData := make([]byte, feedLength) u.binaryPut(serializedData) hasher := hashPool.Get().(hash.Hash) defer hashPool.Put(hasher) @@ -48,10 +48,10 @@ func (u *View) mapKey() uint64 { return *(*uint64)(unsafe.Pointer(&hash[0])) } -// binaryPut serializes this View instance into the provided slice -func (u *View) binaryPut(serializedData []byte) error { - if len(serializedData) != viewLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize View. Expected %d, got %d", viewLength, len(serializedData)) +// binaryPut serializes this Feed instance into the provided slice +func (u *Feed) binaryPut(serializedData []byte) error { + if len(serializedData) != feedLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize View. Expected %d, got %d", feedLength, len(serializedData)) } var cursor int copy(serializedData[cursor:cursor+TopicLength], u.Topic[:TopicLength]) @@ -64,14 +64,14 @@ func (u *View) binaryPut(serializedData []byte) error { } // binaryLength returns the expected size of this structure when serialized -func (u *View) binaryLength() int { - return viewLength +func (u *Feed) binaryLength() int { + return feedLength } // binaryGet restores the current instance from the information contained in the passed slice -func (u *View) binaryGet(serializedData []byte) error { - if len(serializedData) != viewLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to read View. Expected %d, got %d", viewLength, len(serializedData)) +func (u *Feed) binaryGet(serializedData []byte) error { + if len(serializedData) != feedLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to read Feed. 
Expected %d, got %d", feedLength, len(serializedData)) } var cursor int @@ -84,16 +84,16 @@ func (u *View) binaryGet(serializedData []byte) error { return nil } -// Hex serializes the View to a hex string -func (u *View) Hex() string { - serializedData := make([]byte, viewLength) +// Hex serializes the Feed to a hex string +func (u *Feed) Hex() string { + serializedData := make([]byte, feedLength) u.binaryPut(serializedData) return hexutil.Encode(serializedData) } // FromValues deserializes this instance from a string key-value store // useful to parse query strings -func (u *View) FromValues(values Values) (err error) { +func (u *Feed) FromValues(values Values) (err error) { topic := values.Get("topic") if topic != "" { if err := u.Topic.FromHex(values.Get("topic")); err != nil { @@ -119,7 +119,7 @@ func (u *View) FromValues(values Values) (err error) { // AppendValues serializes this structure into the provided string key-value store // useful to build query strings -func (u *View) AppendValues(values Values) { +func (u *Feed) AppendValues(values Values) { values.Set("topic", u.Topic.Hex()) values.Set("user", u.User.Hex()) } diff --git a/swarm/storage/mru/view_test.go b/swarm/storage/mru/view_test.go index 45720ba79..e2f4d6b30 100644 --- a/swarm/storage/mru/view_test.go +++ b/swarm/storage/mru/view_test.go @@ -19,18 +19,18 @@ import ( "testing" ) -func getTestView() *View { +func getTestFeed() *Feed { topic, _ := NewTopic("world news report, every hour", nil) - return &View{ + return &Feed{ Topic: topic, User: newCharlieSigner().Address(), } } -func TestViewSerializerDeserializer(t *testing.T) { - testBinarySerializerRecovery(t, getTestView(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781c") +func TestFeedSerializerDeserializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestFeed(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781c") } -func TestMetadataSerializerLengthCheck(t *testing.T) { - testBinarySerializerLengthCheck(t, getTestView()) +func TestFeedSerializerLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestFeed()) } -- cgit