author    | Javier Peletier <jpeletier@users.noreply.github.com> | 2018-09-28 18:07:17 +0800
committer | Martin Holst Swende <martin@swende.se>               | 2018-09-28 18:07:17 +0800
commit    | 2c110c81ee92290d3e5ce6134a065c8d2abfbb60 (patch)
tree      | db263ba1b6f051da8d3e5d0faaafec1c868e453d /swarm
parent    | 0da3b17a112a75b54c8b3e5a2bf65a27a1c8c999 (diff)
Swarm MRUs: Adaptive frequency / Predictable lookups / API simplification (#17559)
* swarm/storage/mru: Adaptive Frequency
swarm/storage/mru/lookup: fixed getBaseTime
Added NewEpoch constructor
swarm/api/client: better error handling in GetResource()
swarm/storage/mru: Renamed structures.
Renamed ResourceMetadata to ResourceID.
Renamed ResourceID.Name to ResourceID.Topic
swarm/storage/mru: Added binarySerializer interface and test tools
swarm/storage/mru/lookup: Changed base time to time and added marshallers
swarm/storage/mru: Added ResourceID (former resourceMetadata)
swarm/storage/mru: Added ResourceViewId and serialization tests
swarm/storage/mru/lookup: fixed epoch unmarshaller. Added Epoch Equals
swarm/storage/mru: Fixes as per review comments
cmd/swarm: reworded resource create/update help text regarding topic
swarm/storage/mru: Added UpdateLookup and serializer tests
swarm/storage/mru: Added UpdateHeader, serializers and tests
swarm/storage/mru: changed UpdateAddr / epoch to Base()
swarm/storage/mru: Added resourceUpdate serializer and tests
swarm/storage/mru: Added SignedResourceUpdate tests and serializers
swarm/storage/mru/lookup: fixed GetFirstEpoch bug
swarm/storage/mru: refactor, comments, cleanup
Also added tests for Topic
swarm/storage/mru: handler tests pass
swarm/storage/mru: all resource package tests pass
swarm/storage/mru: resource test pass after adding
timestamp checking support
swarm/storage/mru: Added JSON serializers to ResourceIDView structures
swarm/storage/mru: Server, client, API test pass
swarm/storage/mru: server test pass
swarm/storage/mru: Added topic length check
swarm/storage/mru: removed some literals,
improved "previous lookup" test case
swarm/storage/mru: some fixes and comments as per review
swarm/storage/mru: first working version without metadata chunk
swarm/storage/mru: Various fixes as per review
swarm/storage/mru: client test pass
swarm/storage/mru: resource query strings and manifest-less queries
swarm/storage/mru: simplify naming
swarm/storage/mru: first autofreq working version
swarm/storage/mru: renamed ToValues to AppendValues
swarm/resource/mru: Added ToValues / FromValues for URL query strings
swarm/storage/mru: Changed POST resource to work with query strings.
No more JSON.
swarm/storage/mru: removed resourceid
swarm/storage/mru: Opened up structures
swarm/storage/mru: Merged Request and SignedResourceUpdate
swarm/storage/mru: removed initial data from CLI resource create
swarm/storage/mru: Refactor Topic as a direct fixed-length array
swarm/storage/mru/lookup: Comprehensive GetNextLevel tests
swarm/storage/mru: Added comments
Added length checks in Topic
swarm/storage/mru: fixes in tests and some code comments
swarm/storage/mru/lookup: new optimized lookup algorithm
swarm/api: moved getResourceView to api out of server
swarm/storage/mru: Lookup algorithm working
swarm/storage/mru: comments and renamed NewLookupParams
Deleted commented code
swarm/storage/mru/lookup: renamed Epoch.LaterThan to After
swarm/storage/mru/lookup: Comments and tidying naming
swarm/storage/mru: fix lookup algorithm
swarm/storage/mru: exposed lookup hint
removed updateheader
swarm/storage/mru/lookup: changed GetNextEpoch for initial values
swarm/storage/mru: resource tests pass
swarm/storage/mru: valueSerializer interface and tests
swarm/storage/mru/lookup: Comments, improvements, fixes, more tests
swarm/storage/mru: renamed UpdateLookup to ID, LookupParams to Query
swarm/storage/mru: renamed query receiver var
swarm/cmd: MRU CLI tests
* cmd/swarm: remove rogue fmt
* swarm/storage/mru: Add version / header for future use
* swarm/storage/mru: Fixes/comments as per review
cmd/swarm: remove rogue fmt
swarm/storage/mru: Add version / header for future use
* swarm/storage/mru: fix linter errors
* cmd/swarm: Sped up TestCLIResourceUpdate
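
This commit replaces the JSON body of resource create/update requests with query-string parameters, and replaces the old name/frequency/startTime metadata with a fixed-length Topic. A minimal, hedged sketch of the new client flow as exercised by the updated client tests in this diff; the gateway URL, topic name and payloads are placeholders, the GenericSigner helper is assumed from the mru package, and error handling is abbreviated:

```go
package main

import (
	"log"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/api/client"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func main() {
	// Placeholder key and gateway; substitute your own signer and node URL.
	privKey, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	signer := mru.NewGenericSigner(privKey)
	swarm := client.NewClient("http://localhost:8500")

	// A resource is now identified by a Topic instead of name/frequency/startTime.
	topic, err := mru.NewTopic("example.eth", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Create the resource: build the first update, sign it and post it.
	createRequest := mru.NewFirstRequest(topic)
	createRequest.SetData([]byte("first version"))
	if err := createRequest.Sign(signer); err != nil {
		log.Fatal(err)
	}
	manifestAddr, err := swarm.CreateResource(createRequest)
	if err != nil {
		log.Fatal(err)
	}

	// Update: fetch an unsigned update template, set new data, sign and post.
	updateRequest, err := swarm.GetResourceMetadata(nil, manifestAddr)
	if err != nil {
		log.Fatal(err)
	}
	updateRequest.SetData([]byte("second version"))
	if err := updateRequest.Sign(signer); err != nil {
		log.Fatal(err)
	}
	if err := swarm.UpdateResource(updateRequest); err != nil {
		log.Fatal(err)
	}
}
```

Note that SetData no longer takes a multihash flag: whether the payload is a multihash is no longer a property of the update itself, which is why the bzz:// handler in this diff decodes the payload as a multihash unconditionally for resource manifests.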
Diffstat (limited to 'swarm')
40 files changed, 3006 insertions, 2910 deletions
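
Because an update is now addressed only by its View (Topic plus author address) and a time Epoch, a reader who knows both can query a resource directly, with no manifest and no ENS entry, exactly as the new manifest-less client test does. A hedged fragment illustrating that lookup; the function name and package name are made up for the example:

```go
package example

import (
	"io/ioutil"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/api/client"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

// readLatest retrieves the latest update of a resource without a manifest:
// the Topic and the author's address (together, the View) are enough.
// lookup.NoClue gives the lookup algorithm no hint about where to start.
func readLatest(swarm *client.Client, topic mru.Topic, author common.Address) ([]byte, error) {
	view := &mru.View{Topic: topic, User: author}
	query := mru.NewQueryLatest(view, lookup.NoClue)

	// Empty manifest address / ENS name: the query parameters alone drive the lookup.
	reader, err := swarm.GetResource(query, "")
	if err != nil {
		return nil, err
	}
	defer reader.Close()
	return ioutil.ReadAll(reader)
}
```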
diff --git a/swarm/api/api.go b/swarm/api/api.go index d7b6d8419..70c12a757 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -29,6 +29,8 @@ import ( "path" "strings" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" + "bytes" "mime" "path/filepath" @@ -401,77 +403,54 @@ func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage // we need to do some extra work if this is a mutable resource manifest if entry.ContentType == ResourceContentType { - - // get the resource rootAddr - log.Trace("resource type", "menifestAddr", manifestAddr, "hash", entry.Hash) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - rootAddr := storage.Address(common.FromHex(entry.Hash)) - rsrc, err := a.resource.Load(ctx, rootAddr) + if entry.ResourceView == nil { + return reader, mimeType, status, nil, fmt.Errorf("Cannot decode ResourceView in manifest") + } + _, err := a.resource.Lookup(ctx, mru.NewQueryLatest(entry.ResourceView, lookup.NoClue)) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound log.Debug(fmt.Sprintf("get resource content error: %v", err)) return reader, mimeType, status, nil, err } + // get the data of the update + _, rsrcData, err := a.resource.GetContent(entry.ResourceView) + if err != nil { + apiGetNotFound.Inc(1) + status = http.StatusNotFound + log.Warn(fmt.Sprintf("get resource content error: %v", err)) + return reader, mimeType, status, nil, err + } + + // extract multihash + decodedMultihash, err := multihash.FromMultihash(rsrcData) + if err != nil { + apiGetInvalid.Inc(1) + status = http.StatusUnprocessableEntity + log.Warn("invalid resource multihash", "err", err) + return reader, mimeType, status, nil, err + } + manifestAddr = storage.Address(decodedMultihash) + log.Trace("resource is multihash", "key", manifestAddr) - // use this key to retrieve the latest update - params := mru.LookupLatest(rootAddr) - rsrc, err = a.resource.Lookup(ctx, params) + // get the manifest the multihash digest points to + trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, NOOPDecrypt) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound - log.Debug(fmt.Sprintf("get resource content error: %v", err)) + log.Warn(fmt.Sprintf("loadManifestTrie (resource multihash) error: %v", err)) return reader, mimeType, status, nil, err } - // if it's multihash, we will transparently serve the content this multihash points to - // \TODO this resolve is rather expensive all in all, review to see if it can be achieved cheaper - if rsrc.Multihash() { - - // get the data of the update - _, rsrcData, err := a.resource.GetContent(rootAddr) - if err != nil { - apiGetNotFound.Inc(1) - status = http.StatusNotFound - log.Warn(fmt.Sprintf("get resource content error: %v", err)) - return reader, mimeType, status, nil, err - } - - // validate that data as multihash - decodedMultihash, err := multihash.FromMultihash(rsrcData) - if err != nil { - apiGetInvalid.Inc(1) - status = http.StatusUnprocessableEntity - log.Warn("invalid resource multihash", "err", err) - return reader, mimeType, status, nil, err - } - manifestAddr = storage.Address(decodedMultihash) - log.Trace("resource is multihash", "key", manifestAddr) - - // get the manifest the multihash digest points to - trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, decrypt) - if err != nil { - apiGetNotFound.Inc(1) - status = http.StatusNotFound - log.Warn(fmt.Sprintf("loadManifestTrie (resource multihash) error: %v", err)) - return reader, mimeType, status, nil, err - 
} - - // finally, get the manifest entry - // it will always be the entry on path "" - entry, _ = trie.getEntry(path) - if entry == nil { - status = http.StatusNotFound - apiGetNotFound.Inc(1) - err = fmt.Errorf("manifest (resource multihash) entry for '%s' not found", path) - log.Trace("manifest (resource multihash) entry not found", "key", manifestAddr, "path", path) - return reader, mimeType, status, nil, err - } - - } else { - // data is returned verbatim since it's not a multihash - return rsrc, "application/octet-stream", http.StatusOK, nil, nil + // finally, get the manifest entry + // it will always be the entry on path "" + entry, _ = trie.getEntry(path) + if entry == nil { + status = http.StatusNotFound + apiGetNotFound.Inc(1) + err = fmt.Errorf("manifest (resource multihash) entry for '%s' not found", path) + log.Trace("manifest (resource multihash) entry not found", "key", manifestAddr, "path", path) + return reader, mimeType, status, nil, err } } @@ -966,37 +945,27 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver } // ResourceLookup finds mutable resource updates at specific periods and versions -func (a *API) ResourceLookup(ctx context.Context, params *mru.LookupParams) (string, []byte, error) { - var err error - rsrc, err := a.resource.Load(ctx, params.RootAddr()) +func (a *API) ResourceLookup(ctx context.Context, query *mru.Query) ([]byte, error) { + _, err := a.resource.Lookup(ctx, query) if err != nil { - return "", nil, err - } - _, err = a.resource.Lookup(ctx, params) - if err != nil { - return "", nil, err + return nil, err } var data []byte - _, data, err = a.resource.GetContent(params.RootAddr()) + _, data, err = a.resource.GetContent(&query.View) if err != nil { - return "", nil, err + return nil, err } - return rsrc.Name(), data, nil -} - -// Create Mutable resource -func (a *API) ResourceCreate(ctx context.Context, request *mru.Request) error { - return a.resource.New(ctx, request) + return data, nil } // ResourceNewRequest creates a Request object to update a specific mutable resource -func (a *API) ResourceNewRequest(ctx context.Context, rootAddr storage.Address) (*mru.Request, error) { - return a.resource.NewUpdateRequest(ctx, rootAddr) +func (a *API) ResourceNewRequest(ctx context.Context, view *mru.View) (*mru.Request, error) { + return a.resource.NewRequest(ctx, view) } // ResourceUpdate updates a Mutable Resource with arbitrary data. // Upon retrieval the update will be retrieved verbatim as bytes. -func (a *API) ResourceUpdate(ctx context.Context, request *mru.SignedResourceUpdate) (storage.Address, error) { +func (a *API) ResourceUpdate(ctx context.Context, request *mru.Request) (storage.Address, error) { return a.resource.Update(ctx, request) } @@ -1005,17 +974,62 @@ func (a *API) ResourceHashSize() int { return a.resource.HashSize } -// ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the address of the metadata chunk. 
-func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (storage.Address, error) { +// ErrCannotLoadResourceManifest is returned when looking up a resource manifest fails +var ErrCannotLoadResourceManifest = errors.New("Cannot load resource manifest") + +// ErrNotAResourceManifest is returned when the address provided returned something other than a valid manifest +var ErrNotAResourceManifest = errors.New("Not a resource manifest") + +// ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the Resource's view ID. +func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (*mru.View, error) { trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt) if err != nil { - return nil, fmt.Errorf("cannot load resource manifest: %v", err) + return nil, ErrCannotLoadResourceManifest } entry, _ := trie.getEntry("") if entry.ContentType != ResourceContentType { - return nil, fmt.Errorf("not a resource manifest: %s", addr) + return nil, ErrNotAResourceManifest } - return storage.Address(common.FromHex(entry.Hash)), nil + return entry.ResourceView, nil +} + +// ErrCannotResolveResourceURI is returned when the ENS resolver is not able to translate a name to a resource +var ErrCannotResolveResourceURI = errors.New("Cannot resolve Resource URI") + +// ErrCannotResolveResourceView is returned when values provided are not enough or invalid to recreate a +// resource view out of them. +var ErrCannotResolveResourceView = errors.New("Cannot resolve resource view") + +// ResolveResourceView attempts to extract View information out of the manifest, if provided +// If not, it attempts to extract the View out of a set of key-value pairs +func (a *API) ResolveResourceView(ctx context.Context, uri *URI, values mru.Values) (*mru.View, error) { + var view *mru.View + var err error + if uri.Addr != "" { + // resolve the content key. + manifestAddr := uri.Address() + if manifestAddr == nil { + manifestAddr, err = a.Resolve(ctx, uri.Addr) + if err != nil { + return nil, ErrCannotResolveResourceURI + } + } + + // get the resource view from the manifest + view, err = a.ResolveResourceManifest(ctx, manifestAddr) + if err != nil { + return nil, err + } + log.Debug("handle.get.resource: resolved", "manifestkey", manifestAddr, "view", view.Hex()) + } else { + var v mru.View + if err := v.FromValues(values); err != nil { + return nil, ErrCannotResolveResourceView + + } + view = &v + } + return view, nil } diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go index 3d06e9e1c..a6666144a 100644 --- a/swarm/api/client/client.go +++ b/swarm/api/client/client.go @@ -28,6 +28,7 @@ import ( "mime/multipart" "net/http" "net/textproto" + "net/url" "os" "path/filepath" "regexp" @@ -595,13 +596,16 @@ func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error) return string(data), nil } +// ErrNoResourceUpdatesFound is returned when Swarm cannot find updates of the given resource +var ErrNoResourceUpdatesFound = errors.New("No updates found for this resource") + // CreateResource creates a Mutable Resource with the given name and frequency, initializing it with the provided // data. Data is interpreted as multihash or not depending on the multihash parameter. 
// startTime=0 means "now" // Returns the resulting Mutable Resource manifest address that you can use to include in an ENS Resolver (setContent) // or reference future updates (Client.UpdateResource) func (c *Client) CreateResource(request *mru.Request) (string, error) { - responseStream, err := c.updateResource(request) + responseStream, err := c.updateResource(request, true) if err != nil { return "", err } @@ -621,17 +625,24 @@ func (c *Client) CreateResource(request *mru.Request) (string, error) { // UpdateResource allows you to set a new version of your content func (c *Client) UpdateResource(request *mru.Request) error { - _, err := c.updateResource(request) + _, err := c.updateResource(request, false) return err } -func (c *Client) updateResource(request *mru.Request) (io.ReadCloser, error) { - body, err := request.MarshalJSON() +func (c *Client) updateResource(request *mru.Request, createManifest bool) (io.ReadCloser, error) { + URL, err := url.Parse(c.Gateway) if err != nil { return nil, err } + URL.Path = "/bzz-resource:/" + values := URL.Query() + body := request.AppendValues(values) + if createManifest { + values.Set("manifest", "1") + } + URL.RawQuery = values.Encode() - req, err := http.NewRequest("POST", c.Gateway+"/bzz-resource:/", bytes.NewBuffer(body)) + req, err := http.NewRequest("POST", URL.String(), bytes.NewBuffer(body)) if err != nil { return nil, err } @@ -642,28 +653,61 @@ func (c *Client) updateResource(request *mru.Request) (io.ReadCloser, error) { } return res.Body, nil - } // GetResource returns a byte stream with the raw content of the resource // manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver // points to that address -func (c *Client) GetResource(manifestAddressOrDomain string) (io.ReadCloser, error) { +func (c *Client) GetResource(query *mru.Query, manifestAddressOrDomain string) (io.ReadCloser, error) { + return c.getResource(query, manifestAddressOrDomain, false) +} - res, err := http.Get(c.Gateway + "/bzz-resource:/" + manifestAddressOrDomain) +// getResource returns a byte stream with the raw content of the resource +// manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver +// points to that address +// meta set to true will instruct the node return resource metainformation instead +func (c *Client) getResource(query *mru.Query, manifestAddressOrDomain string, meta bool) (io.ReadCloser, error) { + URL, err := url.Parse(c.Gateway) if err != nil { return nil, err } - return res.Body, nil + URL.Path = "/bzz-resource:/" + manifestAddressOrDomain + values := URL.Query() + if query != nil { + query.AppendValues(values) //adds query parameters + } + if meta { + values.Set("meta", "1") + } + URL.RawQuery = values.Encode() + res, err := http.Get(URL.String()) + if err != nil { + return nil, err + } + + if res.StatusCode != http.StatusOK { + if res.StatusCode == http.StatusNotFound { + return nil, ErrNoResourceUpdatesFound + } + errorMessageBytes, err := ioutil.ReadAll(res.Body) + var errorMessage string + if err != nil { + errorMessage = "cannot retrieve error message: " + err.Error() + } else { + errorMessage = string(errorMessageBytes) + } + return nil, fmt.Errorf("Error retrieving resource: %s", errorMessage) + } + return res.Body, nil } // GetResourceMetadata returns a structure that describes the Mutable Resource // manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver // points to that address -func (c 
*Client) GetResourceMetadata(manifestAddressOrDomain string) (*mru.Request, error) { +func (c *Client) GetResourceMetadata(query *mru.Query, manifestAddressOrDomain string) (*mru.Request, error) { - responseStream, err := c.GetResource(manifestAddressOrDomain + "/meta") + responseStream, err := c.getResource(query, manifestAddressOrDomain, true) if err != nil { return nil, err } diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go index f9312d48f..02980de1d 100644 --- a/swarm/api/client/client_test.go +++ b/swarm/api/client/client_test.go @@ -25,6 +25,8 @@ import ( "sort" "testing" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/swarm/api" @@ -391,19 +393,12 @@ func TestClientCreateResourceMultihash(t *testing.T) { s := common.FromHex(swarmHash) mh := multihash.ToMultihash(s) - // our mutable resource "name" - resourceName := "foo.eth" + // our mutable resource topic + topic, _ := mru.NewTopic("foo.eth", nil) - createRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{ - Name: resourceName, - Frequency: 13, - StartTime: srv.GetCurrentTime(), - Owner: signer.Address(), - }) - if err != nil { - t.Fatal(err) - } - createRequest.SetData(mh, true) + createRequest := mru.NewFirstRequest(topic) + + createRequest.SetData(mh) if err := createRequest.Sign(signer); err != nil { t.Fatalf("Error signing update: %s", err) } @@ -414,12 +409,18 @@ func TestClientCreateResourceMultihash(t *testing.T) { t.Fatalf("Error creating resource: %s", err) } - correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e" + correctManifestAddrHex := "6ef40ba1492cf2a029dc9a8b5896c822cf689d3cd010842f4f1744e6db8824bd" if resourceManifestHash != correctManifestAddrHex { - t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash) + t.Fatalf("Response resource manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash) } - reader, err := client.GetResource(correctManifestAddrHex) + // Check we get a not found error when trying to get the resource with a made-up manifest + _, err = client.GetResource(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") + if err != ErrNoResourceUpdatesFound { + t.Fatalf("Expected to receive ErrNoResourceUpdatesFound error. 
Got: %s", err) + } + + reader, err := client.GetResource(nil, correctManifestAddrHex) if err != nil { t.Fatalf("Error retrieving resource: %s", err) } @@ -447,30 +448,22 @@ func TestClientCreateUpdateResource(t *testing.T) { databytes := []byte("En un lugar de La Mancha, de cuyo nombre no quiero acordarme...") // our mutable resource name - resourceName := "El Quijote" + topic, _ := mru.NewTopic("El Quijote", nil) + createRequest := mru.NewFirstRequest(topic) - createRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{ - Name: resourceName, - Frequency: 13, - StartTime: srv.GetCurrentTime(), - Owner: signer.Address(), - }) - if err != nil { - t.Fatal(err) - } - createRequest.SetData(databytes, false) + createRequest.SetData(databytes) if err := createRequest.Sign(signer); err != nil { t.Fatalf("Error signing update: %s", err) } resourceManifestHash, err := client.CreateResource(createRequest) - correctManifestAddrHex := "cc7904c17b49f9679e2d8006fe25e87e3f5c2072c2b49cab50f15e544471b30a" + correctManifestAddrHex := "fcb8e75f53e480e197c083ad1976d265674d0ce776f2bf359c09c413fb5230b8" if resourceManifestHash != correctManifestAddrHex { - t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash) + t.Fatalf("Response resource manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash) } - reader, err := client.GetResource(correctManifestAddrHex) + reader, err := client.GetResource(nil, correctManifestAddrHex) if err != nil { t.Fatalf("Error retrieving resource: %s", err) } @@ -486,12 +479,12 @@ func TestClientCreateUpdateResource(t *testing.T) { // define different data databytes = []byte("... no ha mucho tiempo que vivĂa un hidalgo de los de lanza en astillero ...") - updateRequest, err := client.GetResourceMetadata(correctManifestAddrHex) + updateRequest, err := client.GetResourceMetadata(nil, correctManifestAddrHex) if err != nil { t.Fatalf("Error retrieving update request template: %s", err) } - updateRequest.SetData(databytes, false) + updateRequest.SetData(databytes) if err := updateRequest.Sign(signer); err != nil { t.Fatalf("Error signing update: %s", err) } @@ -500,7 +493,7 @@ func TestClientCreateUpdateResource(t *testing.T) { t.Fatalf("Error updating resource: %s", err) } - reader, err = client.GetResource(correctManifestAddrHex) + reader, err = client.GetResource(nil, correctManifestAddrHex) if err != nil { t.Fatalf("Error retrieving resource: %s", err) } @@ -513,4 +506,24 @@ func TestClientCreateUpdateResource(t *testing.T) { t.Fatalf("Expected: %v, got %v", databytes, gotData) } + // now try retrieving resource without a manifest + + view := &mru.View{ + Topic: topic, + User: signer.Address(), + } + + lookupParams := mru.NewQueryLatest(view, lookup.NoClue) + reader, err = client.GetResource(lookupParams, "") + if err != nil { + t.Fatalf("Error retrieving resource: %s", err) + } + defer reader.Close() + gotData, err = ioutil.ReadAll(reader) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(databytes, gotData) { + t.Fatalf("Expected: %v, got %v", databytes, gotData) + } } diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index af1269b93..87ef05baa 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -487,6 +487,7 @@ func resourcePostMode(path string) (isRaw bool, frequency uint64, err error) { // The requests can be to a) create a resource, b) update a resource or c) both a+b: create a resource and set the initial content func (s *Server) 
HandlePostResource(w http.ResponseWriter, r *http.Request) { ruid := GetRUID(r.Context()) + uri := GetURI(r.Context()) log.Debug("handle.post.resource", "ruid", ruid) var err error @@ -496,9 +497,24 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { RespondError(w, r, err.Error(), http.StatusInternalServerError) return } + + view, err := s.api.ResolveResourceView(r.Context(), uri, r.URL.Query()) + if err != nil { // couldn't parse query string or retrieve manifest + getFail.Inc(1) + httpStatus := http.StatusBadRequest + if err == api.ErrCannotLoadResourceManifest || err == api.ErrCannotResolveResourceURI { + httpStatus = http.StatusNotFound + } + RespondError(w, r, fmt.Sprintf("cannot retrieve resource view: %s", err), httpStatus) + return + } + var updateRequest mru.Request - if err := updateRequest.UnmarshalJSON(body); err != nil { // decodes request JSON - RespondError(w, r, err.Error(), http.StatusBadRequest) //TODO: send different status response depending on error + updateRequest.View = *view + query := r.URL.Query() + + if err := updateRequest.FromValues(query, body); err != nil { // decodes request from query parameters + RespondError(w, r, err.Error(), http.StatusBadRequest) return } @@ -510,56 +526,40 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *http.Request) { RespondError(w, r, err.Error(), http.StatusForbidden) return } - } - - if updateRequest.IsNew() { - err = s.api.ResourceCreate(r.Context(), &updateRequest) - if err != nil { - code, err2 := s.translateResourceError(w, r, "resource creation fail", err) - RespondError(w, r, err2.Error(), code) - return - } - } - - if updateRequest.IsUpdate() { - _, err = s.api.ResourceUpdate(r.Context(), &updateRequest.SignedResourceUpdate) + _, err = s.api.ResourceUpdate(r.Context(), &updateRequest) if err != nil { RespondError(w, r, err.Error(), http.StatusInternalServerError) return } } - // at this point both possible operations (create, update or both) were successful - // so in case it was a new resource, then create a manifest and send it over. 
- - if updateRequest.IsNew() { + if query.Get("manifest") == "1" { // we create a manifest so we can retrieve the resource with bzz:// later - // this manifest has a special "resource type" manifest, and its hash is the key of the mutable resource - // metadata chunk (rootAddr) - m, err := s.api.NewResourceManifest(r.Context(), updateRequest.RootAddr().Hex()) + // this manifest has a special "resource type" manifest, and saves the + // resource view ID used to retrieve the resource later + m, err := s.api.NewResourceManifest(r.Context(), &updateRequest.View) if err != nil { RespondError(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError) return } - // the key to the manifest will be passed back to the client - // the client can access the root chunk key directly through its Hash member - // the manifest key should be set as content in the resolver of the ENS name - // \TODO update manifest key automatically in ENS + // the client can access the view directly through its resourceView member + // the manifest key can be set as content in the resolver of the ENS name outdata, err := json.Marshal(m) if err != nil { RespondError(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError) return } fmt.Fprint(w, string(outdata)) + + w.Header().Add("Content-type", "application/json") } - w.Header().Add("Content-type", "application/json") } // Retrieve mutable resource updates: // bzz-resource://<id> - get latest update -// bzz-resource://<id>/<n> - get latest update on period n -// bzz-resource://<id>/<n>/<m> - get update version m of period n +// bzz-resource://<id>/?period=n - get latest update on period n +// bzz-resource://<id>/?period=n&version=m - get update version m of period n // bzz-resource://<id>/meta - get metadata and next version information // <id> = ens name or hash // TODO: Enable pass maxPeriod parameter @@ -569,84 +569,44 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *http.Request) { log.Debug("handle.get.resource", "ruid", ruid) var err error - // resolve the content key. 
- manifestAddr := uri.Address() - if manifestAddr == nil { - manifestAddr, err = s.api.Resolve(r.Context(), uri.Addr) - if err != nil { - getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound) - return - } - } else { - w.Header().Set("Cache-Control", "max-age=2147483648") - } - - // get the root chunk rootAddr from the manifest - rootAddr, err := s.api.ResolveResourceManifest(r.Context(), manifestAddr) - if err != nil { + view, err := s.api.ResolveResourceView(r.Context(), uri, r.URL.Query()) + if err != nil { // couldn't parse query string or retrieve manifest getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", uri.Addr, err), http.StatusNotFound) + httpStatus := http.StatusBadRequest + if err == api.ErrCannotLoadResourceManifest || err == api.ErrCannotResolveResourceURI { + httpStatus = http.StatusNotFound + } + RespondError(w, r, fmt.Sprintf("cannot retrieve resource view: %s", err), httpStatus) return } - log.Debug("handle.get.resource: resolved", "ruid", ruid, "manifestkey", manifestAddr, "rootchunk addr", rootAddr) - // determine if the query specifies period and version or it is a metadata query - var params []string - if len(uri.Path) > 0 { - if uri.Path == "meta" { - unsignedUpdateRequest, err := s.api.ResourceNewRequest(r.Context(), rootAddr) - if err != nil { - getFail.Inc(1) - RespondError(w, r, fmt.Sprintf("cannot retrieve resource metadata for rootAddr=%s: %s", rootAddr.Hex(), err), http.StatusNotFound) - return - } - rawResponse, err := unsignedUpdateRequest.MarshalJSON() - if err != nil { - RespondError(w, r, fmt.Sprintf("cannot encode unsigned UpdateRequest: %v", err), http.StatusInternalServerError) - return - } - w.Header().Add("Content-type", "application/json") - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, string(rawResponse)) - return - - } - - params = strings.Split(uri.Path, "/") - - } - var name string - var data []byte - now := time.Now() - - switch len(params) { - case 0: // latest only - name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatest(rootAddr)) - case 2: // specific period and version - var version uint64 - var period uint64 - version, err = strconv.ParseUint(params[1], 10, 32) + if r.URL.Query().Get("meta") == "1" { + unsignedUpdateRequest, err := s.api.ResourceNewRequest(r.Context(), view) if err != nil { - break - } - period, err = strconv.ParseUint(params[0], 10, 32) - if err != nil { - break + getFail.Inc(1) + RespondError(w, r, fmt.Sprintf("cannot retrieve resource metadata for view=%s: %s", view.Hex(), err), http.StatusNotFound) + return } - name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupVersion(rootAddr, uint32(period), uint32(version))) - case 1: // last version of specific period - var period uint64 - period, err = strconv.ParseUint(params[0], 10, 32) + rawResponse, err := unsignedUpdateRequest.MarshalJSON() if err != nil { - break + RespondError(w, r, fmt.Sprintf("cannot encode unsigned UpdateRequest: %v", err), http.StatusInternalServerError) + return } - name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatestVersionInPeriod(rootAddr, uint32(period))) - default: // bogus - err = mru.NewError(storage.ErrInvalidValue, "invalid mutable resource request") + w.Header().Add("Content-type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, string(rawResponse)) + return } + lookupParams := &mru.Query{View: *view} + if err = lookupParams.FromValues(r.URL.Query()); err != nil { // parse period, 
version + RespondError(w, r, fmt.Sprintf("invalid mutable resource request:%s", err), http.StatusBadRequest) + return + } + + data, err := s.api.ResourceLookup(r.Context(), lookupParams) + // any error from the switch statement will end up here if err != nil { code, err2 := s.translateResourceError(w, r, "mutable resource lookup fail", err) @@ -655,9 +615,9 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *http.Request) { } // All ok, serve the retrieved update - log.Debug("Found update", "name", name, "ruid", ruid) + log.Debug("Found update", "view", view.Hex(), "ruid", ruid) w.Header().Set("Content-Type", "application/octet-stream") - http.ServeContent(w, r, "", now, bytes.NewReader(data)) + http.ServeContent(w, r, "", time.Now(), bytes.NewReader(data)) } func (s *Server) translateResourceError(w http.ResponseWriter, r *http.Request, supErr string, err error) (int, error) { diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index 4a3ca0429..8ba4e55c3 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -30,12 +30,15 @@ import ( "math/big" "mime/multipart" "net/http" + "net/url" "os" "strconv" "strings" "testing" "time" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -121,8 +124,8 @@ func TestBzzResourceMultihash(t *testing.T) { // add the data our multihash aliased manifest will point to databytes := "bar" - url := fmt.Sprintf("%s/bzz:/", srv.URL) - resp, err := http.Post(url, "text/plain", bytes.NewReader([]byte(databytes))) + testBzzUrl := fmt.Sprintf("%s/bzz:/", srv.URL) + resp, err := http.Post(testBzzUrl, "text/plain", bytes.NewReader([]byte(databytes))) if err != nil { t.Fatal(err) } @@ -140,33 +143,27 @@ func TestBzzResourceMultihash(t *testing.T) { log.Info("added data", "manifest", string(b), "data", common.ToHex(mh)) - // our mutable resource "name" - keybytes := "foo.eth" + topic, _ := mru.NewTopic("foo.eth", nil) + updateRequest := mru.NewFirstRequest(topic) - updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{ - Name: keybytes, - Frequency: 13, - StartTime: srv.GetCurrentTime(), - Owner: signer.Address(), - }) - if err != nil { - t.Fatal(err) - } - updateRequest.SetData(mh, true) + updateRequest.SetData(mh) if err := updateRequest.Sign(signer); err != nil { t.Fatal(err) } log.Info("added data", "manifest", string(b), "data", common.ToHex(mh)) - body, err := updateRequest.MarshalJSON() + testUrl, err := url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) if err != nil { t.Fatal(err) } + query := testUrl.Query() + body := updateRequest.AppendValues(query) // this adds all query parameters and returns the data to be posted + query.Set("manifest", "1") // indicate we want a manifest back + testUrl.RawQuery = query.Encode() // create the multihash update - url = fmt.Sprintf("%s/bzz-resource:/", srv.URL) - resp, err = http.Post(url, "application/json", bytes.NewReader(body)) + resp, err = http.Post(testUrl.String(), "application/octet-stream", bytes.NewReader(body)) if err != nil { t.Fatal(err) } @@ -184,14 +181,14 @@ func TestBzzResourceMultihash(t *testing.T) { t.Fatalf("data %s could not be unmarshaled: %v", b, err) } - correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e" + correctManifestAddrHex := "6ef40ba1492cf2a029dc9a8b5896c822cf689d3cd010842f4f1744e6db8824bd" if rsrcResp.Hex() != correctManifestAddrHex { 
t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex()) } // get bzz manifest transparent resource resolve - url = fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp) - resp, err = http.Get(url) + testBzzUrl = fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp) + resp, err = http.Get(testBzzUrl) if err != nil { t.Fatal(err) } @@ -215,39 +212,38 @@ func TestBzzResource(t *testing.T) { defer srv.Close() - // our mutable resource "name" - keybytes := "foo.eth" - // data of update 1 - databytes := make([]byte, 666) - _, err := rand.Read(databytes) + update1Data := make([]byte, 666) + update1Timestamp := srv.CurrentTime + _, err := rand.Read(update1Data) if err != nil { t.Fatal(err) } + //data for update 2 + update2Data := []byte("foo") - updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{ - Name: keybytes, - Frequency: 13, - StartTime: srv.GetCurrentTime(), - Owner: signer.Address(), - }) + topic, _ := mru.NewTopic("foo.eth", nil) + updateRequest := mru.NewFirstRequest(topic) if err != nil { t.Fatal(err) } - updateRequest.SetData(databytes, false) + updateRequest.SetData(update1Data) if err := updateRequest.Sign(signer); err != nil { t.Fatal(err) } - body, err := updateRequest.MarshalJSON() + // creates resource and sets update 1 + testUrl, err := url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) if err != nil { t.Fatal(err) } + urlQuery := testUrl.Query() + body := updateRequest.AppendValues(urlQuery) // this adds all query parameters + urlQuery.Set("manifest", "1") // indicate we want a manifest back + testUrl.RawQuery = urlQuery.Encode() - // creates resource and sets update 1 - url := fmt.Sprintf("%s/bzz-resource:/", srv.URL) - resp, err := http.Post(url, "application/json", bytes.NewReader(body)) + resp, err := http.Post(testUrl.String(), "application/octet-stream", bytes.NewReader(body)) if err != nil { t.Fatal(err) } @@ -265,14 +261,14 @@ func TestBzzResource(t *testing.T) { t.Fatalf("data %s could not be unmarshaled: %v", b, err) } - correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e" + correctManifestAddrHex := "6ef40ba1492cf2a029dc9a8b5896c822cf689d3cd010842f4f1744e6db8824bd" if rsrcResp.Hex() != correctManifestAddrHex { - t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex()) + t.Fatalf("Response resource manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex()) } // get the manifest - url = fmt.Sprintf("%s/bzz-raw:/%s", srv.URL, rsrcResp) - resp, err = http.Get(url) + testRawUrl := fmt.Sprintf("%s/bzz-raw:/%s", srv.URL, rsrcResp) + resp, err = http.Get(testRawUrl) if err != nil { t.Fatal(err) } @@ -292,20 +288,20 @@ func TestBzzResource(t *testing.T) { if len(manifest.Entries) != 1 { t.Fatalf("Manifest has %d entries", len(manifest.Entries)) } - correctRootKeyHex := "68f7ba07ac8867a4c841a4d4320e3cdc549df23702dc7285fcb6acf65df48562" - if manifest.Entries[0].Hash != correctRootKeyHex { - t.Fatalf("Expected manifest path '%s', got '%s'", correctRootKeyHex, manifest.Entries[0].Hash) + correctViewHex := "0x666f6f2e65746800000000000000000000000000000000000000000000000000c96aaa54e2d44c299564da76e1cd3184a2386b8d" + if manifest.Entries[0].ResourceView.Hex() != correctViewHex { + t.Fatalf("Expected manifest Resource View '%s', got '%s'", correctViewHex, manifest.Entries[0].ResourceView.Hex()) } // get bzz manifest transparent resource resolve - url = fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp) - resp, err = 
http.Get(url) + testBzzUrl := fmt.Sprintf("%s/bzz:/%s", srv.URL, rsrcResp) + resp, err = http.Get(testBzzUrl) if err != nil { t.Fatal(err) } defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("err %s", resp.Status) + if resp.StatusCode == http.StatusOK { + t.Fatal("Expected error status since resource is not multihash. Received 200 OK") } b, err = ioutil.ReadAll(resp.Body) if err != nil { @@ -313,8 +309,8 @@ func TestBzzResource(t *testing.T) { } // get non-existent name, should fail - url = fmt.Sprintf("%s/bzz-resource:/bar", srv.URL) - resp, err = http.Get(url) + testBzzResUrl := fmt.Sprintf("%s/bzz-resource:/bar", srv.URL) + resp, err = http.Get(testBzzResUrl) if err != nil { t.Fatal(err) } @@ -327,8 +323,8 @@ func TestBzzResource(t *testing.T) { // get latest update (1.1) through resource directly log.Info("get update latest = 1.1", "addr", correctManifestAddrHex) - url = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex) - resp, err = http.Get(url) + testBzzResUrl = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex) + resp, err = http.Get(testBzzResUrl) if err != nil { t.Fatal(err) } @@ -340,16 +336,18 @@ func TestBzzResource(t *testing.T) { if err != nil { t.Fatal(err) } - if !bytes.Equal(databytes, b) { - t.Fatalf("Expected body '%x', got '%x'", databytes, b) + if !bytes.Equal(update1Data, b) { + t.Fatalf("Expected body '%x', got '%x'", update1Data, b) } // update 2 + // Move the clock ahead 1 second + srv.CurrentTime++ log.Info("update 2") // 1.- get metadata about this resource - url = fmt.Sprintf("%s/bzz-resource:/%s/", srv.URL, correctManifestAddrHex) - resp, err = http.Get(url + "meta") + testBzzResUrl = fmt.Sprintf("%s/bzz-resource:/%s/", srv.URL, correctManifestAddrHex) + resp, err = http.Get(testBzzResUrl + "?meta=1") if err != nil { t.Fatal(err) } @@ -365,17 +363,19 @@ func TestBzzResource(t *testing.T) { if err = updateRequest.UnmarshalJSON(b); err != nil { t.Fatalf("Error decoding resource metadata: %s", err) } - data := []byte("foo") - updateRequest.SetData(data, false) + updateRequest.SetData(update2Data) if err = updateRequest.Sign(signer); err != nil { t.Fatal(err) } - body, err = updateRequest.MarshalJSON() + testUrl, err = url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) if err != nil { t.Fatal(err) } + urlQuery = testUrl.Query() + body = updateRequest.AppendValues(urlQuery) // this adds all query parameters + testUrl.RawQuery = urlQuery.Encode() - resp, err = http.Post(url, "application/json", bytes.NewReader(body)) + resp, err = http.Post(testUrl.String(), "application/octet-stream", bytes.NewReader(body)) if err != nil { t.Fatal(err) } @@ -386,8 +386,8 @@ func TestBzzResource(t *testing.T) { // get latest update (1.2) through resource directly log.Info("get update 1.2") - url = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex) - resp, err = http.Get(url) + testBzzResUrl = fmt.Sprintf("%s/bzz-resource:/%s", srv.URL, correctManifestAddrHex) + resp, err = http.Get(testBzzResUrl) if err != nil { t.Fatal(err) } @@ -399,33 +399,23 @@ func TestBzzResource(t *testing.T) { if err != nil { t.Fatal(err) } - if !bytes.Equal(data, b) { - t.Fatalf("Expected body '%x', got '%x'", data, b) + if !bytes.Equal(update2Data, b) { + t.Fatalf("Expected body '%x', got '%x'", update2Data, b) } - // get latest update (1.2) with specified period - log.Info("get update latest = 1.2") - url = fmt.Sprintf("%s/bzz-resource:/%s/1", srv.URL, correctManifestAddrHex) - resp, err = http.Get(url) - if err != nil { - 
t.Fatal(err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - t.Fatalf("err %s", resp.Status) - } - b, err = ioutil.ReadAll(resp.Body) + // test manifest-less queries + log.Info("get first update in update1Timestamp via direct query") + query := mru.NewQuery(&updateRequest.View, update1Timestamp, lookup.NoClue) + + urlq, err := url.Parse(fmt.Sprintf("%s/bzz-resource:/", srv.URL)) if err != nil { t.Fatal(err) } - if !bytes.Equal(data, b) { - t.Fatalf("Expected body '%x', got '%x'", data, b) - } - // get first update (1.1) with specified period and version - log.Info("get first update 1.1") - url = fmt.Sprintf("%s/bzz-resource:/%s/1/1", srv.URL, correctManifestAddrHex) - resp, err = http.Get(url) + values := urlq.Query() + query.AppendValues(values) // this adds view query parameters + urlq.RawQuery = values.Encode() + resp, err = http.Get(urlq.String()) if err != nil { t.Fatal(err) } @@ -437,9 +427,10 @@ func TestBzzResource(t *testing.T) { if err != nil { t.Fatal(err) } - if !bytes.Equal(databytes, b) { - t.Fatalf("Expected body '%x', got '%x'", databytes, b) + if !bytes.Equal(update1Data, b) { + t.Fatalf("Expected body '%x', got '%x'", update1Data, b) } + } func TestBzzGetPath(t *testing.T) { diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index d44ad2277..06be7323e 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -27,6 +27,8 @@ import ( "strings" "time" + "github.com/ethereum/go-ethereum/swarm/storage/mru" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage" @@ -46,14 +48,15 @@ type Manifest struct { // ManifestEntry represents an entry in a swarm manifest type ManifestEntry struct { - Hash string `json:"hash,omitempty"` - Path string `json:"path,omitempty"` - ContentType string `json:"contentType,omitempty"` - Mode int64 `json:"mode,omitempty"` - Size int64 `json:"size,omitempty"` - ModTime time.Time `json:"mod_time,omitempty"` - Status int `json:"status,omitempty"` - Access *AccessEntry `json:"access,omitempty"` + Hash string `json:"hash,omitempty"` + Path string `json:"path,omitempty"` + ContentType string `json:"contentType,omitempty"` + Mode int64 `json:"mode,omitempty"` + Size int64 `json:"size,omitempty"` + ModTime time.Time `json:"mod_time,omitempty"` + Status int `json:"status,omitempty"` + Access *AccessEntry `json:"access,omitempty"` + ResourceView *mru.View `json:"resourceView,omitempty"` } // ManifestList represents the result of listing files in a manifest @@ -79,11 +82,11 @@ func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, // Manifest hack for supporting Mutable Resource Updates from the bzz: scheme // see swarm/api/api.go:API.Get() for more information -func (a *API) NewResourceManifest(ctx context.Context, resourceAddr string) (storage.Address, error) { +func (a *API) NewResourceManifest(ctx context.Context, view *mru.View) (storage.Address, error) { var manifest Manifest entry := ManifestEntry{ - Hash: resourceAddr, - ContentType: ResourceContentType, + ResourceView: view, + ContentType: ResourceContentType, } manifest.Entries = append(manifest.Entries, entry) data, err := json.Marshal(&manifest) diff --git a/swarm/storage/mru/binaryserializer.go b/swarm/storage/mru/binaryserializer.go new file mode 100644 index 000000000..3123a82ee --- /dev/null +++ b/swarm/storage/mru/binaryserializer.go @@ -0,0 +1,44 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package mru + +import "github.com/ethereum/go-ethereum/common/hexutil" + +type binarySerializer interface { + binaryPut(serializedData []byte) error + binaryLength() int + binaryGet(serializedData []byte) error +} + +// Values interface represents a string key-value store +// useful for building query strings +type Values interface { + Get(key string) string + Set(key, value string) +} + +type valueSerializer interface { + FromValues(values Values) error + AppendValues(values Values) +} + +// Hex serializes the structure and converts it to a hex string +func Hex(bin binarySerializer) string { + b := make([]byte, bin.binaryLength()) + bin.binaryPut(b) + return hexutil.Encode(b) +} diff --git a/swarm/storage/mru/binaryserializer_test.go b/swarm/storage/mru/binaryserializer_test.go new file mode 100644 index 000000000..f524157d6 --- /dev/null +++ b/swarm/storage/mru/binaryserializer_test.go @@ -0,0 +1,98 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
+ +package mru + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +// KV mocks a key value store +type KV map[string]string + +func (kv KV) Get(key string) string { + return kv[key] +} +func (kv KV) Set(key, value string) { + kv[key] = value +} + +func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) { + if hexutil.Encode(actualValue) != expectedHex { + t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue)) + } +} + +func testBinarySerializerRecovery(t *testing.T, bin binarySerializer, expectedHex string) { + name := reflect.TypeOf(bin).Elem().Name() + serialized := make([]byte, bin.binaryLength()) + if err := bin.binaryPut(serialized); err != nil { + t.Fatalf("%s.binaryPut error when trying to serialize structure: %s", name, err) + } + + compareByteSliceToExpectedHex(t, name, serialized, expectedHex) + + recovered := reflect.New(reflect.TypeOf(bin).Elem()).Interface().(binarySerializer) + if err := recovered.binaryGet(serialized); err != nil { + t.Fatalf("%s.binaryGet error when trying to deserialize structure: %s", name, err) + } + + if !reflect.DeepEqual(bin, recovered) { + t.Fatalf("Expected that the recovered %s equals the marshalled %s", name, name) + } + + serializedWrongLength := make([]byte, 1) + copy(serializedWrongLength[:], serialized) + if err := recovered.binaryGet(serializedWrongLength); err == nil { + t.Fatalf("Expected %s.binaryGet to fail since data is too small", name) + } +} + +func testBinarySerializerLengthCheck(t *testing.T, bin binarySerializer) { + name := reflect.TypeOf(bin).Elem().Name() + // make a slice that is too small to contain the metadata + serialized := make([]byte, bin.binaryLength()-1) + + if err := bin.binaryPut(serialized); err == nil { + t.Fatalf("Expected %s.binaryPut to fail, since target slice is too small", name) + } +} + +func testValueSerializer(t *testing.T, v valueSerializer, expected KV) { + name := reflect.TypeOf(v).Elem().Name() + kv := make(KV) + + v.AppendValues(kv) + if !reflect.DeepEqual(expected, kv) { + expj, _ := json.Marshal(expected) + gotj, _ := json.Marshal(kv) + t.Fatalf("Expected %s.AppendValues to return %s, got %s", name, string(expj), string(gotj)) + } + + recovered := reflect.New(reflect.TypeOf(v).Elem()).Interface().(valueSerializer) + err := recovered.FromValues(kv) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(recovered, v) { + t.Fatalf("Expected recovered %s to be the same", name) + } +} diff --git a/swarm/storage/mru/cacheentry.go b/swarm/storage/mru/cacheentry.go new file mode 100644 index 000000000..280331f77 --- /dev/null +++ b/swarm/storage/mru/cacheentry.go @@ -0,0 +1,48 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. 
If not, see <http://www.gnu.org/licenses/>. + +package mru + +import ( + "bytes" + "context" + "time" + + "github.com/ethereum/go-ethereum/swarm/storage" +) + +const ( + hasherCount = 8 + resourceHashAlgorithm = storage.SHA3Hash + defaultRetrieveTimeout = 100 * time.Millisecond +) + +// cacheEntry caches resource data and the metadata of its root chunk. +type cacheEntry struct { + ResourceUpdate + *bytes.Reader + lastKey storage.Address +} + +// implements storage.LazySectionReader +func (r *cacheEntry) Size(ctx context.Context, _ chan bool) (int64, error) { + return int64(len(r.ResourceUpdate.data)), nil +} + +//returns the resource's topic +func (r *cacheEntry) Topic() Topic { + return r.View.Topic +} diff --git a/swarm/storage/mru/doc.go b/swarm/storage/mru/doc.go index e1d7c2c34..19330e0c1 100644 --- a/swarm/storage/mru/doc.go +++ b/swarm/storage/mru/doc.go @@ -1,61 +1,44 @@ -// Package mru defines Mutable resource updates. -// A Mutable Resource is an entity which allows updates to a resource -// without resorting to ENS on each update. -// The update scheme is built on swarm chunks with chunk keys following -// a predictable, versionable pattern. -// -// Updates are defined to be periodic in nature, where the update frequency -// is expressed in seconds. -// -// The root entry of a mutable resource is tied to a unique identifier that -// is deterministically generated out of the metadata content that describes -// the resource. This metadata includes a user-defined resource name, a resource -// start time that indicates when the resource becomes valid, -// the frequency in seconds with which the resource is expected to be updated, both of -// which are stored as little-endian uint64 values in the database (for a -// total of 16 bytes). It also contains the owner's address (ownerAddr) -// This MRU info is stored in a separate content-addressed chunk -// (call it the metadata chunk), with the following layout: -// -// (00|length|startTime|frequency|name|ownerAddr) -// -// (The two first zero-value bytes are used for disambiguation by the chunk validator, -// and update chunk will always have a value > 0 there.) -// -// Each metadata chunk is identified by its rootAddr, calculated as follows: -// metaHash=H(len(metadata), startTime, frequency,name) -// rootAddr = H(metaHash, ownerAddr). -// where H is the SHA3 hash function -// This scheme effectively locks the root chunk so that only the owner of the private key -// that ownerAddr was derived from can sign updates. -// -// The root entry tells the requester from when the mutable resource was -// first added (Unix time in seconds) and in which moments to look for the -// actual updates. Thus, a resource update for identifier "føø.bar" -// starting at unix time 1528800000 with frequency 300 (every 5 mins) will have updates on 1528800300, -// 1528800600, 1528800900 and so on. -// -// Actual data updates are also made in the form of swarm chunks. The keys -// of the updates are the hash of a concatenation of properties as follows: -// -// updateAddr = H(period, version, rootAddr) -// where H is the SHA3 hash function -// The period is (currentTime - startTime) / frequency -// -// Using our previous example, this means that a period 3 will happen when the -// clock hits 1528800900 -// -// If more than one update is made in the same period, incremental -// version numbers are used successively. 
-// -// A user looking up a resource would only need to know the rootAddr in order to get the versions -// -// the resource update data is: -// resourcedata = headerlength|period|version|rootAddr|flags|metaHash -// where flags is a 1-byte flags field. Flag 0 is set to 1 to indicate multihash -// -// the full update data that goes in the chunk payload is: -// resourcedata|sign(resourcedata) -// -// headerlength is a 16 bit value containing the byte length of period|version|rootAddr|flags|metaHash +/* +Package mru defines Mutable resource updates. + +A Mutable Resource is an entity which allows updates to a resource +without resorting to ENS on each update. +The update scheme is built on swarm chunks with chunk keys following +a predictable, versionable pattern. + +A Resource is tied to a unique identifier that is deterministically generated out of +the chosen topic. + +A Resource View is defined as a specific user's point of view about a particular resource. +Thus, a View is a Topic + the user's address (userAddr) + +Actual data updates are also made in the form of swarm chunks. The keys +of the updates are the hash of a concatenation of properties as follows: + +updateAddr = H(View, Epoch ID) +where H is the SHA3 hash function +View is the combination of Topic and the user address +Epoch ID is a time slot. See the lookup package for more information. + +A user looking up a resource would only need to know the View in order to +another user's updates + +The resource update data is: +resourcedata = View|Epoch|data + +the full update data that goes in the chunk payload is: +resourcedata|sign(resourcedata) + +Structure Summary: + +Request: Resource update with signature + ResourceUpdate: headers + data + Header: Protocol version and reserved for future use placeholders + ID: Information about how to locate a specific update + View: Author of the update and what is updating + Topic: Item that the updates are about + User: User who updates the resource + Epoch: time slot where the update is stored + +*/ package mru diff --git a/swarm/storage/mru/handler.go b/swarm/storage/mru/handler.go index 18c667f14..3e7654795 100644 --- a/swarm/storage/mru/handler.go +++ b/swarm/storage/mru/handler.go @@ -21,11 +21,12 @@ package mru import ( "bytes" "context" + "fmt" "sync" "time" - "unsafe" - "github.com/ethereum/go-ethereum/swarm/chunk" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" + "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage" ) @@ -33,7 +34,7 @@ import ( type Handler struct { chunkStore *storage.NetStore HashSize int - resources map[uint64]*resource + resources map[uint64]*cacheEntry resourceLock sync.RWMutex storeTimeout time.Duration queryMaxPeriods uint32 @@ -42,12 +43,10 @@ type Handler struct { // HandlerParams pass parameters to the Handler constructor NewHandler // Signer and TimestampProvider are mandatory parameters type HandlerParams struct { - QueryMaxPeriods uint32 } // hashPool contains a pool of ready hashers var hashPool sync.Pool -var minimumChunkLength int // init initializes the package and hashPool func init() { @@ -56,19 +55,12 @@ func init() { return storage.MakeHashFunc(resourceHashAlgorithm)() }, } - if minimumMetadataLength < minimumUpdateDataLength { - minimumChunkLength = minimumMetadataLength - } else { - minimumChunkLength = minimumUpdateDataLength - } } // NewHandler creates a new Mutable Resource API func NewHandler(params *HandlerParams) *Handler { rh := &Handler{ - resources: make(map[uint64]*resource), - 
storeTimeout: defaultStoreTimeout, - queryMaxPeriods: params.QueryMaxPeriods, + resources: make(map[uint64]*cacheEntry), } for i := 0; i < hasherCount; i++ { @@ -88,44 +80,25 @@ func (h *Handler) SetStore(store *storage.NetStore) { } // Validate is a chunk validation method -// If it looks like a resource update, the chunk address is checked against the ownerAddr of the update's signature +// If it looks like a resource update, the chunk address is checked against the userAddr of the update's signature // It implements the storage.ChunkValidator interface func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { dataLength := len(data) - if dataLength < minimumChunkLength || dataLength > chunk.DefaultSize+8 { + if dataLength < minimumSignedUpdateLength { return false } - //metadata chunks have the first two bytes set to zero - if data[0] == 0 && data[1] == 0 && dataLength >= minimumMetadataLength { - //metadata chunk - rootAddr, _ := metadataHash(data) - valid := bytes.Equal(chunkAddr, rootAddr) - if !valid { - log.Debug("Invalid root metadata chunk with address", "addr", chunkAddr.Hex()) - } - return valid - } - - // if it is not a metadata chunk, check if it is a properly formatted update chunk with + // check if it is a properly formatted update chunk with // valid signature and proof of ownership of the resource it is trying // to update // First, deserialize the chunk - var r SignedResourceUpdate + var r Request if err := r.fromChunk(chunkAddr, data); err != nil { log.Debug("Invalid resource chunk", "addr", chunkAddr.Hex(), "err", err.Error()) return false } - // check that the lookup information contained in the chunk matches the updateAddr (chunk search key) - // that was used to retrieve this chunk - // if this validation fails, someone forged a chunk. - if !bytes.Equal(chunkAddr, r.updateHeader.UpdateAddr()) { - log.Debug("period,version,rootAddr contained in update chunk do not match updateAddr", "addr", chunkAddr.Hex()) - return false - } - // Verify signatures and that the signer actually owns the resource // If it fails, it means either the signature is not valid, data is corrupted // or someone is trying to update someone else's resource. 
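To make the new addressing scheme concrete, here is a minimal sketch (not part of this changeset) of deriving an update's chunk address from its View and Epoch, mirroring what the ID type added later in this diff computes in Addr(); the topic name and user address are placeholders.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

// printUpdateAddr derives the chunk address of a hypothetical first update.
func printUpdateAddr() error {
	// A View is who updates the resource (User) plus what is updated (Topic).
	topic, err := mru.NewTopic("example-topic", nil) // placeholder topic name
	if err != nil {
		return err
	}
	var user common.Address // placeholder user address (all zeros)
	view := mru.View{Topic: topic, User: user}

	// updateAddr = H(View, Epoch ID): the ID hashes the serialized View
	// together with the epoch identifier, as described in doc.go.
	id := mru.ID{
		View:  view,
		Epoch: lookup.GetFirstEpoch(1000), // epoch for an update made at t=1000
	}
	fmt.Println("update chunk address:", id.Addr().Hex())
	return nil
}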
@@ -138,301 +111,134 @@ func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool { } // GetContent retrieves the data payload of the last synced update of the Mutable Resource -func (h *Handler) GetContent(rootAddr storage.Address) (storage.Address, []byte, error) { - rsrc := h.get(rootAddr) - if rsrc == nil || !rsrc.isSynced() { - return nil, nil, NewError(ErrNotFound, " does not exist or is not synced") - } - return rsrc.lastKey, rsrc.data, nil -} - -// GetLastPeriod retrieves the period of the last synced update of the Mutable Resource -func (h *Handler) GetLastPeriod(rootAddr storage.Address) (uint32, error) { - rsrc := h.get(rootAddr) - if rsrc == nil { - return 0, NewError(ErrNotFound, " does not exist") - } else if !rsrc.isSynced() { - return 0, NewError(ErrNotSynced, " is not synced") +func (h *Handler) GetContent(view *View) (storage.Address, []byte, error) { + if view == nil { + return nil, nil, NewError(ErrInvalidValue, "view is nil") } - return rsrc.period, nil -} - -// GetVersion retrieves the period of the last synced update of the Mutable Resource -func (h *Handler) GetVersion(rootAddr storage.Address) (uint32, error) { - rsrc := h.get(rootAddr) + rsrc := h.get(view) if rsrc == nil { - return 0, NewError(ErrNotFound, " does not exist") - } else if !rsrc.isSynced() { - return 0, NewError(ErrNotSynced, " is not synced") - } - return rsrc.version, nil -} - -// New creates a new metadata chunk out of the request passed in. -func (h *Handler) New(ctx context.Context, request *Request) error { - - // frequency 0 is invalid - if request.metadata.Frequency == 0 { - return NewError(ErrInvalidValue, "frequency cannot be 0 when creating a resource") + return nil, nil, NewError(ErrNotFound, "resource does not exist") } - - // make sure owner is set to something - if request.metadata.Owner == zeroAddr { - return NewError(ErrInvalidValue, "ownerAddr must be set to create a new metadata chunk") - } - - // create the meta chunk and store it in swarm - chunk, metaHash, err := request.metadata.newChunk() - if err != nil { - return err - } - if request.metaHash != nil && !bytes.Equal(request.metaHash, metaHash) || - request.rootAddr != nil && !bytes.Equal(request.rootAddr, chunk.Address()) { - return NewError(ErrInvalidValue, "metaHash in UpdateRequest does not match actual metadata") - } - - request.metaHash = metaHash - request.rootAddr = chunk.Address() - - h.chunkStore.Put(ctx, chunk) - log.Debug("new resource", "name", request.metadata.Name, "startTime", request.metadata.StartTime, "frequency", request.metadata.Frequency, "owner", request.metadata.Owner) - - // create the internal index for the resource and populate it with its metadata - rsrc := &resource{ - resourceUpdate: resourceUpdate{ - updateHeader: updateHeader{ - UpdateLookup: UpdateLookup{ - rootAddr: chunk.Address(), - }, - }, - }, - ResourceMetadata: request.metadata, - updated: time.Now(), - } - h.set(chunk.Address(), rsrc) - - return nil + return rsrc.lastKey, rsrc.data, nil } -// NewUpdateRequest prepares an UpdateRequest structure with all the necessary information to +// NewRequest prepares a Request structure with all the necessary information to // just add the desired data and sign it. 
// The resulting structure can then be signed and passed to Handler.Update to be verified and sent -func (h *Handler) NewUpdateRequest(ctx context.Context, rootAddr storage.Address) (updateRequest *Request, err error) { - - if rootAddr == nil { - return nil, NewError(ErrInvalidValue, "rootAddr cannot be nil") +func (h *Handler) NewRequest(ctx context.Context, view *View) (request *Request, err error) { + if view == nil { + return nil, NewError(ErrInvalidValue, "view cannot be nil") } - // Make sure we have a cache of the metadata chunk - rsrc, err := h.Load(ctx, rootAddr) - if err != nil { - return nil, err - } + now := TimestampProvider.Now().Time + request = new(Request) + request.Header.Version = ProtocolVersion - now := TimestampProvider.Now() + query := NewQueryLatest(view, lookup.NoClue) - updateRequest = new(Request) - updateRequest.period, err = getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency) + rsrc, err := h.Lookup(ctx, query) if err != nil { - return nil, err - } - - if _, err = h.lookup(rsrc, LookupLatestVersionInPeriod(rsrc.rootAddr, updateRequest.period)); err != nil { if err.(*Error).code != ErrNotFound { return nil, err } // not finding updates means that there is a network error - // or that the resource really does not have updates in this period. + // or that the resource really does not have updates } - updateRequest.multihash = rsrc.multihash - updateRequest.rootAddr = rsrc.rootAddr - updateRequest.metaHash = rsrc.metaHash - updateRequest.metadata = rsrc.ResourceMetadata + request.View = *view - // if we already have an update for this period then increment version - // resource object MUST be in sync for version to be correct, but we checked this earlier in the method already - if h.hasUpdate(rootAddr, updateRequest.period) { - updateRequest.version = rsrc.version + 1 + // if we already have an update, then find next epoch + if rsrc != nil { + request.Epoch = lookup.GetNextEpoch(rsrc.Epoch, now) } else { - updateRequest.version = 1 + request.Epoch = lookup.GetFirstEpoch(now) } - return updateRequest, nil + return request, nil } -// Lookup retrieves a specific or latest version of the resource update with metadata chunk at params.Root -// Lookup works differently depending on the configuration of `LookupParams` -// See the `LookupParams` documentation and helper functions: -// `LookupLatest`, `LookupLatestVersionInPeriod` and `LookupVersion` +// Lookup retrieves a specific or latest version of the resource +// Lookup works differently depending on the configuration of `ID` +// See the `ID` documentation and helper functions: +// `LookupLatest` and `LookupBefore` // When looking for the latest update, it starts at the next period after the current time. // upon failure tries the corresponding keys of each previous period until one is found // (or startTime is reached, in which case there are no updates). 
-func (h *Handler) Lookup(ctx context.Context, params *LookupParams) (*resource, error) { +func (h *Handler) Lookup(ctx context.Context, query *Query) (*cacheEntry, error) { - rsrc := h.get(params.rootAddr) - if rsrc == nil { - return nil, NewError(ErrNothingToReturn, "resource not loaded") + timeLimit := query.TimeLimit + if timeLimit == 0 { // if time limit is set to zero, the user wants to get the latest update + timeLimit = TimestampProvider.Now().Time } - return h.lookup(rsrc, params) -} -// LookupPrevious returns the resource before the one currently loaded in the resource cache -// This is useful where resource updates are used incrementally in contrast to -// merely replacing content. -// Requires a cached resource object to determine the current state of the resource. -func (h *Handler) LookupPrevious(ctx context.Context, params *LookupParams) (*resource, error) { - rsrc := h.get(params.rootAddr) - if rsrc == nil { - return nil, NewError(ErrNothingToReturn, "resource not loaded") - } - if !rsrc.isSynced() { - return nil, NewError(ErrNotSynced, "LookupPrevious requires synced resource.") - } else if rsrc.period == 0 { - return nil, NewError(ErrNothingToReturn, " not found") - } - var version, period uint32 - if rsrc.version > 1 { - version = rsrc.version - 1 - period = rsrc.period - } else if rsrc.period == 1 { - return nil, NewError(ErrNothingToReturn, "Current update is the oldest") - } else { - version = 0 - period = rsrc.period - 1 + if query.Hint == lookup.NoClue { // try to use our cache + entry := h.get(&query.View) + if entry != nil && entry.Epoch.Time <= timeLimit { // avoid bad hints + query.Hint = entry.Epoch + } } - return h.lookup(rsrc, NewLookupParams(rsrc.rootAddr, period, version, params.Limit)) -} -// base code for public lookup methods -func (h *Handler) lookup(rsrc *resource, params *LookupParams) (*resource, error) { - - lp := *params // we can't look for anything without a store if h.chunkStore == nil { return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups") } - var specificperiod bool - if lp.period > 0 { - specificperiod = true - } else { - // get the current time and the next period - now := TimestampProvider.Now() - - var period uint32 - period, err := getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency) - if err != nil { - return nil, err - } - lp.period = period - } + var ul ID + ul.View = query.View + var readCount int - // start from the last possible period, and iterate previous ones - // (unless we want a specific period only) until we find a match. - // If we hit startTime we're out of options - var specificversion bool - if lp.version > 0 { - specificversion = true - } else { - lp.version = 1 - } + // Invoke the lookup engine. 
+ // The callback will be called every time the lookup algorithm needs to guess + requestPtr, err := lookup.Lookup(timeLimit, query.Hint, func(epoch lookup.Epoch, now uint64) (interface{}, error) { + readCount++ + ul.Epoch = epoch + ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout) + defer cancel() - var hops uint32 - if lp.Limit == 0 { - lp.Limit = h.queryMaxPeriods - } - log.Trace("resource lookup", "period", lp.period, "version", lp.version, "limit", lp.Limit) - for lp.period > 0 { - if lp.Limit != 0 && hops > lp.Limit { - return nil, NewErrorf(ErrPeriodDepth, "Lookup exceeded max period hops (%d)", lp.Limit) + chunk, err := h.chunkStore.Get(ctx, ul.Addr()) + if err != nil { // TODO: check for catastrophic errors other than chunk not found + return nil, nil } - updateAddr := lp.UpdateAddr() - - ctx, cancel := context.WithTimeout(context.Background(), defaultRetrieveTimeout) - defer cancel() - chunk, err := h.chunkStore.Get(ctx, updateAddr) - if err == nil { - if specificversion { - return h.updateIndex(rsrc, chunk) - } - // check if we have versions > 1. If a version fails, the previous version is used and returned. - log.Trace("rsrc update version 1 found, checking for version updates", "period", lp.period, "updateAddr", updateAddr) - for { - newversion := lp.version + 1 - updateAddr := lp.UpdateAddr() - - ctx, cancel := context.WithTimeout(context.Background(), defaultRetrieveTimeout) - defer cancel() - - newchunk, err := h.chunkStore.Get(ctx, updateAddr) - if err != nil { - return h.updateIndex(rsrc, chunk) - } - chunk = newchunk - lp.version = newversion - log.Trace("version update found, checking next", "version", lp.version, "period", lp.period, "updateAddr", updateAddr) - } + var request Request + if err := request.fromChunk(chunk.Address(), chunk.Data()); err != nil { + return nil, nil } - if specificperiod { - break + if request.Time <= timeLimit { + return &request, nil } - log.Trace("rsrc update not found, checking previous period", "period", lp.period, "updateAddr", updateAddr) - lp.period-- - hops++ - } - return nil, NewError(ErrNotFound, "no updates found") -} - -// Load retrieves the Mutable Resource metadata chunk stored at rootAddr -// Upon retrieval it creates/updates the index entry for it with metadata corresponding to the chunk contents -func (h *Handler) Load(ctx context.Context, rootAddr storage.Address) (*resource, error) { - //TODO: Maybe add timeout to context, defaultRetrieveTimeout? 
- ctx, cancel := context.WithTimeout(ctx, defaultRetrieveTimeout) - defer cancel() - chunk, err := h.chunkStore.Get(ctx, rootAddr) + return nil, nil + }) if err != nil { - return nil, NewError(ErrNotFound, err.Error()) + return nil, err } - // create the index entry - rsrc := &resource{} + log.Info(fmt.Sprintf("Resource lookup finished in %d lookups", readCount)) - if err := rsrc.ResourceMetadata.binaryGet(chunk.Data()); err != nil { // Will fail if this is not really a metadata chunk - return nil, err + request, _ := requestPtr.(*Request) + if request == nil { + return nil, NewError(ErrNotFound, "no updates found") } + return h.updateCache(request) - rsrc.rootAddr, rsrc.metaHash = metadataHash(chunk.Data()) - if !bytes.Equal(rsrc.rootAddr, rootAddr) { - return nil, NewError(ErrCorruptData, "Corrupt metadata chunk") - } - h.set(rootAddr, rsrc) - log.Trace("resource index load", "rootkey", rootAddr, "name", rsrc.ResourceMetadata.Name, "starttime", rsrc.ResourceMetadata.StartTime, "frequency", rsrc.ResourceMetadata.Frequency) - return rsrc, nil } -// update mutable resource index map with specified content -func (h *Handler) updateIndex(rsrc *resource, chunk storage.Chunk) (*resource, error) { +// update mutable resource cache map with specified content +func (h *Handler) updateCache(request *Request) (*cacheEntry, error) { - // retrieve metadata from chunk data and check that it matches this mutable resource - var r SignedResourceUpdate - if err := r.fromChunk(chunk.Address(), chunk.Data()); err != nil { - return nil, err + updateAddr := request.Addr() + log.Trace("resource cache update", "topic", request.Topic.Hex(), "updatekey", updateAddr, "epoch time", request.Epoch.Time, "epoch level", request.Epoch.Level) + + rsrc := h.get(&request.View) + if rsrc == nil { + rsrc = &cacheEntry{} + h.set(&request.View, rsrc) } - log.Trace("resource index update", "name", rsrc.ResourceMetadata.Name, "updatekey", chunk.Address(), "period", r.period, "version", r.version) // update our rsrcs entry map - rsrc.lastKey = chunk.Address() - rsrc.period = r.period - rsrc.version = r.version - rsrc.updated = time.Now() - rsrc.data = make([]byte, len(r.data)) - rsrc.multihash = r.multihash - copy(rsrc.data, r.data) + rsrc.lastKey = updateAddr + rsrc.ResourceUpdate = request.ResourceUpdate rsrc.Reader = bytes.NewReader(rsrc.data) - log.Debug("resource synced", "name", rsrc.ResourceMetadata.Name, "updateAddr", chunk.Address(), "period", rsrc.period, "version", rsrc.version) - h.set(chunk.Address(), rsrc) return rsrc, nil } @@ -442,23 +248,16 @@ func (h *Handler) updateIndex(rsrc *resource, chunk storage.Chunk) (*resource, e // Note that a Mutable Resource update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. An error will be returned if the total length of the chunk payload will exceed this limit. // Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update // on the network. 
-func (h *Handler) Update(ctx context.Context, r *SignedResourceUpdate) (storage.Address, error) { - return h.update(ctx, r) -} - -// create and commit an update -func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAddr storage.Address, err error) { +func (h *Handler) Update(ctx context.Context, r *Request) (updateAddr storage.Address, err error) { // we can't update anything without a store if h.chunkStore == nil { return nil, NewError(ErrInit, "Call Handler.SetStore() before updating") } - rsrc := h.get(r.rootAddr) - if rsrc != nil && rsrc.period != 0 && rsrc.version != 0 && // This is the only cheap check we can do for sure - rsrc.period == r.period && rsrc.version >= r.version { // without having to lookup update chunks - - return nil, NewError(ErrInvalidValue, "A former update in this period is already known to exist") + rsrc := h.get(&r.View) + if rsrc != nil && rsrc.Epoch.Equals(r.Epoch) { // This is the only cheap check we can do for sure + return nil, NewError(ErrInvalidValue, "A former update in this epoch is already known to exist") } chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big @@ -468,49 +267,32 @@ func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAd // send the chunk h.chunkStore.Put(ctx, chunk) - log.Trace("resource update", "updateAddr", r.updateAddr, "lastperiod", r.period, "version", r.version, "data", chunk.Data(), "multihash", r.multihash) - - // update our resources map entry if the new update is older than the one we have, if we have it. - if rsrc != nil && (r.period > rsrc.period || (rsrc.period == r.period && r.version > rsrc.version)) { - rsrc.period = r.period - rsrc.version = r.version + log.Trace("resource update", "updateAddr", r.idAddr, "epoch time", r.Epoch.Time, "epoch level", r.Epoch.Level, "data", chunk.Data()) + // update our resources map cache entry if the new update is older than the one we have, if we have it. 
+ if rsrc != nil && r.Epoch.After(rsrc.Epoch) { + rsrc.Epoch = r.Epoch rsrc.data = make([]byte, len(r.data)) - rsrc.updated = time.Now() - rsrc.lastKey = r.updateAddr - rsrc.multihash = r.multihash + rsrc.lastKey = r.idAddr copy(rsrc.data, r.data) rsrc.Reader = bytes.NewReader(rsrc.data) } - return r.updateAddr, nil + + return r.idAddr, nil } -// Retrieves the resource index value for the given nameHash -func (h *Handler) get(rootAddr storage.Address) *resource { - if len(rootAddr) < storage.AddressLength { - log.Warn("Handler.get with invalid rootAddr") - return nil - } - hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0])) +// Retrieves the resource cache value for the given nameHash +func (h *Handler) get(view *View) *cacheEntry { + mapKey := view.mapKey() h.resourceLock.RLock() defer h.resourceLock.RUnlock() - rsrc := h.resources[hashKey] + rsrc := h.resources[mapKey] return rsrc } -// Sets the resource index value for the given nameHash -func (h *Handler) set(rootAddr storage.Address, rsrc *resource) { - if len(rootAddr) < storage.AddressLength { - log.Warn("Handler.set with invalid rootAddr") - return - } - hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0])) +// Sets the resource cache value for the given View +func (h *Handler) set(view *View, rsrc *cacheEntry) { + mapKey := view.mapKey() h.resourceLock.Lock() defer h.resourceLock.Unlock() - h.resources[hashKey] = rsrc -} - -// Checks if we already have an update on this resource, according to the value in the current state of the resource index -func (h *Handler) hasUpdate(rootAddr storage.Address, period uint32) bool { - rsrc := h.get(rootAddr) - return rsrc != nil && rsrc.period == period + h.resources[mapKey] = rsrc } diff --git a/swarm/storage/mru/handler_test.go b/swarm/storage/mru/handler_test.go new file mode 100644 index 000000000..13eb9e51b --- /dev/null +++ b/swarm/storage/mru/handler_test.go @@ -0,0 +1,520 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
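Before the test file, a compact sketch (not part of this changeset) of the publish-and-read cycle the tests exercise with the reworked Handler API. It assumes a Handler that already has a NetStore attached via SetStore and a ready GenericSigner; topic name and payloads are placeholders.

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/storage/mru"
	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

// publishAndRead assumes h already has a NetStore attached via h.SetStore.
func publishAndRead(ctx context.Context, h *mru.Handler, signer *mru.GenericSigner) ([]byte, error) {
	topic, err := mru.NewTopic("example-topic", nil) // placeholder topic name
	if err != nil {
		return nil, err
	}
	view := mru.View{Topic: topic, User: signer.Address()}

	// First update: NewFirstRequest picks the first epoch for the current time.
	request := mru.NewFirstRequest(topic)
	request.SetData([]byte("first value"))
	if err := request.Sign(signer); err != nil {
		return nil, err
	}
	if _, err := h.Update(ctx, request); err != nil {
		return nil, err
	}

	// Some time later, let the handler suggest the next epoch for this view.
	next, err := h.NewRequest(ctx, &view)
	if err != nil {
		return nil, err
	}
	next.SetData([]byte("second value"))
	if err := next.Sign(signer); err != nil {
		return nil, err
	}
	if _, err := h.Update(ctx, next); err != nil {
		return nil, err
	}

	// Reading back: Lookup fills the handler cache, GetContent returns the data.
	if _, err := h.Lookup(ctx, mru.NewQueryLatest(&view, lookup.NoClue)); err != nil {
		return nil, err
	}
	_, content, err := h.GetContent(&view)
	return content, err
}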
+ +package mru + +import ( + "bytes" + "context" + "flag" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/ethereum/go-ethereum/crypto" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/swarm/chunk" + "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" +) + +var ( + loglevel = flag.Int("loglevel", 3, "loglevel") + startTime = Timestamp{ + Time: uint64(4200), + } + cleanF func() + resourceName = "føø.bar" + hashfunc = storage.MakeHashFunc(storage.DefaultHash) +) + +func init() { + flag.Parse() + log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) +} + +// simulated timeProvider +type fakeTimeProvider struct { + currentTime uint64 +} + +func (f *fakeTimeProvider) Tick() { + f.currentTime++ +} + +func (f *fakeTimeProvider) Set(time uint64) { + f.currentTime = time +} + +func (f *fakeTimeProvider) FastForward(offset uint64) { + f.currentTime += offset +} + +func (f *fakeTimeProvider) Now() Timestamp { + return Timestamp{ + Time: f.currentTime, + } +} + +// make updates and retrieve them based on periods and versions +func TestResourceHandler(t *testing.T) { + + // make fake timeProvider + clock := &fakeTimeProvider{ + currentTime: startTime.Time, // clock starts at t=4200 + } + + // signer containing private key + signer := newAliceSigner() + + rh, datadir, teardownTest, err := setupTest(clock, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + + // create a new resource + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + topic, _ := NewTopic("Mess with mru code and see what ghost catches you", nil) + view := View{ + Topic: topic, + User: signer.Address(), + } + + // data for updates: + updates := []string{ + "blinky", // t=4200 + "pinky", // t=4242 + "inky", // t=4284 + "clyde", // t=4285 + } + + request := NewFirstRequest(view.Topic) // this timestamps the update at t = 4200 (start time) + resourcekey := make(map[string]storage.Address) + data := []byte(updates[0]) + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[0]], err = rh.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + // move the clock ahead 21 seconds + clock.FastForward(21) // t=4221 + + request, err = rh.NewRequest(ctx, &request.View) // this timestamps the update at t = 4221 + if err != nil { + t.Fatal(err) + } + if request.Epoch.Base() != 0 || request.Epoch.Level != lookup.HighestLevel-1 { + t.Fatalf("Suggested epoch BaseTime should be 0 and Epoch level should be %d", lookup.HighestLevel-1) + } + + request.Epoch.Level = lookup.HighestLevel // force level 25 instead of 24 to make it fail + data = []byte(updates[1]) + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[1]], err = rh.Update(ctx, request) + if err == nil { + t.Fatal("Expected update to fail since an update in this epoch already exists") + } + + // move the clock ahead 21 seconds + clock.FastForward(21) // t=4242 + request, err = rh.NewRequest(ctx, &request.View) + if err != nil { + t.Fatal(err) + } + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[1]], err = rh.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + // move the clock ahead 42 seconds + clock.FastForward(42) // t=4284 + request, err = rh.NewRequest(ctx, &request.View) + 
if err != nil { + t.Fatal(err) + } + data = []byte(updates[2]) + request.SetData(data) + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[2]], err = rh.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + // move the clock ahead 1 second + clock.FastForward(1) // t=4285 + request, err = rh.NewRequest(ctx, &request.View) + if err != nil { + t.Fatal(err) + } + if request.Epoch.Base() != 0 || request.Epoch.Level != 22 { + t.Fatalf("Expected epoch base time to be %d, got %d. Expected epoch level to be %d, got %d", 0, request.Epoch.Base(), 22, request.Epoch.Level) + } + data = []byte(updates[3]) + request.SetData(data) + + if err := request.Sign(signer); err != nil { + t.Fatal(err) + } + resourcekey[updates[3]], err = rh.Update(ctx, request) + if err != nil { + t.Fatal(err) + } + + time.Sleep(time.Second) + rh.Close() + + // check we can retrieve the updates after close + clock.FastForward(2000) // t=6285 + + rhparams := &HandlerParams{} + + rh2, err := NewTestHandler(datadir, rhparams) + if err != nil { + t.Fatal(err) + } + + rsrc2, err := rh2.Lookup(ctx, NewQueryLatest(&request.View, lookup.NoClue)) + if err != nil { + t.Fatal(err) + } + + // last update should be "clyde" + if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) { + t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1]) + } + if rsrc2.Level != 22 { + t.Fatalf("resource epoch level was %d, expected 22", rsrc2.Level) + } + if rsrc2.Base() != 0 { + t.Fatalf("resource epoch base time was %d, expected 0", rsrc2.Base()) + } + log.Debug("Latest lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data) + + // specific point in time + rsrc, err := rh2.Lookup(ctx, NewQuery(&request.View, 4284, lookup.NoClue)) + if err != nil { + t.Fatal(err) + } + // check data + if !bytes.Equal(rsrc.data, []byte(updates[2])) { + t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[2]) + } + log.Debug("Historical lookup", "epoch base time", rsrc2.Base(), "epoch level", rsrc2.Level, "data", rsrc2.data) + + // beyond the first should yield an error + rsrc, err = rh2.Lookup(ctx, NewQuery(&request.View, startTime.Time-1, lookup.NoClue)) + if err == nil { + t.Fatalf("expected previous to fail, returned epoch %s data %v", rsrc.Epoch.String(), rsrc.data) + } + +} + +const Day = 60 * 60 * 24 +const Year = Day * 365 +const Month = Day * 30 + +func generateData(x uint64) []byte { + return []byte(fmt.Sprintf("%d", x)) +} + +func TestSparseUpdates(t *testing.T) { + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key + signer := newAliceSigner() + + rh, datadir, teardownTest, err := setupTest(timeProvider, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + defer os.RemoveAll(datadir) + + // create a new resource + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + topic, _ := NewTopic("Very slow updates", nil) + view := View{ + Topic: topic, + User: signer.Address(), + } + + // publish one update every 5 years since Unix 0 until today + today := uint64(1533799046) + var epoch lookup.Epoch + var lastUpdateTime uint64 + for T := uint64(0); T < today; T += 5 * Year { + request := NewFirstRequest(view.Topic) + request.Epoch = lookup.GetNextEpoch(epoch, T) + request.data = generateData(T) // this generates some data that depends on T, so we can check later + request.Sign(signer) + if err != nil 
{ + t.Fatal(err) + } + + if _, err := rh.Update(ctx, request); err != nil { + t.Fatal(err) + } + epoch = request.Epoch + lastUpdateTime = T + } + + query := NewQuery(&view, today, lookup.NoClue) + + _, err = rh.Lookup(ctx, query) + if err != nil { + t.Fatal(err) + } + + _, content, err := rh.GetContent(&view) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(generateData(lastUpdateTime), content) { + t.Fatalf("Expected to recover last written value %d, got %s", lastUpdateTime, string(content)) + } + + // lookup the closest update to 35*Year + 6* Month (~ June 2005): + // it should find the update we put on 35*Year, since we were updating every 5 years. + + query.TimeLimit = 35*Year + 6*Month + + _, err = rh.Lookup(ctx, query) + if err != nil { + t.Fatal(err) + } + + _, content, err = rh.GetContent(&view) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(generateData(35*Year), content) { + t.Fatalf("Expected to recover %d, got %s", 35*Year, string(content)) + } +} + +func TestValidator(t *testing.T) { + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key. Alice will be the good girl + signer := newAliceSigner() + + // set up sim timeProvider + rh, _, teardownTest, err := setupTest(timeProvider, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + + // create new resource + topic, _ := NewTopic(resourceName, nil) + view := View{ + Topic: topic, + User: signer.Address(), + } + mr := NewFirstRequest(view.Topic) + + // chunk with address + data := []byte("foo") + mr.SetData(data) + if err := mr.Sign(signer); err != nil { + t.Fatalf("sign fail: %v", err) + } + + chunk, err := mr.toChunk() + if err != nil { + t.Fatal(err) + } + if !rh.Validate(chunk.Address(), chunk.Data()) { + t.Fatal("Chunk validator fail on update chunk") + } + + address := chunk.Address() + // mess with the address + address[0] = 11 + address[15] = 99 + + if rh.Validate(address, chunk.Data()) { + t.Fatal("Expected Validate to fail with false chunk address") + } +} + +// tests that the content address validator correctly checks the data +// tests that resource update chunks are passed through content address validator +// there is some redundancy in this test as it also tests content addressed chunks, +// which should be evaluated as invalid chunks by this validator +func TestValidatorInStore(t *testing.T) { + + // make fake timeProvider + TimestampProvider = &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key + signer := newAliceSigner() + + // set up localstore + datadir, err := ioutil.TempDir("", "storage-testresourcevalidator") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(datadir) + + handlerParams := storage.NewDefaultLocalStoreParams() + handlerParams.Init(datadir) + store, err := storage.NewLocalStore(handlerParams, nil) + if err != nil { + t.Fatal(err) + } + + // set up resource handler and add is as a validator to the localstore + rhParams := &HandlerParams{} + rh := NewHandler(rhParams) + store.Validators = append(store.Validators, rh) + + // create content addressed chunks, one good, one faulty + chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2) + goodChunk := chunks[0] + badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data()) + + topic, _ := NewTopic("xyzzy", nil) + view := View{ + Topic: topic, + User: signer.Address(), + } + + // create a resource update chunk with correct publickey + id := ID{ + Epoch: lookup.Epoch{Time: 42, 
+ Level: 1, + }, + View: view, + } + + updateAddr := id.Addr() + data := []byte("bar") + + r := new(Request) + r.idAddr = updateAddr + r.ResourceUpdate.ID = id + r.data = data + + r.Sign(signer) + + uglyChunk, err := r.toChunk() + if err != nil { + t.Fatal(err) + } + + // put the chunks in the store and check their error status + err = store.Put(context.Background(), goodChunk) + if err == nil { + t.Fatal("expected error on good content address chunk with resource validator only, but got nil") + } + err = store.Put(context.Background(), badChunk) + if err == nil { + t.Fatal("expected error on bad content address chunk with resource validator only, but got nil") + } + err = store.Put(context.Background(), uglyChunk) + if err != nil { + t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err) + } +} + +// create rpc and resourcehandler +func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, datadir string, teardown func(), err error) { + + var fsClean func() + var rpcClean func() + cleanF = func() { + if fsClean != nil { + fsClean() + } + if rpcClean != nil { + rpcClean() + } + } + + // temp datadir + datadir, err = ioutil.TempDir("", "rh") + if err != nil { + return nil, "", nil, err + } + fsClean = func() { + os.RemoveAll(datadir) + } + + TimestampProvider = timeProvider + rhparams := &HandlerParams{} + rh, err = NewTestHandler(datadir, rhparams) + return rh, datadir, cleanF, err +} + +func newAliceSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + return NewGenericSigner(privKey) +} + +func newBobSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca") + return NewGenericSigner(privKey) +} + +func newCharlieSigner() *GenericSigner { + privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca") + return NewGenericSigner(privKey) +} + +func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) { + chunk, err := rh.chunkStore.Get(context.TODO(), addr) + if err != nil { + return nil, err + } + var r Request + if err := r.fromChunk(addr, chunk.Data()); err != nil { + return nil, err + } + return r.data, nil +} diff --git a/swarm/storage/mru/id.go b/swarm/storage/mru/id.go new file mode 100644 index 000000000..f008169ed --- /dev/null +++ b/swarm/storage/mru/id.go @@ -0,0 +1,123 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
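The store wiring used by TestValidatorInStore above can be distilled into a short sketch (not part of this changeset): it registers the handler as the only chunk validator, so the LocalStore accepts well-formed signed resource updates and rejects everything else, which is what the test asserts. The datadir argument is a placeholder.

package example

import (
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

// newValidatingStore wires a LocalStore whose only chunk validator is the
// Mutable Resource handler, as TestValidatorInStore does.
func newValidatingStore(datadir string) (*storage.LocalStore, *mru.Handler, error) {
	params := storage.NewDefaultLocalStoreParams()
	params.Init(datadir)
	store, err := storage.NewLocalStore(params, nil)
	if err != nil {
		return nil, nil, err
	}
	rh := mru.NewHandler(&mru.HandlerParams{})
	// Handler implements storage.ChunkValidator via its Validate method.
	store.Validators = append(store.Validators, rh)
	return store, rh, nil
}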
+ +package mru + +import ( + "fmt" + "hash" + "strconv" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" + + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// ID uniquely identifies an update on the network. +type ID struct { + View `json:"view"` + lookup.Epoch `json:"epoch"` +} + +// ID layout: +// View viewLength bytes +// Epoch EpochLength +const idLength = viewLength + lookup.EpochLength + +// Addr calculates the resource update chunk address corresponding to this ID +func (u *ID) Addr() (updateAddr storage.Address) { + serializedData := make([]byte, idLength) + var cursor int + u.View.binaryPut(serializedData[cursor : cursor+viewLength]) + cursor += viewLength + + eid := u.Epoch.ID() + copy(serializedData[cursor:cursor+lookup.EpochLength], eid[:]) + + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + hasher.Write(serializedData) + return hasher.Sum(nil) +} + +// binaryPut serializes this instance into the provided slice +func (u *ID) binaryPut(serializedData []byte) error { + if len(serializedData) != idLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize ID. Expected %d, got %d", idLength, len(serializedData)) + } + var cursor int + if err := u.View.binaryPut(serializedData[cursor : cursor+viewLength]); err != nil { + return err + } + cursor += viewLength + + epochBytes, err := u.Epoch.MarshalBinary() + if err != nil { + return err + } + copy(serializedData[cursor:cursor+lookup.EpochLength], epochBytes[:]) + cursor += lookup.EpochLength + + return nil +} + +// binaryLength returns the expected size of this structure when serialized +func (u *ID) binaryLength() int { + return idLength +} + +// binaryGet restores the current instance from the information contained in the passed slice +func (u *ID) binaryGet(serializedData []byte) error { + if len(serializedData) != idLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to read ID. 
Expected %d, got %d", idLength, len(serializedData)) + } + + var cursor int + if err := u.View.binaryGet(serializedData[cursor : cursor+viewLength]); err != nil { + return err + } + cursor += viewLength + + if err := u.Epoch.UnmarshalBinary(serializedData[cursor : cursor+lookup.EpochLength]); err != nil { + return err + } + cursor += lookup.EpochLength + + return nil +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (u *ID) FromValues(values Values) error { + level, _ := strconv.ParseUint(values.Get("level"), 10, 32) + u.Epoch.Level = uint8(level) + u.Epoch.Time, _ = strconv.ParseUint(values.Get("time"), 10, 64) + + if u.View.User == (common.Address{}) { + return u.View.FromValues(values) + } + return nil +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (u *ID) AppendValues(values Values) { + values.Set("level", fmt.Sprintf("%d", u.Epoch.Level)) + values.Set("time", fmt.Sprintf("%d", u.Epoch.Time)) + u.View.AppendValues(values) +} diff --git a/swarm/storage/mru/id_test.go b/swarm/storage/mru/id_test.go new file mode 100644 index 000000000..eba58fbf3 --- /dev/null +++ b/swarm/storage/mru/id_test.go @@ -0,0 +1,28 @@ +package mru + +import ( + "testing" + + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" +) + +func getTestID() *ID { + return &ID{ + View: *getTestView(), + Epoch: lookup.GetFirstEpoch(1000), + } +} + +func TestIDAddr(t *testing.T) { + ul := getTestID() + updateAddr := ul.Addr() + compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8b24583ec293e085f4c78aaee66d1bc5abfb8b4233304d14a349afa57af2a783") +} + +func TestIDSerializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestID(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019") +} + +func TestIDLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestID()) +} diff --git a/swarm/storage/mru/lookup.go b/swarm/storage/mru/lookup.go deleted file mode 100644 index b52cd5b4f..000000000 --- a/swarm/storage/mru/lookup.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
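A short sketch (not part of this changeset) of round-tripping an ID through an HTTP query string with the FromValues/AppendValues helpers above. It assumes, as their Get/Set usage suggests, that net/url's url.Values satisfies the package's Values interface; if Values is defined differently, a thin adapter would be needed.

package example

import (
	"net/url"

	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

// queryStringFor encodes an ID into a query string using AppendValues.
func queryStringFor(id *mru.ID) string {
	values := make(url.Values) // assumed to satisfy mru.Values (same Get/Set methods)
	id.AppendValues(values)    // sets "time", "level" and the View fields
	return values.Encode()
}

// idFromQueryString rebuilds an ID from a raw query string using FromValues.
func idFromQueryString(rawQuery string) (*mru.ID, error) {
	values, err := url.ParseQuery(rawQuery)
	if err != nil {
		return nil, err
	}
	var id mru.ID
	if err := id.FromValues(values); err != nil {
		return nil, err
	}
	return &id, nil
}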
- -package mru - -import ( - "encoding/binary" - "hash" - - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// LookupParams is used to specify constraints when performing an update lookup -// Limit defines whether or not the lookup should be limited -// If Limit is set to true then Max defines the amount of hops that can be performed -type LookupParams struct { - UpdateLookup - Limit uint32 -} - -// RootAddr returns the metadata chunk address -func (r *LookupParams) RootAddr() storage.Address { - return r.rootAddr -} - -func NewLookupParams(rootAddr storage.Address, period, version uint32, limit uint32) *LookupParams { - return &LookupParams{ - UpdateLookup: UpdateLookup{ - period: period, - version: version, - rootAddr: rootAddr, - }, - Limit: limit, - } -} - -// LookupLatest generates lookup parameters that look for the latest version of a resource -func LookupLatest(rootAddr storage.Address) *LookupParams { - return NewLookupParams(rootAddr, 0, 0, 0) -} - -// LookupLatestVersionInPeriod generates lookup parameters that look for the latest version of a resource in a given period -func LookupLatestVersionInPeriod(rootAddr storage.Address, period uint32) *LookupParams { - return NewLookupParams(rootAddr, period, 0, 0) -} - -// LookupVersion generates lookup parameters that look for a specific version of a resource -func LookupVersion(rootAddr storage.Address, period, version uint32) *LookupParams { - return NewLookupParams(rootAddr, period, version, 0) -} - -// UpdateLookup represents the components of a resource update search key -type UpdateLookup struct { - period uint32 - version uint32 - rootAddr storage.Address -} - -// 4 bytes period -// 4 bytes version -// storage.Keylength for rootAddr -const updateLookupLength = 4 + 4 + storage.AddressLength - -// UpdateAddr calculates the resource update chunk address corresponding to this lookup key -func (u *UpdateLookup) UpdateAddr() (updateAddr storage.Address) { - serializedData := make([]byte, updateLookupLength) - u.binaryPut(serializedData) - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - hasher.Write(serializedData) - return hasher.Sum(nil) -} - -// binaryPut serializes this UpdateLookup instance into the provided slice -func (u *UpdateLookup) binaryPut(serializedData []byte) error { - if len(serializedData) != updateLookupLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize UpdateLookup. Expected %d, got %d", updateLookupLength, len(serializedData)) - } - if len(u.rootAddr) != storage.AddressLength { - return NewError(ErrInvalidValue, "UpdateLookup.binaryPut called without rootAddr set") - } - binary.LittleEndian.PutUint32(serializedData[:4], u.period) - binary.LittleEndian.PutUint32(serializedData[4:8], u.version) - copy(serializedData[8:], u.rootAddr[:]) - return nil -} - -// binaryLength returns the expected size of this structure when serialized -func (u *UpdateLookup) binaryLength() int { - return updateLookupLength -} - -// binaryGet restores the current instance from the information contained in the passed slice -func (u *UpdateLookup) binaryGet(serializedData []byte) error { - if len(serializedData) != updateLookupLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to read UpdateLookup. 
Expected %d, got %d", updateLookupLength, len(serializedData)) - } - u.period = binary.LittleEndian.Uint32(serializedData[:4]) - u.version = binary.LittleEndian.Uint32(serializedData[4:8]) - u.rootAddr = storage.Address(make([]byte, storage.AddressLength)) - copy(u.rootAddr[:], serializedData[8:]) - return nil -} diff --git a/swarm/storage/mru/lookup/epoch.go b/swarm/storage/mru/lookup/epoch.go new file mode 100644 index 000000000..bafe95477 --- /dev/null +++ b/swarm/storage/mru/lookup/epoch.go @@ -0,0 +1,91 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package lookup + +import ( + "encoding/binary" + "errors" + "fmt" +) + +// Epoch represents a time slot at a particular frequency level +type Epoch struct { + Time uint64 `json:"time"` // Time stores the time at which the update or lookup takes place + Level uint8 `json:"level"` // Level indicates the frequency level as the exponent of a power of 2 +} + +// EpochID is a unique identifier for an Epoch, based on its level and base time. +type EpochID [8]byte + +// EpochLength stores the serialized binary length of an Epoch +const EpochLength = 8 + +// MaxTime contains the highest possible time value an Epoch can handle +const MaxTime uint64 = (1 << 56) - 1 + +// Base returns the base time of the Epoch +func (e *Epoch) Base() uint64 { + return getBaseTime(e.Time, e.Level) +} + +// ID Returns the unique identifier of this epoch +func (e *Epoch) ID() EpochID { + base := e.Base() + var id EpochID + binary.LittleEndian.PutUint64(id[:], base) + id[7] = e.Level + return id +} + +// MarshalBinary implements the encoding.BinaryMarshaller interface +func (e *Epoch) MarshalBinary() (data []byte, err error) { + b := make([]byte, 8) + binary.LittleEndian.PutUint64(b[:], e.Time) + b[7] = e.Level + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaller interface +func (e *Epoch) UnmarshalBinary(data []byte) error { + if len(data) != EpochLength { + return errors.New("Invalid data unmarshalling Epoch") + } + b := make([]byte, 8) + copy(b, data) + e.Level = b[7] + b[7] = 0 + e.Time = binary.LittleEndian.Uint64(b) + return nil +} + +// After returns true if this epoch occurs later or exactly at the other epoch. +func (e *Epoch) After(epoch Epoch) bool { + if e.Time == epoch.Time { + return e.Level < epoch.Level + } + return e.Time >= epoch.Time +} + +// Equals compares two epochs and returns true if they refer to the same time period. +func (e *Epoch) Equals(epoch Epoch) bool { + return e.Level == epoch.Level && e.Base() == epoch.Base() +} + +// String implements the Stringer interface. 
+func (e *Epoch) String() string { + return fmt.Sprintf("Epoch{Time:%d, Level:%d}", e.Time, e.Level) +} diff --git a/swarm/storage/mru/lookup/epoch_test.go b/swarm/storage/mru/lookup/epoch_test.go new file mode 100644 index 000000000..8c63ec6c2 --- /dev/null +++ b/swarm/storage/mru/lookup/epoch_test.go @@ -0,0 +1,57 @@ +package lookup_test + +import ( + "testing" + + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" +) + +func TestMarshallers(t *testing.T) { + + for i := uint64(1); i < lookup.MaxTime; i *= 3 { + e := lookup.Epoch{ + Time: i, + Level: uint8(i % 20), + } + b, err := e.MarshalBinary() + if err != nil { + t.Fatal(err) + } + var e2 lookup.Epoch + if err := e2.UnmarshalBinary(b); err != nil { + t.Fatal(err) + } + if e != e2 { + t.Fatal("Expected unmarshalled epoch to be equal to marshalled onet.Fatal(err)") + } + } + +} + +func TestAfter(t *testing.T) { + a := lookup.Epoch{ + Time: 5, + Level: 3, + } + b := lookup.Epoch{ + Time: 6, + Level: 3, + } + c := lookup.Epoch{ + Time: 6, + Level: 4, + } + + if b.After(a) != true { + t.Fatal("Expected 'after' to be true, got false") + } + + if b.After(b) != false { + t.Fatal("Expected 'after' to be false when both epochs are identical, got true") + } + + if b.After(c) != true { + t.Fatal("Expected 'after' to be true when both epochs have the same time but the level is lower in the first one, but got false") + } + +} diff --git a/swarm/storage/mru/lookup/lookup.go b/swarm/storage/mru/lookup/lookup.go new file mode 100644 index 000000000..c98248d70 --- /dev/null +++ b/swarm/storage/mru/lookup/lookup.go @@ -0,0 +1,180 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +/* +Package lookup defines resource lookup algorithms and provides tools to place updates +so they can be found +*/ +package lookup + +const maxuint64 = ^uint64(0) + +// LowestLevel establishes the frequency resolution of the lookup algorithm as a power of 2. +const LowestLevel uint8 = 0 // default is 0 (1 second) + +// HighestLevel sets the lowest frequency the algorithm will operate at, as a power of 2. +// 25 -> 2^25 equals to roughly one year. 
+const HighestLevel = 25 // default is 25 (~1 year) + +// DefaultLevel sets what level will be chosen to search when there is no hint +const DefaultLevel = HighestLevel + +//Algorithm is the function signature of a lookup algorithm +type Algorithm func(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) + +// Lookup finds the update with the highest timestamp that is smaller or equal than 'now' +// It takes a hint which should be the epoch where the last known update was +// If you don't know in what epoch the last update happened, simply submit lookup.NoClue +// read() will be called on each lookup attempt +// Returns an error only if read() returns an error +// Returns nil if an update was not found +var Lookup Algorithm = FluzCapacitorAlgorithm + +// ReadFunc is a handler called by Lookup each time it attempts to find a value +// It should return <nil> if a value is not found +// It should return <nil> if a value is found, but its timestamp is higher than "now" +// It should only return an error in case the handler wants to stop the +// lookup process entirely. +type ReadFunc func(epoch Epoch, now uint64) (interface{}, error) + +// NoClue is a hint that can be provided when the Lookup caller does not have +// a clue about where the last update may be +var NoClue = Epoch{} + +// getBaseTime returns the epoch base time of the given +// time and level +func getBaseTime(t uint64, level uint8) uint64 { + return t & (maxuint64 << level) +} + +// Hint creates a hint based only on the last known update time +func Hint(last uint64) Epoch { + return Epoch{ + Time: last, + Level: DefaultLevel, + } +} + +// GetNextLevel returns the frequency level a next update should be placed at, provided where +// the last update was and what time it is now. +// This is the first nonzero bit of the XOR of 'last' and 'now', counting from the highest significant bit +// but limited to not return a level that is smaller than the last-1 +func GetNextLevel(last Epoch, now uint64) uint8 { + // First XOR the last epoch base time with the current clock. + // This will set all the common most significant bits to zero. + mix := (last.Base() ^ now) + + // Then, make sure we stop the below loop before one level below the current, by setting + // that level's bit to 1. + // If the next level is lower than the current one, it must be exactly level-1 and not lower. + mix |= (1 << (last.Level - 1)) + + // if the last update was more than 2^highestLevel seconds ago, choose the highest level + if mix > (maxuint64 >> (64 - HighestLevel - 1)) { + return HighestLevel + } + + // set up a mask to scan for nonzero bits, starting at the highest level + mask := uint64(1 << (HighestLevel)) + + for i := uint8(HighestLevel); i > LowestLevel; i-- { + if mix&mask != 0 { // if we find a nonzero bit, this is the level the next update should be at. + return i + } + mask = mask >> 1 // move our bit one position to the right + } + return 0 +} + +// GetNextEpoch returns the epoch where the next update should be located +// according to where the previous update was +// and what time it is now. +func GetNextEpoch(last Epoch, now uint64) Epoch { + if last == NoClue { + return GetFirstEpoch(now) + } + level := GetNextLevel(last, now) + return Epoch{ + Level: level, + Time: now, + } +} + +// GetFirstEpoch returns the epoch where the first update should be located +// based on what time it is now. 
+func GetFirstEpoch(now uint64) Epoch { + return Epoch{Level: HighestLevel, Time: now} +} + +var worstHint = Epoch{Time: 0, Level: 63} + +// FluzCapacitorAlgorithm works by narrowing the epoch search area if an update is found +// going back and forth in time +// First, it will attempt to find an update where it should be now if the hint was +// really the last update. If that lookup fails, then the last update must be either the hint itself +// or the epochs right below. If however, that lookup succeeds, then the update must be +// that one or within the epochs right below. +// see the guide for a more graphical representation +func FluzCapacitorAlgorithm(now uint64, hint Epoch, read ReadFunc) (value interface{}, err error) { + var lastFound interface{} + var epoch Epoch + if hint == NoClue { + hint = worstHint + } + + t := now + + for { + epoch = GetNextEpoch(hint, t) + value, err = read(epoch, now) + if err != nil { + return nil, err + } + if value != nil { + lastFound = value + if epoch.Level == LowestLevel || epoch.Equals(hint) { + return value, nil + } + hint = epoch + continue + } + if epoch.Base() == hint.Base() { + if lastFound != nil { + return lastFound, nil + } + // we have reached the hint itself + if hint == worstHint { + return nil, nil + } + // check it out + value, err = read(hint, now) + if err != nil { + return nil, err + } + if value != nil { + return value, nil + } + // bad hint. + epoch = hint + hint = worstHint + } + base := epoch.Base() + if base == 0 { + return nil, nil + } + t = base - 1 + } +} diff --git a/swarm/storage/mru/lookup/lookup_test.go b/swarm/storage/mru/lookup/lookup_test.go new file mode 100644 index 000000000..ca0bb73bb --- /dev/null +++ b/swarm/storage/mru/lookup/lookup_test.go @@ -0,0 +1,416 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
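Before the test file, a distilled sketch (not part of this changeset) of the write/read cycle the tests below exercise: updates are placed with GetNextEpoch and found again with Lookup, here over a plain in-memory map keyed by EpochID. Times and payloads are arbitrary example values.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/storage/mru/lookup"
)

type entry struct {
	payload string
	time    uint64
}

func demo() error {
	store := make(map[lookup.EpochID]*entry)

	// Write three updates an hour apart, letting GetNextEpoch pick each slot.
	var last lookup.Epoch
	for _, t := range []uint64{1000000, 1003600, 1007200} {
		last = lookup.GetNextEpoch(last, t)
		store[last.ID()] = &entry{payload: fmt.Sprintf("update at %d", t), time: t}
	}

	// Read back the latest update as of t=1010000, with no hint about where it is.
	value, err := lookup.Lookup(1010000, lookup.NoClue, func(epoch lookup.Epoch, now uint64) (interface{}, error) {
		e := store[epoch.ID()]
		if e == nil || e.time > now {
			return nil, nil // nothing here, or newer than the target time: keep looking
		}
		return e, nil
	})
	if err != nil {
		return err
	}
	fmt.Println(value.(*entry).payload) // prints the t=1007200 update
	return nil
}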
+ +package lookup_test + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" +) + +type Data struct { + Payload uint64 + Time uint64 +} + +type Store map[lookup.EpochID]*Data + +func write(store Store, epoch lookup.Epoch, value *Data) { + log.Debug("Write: %d-%d, value='%d'\n", epoch.Base(), epoch.Level, value.Payload) + store[epoch.ID()] = value +} + +func update(store Store, last lookup.Epoch, now uint64, value *Data) lookup.Epoch { + var epoch lookup.Epoch + + epoch = lookup.GetNextEpoch(last, now) + + write(store, epoch, value) + + return epoch +} + +const Day = 60 * 60 * 24 +const Year = Day * 365 +const Month = Day * 30 + +func makeReadFunc(store Store, counter *int) lookup.ReadFunc { + return func(epoch lookup.Epoch, now uint64) (interface{}, error) { + *counter++ + data := store[epoch.ID()] + var valueStr string + if data != nil { + valueStr = fmt.Sprintf("%d", data.Payload) + } + log.Debug("Read: %d-%d, value='%s'\n", epoch.Base(), epoch.Level, valueStr) + if data != nil && data.Time <= now { + return data, nil + } + return nil, nil + } +} + +func TestLookup(t *testing.T) { + + store := make(Store) + readCount := 0 + readFunc := makeReadFunc(store, &readCount) + + // write an update every month for 12 months 3 years ago and then silence for two years + now := uint64(1533799046) + var epoch lookup.Epoch + + var lastData *Data + for i := uint64(0); i < 12; i++ { + t := uint64(now - Year*3 + i*Month) + data := Data{ + Payload: t, //our "payload" will be the timestamp itself. + Time: t, + } + epoch = update(store, epoch, t, &data) + lastData = &data + } + + // try to get the last value + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + readCountWithoutHint := readCount + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + // reset the read count for the next test + readCount = 0 + // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update + value, err = lookup.Lookup(now, epoch, readFunc) + if err != nil { + t.Fatal(err) + } + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + if readCount > readCountWithoutHint { + t.Fatalf("Expected lookup to complete with fewer or same reads than %d since we provided a hint. Did %d reads.", readCountWithoutHint, readCount) + } + + // try to get an intermediate value + // if we look for a value in now - Year*3 + 6*Month, we should get that value + // Since the "payload" is the timestamp itself, we can check this. 
+ + expectedTime := now - Year*3 + 6*Month + + value, err = lookup.Lookup(expectedTime, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + data, ok := value.(*Data) + + if !ok { + t.Fatal("Expected value to contain data") + } + + if data.Time != expectedTime { + t.Fatalf("Expected value timestamp to be %d, got %d", data.Time, expectedTime) + } + +} + +func TestOneUpdateAt0(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + var epoch lookup.Epoch + data := Data{ + Payload: 79, + Time: 0, + } + update(store, epoch, 0, &data) + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + if value != &data { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", data, value) + } +} + +// Tests the update is found even when a bad hint is given +func TestBadHint(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + var epoch lookup.Epoch + data := Data{ + Payload: 79, + Time: 0, + } + + // place an update for t=1200 + update(store, epoch, 1200, &data) + + // come up with some evil hint + badHint := lookup.Epoch{ + Level: 18, + Time: 1200000000, + } + + value, err := lookup.Lookup(now, badHint, readFunc) + if err != nil { + t.Fatal(err) + } + if value != &data { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", data, value) + } +} + +func TestLookupFail(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + // don't write anything and try to look up. + // we're testing we don't get stuck in a loop + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + if value != nil { + t.Fatal("Expected value to be nil, since the update should've failed") + } + + expectedReads := now/(1<<lookup.HighestLevel) + 1 + if uint64(readCount) != expectedReads { + t.Fatalf("Expected lookup to fail after %d reads. Did %d reads.", expectedReads, readCount) + } +} + +func TestHighFreqUpdates(t *testing.T) { + + store := make(Store) + readCount := 0 + + readFunc := makeReadFunc(store, &readCount) + now := uint64(1533903729) + + // write an update every second for the last 1000 seconds + var epoch lookup.Epoch + + var lastData *Data + for i := uint64(0); i <= 994; i++ { + T := uint64(now - 1000 + i) + data := Data{ + Payload: T, //our "payload" will be the timestamp itself. + Time: T, + } + epoch = update(store, epoch, T, &data) + lastData = &data + } + + value, err := lookup.Lookup(lastData.Time, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + readCountWithoutHint := readCount + // reset the read count for the next test + readCount = 0 + // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update + value, err = lookup.Lookup(now, epoch, readFunc) + if err != nil { + t.Fatal(err) + } + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + if readCount > readCountWithoutHint { + t.Fatalf("Expected lookup to complete with fewer or equal reads than %d since we provided a hint. 
Did %d reads.", readCountWithoutHint, readCount) + } + + for i := uint64(0); i <= 994; i++ { + T := uint64(now - 1000 + i) // update every second for the last 1000 seconds + value, err := lookup.Lookup(T, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + data, _ := value.(*Data) + if data == nil { + t.Fatalf("Expected lookup to return %d, got nil", T) + } + if data.Payload != T { + t.Fatalf("Expected lookup to return %d, got %d", T, data.Time) + } + } +} + +func TestSparseUpdates(t *testing.T) { + + store := make(Store) + readCount := 0 + readFunc := makeReadFunc(store, &readCount) + + // write an update every 5 years 3 times starting in Jan 1st 1970 and then silence + + now := uint64(1533799046) + var epoch lookup.Epoch + + var lastData *Data + for i := uint64(0); i < 5; i++ { + T := uint64(Year * 5 * i) // write an update every 5 years 3 times starting in Jan 1st 1970 and then silence + data := Data{ + Payload: T, //our "payload" will be the timestamp itself. + Time: T, + } + epoch = update(store, epoch, T, &data) + lastData = &data + } + + // try to get the last value + + value, err := lookup.Lookup(now, lookup.NoClue, readFunc) + if err != nil { + t.Fatal(err) + } + + readCountWithoutHint := readCount + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + // reset the read count for the next test + readCount = 0 + // Provide a hint to get a faster lookup. In particular, we give the exact location of the last update + value, err = lookup.Lookup(now, epoch, readFunc) + if err != nil { + t.Fatal(err) + } + + if value != lastData { + t.Fatalf("Expected lookup to return the last written value: %v. Got %v", lastData, value) + } + + if readCount > readCountWithoutHint { + t.Fatalf("Expected lookup to complete with fewer reads than %d since we provided a hint. 
Did %d reads.", readCountWithoutHint, readCount) + } + +} + +// testG will hold precooked test results +// fields are abbreviated to reduce the size of the literal below +type testG struct { + e lookup.Epoch // last + n uint64 // next level + x uint8 // expected result +} + +// test cases +var testGetNextLevelCases []testG = []testG{{e: lookup.Epoch{Time: 989875233, Level: 12}, n: 989875233, x: 11}, {e: lookup.Epoch{Time: 995807650, Level: 18}, n: 995598156, x: 19}, {e: lookup.Epoch{Time: 969167082, Level: 0}, n: 968990357, x: 18}, {e: lookup.Epoch{Time: 993087628, Level: 14}, n: 992987044, x: 20}, {e: lookup.Epoch{Time: 963364631, Level: 20}, n: 963364630, x: 19}, {e: lookup.Epoch{Time: 963497510, Level: 16}, n: 963370732, x: 18}, {e: lookup.Epoch{Time: 955421349, Level: 22}, n: 955421348, x: 21}, {e: lookup.Epoch{Time: 968220379, Level: 15}, n: 968220378, x: 14}, {e: lookup.Epoch{Time: 939129014, Level: 6}, n: 939128771, x: 11}, {e: lookup.Epoch{Time: 907847903, Level: 6}, n: 907791833, x: 18}, {e: lookup.Epoch{Time: 910835564, Level: 15}, n: 910835564, x: 14}, {e: lookup.Epoch{Time: 913578333, Level: 22}, n: 881808431, x: 25}, {e: lookup.Epoch{Time: 895818460, Level: 3}, n: 895818132, x: 9}, {e: lookup.Epoch{Time: 903843025, Level: 24}, n: 895609561, x: 23}, {e: lookup.Epoch{Time: 877889433, Level: 13}, n: 877877093, x: 15}, {e: lookup.Epoch{Time: 901450396, Level: 10}, n: 901450058, x: 9}, {e: lookup.Epoch{Time: 925179910, Level: 3}, n: 925168393, x: 16}, {e: lookup.Epoch{Time: 913485477, Level: 21}, n: 913485476, x: 20}, {e: lookup.Epoch{Time: 924462991, Level: 18}, n: 924462990, x: 17}, {e: lookup.Epoch{Time: 941175128, Level: 13}, n: 941175127, x: 12}, {e: lookup.Epoch{Time: 920126583, Level: 3}, n: 920100782, x: 19}, {e: lookup.Epoch{Time: 932403200, Level: 9}, n: 932279891, x: 17}, {e: lookup.Epoch{Time: 948284931, Level: 2}, n: 948284921, x: 9}, {e: lookup.Epoch{Time: 953540997, Level: 7}, n: 950547986, x: 22}, {e: lookup.Epoch{Time: 926639837, Level: 18}, n: 918608882, x: 24}, {e: lookup.Epoch{Time: 954637598, Level: 1}, n: 954578761, x: 17}, {e: lookup.Epoch{Time: 943482981, Level: 10}, n: 942924151, x: 19}, {e: lookup.Epoch{Time: 963580771, Level: 7}, n: 963580771, x: 6}, {e: lookup.Epoch{Time: 993744930, Level: 7}, n: 993690858, x: 16}, {e: lookup.Epoch{Time: 1018890213, Level: 12}, n: 1018890212, x: 11}, {e: lookup.Epoch{Time: 1030309411, Level: 2}, n: 1030309227, x: 9}, {e: lookup.Epoch{Time: 1063204997, Level: 20}, n: 1063204996, x: 19}, {e: lookup.Epoch{Time: 1094340832, Level: 6}, n: 1094340633, x: 7}, {e: lookup.Epoch{Time: 1077880597, Level: 10}, n: 1075914292, x: 20}, {e: lookup.Epoch{Time: 1051114957, Level: 18}, n: 1051114957, x: 17}, {e: lookup.Epoch{Time: 1045649701, Level: 22}, n: 1045649700, x: 21}, {e: lookup.Epoch{Time: 1066198885, Level: 14}, n: 1066198884, x: 13}, {e: lookup.Epoch{Time: 1053231952, Level: 1}, n: 1053210845, x: 16}, {e: lookup.Epoch{Time: 1068763404, Level: 14}, n: 1068675428, x: 18}, {e: lookup.Epoch{Time: 1039042173, Level: 15}, n: 1038973110, x: 17}, {e: lookup.Epoch{Time: 1050747636, Level: 6}, n: 1050747364, x: 9}, {e: lookup.Epoch{Time: 1030034434, Level: 23}, n: 1030034433, x: 22}, {e: lookup.Epoch{Time: 1003783425, Level: 18}, n: 1003783424, x: 17}, {e: lookup.Epoch{Time: 988163976, Level: 15}, n: 988084064, x: 17}, {e: lookup.Epoch{Time: 1007222377, Level: 15}, n: 1007222377, x: 14}, {e: lookup.Epoch{Time: 1001211375, Level: 13}, n: 1001208178, x: 14}, {e: lookup.Epoch{Time: 997623199, Level: 8}, n: 997623198, x: 7}, {e: 
lookup.Epoch{Time: 1026283830, Level: 10}, n: 1006681704, x: 24}, {e: lookup.Epoch{Time: 1019421907, Level: 20}, n: 1019421906, x: 19}, {e: lookup.Epoch{Time: 1043154306, Level: 16}, n: 1043108343, x: 16}, {e: lookup.Epoch{Time: 1075643767, Level: 17}, n: 1075325898, x: 18}, {e: lookup.Epoch{Time: 1043726309, Level: 20}, n: 1043726308, x: 19}, {e: lookup.Epoch{Time: 1056415324, Level: 17}, n: 1056415324, x: 16}, {e: lookup.Epoch{Time: 1088650219, Level: 13}, n: 1088650218, x: 12}, {e: lookup.Epoch{Time: 1088551662, Level: 7}, n: 1088543355, x: 13}, {e: lookup.Epoch{Time: 1069667265, Level: 6}, n: 1069667075, x: 7}, {e: lookup.Epoch{Time: 1079145970, Level: 18}, n: 1079145969, x: 17}, {e: lookup.Epoch{Time: 1083338876, Level: 7}, n: 1083338875, x: 6}, {e: lookup.Epoch{Time: 1051581086, Level: 4}, n: 1051568869, x: 14}, {e: lookup.Epoch{Time: 1028430882, Level: 4}, n: 1028430864, x: 5}, {e: lookup.Epoch{Time: 1057356462, Level: 1}, n: 1057356417, x: 5}, {e: lookup.Epoch{Time: 1033104266, Level: 0}, n: 1033097479, x: 13}, {e: lookup.Epoch{Time: 1031391367, Level: 11}, n: 1031387304, x: 14}, {e: lookup.Epoch{Time: 1049781164, Level: 15}, n: 1049781163, x: 14}, {e: lookup.Epoch{Time: 1027271628, Level: 12}, n: 1027271627, x: 11}, {e: lookup.Epoch{Time: 1057270560, Level: 23}, n: 1057270560, x: 22}, {e: lookup.Epoch{Time: 1047501317, Level: 15}, n: 1047501317, x: 14}, {e: lookup.Epoch{Time: 1058349035, Level: 11}, n: 1045175573, x: 24}, {e: lookup.Epoch{Time: 1057396147, Level: 20}, n: 1057396147, x: 19}, {e: lookup.Epoch{Time: 1048906375, Level: 18}, n: 1039616919, x: 25}, {e: lookup.Epoch{Time: 1074294831, Level: 20}, n: 1074294831, x: 19}, {e: lookup.Epoch{Time: 1088946052, Level: 1}, n: 1088917364, x: 14}, {e: lookup.Epoch{Time: 1112337595, Level: 17}, n: 1111008110, x: 22}, {e: lookup.Epoch{Time: 1099990284, Level: 5}, n: 1099968370, x: 15}, {e: lookup.Epoch{Time: 1087036441, Level: 16}, n: 1053967855, x: 25}, {e: lookup.Epoch{Time: 1069225185, Level: 8}, n: 1069224660, x: 10}, {e: lookup.Epoch{Time: 1057505479, Level: 9}, n: 1057505170, x: 14}, {e: lookup.Epoch{Time: 1072381377, Level: 12}, n: 1065950959, x: 22}, {e: lookup.Epoch{Time: 1093887139, Level: 8}, n: 1093863305, x: 14}, {e: lookup.Epoch{Time: 1082366510, Level: 24}, n: 1082366510, x: 23}, {e: lookup.Epoch{Time: 1103231132, Level: 14}, n: 1102292201, x: 22}, {e: lookup.Epoch{Time: 1094502355, Level: 3}, n: 1094324652, x: 18}, {e: lookup.Epoch{Time: 1068488344, Level: 12}, n: 1067577330, x: 19}, {e: lookup.Epoch{Time: 1050278233, Level: 12}, n: 1050278232, x: 11}, {e: lookup.Epoch{Time: 1047660768, Level: 5}, n: 1047652137, x: 17}, {e: lookup.Epoch{Time: 1060116167, Level: 11}, n: 1060114091, x: 12}, {e: lookup.Epoch{Time: 1068149392, Level: 21}, n: 1052074801, x: 24}, {e: lookup.Epoch{Time: 1081934120, Level: 6}, n: 1081933847, x: 8}, {e: lookup.Epoch{Time: 1107943693, Level: 16}, n: 1107096139, x: 25}, {e: lookup.Epoch{Time: 1131571649, Level: 9}, n: 1131570428, x: 11}, {e: lookup.Epoch{Time: 1123139367, Level: 0}, n: 1122912198, x: 20}, {e: lookup.Epoch{Time: 1121144423, Level: 6}, n: 1120568289, x: 20}, {e: lookup.Epoch{Time: 1089932411, Level: 17}, n: 1089932410, x: 16}, {e: lookup.Epoch{Time: 1104899012, Level: 22}, n: 1098978789, x: 22}, {e: lookup.Epoch{Time: 1094588059, Level: 21}, n: 1094588059, x: 20}, {e: lookup.Epoch{Time: 1114987438, Level: 24}, n: 1114987437, x: 23}, {e: lookup.Epoch{Time: 1084186305, Level: 7}, n: 1084186241, x: 6}, {e: lookup.Epoch{Time: 1058827111, Level: 8}, n: 1058826504, x: 9}, {e: 
lookup.Epoch{Time: 1090679810, Level: 12}, n: 1090616539, x: 17}, {e: lookup.Epoch{Time: 1084299475, Level: 23}, n: 1084299475, x: 22}} + +func TestGetNextLevel(t *testing.T) { + + // First, test well-known cases + last := lookup.Epoch{ + Time: 1533799046, + Level: 5, + } + + level := lookup.GetNextLevel(last, last.Time) + expected := uint8(4) + if level != expected { + t.Fatalf("Expected GetNextLevel to return %d for same-time updates at a nonzero level, got %d", expected, level) + } + + level = lookup.GetNextLevel(last, last.Time+(1<<lookup.HighestLevel)+3000) + expected = lookup.HighestLevel + if level != expected { + t.Fatalf("Expected GetNextLevel to return %d for updates set 2^lookup.HighestLevel seconds away, got %d", expected, level) + } + + level = lookup.GetNextLevel(last, last.Time+(1<<last.Level)) + expected = last.Level + if level != expected { + t.Fatalf("Expected GetNextLevel to return %d for updates set 2^last.Level seconds away, got %d", expected, level) + } + + last.Level = 0 + level = lookup.GetNextLevel(last, last.Time) + expected = 0 + if level != expected { + t.Fatalf("Expected GetNextLevel to return %d for same-time updates at a zero level, got %d", expected, level) + } + + // run a batch of 100 cooked tests + for _, s := range testGetNextLevelCases { + level := lookup.GetNextLevel(s.e, s.n) + if level != s.x { + t.Fatalf("Expected GetNextLevel to return %d for last=%s when now=%d, got %d", s.x, s.e.String(), s.n, level) + } + } + +} + +// cookGetNextLevelTests is used to generate a deterministic +// set of cases for TestGetNextLevel and thus "freeze" its current behavior +func CookGetNextLevelTests(t *testing.T) { + st := "" + var last lookup.Epoch + last.Time = 1000000000 + var now uint64 + var expected uint8 + for i := 0; i < 100; i++ { + last.Time += uint64(rand.Intn(1<<26)) - (1 << 25) + last.Level = uint8(rand.Intn(25)) + v := last.Level + uint8(rand.Intn(lookup.HighestLevel)) + if v > lookup.HighestLevel { + v = 0 + } + now = last.Time + uint64(rand.Intn(1<<v+1)) - (1 << v) + expected = lookup.GetNextLevel(last, now) + st = fmt.Sprintf("%s,testG{e:lookup.Epoch{Time:%d, Level:%d}, n:%d, x:%d}", st, last.Time, last.Level, now, expected) + } + fmt.Println(st) +} diff --git a/swarm/storage/mru/lookup_test.go b/swarm/storage/mru/lookup_test.go deleted file mode 100644 index b66b200a3..000000000 --- a/swarm/storage/mru/lookup_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package mru - -import ( - "bytes" - "testing" - - "github.com/ethereum/go-ethereum/common/hexutil" -) - -func getTestUpdateLookup() *UpdateLookup { - metadata := *getTestMetadata() - rootAddr, _, _, _ := metadata.serializeAndHash() - return &UpdateLookup{ - period: 79, - version: 2010, - rootAddr: rootAddr, - } -} - -func compareUpdateLookup(a, b *UpdateLookup) bool { - return a.version == b.version && - a.period == b.period && - bytes.Equal(a.rootAddr, b.rootAddr) -} - -func TestUpdateLookupUpdateAddr(t *testing.T) { - ul := getTestUpdateLookup() - updateAddr := ul.UpdateAddr() - compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8fbc8d4777ef6da790257eda80ab4321fabd08cbdbe67e4e3da6caca386d64e0") -} - -func TestUpdateLookupSerializer(t *testing.T) { - serializedUpdateLookup := make([]byte, updateLookupLength) - ul := getTestUpdateLookup() - if err := ul.binaryPut(serializedUpdateLookup); err != nil { - t.Fatal(err) - } - compareByteSliceToExpectedHex(t, "serializedUpdateLookup", serializedUpdateLookup, "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb") - - // 
set receiving slice to the wrong size - serializedUpdateLookup = make([]byte, updateLookupLength+7) - if err := ul.binaryPut(serializedUpdateLookup); err == nil { - t.Fatalf("Expected UpdateLookup.binaryPut to fail when receiving slice has a length != %d", updateLookupLength) - } - - // set rootAddr to an invalid length - ul.rootAddr = []byte{1, 2, 3, 4} - serializedUpdateLookup = make([]byte, updateLookupLength) - if err := ul.binaryPut(serializedUpdateLookup); err == nil { - t.Fatal("Expected UpdateLookup.binaryPut to fail when rootAddr is not of the correct size") - } -} - -func TestUpdateLookupDeserializer(t *testing.T) { - serializedUpdateLookup, _ := hexutil.Decode("0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb") - var recoveredUpdateLookup UpdateLookup - if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil { - t.Fatal(err) - } - originalUpdateLookup := *getTestUpdateLookup() - if !compareUpdateLookup(&originalUpdateLookup, &recoveredUpdateLookup) { - t.Fatalf("Expected recovered UpdateLookup to match") - } - - // set source slice to the wrong size - serializedUpdateLookup = make([]byte, updateLookupLength+4) - if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err == nil { - t.Fatalf("Expected UpdateLookup.binaryGet to fail when source slice has a length != %d", updateLookupLength) - } -} - -func TestUpdateLookupSerializeDeserialize(t *testing.T) { - serializedUpdateLookup := make([]byte, updateLookupLength) - originalUpdateLookup := getTestUpdateLookup() - if err := originalUpdateLookup.binaryPut(serializedUpdateLookup); err != nil { - t.Fatal(err) - } - var recoveredUpdateLookup UpdateLookup - if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil { - t.Fatal(err) - } - if !compareUpdateLookup(originalUpdateLookup, &recoveredUpdateLookup) { - t.Fatalf("Expected recovered UpdateLookup to match") - } -} diff --git a/swarm/storage/mru/metadata.go b/swarm/storage/mru/metadata.go deleted file mode 100644 index 509114895..000000000 --- a/swarm/storage/mru/metadata.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
- -package mru - -import ( - "encoding/binary" - "hash" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// ResourceMetadata encapsulates the immutable information about a mutable resource :) -// once serialized into a chunk, the resource can be retrieved by knowing its content-addressed rootAddr -type ResourceMetadata struct { - StartTime Timestamp // time at which the resource starts to be valid - Frequency uint64 // expected update frequency for the resource - Name string // name of the resource, for the reference of the user or to disambiguate resources with same starttime, frequency, owneraddr - Owner common.Address // public address of the resource owner -} - -const frequencyLength = 8 // sizeof(uint64) -const nameLengthLength = 1 - -// Resource metadata chunk layout: -// 4 prefix bytes (chunkPrefixLength). The first two set to zero. The second two indicate the length -// Timestamp: timestampLength bytes -// frequency: frequencyLength bytes -// name length: nameLengthLength bytes -// name (variable length, can be empty, up to 255 bytes) -// ownerAddr: common.AddressLength -const minimumMetadataLength = chunkPrefixLength + timestampLength + frequencyLength + nameLengthLength + common.AddressLength - -// binaryGet populates the resource metadata from a byte array -func (r *ResourceMetadata) binaryGet(serializedData []byte) error { - if len(serializedData) < minimumMetadataLength { - return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short. Expected at least %d. Got %d.", minimumMetadataLength, len(serializedData)) - } - - // first two bytes must be set to zero to indicate metadata chunks, so enforce this. - if serializedData[0] != 0 || serializedData[1] != 0 { - return NewError(ErrCorruptData, "Invalid metadata chunk") - } - - cursor := 2 - metadataLength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])) // metadataLength does not include the 4 prefix bytes - if metadataLength+chunkPrefixLength != len(serializedData) { - return NewErrorf(ErrCorruptData, "Incorrect declared metadata length. Expected %d, got %d.", metadataLength+chunkPrefixLength, len(serializedData)) - } - - cursor += 2 - - if err := r.StartTime.binaryGet(serializedData[cursor : cursor+timestampLength]); err != nil { - return err - } - cursor += timestampLength - - r.Frequency = binary.LittleEndian.Uint64(serializedData[cursor : cursor+frequencyLength]) - cursor += frequencyLength - - nameLength := int(serializedData[cursor]) - if nameLength+minimumMetadataLength > len(serializedData) { - return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short when decoding resource name. Expected at least %d. Got %d.", nameLength+minimumMetadataLength, len(serializedData)) - } - cursor++ - r.Name = string(serializedData[cursor : cursor+nameLength]) - cursor += nameLength - - copy(r.Owner[:], serializedData[cursor:]) - cursor += common.AddressLength - if cursor != len(serializedData) { - return NewErrorf(ErrInvalidValue, "Metadata chunk has leftover data after deserialization. 
%d left to read", len(serializedData)-cursor) - } - return nil -} - -// binaryPut encodes the metadata into a byte array -func (r *ResourceMetadata) binaryPut(serializedData []byte) error { - metadataChunkLength := r.binaryLength() - if len(serializedData) != metadataChunkLength { - return NewErrorf(ErrInvalidValue, "Need a slice of exactly %d bytes to serialize this metadata, but got a slice of size %d.", metadataChunkLength, len(serializedData)) - } - - // root chunk has first two bytes both set to 0, which distinguishes from update bytes - // therefore, skip the first two bytes of a zero-initialized array. - cursor := 2 - binary.LittleEndian.PutUint16(serializedData[cursor:cursor+2], uint16(metadataChunkLength-chunkPrefixLength)) // metadataLength does not include the 4 prefix bytes - cursor += 2 - - r.StartTime.binaryPut(serializedData[cursor : cursor+timestampLength]) - cursor += timestampLength - - binary.LittleEndian.PutUint64(serializedData[cursor:cursor+frequencyLength], r.Frequency) - cursor += frequencyLength - - // Encode the name string as a 1 byte length followed by the encoded string. - // Longer strings will be truncated. - nameLength := len(r.Name) - if nameLength > 255 { - nameLength = 255 - } - serializedData[cursor] = uint8(nameLength) - cursor++ - copy(serializedData[cursor:cursor+nameLength], []byte(r.Name[:nameLength])) - cursor += nameLength - - copy(serializedData[cursor:cursor+common.AddressLength], r.Owner[:]) - cursor += common.AddressLength - - return nil -} - -func (r *ResourceMetadata) binaryLength() int { - return minimumMetadataLength + len(r.Name) -} - -// serializeAndHash returns the root chunk addr and metadata hash that help identify and ascertain ownership of this resource -// returns the serialized metadata as a byproduct of having to hash it. 
-func (r *ResourceMetadata) serializeAndHash() (rootAddr, metaHash []byte, chunkData []byte, err error) { - - chunkData = make([]byte, r.binaryLength()) - if err := r.binaryPut(chunkData); err != nil { - return nil, nil, nil, err - } - rootAddr, metaHash = metadataHash(chunkData) - return rootAddr, metaHash, chunkData, nil - -} - -// creates a metadata chunk out of a resourceMetadata structure -func (metadata *ResourceMetadata) newChunk() (chunk storage.Chunk, metaHash []byte, err error) { - // the metadata chunk contains a timestamp of when the resource starts to be valid - // and also how frequently it is expected to be updated - // from this we know at what time we should look for updates, and how often - // it also contains the name of the resource, so we know what resource we are working with - - // the key (rootAddr) of the metadata chunk is content-addressed - // if it wasn't we couldn't replace it later - // resolving this relationship is left up to external agents (for example ENS) - rootAddr, metaHash, chunkData, err := metadata.serializeAndHash() - if err != nil { - return nil, nil, err - } - - // make the chunk and send it to swarm - chunk = storage.NewChunk(rootAddr, chunkData) - - return chunk, metaHash, nil -} - -// metadataHash returns the metadata chunk root address and metadata hash -// that help identify and ascertain ownership of this resource -// We compute it as rootAddr = H(ownerAddr, H(metadata)) -// Where H() is SHA3 -// metadata are all the metadata fields, except ownerAddr -// ownerAddr is the public address of the resource owner -// Update chunks must carry a rootAddr reference and metaHash in order to be verified -// This way, a node that receives an update can check the signature, recover the public address -// and check the ownership by computing H(ownerAddr, metaHash) and comparing it to the rootAddr -// the resource is claiming to update without having to lookup the metadata chunk. -// see verifyResourceOwnerhsip in signedupdate.go -func metadataHash(chunkData []byte) (rootAddr, metaHash []byte) { - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - hasher.Write(chunkData[:len(chunkData)-common.AddressLength]) - metaHash = hasher.Sum(nil) - hasher.Reset() - hasher.Write(metaHash) - hasher.Write(chunkData[len(chunkData)-common.AddressLength:]) - rootAddr = hasher.Sum(nil) - return -} diff --git a/swarm/storage/mru/metadata_test.go b/swarm/storage/mru/metadata_test.go deleted file mode 100644 index abbac6e3e..000000000 --- a/swarm/storage/mru/metadata_test.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
-package mru - -import ( - "testing" - - "github.com/ethereum/go-ethereum/common/hexutil" -) - -func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) { - if hexutil.Encode(actualValue) != expectedHex { - t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue)) - } -} - -func getTestMetadata() *ResourceMetadata { - return &ResourceMetadata{ - Name: "world news report, every hour, on the hour", - StartTime: Timestamp{ - Time: 1528880400, - }, - Frequency: 3600, - Owner: newCharlieSigner().Address(), - } -} - -func TestMetadataSerializerDeserializer(t *testing.T) { - metadata := *getTestMetadata() - - rootAddr, metaHash, chunkData, err := metadata.serializeAndHash() // creates hashes and marshals, in one go - if err != nil { - t.Fatal(err) - } - const expectedRootAddr = "0xfb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb" - const expectedMetaHash = "0xf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0" - const expectedChunkData = "0x00004f0010dd205b00000000100e0000000000002a776f726c64206e657773207265706f72742c20657665727920686f75722c206f6e2074686520686f7572876a8936a7cd0b79ef0735ad0896c1afe278781c" - - compareByteSliceToExpectedHex(t, "rootAddr", rootAddr, expectedRootAddr) - compareByteSliceToExpectedHex(t, "metaHash", metaHash, expectedMetaHash) - compareByteSliceToExpectedHex(t, "chunkData", chunkData, expectedChunkData) - - recoveredMetadata := ResourceMetadata{} - recoveredMetadata.binaryGet(chunkData) - - if recoveredMetadata != metadata { - t.Fatalf("Expected that the recovered metadata equals the marshalled metadata") - } - - // we are going to mess with the data, so create a backup to go back to it for the next test - backup := make([]byte, len(chunkData)) - copy(backup, chunkData) - - chunkData = []byte{1, 2, 3} - if err := recoveredMetadata.binaryGet(chunkData); err == nil { - t.Fatal("Expected binaryGet to fail since chunk is too small") - } - - // restore backup - chunkData = make([]byte, len(backup)) - copy(chunkData, backup) - - // mess with the prefix so it is not zero - chunkData[0] = 7 - chunkData[1] = 9 - - if err := recoveredMetadata.binaryGet(chunkData); err == nil { - t.Fatal("Expected binaryGet to fail since prefix bytes are not zero") - } - - // restore backup - chunkData = make([]byte, len(backup)) - copy(chunkData, backup) - - // mess with the length header to trigger an error - chunkData[2] = 255 - chunkData[3] = 44 - if err := recoveredMetadata.binaryGet(chunkData); err == nil { - t.Fatal("Expected binaryGet to fail since header length does not match") - } - - // restore backup - chunkData = make([]byte, len(backup)) - copy(chunkData, backup) - - // mess with name length header to trigger a chunk too short error - chunkData[20] = 255 - if err := recoveredMetadata.binaryGet(chunkData); err == nil { - t.Fatal("Expected binaryGet to fail since name length is incorrect") - } - - // restore backup - chunkData = make([]byte, len(backup)) - copy(chunkData, backup) - - // mess with name length header to trigger an leftover bytes to read error - chunkData[20] = 3 - if err := recoveredMetadata.binaryGet(chunkData); err == nil { - t.Fatal("Expected binaryGet to fail since name length is too small") - } -} - -func TestMetadataSerializerLengthCheck(t *testing.T) { - metadata := *getTestMetadata() - - // make a slice that is too small to contain the metadata - serializedMetadata := make([]byte, 4) - - if err := 
metadata.binaryPut(serializedMetadata); err == nil { - t.Fatal("Expected metadata.binaryPut to fail, since target slice is too small") - } - -} diff --git a/swarm/storage/mru/query.go b/swarm/storage/mru/query.go new file mode 100644 index 000000000..9a0f261c4 --- /dev/null +++ b/swarm/storage/mru/query.go @@ -0,0 +1,78 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package mru + +import ( + "fmt" + "strconv" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" +) + +// Query is used to specify constraints when performing an update lookup +// TimeLimit indicates an upper bound for the search. Set to 0 for "now" +type Query struct { + View + Hint lookup.Epoch + TimeLimit uint64 +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (q *Query) FromValues(values Values) error { + time, _ := strconv.ParseUint(values.Get("time"), 10, 64) + q.TimeLimit = uint64(time) + + level, _ := strconv.ParseUint(values.Get("hint.level"), 10, 32) + q.Hint.Level = uint8(level) + q.Hint.Time, _ = strconv.ParseUint(values.Get("hint.time"), 10, 64) + if q.View.User == (common.Address{}) { + return q.View.FromValues(values) + } + return nil +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (q *Query) AppendValues(values Values) { + if q.TimeLimit != 0 { + values.Set("time", fmt.Sprintf("%d", q.TimeLimit)) + } + if q.Hint.Level != 0 { + values.Set("hint.level", fmt.Sprintf("%d", q.Hint.Level)) + } + if q.Hint.Time != 0 { + values.Set("hint.time", fmt.Sprintf("%d", q.Hint.Time)) + } + q.View.AppendValues(values) +} + +// NewQuery constructs an Query structure to find updates on or before `time` +// if time == 0, the latest update will be looked up +func NewQuery(view *View, time uint64, hint lookup.Epoch) *Query { + return &Query{ + TimeLimit: time, + View: *view, + Hint: hint, + } +} + +// NewQueryLatest generates lookup parameters that look for the latest version of a resource +func NewQueryLatest(view *View, hint lookup.Epoch) *Query { + return NewQuery(view, 0, hint) +} diff --git a/swarm/storage/mru/query_test.go b/swarm/storage/mru/query_test.go new file mode 100644 index 000000000..189a465d6 --- /dev/null +++ b/swarm/storage/mru/query_test.go @@ -0,0 +1,38 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package mru + +import ( + "testing" +) + +func getTestQuery() *Query { + ul := getTestID() + return &Query{ + TimeLimit: 5000, + View: ul.View, + Hint: ul.Epoch, + } +} + +func TestQueryValues(t *testing.T) { + var expected = KV{"hint.level": "25", "hint.time": "1000", "time": "5000", "topic": "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000", "user": "0x876A8936A7Cd0b79Ef0735AD0896c1AFe278781c"} + + query := getTestQuery() + testValueSerializer(t, query, expected) + +} diff --git a/swarm/storage/mru/request.go b/swarm/storage/mru/request.go index af2ccf5c7..f6d0f38ff 100644 --- a/swarm/storage/mru/request.go +++ b/swarm/storage/mru/request.go @@ -19,157 +19,218 @@ package mru import ( "bytes" "encoding/json" + "hash" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" ) +// Request represents an update and/or resource create message +type Request struct { + ResourceUpdate // actual content that will be put on the chunk, less signature + Signature *Signature + idAddr storage.Address // cached chunk address for the update (not serialized, for internal use) + binaryData []byte // cached serialized data (does not get serialized again!, for efficiency/internal use) +} + // updateRequestJSON represents a JSON-serialized UpdateRequest type updateRequestJSON struct { - Name string `json:"name,omitempty"` - Frequency uint64 `json:"frequency,omitempty"` - StartTime uint64 `json:"startTime,omitempty"` - Owner string `json:"ownerAddr,omitempty"` - RootAddr string `json:"rootAddr,omitempty"` - MetaHash string `json:"metaHash,omitempty"` - Version uint32 `json:"version,omitempty"` - Period uint32 `json:"period,omitempty"` - Data string `json:"data,omitempty"` - Multihash bool `json:"multiHash"` - Signature string `json:"signature,omitempty"` + ID + ProtocolVersion uint8 `json:"protocolVersion"` + Data string `json:"data,omitempty"` + Signature string `json:"signature,omitempty"` } -// Request represents an update and/or resource create message -type Request struct { - SignedResourceUpdate - metadata ResourceMetadata - isNew bool +// Request layout +// resourceUpdate bytes +// SignatureLength bytes +const minimumSignedUpdateLength = minimumUpdateDataLength + signatureLength + +// NewFirstRequest returns a ready to sign request to publish a first update +func NewFirstRequest(topic Topic) *Request { + + request := new(Request) + + // get the current time + now := TimestampProvider.Now().Time + request.Epoch = lookup.GetFirstEpoch(now) + request.View.Topic = topic + request.Header.Version = ProtocolVersion + + return request +} + +// SetData stores the payload data the resource will be updated with +func (r *Request) SetData(data []byte) { + r.data = data + r.Signature = nil } -var zeroAddr = common.Address{} +// IsUpdate returns true if this request models a signed update or otherwise it is a signature request +func (r *Request) IsUpdate() bool { + return r.Signature != nil +} -// NewCreateUpdateRequest returns 
a ready to sign request to create and initialize a resource with data -func NewCreateUpdateRequest(metadata *ResourceMetadata) (*Request, error) { +// Verify checks that signatures are valid and that the signer owns the resource to be updated +func (r *Request) Verify() (err error) { + if len(r.data) == 0 { + return NewError(ErrInvalidValue, "Update does not contain data") + } + if r.Signature == nil { + return NewError(ErrInvalidSignature, "Missing signature field") + } - request, err := NewCreateRequest(metadata) + digest, err := r.GetDigest() if err != nil { - return nil, err + return err } - // get the current time - now := TimestampProvider.Now().Time - - request.version = 1 - request.period, err = getNextPeriod(metadata.StartTime.Time, now, metadata.Frequency) + // get the address of the signer (which also checks that it's a valid signature) + r.View.User, err = getUserAddr(digest, *r.Signature) if err != nil { - return nil, err + return err } - return request, nil + + // check that the lookup information contained in the chunk matches the updateAddr (chunk search key) + // that was used to retrieve this chunk + // if this validation fails, someone forged a chunk. + if !bytes.Equal(r.idAddr, r.Addr()) { + return NewError(ErrInvalidSignature, "Signature address does not match with update user address") + } + + return nil } -// NewCreateRequest returns a request to create a new resource -func NewCreateRequest(metadata *ResourceMetadata) (request *Request, err error) { - if metadata.StartTime.Time == 0 { // get the current time - metadata.StartTime = TimestampProvider.Now() +// Sign executes the signature to validate the resource +func (r *Request) Sign(signer Signer) error { + r.View.User = signer.Address() + r.binaryData = nil //invalidate serialized data + digest, err := r.GetDigest() // computes digest and serializes into .binaryData + if err != nil { + return err } - if metadata.Owner == zeroAddr { - return nil, NewError(ErrInvalidValue, "OwnerAddr is not set") + signature, err := signer.Sign(digest) + if err != nil { + return err } - request = &Request{ - metadata: *metadata, + // Although the Signer interface returns the public address of the signer, + // recover it from the signature to see if they match + userAddr, err := getUserAddr(digest, signature) + if err != nil { + return NewError(ErrInvalidSignature, "Error verifying signature") } - request.rootAddr, request.metaHash, _, err = request.metadata.serializeAndHash() - request.isNew = true - return request, nil -} -// Frequency returns the resource's expected update frequency -func (r *Request) Frequency() uint64 { - return r.metadata.Frequency -} + if userAddr != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign! 
+ return NewError(ErrInvalidSignature, "Signer address does not match update user address") + } -// Name returns the resource human-readable name -func (r *Request) Name() string { - return r.metadata.Name + r.Signature = &signature + r.idAddr = r.Addr() + return nil } -// Multihash returns true if the resource data should be interpreted as a multihash -func (r *Request) Multihash() bool { - return r.multihash -} +// GetDigest creates the resource update digest used in signatures +// the serialized payload is cached in .binaryData +func (r *Request) GetDigest() (result common.Hash, err error) { + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + dataLength := r.ResourceUpdate.binaryLength() + if r.binaryData == nil { + r.binaryData = make([]byte, dataLength+signatureLength) + if err := r.ResourceUpdate.binaryPut(r.binaryData[:dataLength]); err != nil { + return result, err + } + } + hasher.Write(r.binaryData[:dataLength]) //everything except the signature. -// Period returns in which period the resource will be published -func (r *Request) Period() uint32 { - return r.period + return common.BytesToHash(hasher.Sum(nil)), nil } -// Version returns the resource version to publish -func (r *Request) Version() uint32 { - return r.version -} +// create an update chunk. +func (r *Request) toChunk() (storage.Chunk, error) { -// RootAddr returns the metadata chunk address -func (r *Request) RootAddr() storage.Address { - return r.rootAddr -} + // Check that the update is signed and serialized + // For efficiency, data is serialized during signature and cached in + // the binaryData field when computing the signature digest in .getDigest() + if r.Signature == nil || r.binaryData == nil { + return nil, NewError(ErrInvalidSignature, "toChunk called without a valid signature or payload data. Call .Sign() first.") + } -// StartTime returns the time that the resource was/will be created at -func (r *Request) StartTime() Timestamp { - return r.metadata.StartTime -} + resourceUpdateLength := r.ResourceUpdate.binaryLength() + + // signature is the last item in the chunk data + copy(r.binaryData[resourceUpdateLength:], r.Signature[:]) -// Owner returns the resource owner's address -func (r *Request) Owner() common.Address { - return r.metadata.Owner + chunk := storage.NewChunk(r.idAddr, r.binaryData) + return chunk, nil } -// Sign executes the signature to validate the resource and sets the owner address field -func (r *Request) Sign(signer Signer) error { - if r.metadata.Owner != zeroAddr && r.metadata.Owner != signer.Address() { - return NewError(ErrInvalidSignature, "Signer does not match current owner of the resource") - } +// fromChunk populates this structure from chunk data. It does not verify the signature is valid. 
+func (r *Request) fromChunk(updateAddr storage.Address, chunkdata []byte) error { + // for update chunk layout see Request definition - if err := r.SignedResourceUpdate.Sign(signer); err != nil { + //deserialize the resource update portion + if err := r.ResourceUpdate.binaryGet(chunkdata[:len(chunkdata)-signatureLength]); err != nil { return err } - r.metadata.Owner = signer.Address() - return nil -} -// SetData stores the payload data the resource will be updated with -func (r *Request) SetData(data []byte, multihash bool) { - r.data = data - r.multihash = multihash - r.signature = nil - if !r.isNew { - r.metadata.Frequency = 0 // mark as update + // Extract the signature + var signature *Signature + cursor := r.ResourceUpdate.binaryLength() + sigdata := chunkdata[cursor : cursor+signatureLength] + if len(sigdata) > 0 { + signature = &Signature{} + copy(signature[:], sigdata) } + + r.Signature = signature + r.idAddr = updateAddr + r.binaryData = chunkdata + + return nil + } -func (r *Request) IsNew() bool { - return r.metadata.Frequency > 0 && (r.period <= 1 || r.version <= 1) +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (r *Request) FromValues(values Values, data []byte) error { + signatureBytes, err := hexutil.Decode(values.Get("signature")) + if err != nil { + r.Signature = nil + } else { + if len(signatureBytes) != signatureLength { + return NewError(ErrInvalidSignature, "Incorrect signature length") + } + r.Signature = new(Signature) + copy(r.Signature[:], signatureBytes) + } + err = r.ResourceUpdate.FromValues(values, data) + if err != nil { + return err + } + r.idAddr = r.Addr() + return err } -func (r *Request) IsUpdate() bool { - return r.signature != nil +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (r *Request) AppendValues(values Values) []byte { + if r.Signature != nil { + values.Set("signature", hexutil.Encode(r.Signature[:])) + } + return r.ResourceUpdate.AppendValues(values) } // fromJSON takes an update request JSON and populates an UpdateRequest func (r *Request) fromJSON(j *updateRequestJSON) error { - r.version = j.Version - r.period = j.Period - r.multihash = j.Multihash - r.metadata.Name = j.Name - r.metadata.Frequency = j.Frequency - r.metadata.StartTime.Time = j.StartTime - - if err := decodeHexArray(r.metadata.Owner[:], j.Owner, "ownerAddr"); err != nil { - return err - } + r.ID = j.ID + r.Header.Version = j.ProtocolVersion var err error if j.Data != "" { @@ -179,73 +240,18 @@ func (r *Request) fromJSON(j *updateRequestJSON) error { } } - var declaredRootAddr storage.Address - var declaredMetaHash []byte - - declaredRootAddr, err = decodeHexSlice(j.RootAddr, storage.AddressLength, "rootAddr") - if err != nil { - return err - } - declaredMetaHash, err = decodeHexSlice(j.MetaHash, 32, "metaHash") - if err != nil { - return err - } - - if r.IsNew() { - // for new resource creation, rootAddr and metaHash are optional because - // we can derive them from the content itself. - // however, if the user sent them, we check them for consistency. 
- - r.rootAddr, r.metaHash, _, err = r.metadata.serializeAndHash() - if err != nil { - return err - } - if j.RootAddr != "" && !bytes.Equal(declaredRootAddr, r.rootAddr) { - return NewError(ErrInvalidValue, "rootAddr does not match resource metadata") - } - if j.MetaHash != "" && !bytes.Equal(declaredMetaHash, r.metaHash) { - return NewError(ErrInvalidValue, "metaHash does not match resource metadata") - } - - } else { - //Update message - r.rootAddr = declaredRootAddr - r.metaHash = declaredMetaHash - } - if j.Signature != "" { sigBytes, err := hexutil.Decode(j.Signature) if err != nil || len(sigBytes) != signatureLength { return NewError(ErrInvalidSignature, "Cannot decode signature") } - r.signature = new(Signature) - r.updateAddr = r.UpdateAddr() - copy(r.signature[:], sigBytes) + r.Signature = new(Signature) + r.idAddr = r.Addr() + copy(r.Signature[:], sigBytes) } return nil } -func decodeHexArray(dst []byte, src, name string) error { - bytes, err := decodeHexSlice(src, len(dst), name) - if err != nil { - return err - } - if bytes != nil { - copy(dst, bytes) - } - return nil -} - -func decodeHexSlice(src string, expectedLength int, name string) (bytes []byte, err error) { - if src != "" { - bytes, err = hexutil.Decode(src) - if err != nil || len(bytes) != expectedLength { - return nil, NewErrorf(ErrInvalidValue, "Cannot decode %s", name) - } - } - return bytes, nil -} - // UnmarshalJSON takes a JSON structure stored in a byte array and populates the Request object // Implements json.Unmarshaler interface func (r *Request) UnmarshalJSON(rawData []byte) error { @@ -259,38 +265,19 @@ func (r *Request) UnmarshalJSON(rawData []byte) error { // MarshalJSON takes an update request and encodes it as a JSON structure into a byte array // Implements json.Marshaler interface func (r *Request) MarshalJSON() (rawData []byte, err error) { - var signatureString, dataHashString, rootAddrString, metaHashString string - if r.signature != nil { - signatureString = hexutil.Encode(r.signature[:]) + var signatureString, dataString string + if r.Signature != nil { + signatureString = hexutil.Encode(r.Signature[:]) } if r.data != nil { - dataHashString = hexutil.Encode(r.data) - } - if r.rootAddr != nil { - rootAddrString = hexutil.Encode(r.rootAddr) - } - if r.metaHash != nil { - metaHashString = hexutil.Encode(r.metaHash) - } - var ownerAddrString string - if r.metadata.Frequency == 0 { - ownerAddrString = "" - } else { - ownerAddrString = hexutil.Encode(r.metadata.Owner[:]) + dataString = hexutil.Encode(r.data) } requestJSON := &updateRequestJSON{ - Name: r.metadata.Name, - Frequency: r.metadata.Frequency, - StartTime: r.metadata.StartTime.Time, - Version: r.version, - Period: r.period, - Owner: ownerAddrString, - Data: dataHashString, - Multihash: r.multihash, - Signature: signatureString, - RootAddr: rootAddrString, - MetaHash: metaHashString, + ID: r.ID, + ProtocolVersion: r.Header.Version, + Data: dataString, + Signature: signatureString, } return json.Marshal(requestJSON) diff --git a/swarm/storage/mru/request_test.go b/swarm/storage/mru/request_test.go index dba55b27e..c32d5ec13 100644 --- a/swarm/storage/mru/request_test.go +++ b/swarm/storage/mru/request_test.go @@ -1,11 +1,32 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + package mru import ( + "bytes" "encoding/binary" "encoding/json" "fmt" "reflect" "testing" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/mru/lookup" ) func areEqualJSON(s1, s2 string) (bool, error) { @@ -29,19 +50,13 @@ func areEqualJSON(s1, s2 string) (bool, error) { // while also checking cryptographically that only the owner of a resource can update it. func TestEncodingDecodingUpdateRequests(t *testing.T) { - signer := newCharlieSigner() //Charlie, our good guy - falseSigner := newBobSigner() //Bob will play the bad guy again + charlie := newCharlieSigner() //Charlie + bob := newBobSigner() //Bob // Create a resource to our good guy Charlie's name - createRequest, err := NewCreateRequest(&ResourceMetadata{ - Name: "a good resource name", - Frequency: 300, - StartTime: Timestamp{Time: 1528900000}, - Owner: signer.Address()}) - - if err != nil { - t.Fatalf("Error creating resource name: %s", err) - } + topic, _ := NewTopic("a good resource name", nil) + createRequest := NewFirstRequest(topic) + createRequest.User = charlie.Address() // We now encode the create message to simulate we send it over the wire messageRawData, err := createRequest.MarshalJSON() @@ -64,27 +79,21 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { // and recover the information above. To sign an update, we need the rootAddr and the metaHash to construct // proof of ownership - metaHash := createRequest.metaHash - rootAddr := createRequest.rootAddr - const expectedSignature = "0x1c2bab66dc4ed63783d62934e3a628e517888d6949aef0349f3bd677121db9aa09bbfb865904e6c50360e209e0fe6fe757f8a2474cf1b34169c99b95e3fd5a5101" - const expectedJSON = `{"rootAddr":"0x6e744a730f7ea0881528576f0354b6268b98e35a6981ef703153ff1b8d32bbef","metaHash":"0x0c0d5c18b89da503af92302a1a64fab6acb60f78e288eb9c3d541655cd359b60","version":1,"period":7,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421","multiHash":false}` + const expectedSignature = "0x32c2d2c7224e24e4d3ae6a10595fc6e945f1b3ecdf548a04d8247c240a50c9240076aa7730abad6c8a46dfea00cfb8f43b6211f02db5c4cc5ed8584cb0212a4d00" + const expectedJSON = `{"view":{"topic":"0x6120676f6f64207265736f75726365206e616d65000000000000000000000000","user":"0x876a8936a7cd0b79ef0735ad0896c1afe278781c"},"epoch":{"time":1000,"level":1},"protocolVersion":0,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421"}` //Put together an unsigned update request that we will serialize to send it to the signer. 
data := []byte("This hour's update: Swarm 99.0 has been released!") request := &Request{ - SignedResourceUpdate: SignedResourceUpdate{ - resourceUpdate: resourceUpdate{ - updateHeader: updateHeader{ - UpdateLookup: UpdateLookup{ - period: 7, - version: 1, - rootAddr: rootAddr, - }, - multihash: false, - metaHash: metaHash, + ResourceUpdate: ResourceUpdate{ + ID: ID{ + Epoch: lookup.Epoch{ + Time: 1000, + Level: 1, }, - data: data, + View: createRequest.ResourceUpdate.View, }, + data: data, }, } @@ -110,11 +119,11 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { } //sign the request and see if it matches our predefined signature above. - if err := recoveredRequest.Sign(signer); err != nil { + if err := recoveredRequest.Sign(charlie); err != nil { t.Fatalf("Error signing request: %s", err) } - compareByteSliceToExpectedHex(t, "signature", recoveredRequest.signature[:], expectedSignature) + compareByteSliceToExpectedHex(t, "signature", recoveredRequest.Signature[:], expectedSignature) // mess with the signature and see what happens. To alter the signature, we briefly decode it as JSON // to alter the signature field. @@ -129,9 +138,9 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature") } - // Now imagine Evil Bob (why always Bob, poor Bob) attempts to update Charlie's resource, + // Now imagine Bob wants to create an update of his own about the same resource, // signing a message with his private key - if err := request.Sign(falseSigner); err != nil { + if err := request.Sign(bob); err != nil { t.Fatalf("Error signing: %s", err) } @@ -147,29 +156,159 @@ func TestEncodingDecodingUpdateRequests(t *testing.T) { t.Fatalf("Error decoding message:%s", err) } - // Before discovering Bob's misdemeanor, let's see what would happen if we mess + // Before checking what happened with Bob's update, let's see what would happen if we mess // with the signature big time to see if Verify catches it - savedSignature := *recoveredRequest.signature // save the signature for later - binary.LittleEndian.PutUint64(recoveredRequest.signature[5:], 556845463424) // write some random data to break the signature + savedSignature := *recoveredRequest.Signature // save the signature for later + binary.LittleEndian.PutUint64(recoveredRequest.Signature[5:], 556845463424) // write some random data to break the signature if err = recoveredRequest.Verify(); err == nil { t.Fatal("Expected Verify to fail on corrupt signature") } - // restore the Evil Bob's signature from corruption - *recoveredRequest.signature = savedSignature + // restore the Bob's signature from corruption + *recoveredRequest.Signature = savedSignature - // Now the signature is not corrupt, however Verify should now fail because Bob doesn't own the resource - if err = recoveredRequest.Verify(); err == nil { - t.Fatalf("Expected Verify to fail because this resource belongs to Charlie, not Bob the attacker:%s", err) + // Now the signature is not corrupt + if err = recoveredRequest.Verify(); err != nil { + t.Fatal(err) } - // Sign with our friend Charlie's private key - if err := recoveredRequest.Sign(signer); err != nil { + // Reuse object and sign with our friend Charlie's private key + if err := recoveredRequest.Sign(charlie); err != nil { t.Fatalf("Error signing with the correct private key: %s", err) } - // And now, Verify should work since this resource belongs to Charlie + // And now, Verify should work since this update now 
belongs to Charlie if err = recoveredRequest.Verify(); err != nil { - t.Fatalf("Error verifying that Charlie, the good guy, can sign his resource:%s", err) + t.Fatalf("Error verifying that Charlie, can sign a reused request object:%s", err) + } + + // mess with the lookup key to make sure Verify fails: + recoveredRequest.Time = 77999 // this will alter the lookup key + if err = recoveredRequest.Verify(); err == nil { + t.Fatalf("Expected Verify to fail since the lookup key has been altered") + } +} + +func getTestRequest() *Request { + return &Request{ + ResourceUpdate: *getTestResourceUpdate(), + } +} + +func TestUpdateChunkSerializationErrorChecking(t *testing.T) { + + // Test that parseUpdate fails if the chunk is too small + var r Request + if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1+signatureLength)); err == nil { + t.Fatalf("Expected request.fromChunk to fail when chunkData contains less than %d bytes", minimumUpdateDataLength) + } + + r = *getTestRequest() + + _, err := r.toChunk() + if err == nil { + t.Fatal("Expected request.toChunk to fail when there is no data") + } + r.data = []byte("Al bien hacer jamás le falta premio") // put some arbitrary length data + _, err = r.toChunk() + if err == nil { + t.Fatal("expected request.toChunk to fail when there is no signature") + } + + charlie := newCharlieSigner() + if err := r.Sign(charlie); err != nil { + t.Fatalf("error signing:%s", err) + } + + chunk, err := r.toChunk() + if err != nil { + t.Fatalf("error creating update chunk:%s", err) + } + + compareByteSliceToExpectedHex(t, "chunk", chunk.Data(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019416c206269656e206861636572206a616dc3a173206c652066616c7461207072656d696f5a0ffe0bc27f207cd5b00944c8b9cee93e08b89b5ada777f123ac535189333f174a6a4ca2f43a92c4a477a49d774813c36ce8288552c58e6205b0ac35d0507eb00") + + var recovered Request + recovered.fromChunk(chunk.Address(), chunk.Data()) + if !reflect.DeepEqual(recovered, r) { + t.Fatal("Expected recovered SignedResource update to equal the original one") + } +} + +// check that signature address matches update signer address +func TestReverse(t *testing.T) { + + epoch := lookup.Epoch{ + Time: 7888, + Level: 6, + } + + // make fake timeProvider + timeProvider := &fakeTimeProvider{ + currentTime: startTime.Time, + } + + // signer containing private key + signer := newAliceSigner() + + // set up rpc and create resourcehandler + _, _, teardownTest, err := setupTest(timeProvider, signer) + if err != nil { + t.Fatal(err) + } + defer teardownTest() + + topic, _ := NewTopic("Cervantes quotes", nil) + view := View{ + Topic: topic, + User: signer.Address(), + } + + data := []byte("Donde una puerta se cierra, otra se abre") + + request := new(Request) + request.View = view + request.Epoch = epoch + request.data = data + + // generate a chunk key for this request + key := request.Addr() + + if err = request.Sign(signer); err != nil { + t.Fatal(err) + } + + chunk, err := request.toChunk() + if err != nil { + t.Fatal(err) + } + + // check that we can recover the owner account from the update chunk's signature + var checkUpdate Request + if err := checkUpdate.fromChunk(chunk.Address(), chunk.Data()); err != nil { + t.Fatal(err) + } + checkdigest, err := checkUpdate.GetDigest() + if err != nil { + t.Fatal(err) + } + recoveredaddress, err := getUserAddr(checkdigest, *checkUpdate.Signature) + if err != nil { + t.Fatalf("Retrieve address 
from signature fail: %v", err) + } + originaladdress := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) + + // check that the recovered address matches the signer's address + if recoveredaddress != originaladdress { + t.Fatalf("addresses don't match: %x != %x", originaladdress, recoveredaddress) + } + + if !bytes.Equal(key[:], chunk.Address()[:]) { + t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Address()) + } + if epoch != checkUpdate.Epoch { + t.Fatalf("Expected epoch to be '%s', was '%s'", epoch.String(), checkUpdate.Epoch.String()) + } + if !bytes.Equal(data, checkUpdate.data) { + t.Fatalf("Expected data '%x', was '%x'", data, checkUpdate.data) } } diff --git a/swarm/storage/mru/resource.go b/swarm/storage/mru/resource.go deleted file mode 100644 index aa83ff62a..000000000 --- a/swarm/storage/mru/resource.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package mru - -import ( - "bytes" - "context" - "time" - - "github.com/ethereum/go-ethereum/swarm/storage" -) - -const ( - defaultStoreTimeout = 4000 * time.Millisecond - hasherCount = 8 - resourceHashAlgorithm = storage.SHA3Hash - defaultRetrieveTimeout = 100 * time.Millisecond -) - -// resource caches resource data and the metadata of its root chunk.
-type resource struct { - resourceUpdate - ResourceMetadata - *bytes.Reader - lastKey storage.Address - updated time.Time -} - -func (r *resource) Context() context.Context { - return context.TODO() -} - -// TODO Expire content after a defined period (to force resync) -func (r *resource) isSynced() bool { - return !r.updated.IsZero() -} - -// implements storage.LazySectionReader -func (r *resource) Size(ctx context.Context, _ chan bool) (int64, error) { - if !r.isSynced() { - return 0, NewError(ErrNotSynced, "Not synced") - } - return int64(len(r.resourceUpdate.data)), nil -} - -//returns the resource's human-readable name -func (r *resource) Name() string { - return r.ResourceMetadata.Name -} - -// Helper function to calculate the next update period number from the current time, start time and frequency -func getNextPeriod(start uint64, current uint64, frequency uint64) (uint32, error) { - if current < start { - return 0, NewErrorf(ErrInvalidValue, "given current time value %d < start time %d", current, start) - } - if frequency == 0 { - return 0, NewError(ErrInvalidValue, "frequency is 0") - } - timeDiff := current - start - period := timeDiff / frequency - return uint32(period + 1), nil -} diff --git a/swarm/storage/mru/resource_sign.go b/swarm/storage/mru/resource_sign.go index a9f7cb629..58196f10e 100644 --- a/swarm/storage/mru/resource_sign.go +++ b/swarm/storage/mru/resource_sign.go @@ -60,7 +60,16 @@ func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error) return } -// PublicKey returns the public key of the signer's private key +// Address returns the Ethereum address derived from the signer's private key func (s *GenericSigner) Address() common.Address { return s.address } + +// getUserAddr extracts the address of the resource update signer +func getUserAddr(digest common.Hash, signature Signature) (common.Address, error) { + pub, err := crypto.SigToPub(digest.Bytes(), signature[:]) + if err != nil { + return common.Address{}, err + } + return crypto.PubkeyToAddress(*pub), nil +} diff --git a/swarm/storage/mru/resource_test.go b/swarm/storage/mru/resource_test.go deleted file mode 100644 index 0fb465bb0..000000000 --- a/swarm/storage/mru/resource_test.go +++ /dev/null @@ -1,902 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
- -package mru - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/binary" - "flag" - "io/ioutil" - "os" - "testing" - "time" - - "github.com/ethereum/go-ethereum/contracts/ens" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/swarm/chunk" - "github.com/ethereum/go-ethereum/swarm/multihash" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -var ( - loglevel = flag.Int("loglevel", 3, "loglevel") - testHasher = storage.MakeHashFunc(resourceHashAlgorithm)() - startTime = Timestamp{ - Time: uint64(4200), - } - resourceFrequency = uint64(42) - cleanF func() - resourceName = "føø.bar" - hashfunc = storage.MakeHashFunc(storage.DefaultHash) -) - -func init() { - flag.Parse() - log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) -} - -// simulated timeProvider -type fakeTimeProvider struct { - currentTime uint64 -} - -func (f *fakeTimeProvider) Tick() { - f.currentTime++ -} - -func (f *fakeTimeProvider) Now() Timestamp { - return Timestamp{ - Time: f.currentTime, - } -} - -func TestUpdateChunkSerializationErrorChecking(t *testing.T) { - - // Test that parseUpdate fails if the chunk is too small - var r SignedResourceUpdate - if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1)); err == nil { - t.Fatalf("Expected parseUpdate to fail when chunkData contains less than %d bytes", minimumUpdateDataLength) - } - - r = SignedResourceUpdate{} - // Test that parseUpdate fails when the length header does not match the data array length - fakeChunk := make([]byte, 150) - binary.LittleEndian.PutUint16(fakeChunk, 44) - if err := r.fromChunk(storage.ZeroAddr, fakeChunk); err == nil { - t.Fatal("Expected parseUpdate to fail when the header length does not match the actual data array passed in") - } - - r = SignedResourceUpdate{ - resourceUpdate: resourceUpdate{ - updateHeader: updateHeader{ - UpdateLookup: UpdateLookup{ - rootAddr: make([]byte, 79), // put the wrong length, should be storage.AddressLength - }, - metaHash: nil, - multihash: false, - }, - }, - } - _, err := r.toChunk() - if err == nil { - t.Fatal("Expected newUpdateChunk to fail when rootAddr or metaHash have the wrong length") - } - r.rootAddr = make([]byte, storage.AddressLength) - r.metaHash = make([]byte, storage.AddressLength) - _, err = r.toChunk() - if err == nil { - t.Fatal("Expected newUpdateChunk to fail when there is no data") - } - r.data = make([]byte, 79) // put some arbitrary length data - _, err = r.toChunk() - if err == nil { - t.Fatal("expected newUpdateChunk to fail when there is no signature", err) - } - - alice := newAliceSigner() - if err := r.Sign(alice); err != nil { - t.Fatalf("error signing:%s", err) - - } - _, err = r.toChunk() - if err != nil { - t.Fatalf("error creating update chunk:%s", err) - } - - r.multihash = true - r.data[1] = 79 // mess with the multihash, corrupting one byte of it. 
- if err := r.Sign(alice); err == nil { - t.Fatal("expected Sign() to fail when an invalid multihash is in data and multihash=true", err) - } -} - -// check that signature address matches update signer address -func TestReverse(t *testing.T) { - - period := uint32(4) - version := uint32(2) - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - // set up rpc and create resourcehandler - _, _, teardownTest, err := setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - metadata := ResourceMetadata{ - Name: resourceName, - StartTime: startTime, - Frequency: resourceFrequency, - Owner: signer.Address(), - } - - rootAddr, metaHash, _, err := metadata.serializeAndHash() - if err != nil { - t.Fatal(err) - } - - // generate some bogus data for the chunk and sign it - data := make([]byte, 8) - _, err = rand.Read(data) - if err != nil { - t.Fatal(err) - } - testHasher.Reset() - testHasher.Write(data) - - update := &SignedResourceUpdate{ - resourceUpdate: resourceUpdate{ - updateHeader: updateHeader{ - UpdateLookup: UpdateLookup{ - period: period, - version: version, - rootAddr: rootAddr, - }, - metaHash: metaHash, - }, - data: data, - }, - } - // generate a hash for t=4200 version 1 - key := update.UpdateAddr() - - if err = update.Sign(signer); err != nil { - t.Fatal(err) - } - - chunk, err := update.toChunk() - if err != nil { - t.Fatal(err) - } - - // check that we can recover the owner account from the update chunk's signature - var checkUpdate SignedResourceUpdate - if err := checkUpdate.fromChunk(chunk.Address(), chunk.Data()); err != nil { - t.Fatal(err) - } - checkdigest, err := checkUpdate.GetDigest() - if err != nil { - t.Fatal(err) - } - recoveredaddress, err := getOwner(checkdigest, *checkUpdate.signature) - if err != nil { - t.Fatalf("Retrieve address from signature fail: %v", err) - } - originaladdress := crypto.PubkeyToAddress(signer.PrivKey.PublicKey) - - // check that the metadata retrieved from the chunk matches what we gave it - if recoveredaddress != originaladdress { - t.Fatalf("addresses dont match: %x != %x", originaladdress, recoveredaddress) - } - - if !bytes.Equal(key[:], chunk.Address()[:]) { - t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Address()) - } - if period != checkUpdate.period { - t.Fatalf("Expected period '%d', was '%d'", period, checkUpdate.period) - } - if version != checkUpdate.version { - t.Fatalf("Expected version '%d', was '%d'", version, checkUpdate.version) - } - if !bytes.Equal(data, checkUpdate.data) { - t.Fatalf("Expectedn data '%x', was '%x'", data, checkUpdate.data) - } -} - -// make updates and retrieve them based on periods and versions -func TestResourceHandler(t *testing.T) { - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - rh, datadir, teardownTest, err := setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - // create a new resource - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - metadata := &ResourceMetadata{ - Name: resourceName, - Frequency: resourceFrequency, - StartTime: Timestamp{Time: timeProvider.Now().Time}, - Owner: signer.Address(), - } - - request, err := NewCreateUpdateRequest(metadata) - if err != nil { - t.Fatal(err) - } - request.Sign(signer) - if err != nil { - 
t.Fatal(err) - } - err = rh.New(ctx, request) - if err != nil { - t.Fatal(err) - } - - chunk, err := rh.chunkStore.Get(ctx, storage.Address(request.rootAddr)) - if err != nil { - t.Fatal(err) - } else if len(chunk.Data()) < 16 { - t.Fatalf("chunk data must be minimum 16 bytes, is %d", len(chunk.Data())) - } - - var recoveredMetadata ResourceMetadata - - recoveredMetadata.binaryGet(chunk.Data()) - if err != nil { - t.Fatal(err) - } - if recoveredMetadata.StartTime.Time != timeProvider.currentTime { - t.Fatalf("stored startTime %d does not match provided startTime %d", recoveredMetadata.StartTime.Time, timeProvider.currentTime) - } - if recoveredMetadata.Frequency != resourceFrequency { - t.Fatalf("stored frequency %d does not match provided frequency %d", recoveredMetadata.Frequency, resourceFrequency) - } - - // data for updates: - updates := []string{ - "blinky", - "pinky", - "inky", - "clyde", - } - - // update halfway to first period. period=1, version=1 - resourcekey := make(map[string]storage.Address) - fwdClock(int(resourceFrequency/2), timeProvider) - data := []byte(updates[0]) - request.SetData(data, false) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - resourcekey[updates[0]], err = rh.Update(ctx, &request.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - // update on first period with version = 1 to make it fail since there is already one update with version=1 - request, err = rh.NewUpdateRequest(ctx, request.rootAddr) - if err != nil { - t.Fatal(err) - } - if request.version != 2 || request.period != 1 { - t.Fatal("Suggested period should be 1 and version should be 2") - } - - request.version = 1 // force version 1 instead of 2 to make it fail - data = []byte(updates[1]) - request.SetData(data, false) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate) - if err == nil { - t.Fatal("Expected update to fail since this version already exists") - } - - // update on second period with version = 1, correct. 
period=2, version=1 - fwdClock(int(resourceFrequency/2), timeProvider) - request, err = rh.NewUpdateRequest(ctx, request.rootAddr) - if err != nil { - t.Fatal(err) - } - request.SetData(data, false) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - fwdClock(int(resourceFrequency), timeProvider) - // Update on third period, with version = 1 - request, err = rh.NewUpdateRequest(ctx, request.rootAddr) - if err != nil { - t.Fatal(err) - } - data = []byte(updates[2]) - request.SetData(data, false) - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - resourcekey[updates[2]], err = rh.Update(ctx, &request.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - // update just after third period - fwdClock(1, timeProvider) - request, err = rh.NewUpdateRequest(ctx, request.rootAddr) - if err != nil { - t.Fatal(err) - } - if request.period != 3 || request.version != 2 { - t.Fatal("Suggested period should be 3 and version should be 2") - } - data = []byte(updates[3]) - request.SetData(data, false) - - if err := request.Sign(signer); err != nil { - t.Fatal(err) - } - resourcekey[updates[3]], err = rh.Update(ctx, &request.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second) - rh.Close() - - // check we can retrieve the updates after close - // it will match on second iteration startTime + (resourceFrequency * 3) - fwdClock(int(resourceFrequency*2)-1, timeProvider) - - rhparams := &HandlerParams{} - - rh2, err := NewTestHandler(datadir, rhparams) - if err != nil { - t.Fatal(err) - } - - rsrc2, err := rh2.Load(context.TODO(), request.rootAddr) - if err != nil { - t.Fatal(err) - } - - _, err = rh2.Lookup(ctx, LookupLatest(request.rootAddr)) - if err != nil { - t.Fatal(err) - } - - // last update should be "clyde", version two, time= startTime + (resourcefrequency * 3) - if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) { - t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1]) - } - if rsrc2.version != 2 { - t.Fatalf("resource version was %d, expected 2", rsrc2.version) - } - if rsrc2.period != 3 { - t.Fatalf("resource period was %d, expected 3", rsrc2.period) - } - log.Debug("Latest lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data) - - // specific period, latest version - rsrc, err := rh2.Lookup(ctx, LookupLatestVersionInPeriod(request.rootAddr, 3)) - if err != nil { - t.Fatal(err) - } - // check data - if !bytes.Equal(rsrc.data, []byte(updates[len(updates)-1])) { - t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[len(updates)-1]) - } - log.Debug("Historical lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data) - - // specific period, specific version - lookupParams := LookupVersion(request.rootAddr, 3, 1) - rsrc, err = rh2.Lookup(ctx, lookupParams) - if err != nil { - t.Fatal(err) - } - // check data - if !bytes.Equal(rsrc.data, []byte(updates[2])) { - t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[2]) - } - log.Debug("Specific version lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data) - - // we are now at third update - // check backwards stepping to the first - for i := 1; i >= 0; i-- { - rsrc, err := rh2.LookupPrevious(ctx, lookupParams) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(rsrc.data, 
[]byte(updates[i])) { - t.Fatalf("resource data (previous) was %v, expected %v", rsrc.data, updates[i]) - - } - } - - // beyond the first should yield an error - rsrc, err = rh2.LookupPrevious(ctx, lookupParams) - if err == nil { - t.Fatalf("expected previous to fail, returned period %d version %d data %v", rsrc.period, rsrc.version, rsrc.data) - } - -} - -func TestMultihash(t *testing.T) { - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - // set up rpc and create resourcehandler - rh, datadir, teardownTest, err := setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - // create a new resource - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - metadata := &ResourceMetadata{ - Name: resourceName, - Frequency: resourceFrequency, - StartTime: Timestamp{Time: timeProvider.Now().Time}, - Owner: signer.Address(), - } - - mr, err := NewCreateRequest(metadata) - if err != nil { - t.Fatal(err) - } - err = rh.New(ctx, mr) - if err != nil { - t.Fatal(err) - } - - // we're naĂŻvely assuming keccak256 for swarm hashes - // if it ever changes this test should also change - multihashbytes := ens.EnsNode("foo") - multihashmulti := multihash.ToMultihash(multihashbytes.Bytes()) - if err != nil { - t.Fatal(err) - } - mr.SetData(multihashmulti, true) - mr.Sign(signer) - if err != nil { - t.Fatal(err) - } - multihashkey, err := rh.Update(ctx, &mr.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - sha1bytes := make([]byte, multihash.MultihashLength) - sha1multi := multihash.ToMultihash(sha1bytes) - if err != nil { - t.Fatal(err) - } - mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr) - if err != nil { - t.Fatal(err) - } - mr.SetData(sha1multi, true) - mr.Sign(signer) - if err != nil { - t.Fatal(err) - } - sha1key, err := rh.Update(ctx, &mr.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - // invalid multihashes - mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr) - if err != nil { - t.Fatal(err) - } - mr.SetData(multihashmulti[1:], true) - mr.Sign(signer) - if err != nil { - t.Fatal(err) - } - _, err = rh.Update(ctx, &mr.SignedResourceUpdate) - if err == nil { - t.Fatalf("Expected update to fail with first byte skipped") - } - mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr) - if err != nil { - t.Fatal(err) - } - mr.SetData(multihashmulti[:len(multihashmulti)-2], true) - mr.Sign(signer) - if err != nil { - t.Fatal(err) - } - - _, err = rh.Update(ctx, &mr.SignedResourceUpdate) - if err == nil { - t.Fatalf("Expected update to fail with last byte skipped") - } - - data, err := getUpdateDirect(rh.Handler, multihashkey) - if err != nil { - t.Fatal(err) - } - multihashdecode, err := multihash.FromMultihash(data) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) { - t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes()) - } - data, err = getUpdateDirect(rh.Handler, sha1key) - if err != nil { - t.Fatal(err) - } - shadecode, err := multihash.FromMultihash(data) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(shadecode, sha1bytes) { - t.Fatalf("Decoded hash '%x' does not match original hash '%x'", shadecode, sha1bytes) - } - rh.Close() - - rhparams := &HandlerParams{} - // test with signed data - rh2, err := NewTestHandler(datadir, rhparams) - if err != nil { - t.Fatal(err) - } - mr, err = 
NewCreateRequest(metadata) - if err != nil { - t.Fatal(err) - } - err = rh2.New(ctx, mr) - if err != nil { - t.Fatal(err) - } - - mr.SetData(multihashmulti, true) - mr.Sign(signer) - - if err != nil { - t.Fatal(err) - } - multihashsignedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - mr, err = rh2.NewUpdateRequest(ctx, mr.rootAddr) - if err != nil { - t.Fatal(err) - } - mr.SetData(sha1multi, true) - mr.Sign(signer) - if err != nil { - t.Fatal(err) - } - - sha1signedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate) - if err != nil { - t.Fatal(err) - } - - data, err = getUpdateDirect(rh2.Handler, multihashsignedkey) - if err != nil { - t.Fatal(err) - } - multihashdecode, err = multihash.FromMultihash(data) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) { - t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes()) - } - data, err = getUpdateDirect(rh2.Handler, sha1signedkey) - if err != nil { - t.Fatal(err) - } - shadecode, err = multihash.FromMultihash(data) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(shadecode, sha1bytes) { - t.Fatalf("Decoded hash '%x' does not match original hash '%x'", shadecode, sha1bytes) - } -} - -// \TODO verify testing of signature validation and enforcement -func TestValidator(t *testing.T) { - - // make fake timeProvider - timeProvider := &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key. Alice will be the good girl - signer := newAliceSigner() - - // fake signer for false results. Bob will play the bad guy today. - falseSigner := newBobSigner() - - // set up sim timeProvider - rh, _, teardownTest, err := setupTest(timeProvider, signer) - if err != nil { - t.Fatal(err) - } - defer teardownTest() - - // create new resource - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - metadata := &ResourceMetadata{ - Name: resourceName, - Frequency: resourceFrequency, - StartTime: Timestamp{Time: timeProvider.Now().Time}, - Owner: signer.Address(), - } - mr, err := NewCreateRequest(metadata) - if err != nil { - t.Fatal(err) - } - mr.Sign(signer) - - err = rh.New(ctx, mr) - if err != nil { - t.Fatalf("Create resource fail: %v", err) - } - - // chunk with address - data := []byte("foo") - mr.SetData(data, false) - if err := mr.Sign(signer); err != nil { - t.Fatalf("sign fail: %v", err) - } - chunk, err := mr.SignedResourceUpdate.toChunk() - if err != nil { - t.Fatal(err) - } - if !rh.Validate(chunk.Address(), chunk.Data()) { - t.Fatal("Chunk validator fail on update chunk") - } - - // chunk with address made from different publickey - if err := mr.Sign(falseSigner); err == nil { - t.Fatalf("Expected Sign to fail since we are using a different OwnerAddr: %v", err) - } - - // chunk with address made from different publickey - mr.metadata.Owner = zeroAddr // set to zero to bypass .Sign() check - if err := mr.Sign(falseSigner); err != nil { - t.Fatalf("sign fail: %v", err) - } - - chunk, err = mr.SignedResourceUpdate.toChunk() - if err != nil { - t.Fatal(err) - } - - if rh.Validate(chunk.Address(), chunk.Data()) { - t.Fatal("Chunk validator did not fail on update chunk with false address") - } - - ctx, cancel = context.WithTimeout(context.Background(), time.Second) - defer cancel() - - metadata = &ResourceMetadata{ - Name: resourceName, - StartTime: TimestampProvider.Now(), - Frequency: resourceFrequency, - Owner: signer.Address(), - } - chunk, _, err = 
metadata.newChunk() - if err != nil { - t.Fatal(err) - } - - if !rh.Validate(chunk.Address(), chunk.Data()) { - t.Fatal("Chunk validator fail on metadata chunk") - } -} - -// tests that the content address validator correctly checks the data -// tests that resource update chunks are passed through content address validator -// there is some redundancy in this test as it also tests content addressed chunks, -// which should be evaluated as invalid chunks by this validator -func TestValidatorInStore(t *testing.T) { - - // make fake timeProvider - TimestampProvider = &fakeTimeProvider{ - currentTime: startTime.Time, - } - - // signer containing private key - signer := newAliceSigner() - - // set up localstore - datadir, err := ioutil.TempDir("", "storage-testresourcevalidator") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(datadir) - - params := storage.NewDefaultLocalStoreParams() - params.Init(datadir) - store, err := storage.NewLocalStore(params, nil) - if err != nil { - t.Fatal(err) - } - - // set up resource handler and add is as a validator to the localstore - rhParams := &HandlerParams{} - rh := NewHandler(rhParams) - store.Validators = append(store.Validators, rh) - - // create content addressed chunks, one good, one faulty - chunks := storage.GenerateRandomChunks(chunk.DefaultSize, 2) - goodChunk := chunks[0] - badChunk := storage.NewChunk(chunks[1].Address(), goodChunk.Data()) - - metadata := &ResourceMetadata{ - StartTime: startTime, - Name: "xyzzy", - Frequency: resourceFrequency, - Owner: signer.Address(), - } - - rootChunk, metaHash, err := metadata.newChunk() - if err != nil { - t.Fatal(err) - } - // create a resource update chunk with correct publickey - updateLookup := UpdateLookup{ - period: 42, - version: 1, - rootAddr: rootChunk.Address(), - } - - updateAddr := updateLookup.UpdateAddr() - data := []byte("bar") - - r := SignedResourceUpdate{ - updateAddr: updateAddr, - resourceUpdate: resourceUpdate{ - updateHeader: updateHeader{ - UpdateLookup: updateLookup, - metaHash: metaHash, - }, - data: data, - }, - } - - r.Sign(signer) - - uglyChunk, err := r.toChunk() - if err != nil { - t.Fatal(err) - } - - // put the chunks in the store and check their error status - err = store.Put(context.Background(), goodChunk) - if err == nil { - t.Fatal("expected error on good content address chunk with resource validator only, but got nil") - } - err = store.Put(context.Background(), badChunk) - if err == nil { - t.Fatal("expected error on bad content address chunk with resource validator only, but got nil") - } - err = store.Put(context.Background(), uglyChunk) - if err != nil { - t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err) - } -} - -// fast-forward clock -func fwdClock(count int, timeProvider *fakeTimeProvider) { - for i := 0; i < count; i++ { - timeProvider.Tick() - } -} - -// create rpc and resourcehandler -func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, datadir string, teardown func(), err error) { - - var fsClean func() - var rpcClean func() - cleanF = func() { - if fsClean != nil { - fsClean() - } - if rpcClean != nil { - rpcClean() - } - } - - // temp datadir - datadir, err = ioutil.TempDir("", "rh") - if err != nil { - return nil, "", nil, err - } - fsClean = func() { - os.RemoveAll(datadir) - } - - TimestampProvider = timeProvider - rhparams := &HandlerParams{} - rh, err = NewTestHandler(datadir, rhparams) - return rh, datadir, cleanF, err -} - -func newAliceSigner() 
*GenericSigner { - privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") - return NewGenericSigner(privKey) -} - -func newBobSigner() *GenericSigner { - privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca") - return NewGenericSigner(privKey) -} - -func newCharlieSigner() *GenericSigner { - privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca") - return NewGenericSigner(privKey) -} - -func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) { - chunk, err := rh.chunkStore.Get(context.TODO(), addr) - if err != nil { - return nil, err - } - var r SignedResourceUpdate - if err := r.fromChunk(addr, chunk.Data()); err != nil { - return nil, err - } - return r.data, nil -} diff --git a/swarm/storage/mru/signedupdate.go b/swarm/storage/mru/signedupdate.go deleted file mode 100644 index 41a5a5e63..000000000 --- a/swarm/storage/mru/signedupdate.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
- -package mru - -import ( - "bytes" - "hash" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// SignedResourceUpdate represents a resource update with all the necessary information to prove ownership of the resource -type SignedResourceUpdate struct { - resourceUpdate // actual content that will be put on the chunk, less signature - signature *Signature - updateAddr storage.Address // resulting chunk address for the update (not serialized, for internal use) - binaryData []byte // resulting serialized data (not serialized, for efficiency/internal use) -} - -// Verify checks that signatures are valid and that the signer owns the resource to be updated -func (r *SignedResourceUpdate) Verify() (err error) { - if len(r.data) == 0 { - return NewError(ErrInvalidValue, "Update does not contain data") - } - if r.signature == nil { - return NewError(ErrInvalidSignature, "Missing signature field") - } - - digest, err := r.GetDigest() - if err != nil { - return err - } - - // get the address of the signer (which also checks that it's a valid signature) - ownerAddr, err := getOwner(digest, *r.signature) - if err != nil { - return err - } - - if !bytes.Equal(r.updateAddr, r.UpdateAddr()) { - return NewError(ErrInvalidSignature, "Signature address does not match with ownerAddr") - } - - // Check if who signed the resource update really owns the resource - if !verifyOwner(ownerAddr, r.metaHash, r.rootAddr) { - return NewErrorf(ErrUnauthorized, "signature is valid but signer does not own the resource: %v", err) - } - - return nil -} - -// Sign executes the signature to validate the resource -func (r *SignedResourceUpdate) Sign(signer Signer) error { - - r.binaryData = nil //invalidate serialized data - digest, err := r.GetDigest() // computes digest and serializes into .binaryData - if err != nil { - return err - } - - signature, err := signer.Sign(digest) - if err != nil { - return err - } - - // Although the Signer interface returns the public address of the signer, - // recover it from the signature to see if they match - ownerAddress, err := getOwner(digest, signature) - if err != nil { - return NewError(ErrInvalidSignature, "Error verifying signature") - } - - if ownerAddress != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign! - return NewError(ErrInvalidSignature, "Signer address does not match ownerAddr") - } - - r.signature = &signature - r.updateAddr = r.UpdateAddr() - return nil -} - -// create an update chunk. -func (r *SignedResourceUpdate) toChunk() (storage.Chunk, error) { - - // Check that the update is signed and serialized - // For efficiency, data is serialized during signature and cached in - // the binaryData field when computing the signature digest in .getDigest() - if r.signature == nil || r.binaryData == nil { - return nil, NewError(ErrInvalidSignature, "newUpdateChunk called without a valid signature or payload data. Call .Sign() first.") - } - - resourceUpdateLength := r.resourceUpdate.binaryLength() - // signature is the last item in the chunk data - copy(r.binaryData[resourceUpdateLength:], r.signature[:]) - - chunk := storage.NewChunk(r.updateAddr, r.binaryData) - return chunk, nil -} - -// fromChunk populates this structure from chunk data. It does not verify the signature is valid. 
-func (r *SignedResourceUpdate) fromChunk(updateAddr storage.Address, chunkdata []byte) error { - // for update chunk layout see SignedResourceUpdate definition - - //deserialize the resource update portion - if err := r.resourceUpdate.binaryGet(chunkdata); err != nil { - return err - } - - // Extract the signature - var signature *Signature - cursor := r.resourceUpdate.binaryLength() - sigdata := chunkdata[cursor : cursor+signatureLength] - if len(sigdata) > 0 { - signature = &Signature{} - copy(signature[:], sigdata) - } - - r.signature = signature - r.updateAddr = updateAddr - r.binaryData = chunkdata - - return nil - -} - -// GetDigest creates the resource update digest used in signatures (formerly known as keyDataHash) -// the serialized payload is cached in .binaryData -func (r *SignedResourceUpdate) GetDigest() (result common.Hash, err error) { - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - dataLength := r.resourceUpdate.binaryLength() - if r.binaryData == nil { - r.binaryData = make([]byte, dataLength+signatureLength) - if err := r.resourceUpdate.binaryPut(r.binaryData[:dataLength]); err != nil { - return result, err - } - } - hasher.Write(r.binaryData[:dataLength]) //everything except the signature. - - return common.BytesToHash(hasher.Sum(nil)), nil -} - -// getOwner extracts the address of the resource update signer -func getOwner(digest common.Hash, signature Signature) (common.Address, error) { - pub, err := crypto.SigToPub(digest.Bytes(), signature[:]) - if err != nil { - return common.Address{}, err - } - return crypto.PubkeyToAddress(*pub), nil -} - -// verifyResourceOwnerhsip checks that the signer of the update actually owns the resource -// H(ownerAddr, metaHash) is computed. If it matches the rootAddr the update chunk is claiming -// to update, it is proven that signer of the resource update owns the resource. 
-// See metadataHash in metadata.go for a more detailed explanation -func verifyOwner(ownerAddr common.Address, metaHash []byte, rootAddr storage.Address) bool { - hasher := hashPool.Get().(hash.Hash) - defer hashPool.Put(hasher) - hasher.Reset() - hasher.Write(metaHash) - hasher.Write(ownerAddr.Bytes()) - rootAddr2 := hasher.Sum(nil) - return bytes.Equal(rootAddr2, rootAddr) -} diff --git a/swarm/storage/mru/timestampprovider.go b/swarm/storage/mru/timestampprovider.go index f483491aa..6ac153213 100644 --- a/swarm/storage/mru/timestampprovider.go +++ b/swarm/storage/mru/timestampprovider.go @@ -18,15 +18,16 @@ package mru import ( "encoding/binary" + "encoding/json" "time" ) // TimestampProvider sets the time source of the mru package var TimestampProvider timestampProvider = NewDefaultTimestampProvider() -// Encodes a point in time as a Unix epoch +// Timestamp encodes a point in time as a Unix epoch type Timestamp struct { - Time uint64 // Unix epoch timestamp, in seconds + Time uint64 `json:"time"` // Unix epoch timestamp, in seconds } // 8 bytes uint64 Time @@ -55,6 +56,18 @@ func (t *Timestamp) binaryPut(data []byte) error { return nil } +// UnmarshalJSON implements the json.Unmarshaller interface +func (t *Timestamp) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &t.Time) +} + +// MarshalJSON implements the json.Marshaller interface +func (t *Timestamp) MarshalJSON() ([]byte, error) { + return json.Marshal(t.Time) +} + +// DefaultTimestampProvider is a TimestampProvider that uses system time +// as time source type DefaultTimestampProvider struct { } diff --git a/swarm/storage/mru/topic.go b/swarm/storage/mru/topic.go new file mode 100644 index 000000000..f318a5593 --- /dev/null +++ b/swarm/storage/mru/topic.go @@ -0,0 +1,105 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package mru + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/ethereum/go-ethereum/common/bitutil" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// TopicLength establishes the max length of a topic string +const TopicLength = storage.AddressLength + +// Topic represents what a resource talks about +type Topic [TopicLength]byte + +// ErrTopicTooLong is returned when creating a topic with a name/related content too long +var ErrTopicTooLong = fmt.Errorf("Topic is too long. Max length is %d", TopicLength) + +// NewTopic creates a new topic from a provided name and "related content" byte array, +// merging the two together. 
+// If relatedContent or name are longer than TopicLength, they will be truncated and an error returned +// name can be an empty string +// relatedContent can be nil +func NewTopic(name string, relatedContent []byte) (topic Topic, err error) { + if relatedContent != nil { + contentLength := len(relatedContent) + if contentLength > TopicLength { + contentLength = TopicLength + err = ErrTopicTooLong + } + copy(topic[:], relatedContent[:contentLength]) + } + nameBytes := []byte(name) + nameLength := len(nameBytes) + if nameLength > TopicLength { + nameLength = TopicLength + err = ErrTopicTooLong + } + bitutil.XORBytes(topic[:], topic[:], nameBytes[:nameLength]) + return topic, err +} + +// Hex will return the topic encoded as an hex string +func (t *Topic) Hex() string { + return hexutil.Encode(t[:]) +} + +// FromHex will parse a hex string into this Topic instance +func (t *Topic) FromHex(hex string) error { + bytes, err := hexutil.Decode(hex) + if err != nil || len(bytes) != len(t) { + return NewErrorf(ErrInvalidValue, "Cannot decode topic") + } + copy(t[:], bytes) + return nil +} + +// Name will try to extract the resource name out of the topic +func (t *Topic) Name(relatedContent []byte) string { + nameBytes := *t + if relatedContent != nil { + contentLength := len(relatedContent) + if contentLength > TopicLength { + contentLength = TopicLength + } + bitutil.XORBytes(nameBytes[:], t[:], relatedContent[:contentLength]) + } + z := bytes.IndexByte(nameBytes[:], 0) + if z < 0 { + z = TopicLength + } + return string(nameBytes[:z]) + +} + +// UnmarshalJSON implements the json.Unmarshaller interface +func (t *Topic) UnmarshalJSON(data []byte) error { + var hex string + json.Unmarshal(data, &hex) + return t.FromHex(hex) +} + +// MarshalJSON implements the json.Marshaller interface +func (t *Topic) MarshalJSON() ([]byte, error) { + return json.Marshal(t.Hex()) +} diff --git a/swarm/storage/mru/topic_test.go b/swarm/storage/mru/topic_test.go new file mode 100644 index 000000000..dad7c7ddc --- /dev/null +++ b/swarm/storage/mru/topic_test.go @@ -0,0 +1,50 @@ +package mru + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +func TestTopic(t *testing.T) { + related, _ := hexutil.Decode("0xabcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789") + topicName := "test-topic" + topic, _ := NewTopic(topicName, related) + hex := topic.Hex() + expectedHex := "0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789" + if hex != expectedHex { + t.Fatalf("Expected %s, got %s", expectedHex, hex) + } + + var topic2 Topic + topic2.FromHex(hex) + if topic2 != topic { + t.Fatal("Expected recovered topic to be equal to original one") + } + + if topic2.Name(related) != topicName { + t.Fatal("Retrieved name does not match") + } + + bytes, err := topic2.MarshalJSON() + if err != nil { + t.Fatal(err) + } + expectedJSON := `"0xdfa89c750e3108f9c2aeef0123456789abcdef0123456789abcdef0123456789"` + equal, err := areEqualJSON(expectedJSON, string(bytes)) + if err != nil { + t.Fatal(err) + } + if !equal { + t.Fatalf("Expected JSON to be %s, got %s", expectedJSON, string(bytes)) + } + + err = topic2.UnmarshalJSON(bytes) + if err != nil { + t.Fatal(err) + } + if topic2 != topic { + t.Fatal("Expected recovered topic to be equal to original one") + } + +} diff --git a/swarm/storage/mru/update.go b/swarm/storage/mru/update.go index d1bd37ddf..6aa57fce1 100644 --- a/swarm/storage/mru/update.go +++ b/swarm/storage/mru/update.go @@ -17,36 +17,35 @@ package mru import ( - 
"encoding/binary" - "errors" + "fmt" + "strconv" "github.com/ethereum/go-ethereum/swarm/chunk" - "github.com/ethereum/go-ethereum/swarm/log" - "github.com/ethereum/go-ethereum/swarm/multihash" ) -// resourceUpdate encapsulates the information sent as part of a resource update -type resourceUpdate struct { - updateHeader // metainformationa about this resource update - data []byte // actual data payload +// ProtocolVersion defines the current version of the protocol that will be included in each update message +const ProtocolVersion uint8 = 0 + +const headerLength = 8 + +// Header defines a update message header including a protocol version byte +type Header struct { + Version uint8 // Protocol version + Padding [headerLength - 1]uint8 // reserved for future use } -// Update chunk layout -// Prefix: -// 2 bytes updateHeaderLength -// 2 bytes data length -const chunkPrefixLength = 2 + 2 +// ResourceUpdate encapsulates the information sent as part of a resource update +type ResourceUpdate struct { + Header Header // + ID // Resource update identifying information + data []byte // actual data payload +} -// Header: (see updateHeader) -// Data: -// data (datalength bytes) -// -// Minimum size is Header + 1 (minimum data length, enforced) -const minimumUpdateDataLength = updateHeaderLength + 1 -const maxUpdateDataLength = chunk.DefaultSize - signatureLength - updateHeaderLength - chunkPrefixLength +const minimumUpdateDataLength = idLength + headerLength + 1 +const maxUpdateDataLength = chunk.DefaultSize - signatureLength - idLength - headerLength // binaryPut serializes the resource update information into the given slice -func (r *resourceUpdate) binaryPut(serializedData []byte) error { +func (r *ResourceUpdate) binaryPut(serializedData []byte) error { datalength := len(r.data) if datalength == 0 { return NewError(ErrInvalidValue, "cannot update a resource with no data") @@ -60,26 +59,17 @@ func (r *resourceUpdate) binaryPut(serializedData []byte) error { return NewErrorf(ErrInvalidValue, "slice passed to putBinary must be of exact size. 
Expected %d bytes", r.binaryLength()) } - if r.multihash { - if _, _, err := multihash.GetMultihashLength(r.data); err != nil { - return NewError(ErrInvalidValue, "Invalid multihash") - } - } - - // Add prefix: updateHeaderLength and actual data length - cursor := 0 - binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(updateHeaderLength)) - cursor += 2 + var cursor int + // serialize Header + serializedData[cursor] = r.Header.Version + copy(serializedData[cursor+1:headerLength], r.Header.Padding[:headerLength-1]) + cursor += headerLength - // data length - binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(datalength)) - cursor += 2 - - // serialize header (see updateHeader) - if err := r.updateHeader.binaryPut(serializedData[cursor : cursor+updateHeaderLength]); err != nil { + // serialize ID + if err := r.ID.binaryPut(serializedData[cursor : cursor+idLength]); err != nil { return err } - cursor += updateHeaderLength + cursor += idLength // add the data copy(serializedData[cursor:], r.data) @@ -89,60 +79,54 @@ func (r *resourceUpdate) binaryPut(serializedData []byte) error { } // binaryLength returns the expected number of bytes this structure will take to encode -func (r *resourceUpdate) binaryLength() int { - return chunkPrefixLength + updateHeaderLength + len(r.data) +func (r *ResourceUpdate) binaryLength() int { + return idLength + headerLength + len(r.data) } // binaryGet populates this instance from the information contained in the passed byte slice -func (r *resourceUpdate) binaryGet(serializedData []byte) error { +func (r *ResourceUpdate) binaryGet(serializedData []byte) error { if len(serializedData) < minimumUpdateDataLength { return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a resource update chunk", minimumUpdateDataLength) } - cursor := 0 - declaredHeaderlength := binary.LittleEndian.Uint16(serializedData[cursor : cursor+2]) - if declaredHeaderlength != updateHeaderLength { - return NewErrorf(ErrCorruptData, "Invalid header length. 
Expected %d, got %d", updateHeaderLength, declaredHeaderlength) - } + dataLength := len(serializedData) - idLength - headerLength + // at this point we can be satisfied that we have the correct data length to read - cursor += 2 - datalength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])) - cursor += 2 + var cursor int - if chunkPrefixLength+updateHeaderLength+datalength+signatureLength != len(serializedData) { - return NewError(ErrNothingToReturn, "length specified in header is different than actual chunk size") - } + // deserialize Header + r.Header.Version = serializedData[cursor] // extract the protocol version + copy(r.Header.Padding[:headerLength-1], serializedData[cursor+1:headerLength]) // extract the padding + cursor += headerLength - // at this point we can be satisfied that we have the correct data length to read - if err := r.updateHeader.binaryGet(serializedData[cursor : cursor+updateHeaderLength]); err != nil { + if err := r.ID.binaryGet(serializedData[cursor : cursor+idLength]); err != nil { return err } - cursor += updateHeaderLength - - data := serializedData[cursor : cursor+datalength] - cursor += datalength + cursor += idLength - // if multihash content is indicated we check the validity of the multihash - if r.updateHeader.multihash { - mhLength, mhHeaderLength, err := multihash.GetMultihashLength(data) - if err != nil { - log.Error("multihash parse error", "err", err) - return err - } - if datalength != mhLength+mhHeaderLength { - log.Debug("multihash error", "datalength", datalength, "mhLength", mhLength, "mhHeaderLength", mhHeaderLength) - return errors.New("Corrupt multihash data") - } - } + data := serializedData[cursor : cursor+dataLength] + cursor += dataLength // now that all checks have passed, copy data into structure - r.data = make([]byte, datalength) + r.data = make([]byte, dataLength) copy(r.data, data) return nil } -// Multihash specifies whether the resource data should be interpreted as multihash -func (r *resourceUpdate) Multihash() bool { - return r.multihash +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (r *ResourceUpdate) FromValues(values Values, data []byte) error { + r.data = data + version, _ := strconv.ParseUint(values.Get("protocolVersion"), 10, 32) + r.Header.Version = uint8(version) + return r.ID.FromValues(values) +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (r *ResourceUpdate) AppendValues(values Values) []byte { + r.ID.AppendValues(values) + values.Set("protocolVersion", fmt.Sprintf("%d", r.Header.Version)) + return r.data } diff --git a/swarm/storage/mru/update_test.go b/swarm/storage/mru/update_test.go index 51e9d2fcc..bd706d83a 100644 --- a/swarm/storage/mru/update_test.go +++ b/swarm/storage/mru/update_test.go @@ -1,72 +1,50 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + package mru import ( - "bytes" "testing" ) -const serializedUpdateHex = "0x490034004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf000456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f" -const serializedUpdateMultihashHex = "0x490022004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0011b200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1c1e1f20" - -func getTestResourceUpdate() *resourceUpdate { - return &resourceUpdate{ - updateHeader: *getTestUpdateHeader(false), - data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"), - } -} - -func getTestResourceUpdateMultihash() *resourceUpdate { - return &resourceUpdate{ - updateHeader: *getTestUpdateHeader(true), - data: []byte{0x1b, 0x20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 28, 30, 31, 32}, +func getTestResourceUpdate() *ResourceUpdate { + return &ResourceUpdate{ + ID: *getTestID(), + data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"), } } -func compareResourceUpdate(a, b *resourceUpdate) bool { - return compareUpdateHeader(&a.updateHeader, &b.updateHeader) && - bytes.Equal(a.data, b.data) -} - func TestResourceUpdateSerializer(t *testing.T) { - var serializedUpdateLength = len(serializedUpdateHex)/2 - 1 // hack to calculate the byte length out of the hex representation - update := getTestResourceUpdate() - serializedUpdate := make([]byte, serializedUpdateLength) - if err := update.binaryPut(serializedUpdate); err != nil { - t.Fatal(err) - } - compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateHex) - - // Test fail if update does not contain data - update.data = nil - if err := update.binaryPut(serializedUpdate); err == nil { - t.Fatal("Expected resourceUpdate.binaryPut to fail since update does not contain data") - } + testBinarySerializerRecovery(t, getTestResourceUpdate(), "0x0000000000000000776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781ce803000000000019456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f") +} +func TestResourceUpdateLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestResourceUpdate()) // Test fail if update is too big - update.data = make([]byte, 10000) - if err := update.binaryPut(serializedUpdate); err == nil { + update := getTestResourceUpdate() + update.data = make([]byte, maxUpdateDataLength+100) + serialized := make([]byte, update.binaryLength()) + if err := update.binaryPut(serialized); err == nil { t.Fatal("Expected resourceUpdate.binaryPut to fail since update is too big") } - // Test fail if passed slice is not of the exact size required for this update - update.data = make([]byte, 1) - if err := update.binaryPut(serializedUpdate); err == nil { - t.Fatal("Expected resourceUpdate.binaryPut to fail since passed slice is not of the appropriate size") - } - - // Test serializing a multihash update - var serializedUpdateMultihashLength = len(serializedUpdateMultihashHex)/2 - 1 // hack to calculate the byte length out of the hex representation - update 
= getTestResourceUpdateMultihash() - serializedUpdate = make([]byte, serializedUpdateMultihashLength) - if err := update.binaryPut(serializedUpdate); err != nil { - t.Fatal(err) - } - compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateMultihashHex) - - // mess with the multihash to test it fails with a wrong multihash error - update.data[1] = 79 - if err := update.binaryPut(serializedUpdate); err == nil { - t.Fatal("Expected resourceUpdate.binaryPut to fail since data contains an invalid multihash") + // test fail if data is empty or nil + update.data = nil + serialized = make([]byte, update.binaryLength()) + if err := update.binaryPut(serialized); err == nil { + t.Fatal("Expected resourceUpdate.binaryPut to fail since data is empty") } - } diff --git a/swarm/storage/mru/updateheader.go b/swarm/storage/mru/updateheader.go deleted file mode 100644 index f0039eaf6..000000000 --- a/swarm/storage/mru/updateheader.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package mru - -import ( - "github.com/ethereum/go-ethereum/swarm/storage" -) - -// updateHeader models the non-payload components of a Resource Update -type updateHeader struct { - UpdateLookup // UpdateLookup contains the information required to locate this resource (components of the search key used to find it) - multihash bool // Whether the data in this Resource Update should be interpreted as multihash - metaHash []byte // SHA3 hash of the metadata chunk (less ownerAddr). Used to prove ownerhsip of the resource. -} - -const metaHashLength = storage.AddressLength - -// updateLookupLength bytes -// 1 byte flags (multihash bool for now) -// 32 bytes metaHash -const updateHeaderLength = updateLookupLength + 1 + metaHashLength - -// binaryPut serializes the resource header information into the given slice -func (h *updateHeader) binaryPut(serializedData []byte) error { - if len(serializedData) != updateHeaderLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize updateHeaderLength. 
Expected %d, got %d", updateHeaderLength, len(serializedData)) - } - if len(h.metaHash) != metaHashLength { - return NewError(ErrInvalidValue, "updateHeader.binaryPut called without metaHash set") - } - if err := h.UpdateLookup.binaryPut(serializedData[:updateLookupLength]); err != nil { - return err - } - cursor := updateLookupLength - copy(serializedData[cursor:], h.metaHash[:metaHashLength]) - cursor += metaHashLength - - var flags byte - if h.multihash { - flags |= 0x01 - } - - serializedData[cursor] = flags - cursor++ - - return nil -} - -// binaryLength returns the expected size of this structure when serialized -func (h *updateHeader) binaryLength() int { - return updateHeaderLength -} - -// binaryGet restores the current updateHeader instance from the information contained in the passed slice -func (h *updateHeader) binaryGet(serializedData []byte) error { - if len(serializedData) != updateHeaderLength { - return NewErrorf(ErrInvalidValue, "Incorrect slice size to read updateHeaderLength. Expected %d, got %d", updateHeaderLength, len(serializedData)) - } - - if err := h.UpdateLookup.binaryGet(serializedData[:updateLookupLength]); err != nil { - return err - } - cursor := updateLookupLength - h.metaHash = make([]byte, metaHashLength) - copy(h.metaHash[:storage.AddressLength], serializedData[cursor:cursor+storage.AddressLength]) - cursor += metaHashLength - - flags := serializedData[cursor] - cursor++ - - h.multihash = flags&0x01 != 0 - - return nil -} diff --git a/swarm/storage/mru/updateheader_test.go b/swarm/storage/mru/updateheader_test.go deleted file mode 100644 index b1f505989..000000000 --- a/swarm/storage/mru/updateheader_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package mru - -import ( - "bytes" - "testing" - - "github.com/ethereum/go-ethereum/common/hexutil" -) - -const serializedUpdateHeaderMultihashHex = "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf001" - -func getTestUpdateHeader(multihash bool) (header *updateHeader) { - _, metaHash, _, _ := getTestMetadata().serializeAndHash() - return &updateHeader{ - UpdateLookup: *getTestUpdateLookup(), - multihash: multihash, - metaHash: metaHash, - } -} - -func compareUpdateHeader(a, b *updateHeader) bool { - return compareUpdateLookup(&a.UpdateLookup, &b.UpdateLookup) && - a.multihash == b.multihash && - bytes.Equal(a.metaHash, b.metaHash) -} - -func TestUpdateHeaderSerializer(t *testing.T) { - header := getTestUpdateHeader(true) - serializedHeader := make([]byte, updateHeaderLength) - if err := header.binaryPut(serializedHeader); err != nil { - t.Fatal(err) - } - compareByteSliceToExpectedHex(t, "serializedHeader", serializedHeader, serializedUpdateHeaderMultihashHex) - - // trigger incorrect slice length error passing a slice that is 1 byte too big - if err := header.binaryPut(make([]byte, updateHeaderLength+1)); err == nil { - t.Fatal("Expected updateHeader.binaryPut to fail since supplied slice is of incorrect length") - } - - // trigger invalid metaHash error - header.metaHash = nil - if err := header.binaryPut(serializedHeader); err == nil { - t.Fatal("Expected updateHeader.binaryPut to fail metaHash is of incorrect length") - } -} - -func TestUpdateHeaderDeserializer(t *testing.T) { - originalUpdate := getTestUpdateHeader(true) - serializedData, _ := hexutil.Decode(serializedUpdateHeaderMultihashHex) - var retrievedUpdate updateHeader - if err := retrievedUpdate.binaryGet(serializedData); err != nil { - t.Fatal(err) - } - if 
!compareUpdateHeader(originalUpdate, &retrievedUpdate) { - t.Fatalf("Expected deserialized structure to equal the original") - } - - // mess with source slice to test length checks - serializedData = []byte{1, 2, 3} - if err := retrievedUpdate.binaryGet(serializedData); err == nil { - t.Fatal("Expected retrievedUpdate.binaryGet, since passed slice is too small") - } - -} diff --git a/swarm/storage/mru/view.go b/swarm/storage/mru/view.go new file mode 100644 index 000000000..2e4ce4a0b --- /dev/null +++ b/swarm/storage/mru/view.go @@ -0,0 +1,125 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package mru + +import ( + "hash" + "unsafe" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/swarm/storage" +) + +// View represents a particular user's view of a resource +type View struct { + Topic Topic `json:"topic"` + User common.Address `json:"user"` +} + +// View layout: +// TopicLength bytes +// userAddr common.AddressLength bytes +const viewLength = TopicLength + common.AddressLength + +// mapKey calculates a unique id for this view for the cache map in `Handler` +func (u *View) mapKey() uint64 { + serializedData := make([]byte, viewLength) + u.binaryPut(serializedData) + hasher := hashPool.Get().(hash.Hash) + defer hashPool.Put(hasher) + hasher.Reset() + hasher.Write(serializedData) + hash := hasher.Sum(nil) + return *(*uint64)(unsafe.Pointer(&hash[0])) +} + +// binaryPut serializes this View instance into the provided slice +func (u *View) binaryPut(serializedData []byte) error { + if len(serializedData) != viewLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize View. Expected %d, got %d", viewLength, len(serializedData)) + } + var cursor int + copy(serializedData[cursor:cursor+TopicLength], u.Topic[:TopicLength]) + cursor += TopicLength + + copy(serializedData[cursor:cursor+common.AddressLength], u.User[:]) + cursor += common.AddressLength + + return nil +} + +// binaryLength returns the expected size of this structure when serialized +func (u *View) binaryLength() int { + return viewLength +} + +// binaryGet restores the current instance from the information contained in the passed slice +func (u *View) binaryGet(serializedData []byte) error { + if len(serializedData) != viewLength { + return NewErrorf(ErrInvalidValue, "Incorrect slice size to read View. 
Expected %d, got %d", viewLength, len(serializedData)) + } + + var cursor int + copy(u.Topic[:], serializedData[cursor:cursor+TopicLength]) + cursor += TopicLength + + copy(u.User[:], serializedData[cursor:cursor+common.AddressLength]) + cursor += common.AddressLength + + return nil +} + +// Hex serializes the View to a hex string +func (u *View) Hex() string { + serializedData := make([]byte, viewLength) + u.binaryPut(serializedData) + return hexutil.Encode(serializedData) +} + +// FromValues deserializes this instance from a string key-value store +// useful to parse query strings +func (u *View) FromValues(values Values) (err error) { + topic := values.Get("topic") + if topic != "" { + if err := u.Topic.FromHex(values.Get("topic")); err != nil { + return err + } + } else { // see if the user set name and relatedcontent + name := values.Get("name") + relatedContent, _ := hexutil.Decode(values.Get("relatedcontent")) + if len(relatedContent) > 0 { + if len(relatedContent) < storage.AddressLength { + return NewErrorf(ErrInvalidValue, "relatedcontent field must be a hex-encoded byte array exactly %d bytes long", storage.AddressLength) + } + relatedContent = relatedContent[:storage.AddressLength] + } + u.Topic, err = NewTopic(name, relatedContent) + if err != nil { + return err + } + } + u.User = common.HexToAddress(values.Get("user")) + return nil +} + +// AppendValues serializes this structure into the provided string key-value store +// useful to build query strings +func (u *View) AppendValues(values Values) { + values.Set("topic", u.Topic.Hex()) + values.Set("user", u.User.Hex()) +} diff --git a/swarm/storage/mru/view_test.go b/swarm/storage/mru/view_test.go new file mode 100644 index 000000000..45720ba79 --- /dev/null +++ b/swarm/storage/mru/view_test.go @@ -0,0 +1,36 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
+package mru + +import ( + "testing" +) + +func getTestView() *View { + topic, _ := NewTopic("world news report, every hour", nil) + return &View{ + Topic: topic, + User: newCharlieSigner().Address(), + } +} + +func TestViewSerializerDeserializer(t *testing.T) { + testBinarySerializerRecovery(t, getTestView(), "0x776f726c64206e657773207265706f72742c20657665727920686f7572000000876a8936a7cd0b79ef0735ad0896c1afe278781c") +} + +func TestMetadataSerializerLengthCheck(t *testing.T) { + testBinarySerializerLengthCheck(t, getTestView()) +} diff --git a/swarm/testutil/http.go b/swarm/testutil/http.go index 074823032..2309c39f0 100644 --- a/swarm/testutil/http.go +++ b/swarm/testutil/http.go @@ -32,19 +32,6 @@ type TestServer interface { ServeHTTP(http.ResponseWriter, *http.Request) } -// simulated timeProvider -type fakeTimeProvider struct { - currentTime uint64 -} - -func (f *fakeTimeProvider) Tick() { - f.currentTime++ -} - -func (f *fakeTimeProvider) Now() mru.Timestamp { - return mru.Timestamp{Time: f.currentTime} -} - func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, resolver api.Resolver) *TestSwarmServer { dir, err := ioutil.TempDir("", "swarm-storage-test") if err != nil { @@ -67,10 +54,6 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso t.Fatal(err) } - fakeTimeProvider := &fakeTimeProvider{ - currentTime: 42, - } - mru.TimestampProvider = fakeTimeProvider rhparams := &mru.HandlerParams{} rh, err := mru.NewTestHandler(resourceDir, rhparams) if err != nil { @@ -79,34 +62,36 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, reso a := api.NewAPI(fileStore, resolver, rh.Handler, nil) srv := httptest.NewServer(serverFunc(a)) - return &TestSwarmServer{ - Server: srv, - FileStore: fileStore, - dir: dir, - Hasher: storage.MakeHashFunc(storage.DefaultHash)(), - timestampProvider: fakeTimeProvider, + tss := &TestSwarmServer{ + Server: srv, + FileStore: fileStore, + dir: dir, + Hasher: storage.MakeHashFunc(storage.DefaultHash)(), cleanup: func() { srv.Close() rh.Close() os.RemoveAll(dir) os.RemoveAll(resourceDir) }, + CurrentTime: 42, } + mru.TimestampProvider = tss + return tss } type TestSwarmServer struct { *httptest.Server - Hasher storage.SwarmHash - FileStore *storage.FileStore - dir string - cleanup func() - timestampProvider *fakeTimeProvider + Hasher storage.SwarmHash + FileStore *storage.FileStore + dir string + cleanup func() + CurrentTime uint64 } func (t *TestSwarmServer) Close() { t.cleanup() } -func (t *TestSwarmServer) GetCurrentTime() mru.Timestamp { - return t.timestampProvider.Now() +func (t *TestSwarmServer) Now() mru.Timestamp { + return mru.Timestamp{Time: t.CurrentTime} } |
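
For illustration, a minimal usage sketch (not part of the patch) of the new View type introduced in swarm/storage/mru/view.go, showing the intended round trip through a query string via AppendValues/FromValues. It assumes the package's Values parameter is an interface satisfied by net/url's url.Values (only Get/Set are used in the code above) and that the import paths match the go-ethereum layout used elsewhere in this diff; the user address is taken from the test vector in view_test.go.

// viewsketch.go - hedged sketch, not part of the commit.
package main

import (
	"fmt"
	"net/url"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func main() {
	// Build a View the same way getTestView does in view_test.go.
	topic, err := mru.NewTopic("world news report, every hour", nil)
	if err != nil {
		panic(err)
	}
	view := mru.View{
		Topic: topic,
		User:  common.HexToAddress("0x876a8936a7cd0b79ef0735ad0896c1afe278781c"),
	}

	// AppendValues writes the "topic" and "user" keys into the store.
	// Assumption: url.Values satisfies the mru Values interface.
	values := make(url.Values)
	view.AppendValues(values)
	fmt.Println(values.Encode())

	// FromValues recovers an equivalent View on the receiving side,
	// which is what the manifest-less query-string API relies on.
	var recovered mru.View
	if err := recovered.FromValues(values); err != nil {
		panic(err)
	}
	fmt.Println(recovered.Hex() == view.Hex()) // true
}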
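
Likewise, a short sketch of the swarm/testutil change at the end of the diff: TestSwarmServer now registers itself as mru.TimestampProvider (via its Now method) and exposes CurrentTime, so tests can drive the simulated clock directly instead of going through the removed fakeTimeProvider. The dummyServer handler and the test name below are illustrative only; everything else uses the API shown in the diff.

// clocksketch_test.go - hedged sketch, not part of the commit.
package testutil_test

import (
	"net/http"
	"testing"

	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

// dummyServer is a hypothetical no-op TestServer, just enough to
// construct the test server for this illustration.
type dummyServer struct{}

func (dummyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {}

func TestControlledClock(t *testing.T) {
	srv := testutil.NewTestSwarmServer(t, func(*api.API) testutil.TestServer {
		return dummyServer{}
	}, nil)
	defer srv.Close()

	// The server starts at the same simulated time the old fakeTimeProvider used.
	if srv.Now().Time != 42 {
		t.Fatalf("unexpected start time %d", srv.Now().Time)
	}

	// Advancing the exported field advances the clock that resource updates see,
	// since the server itself is now the package's TimestampProvider.
	srv.CurrentTime += 10
	if srv.Now().Time != 52 {
		t.Fatalf("unexpected time after advancing clock: %d", srv.Now().Time)
	}
}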