-rw-r--r--  Makefile | 1
-rw-r--r--  accounts/abi/bind/backends/simulated.go | 32
-rw-r--r--  accounts/abi/method.go | 4
-rwxr-xr-x  build/clean_go_build_cache.sh | 10
-rw-r--r--  cmd/geth/main.go | 6
-rw-r--r--  cmd/swarm/main.go | 94
-rw-r--r--  cmd/swarm/mru.go | 169
-rw-r--r--  common/types.go | 41
-rw-r--r--  common/types_test.go | 180
-rw-r--r--  consensus/clique/clique.go | 1
-rw-r--r--  consensus/clique/snapshot.go | 24
-rw-r--r--  consensus/clique/snapshot_test.go | 2
-rw-r--r--  consensus/ethash/algorithm.go | 9
-rw-r--r--  consensus/ethash/algorithm_test.go | 10
-rw-r--r--  core/bloombits/matcher.go | 10
-rw-r--r--  core/bloombits/matcher_test.go | 2
-rw-r--r--  core/state_transition.go | 6
-rw-r--r--  core/tx_pool.go | 2
-rw-r--r--  core/vm/evm.go | 48
-rw-r--r--  core/vm/gas_table.go | 16
-rw-r--r--  core/vm/instructions.go | 60
-rw-r--r--  core/vm/jump_table.go | 15
-rw-r--r--  core/vm/memory_table.go | 4
-rw-r--r--  core/vm/opcodes.go | 6
-rw-r--r--  crypto/crypto.go | 6
-rw-r--r--  crypto/secp256k1/LICENSE | 31
-rw-r--r--  crypto/secp256k1/ext.h | 18
-rw-r--r--  crypto/secp256k1/panic_cb.go | 18
-rw-r--r--  crypto/secp256k1/secp256.go | 18
-rw-r--r--  crypto/secp256k1/secp256_test.go | 18
-rw-r--r--  eth/api_backend.go | 4
-rw-r--r--  eth/filters/api.go | 80
-rw-r--r--  eth/filters/bench_test.go | 4
-rw-r--r--  eth/filters/filter.go | 77
-rw-r--r--  eth/filters/filter_system_test.go | 35
-rw-r--r--  eth/filters/filter_test.go | 16
-rw-r--r--  eth/tracers/internal/tracers/assets.go | 4
-rw-r--r--  eth/tracers/internal/tracers/noop_tracer.js | 2
-rw-r--r--  interfaces.go | 1
-rw-r--r--  les/api_backend.go | 5
-rw-r--r--  light/lightchain_test.go | 2
-rw-r--r--  p2p/rlpx.go | 15
-rw-r--r--  p2p/simulations/adapters/inproc.go | 18
-rw-r--r--  params/config.go | 2
-rw-r--r--  params/gas_table.go | 17
-rw-r--r--  params/protocol_params.go | 1
-rw-r--r--  rpc/doc.go | 2
-rw-r--r--  rpc/server.go | 13
-rw-r--r--  swarm/api/api.go | 72
-rw-r--r--  swarm/api/client/client.go | 87
-rw-r--r--  swarm/api/client/client_test.go | 160
-rw-r--r--  swarm/api/http/server.go | 188
-rw-r--r--  swarm/api/http/server_test.go | 110
-rw-r--r--  swarm/bmt/bmt.go | 366
-rw-r--r--  swarm/bmt/bmt_test.go | 289
-rw-r--r--  swarm/fuse/swarmfs_unix.go | 4
-rw-r--r--  swarm/network/simulation/bucket.go | 81
-rw-r--r--  swarm/network/simulation/bucket_test.go | 155
-rw-r--r--  swarm/network/simulation/connect.go | 159
-rw-r--r--  swarm/network/simulation/connect_test.go | 306
-rw-r--r--  swarm/network/simulation/events.go | 157
-rw-r--r--  swarm/network/simulation/events_test.go | 104
-rw-r--r--  swarm/network/simulation/example_test.go | 140
-rw-r--r--  swarm/network/simulation/http.go | 63
-rw-r--r--  swarm/network/simulation/http_test.go | 104
-rw-r--r--  swarm/network/simulation/kademlia.go | 96
-rw-r--r--  swarm/network/simulation/kademlia_test.go | 67
-rw-r--r--  swarm/network/simulation/node.go | 357
-rw-r--r--  swarm/network/simulation/node_test.go | 462
-rw-r--r--  swarm/network/simulation/service.go | 65
-rw-r--r--  swarm/network/simulation/service_test.go | 46
-rw-r--r--  swarm/network/simulation/simulation.go | 201
-rw-r--r--  swarm/network/simulation/simulation_test.go | 207
-rw-r--r--  swarm/network_test.go | 249
-rw-r--r--  swarm/storage/mru/doc.go | 61
-rw-r--r--  swarm/storage/mru/error.go | 41
-rw-r--r--  swarm/storage/mru/handler.go | 514
-rw-r--r--  swarm/storage/mru/lookup.go | 117
-rw-r--r--  swarm/storage/mru/lookup_test.go | 85
-rw-r--r--  swarm/storage/mru/metadata.go | 189
-rw-r--r--  swarm/storage/mru/metadata_test.go | 126
-rw-r--r--  swarm/storage/mru/request.go | 297
-rw-r--r--  swarm/storage/mru/request_test.go | 175
-rw-r--r--  swarm/storage/mru/resource.go | 1036
-rw-r--r--  swarm/storage/mru/resource_sign.go | 30
-rw-r--r--  swarm/storage/mru/resource_test.go | 800
-rw-r--r--  swarm/storage/mru/signedupdate.go | 184
-rw-r--r--  swarm/storage/mru/testutil.go | 56
-rw-r--r--  swarm/storage/mru/timestampprovider.go | 71
-rw-r--r--  swarm/storage/mru/update.go | 147
-rw-r--r--  swarm/storage/mru/update_test.go | 72
-rw-r--r--  swarm/storage/mru/updateheader.go | 88
-rw-r--r--  swarm/storage/mru/updateheader_test.go | 64
-rw-r--r--  swarm/swarm.go | 21
-rw-r--r--  swarm/testutil/http.go | 54
95 files changed, 7537 insertions, 2145 deletions
diff --git a/Makefile b/Makefile
index 5cb9231a1..0c1bb3bce 100644
--- a/Makefile
+++ b/Makefile
@@ -41,6 +41,7 @@ lint: ## Run linters.
build/env.sh go run build/ci.go lint
clean:
+ ./build/clean_go_build_cache.sh
rm -fr build/_workspace/pkg/ $(GOBIN)/*
# The devtools target installs tools required for 'go generate'.
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index fd69538d5..fa8828f61 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -324,18 +324,24 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
//
// TODO(karalabe): Deprecate when the subscription one can return past data too.
func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error) {
- // Initialize unset filter boundaried to run from genesis to chain head
- from := int64(0)
- if query.FromBlock != nil {
- from = query.FromBlock.Int64()
- }
- to := int64(-1)
- if query.ToBlock != nil {
- to = query.ToBlock.Int64()
+ var filter *filters.Filter
+ if query.BlockHash != nil {
+ // Block filter requested, construct a single-shot filter
+ filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics)
+ } else {
+ // Initialize unset filter boundaries to run from genesis to chain head
+ from := int64(0)
+ if query.FromBlock != nil {
+ from = query.FromBlock.Int64()
+ }
+ to := int64(-1)
+ if query.ToBlock != nil {
+ to = query.ToBlock.Int64()
+ }
+ // Construct the range filter
+ filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
}
- // Construct and execute the filter
- filter := filters.New(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics)
-
+ // Run the filter and return all the logs
logs, err := filter.Logs(ctx)
if err != nil {
return nil, err
@@ -430,6 +436,10 @@ func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumb
return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
}
+func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
+ return fb.bc.GetHeaderByHash(hash), nil
+}
+
func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
number := rawdb.ReadHeaderNumber(fb.db, hash)
if number == nil {
diff --git a/accounts/abi/method.go b/accounts/abi/method.go
index f434ffdbe..583105765 100644
--- a/accounts/abi/method.go
+++ b/accounts/abi/method.go
@@ -47,10 +47,8 @@ type Method struct {
// Please note that "int" is substitute for its canonical representation "int256"
func (method Method) Sig() string {
types := make([]string, len(method.Inputs))
- i := 0
- for _, input := range method.Inputs {
+ for i, input := range method.Inputs {
types[i] = input.Type.String()
- i++
}
return fmt.Sprintf("%v(%v)", method.Name, strings.Join(types, ","))
}
diff --git a/build/clean_go_build_cache.sh b/build/clean_go_build_cache.sh
new file mode 100755
index 000000000..e6a523fb4
--- /dev/null
+++ b/build/clean_go_build_cache.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+function version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
+
+golang_version=$(go version |cut -d' ' -f3 |sed 's/go//')
+
+# Clean go build cache when go version is greater than or equal to 1.10
+if !(version_gt 1.10 $golang_version); then
+ go clean -cache
+fi
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index e42aab30a..1c618de35 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -305,11 +305,11 @@ func startNode(ctx *cli.Context, stack *node.Node) {
status, _ := event.Wallet.Status()
log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", status)
+ derivationPath := accounts.DefaultBaseDerivationPath
if event.Wallet.URL().Scheme == "ledger" {
- event.Wallet.SelfDerive(accounts.DefaultLedgerBaseDerivationPath, stateReader)
- } else {
- event.Wallet.SelfDerive(accounts.DefaultBaseDerivationPath, stateReader)
+ derivationPath = accounts.DefaultLedgerBaseDerivationPath
}
+ event.Wallet.SelfDerive(derivationPath, stateReader)
case accounts.WalletDropped:
log.Info("Old wallet dropped", "url", event.Wallet.URL())
diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go
index 5b0767951..d1dbb44df 100644
--- a/cmd/swarm/main.go
+++ b/cmd/swarm/main.go
@@ -182,6 +182,18 @@ var (
Usage: "Number of recent chunks cached in memory (default 5000)",
EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
}
+ SwarmResourceMultihashFlag = cli.BoolFlag{
+ Name: "multihash",
+ Usage: "Determines how to interpret data for a resource update. If not present, data will be interpreted as raw, literal data that will be included in the resource",
+ }
+ SwarmResourceNameFlag = cli.StringFlag{
+ Name: "name",
+ Usage: "User-defined name for the new resource",
+ }
+ SwarmResourceDataOnCreateFlag = cli.StringFlag{
+ Name: "data",
+ Usage: "Initializes the resource with the given hex-encoded data. Data must be prefixed by 0x",
+ }
)
//declare a few constant error messages, useful for later error check comparisons in test
@@ -190,6 +202,15 @@ var (
SWARM_ERR_SWAP_SET_NO_API = "SWAP is enabled but --swap-api is not set"
)
+// this help command gets added to any subcommand that does not define it explicitly
+var defaultSubcommandHelp = cli.Command{
+ Action: func(ctx *cli.Context) { cli.ShowCommandHelpAndExit(ctx, "", 1) },
+ CustomHelpTemplate: helpTemplate,
+ Name: "help",
+ Usage: "shows this help",
+ Hidden: true,
+}
+
var defaultNodeConfig = node.DefaultConfig
// This init function sets defaults so cmd/swarm can run alongside geth.
@@ -227,6 +248,41 @@ func init() {
Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash",
},
{
+ CustomHelpTemplate: helpTemplate,
+ Name: "resource",
+ Usage: "(Advanced) Create and update Mutable Resources",
+ ArgsUsage: "<create|update|info>",
+ Description: "Works with Mutable Resource Updates",
+ Subcommands: []cli.Command{
+ {
+ Action: resourceCreate,
+ CustomHelpTemplate: helpTemplate,
+ Name: "create",
+ Usage: "creates a new Mutable Resource",
+ ArgsUsage: "<frequency>",
+ Description: "creates a new Mutable Resource",
+ Flags: []cli.Flag{SwarmResourceNameFlag, SwarmResourceDataOnCreateFlag, SwarmResourceMultihashFlag},
+ },
+ {
+ Action: resourceUpdate,
+ CustomHelpTemplate: helpTemplate,
+ Name: "update",
+ Usage: "updates the content of an existing Mutable Resource",
+ ArgsUsage: "<Manifest Address or ENS domain> <0x Hex data>",
+ Description: "updates the content of an existing Mutable Resource",
+ Flags: []cli.Flag{SwarmResourceMultihashFlag},
+ },
+ {
+ Action: resourceInfo,
+ CustomHelpTemplate: helpTemplate,
+ Name: "info",
+ Usage: "obtains information about an existing Mutable Resource",
+ ArgsUsage: "<Manifest Address or ENS domain>",
+ Description: "obtains information about an existing Mutable Resource",
+ },
+ },
+ },
+ {
Action: list,
CustomHelpTemplate: helpTemplate,
Name: "ls",
@@ -377,6 +433,11 @@ pv(1) tool to get a progress bar:
// See config.go
DumpConfigCommand,
}
+
+ // append a hidden help subcommand to all commands that have subcommands
+ // if a help command was already defined above, that one will take precedence.
+ addDefaultHelpSubcommands(app.Commands)
+
sort.Sort(cli.CommandsByName(app.Commands))
app.Flags = []cli.Flag{
@@ -549,6 +610,26 @@ func getAccount(bzzaccount string, ctx *cli.Context, stack *node.Node) *ecdsa.Pr
return decryptStoreAccount(ks, bzzaccount, utils.MakePasswordList(ctx))
}
+// getPrivKey returns the private key of the specified bzzaccount
+// Used only by client commands, such as `resource`
+func getPrivKey(ctx *cli.Context) *ecdsa.PrivateKey {
+ // booting up the swarm node just as we do in bzzd action
+ bzzconfig, err := buildConfig(ctx)
+ if err != nil {
+ utils.Fatalf("unable to configure swarm: %v", err)
+ }
+ cfg := defaultNodeConfig
+ if _, err := os.Stat(bzzconfig.Path); err == nil {
+ cfg.DataDir = bzzconfig.Path
+ }
+ utils.SetNodeConfig(ctx, &cfg)
+ stack, err := node.New(&cfg)
+ if err != nil {
+ utils.Fatalf("can't create node: %v", err)
+ }
+ return getAccount(bzzconfig.BzzAccount, ctx, stack)
+}
+
func decryptStoreAccount(ks *keystore.KeyStore, account string, passwords []string) *ecdsa.PrivateKey {
var a accounts.Account
var err error
@@ -613,3 +694,16 @@ func injectBootnodes(srv *p2p.Server, nodes []string) {
srv.AddPeer(n)
}
}
+
+// addDefaultHelpSubcommands scans through the defined CLI commands and adds
+// a basic help subcommand to each
+// if a help command is already defined, it will take precedence over the default.
+func addDefaultHelpSubcommands(commands []cli.Command) {
+ for i := range commands {
+ cmd := &commands[i]
+ if cmd.Subcommands != nil {
+ cmd.Subcommands = append(cmd.Subcommands, defaultSubcommandHelp)
+ addDefaultHelpSubcommands(cmd.Subcommands)
+ }
+ }
+}
diff --git a/cmd/swarm/mru.go b/cmd/swarm/mru.go
new file mode 100644
index 000000000..6176b6d6c
--- /dev/null
+++ b/cmd/swarm/mru.go
@@ -0,0 +1,169 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+// Command resource allows the user to create and update signed mutable resource updates
+package main
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+ "github.com/ethereum/go-ethereum/swarm/storage/mru"
+ "gopkg.in/urfave/cli.v1"
+)
+
+func NewGenericSigner(ctx *cli.Context) mru.Signer {
+ return mru.NewGenericSigner(getPrivKey(ctx))
+}
+
+// swarm resource create <frequency> [--name <name>] [--data <0x Hexdata> [--multihash=false]]
+// swarm resource update <Manifest Address or ENS domain> <0x Hexdata> [--multihash=false]
+// swarm resource info <Manifest Address or ENS domain>
+
+func resourceCreate(ctx *cli.Context) {
+ args := ctx.Args()
+
+ var (
+ bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+ client = swarm.NewClient(bzzapi)
+ multihash = ctx.Bool(SwarmResourceMultihashFlag.Name)
+ initialData = ctx.String(SwarmResourceDataOnCreateFlag.Name)
+ name = ctx.String(SwarmResourceNameFlag.Name)
+ )
+
+ if len(args) < 1 {
+ fmt.Println("Incorrect number of arguments")
+ cli.ShowCommandHelpAndExit(ctx, "create", 1)
+ return
+ }
+ signer := NewGenericSigner(ctx)
+ frequency, err := strconv.ParseUint(args[0], 10, 64)
+ if err != nil {
+ fmt.Printf("Frequency formatting error: %s\n", err.Error())
+ cli.ShowCommandHelpAndExit(ctx, "create", 1)
+ return
+ }
+
+ metadata := mru.ResourceMetadata{
+ Name: name,
+ Frequency: frequency,
+ Owner: signer.Address(),
+ }
+
+ var newResourceRequest *mru.Request
+ if initialData != "" {
+ initialDataBytes, err := hexutil.Decode(initialData)
+ if err != nil {
+ fmt.Printf("Error parsing data: %s\n", err.Error())
+ cli.ShowCommandHelpAndExit(ctx, "create", 1)
+ return
+ }
+ newResourceRequest, err = mru.NewCreateUpdateRequest(&metadata)
+ if err != nil {
+ utils.Fatalf("Error creating new resource request: %s", err)
+ }
+ newResourceRequest.SetData(initialDataBytes, multihash)
+ if err = newResourceRequest.Sign(signer); err != nil {
+ utils.Fatalf("Error signing resource update: %s", err.Error())
+ }
+ } else {
+ newResourceRequest, err = mru.NewCreateRequest(&metadata)
+ if err != nil {
+ utils.Fatalf("Error creating new resource request: %s", err)
+ }
+ }
+
+ manifestAddress, err := client.CreateResource(newResourceRequest)
+ if err != nil {
+ utils.Fatalf("Error creating resource: %s", err.Error())
+ return
+ }
+ fmt.Println(manifestAddress) // output manifest address to the user in a single line (useful for other commands to pick up)
+
+}
+
+func resourceUpdate(ctx *cli.Context) {
+ args := ctx.Args()
+
+ var (
+ bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+ client = swarm.NewClient(bzzapi)
+ multihash = ctx.Bool(SwarmResourceMultihashFlag.Name)
+ )
+
+ if len(args) < 2 {
+ fmt.Println("Incorrect number of arguments")
+ cli.ShowCommandHelpAndExit(ctx, "update", 1)
+ return
+ }
+ signer := NewGenericSigner(ctx)
+ manifestAddressOrDomain := args[0]
+ data, err := hexutil.Decode(args[1])
+ if err != nil {
+ utils.Fatalf("Error parsing data: %s", err.Error())
+ return
+ }
+
+ // Retrieve resource status and metadata out of the manifest
+ updateRequest, err := client.GetResourceMetadata(manifestAddressOrDomain)
+ if err != nil {
+ utils.Fatalf("Error retrieving resource status: %s", err.Error())
+ }
+
+ // set the new data
+ updateRequest.SetData(data, multihash)
+
+ // sign update
+ if err = updateRequest.Sign(signer); err != nil {
+ utils.Fatalf("Error signing resource update: %s", err.Error())
+ }
+
+ // post update
+ err = client.UpdateResource(updateRequest)
+ if err != nil {
+ utils.Fatalf("Error updating resource: %s", err.Error())
+ return
+ }
+}
+
+func resourceInfo(ctx *cli.Context) {
+ var (
+ bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+ client = swarm.NewClient(bzzapi)
+ )
+ args := ctx.Args()
+ if len(args) < 1 {
+ fmt.Println("Incorrect number of arguments.")
+ cli.ShowCommandHelpAndExit(ctx, "info", 1)
+ return
+ }
+ manifestAddressOrDomain := args[0]
+ metadata, err := client.GetResourceMetadata(manifestAddressOrDomain)
+ if err != nil {
+ utils.Fatalf("Error retrieving resource metadata: %s", err.Error())
+ return
+ }
+ encodedMetadata, err := metadata.MarshalJSON()
+ if err != nil {
+ utils.Fatalf("Error encoding metadata to JSON for display:%s", err)
+ }
+ fmt.Println(string(encodedMetadata))
+}
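For context only (not part of the patch): the same call sequence the CLI wires together above can also be driven from Go through the swarm client package. The sketch below uses only the calls visible in this file (NewClient, GetResourceMetadata, SetData, Sign, UpdateResource); the endpoint URL, the throwaway key and the manifest address are placeholder assumptions, so it illustrates the flow rather than a working end-to-end update.

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/crypto"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func main() {
	client := swarm.NewClient("http://localhost:8500") // assumed local bzz HTTP API
	key, _ := crypto.GenerateKey()                     // throwaway key; a real update must be signed by the resource owner
	signer := mru.NewGenericSigner(key)

	// Retrieve the resource status and metadata out of the manifest, set the
	// new data, sign the update and post it, just as resourceUpdate does above.
	req, err := client.GetResourceMetadata("<manifest address or ENS domain>") // placeholder
	if err != nil {
		log.Fatal(err)
	}
	req.SetData([]byte("new content"), false) // raw data, not a multihash
	if err := req.Sign(signer); err != nil {
		log.Fatal(err)
	}
	if err := client.UpdateResource(req); err != nil {
		log.Fatal(err)
	}
}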
diff --git a/common/types.go b/common/types.go
index 4d374ad24..71fe5c95c 100644
--- a/common/types.go
+++ b/common/types.go
@@ -17,6 +17,7 @@
package common
import (
+ "database/sql/driver"
"encoding/hex"
"encoding/json"
"fmt"
@@ -31,7 +32,9 @@ import (
// Lengths of hashes and addresses in bytes.
const (
- HashLength = 32
+ // HashLength is the expected length of the hash
+ HashLength = 32
+ // AddressLength is the expected length of the address
AddressLength = 20
)
@@ -120,6 +123,24 @@ func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value {
return reflect.ValueOf(h)
}
+// Scan implements Scanner for database/sql.
+func (h *Hash) Scan(src interface{}) error {
+ srcB, ok := src.([]byte)
+ if !ok {
+ return fmt.Errorf("can't scan %T into Hash", src)
+ }
+ if len(srcB) != HashLength {
+ return fmt.Errorf("can't scan []byte of len %d into Hash, want %d", len(srcB), HashLength)
+ }
+ copy(h[:], srcB)
+ return nil
+}
+
+// Value implements valuer for database/sql.
+func (h Hash) Value() (driver.Value, error) {
+ return h[:], nil
+}
+
// UnprefixedHash allows marshaling a Hash without 0x prefix.
type UnprefixedHash Hash
@@ -229,6 +250,24 @@ func (a *Address) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalFixedJSON(addressT, input, a[:])
}
+// Scan implements Scanner for database/sql.
+func (a *Address) Scan(src interface{}) error {
+ srcB, ok := src.([]byte)
+ if !ok {
+ return fmt.Errorf("can't scan %T into Address", src)
+ }
+ if len(srcB) != AddressLength {
+ return fmt.Errorf("can't scan []byte of len %d into Address, want %d", len(srcB), AddressLength)
+ }
+ copy(a[:], srcB)
+ return nil
+}
+
+// Value implements valuer for database/sql.
+func (a Address) Value() (driver.Value, error) {
+ return a[:], nil
+}
+
// UnprefixedAddress allows marshaling an Address without 0x prefix.
type UnprefixedAddress Address
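As a quick illustration (not part of the patch), the new Scan/Value methods round-trip a Hash through the database/sql interfaces; the hash literal below is an arbitrary placeholder.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	h := common.HexToHash("0x00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff")

	v, err := h.Value() // driver.Value holding the raw 32 bytes
	if err != nil {
		panic(err)
	}

	var back common.Hash
	if err := back.Scan(v); err != nil { // accepts only a []byte of exactly HashLength
		panic(err)
	}
	fmt.Println(back == h) // true
}

Address behaves the same way, except that Scan insists on AddressLength (20) bytes.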
diff --git a/common/types_test.go b/common/types_test.go
index 9e0c5be3a..7095ccd01 100644
--- a/common/types_test.go
+++ b/common/types_test.go
@@ -17,9 +17,10 @@
package common
import (
+ "database/sql/driver"
"encoding/json"
-
"math/big"
+ "reflect"
"strings"
"testing"
)
@@ -193,3 +194,180 @@ func TestMixedcaseAccount_Address(t *testing.T) {
}
}
+
+func TestHash_Scan(t *testing.T) {
+ type args struct {
+ src interface{}
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "working scan",
+ args: args{src: []byte{
+ 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e,
+ 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
+ 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
+ 0x10, 0x00,
+ }},
+ wantErr: false,
+ },
+ {
+ name: "non working scan",
+ args: args{src: int64(1234567890)},
+ wantErr: true,
+ },
+ {
+ name: "invalid length scan",
+ args: args{src: []byte{
+ 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e,
+ 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
+ 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
+ }},
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ h := &Hash{}
+ if err := h.Scan(tt.args.src); (err != nil) != tt.wantErr {
+ t.Errorf("Hash.Scan() error = %v, wantErr %v", err, tt.wantErr)
+ }
+
+ if !tt.wantErr {
+ for i := range h {
+ if h[i] != tt.args.src.([]byte)[i] {
+ t.Errorf(
+ "Hash.Scan() didn't scan the %d src correctly (have %X, want %X)",
+ i, h[i], tt.args.src.([]byte)[i],
+ )
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestHash_Value(t *testing.T) {
+ b := []byte{
+ 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e,
+ 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
+ 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
+ 0x10, 0x00,
+ }
+ var usedH Hash
+ usedH.SetBytes(b)
+ tests := []struct {
+ name string
+ h Hash
+ want driver.Value
+ wantErr bool
+ }{
+ {
+ name: "Working value",
+ h: usedH,
+ want: b,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := tt.h.Value()
+ if (err != nil) != tt.wantErr {
+ t.Errorf("Hash.Value() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("Hash.Value() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestAddress_Scan(t *testing.T) {
+ type args struct {
+ src interface{}
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "working scan",
+ args: args{src: []byte{
+ 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e,
+ 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
+ }},
+ wantErr: false,
+ },
+ {
+ name: "non working scan",
+ args: args{src: int64(1234567890)},
+ wantErr: true,
+ },
+ {
+ name: "invalid length scan",
+ args: args{src: []byte{
+ 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e,
+ 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a,
+ }},
+ wantErr: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ a := &Address{}
+ if err := a.Scan(tt.args.src); (err != nil) != tt.wantErr {
+ t.Errorf("Address.Scan() error = %v, wantErr %v", err, tt.wantErr)
+ }
+
+ if !tt.wantErr {
+ for i := range a {
+ if a[i] != tt.args.src.([]byte)[i] {
+ t.Errorf(
+ "Address.Scan() didn't scan the %d src correctly (have %X, want %X)",
+ i, a[i], tt.args.src.([]byte)[i],
+ )
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestAddress_Value(t *testing.T) {
+ b := []byte{
+ 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e,
+ 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15,
+ }
+ var usedA Address
+ usedA.SetBytes(b)
+ tests := []struct {
+ name string
+ a Address
+ want driver.Value
+ wantErr bool
+ }{
+ {
+ name: "Working value",
+ a: usedA,
+ want: b,
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := tt.a.Value()
+ if (err != nil) != tt.wantErr {
+ t.Errorf("Address.Value() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("Address.Value() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go
index 01df4d5c7..8968f500f 100644
--- a/consensus/clique/clique.go
+++ b/consensus/clique/clique.go
@@ -53,7 +53,6 @@ const (
// Clique proof-of-authority protocol constants.
var (
epochLength = uint64(30000) // Default number of blocks after which to checkpoint and reset the pending votes
- blockPeriod = uint64(15) // Default minimum difference between two consecutive block's timestamps
extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
diff --git a/consensus/clique/snapshot.go b/consensus/clique/snapshot.go
index 9ebdb8df1..2333d6924 100644
--- a/consensus/clique/snapshot.go
+++ b/consensus/clique/snapshot.go
@@ -19,6 +19,7 @@ package clique
import (
"bytes"
"encoding/json"
+ "sort"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@@ -56,6 +57,13 @@ type Snapshot struct {
Tally map[common.Address]Tally `json:"tally"` // Current vote tally to avoid recalculating
}
+// signers implements the sort interface to allow sorting a list of addresses
+type signers []common.Address
+
+func (s signers) Len() int { return len(s) }
+func (s signers) Less(i, j int) bool { return bytes.Compare(s[i][:], s[j][:]) < 0 }
+func (s signers) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
// newSnapshot creates a new snapshot with the specified startup parameters. This
// method does not initialize the set of recent signers, so only ever use if for
// the genesis block.
@@ -286,18 +294,12 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
// signers retrieves the list of authorized signers in ascending order.
func (s *Snapshot) signers() []common.Address {
- signers := make([]common.Address, 0, len(s.Signers))
- for signer := range s.Signers {
- signers = append(signers, signer)
- }
- for i := 0; i < len(signers); i++ {
- for j := i + 1; j < len(signers); j++ {
- if bytes.Compare(signers[i][:], signers[j][:]) > 0 {
- signers[i], signers[j] = signers[j], signers[i]
- }
- }
+ sigs := make([]common.Address, 0, len(s.Signers))
+ for sig := range s.Signers {
+ sigs = append(sigs, sig)
}
- return signers
+ sort.Sort(signers(sigs))
+ return sigs
}
// inturn returns if a signer at a given block height is in-turn or not.
diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go
index 29a837983..5ac730c9e 100644
--- a/consensus/clique/snapshot_test.go
+++ b/consensus/clique/snapshot_test.go
@@ -360,7 +360,7 @@ func TestVoting(t *testing.T) {
for j, vote := range tt.votes {
headers[j] = &types.Header{
Number: big.NewInt(int64(j) + 1),
- Time: big.NewInt(int64(j) * int64(blockPeriod)),
+ Time: big.NewInt(int64(j) * 15),
Coinbase: accounts.address(vote.voted),
Extra: make([]byte, extraVanity+extraSeal),
}
diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go
index fa1c2c824..f252a7f3a 100644
--- a/consensus/ethash/algorithm.go
+++ b/consensus/ethash/algorithm.go
@@ -214,15 +214,6 @@ func swap(buffer []byte) {
}
}
-// prepare converts an ethash cache or dataset from a byte stream into the internal
-// int representation. All ethash methods work with ints to avoid constant byte to
-// int conversions as well as to handle both little and big endian systems.
-func prepare(dest []uint32, src []byte) {
- for i := 0; i < len(dest); i++ {
- dest[i] = binary.LittleEndian.Uint32(src[i*4:])
- }
-}
-
// fnv is an algorithm inspired by the FNV hash, which in some cases is used as
// a non-associative substitute for XOR. Note that we multiply the prime with
// the full 32-bit input, in contrast with the FNV-1 spec which multiplies the
diff --git a/consensus/ethash/algorithm_test.go b/consensus/ethash/algorithm_test.go
index 841e39233..f0c6465fd 100644
--- a/consensus/ethash/algorithm_test.go
+++ b/consensus/ethash/algorithm_test.go
@@ -18,6 +18,7 @@ package ethash
import (
"bytes"
+ "encoding/binary"
"io/ioutil"
"math/big"
"os"
@@ -30,6 +31,15 @@ import (
"github.com/ethereum/go-ethereum/core/types"
)
+// prepare converts an ethash cache or dataset from a byte stream into the internal
+// int representation. All ethash methods work with ints to avoid constant byte to
+// int conversions as well as to handle both little and big endian systems.
+func prepare(dest []uint32, src []byte) {
+ for i := 0; i < len(dest); i++ {
+ dest[i] = binary.LittleEndian.Uint32(src[i*4:])
+ }
+}
+
// Tests whether the dataset size calculator works correctly by cross checking the
// hard coded lookup table with the value generated by it.
func TestSizeCalculations(t *testing.T) {
diff --git a/core/bloombits/matcher.go b/core/bloombits/matcher.go
index 8d78adb75..3ec0d5ae9 100644
--- a/core/bloombits/matcher.go
+++ b/core/bloombits/matcher.go
@@ -59,7 +59,7 @@ type partialMatches struct {
// It can also have the actual results set to be used as a delivery data struct.
//
// The contest and error fields are used by the light client to terminate matching
-// early if an error is enountered on some path of the pipeline.
+// early if an error is encountered on some path of the pipeline.
type Retrieval struct {
Bit uint
Sections []uint64
@@ -218,7 +218,7 @@ func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uin
// run creates a daisy-chain of sub-matchers, one for the address set and one
// for each topic set, each sub-matcher receiving a section only if the previous
// ones have all found a potential match in one of the blocks of the section,
-// then binary AND-ing its own matches and forwaring the result to the next one.
+// then binary AND-ing its own matches and forwarding the result to the next one.
//
// The method starts feeding the section indexes into the first sub-matcher on a
// new goroutine and returns a sink channel receiving the results.
@@ -543,7 +543,7 @@ func (s *MatcherSession) Error() error {
}
// AllocateRetrieval assigns a bloom bit index to a client process that can either
-// immediately reuest and fetch the section contents assigned to this bit or wait
+// immediately request and fetch the section contents assigned to this bit or wait
// a little while for more sections to be requested.
func (s *MatcherSession) AllocateRetrieval() (uint, bool) {
fetcher := make(chan uint)
@@ -599,8 +599,8 @@ func (s *MatcherSession) DeliverSections(bit uint, sections []uint64, bitsets []
}
}
-// Multiplex polls the matcher session for rerieval tasks and multiplexes it into
-// the reuested retrieval queue to be serviced together with other sessions.
+// Multiplex polls the matcher session for retrieval tasks and multiplexes it into
+// the requested retrieval queue to be serviced together with other sessions.
//
// This method will block for the lifetime of the session. Even after termination
// of the session, any request in-flight need to be responded to! Empty responses
diff --git a/core/bloombits/matcher_test.go b/core/bloombits/matcher_test.go
index 7a5f78ef3..91143e525 100644
--- a/core/bloombits/matcher_test.go
+++ b/core/bloombits/matcher_test.go
@@ -156,7 +156,7 @@ func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, in
// Track the number of retrieval requests made
var requested uint32
- // Start the matching session for the filter and the retriver goroutines
+ // Start the matching session for the filter and the retriever goroutines
quit := make(chan struct{})
matches := make(chan uint64, 16)
diff --git a/core/state_transition.go b/core/state_transition.go
index 5654cd01e..fda081b7d 100644
--- a/core/state_transition.go
+++ b/core/state_transition.go
@@ -35,7 +35,7 @@ var (
The State Transitioning Model
A state transition is a change made when a transaction is applied to the current world state
-The state transitioning model does all all the necessary work to work out a valid new state root.
+The state transitioning model does all the necessary work to work out a valid new state root.
1) Nonce handling
2) Pre pay gas
@@ -178,8 +178,8 @@ func (st *StateTransition) preCheck() error {
}
// TransitionDb will transition the state by applying the current message and
-// returning the result including the the used gas. It returns an error if it
-// failed. An error indicates a consensus issue.
+// returning the result including the used gas. It returns an error if it failed.
+// An error indicates a consensus issue.
func (st *StateTransition) TransitionDb() (ret []byte, usedGas uint64, failed bool, err error) {
if err = st.preCheck(); err != nil {
return
diff --git a/core/tx_pool.go b/core/tx_pool.go
index 9c958e3b6..cfc92eb8b 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -130,7 +130,7 @@ type TxPoolConfig struct {
PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
- AccountSlots uint64 // Minimum number of executable transaction slots guaranteed per account
+ AccountSlots uint64 // Number of executable transaction slots guaranteed per account
GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts
AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts
diff --git a/core/vm/evm.go b/core/vm/evm.go
index 69c8ec478..0189351e7 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -319,9 +319,8 @@ func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte
return ret, contract.Gas, err
}
-// Create creates a new contract using code as deployment code.
-func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
-
+// create creates a new contract using code as deployment code.
+func (evm *EVM) create(caller ContractRef, code []byte, gas uint64, value *big.Int, address common.Address) ([]byte, common.Address, uint64, error) {
// Depth check execution. Fail if we're trying to execute above the
// limit.
if evm.depth > int(params.CallCreateDepth) {
@@ -330,39 +329,38 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
if !evm.CanTransfer(evm.StateDB, caller.Address(), value) {
return nil, common.Address{}, gas, ErrInsufficientBalance
}
- // Ensure there's no existing contract already at the designated address
nonce := evm.StateDB.GetNonce(caller.Address())
evm.StateDB.SetNonce(caller.Address(), nonce+1)
- contractAddr = crypto.CreateAddress(caller.Address(), nonce)
- contractHash := evm.StateDB.GetCodeHash(contractAddr)
- if evm.StateDB.GetNonce(contractAddr) != 0 || (contractHash != (common.Hash{}) && contractHash != emptyCodeHash) {
+ // Ensure there's no existing contract already at the designated address
+ contractHash := evm.StateDB.GetCodeHash(address)
+ if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != emptyCodeHash) {
return nil, common.Address{}, 0, ErrContractAddressCollision
}
// Create a new account on the state
snapshot := evm.StateDB.Snapshot()
- evm.StateDB.CreateAccount(contractAddr)
+ evm.StateDB.CreateAccount(address)
if evm.ChainConfig().IsEIP158(evm.BlockNumber) {
- evm.StateDB.SetNonce(contractAddr, 1)
+ evm.StateDB.SetNonce(address, 1)
}
- evm.Transfer(evm.StateDB, caller.Address(), contractAddr, value)
+ evm.Transfer(evm.StateDB, caller.Address(), address, value)
// initialise a new contract and set the code that is to be used by the
// EVM. The contract is a scoped environment for this execution context
// only.
- contract := NewContract(caller, AccountRef(contractAddr), value, gas)
- contract.SetCallCode(&contractAddr, crypto.Keccak256Hash(code), code)
+ contract := NewContract(caller, AccountRef(address), value, gas)
+ contract.SetCallCode(&address, crypto.Keccak256Hash(code), code)
if evm.vmConfig.NoRecursion && evm.depth > 0 {
- return nil, contractAddr, gas, nil
+ return nil, address, gas, nil
}
if evm.vmConfig.Debug && evm.depth == 0 {
- evm.vmConfig.Tracer.CaptureStart(caller.Address(), contractAddr, true, code, gas, value)
+ evm.vmConfig.Tracer.CaptureStart(caller.Address(), address, true, code, gas, value)
}
start := time.Now()
- ret, err = run(evm, contract, nil)
+ ret, err := run(evm, contract, nil)
// check whether the max code size has been exceeded
maxCodeSizeExceeded := evm.ChainConfig().IsEIP158(evm.BlockNumber) && len(ret) > params.MaxCodeSize
@@ -373,7 +371,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
if err == nil && !maxCodeSizeExceeded {
createDataGas := uint64(len(ret)) * params.CreateDataGas
if contract.UseGas(createDataGas) {
- evm.StateDB.SetCode(contractAddr, ret)
+ evm.StateDB.SetCode(address, ret)
} else {
err = ErrCodeStoreOutOfGas
}
@@ -395,7 +393,23 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
if evm.vmConfig.Debug && evm.depth == 0 {
evm.vmConfig.Tracer.CaptureEnd(ret, gas-contract.Gas, time.Since(start), err)
}
- return ret, contractAddr, contract.Gas, err
+ return ret, address, contract.Gas, err
+
+}
+
+// Create creates a new contract using code as deployment code.
+func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
+ contractAddr = crypto.CreateAddress(caller.Address(), evm.StateDB.GetNonce(caller.Address()))
+ return evm.create(caller, code, gas, value, contractAddr)
+}
+
+// Create2 creates a new contract using code as deployment code.
+//
+// The difference between Create2 and Create is that Create2 uses sha3(msg.sender ++ salt ++ init_code)[12:]
+// instead of the usual sender-and-nonce hash as the address where the contract is initialized.
+func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
+ contractAddr = crypto.CreateAddress2(caller.Address(), common.BigToHash(salt), code)
+ return evm.create(caller, code, gas, endowment, contractAddr)
}
// ChainConfig returns the environment's chain configuration
diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go
index 0764c67a4..f9eea319e 100644
--- a/core/vm/gas_table.go
+++ b/core/vm/gas_table.go
@@ -241,6 +241,10 @@ func gasExtCodeCopy(gt params.GasTable, evm *EVM, contract *Contract, stack *Sta
return gas, nil
}
+func gasExtCodeHash(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+ return gt.ExtcodeHash, nil
+}
+
func gasMLoad(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
var overflow bool
gas, err := memoryGasCost(mem, memorySize)
@@ -289,6 +293,18 @@ func gasCreate(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, m
return gas, nil
}
+func gasCreate2(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
+ var overflow bool
+ gas, err := memoryGasCost(mem, memorySize)
+ if err != nil {
+ return 0, err
+ }
+ if gas, overflow = math.SafeAdd(gas, params.Create2Gas); overflow {
+ return 0, errGasUintOverflow
+ }
+ return gas, nil
+}
+
func gasBalance(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
return gt.Balance, nil
}
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 1ec13ba35..122fc21e4 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -496,6 +496,38 @@ func opExtCodeCopy(pc *uint64, evm *EVM, contract *Contract, memory *Memory, sta
return nil, nil
}
+// opExtCodeHash returns the code hash of a specified account.
+// There are several cases in which this function is called; in each case we can rely on
+// the `state.GetCodeHash` function to ensure correctness.
+// (1) Caller tries to get the code hash of a normal contract account, state
+// should return the relative code hash and set it as the result.
+//
+// (2) Caller tries to get the code hash of a non-existent account, state should
+// return common.Hash{} and zero will be set as the result.
+//
+// (3) Caller tries to get the code hash for an account without contract code,
+// state should return emptyCodeHash(0xc5d246...) as the result.
+//
+// (4) Caller tries to get the code hash of a precompiled account, the result
+// should be zero or emptyCodeHash.
+//
+// It is worth noting that in order to avoid unnecessary create and clean,
+// all precompile accounts on mainnet have been transferred 1 wei, so the return
+// here should be emptyCodeHash.
+// If the precompile account is not transferred any amount on a private or
+// customized chain, the return value will be zero.
+//
+// (5) Caller tries to get the code hash for an account which is marked as suicided
+// in the current transaction, the code hash of this account should be returned.
+//
+// (6) Caller tries to get the code hash for an account which is marked as deleted,
+// this account should be regarded as a non-existent account and zero should be returned.
+func opExtCodeHash(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+ slot := stack.peek()
+ slot.SetBytes(evm.StateDB.GetCodeHash(common.BigToAddress(slot)).Bytes())
+ return nil, nil
+}
+
func opGasprice(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
stack.push(evm.interpreter.intPool.get().Set(evm.GasPrice))
return nil, nil
@@ -665,6 +697,34 @@ func opCreate(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *S
return nil, nil
}
+func opCreate2(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
+ var (
+ endowment = stack.pop()
+ offset, size = stack.pop(), stack.pop()
+ salt = stack.pop()
+ input = memory.Get(offset.Int64(), size.Int64())
+ gas = contract.Gas
+ )
+
+ // Apply EIP150
+ gas -= gas / 64
+ contract.UseGas(gas)
+ res, addr, returnGas, suberr := evm.Create2(contract, input, gas, endowment, salt)
+ // Push item on the stack based on the returned error.
+ if suberr != nil {
+ stack.push(evm.interpreter.intPool.getZero())
+ } else {
+ stack.push(addr.Big())
+ }
+ contract.Gas += returnGas
+ evm.interpreter.intPool.put(endowment, offset, size, salt)
+
+ if suberr == errExecutionReverted {
+ return res, nil
+ }
+ return nil, nil
+}
+
func opCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
// Pop gas. The actual gas in in evm.callGasTemp.
evm.interpreter.intPool.put(stack.pop())
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 111a9b798..f387e6133 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -80,6 +80,21 @@ func newConstantinopleInstructionSet() [256]operation {
validateStack: makeStackFunc(2, 1),
valid: true,
}
+ instructionSet[EXTCODEHASH] = operation{
+ execute: opExtCodeHash,
+ gasCost: gasExtCodeHash,
+ validateStack: makeStackFunc(1, 1),
+ valid: true,
+ }
+ instructionSet[CREATE2] = operation{
+ execute: opCreate2,
+ gasCost: gasCreate2,
+ validateStack: makeStackFunc(4, 1),
+ memorySize: memoryCreate2,
+ valid: true,
+ writes: true,
+ returns: true,
+ }
return instructionSet
}
diff --git a/core/vm/memory_table.go b/core/vm/memory_table.go
index ab49ebb38..8fa6c90ca 100644
--- a/core/vm/memory_table.go
+++ b/core/vm/memory_table.go
@@ -58,6 +58,10 @@ func memoryCreate(stack *Stack) *big.Int {
return calcMemSize(stack.Back(1), stack.Back(2))
}
+func memoryCreate2(stack *Stack) *big.Int {
+ return calcMemSize(stack.Back(1), stack.Back(2))
+}
+
func memoryCall(stack *Stack) *big.Int {
x := calcMemSize(stack.Back(5), stack.Back(6))
y := calcMemSize(stack.Back(3), stack.Back(4))
diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go
index 6c12c50e5..4349ffd29 100644
--- a/core/vm/opcodes.go
+++ b/core/vm/opcodes.go
@@ -90,6 +90,7 @@ const (
EXTCODECOPY
RETURNDATASIZE
RETURNDATACOPY
+ EXTCODEHASH
)
// 0x40 range - block operations.
@@ -209,6 +210,7 @@ const (
CALLCODE
RETURN
DELEGATECALL
+ CREATE2
STATICCALL = 0xfa
REVERT = 0xfd
@@ -266,6 +268,7 @@ var opCodeToString = map[OpCode]string{
EXTCODECOPY: "EXTCODECOPY",
RETURNDATASIZE: "RETURNDATASIZE",
RETURNDATACOPY: "RETURNDATACOPY",
+ EXTCODEHASH: "EXTCODEHASH",
// 0x40 range - block operations.
BLOCKHASH: "BLOCKHASH",
@@ -370,6 +373,7 @@ var opCodeToString = map[OpCode]string{
RETURN: "RETURN",
CALLCODE: "CALLCODE",
DELEGATECALL: "DELEGATECALL",
+ CREATE2: "CREATE2",
STATICCALL: "STATICCALL",
REVERT: "REVERT",
SELFDESTRUCT: "SELFDESTRUCT",
@@ -433,6 +437,7 @@ var stringToOp = map[string]OpCode{
"EXTCODECOPY": EXTCODECOPY,
"RETURNDATASIZE": RETURNDATASIZE,
"RETURNDATACOPY": RETURNDATACOPY,
+ "EXTCODEHASH": EXTCODEHASH,
"BLOCKHASH": BLOCKHASH,
"COINBASE": COINBASE,
"TIMESTAMP": TIMESTAMP,
@@ -521,6 +526,7 @@ var stringToOp = map[string]OpCode{
"LOG3": LOG3,
"LOG4": LOG4,
"CREATE": CREATE,
+ "CREATE2": CREATE2,
"CALL": CALL,
"RETURN": RETURN,
"CALLCODE": CALLCODE,
diff --git a/crypto/crypto.go b/crypto/crypto.go
index 619440e81..dec6e3c19 100644
--- a/crypto/crypto.go
+++ b/crypto/crypto.go
@@ -76,6 +76,12 @@ func CreateAddress(b common.Address, nonce uint64) common.Address {
return common.BytesToAddress(Keccak256(data)[12:])
}
+// CreateAddress2 creates an ethereum address given the address bytes, initial
+// contract code and a salt.
+func CreateAddress2(b common.Address, salt common.Hash, code []byte) common.Address {
+ return common.BytesToAddress(Keccak256([]byte{0xff}, b.Bytes(), salt.Bytes(), code)[12:])
+}
+
// ToECDSA creates a private key with the given D value.
func ToECDSA(d []byte) (*ecdsa.PrivateKey, error) {
return toECDSA(d, true)
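A minimal sketch of the new helper (the sender, salt and init code below are arbitrary placeholders): at this revision the raw init code itself goes into the hash, i.e. keccak256(0xff ++ sender ++ salt ++ init_code)[12:].

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	sender := common.HexToAddress("0x00000000000000000000000000000000deadbeef") // placeholder
	salt := common.BigToHash(big.NewInt(42))                                     // placeholder
	initCode := []byte{0x60, 0x00, 0x60, 0x00, 0xf3}                             // placeholder init code

	// CREATE2-style address derivation used by evm.Create2 above.
	fmt.Println(crypto.CreateAddress2(sender, salt, initCode).Hex())
}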
diff --git a/crypto/secp256k1/LICENSE b/crypto/secp256k1/LICENSE
new file mode 100644
index 000000000..f9090e142
--- /dev/null
+++ b/crypto/secp256k1/LICENSE
@@ -0,0 +1,31 @@
+Copyright (c) 2010 The Go Authors. All rights reserved.
+Copyright (c) 2011 ThePiachu. All rights reserved.
+Copyright (c) 2015 Jeffrey Wilcke. All rights reserved.
+Copyright (c) 2015 Felix Lange. All rights reserved.
+Copyright (c) 2015 Gustav Simonsson. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of the copyright holder. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/crypto/secp256k1/ext.h b/crypto/secp256k1/ext.h
index 9b043c724..e422fe4b4 100644
--- a/crypto/secp256k1/ext.h
+++ b/crypto/secp256k1/ext.h
@@ -1,18 +1,6 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
// secp256k1_context_create_sign_verify creates a context for signing and signature verification.
static secp256k1_context* secp256k1_context_create_sign_verify() {
diff --git a/crypto/secp256k1/panic_cb.go b/crypto/secp256k1/panic_cb.go
index e0e9034ee..6d59a1d24 100644
--- a/crypto/secp256k1/panic_cb.go
+++ b/crypto/secp256k1/panic_cb.go
@@ -1,18 +1,6 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
package secp256k1
diff --git a/crypto/secp256k1/secp256.go b/crypto/secp256k1/secp256.go
index eefbb99ee..843fb1252 100644
--- a/crypto/secp256k1/secp256.go
+++ b/crypto/secp256k1/secp256.go
@@ -1,18 +1,6 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
// Package secp256k1 wraps the bitcoin secp256k1 C library.
package secp256k1
diff --git a/crypto/secp256k1/secp256_test.go b/crypto/secp256k1/secp256_test.go
index b608bcfcf..3bccddab8 100644
--- a/crypto/secp256k1/secp256_test.go
+++ b/crypto/secp256k1/secp256_test.go
@@ -1,18 +1,6 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
package secp256k1
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 016087dfe..03f6012d7 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -70,6 +70,10 @@ func (b *EthAPIBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNum
return b.eth.blockchain.GetHeaderByNumber(uint64(blockNr)), nil
}
+func (b *EthAPIBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
+ return b.eth.blockchain.GetHeaderByHash(hash), nil
+}
+
func (b *EthAPIBackend) BlockByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Block, error) {
// Pending block is only known by the miner
if blockNr == rpc.PendingBlockNumber {
diff --git a/eth/filters/api.go b/eth/filters/api.go
index 592ad3b82..6fea14fee 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -324,16 +324,26 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
//
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs
func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) {
- // Convert the RPC block numbers into internal representations
- if crit.FromBlock == nil {
- crit.FromBlock = big.NewInt(rpc.LatestBlockNumber.Int64())
- }
- if crit.ToBlock == nil {
- crit.ToBlock = big.NewInt(rpc.LatestBlockNumber.Int64())
+ var filter *Filter
+ if crit.BlockHash != nil {
+ // Block filter requested, construct a single-shot filter
+ filter = NewBlockFilter(api.backend, *crit.BlockHash, crit.Addresses, crit.Topics)
+ } else {
+ // Convert the RPC block numbers into internal representations
+ var (
+ begin int64
+ end int64
+ )
+ if crit.FromBlock == nil {
+ begin = int64(rpc.LatestBlockNumber)
+ }
+ if crit.ToBlock == nil {
+ end = int64(rpc.LatestBlockNumber)
+ }
+ // Construct the range filter
+ filter = NewRangeFilter(api.backend, begin, end, crit.Addresses, crit.Topics)
}
- // Create and run the filter to get all the logs
- filter := New(api.backend, crit.FromBlock.Int64(), crit.ToBlock.Int64(), crit.Addresses, crit.Topics)
-
+ // Run the filter and return all the logs
logs, err := filter.Logs(ctx)
if err != nil {
return nil, err
@@ -371,17 +381,24 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty
return nil, fmt.Errorf("filter not found")
}
- begin := rpc.LatestBlockNumber.Int64()
- if f.crit.FromBlock != nil {
- begin = f.crit.FromBlock.Int64()
- }
- end := rpc.LatestBlockNumber.Int64()
- if f.crit.ToBlock != nil {
- end = f.crit.ToBlock.Int64()
+ var filter *Filter
+ if f.crit.BlockHash != nil {
+ // Block filter requested, construct a single-shot filter
+ filter = NewBlockFilter(api.backend, *f.crit.BlockHash, f.crit.Addresses, f.crit.Topics)
+ } else {
+ // Convert the RPC block numbers into internal representations
+ begin := rpc.LatestBlockNumber.Int64()
+ if f.crit.FromBlock != nil {
+ begin = f.crit.FromBlock.Int64()
+ }
+ end := rpc.LatestBlockNumber.Int64()
+ if f.crit.ToBlock != nil {
+ end = f.crit.ToBlock.Int64()
+ }
+ // Construct the range filter
+ filter = NewRangeFilter(api.backend, begin, end, f.crit.Addresses, f.crit.Topics)
}
- // Create and run the filter to get all the logs
- filter := New(api.backend, begin, end, f.crit.Addresses, f.crit.Topics)
-
+ // Run the filter and return all the logs
logs, err := filter.Logs(ctx)
if err != nil {
return nil, err
@@ -444,7 +461,8 @@ func returnLogs(logs []*types.Log) []*types.Log {
// UnmarshalJSON sets *args fields with given data.
func (args *FilterCriteria) UnmarshalJSON(data []byte) error {
type input struct {
- From *rpc.BlockNumber `json:"fromBlock"`
+ BlockHash *common.Hash `json:"blockHash"`
+ FromBlock *rpc.BlockNumber `json:"fromBlock"`
ToBlock *rpc.BlockNumber `json:"toBlock"`
Addresses interface{} `json:"address"`
Topics []interface{} `json:"topics"`
@@ -455,12 +473,20 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error {
return err
}
- if raw.From != nil {
- args.FromBlock = big.NewInt(raw.From.Int64())
- }
+ if raw.BlockHash != nil {
+ if raw.FromBlock != nil || raw.ToBlock != nil {
+ // BlockHash is mutually exclusive with FromBlock/ToBlock criteria
+ return fmt.Errorf("cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other")
+ }
+ args.BlockHash = raw.BlockHash
+ } else {
+ if raw.FromBlock != nil {
+ args.FromBlock = big.NewInt(raw.FromBlock.Int64())
+ }
- if raw.ToBlock != nil {
- args.ToBlock = big.NewInt(raw.ToBlock.Int64())
+ if raw.ToBlock != nil {
+ args.ToBlock = big.NewInt(raw.ToBlock.Int64())
+ }
}
args.Addresses = []common.Address{}
@@ -538,7 +564,7 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error {
func decodeAddress(s string) (common.Address, error) {
b, err := hexutil.Decode(s)
if err == nil && len(b) != common.AddressLength {
- err = fmt.Errorf("hex has invalid length %d after decoding", len(b))
+ err = fmt.Errorf("hex has invalid length %d after decoding; expected %d for address", len(b), common.AddressLength)
}
return common.BytesToAddress(b), err
}
@@ -546,7 +572,7 @@ func decodeAddress(s string) (common.Address, error) {
func decodeTopic(s string) (common.Hash, error) {
b, err := hexutil.Decode(s)
if err == nil && len(b) != common.HashLength {
- err = fmt.Errorf("hex has invalid length %d after decoding", len(b))
+ err = fmt.Errorf("hex has invalid length %d after decoding; expected %d for topic", len(b), common.HashLength)
}
return common.BytesToHash(b), err
}
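
A minimal sketch (not part of the diff) of how the new blockHash criterion could be exercised over raw JSON-RPC once this change is in place; the endpoint, hash and address below are placeholders, and blockHash must not be combined with fromBlock/toBlock.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // assumed local node
	if err != nil {
		log.Fatal(err)
	}
	var logs []types.Log
	// blockHash is mutually exclusive with fromBlock/toBlock, per the
	// validation added to FilterCriteria.UnmarshalJSON above.
	err = client.CallContext(context.Background(), &logs, "eth_getLogs", map[string]interface{}{
		"blockHash": "0x1111111111111111111111111111111111111111111111111111111111111111",
		"address":   []string{"0x0000000000000000000000000000000000000000"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("logs in block:", len(logs))
}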
diff --git a/eth/filters/bench_test.go b/eth/filters/bench_test.go
index faffaa70b..c5f681e02 100644
--- a/eth/filters/bench_test.go
+++ b/eth/filters/bench_test.go
@@ -135,7 +135,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
var addr common.Address
addr[0] = byte(i)
addr[1] = byte(i / 256)
- filter := New(backend, 0, int64(cnt*sectionSize-1), []common.Address{addr}, nil)
+ filter := NewRangeFilter(backend, 0, int64(cnt*sectionSize-1), []common.Address{addr}, nil)
if _, err := filter.Logs(context.Background()); err != nil {
b.Error("filter.Find error:", err)
}
@@ -192,7 +192,7 @@ func BenchmarkNoBloomBits(b *testing.B) {
start := time.Now()
mux := new(event.TypeMux)
backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
- filter := New(backend, 0, int64(*headNum), []common.Address{{}}, nil)
+ filter := NewRangeFilter(backend, 0, int64(*headNum), []common.Address{{}}, nil)
filter.Logs(context.Background())
d := time.Since(start)
fmt.Println("Finished running filter benchmarks")
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index 7000d74fa..071613ad7 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -18,6 +18,7 @@ package filters
import (
"context"
+ "errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
@@ -33,6 +34,7 @@ type Backend interface {
ChainDb() ethdb.Database
EventMux() *event.TypeMux
HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
+ HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error)
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error)
@@ -49,17 +51,19 @@ type Backend interface {
type Filter struct {
backend Backend
- db ethdb.Database
- begin, end int64
- addresses []common.Address
- topics [][]common.Hash
+ db ethdb.Database
+ addresses []common.Address
+ topics [][]common.Hash
+
+ block common.Hash // Block hash if filtering a single block
+ begin, end int64 // Range interval if filtering multiple blocks
matcher *bloombits.Matcher
}
-// New creates a new filter which uses a bloom filter on blocks to figure out whether
-// a particular block is interesting or not.
-func New(backend Backend, begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
+// NewRangeFilter creates a new filter which uses a bloom filter on blocks to
+// figure out whether a particular block is interesting or not.
+func NewRangeFilter(backend Backend, begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
// Flatten the address and topic filter clauses into a single bloombits filter
// system. Since the bloombits are not positional, nil topics are permitted,
// which get flattened into a nil byte slice.
@@ -78,23 +82,52 @@ func New(backend Backend, begin, end int64, addresses []common.Address, topics [
}
filters = append(filters, filter)
}
- // Assemble and return the filter
size, _ := backend.BloomStatus()
+ // Create a generic filter and convert it into a range filter
+ filter := newFilter(backend, addresses, topics)
+
+ filter.matcher = bloombits.NewMatcher(size, filters)
+ filter.begin = begin
+ filter.end = end
+
+ return filter
+}
+
+// NewBlockFilter creates a new filter which directly inspects the contents of
+// a block to figure out whether it is interesting or not.
+func NewBlockFilter(backend Backend, block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter {
+ // Create a generic filter and convert it into a block filter
+ filter := newFilter(backend, addresses, topics)
+ filter.block = block
+ return filter
+}
+
+// newFilter creates a generic filter that can either filter based on a block hash,
+// or based on range queries. The search criteria needs to be explicitly set.
+func newFilter(backend Backend, addresses []common.Address, topics [][]common.Hash) *Filter {
return &Filter{
backend: backend,
- begin: begin,
- end: end,
addresses: addresses,
topics: topics,
db: backend.ChainDb(),
- matcher: bloombits.NewMatcher(size, filters),
}
}
// Logs searches the blockchain for matching log entries, returning all from the
// first block that contains matches, updating the start of the filter accordingly.
func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
+ // If we're doing singleton block filtering, execute and return
+ if f.block != (common.Hash{}) {
+ header, err := f.backend.HeaderByHash(ctx, f.block)
+ if err != nil {
+ return nil, err
+ }
+ if header == nil {
+ return nil, errors.New("unknown block")
+ }
+ return f.blockLogs(ctx, header)
+ }
// Figure out the limits of the filter range
header, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
if header == nil {
@@ -187,13 +220,23 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e
if header == nil || err != nil {
return logs, err
}
- if bloomFilter(header.Bloom, f.addresses, f.topics) {
- found, err := f.checkMatches(ctx, header)
- if err != nil {
- return logs, err
- }
- logs = append(logs, found...)
+ found, err := f.blockLogs(ctx, header)
+ if err != nil {
+ return logs, err
+ }
+ logs = append(logs, found...)
+ }
+ return logs, nil
+}
+
+// blockLogs returns the logs matching the filter criteria within a single block.
+func (f *Filter) blockLogs(ctx context.Context, header *types.Header) (logs []*types.Log, err error) {
+ if bloomFilter(header.Bloom, f.addresses, f.topics) {
+ found, err := f.checkMatches(ctx, header)
+ if err != nil {
+ return logs, err
}
+ logs = append(logs, found...)
}
return logs, nil
}
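
A rough usage sketch (not part of the diff) contrasting the two constructors that replace New; it assumes some filters.Backend implementation is available, as the tests further down build one from a memory database.

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/filters"
)

// collectLogs shows the single-block and range-based filters side by side.
func collectLogs(ctx context.Context, backend filters.Backend, hash common.Hash, addr common.Address) ([]*types.Log, error) {
	// Single-shot filter over the block identified by hash.
	blockLogs, err := filters.NewBlockFilter(backend, hash, []common.Address{addr}, nil).Logs(ctx)
	if err != nil {
		return nil, err
	}
	// Range filter from genesis up to the latest block (-1 means "latest").
	rangeLogs, err := filters.NewRangeFilter(backend, 0, -1, []common.Address{addr}, nil).Logs(ctx)
	if err != nil {
		return nil, err
	}
	return append(blockLogs, rangeLogs...), nil
}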
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index ff1af85a8..e71080b1a 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -75,6 +75,14 @@ func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumbe
return rawdb.ReadHeader(b.db, hash, num), nil
}
+func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
+ number := rawdb.ReadHeaderNumber(b.db, hash)
+ if number == nil {
+ return nil, nil
+ }
+ return rawdb.ReadHeader(b.db, hash, *number), nil
+}
+
func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil {
return rawdb.ReadReceipts(b.db, hash, *number), nil
@@ -343,6 +351,33 @@ func TestInvalidLogFilterCreation(t *testing.T) {
}
}
+func TestInvalidGetLogsRequest(t *testing.T) {
+ var (
+ mux = new(event.TypeMux)
+ db = ethdb.NewMemDatabase()
+ txFeed = new(event.Feed)
+ rmLogsFeed = new(event.Feed)
+ logsFeed = new(event.Feed)
+ chainFeed = new(event.Feed)
+ backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+ api = NewPublicFilterAPI(backend, false)
+ blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
+ )
+
+ // Reason: cannot specify both BlockHash and FromBlock/ToBlock
+ testCases := []FilterCriteria{
+ 0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)},
+ 1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)},
+ 2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},
+ }
+
+ for i, test := range testCases {
+ if _, err := api.GetLogs(context.Background(), test); err == nil {
+ t.Errorf("Expected Logs for case #%d to fail", i)
+ }
+ }
+}
+
// TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.
func TestLogFilter(t *testing.T) {
t.Parallel()
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index ccabe955c..396a03d61 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -92,7 +92,7 @@ func BenchmarkFilters(b *testing.B) {
}
b.ResetTimer()
- filter := New(backend, 0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil)
+ filter := NewRangeFilter(backend, 0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil)
for i := 0; i < b.N; i++ {
logs, _ := filter.Logs(context.Background())
@@ -175,14 +175,14 @@ func TestFilters(t *testing.T) {
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i])
}
- filter := New(backend, 0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}})
+ filter := NewRangeFilter(backend, 0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}})
logs, _ := filter.Logs(context.Background())
if len(logs) != 4 {
t.Error("expected 4 log, got", len(logs))
}
- filter = New(backend, 900, 999, []common.Address{addr}, [][]common.Hash{{hash3}})
+ filter = NewRangeFilter(backend, 900, 999, []common.Address{addr}, [][]common.Hash{{hash3}})
logs, _ = filter.Logs(context.Background())
if len(logs) != 1 {
t.Error("expected 1 log, got", len(logs))
@@ -191,7 +191,7 @@ func TestFilters(t *testing.T) {
t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0])
}
- filter = New(backend, 990, -1, []common.Address{addr}, [][]common.Hash{{hash3}})
+ filter = NewRangeFilter(backend, 990, -1, []common.Address{addr}, [][]common.Hash{{hash3}})
logs, _ = filter.Logs(context.Background())
if len(logs) != 1 {
t.Error("expected 1 log, got", len(logs))
@@ -200,7 +200,7 @@ func TestFilters(t *testing.T) {
t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0])
}
- filter = New(backend, 1, 10, nil, [][]common.Hash{{hash1, hash2}})
+ filter = NewRangeFilter(backend, 1, 10, nil, [][]common.Hash{{hash1, hash2}})
logs, _ = filter.Logs(context.Background())
if len(logs) != 2 {
@@ -208,7 +208,7 @@ func TestFilters(t *testing.T) {
}
failHash := common.BytesToHash([]byte("fail"))
- filter = New(backend, 0, -1, nil, [][]common.Hash{{failHash}})
+ filter = NewRangeFilter(backend, 0, -1, nil, [][]common.Hash{{failHash}})
logs, _ = filter.Logs(context.Background())
if len(logs) != 0 {
@@ -216,14 +216,14 @@ func TestFilters(t *testing.T) {
}
failAddr := common.BytesToAddress([]byte("failmenow"))
- filter = New(backend, 0, -1, []common.Address{failAddr}, nil)
+ filter = NewRangeFilter(backend, 0, -1, []common.Address{failAddr}, nil)
logs, _ = filter.Logs(context.Background())
if len(logs) != 0 {
t.Error("expected 0 log, got", len(logs))
}
- filter = New(backend, 0, -1, nil, [][]common.Hash{{failHash}, {hash1}})
+ filter = NewRangeFilter(backend, 0, -1, nil, [][]common.Hash{{failHash}, {hash1}})
logs, _ = filter.Logs(context.Background())
if len(logs) != 0 {
diff --git a/eth/tracers/internal/tracers/assets.go b/eth/tracers/internal/tracers/assets.go
index a3963b53b..04dd6fe89 100644
--- a/eth/tracers/internal/tracers/assets.go
+++ b/eth/tracers/internal/tracers/assets.go
@@ -157,7 +157,7 @@ func evmdis_tracerJs() (*asset, error) {
return a, nil
}
-var _noop_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x93\x4f\x6f\xdb\x46\x10\xc5\xcf\xe6\xa7\x78\xc7\x04\x50\xc5\xfe\x39\x14\x70\x8b\x02\xac\x61\x27\x2a\x1c\xd9\x90\xe8\x06\x3e\x0e\xc9\xa1\xb8\xe9\x6a\x87\x9d\x9d\x95\x22\x04\xf9\xee\xc5\x92\x52\x13\x14\x69\x9b\x9b\xb0\xd2\xfb\xbd\x37\xf3\x46\x65\x89\x1b\x19\x4f\xea\x76\x83\xe1\xfb\x6f\xbf\xfb\x11\xf5\xc0\xd8\xc9\x37\x6c\x03\x2b\xa7\x3d\xaa\x64\x83\x68\x2c\xca\x12\xf5\xe0\x22\x7a\xe7\x19\x2e\x62\x24\x35\x48\x0f\xfb\xc7\xef\xbd\x6b\x94\xf4\xb4\x2c\xca\x72\xd6\x7c\xf1\xeb\x4c\xe8\x95\x19\x51\x7a\x3b\x92\xf2\x35\x4e\x92\xd0\x52\x80\x72\xe7\xa2\xa9\x6b\x92\x31\x9c\x81\x42\x57\x8a\x62\x2f\x9d\xeb\x4f\x19\xe9\x0c\x29\x74\xac\x93\xb5\xb1\xee\xe3\x25\xc7\xab\xf5\x13\xee\x39\x46\x56\xbc\xe2\xc0\x4a\x1e\x8f\xa9\xf1\xae\xc5\xbd\x6b\x39\x44\x06\x45\x8c\xf9\x25\x0e\xdc\xa1\x99\x70\x59\x78\x97\xa3\x6c\xcf\x51\x70\x27\x29\x74\x64\x4e\xc2\x02\xec\x72\x72\x1c\x58\xa3\x93\x80\x1f\x2e\x56\x67\xe0\x02\xa2\x19\xf2\x82\x2c\x0f\xa0\x90\x31\xeb\x5e\x82\xc2\x09\x9e\xec\x93\xf4\x2b\x16\xf2\x69\xee\x0e\x2e\x4c\x36\x83\x8c\x0c\x1b\xc8\xf2\xd4\x47\xe7\x3d\x1a\x46\x8a\xdc\x27\xbf\xc8\xb4\x26\x19\xde\xae\xea\xd7\x0f\x4f\x35\xaa\xf5\x33\xde\x56\x9b\x4d\xb5\xae\x9f\x7f\xc2\xd1\xd9\x20\xc9\xc0\x07\x9e\x51\x6e\x3f\x7a\xc7\x1d\x8e\xa4\x4a\xc1\x4e\x90\x3e\x13\xde\xdc\x6e\x6e\x5e\x57\xeb\xba\xfa\x75\x75\xbf\xaa\x9f\x21\x8a\xbb\x55\xbd\xbe\xdd\x6e\x71\xf7\xb0\x41\x85\xc7\x6a\x53\xaf\x6e\x9e\xee\xab\x0d\x1e\x9f\x36\x8f\x0f\xdb\xdb\x25\xb6\x9c\x53\x71\xd6\xff\xff\xce\xfb\xa9\x3d\x65\x74\x6c\xe4\x7c\xbc\x6c\xe2\x59\x12\xe2\x20\xc9\x77\x18\xe8\xc0\x50\x6e\xd9\x1d\xb8\x03\xa1\x95\xf1\xf4\xd5\xa5\x66\x16\x79\x09\xbb\x69\xe6\x7f\x3d\x48\xac\x7a\x04\xb1\x05\x22\x33\x7e\x1e\xcc\xc6\xeb\xb2\x3c\x1e\x8f\xcb\x5d\x48\x4b\xd1\x5d\xe9\x67\x5c\x2c\x7f\x59\x16\x99\x19\x44\xc6\x5a\xa9\x65\xcd\xe5\xbc\x4b\xd1\x26\x76\x43\xca\x8d\x04\x46\x23\xce\xb3\x8e\xb9\x65\xb4\xd2\xe5\x01\xfe\x4c\x4e\xb9\x43\xaf\xb2\x07\xe1\x37\x3a\xd0\xb6\x55\x37\x5a\xc6\x49\xf3\x8e\x5b\x83\xc9\x5c\x21\x35\x7e\x3a\x47\x82\x29\x85\x48\x6d\xbe\x9b\xfc\xb9\x65\x5d\x16\x1f\x8a\xab\xb2\x44\x34\x1e\xb3\xb7\x0b\x07\xf9\x23\x73\x45\x73\x9f\x7a\x82\x8c\x93\xe3\x74\x19\x39\xd4\xef\x6f\xc0\xef\xb9\x4d\xc6\x71\x59\x5c\x65\xdd\x35\xfa\x14\x26\xe8\x0b\x2f\xbb\x05\xba\xe6\x25\x3e\xe0\xe3\xa2\x98\xc8\x3d\x25\x6f\x9f\xa3\x8f\xc3\xf9\x4c\xa8\xb5\x44\xfe\x4c\xcb\x91\xa4\x07\x85\x8b\x61\x3f\x17\x78\x35\xe9\xff\xdb\x42\x39\x7e\xc9\x83\xbc\x9f\x7c\x66\x60\x9c\xab\x6f\x98\x03\x9c\xb1\x52\xbe\x7d\x39\xb0\xe6\xbf\x3d\x94\x2d\x69\x88\x13\x2e\x6b\x7a\x17\xc8\x5f\xc0\xe7\xf3\xc8\x1b\x73\x61\xb7\x2c\xae\xe6\xf7\xcf\x42\xb5\xf6\xfe\xef\x50\xc5\xc7\xe2\xaf\x00\x00\x00\xff\xff\x13\x5b\x7d\x37\xec\x04\x00\x00")
+var _noop_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x93\x4f\x6f\xdb\x46\x10\xc5\xcf\xe6\xa7\x78\xc7\x04\x50\xc5\xfe\x39\x14\x70\x8a\x02\xac\x61\x27\x2a\x1c\xdb\x90\xe8\x06\x3e\x0e\xc9\xa1\xb8\xe9\x6a\x87\x9d\x9d\x95\x22\x18\xfe\xee\xc5\x92\x12\x12\x14\x69\x9b\x9b\xb0\xd2\xfb\xbd\x37\xf3\x46\x65\x89\x2b\x19\x8f\xea\xb6\x83\xe1\xc7\xef\x7f\xf8\x19\xf5\xc0\xd8\xca\x77\x6c\x03\x2b\xa7\x1d\xaa\x64\x83\x68\x2c\xca\x12\xf5\xe0\x22\x7a\xe7\x19\x2e\x62\x24\x35\x48\x0f\xfb\xc7\xef\xbd\x6b\x94\xf4\xb8\x2c\xca\x72\xd6\x7c\xf5\xeb\x4c\xe8\x95\x19\x51\x7a\x3b\x90\xf2\x25\x8e\x92\xd0\x52\x80\x72\xe7\xa2\xa9\x6b\x92\x31\x9c\x81\x42\x57\x8a\x62\x27\x9d\xeb\x8f\x19\xe9\x0c\x29\x74\xac\x93\xb5\xb1\xee\xe2\x39\xc7\xdb\xbb\x47\xdc\x72\x8c\xac\x78\xcb\x81\x95\x3c\x1e\x52\xe3\x5d\x8b\x5b\xd7\x72\x88\x0c\x8a\x18\xf3\x4b\x1c\xb8\x43\x33\xe1\xb2\xf0\x26\x47\xd9\x9c\xa2\xe0\x46\x52\xe8\xc8\x9c\x84\x05\xd8\xe5\xe4\xd8\xb3\x46\x27\x01\x3f\x9d\xad\x4e\xc0\x05\x44\x33\xe4\x15\x59\x1e\x40\x21\x63\xd6\xbd\x06\x85\x23\x3c\xd9\x67\xe9\x37\x2c\xe4\xf3\xdc\x1d\x5c\x98\x6c\x06\x19\x19\x36\x90\xe5\xa9\x0f\xce\x7b\x34\x8c\x14\xb9\x4f\x7e\x91\x69\x4d\x32\x7c\x58\xd5\xef\xee\x1f\x6b\x54\x77\x4f\xf8\x50\xad\xd7\xd5\x5d\xfd\xf4\x06\x07\x67\x83\x24\x03\xef\x79\x46\xb9\xdd\xe8\x1d\x77\x38\x90\x2a\x05\x3b\x42\xfa\x4c\x78\x7f\xbd\xbe\x7a\x57\xdd\xd5\xd5\x6f\xab\xdb\x55\xfd\x04\x51\xdc\xac\xea\xbb\xeb\xcd\x06\x37\xf7\x6b\x54\x78\xa8\xd6\xf5\xea\xea\xf1\xb6\x5a\xe3\xe1\x71\xfd\x70\xbf\xb9\x5e\x62\xc3\x39\x15\x67\xfd\xff\xef\xbc\x9f\xda\x53\x46\xc7\x46\xce\xc7\xf3\x26\x9e\x24\x21\x0e\x92\x7c\x87\x81\xf6\x0c\xe5\x96\xdd\x9e\x3b\x10\x5a\x19\x8f\xdf\x5c\x6a\x66\x91\x97\xb0\x9d\x66\xfe\xd7\x83\xc4\xaa\x47\x10\x5b\x20\x32\xe3\x97\xc1\x6c\xbc\x2c\xcb\xc3\xe1\xb0\xdc\x86\xb4\x14\xdd\x96\x7e\xc6\xc5\xf2\xd7\x65\x91\x99\x41\x64\xac\x95\x5a\xd6\x5c\xce\xc7\x14\x6d\x62\x37\xa4\xdc\x48\x60\x34\xe2\x3c\xeb\x98\x5b\x46\x2b\x5d\x1e\xe0\xaf\xe4\x94\x3b\xf4\x2a\x3b\x10\x7e\xa7\x3d\x6d\x5a\x75\xa3\x65\x9c\x34\x1f\xb9\x35\x98\xcc\x15\x52\xe3\xa7\x73\x24\x98\x52\x88\xd4\xe6\xbb\xc9\x9f\x5b\xd6\x65\xf1\x5c\x5c\x94\x25\xa2\xf1\x98\xbd\x5d\xd8\xcb\x9f\x99\x2b\x9a\xfb\xd4\x23\x64\x9c\x1c\xa7\xcb\xc8\xa1\xfe\x78\x0f\xfe\xc4\x6d\x32\x8e\xcb\xe2\x22\xeb\x2e\xd1\xa7\x30\x41\x5f\x79\xd9\x2e\xd0\x35\xaf\xf1\x8c\x97\x45\x31\x91\x7b\x4a\xde\xbe\x44\x1f\x86\xd3\x99\x50\x6b\x89\xfc\x89\x96\x23\x49\x0f\x0a\x67\xc3\x7e\x2e\xf0\x62\xd2\xff\xb7\x85\x72\xfc\x9a\x07\x79\x3f\xf9\xcc\xc0\x38\x57\xdf\x30\x07\x38\x63\xa5\x7c\xfb\xb2\x67\xcd\x7f\x7b\x28\x5b\xd2\x10\x27\x5c\xd6\xf4\x2e\x90\x3f\x83\x4f\xe7\x91\x37\xe6\xc2\x76\x59\x5c\xcc\xef\x5f\x84\x6a\xed\xd3\x39\xd4\x4c\xc2\xf3\xcb\x1b\xbc\x14\x2f\xc5\xdf\x01\x00\x00\xff\xff\x77\x56\xe7\x1a\xf7\x04\x00\x00")
func noop_tracerJsBytes() ([]byte, error) {
return bindataRead(
@@ -173,7 +173,7 @@ func noop_tracerJs() (*asset, error) {
}
info := bindataFileInfo{name: "noop_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
- a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4d, 0xcc, 0x83, 0xe9, 0x9e, 0xc1, 0x56, 0x41, 0x6c, 0x6a, 0x3b, 0x46, 0xc9, 0x5f, 0xe1, 0x5b, 0xcd, 0x6b, 0x53, 0x45, 0xcd, 0xfe, 0x1e, 0x86, 0x8c, 0x6b, 0xb, 0x70, 0x73, 0x9f, 0x4c, 0xb1}}
+ a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xf, 0x1c, 0x6f, 0x65, 0xaf, 0x90, 0x31, 0xab, 0xf, 0xe0, 0xca, 0x54, 0x7, 0xfd, 0xd3, 0xa1, 0x4a, 0x14, 0x1, 0x2a, 0x9d, 0xdc, 0xb9, 0x64, 0x69, 0x83, 0x30, 0xb1, 0x2a, 0xbd, 0xfb}}
return a, nil
}
diff --git a/eth/tracers/internal/tracers/noop_tracer.js b/eth/tracers/internal/tracers/noop_tracer.js
index f966ddc7d..fe7ddc85a 100644
--- a/eth/tracers/internal/tracers/noop_tracer.js
+++ b/eth/tracers/internal/tracers/noop_tracer.js
@@ -25,5 +25,5 @@
// result is invoked when all the opcodes have been iterated over and returns
// the final result of the tracing.
- result: function(ctx, db) { }
+ result: function(ctx, db) { return {}; }
}
diff --git a/interfaces.go b/interfaces.go
index a8b48c93d..26b0fcbc1 100644
--- a/interfaces.go
+++ b/interfaces.go
@@ -131,6 +131,7 @@ type ContractCaller interface {
// FilterQuery contains options for contract log filtering.
type FilterQuery struct {
+ BlockHash *common.Hash // used by eth_getLogs, return logs only from block with this hash
FromBlock *big.Int // beginning of the queried range, nil means genesis block
ToBlock *big.Int // end of the range, nil means latest block
Addresses []common.Address // restricts matches to events created by specific contracts
diff --git a/les/api_backend.go b/les/api_backend.go
index dea33c470..4232d3ae0 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -60,10 +60,13 @@ func (b *LesApiBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNum
if blockNr == rpc.LatestBlockNumber || blockNr == rpc.PendingBlockNumber {
return b.eth.blockchain.CurrentHeader(), nil
}
-
return b.eth.blockchain.GetHeaderByNumberOdr(ctx, uint64(blockNr))
}
+func (b *LesApiBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
+ return b.eth.blockchain.GetHeaderByHash(hash), nil
+}
+
func (b *LesApiBackend) BlockByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Block, error) {
header, err := b.HeaderByNumber(ctx, blockNr)
if header == nil || err != nil {
diff --git a/light/lightchain_test.go b/light/lightchain_test.go
index c0aa51da2..5f0baaf4c 100644
--- a/light/lightchain_test.go
+++ b/light/lightchain_test.go
@@ -326,7 +326,7 @@ func TestBadHeaderHashes(t *testing.T) {
func TestReorgBadHeaderHashes(t *testing.T) {
bc := newTestLightChain()
- // Create a chain, import and ban aferwards
+ // Create a chain, import and ban afterwards
headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 3, 4}, 10)
if _, err := bc.InsertHeaderChain(headers, 1); err != nil {
diff --git a/p2p/rlpx.go b/p2p/rlpx.go
index 149eda689..46b666869 100644
--- a/p2p/rlpx.go
+++ b/p2p/rlpx.go
@@ -181,9 +181,9 @@ func (t *rlpx) doEncHandshake(prv *ecdsa.PrivateKey, dial *discover.Node) (disco
err error
)
if dial == nil {
- sec, err = receiverEncHandshake(t.fd, prv, nil)
+ sec, err = receiverEncHandshake(t.fd, prv)
} else {
- sec, err = initiatorEncHandshake(t.fd, prv, dial.ID, nil)
+ sec, err = initiatorEncHandshake(t.fd, prv, dial.ID)
}
if err != nil {
return discover.NodeID{}, err
@@ -280,9 +280,9 @@ func (h *encHandshake) staticSharedSecret(prv *ecdsa.PrivateKey) ([]byte, error)
// it should be called on the dialing side of the connection.
//
// prv is the local client's private key.
-func initiatorEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, remoteID discover.NodeID, token []byte) (s secrets, err error) {
+func initiatorEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, remoteID discover.NodeID) (s secrets, err error) {
h := &encHandshake{initiator: true, remoteID: remoteID}
- authMsg, err := h.makeAuthMsg(prv, token)
+ authMsg, err := h.makeAuthMsg(prv)
if err != nil {
return s, err
}
@@ -306,7 +306,7 @@ func initiatorEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, remoteID d
}
// makeAuthMsg creates the initiator handshake message.
-func (h *encHandshake) makeAuthMsg(prv *ecdsa.PrivateKey, token []byte) (*authMsgV4, error) {
+func (h *encHandshake) makeAuthMsg(prv *ecdsa.PrivateKey) (*authMsgV4, error) {
rpub, err := h.remoteID.Pubkey()
if err != nil {
return nil, fmt.Errorf("bad remoteID: %v", err)
@@ -324,7 +324,7 @@ func (h *encHandshake) makeAuthMsg(prv *ecdsa.PrivateKey, token []byte) (*authMs
}
// Sign known message: static-shared-secret ^ nonce
- token, err = h.staticSharedSecret(prv)
+ token, err := h.staticSharedSecret(prv)
if err != nil {
return nil, err
}
@@ -352,8 +352,7 @@ func (h *encHandshake) handleAuthResp(msg *authRespV4) (err error) {
// it should be called on the listening side of the connection.
//
// prv is the local client's private key.
-// token is the token from a previous session with this node.
-func receiverEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, token []byte) (s secrets, err error) {
+func receiverEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey) (s secrets, err error) {
authMsg := new(authMsgV4)
authPacket, err := readHandshakeMsg(authMsg, encAuthMsgLen, prv, conn)
if err != nil {
diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go
index b68d08f39..c1b032a0d 100644
--- a/p2p/simulations/adapters/inproc.go
+++ b/p2p/simulations/adapters/inproc.go
@@ -296,6 +296,13 @@ func (sn *SimNode) Stop() error {
return sn.node.Stop()
}
+// Service returns a running service by name
+func (sn *SimNode) Service(name string) node.Service {
+ sn.lock.RLock()
+ defer sn.lock.RUnlock()
+ return sn.running[name]
+}
+
// Services returns a copy of the underlying services
func (sn *SimNode) Services() []node.Service {
sn.lock.RLock()
@@ -307,6 +314,17 @@ func (sn *SimNode) Services() []node.Service {
return services
}
+// ServiceMap returns a map by names of the underlying services
+func (sn *SimNode) ServiceMap() map[string]node.Service {
+ sn.lock.RLock()
+ defer sn.lock.RUnlock()
+ services := make(map[string]node.Service, len(sn.running))
+ for name, service := range sn.running {
+ services[name] = service
+ }
+ return services
+}
+
// Server returns the underlying p2p.Server
func (sn *SimNode) Server() *p2p.Server {
return sn.node.Server()
diff --git a/params/config.go b/params/config.go
index 6e6a5cb8b..b9e9bb8d6 100644
--- a/params/config.go
+++ b/params/config.go
@@ -211,6 +211,8 @@ func (c *ChainConfig) GasTable(num *big.Int) GasTable {
return GasTableHomestead
}
switch {
+ case c.IsConstantinople(num):
+ return GasTableConstantinople
case c.IsEIP158(num):
return GasTableEIP158
case c.IsEIP150(num):
diff --git a/params/gas_table.go b/params/gas_table.go
index d33bebbe5..6c4a38269 100644
--- a/params/gas_table.go
+++ b/params/gas_table.go
@@ -20,6 +20,7 @@ package params
type GasTable struct {
ExtcodeSize uint64
ExtcodeCopy uint64
+ ExtcodeHash uint64
Balance uint64
SLoad uint64
Calls uint64
@@ -63,7 +64,7 @@ var (
CreateBySuicide: 25000,
}
// GasTableEIP158 contain the gas re-prices for
- // the EIP15* phase.
+ // the EIP155/EIP158 phase.
GasTableEIP158 = GasTable{
ExtcodeSize: 700,
ExtcodeCopy: 700,
@@ -75,4 +76,18 @@ var (
CreateBySuicide: 25000,
}
+ // GasTableConstantinople contains the gas re-prices for
+ // the Constantinople phase.
+ GasTableConstantinople = GasTable{
+ ExtcodeSize: 700,
+ ExtcodeCopy: 700,
+ ExtcodeHash: 400,
+ Balance: 400,
+ SLoad: 200,
+ Calls: 700,
+ Suicide: 5000,
+ ExpByte: 50,
+
+ CreateBySuicide: 25000,
+ }
)
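
Illustrative only: how the gas table selection plays out once GasTableConstantinople is wired into ChainConfig.GasTable; the block number passed in is a placeholder, not a scheduled fork block.

package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

// tableFor walks the switch in ChainConfig.GasTable: Constantinople, then
// EIP158, then EIP150, then Homestead; the first enabled fork wins.
func tableFor(num *big.Int) params.GasTable {
	return params.MainnetChainConfig.GasTable(num)
}

// extCodeHashGas reads the new Constantinople price for EXTCODEHASH.
func extCodeHashGas() uint64 {
	return params.GasTableConstantinople.ExtcodeHash // 400
}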
diff --git a/params/protocol_params.go b/params/protocol_params.go
index 1ea9c5813..46624ac9a 100644
--- a/params/protocol_params.go
+++ b/params/protocol_params.go
@@ -57,6 +57,7 @@ const (
TierStepGas uint64 = 0 // Once per operation, for a selection of them.
LogTopicGas uint64 = 375 // Multiplied by the * of the LOG*, per LOG transaction. e.g. LOG0 incurs 0 * c_txLogTopicGas, LOG4 incurs 4 * c_txLogTopicGas.
CreateGas uint64 = 32000 // Once per CREATE operation & contract-creation transaction.
+ Create2Gas uint64 = 32000 // Once per CREATE2 operation
SuicideRefundGas uint64 = 24000 // Refunded following a suicide operation.
MemoryGas uint64 = 3 // Times the address of the (highest referenced byte in memory + 1). NOTE: referencing happens on read, write and in instructions such as RETURN and CALL.
TxDataNonZeroGas uint64 = 68 // Per byte of data attached to a transaction that is not equal to zero. NOTE: Not payable on data of calls between transactions.
diff --git a/rpc/doc.go b/rpc/doc.go
index 78aa92f89..9a6c4abbc 100644
--- a/rpc/doc.go
+++ b/rpc/doc.go
@@ -58,7 +58,7 @@ An example server which uses the JSON codec:
return a + b
}
- func (s *CalculatorService Div(a, b int) (int, error) {
+ func (s *CalculatorService) Div(a, b int) (int, error) {
if b == 0 {
return 0, errors.New("divide by zero")
}
diff --git a/rpc/server.go b/rpc/server.go
index 90ffadd25..214e1d3ed 100644
--- a/rpc/server.go
+++ b/rpc/server.go
@@ -94,11 +94,12 @@ func (s *Server) RegisterName(name string, rcvr interface{}) error {
methods, subscriptions := suitableCallbacks(rcvrVal, svc.typ)
- // already a previous service register under given sname, merge methods/subscriptions
+ if len(methods) == 0 && len(subscriptions) == 0 {
+ return fmt.Errorf("Service %T doesn't have any suitable methods/subscriptions to expose", rcvr)
+ }
+
+ // already a previous service register under given name, merge methods/subscriptions
if regsvc, present := s.services[name]; present {
- if len(methods) == 0 && len(subscriptions) == 0 {
- return fmt.Errorf("Service %T doesn't have any suitable methods/subscriptions to expose", rcvr)
- }
for _, m := range methods {
regsvc.callbacks[formatName(m.method.Name)] = m
}
@@ -111,10 +112,6 @@ func (s *Server) RegisterName(name string, rcvr interface{}) error {
svc.name = name
svc.callbacks, svc.subscriptions = methods, subscriptions
- if len(svc.callbacks) == 0 && len(svc.subscriptions) == 0 {
- return fmt.Errorf("Service %T doesn't have any suitable methods/subscriptions to expose", rcvr)
- }
-
s.services[svc.name] = svc
return nil
}
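
A short sketch (not part of the diff) of the stricter registration behavior: a receiver with nothing suitable to expose is now rejected up front, whether or not a service with that name already exists. The service names and types are made up for illustration.

package example

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

type emptyService struct{} // no exported methods, so nothing to expose

type timeService struct{}

// Unix is an RPC-suitable callback: exported, with a serializable return value.
func (timeService) Unix() int64 { return 0 }

func register() {
	srv := rpc.NewServer()
	if err := srv.RegisterName("empty", &emptyService{}); err != nil {
		fmt.Println("rejected as expected:", err)
	}
	if err := srv.RegisterName("time", &timeService{}); err != nil {
		fmt.Println("unexpected:", err)
	}
}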
diff --git a/swarm/api/api.go b/swarm/api/api.go
index 74af669c9..05a38b5e1 100644
--- a/swarm/api/api.go
+++ b/swarm/api/api.go
@@ -351,11 +351,12 @@ func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string
// we need to do some extra work if this is a mutable resource manifest
if entry.ContentType == ResourceContentType {
- // get the resource root chunk key
- log.Trace("resource type", "key", manifestAddr, "hash", entry.Hash)
+ // get the resource rootAddr
+ log.Trace("resource type", "menifestAddr", manifestAddr, "hash", entry.Hash)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rsrc, err := a.resource.Load(ctx, storage.Address(common.FromHex(entry.Hash)))
+ rootAddr := storage.Address(common.FromHex(entry.Hash))
+ rsrc, err := a.resource.Load(ctx, rootAddr)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
@@ -364,7 +365,8 @@ func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string
}
// use this key to retrieve the latest update
- rsrc, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, &mru.LookupParams{})
+ params := mru.LookupLatest(rootAddr)
+ rsrc, err = a.resource.Lookup(ctx, params)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
@@ -374,10 +376,10 @@ func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string
// if it's multihash, we will transparently serve the content this multihash points to
// \TODO this resolve is rather expensive all in all, review to see if it can be achieved cheaper
- if rsrc.Multihash {
+ if rsrc.Multihash() {
// get the data of the update
- _, rsrcData, err := a.resource.GetContent(rsrc.NameHash().Hex())
+ _, rsrcData, err := a.resource.GetContent(rootAddr)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
@@ -888,66 +890,39 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver
return addr, manifestEntryMap, nil
}
-// ResourceLookup Looks up mutable resource updates at specific periods and versions
-func (a *API) ResourceLookup(ctx context.Context, addr storage.Address, period uint32, version uint32, maxLookup *mru.LookupParams) (string, []byte, error) {
+// ResourceLookup finds mutable resource updates at specific periods and versions
+func (a *API) ResourceLookup(ctx context.Context, params *mru.LookupParams) (string, []byte, error) {
var err error
- rsrc, err := a.resource.Load(ctx, addr)
+ rsrc, err := a.resource.Load(ctx, params.RootAddr())
if err != nil {
return "", nil, err
}
- if version != 0 {
- if period == 0 {
- return "", nil, mru.NewError(mru.ErrInvalidValue, "Period can't be 0")
- }
- _, err = a.resource.LookupVersion(ctx, rsrc.NameHash(), period, version, true, maxLookup)
- } else if period != 0 {
- _, err = a.resource.LookupHistorical(ctx, rsrc.NameHash(), period, true, maxLookup)
- } else {
- _, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, maxLookup)
- }
+ _, err = a.resource.Lookup(ctx, params)
if err != nil {
return "", nil, err
}
var data []byte
- _, data, err = a.resource.GetContent(rsrc.NameHash().Hex())
+ _, data, err = a.resource.GetContent(params.RootAddr())
if err != nil {
return "", nil, err
}
return rsrc.Name(), data, nil
}
-// ResourceCreate creates Resource and returns its key
-func (a *API) ResourceCreate(ctx context.Context, name string, frequency uint64) (storage.Address, error) {
- key, _, err := a.resource.New(ctx, name, frequency)
- if err != nil {
- return nil, err
- }
- return key, nil
+// ResourceCreate creates a new Mutable Resource
+func (a *API) ResourceCreate(ctx context.Context, request *mru.Request) error {
+ return a.resource.New(ctx, request)
}
-// ResourceUpdateMultihash updates a Mutable Resource and marks the update's content to be of multihash type, which will be recognized upon retrieval.
-// It will fail if the data is not a valid multihash.
-func (a *API) ResourceUpdateMultihash(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) {
- return a.resourceUpdate(ctx, name, data, true)
+// ResourceNewRequest creates a Request object to update a specific mutable resource
+func (a *API) ResourceNewRequest(ctx context.Context, rootAddr storage.Address) (*mru.Request, error) {
+ return a.resource.NewUpdateRequest(ctx, rootAddr)
}
// ResourceUpdate updates a Mutable Resource with arbitrary data.
// Upon retrieval the update will be retrieved verbatim as bytes.
-func (a *API) ResourceUpdate(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) {
- return a.resourceUpdate(ctx, name, data, false)
-}
-
-func (a *API) resourceUpdate(ctx context.Context, name string, data []byte, multihash bool) (storage.Address, uint32, uint32, error) {
- var addr storage.Address
- var err error
- if multihash {
- addr, err = a.resource.UpdateMultihash(ctx, name, data)
- } else {
- addr, err = a.resource.Update(ctx, name, data)
- }
- period, _ := a.resource.GetLastPeriod(name)
- version, _ := a.resource.GetVersion(name)
- return addr, period, version, err
+func (a *API) ResourceUpdate(ctx context.Context, request *mru.SignedResourceUpdate) (storage.Address, error) {
+ return a.resource.Update(ctx, request)
}
// ResourceHashSize returned the size of the digest produced by the Mutable Resource hashing function
@@ -955,11 +930,6 @@ func (a *API) ResourceHashSize() int {
return a.resource.HashSize
}
-// ResourceIsValidated checks if the Mutable Resource has an active content validator.
-func (a *API) ResourceIsValidated() bool {
- return a.resource.IsValidated()
-}
-
// ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the address of the metadata chunk.
func (a *API) ResolveResourceManifest(ctx context.Context, addr storage.Address) (storage.Address, error) {
trie, err := loadManifest(ctx, a.fileStore, addr, nil)
diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go
index ef6222435..b3a5e929d 100644
--- a/swarm/api/client/client.go
+++ b/swarm/api/client/client.go
@@ -35,6 +35,7 @@ import (
"strings"
"github.com/ethereum/go-ethereum/swarm/api"
+ "github.com/ethereum/go-ethereum/swarm/storage/mru"
)
var (
@@ -562,3 +563,89 @@ func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error)
}
return string(data), nil
}
+
+// CreateResource creates a Mutable Resource with the given name and frequency, initializing it with the provided
+// data. The data is interpreted as a multihash or as raw bytes depending on how SetData was called on the request.
+// startTime=0 means "now"
+// Returns the resulting Mutable Resource manifest address that you can use to include in an ENS Resolver (setContent)
+// or reference future updates (Client.UpdateResource)
+func (c *Client) CreateResource(request *mru.Request) (string, error) {
+ responseStream, err := c.updateResource(request)
+ if err != nil {
+ return "", err
+ }
+ defer responseStream.Close()
+
+ body, err := ioutil.ReadAll(responseStream)
+ if err != nil {
+ return "", err
+ }
+
+ var manifestAddress string
+ if err = json.Unmarshal(body, &manifestAddress); err != nil {
+ return "", err
+ }
+ return manifestAddress, nil
+}
+
+// UpdateResource allows you to set a new version of your content
+func (c *Client) UpdateResource(request *mru.Request) error {
+ _, err := c.updateResource(request)
+ return err
+}
+
+func (c *Client) updateResource(request *mru.Request) (io.ReadCloser, error) {
+ body, err := request.MarshalJSON()
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest("POST", c.Gateway+"/bzz-resource:/", bytes.NewBuffer(body))
+ if err != nil {
+ return nil, err
+ }
+
+ res, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ return res.Body, nil
+
+}
+
+// GetResource returns a byte stream with the raw content of the resource
+// manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver
+// points to that address
+func (c *Client) GetResource(manifestAddressOrDomain string) (io.ReadCloser, error) {
+
+ res, err := http.Get(c.Gateway + "/bzz-resource:/" + manifestAddressOrDomain)
+ if err != nil {
+ return nil, err
+ }
+ return res.Body, nil
+
+}
+
+// GetResourceMetadata returns a structure that describes the Mutable Resource
+// manifestAddressOrDomain is the address you obtained in CreateResource or an ENS domain whose Resolver
+// points to that address
+func (c *Client) GetResourceMetadata(manifestAddressOrDomain string) (*mru.Request, error) {
+
+ responseStream, err := c.GetResource(manifestAddressOrDomain + "/meta")
+ if err != nil {
+ return nil, err
+ }
+ defer responseStream.Close()
+
+ body, err := ioutil.ReadAll(responseStream)
+ if err != nil {
+ return nil, err
+ }
+
+ var metadata mru.Request
+ if err := metadata.UnmarshalJSON(body); err != nil {
+ return nil, err
+ }
+ return &metadata, nil
+}
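
A condensed usage sketch (not part of the diff) tying the new client calls together; it mirrors the tests in client_test.go below, so the gateway URL, key, resource name and payloads are all placeholders.

package example

import (
	"io/ioutil"

	"github.com/ethereum/go-ethereum/crypto"
	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func resourceRoundTrip() error {
	client := swarm.NewClient("http://localhost:8500")

	privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
	if err != nil {
		return err
	}
	signer := mru.NewGenericSigner(privKey)

	// Create the resource with some initial content.
	// StartTime is omitted here: per the doc comment above, zero means "now".
	request, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
		Name:      "example.eth",
		Frequency: 13,
		Owner:     signer.Address(),
	})
	if err != nil {
		return err
	}
	request.SetData([]byte("v1"), false)
	if err := request.Sign(signer); err != nil {
		return err
	}
	manifest, err := client.CreateResource(request)
	if err != nil {
		return err
	}

	// Later: fetch the metadata template, set new data, sign and update.
	update, err := client.GetResourceMetadata(manifest)
	if err != nil {
		return err
	}
	update.SetData([]byte("v2"), false)
	if err := update.Sign(signer); err != nil {
		return err
	}
	if err := client.UpdateResource(update); err != nil {
		return err
	}

	// Read back the latest raw content.
	reader, err := client.GetResource(manifest)
	if err != nil {
		return err
	}
	defer reader.Close()
	_, err = ioutil.ReadAll(reader)
	return err
}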
diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go
index e68147ab2..dc608e3f1 100644
--- a/swarm/api/client/client_test.go
+++ b/swarm/api/client/client_test.go
@@ -25,8 +25,12 @@ import (
"sort"
"testing"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/swarm/api"
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
+ "github.com/ethereum/go-ethereum/swarm/multihash"
+ "github.com/ethereum/go-ethereum/swarm/storage/mru"
"github.com/ethereum/go-ethereum/swarm/testutil"
)
@@ -354,3 +358,159 @@ func TestClientMultipartUpload(t *testing.T) {
checkDownloadFile(file)
}
}
+
+func newTestSigner() (*mru.GenericSigner, error) {
+ privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
+ if err != nil {
+ return nil, err
+ }
+ return mru.NewGenericSigner(privKey), nil
+}
+
+// test the transparent resolving of multihash resource types with bzz:// scheme
+//
+// first upload data, and store the multihash to the resulting manifest in a resource update
+// retrieving the update with the multihash should return the manifest pointing directly to the data
+// and raw retrieve of that hash should return the data
+func TestClientCreateResourceMultihash(t *testing.T) {
+
+ signer, _ := newTestSigner()
+
+ srv := testutil.NewTestSwarmServer(t, serverFunc)
+ client := NewClient(srv.URL)
+ defer srv.Close()
+
+ // add the data our multihash aliased manifest will point to
+ databytes := []byte("bar")
+
+ swarmHash, err := client.UploadRaw(bytes.NewReader(databytes), int64(len(databytes)), false)
+ if err != nil {
+ t.Fatalf("Error uploading raw test data: %s", err)
+ }
+
+ s := common.FromHex(swarmHash)
+ mh := multihash.ToMultihash(s)
+
+ // our mutable resource "name"
+ resourceName := "foo.eth"
+
+ createRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
+ Name: resourceName,
+ Frequency: 13,
+ StartTime: srv.GetCurrentTime(),
+ Owner: signer.Address(),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ createRequest.SetData(mh, true)
+ if err := createRequest.Sign(signer); err != nil {
+ t.Fatalf("Error signing update: %s", err)
+ }
+
+ resourceManifestHash, err := client.CreateResource(createRequest)
+
+ if err != nil {
+ t.Fatalf("Error creating resource: %s", err)
+ }
+
+ correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e"
+ if resourceManifestHash != correctManifestAddrHex {
+ t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash)
+ }
+
+ reader, err := client.GetResource(correctManifestAddrHex)
+ if err != nil {
+ t.Fatalf("Error retrieving resource: %s", err)
+ }
+ defer reader.Close()
+ gotData, err := ioutil.ReadAll(reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(mh, gotData) {
+ t.Fatalf("Expected: %v, got %v", mh, gotData)
+ }
+
+}
+
+// TestClientCreateUpdateResource will check that mutable resources can be created and updated via the HTTP client.
+func TestClientCreateUpdateResource(t *testing.T) {
+
+ signer, _ := newTestSigner()
+
+ srv := testutil.NewTestSwarmServer(t, serverFunc)
+ client := NewClient(srv.URL)
+ defer srv.Close()
+
+ // set raw data for the resource
+ databytes := []byte("En un lugar de La Mancha, de cuyo nombre no quiero acordarme...")
+
+ // our mutable resource name
+ resourceName := "El Quijote"
+
+ createRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
+ Name: resourceName,
+ Frequency: 13,
+ StartTime: srv.GetCurrentTime(),
+ Owner: signer.Address(),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ createRequest.SetData(databytes, false)
+ if err := createRequest.Sign(signer); err != nil {
+ t.Fatalf("Error signing update: %s", err)
+ }
+
+ resourceManifestHash, err := client.CreateResource(createRequest)
+ if err != nil {
+ t.Fatalf("Error creating resource: %s", err)
+ }
+
+ correctManifestAddrHex := "cc7904c17b49f9679e2d8006fe25e87e3f5c2072c2b49cab50f15e544471b30a"
+ if resourceManifestHash != correctManifestAddrHex {
+ t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, resourceManifestHash)
+ }
+
+ reader, err := client.GetResource(correctManifestAddrHex)
+ if err != nil {
+ t.Fatalf("Error retrieving resource: %s", err)
+ }
+ defer reader.Close()
+ gotData, err := ioutil.ReadAll(reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(databytes, gotData) {
+ t.Fatalf("Expected: %v, got %v", databytes, gotData)
+ }
+
+ // define different data
+ databytes = []byte("... no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero ...")
+
+ updateRequest, err := client.GetResourceMetadata(correctManifestAddrHex)
+ if err != nil {
+ t.Fatalf("Error retrieving update request template: %s", err)
+ }
+
+ updateRequest.SetData(databytes, false)
+ if err := updateRequest.Sign(signer); err != nil {
+ t.Fatalf("Error signing update: %s", err)
+ }
+
+ if err = client.UpdateResource(updateRequest); err != nil {
+ t.Fatalf("Error updating resource: %s", err)
+ }
+
+ reader, err = client.GetResource(correctManifestAddrHex)
+ if err != nil {
+ t.Fatalf("Error retrieving resource: %s", err)
+ }
+ defer reader.Close()
+ gotData, err = ioutil.ReadAll(reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(databytes, gotData) {
+ t.Fatalf("Expected: %v, got %v", databytes, gotData)
+ }
+
+}
diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go
index 5a7c9e93e..76a57625b 100644
--- a/swarm/api/http/server.go
+++ b/swarm/api/http/server.go
@@ -38,7 +38,6 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/log"
@@ -101,9 +100,11 @@ func NewServer(api *api.API, corsString string) *Server {
server.Handler = c.Handler(mux)
return server
}
+
func (s *Server) ListenAndServe(addr string) error {
return http.ListenAndServe(addr, s)
}
+
func (s *Server) HandleRootPaths(w http.ResponseWriter, r *Request) {
switch r.Method {
case http.MethodGet:
@@ -133,6 +134,7 @@ func (s *Server) HandleRootPaths(w http.ResponseWriter, r *Request) {
Respond(w, r, "Not Found", http.StatusNotFound)
}
}
+
func (s *Server) HandleBzz(w http.ResponseWriter, r *Request) {
switch r.Method {
case http.MethodGet:
@@ -240,12 +242,6 @@ func (s *Server) WrapHandler(parseBzzUri bool, h func(http.ResponseWriter, *Requ
// https://developer.mozilla.org/en/docs/Web-based_protocol_handlers
// electron (chromium) api for registering bzz url scheme handlers:
// https://github.com/atom/electron/blob/master/docs/api/protocol.md
-
-// browser API for registering bzz url scheme handlers:
-// https://developer.mozilla.org/en/docs/Web-based_protocol_handlers
-// electron (chromium) api for registering bzz url scheme handlers:
-// https://github.com/atom/electron/blob/master/docs/api/protocol.md
-
type Server struct {
http.Handler
api *api.API
@@ -340,7 +336,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) {
var addr storage.Address
if r.uri.Addr != "" && r.uri.Addr != "encrypt" {
- addr, err = s.api.Resolve(ctx, r.uri)
+ addr, err = s.api.Resolve(r.Context(), r.uri)
if err != nil {
postFilesFail.Inc(1)
Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusInternalServerError)
@@ -348,7 +344,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) {
}
log.Debug("resolved key", "ruid", r.ruid, "key", addr)
} else {
- addr, err = s.api.NewManifest(ctx, toEncrypt)
+ addr, err = s.api.NewManifest(r.Context(), toEncrypt)
if err != nil {
postFilesFail.Inc(1)
Respond(w, r, err.Error(), http.StatusInternalServerError)
@@ -521,9 +517,8 @@ func resourcePostMode(path string) (isRaw bool, frequency uint64, err error) {
// If the latter is used, a subsequent bzz:// GET call to the manifest of the resource will return
// the page that the multihash is pointing to, as if it held a normal swarm content manifest
//
-// The resource name will be verbatim what is passed as the address part of the url.
-// For example, if a POST is made to /bzz-resource:/foo.eth/raw/13 a new resource with frequency 13
-// and name "foo.eth" will be created
+// The POST request admits a JSON structure as defined in the mru package: `mru.updateRequestJSON`
+// A request can a) create a resource, b) update a resource, or c) do both: create a resource and set its initial content
func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) {
log.Debug("handle.post.resource", "ruid", r.ruid)
@@ -535,33 +530,54 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) {
defer sp.Finish()
var err error
- var addr storage.Address
- var name string
- var outdata []byte
- isRaw, frequency, err := resourcePostMode(r.uri.Path)
+
+ // Creation and update must send mru.updateRequestJSON JSON structure
+ body, err := ioutil.ReadAll(r.Body)
if err != nil {
- Respond(w, r, err.Error(), http.StatusBadRequest)
+ Respond(w, r, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ var updateRequest mru.Request
+ if err := updateRequest.UnmarshalJSON(body); err != nil { // decodes request JSON
+ Respond(w, r, err.Error(), http.StatusBadRequest) //TODO: send different status response depending on error
return
}
- // new mutable resource creation will always have a frequency field larger than 0
- if frequency > 0 {
-
- name = r.uri.Addr
+ if updateRequest.IsUpdate() {
+ // Verify that the signature is intact and that the signer is authorized
+ // to update this resource
+ // Check this early, to avoid creating a resource and then not being able to set its first update.
+ if err = updateRequest.Verify(); err != nil {
+ Respond(w, r, err.Error(), http.StatusForbidden)
+ return
+ }
+ }
- // the key is the content addressed root chunk holding mutable resource metadata information
- addr, err = s.api.ResourceCreate(ctx, name, frequency)
+ if updateRequest.IsNew() {
+ err = s.api.ResourceCreate(r.Context(), &updateRequest)
if err != nil {
code, err2 := s.translateResourceError(w, r, "resource creation fail", err)
-
Respond(w, r, err2.Error(), code)
return
}
+ }
+ if updateRequest.IsUpdate() {
+ _, err = s.api.ResourceUpdate(r.Context(), &updateRequest.SignedResourceUpdate)
+ if err != nil {
+ Respond(w, r, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+
+ // at this point all requested operations (create, update or both) have succeeded,
+ // so if this was a new resource, create a manifest and send it back.
+
+ if updateRequest.IsNew() {
// we create a manifest so we can retrieve the resource with bzz:// later
// this manifest has a special "resource type" manifest, and its hash is the key of the mutable resource
- // root chunk
- m, err := s.api.NewResourceManifest(ctx, addr.Hex())
+ // metadata chunk (rootAddr)
+ m, err := s.api.NewResourceManifest(r.Context(), updateRequest.RootAddr().Hex())
if err != nil {
Respond(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError)
return
@@ -571,85 +587,21 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) {
// the client can access the root chunk key directly through its Hash member
// the manifest key should be set as content in the resolver of the ENS name
// \TODO update manifest key automatically in ENS
- outdata, err = json.Marshal(m)
+ outdata, err := json.Marshal(m)
if err != nil {
Respond(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError)
return
}
- } else {
- // to update the resource through http we need to retrieve the key for the mutable resource root chunk
- // that means that we retrieve the manifest and inspect its Hash member.
- manifestAddr := r.uri.Address()
- if manifestAddr == nil {
- manifestAddr, err = s.api.Resolve(ctx, r.uri)
- if err != nil {
- getFail.Inc(1)
- Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound)
- return
- }
- } else {
- w.Header().Set("Cache-Control", "max-age=2147483648")
- }
-
- // get the root chunk key from the manifest
- addr, err = s.api.ResolveResourceManifest(ctx, manifestAddr)
- if err != nil {
- getFail.Inc(1)
- Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound)
- return
- }
-
- log.Debug("handle.post.resource: resolved", "ruid", r.ruid, "manifestkey", manifestAddr, "rootchunkkey", addr)
-
- name, _, err = s.api.ResourceLookup(ctx, addr, 0, 0, &mru.LookupParams{})
- if err != nil {
- Respond(w, r, err.Error(), http.StatusNotFound)
- return
- }
- }
-
- // Creation and update must send data aswell. This data constitutes the update data itself.
- data, err := ioutil.ReadAll(r.Body)
- if err != nil {
- Respond(w, r, err.Error(), http.StatusInternalServerError)
- return
- }
-
- // Multihash will be passed as hex-encoded data, so we need to parse this to bytes
- if isRaw {
- _, _, _, err = s.api.ResourceUpdate(ctx, name, data)
- if err != nil {
- Respond(w, r, err.Error(), http.StatusBadRequest)
- return
- }
- } else {
- bytesdata, err := hexutil.Decode(string(data))
- if err != nil {
- Respond(w, r, err.Error(), http.StatusBadRequest)
- return
- }
- _, _, _, err = s.api.ResourceUpdateMultihash(ctx, name, bytesdata)
- if err != nil {
- Respond(w, r, err.Error(), http.StatusBadRequest)
- return
- }
- }
-
- // If we have data to return, write this now
- // \TODO there should always be data to return here
- if len(outdata) > 0 {
- w.Header().Add("Content-type", "text/plain")
- w.WriteHeader(http.StatusOK)
fmt.Fprint(w, string(outdata))
- return
}
- w.WriteHeader(http.StatusOK)
+ w.Header().Add("Content-type", "application/json")
}
// Retrieve mutable resource updates:
// bzz-resource://<id> - get latest update
// bzz-resource://<id>/<n> - get latest update on period n
// bzz-resource://<id>/<n>/<m> - get update version m of period n
+// bzz-resource://<id>/meta - get metadata and next version information
// <id> = ens name or hash
// TODO: Enable pass maxPeriod parameter
func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) {
@@ -669,31 +621,51 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) {
w.Header().Set("Cache-Control", "max-age=2147483648")
}
- // get the root chunk key from the manifest
- key, err := s.api.ResolveResourceManifest(r.Context(), manifestAddr)
+ // get the root chunk rootAddr from the manifest
+ rootAddr, err := s.api.ResolveResourceManifest(r.Context(), manifestAddr)
if err != nil {
getFail.Inc(1)
Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound)
return
}
- log.Debug("handle.get.resource: resolved", "ruid", r.ruid, "manifestkey", manifestAddr, "rootchunk key", key)
+ log.Debug("handle.get.resource: resolved", "ruid", r.ruid, "manifestkey", manifestAddr, "rootchunk addr", rootAddr)
- // determine if the query specifies period and version
+ // determine if the query specifies period and version or it is a metadata query
var params []string
if len(r.uri.Path) > 0 {
+ if r.uri.Path == "meta" {
+ unsignedUpdateRequest, err := s.api.ResourceNewRequest(r.Context(), rootAddr)
+ if err != nil {
+ getFail.Inc(1)
+ Respond(w, r, fmt.Sprintf("cannot retrieve resource metadata for rootAddr=%s: %s", rootAddr.Hex(), err), http.StatusNotFound)
+ return
+ }
+ rawResponse, err := unsignedUpdateRequest.MarshalJSON()
+ if err != nil {
+ Respond(w, r, fmt.Sprintf("cannot encode unsigned UpdateRequest: %v", err), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Add("Content-type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprint(w, string(rawResponse))
+ return
+
+ }
+
params = strings.Split(r.uri.Path, "/")
+
}
var name string
- var period uint64
- var version uint64
var data []byte
now := time.Now()
switch len(params) {
case 0: // latest only
- name, data, err = s.api.ResourceLookup(r.Context(), key, 0, 0, nil)
+ name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatest(rootAddr))
case 2: // specific period and version
+ var version uint64
+ var period uint64
version, err = strconv.ParseUint(params[1], 10, 32)
if err != nil {
break
@@ -702,13 +674,14 @@ func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) {
if err != nil {
break
}
- name, data, err = s.api.ResourceLookup(r.Context(), key, uint32(period), uint32(version), nil)
+ name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupVersion(rootAddr, uint32(period), uint32(version)))
case 1: // last version of specific period
+ var period uint64
period, err = strconv.ParseUint(params[0], 10, 32)
if err != nil {
break
}
- name, data, err = s.api.ResourceLookup(r.Context(), key, uint32(period), uint32(version), nil)
+ name, data, err = s.api.ResourceLookup(r.Context(), mru.LookupLatestVersionInPeriod(rootAddr, uint32(period)))
default: // bogus
err = mru.NewError(storage.ErrInvalidValue, "invalid mutable resource request")
}
@@ -766,7 +739,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) {
var err error
addr := r.uri.Address()
if addr == nil {
- addr, err = s.api.Resolve(ctx, r.uri)
+ addr, err = s.api.Resolve(r.Context(), r.uri)
if err != nil {
getFail.Inc(1)
Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound)
@@ -781,7 +754,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) {
// if path is set, interpret <key> as a manifest and return the
// raw entry at the given path
if r.uri.Path != "" {
- walker, err := s.api.NewManifestWalker(ctx, addr, nil)
+ walker, err := s.api.NewManifestWalker(r.Context(), addr, nil)
if err != nil {
getFail.Inc(1)
Respond(w, r, fmt.Sprintf("%s is not a manifest", addr), http.StatusBadRequest)
@@ -875,7 +848,7 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) {
return
}
- addr, err := s.api.Resolve(ctx, r.uri)
+ addr, err := s.api.Resolve(r.Context(), r.uri)
if err != nil {
getListFail.Inc(1)
Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound)
@@ -935,7 +908,7 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) {
manifestAddr := r.uri.Address()
if manifestAddr == nil {
- manifestAddr, err = s.api.Resolve(ctx, r.uri)
+ manifestAddr, err = s.api.Resolve(r.Context(), r.uri)
if err != nil {
getFileFail.Inc(1)
Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound)
@@ -947,8 +920,7 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) {
}
log.Debug("handle.get.file: resolved", "ruid", r.ruid, "key", manifestAddr)
-
- reader, contentType, status, contentKey, err := s.api.Get(ctx, manifestAddr, r.uri.Path)
+ reader, contentType, status, contentKey, err := s.api.Get(r.Context(), manifestAddr, r.uri.Path)
etag := common.Bytes2Hex(contentKey)
noneMatchEtag := r.Header.Get("If-None-Match")
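
For reference (not part of the diff), the three lookup modes the handler above maps the bzz-resource URL forms to, expressed through the API; rootAddr stands for the resource's metadata chunk address and the period/version numbers are arbitrary.

package example

import (
	"context"

	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

func lookups(ctx context.Context, a *api.API, rootAddr storage.Address) error {
	// bzz-resource://<id> - latest update
	if _, _, err := a.ResourceLookup(ctx, mru.LookupLatest(rootAddr)); err != nil {
		return err
	}
	// bzz-resource://<id>/<n> - latest version within period n
	if _, _, err := a.ResourceLookup(ctx, mru.LookupLatestVersionInPeriod(rootAddr, 4)); err != nil {
		return err
	}
	// bzz-resource://<id>/<n>/<m> - version m of period n
	_, _, err := a.ResourceLookup(ctx, mru.LookupVersion(rootAddr, 4, 2))
	return err
}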
diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go
index 8e1c26a33..e8bc1bdae 100644
--- a/swarm/api/http/server_test.go
+++ b/swarm/api/http/server_test.go
@@ -34,12 +34,13 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
"github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/storage"
+ "github.com/ethereum/go-ethereum/swarm/storage/mru"
"github.com/ethereum/go-ethereum/swarm/testutil"
)
@@ -94,6 +95,14 @@ func serverFunc(api *api.API) testutil.TestServer {
return NewServer(api, "")
}
+func newTestSigner() (*mru.GenericSigner, error) {
+ privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
+ if err != nil {
+ return nil, err
+ }
+ return mru.NewGenericSigner(privKey), nil
+}
+
// test the transparent resolving of multihash resource types with bzz:// scheme
//
// first upload data, and store the multihash to the resulting manifest in a resource update
@@ -101,6 +110,8 @@ func serverFunc(api *api.API) testutil.TestServer {
// and raw retrieve of that hash should return the data
func TestBzzResourceMultihash(t *testing.T) {
+ signer, _ := newTestSigner()
+
srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close()
@@ -123,15 +134,35 @@ func TestBzzResourceMultihash(t *testing.T) {
s := common.FromHex(string(b))
mh := multihash.ToMultihash(s)
- mhHex := hexutil.Encode(mh)
log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))
// our mutable resource "name"
keybytes := "foo.eth"
+ updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
+ Name: keybytes,
+ Frequency: 13,
+ StartTime: srv.GetCurrentTime(),
+ Owner: signer.Address(),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ updateRequest.SetData(mh, true)
+
+ if err := updateRequest.Sign(signer); err != nil {
+ t.Fatal(err)
+ }
+ log.Info("added data", "manifest", string(b), "data", common.ToHex(mh))
+
+ body, err := updateRequest.MarshalJSON()
+ if err != nil {
+ t.Fatal(err)
+ }
+
// create the multihash update
- url = fmt.Sprintf("%s/bzz-resource:/%s/13", srv.URL, keybytes)
- resp, err = http.Post(url, "application/octet-stream", bytes.NewReader([]byte(mhHex)))
+ url = fmt.Sprintf("%s/bzz-resource:/", srv.URL)
+ resp, err = http.Post(url, "application/json", bytes.NewReader(body))
if err != nil {
t.Fatal(err)
}
@@ -149,9 +180,9 @@ func TestBzzResourceMultihash(t *testing.T) {
t.Fatalf("data %s could not be unmarshaled: %v", b, err)
}
- correctManifestAddrHex := "d689648fb9e00ddc7ebcf474112d5881c5bf7dbc6e394681b1d224b11b59b5e0"
+ correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e"
if rsrcResp.Hex() != correctManifestAddrHex {
- t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp)
+ t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex())
}
// get bzz manifest transparent resource resolve
@@ -176,6 +207,8 @@ func TestBzzResourceMultihash(t *testing.T) {
// Test resource updates using the raw update methods
func TestBzzResource(t *testing.T) {
srv := testutil.NewTestSwarmServer(t, serverFunc)
+ signer, _ := newTestSigner()
+
defer srv.Close()
// our mutable resource "name"
@@ -188,9 +221,29 @@ func TestBzzResource(t *testing.T) {
t.Fatal(err)
}
+ updateRequest, err := mru.NewCreateUpdateRequest(&mru.ResourceMetadata{
+ Name: keybytes,
+ Frequency: 13,
+ StartTime: srv.GetCurrentTime(),
+ Owner: signer.Address(),
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ updateRequest.SetData(databytes, false)
+
+ if err := updateRequest.Sign(signer); err != nil {
+ t.Fatal(err)
+ }
+
+ body, err := updateRequest.MarshalJSON()
+ if err != nil {
+ t.Fatal(err)
+ }
+
// creates resource and sets update 1
- url := fmt.Sprintf("%s/bzz-resource:/%s/raw/13", srv.URL, []byte(keybytes))
- resp, err := http.Post(url, "application/octet-stream", bytes.NewReader(databytes))
+ url := fmt.Sprintf("%s/bzz-resource:/", srv.URL)
+ resp, err := http.Post(url, "application/json", bytes.NewReader(body))
if err != nil {
t.Fatal(err)
}
@@ -208,7 +261,7 @@ func TestBzzResource(t *testing.T) {
t.Fatalf("data %s could not be unmarshaled: %v", b, err)
}
- correctManifestAddrHex := "d689648fb9e00ddc7ebcf474112d5881c5bf7dbc6e394681b1d224b11b59b5e0"
+ correctManifestAddrHex := "6d3bc4664c97d8b821cb74bcae43f592494fb46d2d9cd31e69f3c7c802bbbd8e"
if rsrcResp.Hex() != correctManifestAddrHex {
t.Fatalf("Response resource key mismatch, expected '%s', got '%s'", correctManifestAddrHex, rsrcResp.Hex())
}
@@ -235,8 +288,7 @@ func TestBzzResource(t *testing.T) {
if len(manifest.Entries) != 1 {
t.Fatalf("Manifest has %d entries", len(manifest.Entries))
}
-
- correctRootKeyHex := "f667277e004e8486c7a3631fd226802430e84e9a81b6085d31f512a591ae0065"
+ correctRootKeyHex := "68f7ba07ac8867a4c841a4d4320e3cdc549df23702dc7285fcb6acf65df48562"
if manifest.Entries[0].Hash != correctRootKeyHex {
t.Fatalf("Expected manifest path '%s', got '%s'", correctRootKeyHex, manifest.Entries[0].Hash)
}
@@ -262,6 +314,11 @@ func TestBzzResource(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+
+ if resp.StatusCode != http.StatusNotFound {
+ t.Fatalf("Expected get non-existent resource to fail with StatusNotFound (404), got %d", resp.StatusCode)
+ }
+
resp.Body.Close()
// get latest update (1.1) through resource directly
@@ -285,9 +342,36 @@ func TestBzzResource(t *testing.T) {
// update 2
log.Info("update 2")
- url = fmt.Sprintf("%s/bzz-resource:/%s/raw", srv.URL, correctManifestAddrHex)
+
+ // 1.- get metadata about this resource
+ url = fmt.Sprintf("%s/bzz-resource:/%s/", srv.URL, correctManifestAddrHex)
+ resp, err = http.Get(url + "meta")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ t.Fatalf("Get resource metadata returned %s", resp.Status)
+ }
+ b, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ updateRequest = &mru.Request{}
+ if err = updateRequest.UnmarshalJSON(b); err != nil {
+ t.Fatalf("Error decoding resource metadata: %s", err)
+ }
data := []byte("foo")
- resp, err = http.Post(url, "application/octet-stream", bytes.NewReader(data))
+ updateRequest.SetData(data, false)
+ if err = updateRequest.Sign(signer); err != nil {
+ t.Fatal(err)
+ }
+ body, err = updateRequest.MarshalJSON()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err = http.Post(url, "application/json", bytes.NewReader(body))
if err != nil {
t.Fatal(err)
}
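Note on the update flow exercised by TestBzzResource above: a client no longer POSTs raw bytes to /bzz-resource:/<name>/raw. It first retrieves the resource metadata, sets the new data, signs the request and POSTs the JSON-encoded mru.Request. The following client-side sketch mirrors the second-update path of the test; the helper name updateResource, its parameter list and the error handling are illustrative assumptions, not part of this change.

package mruclient

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/ethereum/go-ethereum/swarm/storage/mru"
)

// updateResource fetches the current metadata of a mutable resource from
// <swarmURL>/bzz-resource:/<manifestAddrHex>/meta, sets the new data, signs
// the request and POSTs the JSON body back, as the test above does.
func updateResource(swarmURL, manifestAddrHex string, data []byte, signer *mru.GenericSigner) error {
	base := fmt.Sprintf("%s/bzz-resource:/%s/", swarmURL, manifestAddrHex)

	resp, err := http.Get(base + "meta")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("get resource metadata returned %s", resp.Status)
	}
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	updateRequest := &mru.Request{}
	if err := updateRequest.UnmarshalJSON(b); err != nil {
		return err
	}
	updateRequest.SetData(data, false) // false: raw data, not a multihash
	if err := updateRequest.Sign(signer); err != nil {
		return err
	}
	body, err := updateRequest.MarshalJSON()
	if err != nil {
		return err
	}

	resp, err = http.Post(base, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("resource update returned %s", resp.Status)
	}
	return nil
}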
diff --git a/swarm/bmt/bmt.go b/swarm/bmt/bmt.go
index 835587020..1a141047a 100644
--- a/swarm/bmt/bmt.go
+++ b/swarm/bmt/bmt.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-// Package bmt provides a binary merkle tree implementation
+// Package bmt provides a binary merkle tree implementation used for swarm chunk hash
package bmt
import (
@@ -26,16 +26,16 @@ import (
)
/*
-Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size
+Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size.
It is defined as the root hash of the binary merkle tree built over fixed size segments
-of the underlying chunk using any base hash function (e.g keccak 256 SHA3).
-Chunk with data shorter than the fixed size are hashed as if they had zero padding
+of the underlying chunk using any base hash function (e.g., keccak 256 SHA3).
+Chunks with data shorter than the fixed size are hashed as if they had zero padding.
BMT hash is used as the chunk hash function in swarm which in turn is the basis for the
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash
The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
-segment is a substring of a chunk starting at a particular offset
+segment is a substring of a chunk starting at a particular offset.
The size of the underlying segments is fixed to the size of the base hash (called the resolution
of the BMT hash), Using Keccak256 SHA3 hash is 32 bytes, the EVM word size to optimize for on-chain BMT verification
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.
@@ -46,11 +46,12 @@ Two implementations are provided:
that is simple to understand
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
control structure to coordinate the concurrent routines
- It implements the following interfaces
- * standard golang hash.Hash
- * SwarmHash
- * io.Writer
- * TODO: SegmentWriter
+
+ BMT Hasher implements the following interfaces
+ * standard golang hash.Hash - synchronous, reusable
+ * SwarmHash - SumWithSpan provided
+ * io.Writer - synchronous left-to-right datawriter
+ * AsyncWriter - concurrent section writes and asynchronous Sum call
*/
const (
@@ -69,7 +70,7 @@ type BaseHasherFunc func() hash.Hash
// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
// - implements the hash.Hash interface
// - reuses a pool of trees for amortised memory allocation and resource control
-// - supports order-agnostic concurrent segment writes (TODO:)
+// - supports order-agnostic concurrent segment writes and section (double segment) writes
// as well as sequential read and write
// - the same hasher instance must not be called concurrently on more than one chunk
// - the same hasher instance is synchronously reuseable
@@ -81,8 +82,7 @@ type Hasher struct {
bmt *tree // prebuilt BMT resource for flowcontrol and proofs
}
-// New creates a reusable Hasher
-// implements the hash.Hash interface
+// New creates a reusable BMT Hasher that
// pulls a new tree from a resource pool for hashing each chunk
func New(p *TreePool) *Hasher {
return &Hasher{
@@ -90,9 +90,9 @@ func New(p *TreePool) *Hasher {
}
}
-// TreePool provides a pool of trees used as resources by Hasher
-// a tree popped from the pool is guaranteed to have clean state
-// for hashing a new chunk
+// TreePool provides a pool of trees used as resources by the BMT Hasher.
+// A tree popped from the pool is guaranteed to have a clean state ready
+// for hashing a new chunk.
type TreePool struct {
lock sync.Mutex
c chan *tree // the channel to obtain a resource from the pool
@@ -101,7 +101,7 @@ type TreePool struct {
SegmentCount int // the number of segments on the base level of the BMT
Capacity int // pool capacity, controls concurrency
Depth int // depth of the bmt trees = int(log2(segmentCount))+1
- Datalength int // the total length of the data (count * size)
+ Size int // the total length of the data (count * size)
count int // current count of (ever) allocated resources
zerohashes [][]byte // lookup table for predictable padding subtrees for all levels
}
@@ -112,12 +112,12 @@ func NewTreePool(hasher BaseHasherFunc, segmentCount, capacity int) *TreePool {
// initialises the zerohashes lookup table
depth := calculateDepthFor(segmentCount)
segmentSize := hasher().Size()
- zerohashes := make([][]byte, depth)
+ zerohashes := make([][]byte, depth+1)
zeros := make([]byte, segmentSize)
zerohashes[0] = zeros
h := hasher()
- for i := 1; i < depth; i++ {
- zeros = doHash(h, nil, zeros, zeros)
+ for i := 1; i < depth+1; i++ {
+ zeros = doSum(h, nil, zeros, zeros)
zerohashes[i] = zeros
}
return &TreePool{
@@ -126,7 +126,7 @@ func NewTreePool(hasher BaseHasherFunc, segmentCount, capacity int) *TreePool {
SegmentSize: segmentSize,
SegmentCount: segmentCount,
Capacity: capacity,
- Datalength: segmentCount * segmentSize,
+ Size: segmentCount * segmentSize,
Depth: depth,
zerohashes: zerohashes,
}
@@ -155,7 +155,7 @@ func (p *TreePool) reserve() *tree {
select {
case t = <-p.c:
default:
- t = newTree(p.SegmentSize, p.Depth)
+ t = newTree(p.SegmentSize, p.Depth, p.hasher)
p.count++
}
return t
@@ -173,29 +173,28 @@ func (p *TreePool) release(t *tree) {
// the tree is 'locked' while not in the pool
type tree struct {
leaves []*node // leaf nodes of the tree, other nodes accessible via parent links
- cur int // index of rightmost currently open segment
+ cursor int // index of rightmost currently open segment
offset int // offset (cursor position) within currently open segment
- segment []byte // the rightmost open segment (not complete)
section []byte // the rightmost open section (double segment)
- depth int // number of levels
result chan []byte // result channel
- hash []byte // to record the result
span []byte // The span of the data subsumed under the chunk
}
// node is a reuseable segment hasher representing a node in a BMT
type node struct {
- isLeft bool // whether it is left side of the parent double segment
- parent *node // pointer to parent node in the BMT
- state int32 // atomic increment impl concurrent boolean toggle
- left, right []byte // this is where the content segment is set
+ isLeft bool // whether it is left side of the parent double segment
+ parent *node // pointer to parent node in the BMT
+ state int32 // atomic increment impl concurrent boolean toggle
+ left, right []byte // this is where the two children sections are written
+ hasher hash.Hash // preconstructed hasher on nodes
}
// newNode constructs a segment hasher node in the BMT (used by newTree)
-func newNode(index int, parent *node) *node {
+func newNode(index int, parent *node, hasher hash.Hash) *node {
return &node{
parent: parent,
isLeft: index%2 == 0,
+ hasher: hasher,
}
}
@@ -253,16 +252,21 @@ func (t *tree) draw(hash []byte) string {
// newTree initialises a tree by building up the nodes of a BMT
// - segment size is stipulated to be the size of the hash
-func newTree(segmentSize, depth int) *tree {
- n := newNode(0, nil)
+func newTree(segmentSize, depth int, hashfunc func() hash.Hash) *tree {
+ n := newNode(0, nil, hashfunc())
prevlevel := []*node{n}
// iterate over levels and creates 2^(depth-level) nodes
+ // the 0 level is on double segment sections, so we start at depth - 2 (the root node is created above)
count := 2
for level := depth - 2; level >= 0; level-- {
nodes := make([]*node, count)
for i := 0; i < count; i++ {
parent := prevlevel[i/2]
- nodes[i] = newNode(i, parent)
+ var hasher hash.Hash
+ if level == 0 {
+ hasher = hashfunc()
+ }
+ nodes[i] = newNode(i, parent, hasher)
}
prevlevel = nodes
count *= 2
@@ -270,13 +274,12 @@ func newTree(segmentSize, depth int) *tree {
// the datanode level is the nodes on the last level
return &tree{
leaves: prevlevel,
- result: make(chan []byte, 1),
- segment: make([]byte, segmentSize),
+ result: make(chan []byte),
section: make([]byte, 2*segmentSize),
}
}
-// methods needed by hash.Hash
+// methods needed to implement hash.Hash
// Size returns the size
func (h *Hasher) Size() int {
@@ -285,63 +288,40 @@ func (h *Hasher) Size() int {
// BlockSize returns the block size
func (h *Hasher) BlockSize() int {
- return h.pool.SegmentSize
-}
-
-// Hash hashes the data and the span using the bmt hasher
-func Hash(h *Hasher, span, data []byte) []byte {
- h.ResetWithLength(span)
- h.Write(data)
- return h.Sum(nil)
-}
-
-// Datalength returns the maximum data size that is hashed by the hasher =
-// segment count times segment size
-func (h *Hasher) DataLength() int {
- return h.pool.Datalength
+ return 2 * h.pool.SegmentSize
}
-// Sum returns the hash of the buffer
+// Sum returns the BMT root hash of the buffer
+// using Sum presupposes sequential synchronous writes (io.Writer interface)
// hash.Hash interface Sum method appends the byte slice to the underlying
// data before it calculates and returns the hash of the chunk
// caller must make sure Sum is not called concurrently with Write, writeSection
-// and WriteSegment (TODO:)
-func (h *Hasher) Sum(b []byte) (r []byte) {
- return h.sum(b, true, true)
-}
-
-// sum implements Sum taking parameters
-// * if the tree is released right away
-// * if sequential write is used (can read sections)
-func (h *Hasher) sum(b []byte, release, section bool) (r []byte) {
- t := h.bmt
- bh := h.pool.hasher()
- go h.writeSection(t.cur, t.section, true)
- bmtHash := <-t.result
+func (h *Hasher) Sum(b []byte) (s []byte) {
+ t := h.getTree()
+ // write the last section with final flag set to true
+ go h.writeSection(t.cursor, t.section, true, true)
+ // wait for the result
+ s = <-t.result
span := t.span
- // fmt.Println(t.draw(bmtHash))
- if release {
- h.releaseTree()
- }
+ // release the tree resource back to the pool
+ h.releaseTree()
// b + sha3(span + BMT(pure_chunk))
- if span == nil {
- return append(b, bmtHash...)
+ if len(span) == 0 {
+ return append(b, s...)
}
- return doHash(bh, b, span, bmtHash)
+ return doSum(h.pool.hasher(), b, span, s)
}
-// Hasher implements the SwarmHash interface
-
-// Hasher implements the io.Writer interface
+// methods needed to implement the SwarmHash and the io.Writer interfaces
-// Write fills the buffer to hash,
-// with every full segment calls writeSection
+// Write sequentially adds to the buffer to be hashed,
+// with every full section it calls writeSection in a goroutine
func (h *Hasher) Write(b []byte) (int, error) {
l := len(b)
- if l <= 0 {
+ if l == 0 {
return 0, nil
}
- t := h.bmt
+ t := h.getTree()
secsize := 2 * h.pool.SegmentSize
// calculate length of missing bit to complete current open section
smax := secsize - t.offset
@@ -359,20 +339,21 @@ func (h *Hasher) Write(b []byte) (int, error) {
return l, nil
}
} else {
- if t.cur == h.pool.SegmentCount*2 {
+ // if end of a section
+ if t.cursor == h.pool.SegmentCount*2 {
return 0, nil
}
}
- // read full segments and the last possibly partial segment from the input buffer
+ // read full sections and the last possibly partial section from the input buffer
for smax < l {
// section complete; push to tree asynchronously
- go h.writeSection(t.cur, t.section, false)
+ go h.writeSection(t.cursor, t.section, true, false)
// reset section
t.section = make([]byte, secsize)
- // copy from imput buffer at smax to right half of section
+ // copy from input buffer at smax to right half of section
copy(t.section, b[smax:])
// advance cursor
- t.cur++
+ t.cursor++
// smax here represents successive offsets in the input buffer
smax += secsize
}
@@ -382,83 +363,225 @@ func (h *Hasher) Write(b []byte) (int, error) {
// Reset needs to be called before writing to the hasher
func (h *Hasher) Reset() {
- h.getTree()
+ h.releaseTree()
}
-// Hasher implements the SwarmHash interface
+// methods needed to implement the SwarmHash interface
// ResetWithLength needs to be called before writing to the hasher
// the argument is supposed to be the byte slice binary representation of
// the length of the data subsumed under the hash, i.e., span
func (h *Hasher) ResetWithLength(span []byte) {
h.Reset()
- h.bmt.span = span
+ h.getTree().span = span
}
// releaseTree gives back the Tree to the pool whereby it unlocks
// it resets tree, segment and index
func (h *Hasher) releaseTree() {
t := h.bmt
- if t != nil {
- t.cur = 0
+ if t == nil {
+ return
+ }
+ h.bmt = nil
+ go func() {
+ t.cursor = 0
t.offset = 0
t.span = nil
- t.hash = nil
- h.bmt = nil
t.section = make([]byte, h.pool.SegmentSize*2)
- t.segment = make([]byte, h.pool.SegmentSize)
+ select {
+ case <-t.result:
+ default:
+ }
h.pool.release(t)
+ }()
+}
+
+// NewAsyncWriter extends Hasher with an interface for concurrent segment/section writes
+func (h *Hasher) NewAsyncWriter(double bool) *AsyncHasher {
+ secsize := h.pool.SegmentSize
+ if double {
+ secsize *= 2
+ }
+ write := func(i int, section []byte, final bool) {
+ h.writeSection(i, section, double, final)
}
+ return &AsyncHasher{
+ Hasher: h,
+ double: double,
+ secsize: secsize,
+ write: write,
+ }
+}
+
+// SectionWriter is an asynchronous segment/section writer interface
+type SectionWriter interface {
+ Reset() // standard init to be called before reuse
+ Write(index int, data []byte) // write into section of index
+ Sum(b []byte, length int, span []byte) []byte // returns the hash of the buffer
+ SectionSize() int // size of the async section unit to use
}
-// TODO: writeSegment writes the ith segment into the BMT tree
-// func (h *Hasher) writeSegment(i int, s []byte) {
-// go h.run(h.bmt.leaves[i/2], h.pool.hasher(), i%2 == 0, s)
-// }
+// AsyncHasher extends BMT Hasher with an asynchronous segment/section writer interface
+// AsyncHasher is unsafe and does not check indexes and section data lengths
+// it must be used with the right indexes and length and the right number of sections
+//
+// behaviour is undefined if
+// * non-final sections are shorter or longer than secsize
+// * the final section does not match length
+// * a section is written with an index higher than length/secsize
+// * length is set in the Sum call when length/secsize < maxsec
+//
+// * if Sum() is not called on a Hasher that is fully written
+// a process will block, and can be terminated with Reset
+// * it will not leak processes if not all sections are written, but it blocks
+// and keeps the resource, which can be released by calling Reset()
+type AsyncHasher struct {
+ *Hasher // extends the Hasher
+ mtx sync.Mutex // to lock the cursor access
+ double bool // whether to use double segments (call Hasher.writeSection)
+ secsize int // size of base section (size of hash or double)
+ write func(i int, section []byte, final bool)
+}
+
+// methods needed to implement AsyncWriter
+
+// SectionSize returns the size of async section unit to use
+func (sw *AsyncHasher) SectionSize() int {
+ return sw.secsize
+}
+
+// Write writes the i-th section of the BMT base
+// this function can and is meant to be called concurrently
+// it updates the index of the rightmost section written so far in a threadsafe manner
+func (sw *AsyncHasher) Write(i int, section []byte) {
+ sw.mtx.Lock()
+ defer sw.mtx.Unlock()
+ t := sw.getTree()
+ // cursor keeps track of the rightmost section written so far
+ // if index is lower than cursor then just write non-final section as is
+ if i < t.cursor {
+ // if index is not the rightmost, safe to write section
+ go sw.write(i, section, false)
+ return
+ }
+ // if there is a previous rightmost section safe to write section
+ if t.offset > 0 {
+ if i == t.cursor {
+ // i==cursor implies cursor was set by a Sum call, so we can write this section as the final one
+ // since it can be shorter, first we copy it to the padded buffer
+ t.section = make([]byte, sw.secsize)
+ copy(t.section, section)
+ go sw.write(i, t.section, true)
+ return
+ }
+ // the rightmost section just changed, so we write the previous one as non-final
+ go sw.write(t.cursor, t.section, false)
+ }
+ // set i as the index of the rightmost section written so far
+ // set t.offset to cursor*secsize+1
+ t.cursor = i
+ t.offset = i*sw.secsize + 1
+ t.section = make([]byte, sw.secsize)
+ copy(t.section, section)
+}
+
+// Sum can be called any time once the length and the span is known
+// potentially even before all segments have been written
+// in such cases Sum will block until all segments are present and
+// the hash for the length can be calculated.
+//
+// b: digest is appended to b
+// length: known length of the input (unsafe; undefined if out of range)
+// meta: metadata to hash together with BMT root for the final digest
+// e.g., span for protection against existential forgery
+func (sw *AsyncHasher) Sum(b []byte, length int, meta []byte) (s []byte) {
+ sw.mtx.Lock()
+ t := sw.getTree()
+ if length == 0 {
+ sw.mtx.Unlock()
+ s = sw.pool.zerohashes[sw.pool.Depth]
+ } else {
+ // for non-zero input the rightmost open section is written to the tree asynchronously;
+ // it is marked final if it is the actual last section (t.cursor == maxsec)
+ maxsec := (length - 1) / sw.secsize
+ if t.offset > 0 {
+ go sw.write(t.cursor, t.section, maxsec == t.cursor)
+ }
+ // set cursor to maxsec so final section is written when it arrives
+ t.cursor = maxsec
+ t.offset = length
+ result := t.result
+ sw.mtx.Unlock()
+ // wait for the result or reset
+ s = <-result
+ }
+ // release the tree back to the pool
+ sw.releaseTree()
+ // if no meta is given just append digest to b
+ if len(meta) == 0 {
+ return append(b, s...)
+ }
+ // hash together meta and BMT root hash using the pools
+ return doSum(sw.pool.hasher(), b, meta, s)
+}
// writeSection writes the hash of i-th section into level 1 node of the BMT tree
-func (h *Hasher) writeSection(i int, section []byte, final bool) {
+func (h *Hasher) writeSection(i int, section []byte, double bool, final bool) {
// select the leaf node for the section
- n := h.bmt.leaves[i]
- isLeft := n.isLeft
- n = n.parent
- bh := h.pool.hasher()
- // hash the section
- s := doHash(bh, nil, section)
+ var n *node
+ var isLeft bool
+ var hasher hash.Hash
+ var level int
+ t := h.getTree()
+ if double {
+ level++
+ n = t.leaves[i]
+ hasher = n.hasher
+ isLeft = n.isLeft
+ n = n.parent
+ // hash the section
+ section = doSum(hasher, nil, section)
+ } else {
+ n = t.leaves[i/2]
+ hasher = n.hasher
+ isLeft = i%2 == 0
+ }
// write hash into parent node
if final {
// for the last segment use writeFinalNode
- h.writeFinalNode(1, n, bh, isLeft, s)
+ h.writeFinalNode(level, n, hasher, isLeft, section)
} else {
- h.writeNode(n, bh, isLeft, s)
+ h.writeNode(n, hasher, isLeft, section)
}
}
// writeNode pushes the data to the node
-// if it is the first of 2 sisters written the routine returns
+// if it is the first of 2 sisters written, the routine terminates
// if it is the second, it calculates the hash and writes it
// to the parent node recursively
+// since hashing the parent is synchronous the same hasher can be used
func (h *Hasher) writeNode(n *node, bh hash.Hash, isLeft bool, s []byte) {
level := 1
for {
// at the root of the bmt just write the result to the result channel
if n == nil {
- h.bmt.result <- s
+ h.getTree().result <- s
return
}
- // otherwise assign child hash to branc
+ // otherwise assign child hash to left or right segment
if isLeft {
n.left = s
} else {
n.right = s
}
- // the child-thread first arriving will quit
+ // the child-thread first arriving will terminate
if n.toggle() {
return
}
- // the thread coming later now can be sure both left and right children are written
- // it calculates the hash of left|right and pushes it to the parent
- s = doHash(bh, nil, n.left, n.right)
+ // the thread coming second now can be sure both left and right children are written
+ // so it calculates the hash of left|right and pushes it to the parent
+ s = doSum(bh, nil, n.left, n.right)
isLeft = n.isLeft
n = n.parent
level++
@@ -476,7 +599,7 @@ func (h *Hasher) writeFinalNode(level int, n *node, bh hash.Hash, isLeft bool, s
// at the root of the bmt just write the result to the result channel
if n == nil {
if s != nil {
- h.bmt.result <- s
+ h.getTree().result <- s
}
return
}
@@ -485,25 +608,28 @@ func (h *Hasher) writeFinalNode(level int, n *node, bh hash.Hash, isLeft bool, s
// coming from left sister branch
// when the final section's path is going via left child node
// we include an all-zero subtree hash for the right level and toggle the node.
- // when the path is going through right child node, nothing to do
n.right = h.pool.zerohashes[level]
if s != nil {
n.left = s
// if a left final node carries a hash, it must be the first (and only thread)
// so the toggle is already in passive state no need no call
// yet thread needs to carry on pushing hash to parent
+ noHash = false
} else {
// if again first thread then propagate nil and calculate no hash
noHash = n.toggle()
}
} else {
// right sister branch
- // if s is nil, then thread arrived first at previous node and here there will be two,
- // so no need to do anything
if s != nil {
+ // if hash was pushed from right child node, write right segment and change state
n.right = s
+ // if toggle is true, we arrived first, so no hashing is needed; just push nil to parent
noHash = n.toggle()
+
} else {
+ // if s is nil, then thread arrived first at previous node and here there will be two,
+ // so no need to do anything and keep s = nil for parent
noHash = true
}
}
@@ -513,15 +639,16 @@ func (h *Hasher) writeFinalNode(level int, n *node, bh hash.Hash, isLeft bool, s
if noHash {
s = nil
} else {
- s = doHash(bh, nil, n.left, n.right)
+ s = doSum(bh, nil, n.left, n.right)
}
+ // iterate to parent
isLeft = n.isLeft
n = n.parent
level++
}
}
-// getTree obtains a BMT resource by reserving one from the pool
+// getTree obtains a BMT resource by reserving one from the pool and assigns it to the bmt field
func (h *Hasher) getTree() *tree {
if h.bmt != nil {
return h.bmt
@@ -539,7 +666,7 @@ func (n *node) toggle() bool {
}
// calculates the hash of the data using hash.Hash
-func doHash(h hash.Hash, b []byte, data ...[]byte) []byte {
+func doSum(h hash.Hash, b []byte, data ...[]byte) []byte {
h.Reset()
for _, v := range data {
h.Write(v)
@@ -547,6 +674,7 @@ func doHash(h hash.Hash, b []byte, data ...[]byte) []byte {
return h.Sum(b)
}
+// hashstr is a pretty printer for bytes used in tree.draw
func hashstr(b []byte) string {
end := len(b)
if end > 4 {
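To make the reworked bmt API above concrete, here is a short sketch of the two write modes the Hasher now supports: the synchronous io.Writer path and the new order-agnostic AsyncWriter. The chunk size, the example span bytes and the main wrapper are illustrative assumptions; the calls themselves are the exported API from this diff.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto/sha3"
	"github.com/ethereum/go-ethereum/swarm/bmt"
)

func main() {
	pool := bmt.NewTreePool(sha3.NewKeccak256, bmt.SegmentCount, bmt.PoolSize)
	h := bmt.New(pool)

	data := make([]byte, 4096)              // one full chunk of zero bytes
	span := []byte{0, 16, 0, 0, 0, 0, 0, 0} // example 8-byte span (4096, little-endian)

	// synchronous, sequential io.Writer use (what syncHash in bmt_test.go wraps)
	h.ResetWithLength(span)
	h.Write(data)
	syncRoot := h.Sum(nil)

	// asynchronous, order-agnostic section writes through the new AsyncWriter
	sw := h.NewAsyncWriter(false) // false: section size equals one segment
	sw.Reset()
	secsize := sw.SectionSize()
	for i := 0; i < len(data)/secsize; i++ {
		sw.Write(i, data[i*secsize:(i+1)*secsize])
	}
	asyncRoot := sw.Sum(nil, len(data), span)

	// both digests are expected to agree
	fmt.Printf("sync:  %x\nasync: %x\n", syncRoot, asyncRoot)
}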
diff --git a/swarm/bmt/bmt_test.go b/swarm/bmt/bmt_test.go
index ae40eadab..891d8cbb2 100644
--- a/swarm/bmt/bmt_test.go
+++ b/swarm/bmt/bmt_test.go
@@ -39,13 +39,12 @@ var counts = []int{1, 2, 3, 4, 5, 8, 9, 15, 16, 17, 32, 37, 42, 53, 63, 64, 65,
// calculates the Keccak256 SHA3 hash of the data
func sha3hash(data ...[]byte) []byte {
h := sha3.NewKeccak256()
- return doHash(h, nil, data...)
+ return doSum(h, nil, data...)
}
// TestRefHasher tests that the RefHasher computes the expected BMT hash for
-// all data lengths between 0 and 256 bytes
+// some small data lengths
func TestRefHasher(t *testing.T) {
-
// the test struct is used to specify the expected BMT hash for
// segment counts between from and to and lengths from 1 to datalength
type test struct {
@@ -129,7 +128,7 @@ func TestRefHasher(t *testing.T) {
}
}
-// tests if hasher responds with correct hash
+// tests if the hasher responds with the correct hash, comparing against the reference implementation's return value
func TestHasherEmptyData(t *testing.T) {
hasher := sha3.NewKeccak256
var data []byte
@@ -140,7 +139,7 @@ func TestHasherEmptyData(t *testing.T) {
bmt := New(pool)
rbmt := NewRefHasher(hasher, count)
refHash := rbmt.Hash(data)
- expHash := Hash(bmt, nil, data)
+ expHash := syncHash(bmt, nil, data)
if !bytes.Equal(expHash, refHash) {
t.Fatalf("hash mismatch with reference. expected %x, got %x", refHash, expHash)
}
@@ -148,7 +147,8 @@ func TestHasherEmptyData(t *testing.T) {
}
}
-func TestHasherCorrectness(t *testing.T) {
+// tests sequential write with entire max size written in one go
+func TestSyncHasherCorrectness(t *testing.T) {
data := newData(BufferSize)
hasher := sha3.NewKeccak256
size := hasher().Size()
@@ -157,7 +157,7 @@ func TestHasherCorrectness(t *testing.T) {
for _, count := range counts {
t.Run(fmt.Sprintf("segments_%v", count), func(t *testing.T) {
max := count * size
- incr := 1
+ var incr int
capacity := 1
pool := NewTreePool(hasher, count, capacity)
defer pool.Drain(0)
@@ -173,6 +173,44 @@ func TestHasherCorrectness(t *testing.T) {
}
}
+// tests order-neutral concurrent writes with entire max size written in one go
+func TestAsyncCorrectness(t *testing.T) {
+ data := newData(BufferSize)
+ hasher := sha3.NewKeccak256
+ size := hasher().Size()
+ whs := []whenHash{first, last, random}
+
+ for _, double := range []bool{false, true} {
+ for _, wh := range whs {
+ for _, count := range counts {
+ t.Run(fmt.Sprintf("double_%v_hash_when_%v_segments_%v", double, wh, count), func(t *testing.T) {
+ max := count * size
+ var incr int
+ capacity := 1
+ pool := NewTreePool(hasher, count, capacity)
+ defer pool.Drain(0)
+ for n := 1; n <= max; n += incr {
+ incr = 1 + rand.Intn(5)
+ bmt := New(pool)
+ d := data[:n]
+ rbmt := NewRefHasher(hasher, count)
+ exp := rbmt.Hash(d)
+ got := syncHash(bmt, nil, d)
+ if !bytes.Equal(got, exp) {
+ t.Fatalf("wrong sync hash for datalength %v: expected %x (ref), got %x", n, exp, got)
+ }
+ sw := bmt.NewAsyncWriter(double)
+ got = asyncHashRandom(sw, nil, d, wh)
+ if !bytes.Equal(got, exp) {
+ t.Fatalf("wrong async hash for datalength %v: expected %x, got %x", n, exp, got)
+ }
+ }
+ })
+ }
+ }
+ }
+}
+
// Tests that the BMT hasher can be synchronously reused with poolsizes 1 and PoolSize
func TestHasherReuse(t *testing.T) {
t.Run(fmt.Sprintf("poolsize_%d", 1), func(t *testing.T) {
@@ -183,6 +221,7 @@ func TestHasherReuse(t *testing.T) {
})
}
+// tests that reusing the BMT hasher does not corrupt the result
func testHasherReuse(poolsize int, t *testing.T) {
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, SegmentCount, poolsize)
@@ -191,7 +230,7 @@ func testHasherReuse(poolsize int, t *testing.T) {
for i := 0; i < 100; i++ {
data := newData(BufferSize)
- n := rand.Intn(bmt.DataLength())
+ n := rand.Intn(bmt.Size())
err := testHasherCorrectness(bmt, hasher, data, n, SegmentCount)
if err != nil {
t.Fatal(err)
@@ -199,8 +238,8 @@ func testHasherReuse(poolsize int, t *testing.T) {
}
}
-// Tests if pool can be cleanly reused even in concurrent use
-func TestBMTHasherConcurrentUse(t *testing.T) {
+// Tests if pool can be cleanly reused even in concurrent use by several hashers
+func TestBMTConcurrentUse(t *testing.T) {
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, SegmentCount, PoolSize)
defer pool.Drain(0)
@@ -211,7 +250,7 @@ func TestBMTHasherConcurrentUse(t *testing.T) {
go func() {
bmt := New(pool)
data := newData(BufferSize)
- n := rand.Intn(bmt.DataLength())
+ n := rand.Intn(bmt.Size())
errc <- testHasherCorrectness(bmt, hasher, data, n, 128)
}()
}
@@ -234,7 +273,7 @@ LOOP:
// Tests BMT Hasher io.Writer interface is working correctly
// even multiple short random write buffers
-func TestBMTHasherWriterBuffers(t *testing.T) {
+func TestBMTWriterBuffers(t *testing.T) {
hasher := sha3.NewKeccak256
for _, count := range counts {
@@ -247,7 +286,7 @@ func TestBMTHasherWriterBuffers(t *testing.T) {
data := newData(n)
rbmt := NewRefHasher(hasher, count)
refHash := rbmt.Hash(data)
- expHash := Hash(bmt, nil, data)
+ expHash := syncHash(bmt, nil, data)
if !bytes.Equal(expHash, refHash) {
t.Fatalf("hash mismatch with reference. expected %x, got %x", refHash, expHash)
}
@@ -308,57 +347,65 @@ func testHasherCorrectness(bmt *Hasher, hasher BaseHasherFunc, d []byte, n, coun
data := d[:n]
rbmt := NewRefHasher(hasher, count)
exp := sha3hash(span, rbmt.Hash(data))
- got := Hash(bmt, span, data)
+ got := syncHash(bmt, span, data)
if !bytes.Equal(got, exp) {
return fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
}
return err
}
-func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) }
-func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) }
-func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) }
-func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
-func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
-func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }
-
-func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) }
-func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) }
-func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) }
-func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
-func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
-func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }
-
-func BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) }
-func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) }
-func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) }
-func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
-func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
-func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }
-
-func BenchmarkBMTHasher_4k(t *testing.B) { benchmarkBMTHasher(4096, t) }
-func BenchmarkBMTHasher_2k(t *testing.B) { benchmarkBMTHasher(4096/2, t) }
-func BenchmarkBMTHasher_1k(t *testing.B) { benchmarkBMTHasher(4096/4, t) }
-func BenchmarkBMTHasher_512b(t *testing.B) { benchmarkBMTHasher(4096/8, t) }
-func BenchmarkBMTHasher_256b(t *testing.B) { benchmarkBMTHasher(4096/16, t) }
-func BenchmarkBMTHasher_128b(t *testing.B) { benchmarkBMTHasher(4096/32, t) }
-
-func BenchmarkBMTHasherNoPool_4k(t *testing.B) { benchmarkBMTHasherPool(1, 4096, t) }
-func BenchmarkBMTHasherNoPool_2k(t *testing.B) { benchmarkBMTHasherPool(1, 4096/2, t) }
-func BenchmarkBMTHasherNoPool_1k(t *testing.B) { benchmarkBMTHasherPool(1, 4096/4, t) }
-func BenchmarkBMTHasherNoPool_512b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/8, t) }
-func BenchmarkBMTHasherNoPool_256b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/16, t) }
-func BenchmarkBMTHasherNoPool_128b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/32, t) }
-
-func BenchmarkBMTHasherPool_4k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096, t) }
-func BenchmarkBMTHasherPool_2k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/2, t) }
-func BenchmarkBMTHasherPool_1k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/4, t) }
-func BenchmarkBMTHasherPool_512b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/8, t) }
-func BenchmarkBMTHasherPool_256b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/16, t) }
-func BenchmarkBMTHasherPool_128b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/32, t) }
+// BenchmarkBMT benchmarks SHA3, the concurrency baseline, the reference hasher and the BMT hasher over a range of chunk sizes
+func BenchmarkBMT(t *testing.B) {
+ for size := 4096; size >= 128; size /= 2 {
+ t.Run(fmt.Sprintf("%v_size_%v", "SHA3", size), func(t *testing.B) {
+ benchmarkSHA3(t, size)
+ })
+ t.Run(fmt.Sprintf("%v_size_%v", "Baseline", size), func(t *testing.B) {
+ benchmarkBMTBaseline(t, size)
+ })
+ t.Run(fmt.Sprintf("%v_size_%v", "REF", size), func(t *testing.B) {
+ benchmarkRefHasher(t, size)
+ })
+ t.Run(fmt.Sprintf("%v_size_%v", "BMT", size), func(t *testing.B) {
+ benchmarkBMT(t, size)
+ })
+ }
+}
+
+type whenHash = int
+
+const (
+ first whenHash = iota
+ last
+ random
+)
+
+func BenchmarkBMTAsync(t *testing.B) {
+ whs := []whenHash{first, last, random}
+ for size := 4096; size >= 128; size /= 2 {
+ for _, wh := range whs {
+ for _, double := range []bool{false, true} {
+ t.Run(fmt.Sprintf("double_%v_hash_when_%v_size_%v", double, wh, size), func(t *testing.B) {
+ benchmarkBMTAsync(t, size, wh, double)
+ })
+ }
+ }
+ }
+}
+
+func BenchmarkPool(t *testing.B) {
+ caps := []int{1, PoolSize}
+ for size := 4096; size >= 128; size /= 2 {
+ for _, c := range caps {
+ t.Run(fmt.Sprintf("poolsize_%v_size_%v", c, size), func(t *testing.B) {
+ benchmarkPool(t, c, size)
+ })
+ }
+ }
+}
// benchmarks simple sha3 hash on chunks
-func benchmarkSHA3(n int, t *testing.B) {
+func benchmarkSHA3(t *testing.B, n int) {
data := newData(n)
hasher := sha3.NewKeccak256
h := hasher()
@@ -366,9 +413,7 @@ func benchmarkSHA3(n int, t *testing.B) {
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
- h.Reset()
- h.Write(data)
- h.Sum(nil)
+ doSum(h, nil, data)
}
}
@@ -377,7 +422,7 @@ func benchmarkSHA3(n int, t *testing.B) {
// doing it on n PoolSize each reusing the base hasher
// the premise is that this is the minimum computation needed for a BMT
// therefore this serves as a theoretical optimum for concurrent implementations
-func benchmarkBMTBaseline(n int, t *testing.B) {
+func benchmarkBMTBaseline(t *testing.B, n int) {
hasher := sha3.NewKeccak256
hashSize := hasher().Size()
data := newData(hashSize)
@@ -394,9 +439,7 @@ func benchmarkBMTBaseline(n int, t *testing.B) {
defer wg.Done()
h := hasher()
for atomic.AddInt32(&i, 1) < count {
- h.Reset()
- h.Write(data)
- h.Sum(nil)
+ doSum(h, nil, data)
}
}()
}
@@ -405,21 +448,39 @@ func benchmarkBMTBaseline(n int, t *testing.B) {
}
// benchmarks BMT Hasher
-func benchmarkBMTHasher(n int, t *testing.B) {
+func benchmarkBMT(t *testing.B, n int) {
data := newData(n)
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, SegmentCount, PoolSize)
+ bmt := New(pool)
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
- bmt := New(pool)
- Hash(bmt, nil, data)
+ syncHash(bmt, nil, data)
+ }
+}
+
+// benchmarks BMT hasher with asynchronous concurrent segment/section writes
+func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
+ data := newData(n)
+ hasher := sha3.NewKeccak256
+ pool := NewTreePool(hasher, SegmentCount, PoolSize)
+ bmt := New(pool).NewAsyncWriter(double)
+ idxs, segments := splitAndShuffle(bmt.SectionSize(), data)
+ shuffle(len(idxs), func(i int, j int) {
+ idxs[i], idxs[j] = idxs[j], idxs[i]
+ })
+
+ t.ReportAllocs()
+ t.ResetTimer()
+ for i := 0; i < t.N; i++ {
+ asyncHash(bmt, nil, n, wh, idxs, segments)
}
}
// benchmarks 100 concurrent bmt hashes with pool capacity
-func benchmarkBMTHasherPool(poolsize, n int, t *testing.B) {
+func benchmarkPool(t *testing.B, poolsize, n int) {
data := newData(n)
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, SegmentCount, poolsize)
@@ -434,7 +495,7 @@ func benchmarkBMTHasherPool(poolsize, n int, t *testing.B) {
go func() {
defer wg.Done()
bmt := New(pool)
- Hash(bmt, nil, data)
+ syncHash(bmt, nil, data)
}()
}
wg.Wait()
@@ -442,7 +503,7 @@ func benchmarkBMTHasherPool(poolsize, n int, t *testing.B) {
}
// benchmarks the reference hasher
-func benchmarkRefHasher(n int, t *testing.B) {
+func benchmarkRefHasher(t *testing.B, n int) {
data := newData(n)
hasher := sha3.NewKeccak256
rbmt := NewRefHasher(hasher, 128)
@@ -462,3 +523,93 @@ func newData(bufferSize int) []byte {
}
return data
}
+
+// Hash hashes the data and the span using the bmt hasher
+func syncHash(h *Hasher, span, data []byte) []byte {
+ h.ResetWithLength(span)
+ h.Write(data)
+ return h.Sum(nil)
+}
+
+func splitAndShuffle(secsize int, data []byte) (idxs []int, segments [][]byte) {
+ l := len(data)
+ n := l / secsize
+ if l%secsize > 0 {
+ n++
+ }
+ for i := 0; i < n; i++ {
+ idxs = append(idxs, i)
+ end := (i + 1) * secsize
+ if end > l {
+ end = l
+ }
+ section := data[i*secsize : end]
+ segments = append(segments, section)
+ }
+ shuffle(n, func(i int, j int) {
+ idxs[i], idxs[j] = idxs[j], idxs[i]
+ })
+ return idxs, segments
+}
+
+// splits the input data and performs a random shuffle to mock async section writes
+func asyncHashRandom(bmt SectionWriter, span []byte, data []byte, wh whenHash) (s []byte) {
+ idxs, segments := splitAndShuffle(bmt.SectionSize(), data)
+ return asyncHash(bmt, span, len(data), wh, idxs, segments)
+}
+
+// mock for async section writes for BMT SectionWriter
+// requires a permutation (a random shuffle) of list of all indexes of segments
+// and writes them in order to the appropriate section
+// the Sum function is called according to the wh parameter (first, last, random [relative to segment writes])
+func asyncHash(bmt SectionWriter, span []byte, l int, wh whenHash, idxs []int, segments [][]byte) (s []byte) {
+ bmt.Reset()
+ if l == 0 {
+ return bmt.Sum(nil, l, span)
+ }
+ c := make(chan []byte, 1)
+ hashf := func() {
+ c <- bmt.Sum(nil, l, span)
+ }
+ maxsize := len(idxs)
+ var r int
+ if wh == random {
+ r = rand.Intn(maxsize)
+ }
+ for i, idx := range idxs {
+ bmt.Write(idx, segments[idx])
+ if (wh == first || wh == random) && i == r {
+ go hashf()
+ }
+ }
+ if wh == last {
+ return bmt.Sum(nil, l, span)
+ }
+ return <-c
+}
+
+// this is also in swarm/network_test.go
+// shuffle pseudo-randomizes the order of elements.
+// n is the number of elements. Shuffle panics if n < 0.
+// swap swaps the elements with indexes i and j.
+func shuffle(n int, swap func(i, j int)) {
+ if n < 0 {
+ panic("invalid argument to Shuffle")
+ }
+
+ // Fisher-Yates shuffle: https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle
+ // Shuffle really ought not be called with n that doesn't fit in 32 bits.
+ // Not only will it take a very long time, but with 2³¹! possible permutations,
+ // there's no way that any PRNG can have a big enough internal state to
+ // generate even a minuscule percentage of the possible permutations.
+ // Nevertheless, the right API signature accepts an int n, so handle it as best we can.
+ i := n - 1
+ for ; i > 1<<31-1-1; i-- {
+ j := int(rand.Int63n(int64(i + 1)))
+ swap(i, j)
+ }
+ for ; i > 0; i-- {
+ j := int(rand.Int31n(int32(i + 1)))
+ swap(i, j)
+ }
+}
diff --git a/swarm/fuse/swarmfs_unix.go b/swarm/fuse/swarmfs_unix.go
index 7a913b0de..9ff55cc32 100644
--- a/swarm/fuse/swarmfs_unix.go
+++ b/swarm/fuse/swarmfs_unix.go
@@ -120,6 +120,10 @@ func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
log.Trace("swarmfs mount: traversing manifest map")
for suffix, entry := range manifestEntryMap {
+ if suffix == "" { //empty suffix means that the file has no name - i.e. this is the default entry in a manifest. Since we cannot have files without a name, let us ignore this entry
+ log.Warn("Manifest has an empty-path (default) entry which will be ignored in FUSE mount.")
+ continue
+ }
addr := common.Hex2Bytes(entry.Hash)
fullpath := "/" + suffix
basepath := filepath.Dir(fullpath)
diff --git a/swarm/network/simulation/bucket.go b/swarm/network/simulation/bucket.go
new file mode 100644
index 000000000..b37afaaa4
--- /dev/null
+++ b/swarm/network/simulation/bucket.go
@@ -0,0 +1,81 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "github.com/ethereum/go-ethereum/p2p/discover"
+)
+
+// BucketKey is the type that should be used for keys in simulation buckets.
+type BucketKey string
+
+// NodeItem returns an item set in the ServiceFunc function for a particular node.
+func (s *Simulation) NodeItem(id discover.NodeID, key interface{}) (value interface{}, ok bool) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if _, ok := s.buckets[id]; !ok {
+ return nil, false
+ }
+ return s.buckets[id].Load(key)
+}
+
+// SetNodeItem sets a new item associated with the node with provided NodeID.
+// Buckets should be used to avoid managing separate simulation global state.
+func (s *Simulation) SetNodeItem(id discover.NodeID, key interface{}, value interface{}) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.buckets[id].Store(key, value)
+}
+
+// NodesItems returns a map of items from all nodes that are set under the
+// same BucketKey.
+func (s *Simulation) NodesItems(key interface{}) (values map[discover.NodeID]interface{}) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ ids := s.NodeIDs()
+ values = make(map[discover.NodeID]interface{}, len(ids))
+ for _, id := range ids {
+ if _, ok := s.buckets[id]; !ok {
+ continue
+ }
+ if v, ok := s.buckets[id].Load(key); ok {
+ values[id] = v
+ }
+ }
+ return values
+}
+
+// UpNodesItems returns a map of items with the same BucketKey from all nodes that are up.
+func (s *Simulation) UpNodesItems(key interface{}) (values map[discover.NodeID]interface{}) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ ids := s.UpNodeIDs()
+ values = make(map[discover.NodeID]interface{})
+ for _, id := range ids {
+ if _, ok := s.buckets[id]; !ok {
+ continue
+ }
+ if v, ok := s.buckets[id].Load(key); ok {
+ values[id] = v
+ }
+ }
+ return values
+}
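The bucket API above is meant to be used together with the ServiceFunc constructor: each node stores its per-node values (service instances, APIs, counters) in its bucket at construction time, and test code reads them back with NodeItem, NodesItems or UpNodesItems, as TestServiceBucket below exercises. A minimal sketch follows; newTestService is a hypothetical caller-supplied constructor and the helper name newBucketedSim is illustrative only.

package example

import (
	"sync"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
)

// newBucketedSim wires a single service into a simulation and stores each
// node's service instance in that node's bucket under a shared key.
func newBucketedSim(newTestService func() node.Service) (*simulation.Simulation, simulation.BucketKey) {
	key := simulation.BucketKey("service")
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"svc": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
			s := newTestService()
			// values stored here are later retrievable via NodeItem, NodesItems and UpNodesItems
			b.Store(key, s)
			return s, nil, nil
		},
	})
	return sim, key
}

After sim.AddNode or sim.AddNodes, sim.NodeItem(id, key) returns the stored value for that node, and sim.UpNodesItems(key) restricts the view to nodes that are still up.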
diff --git a/swarm/network/simulation/bucket_test.go b/swarm/network/simulation/bucket_test.go
new file mode 100644
index 000000000..461d99825
--- /dev/null
+++ b/swarm/network/simulation/bucket_test.go
@@ -0,0 +1,155 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "sync"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+)
+
+// TestServiceBucket tests all bucket functionalities using subtests.
+// It constructs a simulation of two nodes by adding items to their buckets
+// in ServiceFunc constructor, then by SetNodeItem. Testing UpNodesItems
+// is done by stopping one node and validating availability of its items.
+func TestServiceBucket(t *testing.T) {
+ testKey := "Key"
+ testValue := "Value"
+
+ sim := New(map[string]ServiceFunc{
+ "noop": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+ b.Store(testKey, testValue+ctx.Config.ID.String())
+ return newNoopService(), nil, nil
+ },
+ })
+ defer sim.Close()
+
+ id1, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ id2, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("ServiceFunc bucket Store", func(t *testing.T) {
+ v, ok := sim.NodeItem(id1, testKey)
+ if !ok {
+ t.Fatal("bucket item not found")
+ }
+ s, ok := v.(string)
+ if !ok {
+ t.Fatal("bucket item value is not string")
+ }
+ if s != testValue+id1.String() {
+ t.Fatalf("expected %q, got %q", testValue+id1.String(), s)
+ }
+
+ v, ok = sim.NodeItem(id2, testKey)
+ if !ok {
+ t.Fatal("bucket item not found")
+ }
+ s, ok = v.(string)
+ if !ok {
+ t.Fatal("bucket item value is not string")
+ }
+ if s != testValue+id2.String() {
+ t.Fatalf("expected %q, got %q", testValue+id2.String(), s)
+ }
+ })
+
+ customKey := "anotherKey"
+ customValue := "anotherValue"
+
+ t.Run("SetNodeItem", func(t *testing.T) {
+ sim.SetNodeItem(id1, customKey, customValue)
+
+ v, ok := sim.NodeItem(id1, customKey)
+ if !ok {
+ t.Fatal("bucket item not found")
+ }
+ s, ok := v.(string)
+ if !ok {
+ t.Fatal("bucket item value is not string")
+ }
+ if s != customValue {
+ t.Fatalf("expected %q, got %q", customValue, s)
+ }
+
+ v, ok = sim.NodeItem(id2, customKey)
+ if ok {
+ t.Fatal("bucket item should not be found")
+ }
+ })
+
+ if err := sim.StopNode(id2); err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run("UpNodesItems", func(t *testing.T) {
+ items := sim.UpNodesItems(testKey)
+
+ v, ok := items[id1]
+ if !ok {
+ t.Errorf("node 1 item not found")
+ }
+ s, ok := v.(string)
+ if !ok {
+ t.Fatal("node 1 item value is not string")
+ }
+ if s != testValue+id1.String() {
+ t.Fatalf("expected %q, got %q", testValue+id1.String(), s)
+ }
+
+ v, ok = items[id2]
+ if ok {
+ t.Errorf("node 2 item should not be found")
+ }
+ })
+
+ t.Run("NodeItems", func(t *testing.T) {
+ items := sim.NodesItems(testKey)
+
+ v, ok := items[id1]
+ if !ok {
+ t.Errorf("node 1 item not found")
+ }
+ s, ok := v.(string)
+ if !ok {
+ t.Fatal("node 1 item value is not string")
+ }
+ if s != testValue+id1.String() {
+ t.Fatalf("expected %q, got %q", testValue+id1.String(), s)
+ }
+
+ v, ok = items[id2]
+ if !ok {
+ t.Errorf("node 2 item not found")
+ }
+ s, ok = v.(string)
+ if !ok {
+ t.Fatal("node 1 item value is not string")
+ }
+ if s != testValue+id2.String() {
+ t.Fatalf("expected %q, got %q", testValue+id2.String(), s)
+ }
+ })
+}
diff --git a/swarm/network/simulation/connect.go b/swarm/network/simulation/connect.go
new file mode 100644
index 000000000..3fe82052b
--- /dev/null
+++ b/swarm/network/simulation/connect.go
@@ -0,0 +1,159 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "strings"
+
+ "github.com/ethereum/go-ethereum/p2p/discover"
+)
+
+// ConnectToPivotNode connects the node with provided NodeID
+// to the pivot node, already set by Simulation.SetPivotNode method.
+// It is useful when constructing a star network topology
+// when the simulation adds and removes nodes dynamically.
+func (s *Simulation) ConnectToPivotNode(id discover.NodeID) (err error) {
+ pid := s.PivotNodeID()
+ if pid == nil {
+ return ErrNoPivotNode
+ }
+ return s.connect(*pid, id)
+}
+
+// ConnectToLastNode connects the node with provided NodeID
+// to the last node that is up, avoiding a connection to itself.
+// It is useful when constructing a chain network topology
+// when the simulation adds and removes nodes dynamically.
+func (s *Simulation) ConnectToLastNode(id discover.NodeID) (err error) {
+ ids := s.UpNodeIDs()
+ l := len(ids)
+ if l < 2 {
+ return nil
+ }
+ lid := ids[l-1]
+ if lid == id {
+ lid = ids[l-2]
+ }
+ return s.connect(lid, id)
+}
+
+// ConnectToRandomNode connects the node with provided NodeID
+// to a random node that is up.
+func (s *Simulation) ConnectToRandomNode(id discover.NodeID) (err error) {
+ n := s.randomUpNode(id)
+ if n == nil {
+ return ErrNodeNotFound
+ }
+ return s.connect(n.ID, id)
+}
+
+// ConnectNodesFull connects all nodes one to another.
+// It provides complete connectivity in the network,
+// which should rarely be needed.
+func (s *Simulation) ConnectNodesFull(ids []discover.NodeID) (err error) {
+ if ids == nil {
+ ids = s.UpNodeIDs()
+ }
+ l := len(ids)
+ for i := 0; i < l; i++ {
+ for j := i + 1; j < l; j++ {
+ err = s.connect(ids[i], ids[j])
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// ConnectNodesChain connects all nodes in a chain topology.
+// If ids argument is nil, all nodes that are up will be connected.
+func (s *Simulation) ConnectNodesChain(ids []discover.NodeID) (err error) {
+ if ids == nil {
+ ids = s.UpNodeIDs()
+ }
+ l := len(ids)
+ for i := 0; i < l-1; i++ {
+ err = s.connect(ids[i], ids[i+1])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ConnectNodesRing connects all nodes in a ring topology.
+// If ids argument is nil, all nodes that are up will be connected.
+func (s *Simulation) ConnectNodesRing(ids []discover.NodeID) (err error) {
+ if ids == nil {
+ ids = s.UpNodeIDs()
+ }
+ l := len(ids)
+ if l < 2 {
+ return nil
+ }
+ for i := 0; i < l-1; i++ {
+ err = s.connect(ids[i], ids[i+1])
+ if err != nil {
+ return err
+ }
+ }
+ return s.connect(ids[l-1], ids[0])
+}
+
+// ConnectNodesStar connects all nodes in a star topology
+// with the center at provided NodeID.
+// If ids argument is nil, all nodes that are up will be connected.
+func (s *Simulation) ConnectNodesStar(id discover.NodeID, ids []discover.NodeID) (err error) {
+ if ids == nil {
+ ids = s.UpNodeIDs()
+ }
+ l := len(ids)
+ for i := 0; i < l; i++ {
+ if id == ids[i] {
+ continue
+ }
+ err = s.connect(id, ids[i])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ConnectNodesStarPivot connects all nodes in a star topology
+// with the center at already set pivot node.
+// If ids argument is nil, all nodes that are up will be connected.
+func (s *Simulation) ConnectNodesStarPivot(ids []discover.NodeID) (err error) {
+ id := s.PivotNodeID()
+ if id == nil {
+ return ErrNoPivotNode
+ }
+ return s.ConnectNodesStar(*id, ids)
+}
+
+// connect connects two nodes but ignores already connected error.
+func (s *Simulation) connect(oneID, otherID discover.NodeID) error {
+ return ignoreAlreadyConnectedErr(s.Net.Connect(oneID, otherID))
+}
+
+func ignoreAlreadyConnectedErr(err error) error {
+ if err == nil || strings.Contains(err.Error(), "already connected") {
+ return nil
+ }
+ return err
+}
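The connect helpers above compose with the node-management API when building a topology. A short sketch of booting a ring of nodes follows; the function name buildRing and its error handling are illustrative only.

package example

import "github.com/ethereum/go-ethereum/swarm/network/simulation"

// buildRing starts n nodes running the given services and connects them in a ring.
func buildRing(services map[string]simulation.ServiceFunc, n int) (*simulation.Simulation, error) {
	sim := simulation.New(services)
	ids, err := sim.AddNodes(n)
	if err != nil {
		sim.Close()
		return nil, err
	}
	if err := sim.ConnectNodesRing(ids); err != nil {
		sim.Close()
		return nil, err
	}
	// the caller is responsible for calling sim.Close when done
	return sim, nil
}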
diff --git a/swarm/network/simulation/connect_test.go b/swarm/network/simulation/connect_test.go
new file mode 100644
index 000000000..10d73e4a1
--- /dev/null
+++ b/swarm/network/simulation/connect_test.go
@@ -0,0 +1,306 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/p2p/discover"
+)
+
+func TestConnectToPivotNode(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ pid, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sim.SetPivotNode(pid)
+
+ id, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(sim.Net.Conns) > 0 {
+ t.Fatal("no connections should exist after just adding nodes")
+ }
+
+ err = sim.ConnectToPivotNode(id)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if sim.Net.GetConn(id, pid) == nil {
+ t.Error("node did not connect to pivot node")
+ }
+}
+
+func TestConnectToLastNode(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ n := 10
+
+ ids, err := sim.AddNodes(n)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ id, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(sim.Net.Conns) > 0 {
+ t.Fatal("no connections should exist after just adding nodes")
+ }
+
+ err = sim.ConnectToLastNode(id)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, i := range ids[:n-1] {
+ if sim.Net.GetConn(id, i) != nil {
+ t.Error("node connected to the node that is not the last")
+ }
+ }
+
+ if sim.Net.GetConn(id, ids[n-1]) == nil {
+ t.Error("node did not connect to the last node")
+ }
+}
+
+func TestConnectToRandomNode(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ n := 10
+
+ ids, err := sim.AddNodes(n)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(sim.Net.Conns) > 0 {
+ t.Fatal("no connections should exist after just adding nodes")
+ }
+
+ err = sim.ConnectToRandomNode(ids[0])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var cc int
+ for i := 0; i < n; i++ {
+ for j := i + 1; j < n; j++ {
+ if sim.Net.GetConn(ids[i], ids[j]) != nil {
+ cc++
+ }
+ }
+ }
+
+ if cc != 1 {
+ t.Errorf("expected one connection, got %v", cc)
+ }
+}
+
+func TestConnectNodesFull(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ ids, err := sim.AddNodes(12)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(sim.Net.Conns) > 0 {
+ t.Fatal("no connections should exist after just adding nodes")
+ }
+
+ err = sim.ConnectNodesFull(ids)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testFull(t, sim, ids)
+}
+
+func testFull(t *testing.T, sim *Simulation, ids []discover.NodeID) {
+ n := len(ids)
+ var cc int
+ for i := 0; i < n; i++ {
+ for j := i + 1; j < n; j++ {
+ if sim.Net.GetConn(ids[i], ids[j]) != nil {
+ cc++
+ }
+ }
+ }
+
+ want := n * (n - 1) / 2
+
+ if cc != want {
+ t.Errorf("expected %v connections, got %v", want, cc)
+ }
+}
+
+func TestConnectNodesChain(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ ids, err := sim.AddNodes(10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(sim.Net.Conns) > 0 {
+ t.Fatal("no connections should exist after just adding nodes")
+ }
+
+ err = sim.ConnectNodesChain(ids)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testChain(t, sim, ids)
+}
+
+func testChain(t *testing.T, sim *Simulation, ids []discover.NodeID) {
+ n := len(ids)
+ for i := 0; i < n; i++ {
+ for j := i + 1; j < n; j++ {
+ c := sim.Net.GetConn(ids[i], ids[j])
+ if i == j-1 {
+ if c == nil {
+ t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
+ }
+ } else {
+ if c != nil {
+ t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
+ }
+ }
+ }
+ }
+}
+
+func TestConnectNodesRing(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ ids, err := sim.AddNodes(10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(sim.Net.Conns) > 0 {
+ t.Fatal("no connections should exist after just adding nodes")
+ }
+
+ err = sim.ConnectNodesRing(ids)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testRing(t, sim, ids)
+}
+
+func testRing(t *testing.T, sim *Simulation, ids []discover.NodeID) {
+ n := len(ids)
+ for i := 0; i < n; i++ {
+ for j := i + 1; j < n; j++ {
+ c := sim.Net.GetConn(ids[i], ids[j])
+ if i == j-1 || (i == 0 && j == n-1) {
+ if c == nil {
+ t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
+ }
+ } else {
+ if c != nil {
+ t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
+ }
+ }
+ }
+ }
+}
+
+func TestConnectToNodesStar(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ ids, err := sim.AddNodes(10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(sim.Net.Conns) > 0 {
+ t.Fatal("no connections should exist after just adding nodes")
+ }
+
+ centerIndex := 2
+
+ err = sim.ConnectNodesStar(ids[centerIndex], ids)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testStar(t, sim, ids, centerIndex)
+}
+
+func testStar(t *testing.T, sim *Simulation, ids []discover.NodeID, centerIndex int) {
+ n := len(ids)
+ for i := 0; i < n; i++ {
+ for j := i + 1; j < n; j++ {
+ c := sim.Net.GetConn(ids[i], ids[j])
+ if i == centerIndex || j == centerIndex {
+ if c == nil {
+ t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
+ }
+ } else {
+ if c != nil {
+ t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
+ }
+ }
+ }
+ }
+}
+
+func TestConnectToNodesStarPivot(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ ids, err := sim.AddNodes(10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(sim.Net.Conns) > 0 {
+ t.Fatal("no connections should exist after just adding nodes")
+ }
+
+ pivotIndex := 4
+
+ sim.SetPivotNode(ids[pivotIndex])
+
+ err = sim.ConnectNodesStarPivot(ids)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testStar(t, sim, ids, pivotIndex)
+}
diff --git a/swarm/network/simulation/events.go b/swarm/network/simulation/events.go
new file mode 100644
index 000000000..f9cfadb73
--- /dev/null
+++ b/swarm/network/simulation/events.go
@@ -0,0 +1,157 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "context"
+
+ "github.com/ethereum/go-ethereum/p2p/discover"
+
+ "github.com/ethereum/go-ethereum/p2p"
+)
+
+// PeerEvent is the type of the channel returned by Simulation.PeerEvents.
+type PeerEvent struct {
+ // NodeID is the ID of node that the event is caught on.
+ NodeID discover.NodeID
+ // Event is the event that is caught.
+ Event *p2p.PeerEvent
+ // Error is the error that may have happened during event watching.
+ Error error
+}
+
+// PeerEventsFilter defines a filter on PeerEvents so that only events matching
+// the defined properties are passed on. Use PeerEventsFilter methods to set the required options.
+type PeerEventsFilter struct {
+ t *p2p.PeerEventType
+ protocol *string
+ msgCode *uint64
+}
+
+// NewPeerEventsFilter returns a new PeerEventsFilter instance.
+func NewPeerEventsFilter() *PeerEventsFilter {
+ return &PeerEventsFilter{}
+}
+
+// Type sets the filter to only one peer event type.
+func (f *PeerEventsFilter) Type(t p2p.PeerEventType) *PeerEventsFilter {
+ f.t = &t
+ return f
+}
+
+// Protocol sets the filter to only one message protocol.
+func (f *PeerEventsFilter) Protocol(p string) *PeerEventsFilter {
+ f.protocol = &p
+ return f
+}
+
+// MsgCode sets the filter to only one msg code.
+func (f *PeerEventsFilter) MsgCode(c uint64) *PeerEventsFilter {
+ f.msgCode = &c
+ return f
+}
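+
+// dropEventsExample is a minimal usage sketch of PeerEventsFilter: it builds a
+// filter that matches only peer drop events and subscribes to them on all
+// nodes that are currently up.
+func dropEventsExample(ctx context.Context, s *Simulation) <-chan PeerEvent {
+ filter := NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop)
+ return s.PeerEvents(ctx, s.UpNodeIDs(), filter)
+}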
+
+// PeerEvents returns a channel of events that are captured by admin peerEvents
+// subscriptions on nodes with the provided NodeIDs. Additional filters can be set to ignore
+// events that are not relevant.
+func (s *Simulation) PeerEvents(ctx context.Context, ids []discover.NodeID, filters ...*PeerEventsFilter) <-chan PeerEvent {
+ eventC := make(chan PeerEvent)
+
+ for _, id := range ids {
+ s.shutdownWG.Add(1)
+ go func(id discover.NodeID) {
+ defer s.shutdownWG.Done()
+
+ client, err := s.Net.GetNode(id).Client()
+ if err != nil {
+ eventC <- PeerEvent{NodeID: id, Error: err}
+ return
+ }
+ events := make(chan *p2p.PeerEvent)
+ sub, err := client.Subscribe(ctx, "admin", events, "peerEvents")
+ if err != nil {
+ eventC <- PeerEvent{NodeID: id, Error: err}
+ return
+ }
+ defer sub.Unsubscribe()
+
+ for {
+ select {
+ case <-ctx.Done():
+ if err := ctx.Err(); err != nil {
+ select {
+ case eventC <- PeerEvent{NodeID: id, Error: err}:
+ case <-s.Done():
+ }
+ }
+ return
+ case <-s.Done():
+ return
+ case e := <-events:
+ match := len(filters) == 0 // if there are no filters match all events
+ for _, f := range filters {
+ if f.t != nil && *f.t != e.Type {
+ continue
+ }
+ if f.protocol != nil && *f.protocol != e.Protocol {
+ continue
+ }
+ if f.msgCode != nil && e.MsgCode != nil && *f.msgCode != *e.MsgCode {
+ continue
+ }
+ // all filter parameters matched, break the loop
+ match = true
+ break
+ }
+ if match {
+ select {
+ case eventC <- PeerEvent{NodeID: id, Event: e}:
+ case <-ctx.Done():
+ if err := ctx.Err(); err != nil {
+ select {
+ case eventC <- PeerEvent{NodeID: id, Error: err}:
+ case <-s.Done():
+ }
+ }
+ return
+ case <-s.Done():
+ return
+ }
+ }
+ case err := <-sub.Err():
+ if err != nil {
+ select {
+ case eventC <- PeerEvent{NodeID: id, Error: err}:
+ case <-ctx.Done():
+ if err := ctx.Err(); err != nil {
+ select {
+ case eventC <- PeerEvent{NodeID: id, Error: err}:
+ case <-s.Done():
+ }
+ }
+ return
+ case <-s.Done():
+ return
+ }
+ }
+ }
+ }
+ }(id)
+ }
+
+ return eventC
+}
diff --git a/swarm/network/simulation/events_test.go b/swarm/network/simulation/events_test.go
new file mode 100644
index 000000000..0c185d977
--- /dev/null
+++ b/swarm/network/simulation/events_test.go
@@ -0,0 +1,104 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "context"
+ "sync"
+ "testing"
+ "time"
+)
+
+// TestPeerEvents creates a simulation, adds two nodes,
+// registers for peer events, connects the nodes in a chain
+// and waits for the number of connection events to
+// be received.
+func TestPeerEvents(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ _, err := sim.AddNodes(2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ events := sim.PeerEvents(ctx, sim.NodeIDs())
+
+ // two nodes -> two connection events
+ expectedEventCount := 2
+
+ var wg sync.WaitGroup
+ wg.Add(expectedEventCount)
+
+ go func() {
+ for e := range events {
+ if e.Error != nil {
+ if e.Error == context.Canceled {
+ return
+ }
+ t.Error(e.Error)
+ continue
+ }
+ wg.Done()
+ }
+ }()
+
+ err = sim.ConnectNodesChain(sim.NodeIDs())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ wg.Wait()
+}
+
+func TestPeerEventsTimeout(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ _, err := sim.AddNodes(2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+ defer cancel()
+ events := sim.PeerEvents(ctx, sim.NodeIDs())
+
+ done := make(chan struct{})
+ go func() {
+ for e := range events {
+ if e.Error == context.Canceled {
+ return
+ }
+ if e.Error == context.DeadlineExceeded {
+ close(done)
+ return
+ } else {
+ t.Fatal(e.Error)
+ }
+ }
+ }()
+
+ select {
+ case <-time.After(time.Second):
+ t.Error("no context deadline received")
+ case <-done:
+ // all good, context deadline detected
+ }
+}
diff --git a/swarm/network/simulation/example_test.go b/swarm/network/simulation/example_test.go
new file mode 100644
index 000000000..2a8116921
--- /dev/null
+++ b/swarm/network/simulation/example_test.go
@@ -0,0 +1,140 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation_test
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethereum/go-ethereum/swarm/network"
+ "github.com/ethereum/go-ethereum/swarm/network/simulation"
+)
+
+// Every node can have a Kademlia associated with it using the node bucket under
+// the BucketKeyKademlia key. This allows WaitTillHealthy to be used to block until
+// all nodes have their Kademlias healthy.
+func ExampleSimulation_WaitTillHealthy() {
+ sim := simulation.New(map[string]simulation.ServiceFunc{
+ "bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+ addr := network.NewAddrFromNodeID(ctx.Config.ID)
+ hp := network.NewHiveParams()
+ hp.Discovery = false
+ config := &network.BzzConfig{
+ OverlayAddr: addr.Over(),
+ UnderlayAddr: addr.Under(),
+ HiveParams: hp,
+ }
+ kad := network.NewKademlia(addr.Over(), network.NewKadParams())
+ // store kademlia in node's bucket under BucketKeyKademlia
+ // so that it can be found by WaitTillHealthy method.
+ b.Store(simulation.BucketKeyKademlia, kad)
+ return network.NewBzz(config, kad, nil, nil, nil), nil, nil
+ },
+ })
+ defer sim.Close()
+
+ _, err := sim.AddNodesAndConnectRing(10)
+ if err != nil {
+ // handle error properly...
+ panic(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+ defer cancel()
+ ill, err := sim.WaitTillHealthy(ctx, 2)
+ if err != nil {
+ // inspect the latest detected not healthy kademlias
+ for id, kad := range ill {
+ fmt.Println("Node", id)
+ fmt.Println(kad.String())
+ }
+ // handle error...
+ }
+
+ // continue with the test
+}
+
+// Watch all peer events in the simulation network, by receiving from a channel.
+func ExampleSimulation_PeerEvents() {
+ sim := simulation.New(nil)
+ defer sim.Close()
+
+ events := sim.PeerEvents(context.Background(), sim.NodeIDs())
+
+ go func() {
+ for e := range events {
+ if e.Error != nil {
+ log.Error("peer event", "err", e.Error)
+ continue
+ }
+ log.Info("peer event", "node", e.NodeID, "peer", e.Event.Peer, "msgcode", e.Event.MsgCode)
+ }
+ }()
+}
+
+// Detect when a node drops a peer.
+func ExampleSimulation_PeerEvents_disconnections() {
+ sim := simulation.New(nil)
+ defer sim.Close()
+
+ disconnections := sim.PeerEvents(
+ context.Background(),
+ sim.NodeIDs(),
+ simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
+ )
+
+ go func() {
+ for d := range disconnections {
+ if d.Error != nil {
+ log.Error("peer drop", "err", d.Error)
+ continue
+ }
+ log.Warn("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
+ }
+ }()
+}
+
+// Watch multiple types of events or messages. In this case, they differ only
+// by MsgCode, but filters can be set for different types or protocols, too.
+func ExampleSimulation_PeerEvents_multipleFilters() {
+ sim := simulation.New(nil)
+ defer sim.Close()
+
+ msgs := sim.PeerEvents(
+ context.Background(),
+ sim.NodeIDs(),
+ // Watch when bzz messages 1 and 4 are received.
+ simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("bzz").MsgCode(1),
+ simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeMsgRecv).Protocol("bzz").MsgCode(4),
+ )
+
+ go func() {
+ for m := range msgs {
+ if m.Error != nil {
+ log.Error("bzz message", "err", m.Error)
+ continue
+ }
+ log.Info("bzz message", "node", m.NodeID, "peer", m.Event.Peer)
+ }
+ }()
+}
diff --git a/swarm/network/simulation/http.go b/swarm/network/simulation/http.go
new file mode 100644
index 000000000..40f13f32d
--- /dev/null
+++ b/swarm/network/simulation/http.go
@@ -0,0 +1,63 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p/simulations"
+)
+
+// Package defaults.
+var (
+ DefaultHTTPSimAddr = ":8888"
+)
+
+// WithServer is a `With` (builder) pattern constructor for Simulation
+// that also starts an HTTP server.
+func (s *Simulation) WithServer(addr string) *Simulation {
+ //assign default addr if nothing provided
+ if addr == "" {
+ addr = DefaultHTTPSimAddr
+ }
+ log.Info(fmt.Sprintf("Initializing simulation server on %s...", addr))
+ //initialize the HTTP server
+ s.handler = simulations.NewServer(s.Net)
+ s.runC = make(chan struct{})
+ //add swarm specific routes to the HTTP server
+ s.addSimulationRoutes()
+ s.httpSrv = &http.Server{
+ Addr: addr,
+ Handler: s.handler,
+ }
+ go s.httpSrv.ListenAndServe()
+ return s
+}
+
+//register additional HTTP routes
+func (s *Simulation) addSimulationRoutes() {
+ s.handler.POST("/runsim", s.RunSimulation)
+}
+
+// RunSimulation is the HTTP handler for the /runsim endpoint; it signals a waiting simulation run to proceed.
+func (s *Simulation) RunSimulation(w http.ResponseWriter, req *http.Request) {
+ log.Debug("RunSimulation endpoint running")
+ s.runC <- struct{}{}
+ w.WriteHeader(http.StatusOK)
+}
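+
+// withServerExample is a minimal usage sketch of WithServer: it attaches the
+// HTTP server to a new Simulation. A later call to Run then blocks until a
+// frontend sends POST /runsim to the server, for example:
+//   curl -X POST http://localhost:8888/runsim
+func withServerExample(services map[string]ServiceFunc) *Simulation {
+ // an empty address falls back to DefaultHTTPSimAddr (":8888")
+ return New(services).WithServer("")
+}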
diff --git a/swarm/network/simulation/http_test.go b/swarm/network/simulation/http_test.go
new file mode 100644
index 000000000..4d8bf9946
--- /dev/null
+++ b/swarm/network/simulation/http_test.go
@@ -0,0 +1,104 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+)
+
+func TestSimulationWithHTTPServer(t *testing.T) {
+ log.Debug("Init simulation")
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+ defer cancel()
+
+ sim := New(
+ map[string]ServiceFunc{
+ "noop": func(_ *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+ return newNoopService(), nil, nil
+ },
+ }).WithServer(DefaultHTTPSimAddr)
+ defer sim.Close()
+ log.Debug("Done.")
+
+ _, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ log.Debug("Starting sim round and let it time out...")
+ //first test that running without sending to the channel will actually
+ //block the simulation, so let it time out
+ result := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
+ log.Debug("Just start the sim without any action and wait for the timeout")
+ //ensure with a Sleep that simulation doesn't terminate before the timeout
+ time.Sleep(2 * time.Second)
+ return nil
+ })
+
+ if result.Error != nil {
+ if result.Error.Error() == "context deadline exceeded" {
+ log.Debug("Expected timeout error received")
+ } else {
+ t.Fatal(result.Error)
+ }
+ }
+
+ //now run it again and send the expected signal on the waiting channel,
+ //then close the simulation
+ log.Debug("Starting sim round and wait for frontend signal...")
+ //this time the timeout should be long enough so that it doesn't kick in too early
+ ctx, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel2()
+ go sendRunSignal(t)
+ result = sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
+ log.Debug("This run waits for the run signal from `frontend`...")
+ //ensure with a Sleep that simulation doesn't terminate before the signal is received
+ time.Sleep(2 * time.Second)
+ return nil
+ })
+ if result.Error != nil {
+ t.Fatal(result.Error)
+ }
+ log.Debug("Test terminated successfully")
+}
+
+func sendRunSignal(t *testing.T) {
+ //We need to first wait for the sim HTTP server to start running...
+ time.Sleep(2 * time.Second)
+ //then we can send the signal
+
+ log.Debug("Sending run signal to simulation: POST /runsim...")
+ resp, err := http.Post(fmt.Sprintf("http://localhost%s/runsim", DefaultHTTPSimAddr), "application/json", nil)
+ if err != nil {
+ t.Fatalf("Request failed: %v", err)
+ }
+ defer resp.Body.Close()
+ log.Debug("Signal sent")
+ if resp.StatusCode != http.StatusOK {
+ t.Fatalf("err %s", resp.Status)
+ }
+}
diff --git a/swarm/network/simulation/kademlia.go b/swarm/network/simulation/kademlia.go
new file mode 100644
index 000000000..3e45cb0ce
--- /dev/null
+++ b/swarm/network/simulation/kademlia.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "context"
+ "encoding/hex"
+ "time"
+
+ "github.com/ethereum/go-ethereum/p2p/discover"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/swarm/network"
+)
+
+// BucketKeyKademlia is the key to be used for storing the kademlia
+// instance for a particular node, usually inside the ServiceFunc function.
+var BucketKeyKademlia BucketKey = "kademlia"
+
+// WaitTillHealthy blocks until all Kademlias are healthy.
+// If the returned error is not nil, a map of the Kademlias that were found not healthy is also returned.
+func (s *Simulation) WaitTillHealthy(ctx context.Context, kadMinProxSize int) (ill map[discover.NodeID]*network.Kademlia, err error) {
+ // Prepare PeerPot map for checking Kademlia health
+ var ppmap map[string]*network.PeerPot
+ kademlias := s.kademlias()
+ addrs := make([][]byte, 0, len(kademlias))
+ for _, k := range kademlias {
+ addrs = append(addrs, k.BaseAddr())
+ }
+ ppmap = network.NewPeerPotMap(kadMinProxSize, addrs)
+
+ // Wait for healthy Kademlia on every node
+ ticker := time.NewTicker(200 * time.Millisecond)
+ defer ticker.Stop()
+
+ ill = make(map[discover.NodeID]*network.Kademlia)
+ for {
+ select {
+ case <-ctx.Done():
+ return ill, ctx.Err()
+ case <-ticker.C:
+ for k := range ill {
+ delete(ill, k)
+ }
+ log.Debug("kademlia health check", "addr count", len(addrs))
+ for id, k := range kademlias {
+ //PeerPot for this node
+ addr := common.Bytes2Hex(k.BaseAddr())
+ pp := ppmap[addr]
+ //check kademlia health
+ h := k.Healthy(pp)
+ //print info
+ log.Debug(k.String())
+ log.Debug("kademlia", "empty bins", pp.EmptyBins, "gotNN", h.GotNN, "knowNN", h.KnowNN, "full", h.Full)
+ log.Debug("kademlia", "health", h.GotNN && h.KnowNN && h.Full, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
+ log.Debug("kademlia", "ill condition", !h.GotNN || !h.Full, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
+ if !h.GotNN || !h.Full {
+ ill[id] = k
+ }
+ }
+ if len(ill) == 0 {
+ return nil, nil
+ }
+ }
+ }
+}
+
+// kademlias returns all Kademlia instances that are set
+// in the simulation buckets.
+func (s *Simulation) kademlias() (ks map[discover.NodeID]*network.Kademlia) {
+ items := s.UpNodesItems(BucketKeyKademlia)
+ ks = make(map[discover.NodeID]*network.Kademlia, len(items))
+ for id, v := range items {
+ k, ok := v.(*network.Kademlia)
+ if !ok {
+ continue
+ }
+ ks[id] = k
+ }
+ return ks
+}
diff --git a/swarm/network/simulation/kademlia_test.go b/swarm/network/simulation/kademlia_test.go
new file mode 100644
index 000000000..d11fe7e41
--- /dev/null
+++ b/swarm/network/simulation/kademlia_test.go
@@ -0,0 +1,67 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethereum/go-ethereum/swarm/network"
+)
+
+func TestWaitTillHealthy(t *testing.T) {
+ sim := New(map[string]ServiceFunc{
+ "bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+ addr := network.NewAddrFromNodeID(ctx.Config.ID)
+ hp := network.NewHiveParams()
+ hp.Discovery = false
+ config := &network.BzzConfig{
+ OverlayAddr: addr.Over(),
+ UnderlayAddr: addr.Under(),
+ HiveParams: hp,
+ }
+ kad := network.NewKademlia(addr.Over(), network.NewKadParams())
+ // store kademlia in node's bucket under BucketKeyKademlia
+ // so that it can be found by WaitTillHealthy method.
+ b.Store(BucketKeyKademlia, kad)
+ return network.NewBzz(config, kad, nil, nil, nil), nil, nil
+ },
+ })
+ defer sim.Close()
+
+ _, err := sim.AddNodesAndConnectRing(10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
+ defer cancel()
+ ill, err := sim.WaitTillHealthy(ctx, 2)
+ if err != nil {
+ for id, kad := range ill {
+ t.Log("Node", id)
+ t.Log(kad.String())
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+}
diff --git a/swarm/network/simulation/node.go b/swarm/network/simulation/node.go
new file mode 100644
index 000000000..bc433cfd8
--- /dev/null
+++ b/swarm/network/simulation/node.go
@@ -0,0 +1,357 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "encoding/json"
+ "errors"
+ "io/ioutil"
+ "math/rand"
+ "os"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p/discover"
+ "github.com/ethereum/go-ethereum/p2p/simulations"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+)
+
+// NodeIDs returns NodeIDs for all nodes in the network.
+func (s *Simulation) NodeIDs() (ids []discover.NodeID) {
+ nodes := s.Net.GetNodes()
+ ids = make([]discover.NodeID, len(nodes))
+ for i, node := range nodes {
+ ids[i] = node.ID()
+ }
+ return ids
+}
+
+// UpNodeIDs returns NodeIDs for nodes that are up in the network.
+func (s *Simulation) UpNodeIDs() (ids []discover.NodeID) {
+ nodes := s.Net.GetNodes()
+ for _, node := range nodes {
+ if node.Up {
+ ids = append(ids, node.ID())
+ }
+ }
+ return ids
+}
+
+// DownNodeIDs returns NodeIDs for nodes that are stopped in the network.
+func (s *Simulation) DownNodeIDs() (ids []discover.NodeID) {
+ nodes := s.Net.GetNodes()
+ for _, node := range nodes {
+ if !node.Up {
+ ids = append(ids, node.ID())
+ }
+ }
+ return ids
+}
+
+// AddNodeOption defines the option that can be passed
+// to Simulation.AddNode method.
+type AddNodeOption func(*adapters.NodeConfig)
+
+// AddNodeWithMsgEvents sets the EnableMsgEvents option
+// to NodeConfig.
+func AddNodeWithMsgEvents(enable bool) AddNodeOption {
+ return func(o *adapters.NodeConfig) {
+ o.EnableMsgEvents = enable
+ }
+}
+
+// AddNodeWithService specifies a service that should be
+// started on a node. This option can be repeated as variadic
+// argument to AddNode and other add-node-related methods.
+// If AddNodeWithService is not specified, all services will be started.
+func AddNodeWithService(serviceName string) AddNodeOption {
+ return func(o *adapters.NodeConfig) {
+ o.Services = append(o.Services, serviceName)
+ }
+}
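+
+// addNodeWithOptionsExample is a minimal usage sketch of AddNodeOption: only
+// the named service is started on the new node and message events are enabled.
+// The "bzz" service name is an assumption; it must match a key in the services
+// map passed to New.
+func addNodeWithOptionsExample(s *Simulation) (discover.NodeID, error) {
+ return s.AddNode(
+ AddNodeWithService("bzz"),
+ AddNodeWithMsgEvents(true),
+ )
+}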
+
+// AddNode creates a new node with random configuration,
+// applies provided options to the config and adds the node to network.
+// By default all services will be started on a node. If one or more
+// AddNodeWithService options are provided, only the specified services will be started.
+func (s *Simulation) AddNode(opts ...AddNodeOption) (id discover.NodeID, err error) {
+ conf := adapters.RandomNodeConfig()
+ for _, o := range opts {
+ o(conf)
+ }
+ if len(conf.Services) == 0 {
+ conf.Services = s.serviceNames
+ }
+ node, err := s.Net.NewNodeWithConfig(conf)
+ if err != nil {
+ return id, err
+ }
+ return node.ID(), s.Net.Start(node.ID())
+}
+
+// AddNodes creates new nodes with random configurations,
+// applies provided options to the config and adds nodes to network.
+func (s *Simulation) AddNodes(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
+ ids = make([]discover.NodeID, 0, count)
+ for i := 0; i < count; i++ {
+ id, err := s.AddNode(opts...)
+ if err != nil {
+ return nil, err
+ }
+ ids = append(ids, id)
+ }
+ return ids, nil
+}
+
+// AddNodesAndConnectFull is a helper method that combines
+// AddNodes and ConnectNodesFull. Only new nodes will be connected.
+func (s *Simulation) AddNodesAndConnectFull(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
+ if count < 2 {
+ return nil, errors.New("count of nodes must be at least 2")
+ }
+ ids, err = s.AddNodes(count, opts...)
+ if err != nil {
+ return nil, err
+ }
+ err = s.ConnectNodesFull(ids)
+ if err != nil {
+ return nil, err
+ }
+ return ids, nil
+}
+
+// AddNodesAndConnectChain is a helper method that combines
+// AddNodes and ConnectNodesChain. The chain will be continued from the last
+// node already in the simulation, if there is one, using the ConnectToLastNode method.
+func (s *Simulation) AddNodesAndConnectChain(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
+ if count < 2 {
+ return nil, errors.New("count of nodes must be at least 2")
+ }
+ id, err := s.AddNode(opts...)
+ if err != nil {
+ return nil, err
+ }
+ err = s.ConnectToLastNode(id)
+ if err != nil {
+ return nil, err
+ }
+ ids, err = s.AddNodes(count-1, opts...)
+ if err != nil {
+ return nil, err
+ }
+ ids = append([]discover.NodeID{id}, ids...)
+ err = s.ConnectNodesChain(ids)
+ if err != nil {
+ return nil, err
+ }
+ return ids, nil
+}
+
+// AddNodesAndConnectRing is a helper method that combines
+// AddNodes and ConnectNodesRing.
+func (s *Simulation) AddNodesAndConnectRing(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
+ if count < 2 {
+ return nil, errors.New("count of nodes must be at least 2")
+ }
+ ids, err = s.AddNodes(count, opts...)
+ if err != nil {
+ return nil, err
+ }
+ err = s.ConnectNodesRing(ids)
+ if err != nil {
+ return nil, err
+ }
+ return ids, nil
+}
+
+// AddNodesAndConnectStar is a helper method that combines
+// AddNodes and ConnectNodesStar.
+func (s *Simulation) AddNodesAndConnectStar(count int, opts ...AddNodeOption) (ids []discover.NodeID, err error) {
+ if count < 2 {
+ return nil, errors.New("count of nodes must be at least 2")
+ }
+ ids, err = s.AddNodes(count, opts...)
+ if err != nil {
+ return nil, err
+ }
+ err = s.ConnectNodesStar(ids[0], ids[1:])
+ if err != nil {
+ return nil, err
+ }
+ return ids, nil
+}
+
+// UploadSnapshot uploads a snapshot to the simulation network.
+// This method tries to open the JSON file provided, applies the config to all nodes
+// and then loads the snapshot into the Simulation network.
+func (s *Simulation) UploadSnapshot(snapshotFile string, opts ...AddNodeOption) error {
+ f, err := os.Open(snapshotFile)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ jsonbyte, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
+ var snap simulations.Snapshot
+ err = json.Unmarshal(jsonbyte, &snap)
+ if err != nil {
+ return err
+ }
+
+ //the snapshot probably has the property EnableMsgEvents not set
+ //just in case, set it to true!
+ //(we need this to wait for messages before uploading)
+ for _, n := range snap.Nodes {
+ n.Node.Config.EnableMsgEvents = true
+ n.Node.Config.Services = s.serviceNames
+ for _, o := range opts {
+ o(n.Node.Config)
+ }
+ }
+
+ log.Info("Waiting for p2p connections to be established...")
+
+ //now we can load the snapshot
+ err = s.Net.Load(&snap)
+ if err != nil {
+ return err
+ }
+ log.Info("Snapshot loaded")
+ return nil
+}
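+
+// uploadSnapshotExample is a minimal usage sketch of UploadSnapshot. The
+// snapshot file path and the "bzz" service name are assumptions; any snapshot
+// produced by p2p/simulations can be loaded this way, optionally restricting
+// the started services with AddNodeWithService.
+func uploadSnapshotExample(s *Simulation) error {
+ return s.UploadSnapshot("testdata/snapshot.json", AddNodeWithService("bzz"))
+}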
+
+// SetPivotNode sets the NodeID of the network's pivot node.
+// The pivot node is a specific node that should be treated
+// differently than other nodes in a test. SetPivotNode and
+// PivotNodeID are convenience functions to set and
+// retrieve it.
+func (s *Simulation) SetPivotNode(id discover.NodeID) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.pivotNodeID = &id
+}
+
+// PivotNodeID returns NodeID of the pivot node set by
+// Simulation.SetPivotNode method.
+func (s *Simulation) PivotNodeID() (id *discover.NodeID) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.pivotNodeID
+}
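+
+// pivotStarExample is a minimal usage sketch of the pivot node helpers: it
+// marks one node as the pivot and connects every other up node to it in a
+// star topology via ConnectNodesStarPivot.
+func pivotStarExample(s *Simulation, id discover.NodeID) error {
+ s.SetPivotNode(id)
+ return s.ConnectNodesStarPivot(nil)
+}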
+
+// StartNode starts a node by NodeID.
+func (s *Simulation) StartNode(id discover.NodeID) (err error) {
+ return s.Net.Start(id)
+}
+
+// StartRandomNode starts a random node.
+func (s *Simulation) StartRandomNode() (id discover.NodeID, err error) {
+ n := s.randomDownNode()
+ if n == nil {
+ return id, ErrNodeNotFound
+ }
+ return n.ID, s.Net.Start(n.ID)
+}
+
+// StartRandomNodes starts random nodes.
+func (s *Simulation) StartRandomNodes(count int) (ids []discover.NodeID, err error) {
+ ids = make([]discover.NodeID, 0, count)
+ downIDs := s.DownNodeIDs()
+ for i := 0; i < count; i++ {
+ n := s.randomNode(downIDs, ids...)
+ if n == nil {
+ return nil, ErrNodeNotFound
+ }
+ err = s.Net.Start(n.ID)
+ if err != nil {
+ return nil, err
+ }
+ ids = append(ids, n.ID)
+ }
+ return ids, nil
+}
+
+// StopNode stops a node by NodeID.
+func (s *Simulation) StopNode(id discover.NodeID) (err error) {
+ return s.Net.Stop(id)
+}
+
+// StopRandomNode stops a random node.
+func (s *Simulation) StopRandomNode() (id discover.NodeID, err error) {
+ n := s.randomUpNode()
+ if n == nil {
+ return id, ErrNodeNotFound
+ }
+ return n.ID, s.Net.Stop(n.ID)
+}
+
+// StopRandomNodes stops random nodes.
+func (s *Simulation) StopRandomNodes(count int) (ids []discover.NodeID, err error) {
+ ids = make([]discover.NodeID, 0, count)
+ upIDs := s.UpNodeIDs()
+ for i := 0; i < count; i++ {
+ n := s.randomNode(upIDs, ids...)
+ if n == nil {
+ return nil, ErrNodeNotFound
+ }
+ err = s.Net.Stop(n.ID)
+ if err != nil {
+ return nil, err
+ }
+ ids = append(ids, n.ID)
+ }
+ return ids, nil
+}
+
+// seed the random generator for Simulation.randomNode.
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+// randomUpNode returns a random SimNode that is up.
+// Arguments are NodeIDs for nodes that should not be returned.
+func (s *Simulation) randomUpNode(exclude ...discover.NodeID) *adapters.SimNode {
+ return s.randomNode(s.UpNodeIDs(), exclude...)
+}
+
+// randomDownNode returns a random SimNode that is not up.
+func (s *Simulation) randomDownNode(exclude ...discover.NodeID) *adapters.SimNode {
+ return s.randomNode(s.DownNodeIDs(), exclude...)
+}
+
+// randomNode returns a random SimNode from the slice of NodeIDs.
+func (s *Simulation) randomNode(ids []discover.NodeID, exclude ...discover.NodeID) *adapters.SimNode {
+ for _, e := range exclude {
+ var i int
+ for _, id := range ids {
+ if id == e {
+ ids = append(ids[:i], ids[i+1:]...)
+ } else {
+ i++
+ }
+ }
+ }
+ l := len(ids)
+ if l == 0 {
+ return nil
+ }
+ n := s.Net.GetNode(ids[rand.Intn(l)])
+ node, _ := n.Node.(*adapters.SimNode)
+ return node
+}
diff --git a/swarm/network/simulation/node_test.go b/swarm/network/simulation/node_test.go
new file mode 100644
index 000000000..94f0b4fac
--- /dev/null
+++ b/swarm/network/simulation/node_test.go
@@ -0,0 +1,462 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p/discover"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethereum/go-ethereum/swarm/network"
+)
+
+func TestUpDownNodeIDs(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ ids, err := sim.AddNodes(10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ gotIDs := sim.NodeIDs()
+
+ if !equalNodeIDs(ids, gotIDs) {
+ t.Error("returned nodes are not equal to added ones")
+ }
+
+ stoppedIDs, err := sim.StopRandomNodes(3)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ gotIDs = sim.UpNodeIDs()
+
+ for _, id := range gotIDs {
+ if !sim.Net.GetNode(id).Up {
+ t.Errorf("node %s should not be down", id)
+ }
+ }
+
+ if !equalNodeIDs(ids, append(gotIDs, stoppedIDs...)) {
+ t.Error("returned nodes are not equal to added ones")
+ }
+
+ gotIDs = sim.DownNodeIDs()
+
+ for _, id := range gotIDs {
+ if sim.Net.GetNode(id).Up {
+ t.Errorf("node %s should not be up", id)
+ }
+ }
+
+ if !equalNodeIDs(stoppedIDs, gotIDs) {
+ t.Error("returned nodes are not equal to the stopped ones")
+ }
+}
+
+func equalNodeIDs(one, other []discover.NodeID) bool {
+ if len(one) != len(other) {
+ return false
+ }
+ var count int
+ for _, a := range one {
+ var found bool
+ for _, b := range other {
+ if a == b {
+ found = true
+ break
+ }
+ }
+ if found {
+ count++
+ } else {
+ return false
+ }
+ }
+ return count == len(one)
+}
+
+func TestAddNode(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ id, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ n := sim.Net.GetNode(id)
+ if n == nil {
+ t.Fatal("node not found")
+ }
+
+ if !n.Up {
+ t.Error("node not started")
+ }
+}
+
+func TestAddNodeWithMsgEvents(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ id, err := sim.AddNode(AddNodeWithMsgEvents(true))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !sim.Net.GetNode(id).Config.EnableMsgEvents {
+ t.Error("EnableMsgEvents is false")
+ }
+
+ id, err = sim.AddNode(AddNodeWithMsgEvents(false))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if sim.Net.GetNode(id).Config.EnableMsgEvents {
+ t.Error("EnableMsgEvents is true")
+ }
+}
+
+func TestAddNodeWithService(t *testing.T) {
+ sim := New(map[string]ServiceFunc{
+ "noop1": noopServiceFunc,
+ "noop2": noopServiceFunc,
+ })
+ defer sim.Close()
+
+ id, err := sim.AddNode(AddNodeWithService("noop1"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ n := sim.Net.GetNode(id).Node.(*adapters.SimNode)
+ if n.Service("noop1") == nil {
+ t.Error("service noop1 not found on node")
+ }
+ if n.Service("noop2") != nil {
+ t.Error("service noop2 should not be found on node")
+ }
+}
+
+func TestAddNodes(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ nodesCount := 12
+
+ ids, err := sim.AddNodes(nodesCount)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ count := len(ids)
+ if count != nodesCount {
+ t.Errorf("expected %v nodes, got %v", nodesCount, count)
+ }
+
+ count = len(sim.Net.GetNodes())
+ if count != nodesCount {
+ t.Errorf("expected %v nodes, got %v", nodesCount, count)
+ }
+}
+
+func TestAddNodesAndConnectFull(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ n := 12
+
+ ids, err := sim.AddNodesAndConnectFull(n)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testFull(t, sim, ids)
+}
+
+func TestAddNodesAndConnectChain(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ _, err := sim.AddNodesAndConnectChain(12)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // add another set of nodes to test
+ // if two chains are connected
+ _, err = sim.AddNodesAndConnectChain(7)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testChain(t, sim, sim.UpNodeIDs())
+}
+
+func TestAddNodesAndConnectRing(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ ids, err := sim.AddNodesAndConnectRing(12)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testRing(t, sim, ids)
+}
+
+func TestAddNodesAndConnectStar(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ ids, err := sim.AddNodesAndConnectStar(12)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ testStar(t, sim, ids, 0)
+}
+
+// TestUploadSnapshot tests that uploading a snapshot into the simulation network works.
+func TestUploadSnapshot(t *testing.T) {
+ log.Debug("Creating simulation")
+ s := New(map[string]ServiceFunc{
+ "bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+ addr := network.NewAddrFromNodeID(ctx.Config.ID)
+ hp := network.NewHiveParams()
+ hp.Discovery = false
+ config := &network.BzzConfig{
+ OverlayAddr: addr.Over(),
+ UnderlayAddr: addr.Under(),
+ HiveParams: hp,
+ }
+ kad := network.NewKademlia(addr.Over(), network.NewKadParams())
+ return network.NewBzz(config, kad, nil, nil, nil), nil, nil
+ },
+ })
+ defer s.Close()
+
+ nodeCount := 16
+ log.Debug("Uploading snapshot")
+ err := s.UploadSnapshot(fmt.Sprintf("../stream/testing/snapshot_%d.json", nodeCount))
+ if err != nil {
+ t.Fatalf("Error uploading snapshot to simulation network: %v", err)
+ }
+
+ ctx := context.Background()
+ log.Debug("Starting simulation...")
+ s.Run(ctx, func(ctx context.Context, sim *Simulation) error {
+ log.Debug("Checking")
+ nodes := sim.UpNodeIDs()
+ if len(nodes) != nodeCount {
+ t.Fatal("Simulation network node number doesn't match snapshot node number")
+ }
+ return nil
+ })
+ log.Debug("Done.")
+}
+
+func TestPivotNode(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ id, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ id2, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if sim.PivotNodeID() != nil {
+ t.Error("expected no pivot node")
+ }
+
+ sim.SetPivotNode(id)
+
+ pid := sim.PivotNodeID()
+
+ if pid == nil {
+ t.Error("pivot node not set")
+ } else if *pid != id {
+ t.Errorf("expected pivot node %s, got %s", id, *pid)
+ }
+
+ sim.SetPivotNode(id2)
+
+ pid = sim.PivotNodeID()
+
+ if pid == nil {
+ t.Error("pivot node not set")
+ } else if *pid != id2 {
+ t.Errorf("expected pivot node %s, got %s", id2, *pid)
+ }
+}
+
+func TestStartStopNode(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ id, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ n := sim.Net.GetNode(id)
+ if n == nil {
+ t.Fatal("node not found")
+ }
+ if !n.Up {
+ t.Error("node not started")
+ }
+
+ err = sim.StopNode(id)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n.Up {
+ t.Error("node not stopped")
+ }
+
+ // Sleep here to ensure that Network.watchPeerEvents defer function
+ // has set the `node.Up = false` before we start the node again.
+ // p2p/simulations/network.go:215
+ //
+ // The same node is stopped and started again, and upon start
+ // watchPeerEvents is started in a goroutine. If the node is stopped
+ // and then very quickly started, that goroutine may be scheduled later
+ // than the start and force `node.Up = false` in its defer function.
+ // This will make this test unreliable.
+ time.Sleep(time.Second)
+
+ err = sim.StartNode(id)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !n.Up {
+ t.Error("node not started")
+ }
+}
+
+func TestStartStopRandomNode(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ _, err := sim.AddNodes(3)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ id, err := sim.StopRandomNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ n := sim.Net.GetNode(id)
+ if n == nil {
+ t.Fatal("node not found")
+ }
+ if n.Up {
+ t.Error("node not stopped")
+ }
+
+ id2, err := sim.StopRandomNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Sleep here to ensure that Network.watchPeerEvents defer function
+ // has set the `node.Up = false` before we start the node again.
+ // p2p/simulations/network.go:215
+ //
+ // The same node is stopped and started again, and upon start
+ // watchPeerEvents is started in a goroutine. If the node is stopped
+ // and then very quickly started, that goroutine may be scheduled later
+ // than the start and force `node.Up = false` in its defer function.
+ // This will make this test unreliable.
+ time.Sleep(time.Second)
+
+ idStarted, err := sim.StartRandomNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if idStarted != id && idStarted != id2 {
+ t.Error("unexpected started node ID")
+ }
+}
+
+func TestStartStopRandomNodes(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ _, err := sim.AddNodes(10)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ids, err := sim.StopRandomNodes(3)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, id := range ids {
+ n := sim.Net.GetNode(id)
+ if n == nil {
+ t.Fatal("node not found")
+ }
+ if n.Up {
+ t.Error("node not stopped")
+ }
+ }
+
+ // Sleep here to ensure that Network.watchPeerEvents defer function
+ // has set the `node.Up = false` before we start the node again.
+ // p2p/simulations/network.go:215
+ //
+ // The same node is stopped and started again, and upon start
+ // watchPeerEvents is started in a goroutine. If the node is stopped
+ // and then very quickly started, that goroutine may be scheduled later
+ // then start and force `node.Up = false` in its defer function.
+ // This will make this test unreliable.
+ time.Sleep(time.Second)
+
+ ids, err = sim.StartRandomNodes(2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, id := range ids {
+ n := sim.Net.GetNode(id)
+ if n == nil {
+ t.Fatal("node not found")
+ }
+ if !n.Up {
+ t.Error("node not started")
+ }
+ }
+}
diff --git a/swarm/network/simulation/service.go b/swarm/network/simulation/service.go
new file mode 100644
index 000000000..d1cbf1f8b
--- /dev/null
+++ b/swarm/network/simulation/service.go
@@ -0,0 +1,65 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p/discover"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+)
+
+// Service returns a single Service by name on a particular node
+// with provided id.
+func (s *Simulation) Service(name string, id discover.NodeID) node.Service {
+ simNode, ok := s.Net.GetNode(id).Node.(*adapters.SimNode)
+ if !ok {
+ return nil
+ }
+ services := simNode.ServiceMap()
+ if len(services) == 0 {
+ return nil
+ }
+ return services[name]
+}
+
+// RandomService returns a single Service by name on a
+// randomly chosen node that is up.
+func (s *Simulation) RandomService(name string) node.Service {
+ n := s.randomUpNode()
+ if n == nil {
+ return nil
+ }
+ return n.Service(name)
+}
+
+// Services returns all services with a provided name
+// from nodes that are up.
+func (s *Simulation) Services(name string) (services map[discover.NodeID]node.Service) {
+ nodes := s.Net.GetNodes()
+ services = make(map[discover.NodeID]node.Service)
+ for _, node := range nodes {
+ if !node.Up {
+ continue
+ }
+ simNode, ok := node.Node.(*adapters.SimNode)
+ if !ok {
+ continue
+ }
+ services[node.ID()] = simNode.Service(name)
+ }
+ return services
+}
diff --git a/swarm/network/simulation/service_test.go b/swarm/network/simulation/service_test.go
new file mode 100644
index 000000000..23b0d86f2
--- /dev/null
+++ b/swarm/network/simulation/service_test.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "testing"
+)
+
+func TestService(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ id, err := sim.AddNode()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, ok := sim.Service("noop", id).(*noopService)
+ if !ok {
+ t.Fatalf("service is not of %T type", &noopService{})
+ }
+
+ _, ok = sim.RandomService("noop").(*noopService)
+ if !ok {
+ t.Fatalf("service is not of %T type", &noopService{})
+ }
+
+ _, ok = sim.Services("noop")[id].(*noopService)
+ if !ok {
+ t.Fatalf("service is not of %T type", &noopService{})
+ }
+}
diff --git a/swarm/network/simulation/simulation.go b/swarm/network/simulation/simulation.go
new file mode 100644
index 000000000..2241dfca2
--- /dev/null
+++ b/swarm/network/simulation/simulation.go
@@ -0,0 +1,201 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "context"
+ "errors"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p/discover"
+ "github.com/ethereum/go-ethereum/p2p/simulations"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+)
+
+// Common errors that are returned by functions in this package.
+var (
+ ErrNodeNotFound = errors.New("node not found")
+ ErrNoPivotNode = errors.New("no pivot node set")
+)
+
+// Simulation provides methods on network, nodes and services
+// to manage them.
+type Simulation struct {
+ // Net is exposed as a way to access lower level functionalities
+ // of p2p/simulations.Network.
+ Net *simulations.Network
+
+ serviceNames []string
+ cleanupFuncs []func()
+ buckets map[discover.NodeID]*sync.Map
+ pivotNodeID *discover.NodeID
+ shutdownWG sync.WaitGroup
+ done chan struct{}
+ mu sync.RWMutex
+
+ httpSrv *http.Server //attached HTTP server, set via WithServer
+ handler *simulations.Server //HTTP handler for the server
+ runC chan struct{} //channel where frontend signals it is ready
+}
+
+// ServiceFunc is used in New to declare a new service constructor.
+// The first argument provides ServiceContext from the adapters package
+// giving, for example, access to the NodeID. The second argument is the sync.Map
+// where all "global" state related to the service should be kept.
+// All cleanups needed for the constructed service and any other constructed
+// objects should be provided in a single returned cleanup function.
+type ServiceFunc func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error)
+
+// New creates a new Simulation instance with new
+// simulations.Network initialized with provided services.
+func New(services map[string]ServiceFunc) (s *Simulation) {
+ s = &Simulation{
+ buckets: make(map[discover.NodeID]*sync.Map),
+ done: make(chan struct{}),
+ }
+
+ adapterServices := make(map[string]adapters.ServiceFunc, len(services))
+ for name, serviceFunc := range services {
+ s.serviceNames = append(s.serviceNames, name)
+ adapterServices[name] = func(ctx *adapters.ServiceContext) (node.Service, error) {
+ b := new(sync.Map)
+ service, cleanup, err := serviceFunc(ctx, b)
+ if err != nil {
+ return nil, err
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if cleanup != nil {
+ s.cleanupFuncs = append(s.cleanupFuncs, cleanup)
+ }
+ s.buckets[ctx.Config.ID] = b
+ return service, nil
+ }
+ }
+
+ s.Net = simulations.NewNetwork(
+ adapters.NewSimAdapter(adapterServices),
+ &simulations.NetworkConfig{ID: "0"},
+ )
+
+ return s
+}
+
+// RunFunc is the function that will be called
+// by the Simulation.Run method.
+type RunFunc func(context.Context, *Simulation) error
+
+// Result is the returned value of Simulation.Run method.
+type Result struct {
+ Duration time.Duration
+ Error error
+}
+
+// Run calls the RunFunc function while taking care of
+// cancellation provided through the Context.
+func (s *Simulation) Run(ctx context.Context, f RunFunc) (r Result) {
+ //if the option is set to run an HTTP server with the simulation,
+ //wait for the frontend to signal that it is ready before starting the run
+ start := time.Now()
+ if s.httpSrv != nil {
+ log.Info("Waiting for frontend to be ready...(send POST /runsim to HTTP server)")
+ //wait for the frontend to connect
+ select {
+ case <-s.runC:
+ case <-ctx.Done():
+ return Result{
+ Duration: time.Since(start),
+ Error: ctx.Err(),
+ }
+ }
+ log.Info("Received signal from frontend - starting simulation run.")
+ }
+ errc := make(chan error)
+ quit := make(chan struct{})
+ defer close(quit)
+ go func() {
+ select {
+ case errc <- f(ctx, s):
+ case <-quit:
+ }
+ }()
+ var err error
+ select {
+ case <-ctx.Done():
+ err = ctx.Err()
+ case err = <-errc:
+ }
+ return Result{
+ Duration: time.Since(start),
+ Error: err,
+ }
+}
+
+// Maximum number of parallel calls to cleanup functions on
+// Simulation.Close.
+var maxParallelCleanups = 10
+
+// Close calls all cleanup functions that are returned by
+// ServiceFunc, waits for them and for any other functions
+// that explicitly block on shutdownWG
+// (like Simulation.PeerEvents) to finish, and shuts down the
+// network at the end. It is used to release all resources
+// held by the simulation.
+func (s *Simulation) Close() {
+ close(s.done)
+ sem := make(chan struct{}, maxParallelCleanups)
+ s.mu.RLock()
+ cleanupFuncs := make([]func(), len(s.cleanupFuncs))
+ for i, f := range s.cleanupFuncs {
+ if f != nil {
+ cleanupFuncs[i] = f
+ }
+ }
+ s.mu.RUnlock()
+ for _, cleanup := range cleanupFuncs {
+ s.shutdownWG.Add(1)
+ sem <- struct{}{}
+ go func(cleanup func()) {
+ defer s.shutdownWG.Done()
+ defer func() { <-sem }()
+
+ cleanup()
+ }(cleanup)
+ }
+ if s.httpSrv != nil {
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+ defer cancel()
+ err := s.httpSrv.Shutdown(ctx)
+ if err != nil {
+ log.Error("Error shutting down HTTP server!", "err", err)
+ }
+ close(s.runC)
+ }
+ s.shutdownWG.Wait()
+ s.Net.Shutdown()
+}
+
+// Done returns a channel that is closed when the simulation
+// is closed by Close method. It is useful for signaling termination
+// of all possible goroutines that are created within the test.
+func (s *Simulation) Done() <-chan struct{} {
+ return s.done
+}
diff --git a/swarm/network/simulation/simulation_test.go b/swarm/network/simulation/simulation_test.go
new file mode 100644
index 000000000..803e0499a
--- /dev/null
+++ b/swarm/network/simulation/simulation_test.go
@@ -0,0 +1,207 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package simulation
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
+ "github.com/ethereum/go-ethereum/rpc"
+ colorable "github.com/mattn/go-colorable"
+)
+
+var (
+ loglevel = flag.Int("loglevel", 2, "verbosity of logs")
+)
+
+func init() {
+ flag.Parse()
+ log.PrintOrigins(true)
+ log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+}
+
+// TestRun tests if the Run method calls RunFunc and if it handles the context properly.
+func TestRun(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ defer sim.Close()
+
+ t.Run("call", func(t *testing.T) {
+ expect := "something"
+ var got string
+ r := sim.Run(context.Background(), func(ctx context.Context, sim *Simulation) error {
+ got = expect
+ return nil
+ })
+
+ if r.Error != nil {
+ t.Errorf("unexpected error: %v", r.Error)
+ }
+ if got != expect {
+ t.Errorf("expected %q, got %q", expect, got)
+ }
+ })
+
+ t.Run("cancelation", func(t *testing.T) {
+ ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+ defer cancel()
+
+ r := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
+ time.Sleep(100 * time.Millisecond)
+ return nil
+ })
+
+ if r.Error != context.DeadlineExceeded {
+ t.Errorf("unexpected error: %v", r.Error)
+ }
+ })
+
+ t.Run("context value and duration", func(t *testing.T) {
+ ctx := context.WithValue(context.Background(), "hey", "there")
+ sleep := 50 * time.Millisecond
+
+ r := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
+ if ctx.Value("hey") != "there" {
+ return errors.New("expected context value not passed")
+ }
+ time.Sleep(sleep)
+ return nil
+ })
+
+ if r.Error != nil {
+ t.Errorf("unexpected error: %v", r.Error)
+ }
+ if r.Duration < sleep {
+ t.Errorf("reported run duration less then expected: %s", r.Duration)
+ }
+ })
+}
+
+// TestClose tests whether the Close method triggers all cleanup functions and whether all nodes are stopped afterwards.
+func TestClose(t *testing.T) {
+ var mu sync.Mutex
+ var cleanupCount int
+
+ sleep := 50 * time.Millisecond
+
+ sim := New(map[string]ServiceFunc{
+ "noop": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+ return newNoopService(), func() {
+ time.Sleep(sleep)
+ mu.Lock()
+ defer mu.Unlock()
+ cleanupCount++
+ }, nil
+ },
+ })
+
+ nodeCount := 30
+
+ _, err := sim.AddNodes(nodeCount)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var upNodeCount int
+ for _, n := range sim.Net.GetNodes() {
+ if n.Up {
+ upNodeCount++
+ }
+ }
+ if upNodeCount != nodeCount {
+ t.Errorf("all nodes should be up, insted only %v are up", upNodeCount)
+ }
+
+ sim.Close()
+
+ if cleanupCount != nodeCount {
+ t.Errorf("number of cleanups expected %v, got %v", nodeCount, cleanupCount)
+ }
+
+ upNodeCount = 0
+ for _, n := range sim.Net.GetNodes() {
+ if n.Up {
+ upNodeCount++
+ }
+ }
+ if upNodeCount != 0 {
+ t.Errorf("all nodes should be down, insted %v are up", upNodeCount)
+ }
+}
+
+// TestDone checks if the Close method triggers the closing of the done channel.
+func TestDone(t *testing.T) {
+ sim := New(noopServiceFuncMap)
+ sleep := 50 * time.Millisecond
+ timeout := 2 * time.Second
+
+ start := time.Now()
+ go func() {
+ time.Sleep(sleep)
+ sim.Close()
+ }()
+
+ select {
+ case <-time.After(timeout):
+ t.Error("done channel closing timmed out")
+ case <-sim.Done():
+ if d := time.Since(start); d < sleep {
+ t.Errorf("done channel closed sooner then expected: %s", d)
+ }
+ }
+}
+
+// a helper map for typical services that do not do anything
+var noopServiceFuncMap = map[string]ServiceFunc{
+ "noop": noopServiceFunc,
+}
+
+// a helper function for the most basic noop service
+func noopServiceFunc(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
+ return newNoopService(), nil, nil
+}
+
+// noopService is the service that does not do anything
+// but implements node.Service interface.
+type noopService struct{}
+
+func newNoopService() node.Service {
+ return &noopService{}
+}
+
+func (t *noopService) Protocols() []p2p.Protocol {
+ return []p2p.Protocol{}
+}
+
+func (t *noopService) APIs() []rpc.API {
+ return []rpc.API{}
+}
+
+func (t *noopService) Start(server *p2p.Server) error {
+ return nil
+}
+
+func (t *noopService) Stop() error {
+ return nil
+}
diff --git a/swarm/network_test.go b/swarm/network_test.go
index 606a83be2..d2a030933 100644
--- a/swarm/network_test.go
+++ b/swarm/network_test.go
@@ -28,15 +28,14 @@ import (
"testing"
"time"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/discover"
- "github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/network"
+ "github.com/ethereum/go-ethereum/swarm/network/simulation"
"github.com/ethereum/go-ethereum/swarm/storage"
colorable "github.com/mattn/go-colorable"
)
@@ -261,78 +260,67 @@ type testSwarmNetworkOptions struct {
// - May wait for Kademlia on every node to be healthy.
// - Checking if a file is retrievable from all nodes.
func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwarmNetworkStep) {
- dir, err := ioutil.TempDir("", "swarm-network-test")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir)
-
if o == nil {
o = new(testSwarmNetworkOptions)
}
- ctx := context.Background()
- if o.Timeout > 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, o.Timeout)
- defer cancel()
- }
-
- swarms := make(map[discover.NodeID]*Swarm)
- files := make([]file, 0)
-
- services := map[string]adapters.ServiceFunc{
- "swarm": func(ctx *adapters.ServiceContext) (node.Service, error) {
+ sim := simulation.New(map[string]simulation.ServiceFunc{
+ "swarm": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
config := api.NewConfig()
- dir, err := ioutil.TempDir(dir, "node")
+ dir, err := ioutil.TempDir("", "swarm-network-test-node")
if err != nil {
- return nil, err
+ return nil, nil, err
+ }
+ cleanup = func() {
+ err := os.RemoveAll(dir)
+ if err != nil {
+ log.Error("cleaning up swarm temp dir", "err", err)
+ }
}
config.Path = dir
privkey, err := crypto.GenerateKey()
if err != nil {
- return nil, err
+ return nil, cleanup, err
}
config.Init(privkey)
config.DeliverySkipCheck = o.SkipCheck
- s, err := NewSwarm(config, nil)
+ swarm, err := NewSwarm(config, nil)
if err != nil {
- return nil, err
+ return nil, cleanup, err
}
- log.Info("new swarm", "bzzKey", config.BzzKey, "baseAddr", fmt.Sprintf("%x", s.bzz.BaseAddr()))
- swarms[ctx.Config.ID] = s
- return s, nil
+ bucket.Store(simulation.BucketKeyKademlia, swarm.bzz.Hive.Overlay.(*network.Kademlia))
+ log.Info("new swarm", "bzzKey", config.BzzKey, "baseAddr", fmt.Sprintf("%x", swarm.bzz.BaseAddr()))
+ return swarm, cleanup, nil
},
- }
-
- a := adapters.NewSimAdapter(services)
- net := simulations.NewNetwork(a, &simulations.NetworkConfig{
- ID: "0",
- DefaultService: "swarm",
})
- defer net.Shutdown()
+ defer sim.Close()
- trigger := make(chan discover.NodeID)
+ ctx := context.Background()
+ if o.Timeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, o.Timeout)
+ defer cancel()
+ }
- sim := simulations.NewSimulation(net)
+ files := make([]file, 0)
for i, step := range steps {
log.Debug("test sync step", "n", i+1, "nodes", step.nodeCount)
- change := step.nodeCount - len(allNodeIDs(net))
+ change := step.nodeCount - len(sim.UpNodeIDs())
if change > 0 {
- _, err := addNodes(change, net)
+ _, err := sim.AddNodesAndConnectChain(change)
if err != nil {
t.Fatal(err)
}
} else if change < 0 {
- err := removeNodes(-change, net)
+ _, err := sim.StopRandomNodes(-change)
if err != nil {
t.Fatal(err)
}
@@ -341,160 +329,48 @@ func testSwarmNetwork(t *testing.T, o *testSwarmNetworkOptions, steps ...testSwa
continue
}
- nodeIDs := allNodeIDs(net)
- shuffle(len(nodeIDs), func(i, j int) {
- nodeIDs[i], nodeIDs[j] = nodeIDs[j], nodeIDs[i]
- })
- for _, id := range nodeIDs {
- key, data, err := uploadFile(swarms[id])
- if err != nil {
- t.Fatal(err)
- }
- log.Trace("file uploaded", "node", id, "key", key.String())
- files = append(files, file{
- addr: key,
- data: data,
- nodeID: id,
- })
- }
-
- // Prepare PeerPot map for checking Kademlia health
- var ppmap map[string]*network.PeerPot
- nIDs := allNodeIDs(net)
- addrs := make([][]byte, len(nIDs))
- if *waitKademlia {
- for i, id := range nIDs {
- addrs[i] = swarms[id].bzz.BaseAddr()
- }
- ppmap = network.NewPeerPotMap(2, addrs)
- }
-
var checkStatusM sync.Map
var nodeStatusM sync.Map
var totalFoundCount uint64
- result := sim.Run(ctx, &simulations.Step{
- Action: func(ctx context.Context) error {
- if *waitKademlia {
- // Wait for healthy Kademlia on every node before checking files
- ticker := time.NewTicker(200 * time.Millisecond)
- defer ticker.Stop()
-
- for range ticker.C {
- healthy := true
- log.Debug("kademlia health check", "node count", len(nIDs), "addr count", len(addrs))
- for i, id := range nIDs {
- swarm := swarms[id]
- //PeerPot for this node
- addr := common.Bytes2Hex(swarm.bzz.BaseAddr())
- pp := ppmap[addr]
- //call Healthy RPC
- h := swarm.bzz.Healthy(pp)
- //print info
- log.Debug(swarm.bzz.String())
- log.Debug("kademlia", "empty bins", pp.EmptyBins, "gotNN", h.GotNN, "knowNN", h.KnowNN, "full", h.Full)
- log.Debug("kademlia", "health", h.GotNN && h.KnowNN && h.Full, "addr", fmt.Sprintf("%x", swarm.bzz.BaseAddr()), "id", id, "i", i)
- log.Debug("kademlia", "ill condition", !h.GotNN || !h.Full, "addr", fmt.Sprintf("%x", swarm.bzz.BaseAddr()), "id", id, "i", i)
- if !h.GotNN || !h.Full {
- healthy = false
- break
- }
- }
- if healthy {
- break
- }
- }
+ result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
+ nodeIDs := sim.UpNodeIDs()
+ shuffle(len(nodeIDs), func(i, j int) {
+ nodeIDs[i], nodeIDs[j] = nodeIDs[j], nodeIDs[i]
+ })
+ for _, id := range nodeIDs {
+ key, data, err := uploadFile(sim.Service("swarm", id).(*Swarm))
+ if err != nil {
+ return err
}
+ log.Trace("file uploaded", "node", id, "key", key.String())
+ files = append(files, file{
+ addr: key,
+ data: data,
+ nodeID: id,
+ })
+ }
- go func() {
- // File retrieval check is repeated until all uploaded files are retrieved from all nodes
- // or until the timeout is reached.
- for {
- if retrieve(net, files, swarms, trigger, &checkStatusM, &nodeStatusM, &totalFoundCount) == 0 {
- return
- }
- }
- }()
- return nil
- },
- Trigger: trigger,
- Expect: &simulations.Expectation{
- Nodes: allNodeIDs(net),
- Check: func(ctx context.Context, id discover.NodeID) (bool, error) {
- // The check is done by a goroutine in the action function.
- return true, nil
- },
- },
- })
- if result.Error != nil {
- t.Fatal(result.Error)
- }
- log.Debug("done: test sync step", "n", i+1, "nodes", step.nodeCount)
- }
-}
-
-// allNodeIDs is returning NodeID for every node that is Up.
-func allNodeIDs(net *simulations.Network) (nodes []discover.NodeID) {
- for _, n := range net.GetNodes() {
- if n.Up {
- nodes = append(nodes, n.ID())
- }
- }
- return
-}
-
-// addNodes adds a number of nodes to the network.
-func addNodes(count int, net *simulations.Network) (ids []discover.NodeID, err error) {
- for i := 0; i < count; i++ {
- nodeIDs := allNodeIDs(net)
- l := len(nodeIDs)
- nodeconf := adapters.RandomNodeConfig()
- node, err := net.NewNodeWithConfig(nodeconf)
- if err != nil {
- return nil, fmt.Errorf("create node: %v", err)
- }
- err = net.Start(node.ID())
- if err != nil {
- return nil, fmt.Errorf("start node: %v", err)
- }
-
- log.Debug("created node", "id", node.ID())
-
- // connect nodes in a chain
- if l > 0 {
- var otherNodeID discover.NodeID
- for i := l - 1; i >= 0; i-- {
- n := net.GetNode(nodeIDs[i])
- if n.Up {
- otherNodeID = n.ID()
- break
+ if *waitKademlia {
+ if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
+ return err
}
}
- log.Debug("connect nodes", "one", node.ID(), "other", otherNodeID)
- if err := net.Connect(node.ID(), otherNodeID); err != nil {
- return nil, err
+
+ // File retrieval check is repeated until all uploaded files are retrieved from all nodes
+ // or until the timeout is reached.
+ for {
+ if retrieve(sim, files, &checkStatusM, &nodeStatusM, &totalFoundCount) == 0 {
+ return nil
+ }
}
- }
- ids = append(ids, node.ID())
- }
- return ids, nil
-}
+ })
-// removeNodes stops a random nodes in the network.
-func removeNodes(count int, net *simulations.Network) error {
- for i := 0; i < count; i++ {
- // allNodeIDs are returning only the Up nodes.
- nodeIDs := allNodeIDs(net)
- if len(nodeIDs) == 0 {
- break
- }
- node := net.GetNode(nodeIDs[rand.Intn(len(nodeIDs))])
- if err := node.Stop(); err != nil {
- return err
+ if result.Error != nil {
+ t.Fatal(result.Error)
}
- log.Debug("removed node", "id", node.ID())
+ log.Debug("done: test sync step", "n", i+1, "nodes", step.nodeCount)
}
- return nil
}
// uploadFile, uploads a short file to the swarm instance
@@ -522,10 +398,8 @@ func uploadFile(swarm *Swarm) (storage.Address, string, error) {
// retrieve is the function that is used for checking the availability of
// uploaded files in testSwarmNetwork test helper function.
func retrieve(
- net *simulations.Network,
+ sim *simulation.Simulation,
files []file,
- swarms map[discover.NodeID]*Swarm,
- trigger chan discover.NodeID,
checkStatusM *sync.Map,
nodeStatusM *sync.Map,
totalFoundCount *uint64,
@@ -537,7 +411,7 @@ func retrieve(
var totalWg sync.WaitGroup
errc := make(chan error)
- nodeIDs := allNodeIDs(net)
+ nodeIDs := sim.UpNodeIDs()
totalCheckCount := len(nodeIDs) * len(files)
@@ -553,8 +427,8 @@ func retrieve(
var wg sync.WaitGroup
+ swarm := sim.Service("swarm", id).(*Swarm)
for _, f := range files {
- swarm := swarms[id]
checkKey := check{
key: f.addr.String(),
@@ -601,7 +475,6 @@ func retrieve(
if foundCount == checkCount {
log.Info("all files are found for node", "id", id.String(), "duration", time.Since(start))
nodeStatusM.Store(id, 0)
- trigger <- id
return
}
log.Debug("files missing for node", "id", id.String(), "check", checkCount, "found", foundCount)
diff --git a/swarm/storage/mru/doc.go b/swarm/storage/mru/doc.go
new file mode 100644
index 000000000..e1d7c2c34
--- /dev/null
+++ b/swarm/storage/mru/doc.go
@@ -0,0 +1,61 @@
+// Package mru defines Mutable resource updates.
+// A Mutable Resource is an entity which allows updates to a resource
+// without resorting to ENS on each update.
+// The update scheme is built on swarm chunks with chunk keys following
+// a predictable, versionable pattern.
+//
+// Updates are defined to be periodic in nature, where the update frequency
+// is expressed in seconds.
+//
+// The root entry of a mutable resource is tied to a unique identifier that
+// is deterministically generated out of the metadata content that describes
+// the resource. This metadata includes a user-defined resource name, a resource
+// start time that indicates when the resource becomes valid, and
+// the frequency in seconds with which the resource is expected to be updated. Both
+// start time and frequency are stored as little-endian uint64 values in the database (for a
+// total of 16 bytes). It also contains the owner's address (ownerAddr).
+// This MRU info is stored in a separate content-addressed chunk
+// (call it the metadata chunk), with the following layout:
+//
+// (00|length|startTime|frequency|name|ownerAddr)
+//
+// (The first two zero-value bytes are used for disambiguation by the chunk validator;
+// an update chunk will always have a value > 0 there.)
+//
+// Each metadata chunk is identified by its rootAddr, calculated as follows:
+// metaHash = H(len(metadata), startTime, frequency, name)
+// rootAddr = H(metaHash, ownerAddr)
+// where H is the SHA3 hash function.
+// This scheme effectively locks the root chunk so that only the owner of the private key
+// that ownerAddr was derived from can sign updates.
+//
+// The root entry tells the requester from when the mutable resource was
+// first added (Unix time in seconds) and in which moments to look for the
+// actual updates. Thus, a resource update for identifier "føø.bar"
+// starting at unix time 1528800000 with frequency 300 (every 5 mins) will have updates on 1528800300,
+// 1528800600, 1528800900 and so on.
+//
+// Actual data updates are also made in the form of swarm chunks. The keys
+// of the updates are the hash of a concatenation of properties as follows:
+//
+// updateAddr = H(period, version, rootAddr)
+// where H is the SHA3 hash function
+// The period is (currentTime - startTime) / frequency
+//
+// Using our previous example, this means that period 3 will happen when the
+// clock hits 1528800900.
+//
+// If more than one update is made in the same period, incremental
+// version numbers are used successively.
+//
+// A user looking up a resource would only need to know the rootAddr in order to get the versions
+//
+// the resource update data is:
+// resourcedata = headerlength|period|version|rootAddr|flags|metaHash
+// where flags is a 1-byte flags field. Flag 0 is set to 1 to indicate multihash
+//
+// the full update data that goes in the chunk payload is:
+// resourcedata|sign(resourcedata)
+//
+// headerlength is a 16-bit value containing the byte length of period|version|rootAddr|flags|metaHash
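+//
+// Illustrative example (just the formulas above applied to the earlier numbers,
+// not an additional protocol rule): for a resource with startTime 1528800000 and
+// frequency 300, a lookup performed at clock time 1528800935 falls in
+// period = (1528800935 - 1528800000) / 300 = 3 (integer division), so a client
+// computes updateAddr = H(3, 1, rootAddr) for the first version of that period
+// and probes further versions (or earlier periods) from there.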
+package mru
diff --git a/swarm/storage/mru/error.go b/swarm/storage/mru/error.go
index bf33e6540..18ab52558 100644
--- a/swarm/storage/mru/error.go
+++ b/swarm/storage/mru/error.go
@@ -16,6 +16,10 @@
package mru
+import (
+ "fmt"
+)
+
const (
ErrInit = iota
ErrNotFound
@@ -30,3 +34,40 @@ const (
ErrPeriodDepth
ErrCnt
)
+
+// Error is the typed error object used for Mutable Resources
+type Error struct {
+ code int
+ err string
+}
+
+// Error implements the error interface
+func (e *Error) Error() string {
+ return e.err
+}
+
+// Code returns the error code
+// Error codes are enumerated in the error.go file within the mru package
+func (e *Error) Code() int {
+ return e.code
+}
+
+// NewError creates a new Mutable Resource Error object with the specified code and custom error message
+func NewError(code int, s string) error {
+ if code < 0 || code >= ErrCnt {
+ panic("no such error code!")
+ }
+ r := &Error{
+ err: s,
+ }
+ switch code {
+ case ErrNotFound, ErrIO, ErrUnauthorized, ErrInvalidValue, ErrDataOverflow, ErrNothingToReturn, ErrInvalidSignature, ErrNotSynced, ErrPeriodDepth, ErrCorruptData:
+ r.code = code
+ }
+ return r
+}
+
+// NewErrorf is a convenience version of NewError that incorporates printf-style formatting
+func NewErrorf(code int, format string, args ...interface{}) error {
+ return NewError(code, fmt.Sprintf(format, args...))
+}
diff --git a/swarm/storage/mru/handler.go b/swarm/storage/mru/handler.go
new file mode 100644
index 000000000..188b986b8
--- /dev/null
+++ b/swarm/storage/mru/handler.go
@@ -0,0 +1,514 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Handler is the API for Mutable Resources
+// It enables creating, updating, syncing and retrieving resources and their update data
+package mru
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "sync"
+ "time"
+ "unsafe"
+
+ "github.com/ethereum/go-ethereum/swarm/log"
+ "github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+const chunkSize = 4096 // temporary until we implement FileStore in the resourcehandler
+
+type Handler struct {
+ chunkStore *storage.NetStore
+ HashSize int
+ resources map[uint64]*resource
+ resourceLock sync.RWMutex
+ storeTimeout time.Duration
+ queryMaxPeriods uint32
+}
+
+// HandlerParams passes parameters to the Handler constructor NewHandler
+// Signer and TimestampProvider are mandatory parameters
+type HandlerParams struct {
+ QueryMaxPeriods uint32
+}
+
+// hashPool contains a pool of ready hashers
+var hashPool sync.Pool
+var minimumChunkLength int
+
+// init initializes the package and hashPool
+func init() {
+ hashPool = sync.Pool{
+ New: func() interface{} {
+ return storage.MakeHashFunc(resourceHashAlgorithm)()
+ },
+ }
+ if minimumMetadataLength < minimumUpdateDataLength {
+ minimumChunkLength = minimumMetadataLength
+ } else {
+ minimumChunkLength = minimumUpdateDataLength
+ }
+}
+
+// NewHandler creates a new Mutable Resource API
+func NewHandler(params *HandlerParams) (*Handler, error) {
+
+ rh := &Handler{
+ resources: make(map[uint64]*resource),
+ storeTimeout: defaultStoreTimeout,
+ queryMaxPeriods: params.QueryMaxPeriods,
+ }
+
+ for i := 0; i < hasherCount; i++ {
+ hashfunc := storage.MakeHashFunc(resourceHashAlgorithm)()
+ if rh.HashSize == 0 {
+ rh.HashSize = hashfunc.Size()
+ }
+ hashPool.Put(hashfunc)
+ }
+
+ return rh, nil
+}
+
+// SetStore sets the store backend for the Mutable Resource API
+func (h *Handler) SetStore(store *storage.NetStore) {
+ h.chunkStore = store
+}
+
+// Validate is a chunk validation method
+// If it looks like a resource update, the chunk address is checked against the ownerAddr of the update's signature
+// It implements the storage.ChunkValidator interface
+func (h *Handler) Validate(chunkAddr storage.Address, data []byte) bool {
+
+ dataLength := len(data)
+ if dataLength < minimumChunkLength {
+ return false
+ }
+
+ //metadata chunks have the first two bytes set to zero
+ if data[0] == 0 && data[1] == 0 && dataLength >= minimumMetadataLength {
+ //metadata chunk
+ rootAddr, _ := metadataHash(data)
+ valid := bytes.Equal(chunkAddr, rootAddr)
+ if !valid {
+ log.Debug(fmt.Sprintf("Invalid root metadata chunk with address: %s", chunkAddr.Hex()))
+ }
+ return valid
+ }
+
+ // if it is not a metadata chunk, check if it is a properly formatted update chunk with
+ // valid signature and proof of ownership of the resource it is trying
+ // to update
+
+ // First, deserialize the chunk
+ var r SignedResourceUpdate
+ if err := r.fromChunk(chunkAddr, data); err != nil {
+ log.Debug("Invalid resource chunk with address %s: %s ", chunkAddr.Hex(), err.Error())
+ return false
+ }
+
+ // check that the lookup information contained in the chunk matches the updateAddr (chunk search key)
+ // that was used to retrieve this chunk
+ // if this validation fails, someone forged a chunk.
+ if !bytes.Equal(chunkAddr, r.updateHeader.UpdateAddr()) {
+ log.Debug("period,version,rootAddr contained in update chunk do not match updateAddr %s", chunkAddr.Hex())
+ return false
+ }
+
+ // Verify signatures and that the signer actually owns the resource
+ // If it fails, it means either the signature is not valid, data is corrupted
+ // or someone is trying to update someone else's resource.
+ if err := r.Verify(); err != nil {
+ log.Debug("Invalid signature: %v", err)
+ return false
+ }
+
+ return true
+}
+
+// GetContent retrieves the data payload of the last synced update of the Mutable Resource
+func (h *Handler) GetContent(rootAddr storage.Address) (storage.Address, []byte, error) {
+ rsrc := h.get(rootAddr)
+ if rsrc == nil || !rsrc.isSynced() {
+ return nil, nil, NewError(ErrNotFound, " does not exist or is not synced")
+ }
+ return rsrc.lastKey, rsrc.data, nil
+}
+
+// GetLastPeriod retrieves the period of the last synced update of the Mutable Resource
+func (h *Handler) GetLastPeriod(rootAddr storage.Address) (uint32, error) {
+ rsrc := h.get(rootAddr)
+ if rsrc == nil {
+ return 0, NewError(ErrNotFound, " does not exist")
+ } else if !rsrc.isSynced() {
+ return 0, NewError(ErrNotSynced, " is not synced")
+ }
+ return rsrc.period, nil
+}
+
+// GetVersion retrieves the version of the last synced update of the Mutable Resource
+func (h *Handler) GetVersion(rootAddr storage.Address) (uint32, error) {
+ rsrc := h.get(rootAddr)
+ if rsrc == nil {
+ return 0, NewError(ErrNotFound, " does not exist")
+ } else if !rsrc.isSynced() {
+ return 0, NewError(ErrNotSynced, " is not synced")
+ }
+ return rsrc.version, nil
+}
+
+// \TODO should be hashsize * branches from the chosen chunker, implement with FileStore
+func (h *Handler) chunkSize() int64 {
+ return chunkSize
+}
+
+// New creates a new metadata chunk out of the request passed in.
+func (h *Handler) New(ctx context.Context, request *Request) error {
+
+ // frequency 0 is invalid
+ if request.metadata.Frequency == 0 {
+ return NewError(ErrInvalidValue, "frequency cannot be 0 when creating a resource")
+ }
+
+ // make sure owner is set to something
+ if request.metadata.Owner == zeroAddr {
+ return NewError(ErrInvalidValue, "ownerAddr must be set to create a new metadata chunk")
+ }
+
+ // create the meta chunk and store it in swarm
+ chunk, metaHash, err := request.metadata.newChunk()
+ if err != nil {
+ return err
+ }
+ if request.metaHash != nil && !bytes.Equal(request.metaHash, metaHash) ||
+ request.rootAddr != nil && !bytes.Equal(request.rootAddr, chunk.Addr) {
+ return NewError(ErrInvalidValue, "metaHash in UpdateRequest does not match actual metadata")
+ }
+
+ request.metaHash = metaHash
+ request.rootAddr = chunk.Addr
+
+ h.chunkStore.Put(ctx, chunk)
+ log.Debug("new resource", "name", request.metadata.Name, "startTime", request.metadata.StartTime, "frequency", request.metadata.Frequency, "owner", request.metadata.Owner)
+
+ // create the internal index for the resource and populate it with its metadata
+ rsrc := &resource{
+ resourceUpdate: resourceUpdate{
+ updateHeader: updateHeader{
+ UpdateLookup: UpdateLookup{
+ rootAddr: chunk.Addr,
+ },
+ },
+ },
+ ResourceMetadata: request.metadata,
+ updated: time.Now(),
+ }
+ h.set(chunk.Addr, rsrc)
+
+ return nil
+}
+
+// NewUpdateRequest prepares an UpdateRequest structure with all the necessary information to
+// just add the desired data and sign it.
+// The resulting structure can then be signed and passed to Handler.Update to be verified and sent
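+//
+// A rough sketch of the intended flow; how the update data is set on the Request
+// and how the signed update is extracted from it depends on the Request/signer API
+// defined elsewhere in this package, so those two steps are only hinted at here:
+//
+//	req, err := h.NewUpdateRequest(ctx, rootAddr)
+//	// ... set the update data on req and sign it with the resource owner's key ...
+//	updateAddr, err := h.Update(ctx, signedUpdate) // signedUpdate: the *SignedResourceUpdate carried by req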
+func (h *Handler) NewUpdateRequest(ctx context.Context, rootAddr storage.Address) (updateRequest *Request, err error) {
+
+ if rootAddr == nil {
+ return nil, NewError(ErrInvalidValue, "rootAddr cannot be nil")
+ }
+
+ // Make sure we have a cache of the metadata chunk
+ rsrc, err := h.Load(ctx, rootAddr)
+ if err != nil {
+ return nil, err
+ }
+
+ now := TimestampProvider.Now()
+
+ updateRequest = new(Request)
+ updateRequest.period, err = getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err = h.lookup(rsrc, LookupLatestVersionInPeriod(rsrc.rootAddr, updateRequest.period)); err != nil {
+ if err.(*Error).code != ErrNotFound {
+ return nil, err
+ }
+ // not finding updates means that there is a network error
+ // or that the resource really does not have updates in this period.
+ }
+
+ updateRequest.multihash = rsrc.multihash
+ updateRequest.rootAddr = rsrc.rootAddr
+ updateRequest.metaHash = rsrc.metaHash
+ updateRequest.metadata = rsrc.ResourceMetadata
+
+ // if we already have an update for this period then increment version
+ // resource object MUST be in sync for version to be correct, but we checked this earlier in the method already
+ if h.hasUpdate(rootAddr, updateRequest.period) {
+ updateRequest.version = rsrc.version + 1
+ } else {
+ updateRequest.version = 1
+ }
+
+ return updateRequest, nil
+}
+
+// Lookup retrieves a specific or latest version of the resource update with metadata chunk at params.Root
+// Lookup works differently depending on the configuration of `LookupParams`
+// See the `LookupParams` documentation and helper functions:
+// `LookupLatest`, `LookupLatestVersionInPeriod` and `LookupVersion`
+// When looking for the latest update, it starts at the next period after the current time.
+// upon failure tries the corresponding keys of each previous period until one is found
+// (or startTime is reached, in which case there are no updates).
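+//
+// A minimal usage sketch (assuming the store was set with SetStore and the
+// metadata chunk was already loaded with Load):
+//
+//	rsrc, err := h.Lookup(ctx, LookupLatest(rootAddr))       // latest known update
+//	rsrc, err = h.Lookup(ctx, LookupVersion(rootAddr, 3, 2)) // period 3, version 2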
+func (h *Handler) Lookup(ctx context.Context, params *LookupParams) (*resource, error) {
+
+ rsrc := h.get(params.rootAddr)
+ if rsrc == nil {
+ return nil, NewError(ErrNothingToReturn, "resource not loaded")
+ }
+ return h.lookup(rsrc, params)
+}
+
+// LookupPrevious returns the resource before the one currently loaded in the resource cache
+// This is useful where resource updates are used incrementally in contrast to
+// merely replacing content.
+// Requires a cached resource object to determine the current state of the resource.
+func (h *Handler) LookupPrevious(ctx context.Context, params *LookupParams) (*resource, error) {
+ rsrc := h.get(params.rootAddr)
+ if rsrc == nil {
+ return nil, NewError(ErrNothingToReturn, "resource not loaded")
+ }
+ if !rsrc.isSynced() {
+ return nil, NewError(ErrNotSynced, "LookupPrevious requires synced resource.")
+ } else if rsrc.period == 0 {
+ return nil, NewError(ErrNothingToReturn, " not found")
+ }
+ var version, period uint32
+ if rsrc.version > 1 {
+ version = rsrc.version - 1
+ period = rsrc.period
+ } else if rsrc.period == 1 {
+ return nil, NewError(ErrNothingToReturn, "Current update is the oldest")
+ } else {
+ version = 0
+ period = rsrc.period - 1
+ }
+ return h.lookup(rsrc, NewLookupParams(rsrc.rootAddr, period, version, params.Limit))
+}
+
+// base code for public lookup methods
+func (h *Handler) lookup(rsrc *resource, params *LookupParams) (*resource, error) {
+
+ lp := *params
+ // we can't look for anything without a store
+ if h.chunkStore == nil {
+ return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups")
+ }
+
+ var specificperiod bool
+ if lp.period > 0 {
+ specificperiod = true
+ } else {
+ // get the current time and the next period
+ now := TimestampProvider.Now()
+
+ var period uint32
+ period, err := getNextPeriod(rsrc.StartTime.Time, now.Time, rsrc.Frequency)
+ if err != nil {
+ return nil, err
+ }
+ lp.period = period
+ }
+
+ // start from the last possible period, and iterate previous ones
+ // (unless we want a specific period only) until we find a match.
+ // If we hit startTime we're out of options
+ var specificversion bool
+ if lp.version > 0 {
+ specificversion = true
+ } else {
+ lp.version = 1
+ }
+
+ var hops uint32
+ if lp.Limit == 0 {
+ lp.Limit = h.queryMaxPeriods
+ }
+ log.Trace("resource lookup", "period", lp.period, "version", lp.version, "limit", lp.Limit)
+ for lp.period > 0 {
+ if lp.Limit != 0 && hops > lp.Limit {
+ return nil, NewErrorf(ErrPeriodDepth, "Lookup exceeded max period hops (%d)", lp.Limit)
+ }
+ updateAddr := lp.UpdateAddr()
+ chunk, err := h.chunkStore.GetWithTimeout(context.TODO(), updateAddr, defaultRetrieveTimeout)
+ if err == nil {
+ if specificversion {
+ return h.updateIndex(rsrc, chunk)
+ }
+ // check if we have versions > 1. If a version fails, the previous version is used and returned.
+ log.Trace("rsrc update version 1 found, checking for version updates", "period", lp.period, "updateAddr", updateAddr)
+ for {
+ newversion := lp.version + 1
+ updateAddr := lp.UpdateAddr()
+ newchunk, err := h.chunkStore.GetWithTimeout(context.TODO(), updateAddr, defaultRetrieveTimeout)
+ if err != nil {
+ return h.updateIndex(rsrc, chunk)
+ }
+ chunk = newchunk
+ lp.version = newversion
+ log.Trace("version update found, checking next", "version", lp.version, "period", lp.period, "updateAddr", updateAddr)
+ }
+ }
+ if specificperiod {
+ break
+ }
+ log.Trace("rsrc update not found, checking previous period", "period", lp.period, "updateAddr", updateAddr)
+ lp.period--
+ hops++
+ }
+ return nil, NewError(ErrNotFound, "no updates found")
+}
+
+// Load retrieves the Mutable Resource metadata chunk stored at rootAddr
+// Upon retrieval it creates/updates the index entry for it with metadata corresponding to the chunk contents
+func (h *Handler) Load(ctx context.Context, rootAddr storage.Address) (*resource, error) {
+ chunk, err := h.chunkStore.GetWithTimeout(ctx, rootAddr, defaultRetrieveTimeout)
+ if err != nil {
+ return nil, NewError(ErrNotFound, err.Error())
+ }
+
+ // create the index entry
+ rsrc := &resource{}
+
+ if err := rsrc.ResourceMetadata.binaryGet(chunk.SData); err != nil { // Will fail if this is not really a metadata chunk
+ return nil, err
+ }
+
+ rsrc.rootAddr, rsrc.metaHash = metadataHash(chunk.SData)
+ if !bytes.Equal(rsrc.rootAddr, rootAddr) {
+ return nil, NewError(ErrCorruptData, "Corrupt metadata chunk")
+ }
+ h.set(rootAddr, rsrc)
+ log.Trace("resource index load", "rootkey", rootAddr, "name", rsrc.ResourceMetadata.Name, "starttime", rsrc.ResourceMetadata.StartTime, "frequency", rsrc.ResourceMetadata.Frequency)
+ return rsrc, nil
+}
+
+// update mutable resource index map with specified content
+func (h *Handler) updateIndex(rsrc *resource, chunk *storage.Chunk) (*resource, error) {
+
+ // retrieve metadata from chunk data and check that it matches this mutable resource
+ var r SignedResourceUpdate
+ if err := r.fromChunk(chunk.Addr, chunk.SData); err != nil {
+ return nil, err
+ }
+ log.Trace("resource index update", "name", rsrc.ResourceMetadata.Name, "updatekey", chunk.Addr, "period", r.period, "version", r.version)
+
+ // update our rsrcs entry map
+ rsrc.lastKey = chunk.Addr
+ rsrc.period = r.period
+ rsrc.version = r.version
+ rsrc.updated = time.Now()
+ rsrc.data = make([]byte, len(r.data))
+ rsrc.multihash = r.multihash
+ copy(rsrc.data, r.data)
+ rsrc.Reader = bytes.NewReader(rsrc.data)
+ log.Debug("resource synced", "name", rsrc.ResourceMetadata.Name, "updateAddr", chunk.Addr, "period", rsrc.period, "version", rsrc.version)
+ h.set(chunk.Addr, rsrc)
+ return rsrc, nil
+}
+
+// Update adds an actual data update
+// Uses the Mutable Resource metadata currently loaded in the resources map entry.
+// It is the caller's responsibility to make sure that this data is not stale.
+// Note that a Mutable Resource update cannot span chunks, and thus has a MAX NET LENGTH 4096, INCLUDING update header data and signature. An error will be returned if the total length of the chunk payload will exceed this limit.
+// Update can only check if the caller is trying to overwrite the very last known version, otherwise it just puts the update
+// on the network.
+func (h *Handler) Update(ctx context.Context, r *SignedResourceUpdate) (storage.Address, error) {
+ return h.update(ctx, r)
+}
+
+// create and commit an update
+func (h *Handler) update(ctx context.Context, r *SignedResourceUpdate) (updateAddr storage.Address, err error) {
+
+ // we can't update anything without a store
+ if h.chunkStore == nil {
+ return nil, NewError(ErrInit, "Call Handler.SetStore() before updating")
+ }
+
+ rsrc := h.get(r.rootAddr)
+ if rsrc != nil && rsrc.period != 0 && rsrc.version != 0 && // This is the only cheap check we can do for sure
+ rsrc.period == r.period && rsrc.version >= r.version { // without having to lookup update chunks
+
+ return nil, NewError(ErrInvalidValue, "A former update in this period is already known to exist")
+ }
+
+ chunk, err := r.toChunk() // Serialize the update into a chunk. Fails if data is too big
+ if err != nil {
+ return nil, err
+ }
+
+ // send the chunk
+ h.chunkStore.Put(ctx, chunk)
+ log.Trace("resource update", "updateAddr", r.updateAddr, "lastperiod", r.period, "version", r.version, "data", chunk.SData, "multihash", r.multihash)
+
+ // update our resources map entry if the new update is newer than the one we have, if we have it.
+ if rsrc != nil && (r.period > rsrc.period || (r.period == rsrc.period && r.version > rsrc.version)) {
+ rsrc.period = r.period
+ rsrc.version = r.version
+ rsrc.data = make([]byte, len(r.data))
+ rsrc.updated = time.Now()
+ rsrc.lastKey = r.updateAddr
+ rsrc.multihash = r.multihash
+ copy(rsrc.data, r.data)
+ rsrc.Reader = bytes.NewReader(rsrc.data)
+ }
+ return r.updateAddr, nil
+}
+
+// Retrieves the resource index value for the given rootAddr
+func (h *Handler) get(rootAddr storage.Address) *resource {
+ if len(rootAddr) < storage.KeyLength {
+ log.Warn("Handler.get with invalid rootAddr")
+ return nil
+ }
+ hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0]))
+ h.resourceLock.RLock()
+ defer h.resourceLock.RUnlock()
+ rsrc := h.resources[hashKey]
+ return rsrc
+}
+
+// Sets the resource index value for the given rootAddr
+func (h *Handler) set(rootAddr storage.Address, rsrc *resource) {
+ if len(rootAddr) < storage.KeyLength {
+ log.Warn("Handler.set with invalid rootAddr")
+ return
+ }
+ hashKey := *(*uint64)(unsafe.Pointer(&rootAddr[0]))
+ h.resourceLock.Lock()
+ defer h.resourceLock.Unlock()
+ h.resources[hashKey] = rsrc
+}
+
+// Checks if we already have an update on this resource, according to the value in the current state of the resource index
+func (h *Handler) hasUpdate(rootAddr storage.Address, period uint32) bool {
+ rsrc := h.get(rootAddr)
+ return rsrc != nil && rsrc.period == period
+}
diff --git a/swarm/storage/mru/lookup.go b/swarm/storage/mru/lookup.go
new file mode 100644
index 000000000..eb28336e1
--- /dev/null
+++ b/swarm/storage/mru/lookup.go
@@ -0,0 +1,117 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+ "encoding/binary"
+ "hash"
+
+ "github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+// LookupParams is used to specify constraints when performing an update lookup
+// Limit defines the maximum number of period hops the lookup may walk back;
+// if it is zero, the handler's default (queryMaxPeriods) is used
+type LookupParams struct {
+ UpdateLookup
+ Limit uint32
+}
+
+// RootAddr returns the metadata chunk address
+func (r *LookupParams) RootAddr() storage.Address {
+ return r.rootAddr
+}
+
+func NewLookupParams(rootAddr storage.Address, period, version uint32, limit uint32) *LookupParams {
+ return &LookupParams{
+ UpdateLookup: UpdateLookup{
+ period: period,
+ version: version,
+ rootAddr: rootAddr,
+ },
+ Limit: limit,
+ }
+}
+
+// LookupLatest generates lookup parameters that look for the latest version of a resource
+func LookupLatest(rootAddr storage.Address) *LookupParams {
+ return NewLookupParams(rootAddr, 0, 0, 0)
+}
+
+// LookupLatestVersionInPeriod generates lookup parameters that look for the latest version of a resource in a given period
+func LookupLatestVersionInPeriod(rootAddr storage.Address, period uint32) *LookupParams {
+ return NewLookupParams(rootAddr, period, 0, 0)
+}
+
+// LookupVersion generates lookup parameters that look for a specific version of a resource
+func LookupVersion(rootAddr storage.Address, period, version uint32) *LookupParams {
+ return NewLookupParams(rootAddr, period, version, 0)
+}
+
+// UpdateLookup represents the components of a resource update search key
+type UpdateLookup struct {
+ period uint32
+ version uint32
+ rootAddr storage.Address
+}
+
+// 4 bytes period
+// 4 bytes version
+// storage.KeyLength bytes for rootAddr
+const updateLookupLength = 4 + 4 + storage.KeyLength
+
+// UpdateAddr calculates the resource update chunk address corresponding to this lookup key
+func (u *UpdateLookup) UpdateAddr() (updateAddr storage.Address) {
+ serializedData := make([]byte, updateLookupLength)
+ u.binaryPut(serializedData)
+ hasher := hashPool.Get().(hash.Hash)
+ defer hashPool.Put(hasher)
+ hasher.Reset()
+ hasher.Write(serializedData)
+ return hasher.Sum(nil)
+}
+
+// binaryPut serializes this UpdateLookup instance into the provided slice
+func (u *UpdateLookup) binaryPut(serializedData []byte) error {
+ if len(serializedData) != updateLookupLength {
+ return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize UpdateLookup. Expected %d, got %d", updateLookupLength, len(serializedData))
+ }
+ if len(u.rootAddr) != storage.KeyLength {
+ return NewError(ErrInvalidValue, "UpdateLookup.binaryPut called without rootAddr set")
+ }
+ binary.LittleEndian.PutUint32(serializedData[:4], u.period)
+ binary.LittleEndian.PutUint32(serializedData[4:8], u.version)
+ copy(serializedData[8:], u.rootAddr[:])
+ return nil
+}
+
+// binaryLength returns the expected size of this structure when serialized
+func (u *UpdateLookup) binaryLength() int {
+ return updateLookupLength
+}
+
+// binaryGet restores the current instance from the information contained in the passed slice
+func (u *UpdateLookup) binaryGet(serializedData []byte) error {
+ if len(serializedData) != updateLookupLength {
+ return NewErrorf(ErrInvalidValue, "Incorrect slice size to read UpdateLookup. Expected %d, got %d", updateLookupLength, len(serializedData))
+ }
+ u.period = binary.LittleEndian.Uint32(serializedData[:4])
+ u.version = binary.LittleEndian.Uint32(serializedData[4:8])
+ u.rootAddr = storage.Address(make([]byte, storage.KeyLength))
+ copy(u.rootAddr[:], serializedData[8:])
+ return nil
+}
diff --git a/swarm/storage/mru/lookup_test.go b/swarm/storage/mru/lookup_test.go
new file mode 100644
index 000000000..b66b200a3
--- /dev/null
+++ b/swarm/storage/mru/lookup_test.go
@@ -0,0 +1,85 @@
+package mru
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+func getTestUpdateLookup() *UpdateLookup {
+ metadata := *getTestMetadata()
+ rootAddr, _, _, _ := metadata.serializeAndHash()
+ return &UpdateLookup{
+ period: 79,
+ version: 2010,
+ rootAddr: rootAddr,
+ }
+}
+
+func compareUpdateLookup(a, b *UpdateLookup) bool {
+ return a.version == b.version &&
+ a.period == b.period &&
+ bytes.Equal(a.rootAddr, b.rootAddr)
+}
+
+func TestUpdateLookupUpdateAddr(t *testing.T) {
+ ul := getTestUpdateLookup()
+ updateAddr := ul.UpdateAddr()
+ compareByteSliceToExpectedHex(t, "updateAddr", updateAddr, "0x8fbc8d4777ef6da790257eda80ab4321fabd08cbdbe67e4e3da6caca386d64e0")
+}
+
+func TestUpdateLookupSerializer(t *testing.T) {
+ serializedUpdateLookup := make([]byte, updateLookupLength)
+ ul := getTestUpdateLookup()
+ if err := ul.binaryPut(serializedUpdateLookup); err != nil {
+ t.Fatal(err)
+ }
+ compareByteSliceToExpectedHex(t, "serializedUpdateLookup", serializedUpdateLookup, "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb")
+
+ // set receiving slice to the wrong size
+ serializedUpdateLookup = make([]byte, updateLookupLength+7)
+ if err := ul.binaryPut(serializedUpdateLookup); err == nil {
+ t.Fatalf("Expected UpdateLookup.binaryPut to fail when receiving slice has a length != %d", updateLookupLength)
+ }
+
+ // set rootAddr to an invalid length
+ ul.rootAddr = []byte{1, 2, 3, 4}
+ serializedUpdateLookup = make([]byte, updateLookupLength)
+ if err := ul.binaryPut(serializedUpdateLookup); err == nil {
+ t.Fatal("Expected UpdateLookup.binaryPut to fail when rootAddr is not of the correct size")
+ }
+}
+
+func TestUpdateLookupDeserializer(t *testing.T) {
+ serializedUpdateLookup, _ := hexutil.Decode("0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb")
+ var recoveredUpdateLookup UpdateLookup
+ if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil {
+ t.Fatal(err)
+ }
+ originalUpdateLookup := *getTestUpdateLookup()
+ if !compareUpdateLookup(&originalUpdateLookup, &recoveredUpdateLookup) {
+ t.Fatalf("Expected recovered UpdateLookup to match")
+ }
+
+ // set source slice to the wrong size
+ serializedUpdateLookup = make([]byte, updateLookupLength+4)
+ if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err == nil {
+ t.Fatalf("Expected UpdateLookup.binaryGet to fail when source slice has a length != %d", updateLookupLength)
+ }
+}
+
+func TestUpdateLookupSerializeDeserialize(t *testing.T) {
+ serializedUpdateLookup := make([]byte, updateLookupLength)
+ originalUpdateLookup := getTestUpdateLookup()
+ if err := originalUpdateLookup.binaryPut(serializedUpdateLookup); err != nil {
+ t.Fatal(err)
+ }
+ var recoveredUpdateLookup UpdateLookup
+ if err := recoveredUpdateLookup.binaryGet(serializedUpdateLookup); err != nil {
+ t.Fatal(err)
+ }
+ if !compareUpdateLookup(originalUpdateLookup, &recoveredUpdateLookup) {
+ t.Fatalf("Expected recovered UpdateLookup to match")
+ }
+}
diff --git a/swarm/storage/mru/metadata.go b/swarm/storage/mru/metadata.go
new file mode 100644
index 000000000..0ab0ed1d9
--- /dev/null
+++ b/swarm/storage/mru/metadata.go
@@ -0,0 +1,189 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+ "encoding/binary"
+ "hash"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+// ResourceMetadata encapsulates the immutable information about a mutable resource :)
+// once serialized into a chunk, the resource can be retrieved by knowing its content-addressed rootAddr
+type ResourceMetadata struct {
+ StartTime Timestamp // time at which the resource starts to be valid
+ Frequency uint64 // expected update frequency for the resource
+ Name string // name of the resource, for the reference of the user or to disambiguate resources with same starttime, frequency, owneraddr
+ Owner common.Address // public address of the resource owner
+}
+
+const frequencyLength = 8 // sizeof(uint64)
+const nameLengthLength = 1
+
+// Resource metadata chunk layout:
+// 4 prefix bytes (chunkPrefixLength). The first two are set to zero. The second two indicate the length of the metadata that follows
+// Timestamp: timestampLength bytes
+// frequency: frequencyLength bytes
+// name length: nameLengthLength bytes
+// name (variable length, can be empty, up to 255 bytes)
+// ownerAddr: common.AddressLength
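+// As an illustrative size check (assuming the timestamp serializes to 8 bytes, as
+// in the accompanying tests): a metadata chunk with an empty name takes
+// 4 + 8 + 8 + 1 + 0 + 20 = 41 bytes, and every byte of the name adds one byte.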
+const minimumMetadataLength = chunkPrefixLength + timestampLength + frequencyLength + nameLengthLength + common.AddressLength
+
+// binaryGet populates the resource metadata from a byte array
+func (r *ResourceMetadata) binaryGet(serializedData []byte) error {
+ if len(serializedData) < minimumMetadataLength {
+ return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short. Expected at least %d. Got %d.", minimumMetadataLength, len(serializedData))
+ }
+
+ // first two bytes must be set to zero to indicate metadata chunks, so enforce this.
+ if serializedData[0] != 0 || serializedData[1] != 0 {
+ return NewError(ErrCorruptData, "Invalid metadata chunk")
+ }
+
+ cursor := 2
+ metadataLength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])) // metadataLength does not include the 4 prefix bytes
+ if metadataLength+chunkPrefixLength != len(serializedData) {
+ return NewErrorf(ErrCorruptData, "Incorrect declared metadata length. Expected %d, got %d.", metadataLength+chunkPrefixLength, len(serializedData))
+ }
+
+ cursor += 2
+
+ if err := r.StartTime.binaryGet(serializedData[cursor : cursor+timestampLength]); err != nil {
+ return err
+ }
+ cursor += timestampLength
+
+ r.Frequency = binary.LittleEndian.Uint64(serializedData[cursor : cursor+frequencyLength])
+ cursor += frequencyLength
+
+ nameLength := int(serializedData[cursor])
+ if nameLength+minimumMetadataLength > len(serializedData) {
+ return NewErrorf(ErrInvalidValue, "Metadata chunk to deserialize is too short when decoding resource name. Expected at least %d. Got %d.", nameLength+minimumMetadataLength, len(serializedData))
+ }
+ cursor++
+ r.Name = string(serializedData[cursor : cursor+nameLength])
+ cursor += nameLength
+
+ copy(r.Owner[:], serializedData[cursor:])
+ cursor += common.AddressLength
+ if cursor != len(serializedData) {
+ return NewErrorf(ErrInvalidValue, "Metadata chunk has leftover data after deserialization. %d left to read", len(serializedData)-cursor)
+ }
+ return nil
+}
+
+// binaryPut encodes the metadata into a byte array
+func (r *ResourceMetadata) binaryPut(serializedData []byte) error {
+ metadataChunkLength := r.binaryLength()
+ if len(serializedData) != metadataChunkLength {
+ return NewErrorf(ErrInvalidValue, "Need a slice of exactly %d bytes to serialize this metadata, but got a slice of size %d.", metadataChunkLength, len(serializedData))
+ }
+
+ // root chunk has first two bytes both set to 0, which distinguishes from update bytes
+ // therefore, skip the first two bytes of a zero-initialized array.
+ cursor := 2
+ binary.LittleEndian.PutUint16(serializedData[cursor:cursor+2], uint16(metadataChunkLength-chunkPrefixLength)) // metadataLength does not include the 4 prefix bytes
+ cursor += 2
+
+ r.StartTime.binaryPut(serializedData[cursor : cursor+timestampLength])
+ cursor += timestampLength
+
+ binary.LittleEndian.PutUint64(serializedData[cursor:cursor+frequencyLength], r.Frequency)
+ cursor += frequencyLength
+
+ // Encode the name string as a 1 byte length followed by the encoded string.
+ // Longer strings will be truncated.
+ nameLength := len(r.Name)
+ if nameLength > 255 {
+ nameLength = 255
+ }
+ serializedData[cursor] = uint8(nameLength)
+ cursor++
+ copy(serializedData[cursor:cursor+nameLength], []byte(r.Name[:nameLength]))
+ cursor += nameLength
+
+ copy(serializedData[cursor:cursor+common.AddressLength], r.Owner[:])
+ cursor += common.AddressLength
+
+ return nil
+}
+
+func (r *ResourceMetadata) binaryLength() int {
+ return minimumMetadataLength + len(r.Name)
+}
+
+// serializeAndHash returns the root chunk addr and metadata hash that help identify and ascertain ownership of this resource
+// returns the serialized metadata as a byproduct of having to hash it.
+func (r *ResourceMetadata) serializeAndHash() (rootAddr, metaHash []byte, chunkData []byte, err error) {
+
+ chunkData = make([]byte, r.binaryLength())
+ if err := r.binaryPut(chunkData); err != nil {
+ return nil, nil, nil, err
+ }
+ rootAddr, metaHash = metadataHash(chunkData)
+ return rootAddr, metaHash, chunkData, nil
+
+}
+
+// newChunk creates a metadata chunk out of a ResourceMetadata structure
+func (metadata *ResourceMetadata) newChunk() (chunk *storage.Chunk, metaHash []byte, err error) {
+ // the metadata chunk contains a timestamp of when the resource starts to be valid
+ // and also how frequently it is expected to be updated
+ // from this we know at what time we should look for updates, and how often
+ // it also contains the name of the resource, so we know what resource we are working with
+
+ // the key (rootAddr) of the metadata chunk is content-addressed
+ // if it wasn't we couldn't replace it later
+ // resolving this relationship is left up to external agents (for example ENS)
+ rootAddr, metaHash, chunkData, err := metadata.serializeAndHash()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // make the chunk and send it to swarm
+ chunk = storage.NewChunk(rootAddr, nil)
+ chunk.SData = chunkData
+ chunk.Size = int64(len(chunkData))
+
+ return chunk, metaHash, nil
+}
+
+// metadataHash returns the metadata chunk root address and metadata hash
+// that help identify and ascertain ownership of this resource
+// We compute it as rootAddr = H(H(metadata), ownerAddr)
+// Where H() is SHA3
+// metadata are all the metadata fields, except ownerAddr
+// ownerAddr is the public address of the resource owner
+// Update chunks must carry a rootAddr reference and metaHash in order to be verified
+// This way, a node that receives an update can check the signature, recover the public address
+// and check the ownership by computing H(ownerAddr, metaHash) and comparing it to the rootAddr
+// the resource is claiming to update without having to lookup the metadata chunk.
+// see verifyResourceOwnerhsip in signedupdate.go
+func metadataHash(chunkData []byte) (rootAddr, metaHash []byte) {
+ hasher := hashPool.Get().(hash.Hash)
+ defer hashPool.Put(hasher)
+ hasher.Reset()
+ hasher.Write(chunkData[:len(chunkData)-common.AddressLength])
+ metaHash = hasher.Sum(nil)
+ hasher.Reset()
+ hasher.Write(metaHash)
+ hasher.Write(chunkData[len(chunkData)-common.AddressLength:])
+ rootAddr = hasher.Sum(nil)
+ return
+}
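
As an illustration of the derivation described above, here is a minimal, self-contained sketch. It assumes H is Keccak-256 (swarm's SHA3 hasher) and that the owner address occupies the last 20 bytes of the serialized metadata, as in binaryPut; the helper name is hypothetical and not part of this change.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

// deriveRootAddr mirrors metadataHash: it hashes the metadata part of the chunk
// (everything except the trailing owner address), then hashes that digest
// together with the owner address to obtain the root chunk address.
func deriveRootAddr(chunkData []byte) (rootAddr, metaHash []byte) {
	const addressLength = 20 // length of an Ethereum address
	split := len(chunkData) - addressLength
	metaHash = crypto.Keccak256(chunkData[:split])           // H(metadata)
	rootAddr = crypto.Keccak256(metaHash, chunkData[split:]) // H(H(metadata), ownerAddr)
	return rootAddr, metaHash
}

func main() {
	chunk := make([]byte, 24+20) // dummy metadata followed by a zero owner address
	root, meta := deriveRootAddr(chunk)
	fmt.Printf("metaHash: %x\nrootAddr: %x\n", meta, root)
}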
diff --git a/swarm/storage/mru/metadata_test.go b/swarm/storage/mru/metadata_test.go
new file mode 100644
index 000000000..abbac6e3e
--- /dev/null
+++ b/swarm/storage/mru/metadata_test.go
@@ -0,0 +1,126 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+package mru
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+func compareByteSliceToExpectedHex(t *testing.T, variableName string, actualValue []byte, expectedHex string) {
+ if hexutil.Encode(actualValue) != expectedHex {
+ t.Fatalf("%s: Expected %s to be %s, got %s", t.Name(), variableName, expectedHex, hexutil.Encode(actualValue))
+ }
+}
+
+func getTestMetadata() *ResourceMetadata {
+ return &ResourceMetadata{
+ Name: "world news report, every hour, on the hour",
+ StartTime: Timestamp{
+ Time: 1528880400,
+ },
+ Frequency: 3600,
+ Owner: newCharlieSigner().Address(),
+ }
+}
+
+func TestMetadataSerializerDeserializer(t *testing.T) {
+ metadata := *getTestMetadata()
+
+ rootAddr, metaHash, chunkData, err := metadata.serializeAndHash() // creates hashes and marshals, in one go
+ if err != nil {
+ t.Fatal(err)
+ }
+ const expectedRootAddr = "0xfb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fb"
+ const expectedMetaHash = "0xf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0"
+ const expectedChunkData = "0x00004f0010dd205b00000000100e0000000000002a776f726c64206e657773207265706f72742c20657665727920686f75722c206f6e2074686520686f7572876a8936a7cd0b79ef0735ad0896c1afe278781c"
+
+ compareByteSliceToExpectedHex(t, "rootAddr", rootAddr, expectedRootAddr)
+ compareByteSliceToExpectedHex(t, "metaHash", metaHash, expectedMetaHash)
+ compareByteSliceToExpectedHex(t, "chunkData", chunkData, expectedChunkData)
+
+ recoveredMetadata := ResourceMetadata{}
+ if err := recoveredMetadata.binaryGet(chunkData); err != nil {
+ t.Fatalf("Error deserializing metadata: %s", err)
+ }
+
+ if recoveredMetadata != metadata {
+ t.Fatalf("Expected that the recovered metadata equals the marshalled metadata")
+ }
+
+ // we are going to mess with the data, so create a backup to go back to it for the next test
+ backup := make([]byte, len(chunkData))
+ copy(backup, chunkData)
+
+ chunkData = []byte{1, 2, 3}
+ if err := recoveredMetadata.binaryGet(chunkData); err == nil {
+ t.Fatal("Expected binaryGet to fail since chunk is too small")
+ }
+
+ // restore backup
+ chunkData = make([]byte, len(backup))
+ copy(chunkData, backup)
+
+ // mess with the prefix so it is not zero
+ chunkData[0] = 7
+ chunkData[1] = 9
+
+ if err := recoveredMetadata.binaryGet(chunkData); err == nil {
+ t.Fatal("Expected binaryGet to fail since prefix bytes are not zero")
+ }
+
+ // restore backup
+ chunkData = make([]byte, len(backup))
+ copy(chunkData, backup)
+
+ // mess with the length header to trigger an error
+ chunkData[2] = 255
+ chunkData[3] = 44
+ if err := recoveredMetadata.binaryGet(chunkData); err == nil {
+ t.Fatal("Expected binaryGet to fail since header length does not match")
+ }
+
+ // restore backup
+ chunkData = make([]byte, len(backup))
+ copy(chunkData, backup)
+
+ // mess with name length header to trigger a chunk too short error
+ chunkData[20] = 255
+ if err := recoveredMetadata.binaryGet(chunkData); err == nil {
+ t.Fatal("Expected binaryGet to fail since name length is incorrect")
+ }
+
+ // restore backup
+ chunkData = make([]byte, len(backup))
+ copy(chunkData, backup)
+
+ // mess with the name length header to trigger a leftover-bytes-to-read error
+ chunkData[20] = 3
+ if err := recoveredMetadata.binaryGet(chunkData); err == nil {
+ t.Fatal("Expected binaryGet to fail since name length is too small")
+ }
+}
+
+func TestMetadataSerializerLengthCheck(t *testing.T) {
+ metadata := *getTestMetadata()
+
+ // make a slice that is too small to contain the metadata
+ serializedMetadata := make([]byte, 4)
+
+ if err := metadata.binaryPut(serializedMetadata); err == nil {
+ t.Fatal("Expected metadata.binaryPut to fail, since target slice is too small")
+ }
+
+}
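
As a worked breakdown, the expectedChunkData constant in the test above decodes into the layout written by binaryPut in metadata.go:

0000                 two zero prefix bytes, marking a metadata (root) chunk
4f00                 metadata length = 0x004f = 79 (chunk length minus the 4 prefix bytes), little-endian
10dd205b00000000     StartTime.Time = 0x5b20dd10 = 1528880400, little-endian
100e000000000000     Frequency = 0x0e10 = 3600, little-endian
2a                   name length = 42
776f726c64...6f7572  "world news report, every hour, on the hour" (42 bytes)
876a8936...78781c    owner address (20 bytes)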
diff --git a/swarm/storage/mru/request.go b/swarm/storage/mru/request.go
new file mode 100644
index 000000000..dd71f855d
--- /dev/null
+++ b/swarm/storage/mru/request.go
@@ -0,0 +1,297 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+// updateRequestJSON represents a JSON-serialized UpdateRequest
+type updateRequestJSON struct {
+ Name string `json:"name,omitempty"`
+ Frequency uint64 `json:"frequency,omitempty"`
+ StartTime uint64 `json:"startTime,omitempty"`
+ Owner string `json:"ownerAddr,omitempty"`
+ RootAddr string `json:"rootAddr,omitempty"`
+ MetaHash string `json:"metaHash,omitempty"`
+ Version uint32 `json:"version,omitempty"`
+ Period uint32 `json:"period,omitempty"`
+ Data string `json:"data,omitempty"`
+ Multihash bool `json:"multiHash"`
+ Signature string `json:"signature,omitempty"`
+}
+
+// Request represents an update and/or resource create message
+type Request struct {
+ SignedResourceUpdate
+ metadata ResourceMetadata
+ isNew bool
+}
+
+var zeroAddr = common.Address{}
+
+// NewCreateUpdateRequest returns a ready to sign request to create and initialize a resource with data
+func NewCreateUpdateRequest(metadata *ResourceMetadata) (*Request, error) {
+
+ request, err := NewCreateRequest(metadata)
+ if err != nil {
+ return nil, err
+ }
+
+ // get the current time
+ now := TimestampProvider.Now().Time
+
+ request.version = 1
+ request.period, err = getNextPeriod(metadata.StartTime.Time, now, metadata.Frequency)
+ if err != nil {
+ return nil, err
+ }
+ return request, nil
+}
+
+// NewCreateRequest returns a request to create a new resource
+func NewCreateRequest(metadata *ResourceMetadata) (request *Request, err error) {
+ if metadata.StartTime.Time == 0 { // get the current time
+ metadata.StartTime = TimestampProvider.Now()
+ }
+
+ if metadata.Owner == zeroAddr {
+ return nil, NewError(ErrInvalidValue, "OwnerAddr is not set")
+ }
+
+ request = &Request{
+ metadata: *metadata,
+ }
+ request.rootAddr, request.metaHash, _, err = request.metadata.serializeAndHash()
+ if err != nil {
+ return nil, err
+ }
+ request.isNew = true
+ return request, nil
+}
+
+// Frequency returns the resource's expected update frequency
+func (r *Request) Frequency() uint64 {
+ return r.metadata.Frequency
+}
+
+// Name returns the resource human-readable name
+func (r *Request) Name() string {
+ return r.metadata.Name
+}
+
+// Multihash returns true if the resource data should be interpreted as a multihash
+func (r *Request) Multihash() bool {
+ return r.multihash
+}
+
+// Period returns in which period the resource will be published
+func (r *Request) Period() uint32 {
+ return r.period
+}
+
+// Version returns the resource version to publish
+func (r *Request) Version() uint32 {
+ return r.version
+}
+
+// RootAddr returns the metadata chunk address
+func (r *Request) RootAddr() storage.Address {
+ return r.rootAddr
+}
+
+// StartTime returns the time that the resource was/will be created at
+func (r *Request) StartTime() Timestamp {
+ return r.metadata.StartTime
+}
+
+// Owner returns the resource owner's address
+func (r *Request) Owner() common.Address {
+ return r.metadata.Owner
+}
+
+// Sign signs the resource update and sets the owner address field to the signer's address
+func (r *Request) Sign(signer Signer) error {
+ if r.metadata.Owner != zeroAddr && r.metadata.Owner != signer.Address() {
+ return NewError(ErrInvalidSignature, "Signer does not match current owner of the resource")
+ }
+
+ if err := r.SignedResourceUpdate.Sign(signer); err != nil {
+ return err
+ }
+ r.metadata.Owner = signer.Address()
+ return nil
+}
+
+// SetData stores the payload data the resource will be updated with
+func (r *Request) SetData(data []byte, multihash bool) {
+ r.data = data
+ r.multihash = multihash
+ r.signature = nil
+ if !r.isNew {
+ r.metadata.Frequency = 0 // mark as update
+ }
+}
+
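+// IsNew returns whether this request is meant to create a new resource rather than update an existing one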
+func (r *Request) IsNew() bool {
+ return r.metadata.Frequency > 0 && (r.period <= 1 || r.version <= 1)
+}
+
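+// IsUpdate returns whether this request carries a signature and can therefore be applied as an update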
+func (r *Request) IsUpdate() bool {
+ return r.signature != nil
+}
+
+// fromJSON takes an update request JSON and populates a Request
+func (r *Request) fromJSON(j *updateRequestJSON) error {
+
+ r.version = j.Version
+ r.period = j.Period
+ r.multihash = j.Multihash
+ r.metadata.Name = j.Name
+ r.metadata.Frequency = j.Frequency
+ r.metadata.StartTime.Time = j.StartTime
+
+ if err := decodeHexArray(r.metadata.Owner[:], j.Owner, "ownerAddr"); err != nil {
+ return err
+ }
+
+ var err error
+ if j.Data != "" {
+ r.data, err = hexutil.Decode(j.Data)
+ if err != nil {
+ return NewError(ErrInvalidValue, "Cannot decode data")
+ }
+ }
+
+ var declaredRootAddr storage.Address
+ var declaredMetaHash []byte
+
+ declaredRootAddr, err = decodeHexSlice(j.RootAddr, storage.KeyLength, "rootAddr")
+ if err != nil {
+ return err
+ }
+ declaredMetaHash, err = decodeHexSlice(j.MetaHash, 32, "metaHash")
+ if err != nil {
+ return err
+ }
+
+ if r.IsNew() {
+ // for new resource creation, rootAddr and metaHash are optional because
+ // we can derive them from the content itself.
+ // however, if the user sent them, we check them for consistency.
+
+ r.rootAddr, r.metaHash, _, err = r.metadata.serializeAndHash()
+ if err != nil {
+ return err
+ }
+ if j.RootAddr != "" && !bytes.Equal(declaredRootAddr, r.rootAddr) {
+ return NewError(ErrInvalidValue, "rootAddr does not match resource metadata")
+ }
+ if j.MetaHash != "" && !bytes.Equal(declaredMetaHash, r.metaHash) {
+ return NewError(ErrInvalidValue, "metaHash does not match resource metadata")
+ }
+
+ } else {
+ //Update message
+ r.rootAddr = declaredRootAddr
+ r.metaHash = declaredMetaHash
+ }
+
+ if j.Signature != "" {
+ sigBytes, err := hexutil.Decode(j.Signature)
+ if err != nil || len(sigBytes) != signatureLength {
+ return NewError(ErrInvalidSignature, "Cannot decode signature")
+ }
+ r.signature = new(Signature)
+ r.updateAddr = r.UpdateAddr()
+ copy(r.signature[:], sigBytes)
+ }
+ return nil
+}
+
+func decodeHexArray(dst []byte, src, name string) error {
+ bytes, err := decodeHexSlice(src, len(dst), name)
+ if err != nil {
+ return err
+ }
+ if bytes != nil {
+ copy(dst, bytes)
+ }
+ return nil
+}
+
+func decodeHexSlice(src string, expectedLength int, name string) (bytes []byte, err error) {
+ if src != "" {
+ bytes, err = hexutil.Decode(src)
+ if err != nil || len(bytes) != expectedLength {
+ return nil, NewErrorf(ErrInvalidValue, "Cannot decode %s", name)
+ }
+ }
+ return bytes, nil
+}
+
+// UnmarshalJSON takes a JSON structure stored in a byte array and populates the Request object
+// Implements json.Unmarshaler interface
+func (r *Request) UnmarshalJSON(rawData []byte) error {
+ var requestJSON updateRequestJSON
+ if err := json.Unmarshal(rawData, &requestJSON); err != nil {
+ return err
+ }
+ return r.fromJSON(&requestJSON)
+}
+
+// MarshalJSON takes an update request and encodes it as a JSON structure into a byte array
+// Implements json.Marshaler interface
+func (r *Request) MarshalJSON() (rawData []byte, err error) {
+ var signatureString, dataHashString, rootAddrString, metaHashString string
+ if r.signature != nil {
+ signatureString = hexutil.Encode(r.signature[:])
+ }
+ if r.data != nil {
+ dataHashString = hexutil.Encode(r.data)
+ }
+ if r.rootAddr != nil {
+ rootAddrString = hexutil.Encode(r.rootAddr)
+ }
+ if r.metaHash != nil {
+ metaHashString = hexutil.Encode(r.metaHash)
+ }
+ var ownerAddrString string
+ if r.metadata.Frequency == 0 {
+ ownerAddrString = ""
+ } else {
+ ownerAddrString = hexutil.Encode(r.metadata.Owner[:])
+ }
+
+ requestJSON := &updateRequestJSON{
+ Name: r.metadata.Name,
+ Frequency: r.metadata.Frequency,
+ StartTime: r.metadata.StartTime.Time,
+ Version: r.version,
+ Period: r.period,
+ Owner: ownerAddrString,
+ Data: dataHashString,
+ Multihash: r.multihash,
+ Signature: signatureString,
+ RootAddr: rootAddrString,
+ MetaHash: metaHashString,
+ }
+
+ return json.Marshal(requestJSON)
+}
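
A hedged usage sketch of the Request API introduced above (not part of this change): it assumes a Signer implementation and the package-level TimestampProvider are configured elsewhere, as they are in the tests that follow.

package mru

import "fmt"

// createAndSignExample sketches the intended flow: build metadata, derive a
// create/update request from it, attach the first payload, sign and serialize.
func createAndSignExample(signer Signer) ([]byte, error) {
	metadata := &ResourceMetadata{
		Name:      "example resource",
		Frequency: 3600, // expect one update per hour
		Owner:     signer.Address(),
	}
	request, err := NewCreateUpdateRequest(metadata)
	if err != nil {
		return nil, err
	}
	request.SetData([]byte("first update"), false) // plain payload, not a multihash
	if err := request.Sign(signer); err != nil {
		return nil, err
	}
	body, err := request.MarshalJSON()
	if err != nil {
		return nil, err
	}
	fmt.Printf("ready to send: %s\n", body)
	return body, nil
}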
diff --git a/swarm/storage/mru/request_test.go b/swarm/storage/mru/request_test.go
new file mode 100644
index 000000000..dba55b27e
--- /dev/null
+++ b/swarm/storage/mru/request_test.go
@@ -0,0 +1,175 @@
+package mru
+
+import (
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+func areEqualJSON(s1, s2 string) (bool, error) {
+ //credit for the trick: turtlemonvh https://gist.github.com/turtlemonvh/e4f7404e28387fadb8ad275a99596f67
+ var o1 interface{}
+ var o2 interface{}
+
+ err := json.Unmarshal([]byte(s1), &o1)
+ if err != nil {
+ return false, fmt.Errorf("Error mashalling string 1 :: %s", err.Error())
+ }
+ err = json.Unmarshal([]byte(s2), &o2)
+ if err != nil {
+ return false, fmt.Errorf("Error mashalling string 2 :: %s", err.Error())
+ }
+
+ return reflect.DeepEqual(o1, o2), nil
+}
+
+// TestEncodingDecodingUpdateRequests ensures that requests are serialized properly
+// while also checking cryptographically that only the owner of a resource can update it.
+func TestEncodingDecodingUpdateRequests(t *testing.T) {
+
+ signer := newCharlieSigner() //Charlie, our good guy
+ falseSigner := newBobSigner() //Bob will play the bad guy again
+
+ // Create a resource in our good guy Charlie's name
+ createRequest, err := NewCreateRequest(&ResourceMetadata{
+ Name: "a good resource name",
+ Frequency: 300,
+ StartTime: Timestamp{Time: 1528900000},
+ Owner: signer.Address()})
+
+ if err != nil {
+ t.Fatalf("Error creating resource name: %s", err)
+ }
+
+ // We now encode the create message to simulate we send it over the wire
+ messageRawData, err := createRequest.MarshalJSON()
+ if err != nil {
+ t.Fatalf("Error encoding create resource request: %s", err)
+ }
+
+ // ... the message arrives and is decoded...
+ var recoveredCreateRequest Request
+ if err := recoveredCreateRequest.UnmarshalJSON(messageRawData); err != nil {
+ t.Fatalf("Error decoding create resource request: %s", err)
+ }
+
+ // ... but verification should fail because it is not signed!
+ if err := recoveredCreateRequest.Verify(); err == nil {
+ t.Fatal("Expected Verify to fail since the message is not signed")
+ }
+
+ // We now assume that the resource was created and propagated. With rootAddr we can retrieve the resource metadata
+ // and recover the information above. To sign an update, we need the rootAddr and the metaHash to construct
+ // proof of ownership
+
+ metaHash := createRequest.metaHash
+ rootAddr := createRequest.rootAddr
+ const expectedSignature = "0x1c2bab66dc4ed63783d62934e3a628e517888d6949aef0349f3bd677121db9aa09bbfb865904e6c50360e209e0fe6fe757f8a2474cf1b34169c99b95e3fd5a5101"
+ const expectedJSON = `{"rootAddr":"0x6e744a730f7ea0881528576f0354b6268b98e35a6981ef703153ff1b8d32bbef","metaHash":"0x0c0d5c18b89da503af92302a1a64fab6acb60f78e288eb9c3d541655cd359b60","version":1,"period":7,"data":"0x5468697320686f75722773207570646174653a20537761726d2039392e3020686173206265656e2072656c656173656421","multiHash":false}`
+
+ //Put together an unsigned update request that we will serialize to send it to the signer.
+ data := []byte("This hour's update: Swarm 99.0 has been released!")
+ request := &Request{
+ SignedResourceUpdate: SignedResourceUpdate{
+ resourceUpdate: resourceUpdate{
+ updateHeader: updateHeader{
+ UpdateLookup: UpdateLookup{
+ period: 7,
+ version: 1,
+ rootAddr: rootAddr,
+ },
+ multihash: false,
+ metaHash: metaHash,
+ },
+ data: data,
+ },
+ },
+ }
+
+ messageRawData, err = request.MarshalJSON()
+ if err != nil {
+ t.Fatalf("Error encoding update request: %s", err)
+ }
+
+ equalJSON, err := areEqualJSON(string(messageRawData), expectedJSON)
+ if err != nil {
+ t.Fatalf("Error decoding update request JSON: %s", err)
+ }
+ if !equalJSON {
+ t.Fatalf("Received a different JSON message. Expected %s, got %s", expectedJSON, string(messageRawData))
+ }
+
+ // now the encoded message messageRawData is sent over the wire and arrives at the signer
+
+ //Attempt to extract an UpdateRequest out of the encoded message
+ var recoveredRequest Request
+ if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil {
+ t.Fatalf("Error decoding update request: %s", err)
+ }
+
+ //sign the request and see if it matches our predefined signature above.
+ if err := recoveredRequest.Sign(signer); err != nil {
+ t.Fatalf("Error signing request: %s", err)
+ }
+
+ compareByteSliceToExpectedHex(t, "signature", recoveredRequest.signature[:], expectedSignature)
+
+ // mess with the signature and see what happens. To do so, we briefly decode the message as JSON
+ // and alter the signature field.
+ var j updateRequestJSON
+ if err := json.Unmarshal([]byte(expectedJSON), &j); err != nil {
+ t.Fatal("Error unmarshalling test json, check expectedJSON constant")
+ }
+ j.Signature = "Certainly not a signature"
+ corruptMessage, _ := json.Marshal(j) // encode the message with the bad signature
+ var corruptRequest Request
+ if err = corruptRequest.UnmarshalJSON(corruptMessage); err == nil {
+ t.Fatal("Expected DecodeUpdateRequest to fail when trying to interpret a corrupt message with an invalid signature")
+ }
+
+ // Now imagine Evil Bob (why always Bob, poor Bob) attempts to update Charlie's resource,
+ // signing a message with his private key
+ if err := request.Sign(falseSigner); err != nil {
+ t.Fatalf("Error signing: %s", err)
+ }
+
+ // Now Bob encodes the message to send it over the wire...
+ messageRawData, err = request.MarshalJSON()
+ if err != nil {
+ t.Fatalf("Error encoding message:%s", err)
+ }
+
+ // ... the message arrives at our Swarm node and is decoded.
+ recoveredRequest = Request{}
+ if err := recoveredRequest.UnmarshalJSON(messageRawData); err != nil {
+ t.Fatalf("Error decoding message:%s", err)
+ }
+
+ // Before discovering Bob's misdemeanor, let's see what would happen if we mess
+ // with the signature big time to see if Verify catches it
+ savedSignature := *recoveredRequest.signature // save the signature for later
+ binary.LittleEndian.PutUint64(recoveredRequest.signature[5:], 556845463424) // write some random data to break the signature
+ if err = recoveredRequest.Verify(); err == nil {
+ t.Fatal("Expected Verify to fail on corrupt signature")
+ }
+
+ // restore Evil Bob's signature from corruption
+ *recoveredRequest.signature = savedSignature
+
+ // Now the signature is not corrupt; however, Verify should still fail because Bob doesn't own the resource
+ if err = recoveredRequest.Verify(); err == nil {
+ t.Fatalf("Expected Verify to fail because this resource belongs to Charlie, not Bob the attacker:%s", err)
+ }
+
+ // Sign with our friend Charlie's private key
+ if err := recoveredRequest.Sign(signer); err != nil {
+ t.Fatalf("Error signing with the correct private key: %s", err)
+ }
+
+ // And now, Verify should work since this resource belongs to Charlie
+ if err = recoveredRequest.Verify(); err != nil {
+ t.Fatalf("Error verifying that Charlie, the good guy, can sign his resource:%s", err)
+ }
+}
diff --git a/swarm/storage/mru/resource.go b/swarm/storage/mru/resource.go
index 4f5a4f44c..aa83ff62a 100644
--- a/swarm/storage/mru/resource.go
+++ b/swarm/storage/mru/resource.go
@@ -19,110 +19,25 @@ package mru
import (
"bytes"
"context"
- "encoding/binary"
- "errors"
- "fmt"
- "math/big"
- "path/filepath"
- "sync"
"time"
- "golang.org/x/net/idna"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/contracts/ens"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/swarm/log"
- "github.com/ethereum/go-ethereum/swarm/multihash"
"github.com/ethereum/go-ethereum/swarm/storage"
)
const (
- signatureLength = 65
- metadataChunkOffsetSize = 18
- DbDirName = "resource"
- chunkSize = 4096 // temporary until we implement FileStore in the resourcehandler
- defaultStoreTimeout = 4000 * time.Millisecond
- hasherCount = 8
- resourceHash = storage.SHA3Hash
- defaultRetrieveTimeout = 100 * time.Millisecond
+ defaultStoreTimeout = 4000 * time.Millisecond
+ hasherCount = 8
+ resourceHashAlgorithm = storage.SHA3Hash
+ defaultRetrieveTimeout = 100 * time.Millisecond
)
-type blockEstimator struct {
- Start time.Time
- Average time.Duration
-}
-
-// TODO: Average must be adjusted when blockchain connection is present and synced
-func NewBlockEstimator() *blockEstimator {
- sampleDate, _ := time.Parse(time.RFC3339, "2018-05-04T20:35:22Z") // from etherscan.io
- sampleBlock := int64(3169691) // from etherscan.io
- ropstenStart, _ := time.Parse(time.RFC3339, "2016-11-20T11:48:50Z") // from etherscan.io
- ns := sampleDate.Sub(ropstenStart).Nanoseconds()
- period := int(ns / sampleBlock)
- parsestring := fmt.Sprintf("%dns", int(float64(period)*1.0005)) // increase the blockcount a little, so we don't overshoot the read block height; if we do, we will never find the updates when getting synced data
- periodNs, _ := time.ParseDuration(parsestring)
- return &blockEstimator{
- Start: ropstenStart,
- Average: periodNs,
- }
-}
-
-func (b *blockEstimator) HeaderByNumber(context.Context, string, *big.Int) (*types.Header, error) {
- return &types.Header{
- Number: big.NewInt(time.Since(b.Start).Nanoseconds() / b.Average.Nanoseconds()),
- }, nil
-}
-
-type Error struct {
- code int
- err string
-}
-
-func (e *Error) Error() string {
- return e.err
-}
-
-func (e *Error) Code() int {
- return e.code
-}
-
-func NewError(code int, s string) error {
- if code < 0 || code >= ErrCnt {
- panic("no such error code!")
- }
- r := &Error{
- err: s,
- }
- switch code {
- case ErrNotFound, ErrIO, ErrUnauthorized, ErrInvalidValue, ErrDataOverflow, ErrNothingToReturn, ErrInvalidSignature, ErrNotSynced, ErrPeriodDepth, ErrCorruptData:
- r.code = code
- }
- return r
-}
-
-type Signature [signatureLength]byte
-
-type LookupParams struct {
- Limit bool
- Max uint32
-}
-
-// Encapsulates an specific resource update. When synced it contains the most recent
-// version of the resource update data.
+// resource caches resource data and the metadata of its root chunk.
type resource struct {
+ resourceUpdate
+ ResourceMetadata
*bytes.Reader
- Multihash bool
- name string
- nameHash common.Hash
- startBlock uint64
- lastPeriod uint32
- lastKey storage.Address
- frequency uint64
- version uint32
- data []byte
- updated time.Time
+ lastKey storage.Address
+ updated time.Time
}
func (r *resource) Context() context.Context {
@@ -134,937 +49,28 @@ func (r *resource) isSynced() bool {
return !r.updated.IsZero()
}
-func (r *resource) NameHash() common.Hash {
- return r.nameHash
-}
-
-func (r *resource) Size(context.Context, chan bool) (int64, error) {
+// implements storage.LazySectionReader
+func (r *resource) Size(ctx context.Context, _ chan bool) (int64, error) {
if !r.isSynced() {
return 0, NewError(ErrNotSynced, "Not synced")
}
- return int64(len(r.data)), nil
+ return int64(len(r.resourceUpdate.data)), nil
}
+// Name returns the resource's human-readable name
func (r *resource) Name() string {
- return r.name
-}
-
-func (r *resource) UnmarshalBinary(data []byte) error {
- r.startBlock = binary.LittleEndian.Uint64(data[:8])
- r.frequency = binary.LittleEndian.Uint64(data[8:16])
- r.name = string(data[16:])
- return nil
-}
-
-func (r *resource) MarshalBinary() ([]byte, error) {
- b := make([]byte, 16+len(r.name))
- binary.LittleEndian.PutUint64(b, r.startBlock)
- binary.LittleEndian.PutUint64(b[8:], r.frequency)
- copy(b[16:], []byte(r.name))
- return b, nil
-}
-
-type headerGetter interface {
- HeaderByNumber(context.Context, string, *big.Int) (*types.Header, error)
-}
-
-type ownerValidator interface {
- ValidateOwner(name string, address common.Address) (bool, error)
-}
-
-// Mutable resource is an entity which allows updates to a resource
-// without resorting to ENS on each update.
-// The update scheme is built on swarm chunks with chunk keys following
-// a predictable, versionable pattern.
-//
-// Updates are defined to be periodic in nature, where periods are
-// expressed in terms of number of blocks.
-//
-// The root entry of a mutable resource is tied to a unique identifier,
-// typically - but not necessarily - an ens name. The identifier must be
-// an valid IDNA string. It also contains the block number
-// when the resource update was first registered, and
-// the block frequency with which the resource will be updated, both of
-// which are stored as little-endian uint64 values in the database (for a
-// total of 16 bytes). It also contains the unique identifier.
-// It is stored in a separate content-addressed chunk (call it the metadata chunk),
-// with the following layout:
-//
-// (0x0000|startblock|frequency|identifier)
-//
-// (The two first zero-value bytes are used for disambiguation by the chunk validator,
-// and update chunk will always have a value > 0 there.)
-//
-// The root entry tells the requester from when the mutable resource was
-// first added (block number) and in which block number to look for the
-// actual updates. Thus, a resource update for identifier "føø.bar"
-// starting at block 4200 with frequency 42 will have updates on block 4242,
-// 4284, 4326 and so on.
-//
-// Actual data updates are also made in the form of swarm chunks. The keys
-// of the updates are the hash of a concatenation of properties as follows:
-//
-// sha256(period|version|namehash)
-//
-// The period is (currentblock - startblock) / frequency
-//
-// Using our previous example, this means that a period 3 will have 4326 as
-// the block number.
-//
-// If more than one update is made to the same block number, incremental
-// version numbers are used successively.
-//
-// A lookup agent need only know the identifier name in order to get the versions
-//
-// the resourcedata is:
-// headerlength|period|version|identifier|data
-//
-// if a validator is active, the chunk data is:
-// resourcedata|sign(resourcedata)
-// otherwise, the chunk data is the same as the resourcedata
-//
-// headerlength is a 16 bit value containing the byte length of period|version|name
-//
-// TODO: Include modtime in chunk data + signature
-type Handler struct {
- chunkStore *storage.NetStore
- HashSize int
- signer Signer
- headerGetter headerGetter
- ownerValidator ownerValidator
- resources map[string]*resource
- hashPool sync.Pool
- resourceLock sync.RWMutex
- storeTimeout time.Duration
- queryMaxPeriods *LookupParams
-}
-
-type HandlerParams struct {
- QueryMaxPeriods *LookupParams
- Signer Signer
- HeaderGetter headerGetter
- OwnerValidator ownerValidator
-}
-
-// Create or open resource update chunk store
-func NewHandler(params *HandlerParams) (*Handler, error) {
- if params.QueryMaxPeriods == nil {
- params.QueryMaxPeriods = &LookupParams{
- Limit: false,
- }
- }
- rh := &Handler{
- headerGetter: params.HeaderGetter,
- ownerValidator: params.OwnerValidator,
- resources: make(map[string]*resource),
- storeTimeout: defaultStoreTimeout,
- signer: params.Signer,
- hashPool: sync.Pool{
- New: func() interface{} {
- return storage.MakeHashFunc(resourceHash)()
- },
- },
- queryMaxPeriods: params.QueryMaxPeriods,
- }
-
- for i := 0; i < hasherCount; i++ {
- hashfunc := storage.MakeHashFunc(resourceHash)()
- if rh.HashSize == 0 {
- rh.HashSize = hashfunc.Size()
- }
- rh.hashPool.Put(hashfunc)
- }
-
- return rh, nil
-}
-
-// SetStore sets the store backend for resource updates
-func (h *Handler) SetStore(store *storage.NetStore) {
- h.chunkStore = store
-}
-
-// Validate is a chunk validation method (matches ChunkValidatorFunc signature)
-//
-// If resource update, owner is checked against ENS record of resource name inferred from chunk data
-// If parsed signature is nil, validates automatically
-// If not resource update, it validates are root chunk if length is metadataChunkOffsetSize and first two bytes are 0
-func (h *Handler) Validate(addr storage.Address, data []byte) bool {
- signature, period, version, name, parseddata, _, err := h.parseUpdate(data)
- if err != nil {
- log.Warn(err.Error())
- if len(data) > metadataChunkOffsetSize { // identifier comes after this byte range, and must be at least one byte
- if bytes.Equal(data[:2], []byte{0, 0}) {
- return true
- }
- }
- log.Error("Invalid resource chunk")
- return false
- } else if signature == nil {
- return bytes.Equal(h.resourceHash(period, version, ens.EnsNode(name)), addr)
- }
-
- digest := h.keyDataHash(addr, parseddata)
- addrSig, err := getAddressFromDataSig(digest, *signature)
- if err != nil {
- log.Error("Invalid signature on resource chunk")
- return false
- }
- ok, _ := h.checkAccess(name, addrSig)
- return ok
-}
-
-// If no ens client is supplied, resource updates are not validated
-func (h *Handler) IsValidated() bool {
- return h.ownerValidator != nil
-}
-
-// Create the resource update digest used in signatures
-func (h *Handler) keyDataHash(addr storage.Address, data []byte) common.Hash {
- hasher := h.hashPool.Get().(storage.SwarmHash)
- defer h.hashPool.Put(hasher)
- hasher.Reset()
- hasher.Write(addr[:])
- hasher.Write(data)
- return common.BytesToHash(hasher.Sum(nil))
-}
-
-// Checks if current address matches owner address of ENS
-func (h *Handler) checkAccess(name string, address common.Address) (bool, error) {
- if h.ownerValidator == nil {
- return true, nil
- }
- return h.ownerValidator.ValidateOwner(name, address)
-}
-
-// get data from current resource
-func (h *Handler) GetContent(name string) (storage.Address, []byte, error) {
- rsrc := h.get(name)
- if rsrc == nil || !rsrc.isSynced() {
- return nil, nil, NewError(ErrNotFound, " does not exist or is not synced")
- }
- return rsrc.lastKey, rsrc.data, nil
-}
-
-// Gets the period of the current data loaded in the resource
-func (h *Handler) GetLastPeriod(nameHash string) (uint32, error) {
- rsrc := h.get(nameHash)
- if rsrc == nil {
- return 0, NewError(ErrNotFound, " does not exist")
- } else if !rsrc.isSynced() {
- return 0, NewError(ErrNotSynced, " is not synced")
- }
- return rsrc.lastPeriod, nil
-}
-
-// Gets the version of the current data loaded in the resource
-func (h *Handler) GetVersion(nameHash string) (uint32, error) {
- rsrc := h.get(nameHash)
- if rsrc == nil {
- return 0, NewError(ErrNotFound, " does not exist")
- } else if !rsrc.isSynced() {
- return 0, NewError(ErrNotSynced, " is not synced")
- }
- return rsrc.version, nil
+ return r.ResourceMetadata.Name
}
-// \TODO should be hashsize * branches from the chosen chunker, implement with FileStore
-func (h *Handler) chunkSize() int64 {
- return chunkSize
-}
-
-// Creates a new root entry for a mutable resource identified by `name` with the specified `frequency`.
-//
-// The signature data should match the hash of the idna-converted name by the validator's namehash function, NOT the raw name bytes.
-//
-// The start block of the resource update will be the actual current block height of the connected network.
-func (h *Handler) New(ctx context.Context, name string, frequency uint64) (storage.Address, *resource, error) {
-
- // frequency 0 is invalid
- if frequency == 0 {
- return nil, nil, NewError(ErrInvalidValue, "Frequency cannot be 0")
- }
-
- // make sure name only contains ascii values
- if !isSafeName(name) {
- return nil, nil, NewError(ErrInvalidValue, fmt.Sprintf("Invalid name: '%s'", name))
- }
-
- nameHash := ens.EnsNode(name)
-
- // if the signer function is set, validate that the key of the signer has access to modify this ENS name
- if h.signer != nil {
- signature, err := h.signer.Sign(nameHash)
- if err != nil {
- return nil, nil, NewError(ErrInvalidSignature, fmt.Sprintf("Sign fail: %v", err))
- }
- addr, err := getAddressFromDataSig(nameHash, signature)
- if err != nil {
- return nil, nil, NewError(ErrInvalidSignature, fmt.Sprintf("Retrieve address from signature fail: %v", err))
- }
- ok, err := h.checkAccess(name, addr)
- if err != nil {
- return nil, nil, err
- } else if !ok {
- return nil, nil, NewError(ErrUnauthorized, fmt.Sprintf("Not owner of '%s'", name))
- }
- }
-
- // get our blockheight at this time
- currentblock, err := h.getBlock(ctx, name)
- if err != nil {
- return nil, nil, err
- }
-
- chunk := h.newMetaChunk(name, currentblock, frequency)
-
- h.chunkStore.Put(ctx, chunk)
- log.Debug("new resource", "name", name, "key", nameHash, "startBlock", currentblock, "frequency", frequency)
-
- // create the internal index for the resource and populate it with the data of the first version
- rsrc := &resource{
- startBlock: currentblock,
- frequency: frequency,
- name: name,
- nameHash: nameHash,
- updated: time.Now(),
- }
- h.set(nameHash.Hex(), rsrc)
-
- return chunk.Addr, rsrc, nil
-}
-
-func (h *Handler) newMetaChunk(name string, startBlock uint64, frequency uint64) *storage.Chunk {
- // the metadata chunk points to data of first blockheight + update frequency
- // from this we know from what blockheight we should look for updates, and how often
- // it also contains the name of the resource, so we know what resource we are working with
- data := make([]byte, metadataChunkOffsetSize+len(name))
-
- // root block has first two bytes both set to 0, which distinguishes from update bytes
- val := make([]byte, 8)
- binary.LittleEndian.PutUint64(val, startBlock)
- copy(data[2:10], val)
- binary.LittleEndian.PutUint64(val, frequency)
- copy(data[10:18], val)
- copy(data[18:], []byte(name))
-
- // the key of the metadata chunk is content-addressed
- // if it wasn't we couldn't replace it later
- // resolving this relationship is left up to external agents (for example ENS)
- hasher := h.hashPool.Get().(storage.SwarmHash)
- hasher.Reset()
- hasher.Write(data)
- key := hasher.Sum(nil)
- h.hashPool.Put(hasher)
-
- // make the chunk and send it to swarm
- chunk := storage.NewChunk(key, nil)
- chunk.SData = make([]byte, metadataChunkOffsetSize+len(name))
- copy(chunk.SData, data)
- return chunk
-}
-
-// Searches and retrieves the specific version of the resource update identified by `name`
-// at the specific block height
-//
-// If refresh is set to true, the resource data will be reloaded from the resource update
-// metadata chunk.
-// It is the callers responsibility to make sure that this chunk exists (if the resource
-// update root data was retrieved externally, it typically doesn't)
-func (h *Handler) LookupVersionByName(ctx context.Context, name string, period uint32, version uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
- return h.LookupVersion(ctx, ens.EnsNode(name), period, version, refresh, maxLookup)
-}
-
-func (h *Handler) LookupVersion(ctx context.Context, nameHash common.Hash, period uint32, version uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
- rsrc := h.get(nameHash.Hex())
- if rsrc == nil {
- return nil, NewError(ErrNothingToReturn, "resource not loaded")
- }
- return h.lookup(rsrc, period, version, refresh, maxLookup)
-}
-
-// Retrieves the latest version of the resource update identified by `name`
-// at the specified block height
-//
-// If an update is found, version numbers are iterated until failure, and the last
-// successfully retrieved version is copied to the corresponding resources map entry
-// and returned.
-//
-// See also (*Handler).LookupVersion
-func (h *Handler) LookupHistoricalByName(ctx context.Context, name string, period uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
- return h.LookupHistorical(ctx, ens.EnsNode(name), period, refresh, maxLookup)
-}
-
-func (h *Handler) LookupHistorical(ctx context.Context, nameHash common.Hash, period uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
- rsrc := h.get(nameHash.Hex())
- if rsrc == nil {
- return nil, NewError(ErrNothingToReturn, "resource not loaded")
- }
- return h.lookup(rsrc, period, 0, refresh, maxLookup)
-}
-
-// Retrieves the latest version of the resource update identified by `name`
-// at the next update block height
-//
-// It starts at the next period after the current block height, and upon failure
-// tries the corresponding keys of each previous period until one is found
-// (or startBlock is reached, in which case there are no updates).
-//
-// Version iteration is done as in (*Handler).LookupHistorical
-//
-// See also (*Handler).LookupHistorical
-func (h *Handler) LookupLatestByName(ctx context.Context, name string, refresh bool, maxLookup *LookupParams) (*resource, error) {
- return h.LookupLatest(ctx, ens.EnsNode(name), refresh, maxLookup)
-}
-
-func (h *Handler) LookupLatest(ctx context.Context, nameHash common.Hash, refresh bool, maxLookup *LookupParams) (*resource, error) {
-
- // get our blockheight at this time and the next block of the update period
- rsrc := h.get(nameHash.Hex())
- if rsrc == nil {
- return nil, NewError(ErrNothingToReturn, "resource not loaded")
- }
- currentblock, err := h.getBlock(ctx, rsrc.name)
- if err != nil {
- return nil, err
- }
- nextperiod, err := getNextPeriod(rsrc.startBlock, currentblock, rsrc.frequency)
- if err != nil {
- return nil, err
- }
- return h.lookup(rsrc, nextperiod, 0, refresh, maxLookup)
-}
-
-// Returns the resource before the one currently loaded in the resource index
-//
-// This is useful where resource updates are used incrementally in contrast to
-// merely replacing content.
-//
-// Requires a synced resource object
-func (h *Handler) LookupPreviousByName(ctx context.Context, name string, maxLookup *LookupParams) (*resource, error) {
- return h.LookupPrevious(ctx, ens.EnsNode(name), maxLookup)
-}
-
-func (h *Handler) LookupPrevious(ctx context.Context, nameHash common.Hash, maxLookup *LookupParams) (*resource, error) {
- rsrc := h.get(nameHash.Hex())
- if rsrc == nil {
- return nil, NewError(ErrNothingToReturn, "resource not loaded")
- }
- if !rsrc.isSynced() {
- return nil, NewError(ErrNotSynced, "LookupPrevious requires synced resource.")
- } else if rsrc.lastPeriod == 0 {
- return nil, NewError(ErrNothingToReturn, " not found")
- }
- if rsrc.version > 1 {
- rsrc.version--
- } else if rsrc.lastPeriod == 1 {
- return nil, NewError(ErrNothingToReturn, "Current update is the oldest")
- } else {
- rsrc.version = 0
- rsrc.lastPeriod--
- }
- return h.lookup(rsrc, rsrc.lastPeriod, rsrc.version, false, maxLookup)
-}
-
-// base code for public lookup methods
-func (h *Handler) lookup(rsrc *resource, period uint32, version uint32, refresh bool, maxLookup *LookupParams) (*resource, error) {
-
- // we can't look for anything without a store
- if h.chunkStore == nil {
- return nil, NewError(ErrInit, "Call Handler.SetStore() before performing lookups")
- }
-
- // period 0 does not exist
- if period == 0 {
- return nil, NewError(ErrInvalidValue, "period must be >0")
- }
-
- // start from the last possible block period, and iterate previous ones until we find a match
- // if we hit startBlock we're out of options
- var specificversion bool
- if version > 0 {
- specificversion = true
- } else {
- version = 1
- }
-
- var hops uint32
- if maxLookup == nil {
- maxLookup = h.queryMaxPeriods
- }
- log.Trace("resource lookup", "period", period, "version", version, "limit", maxLookup.Limit, "max", maxLookup.Max)
- for period > 0 {
- if maxLookup.Limit && hops > maxLookup.Max {
- return nil, NewError(ErrPeriodDepth, fmt.Sprintf("Lookup exceeded max period hops (%d)", maxLookup.Max))
- }
- key := h.resourceHash(period, version, rsrc.nameHash)
- chunk, err := h.chunkStore.GetWithTimeout(context.TODO(), key, defaultRetrieveTimeout)
- if err == nil {
- if specificversion {
- return h.updateIndex(rsrc, chunk)
- }
- // check if we have versions > 1. If a version fails, the previous version is used and returned.
- log.Trace("rsrc update version 1 found, checking for version updates", "period", period, "key", key)
- for {
- newversion := version + 1
- key := h.resourceHash(period, newversion, rsrc.nameHash)
- newchunk, err := h.chunkStore.GetWithTimeout(context.TODO(), key, defaultRetrieveTimeout)
- if err != nil {
- return h.updateIndex(rsrc, chunk)
- }
- chunk = newchunk
- version = newversion
- log.Trace("version update found, checking next", "version", version, "period", period, "key", key)
- }
- }
- log.Trace("rsrc update not found, checking previous period", "period", period, "key", key)
- period--
- hops++
- }
- return nil, NewError(ErrNotFound, "no updates found")
-}
-
-// Retrieves a resource metadata chunk and creates/updates the index entry for it
-// with the resulting metadata
-func (h *Handler) Load(ctx context.Context, addr storage.Address) (*resource, error) {
- chunk, err := h.chunkStore.GetWithTimeout(ctx, addr, defaultRetrieveTimeout)
- if err != nil {
- return nil, NewError(ErrNotFound, err.Error())
- }
-
- // minimum sanity check for chunk data (an update chunk first two bytes is headerlength uint16, and cannot be 0)
- // \TODO this is not enough to make sure the data isn't bogus. A normal content addressed chunk could still satisfy these criteria
- if !bytes.Equal(chunk.SData[:2], []byte{0x0, 0x0}) {
- return nil, NewError(ErrCorruptData, fmt.Sprintf("Chunk is not a resource metadata chunk"))
- } else if len(chunk.SData) <= metadataChunkOffsetSize {
- return nil, NewError(ErrNothingToReturn, fmt.Sprintf("Invalid chunk length %d, should be minimum %d", len(chunk.SData), metadataChunkOffsetSize+1))
- }
-
- // create the index entry
- rsrc := &resource{}
- rsrc.UnmarshalBinary(chunk.SData[2:])
- rsrc.nameHash = ens.EnsNode(rsrc.name)
- h.set(rsrc.nameHash.Hex(), rsrc)
- log.Trace("resource index load", "rootkey", addr, "name", rsrc.name, "namehash", rsrc.nameHash, "startblock", rsrc.startBlock, "frequency", rsrc.frequency)
- return rsrc, nil
-}
-
-// update mutable resource index map with specified content
-func (h *Handler) updateIndex(rsrc *resource, chunk *storage.Chunk) (*resource, error) {
-
- // retrieve metadata from chunk data and check that it matches this mutable resource
- signature, period, version, name, data, multihash, err := h.parseUpdate(chunk.SData)
- if rsrc.name != name {
- return nil, NewError(ErrNothingToReturn, fmt.Sprintf("Update belongs to '%s', but have '%s'", name, rsrc.name))
- }
- log.Trace("resource index update", "name", rsrc.name, "namehash", rsrc.nameHash, "updatekey", chunk.Addr, "period", period, "version", version)
-
- // check signature (if signer algorithm is present)
- // \TODO maybe this check is redundant if also checked upon retrieval of chunk
- if signature != nil {
- digest := h.keyDataHash(chunk.Addr, data)
- _, err = getAddressFromDataSig(digest, *signature)
- if err != nil {
- return nil, NewError(ErrUnauthorized, fmt.Sprintf("Invalid signature: %v", err))
- }
- }
-
- // update our rsrcs entry map
- rsrc.lastKey = chunk.Addr
- rsrc.lastPeriod = period
- rsrc.version = version
- rsrc.updated = time.Now()
- rsrc.data = make([]byte, len(data))
- rsrc.Multihash = multihash
- rsrc.Reader = bytes.NewReader(rsrc.data)
- copy(rsrc.data, data)
- log.Debug(" synced", "name", rsrc.name, "key", chunk.Addr, "period", rsrc.lastPeriod, "version", rsrc.version)
- h.set(rsrc.nameHash.Hex(), rsrc)
- return rsrc, nil
-}
-
-// retrieve update metadata from chunk data
-// mirrors newUpdateChunk()
-func (h *Handler) parseUpdate(chunkdata []byte) (*Signature, uint32, uint32, string, []byte, bool, error) {
- // absolute minimum an update chunk can contain:
- // 14 = header + one byte of name + one byte of data
- if len(chunkdata) < 14 {
- return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, "chunk less than 13 bytes cannot be a resource update chunk")
- }
- cursor := 0
- headerlength := binary.LittleEndian.Uint16(chunkdata[cursor : cursor+2])
- cursor += 2
- datalength := binary.LittleEndian.Uint16(chunkdata[cursor : cursor+2])
- cursor += 2
- var exclsignlength int
- // we need extra magic if it's a multihash, since we used datalength 0 in header as an indicator of multihash content
- // retrieve the second varint and set this as the data length
- // TODO: merge with isMultihash code
- if datalength == 0 {
- uvarintbuf := bytes.NewBuffer(chunkdata[headerlength+4:])
- r, err := binary.ReadUvarint(uvarintbuf)
- if err != nil {
- errstr := fmt.Sprintf("corrupt multihash, hash id varint could not be read: %v", err)
- log.Warn(errstr)
- return nil, 0, 0, "", nil, false, NewError(ErrCorruptData, errstr)
-
- }
- r, err = binary.ReadUvarint(uvarintbuf)
- if err != nil {
- errstr := fmt.Sprintf("corrupt multihash, hash length field could not be read: %v", err)
- log.Warn(errstr)
- return nil, 0, 0, "", nil, false, NewError(ErrCorruptData, errstr)
-
- }
- exclsignlength = int(headerlength + uint16(r))
- } else {
- exclsignlength = int(headerlength + datalength + 4)
- }
-
- // the total length excluding signature is headerlength and datalength fields plus the length of the header and the data given in these fields
- exclsignlength = int(headerlength + datalength + 4)
- if exclsignlength > len(chunkdata) || exclsignlength < 14 {
- return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, fmt.Sprintf("Reported headerlength %d + datalength %d longer than actual chunk data length %d", headerlength, exclsignlength, len(chunkdata)))
- } else if exclsignlength < 14 {
- return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, fmt.Sprintf("Reported headerlength %d + datalength %d is smaller than minimum valid resource chunk length %d", headerlength, datalength, 14))
- }
-
- // at this point we can be satisfied that the data integrity is ok
- var period uint32
- var version uint32
- var name string
- var data []byte
- period = binary.LittleEndian.Uint32(chunkdata[cursor : cursor+4])
- cursor += 4
- version = binary.LittleEndian.Uint32(chunkdata[cursor : cursor+4])
- cursor += 4
- namelength := int(headerlength) - cursor + 4
- if l := len(chunkdata); l < cursor+namelength {
- return nil, 0, 0, "", nil, false, NewError(ErrNothingToReturn, fmt.Sprintf("chunk less than %v bytes is too short to read the name", l))
- }
- name = string(chunkdata[cursor : cursor+namelength])
- cursor += namelength
-
- // if multihash content is indicated we check the validity of the multihash
- // \TODO the check above for multihash probably is sufficient also for this case (or can be with a small adjustment) and if so this code should be removed
- var intdatalength int
- var ismultihash bool
- if datalength == 0 {
- var intheaderlength int
- var err error
- intdatalength, intheaderlength, err = multihash.GetMultihashLength(chunkdata[cursor:])
- if err != nil {
- log.Error("multihash parse error", "err", err)
- return nil, 0, 0, "", nil, false, err
- }
- intdatalength += intheaderlength
- multihashboundary := cursor + intdatalength
- if len(chunkdata) != multihashboundary && len(chunkdata) < multihashboundary+signatureLength {
- log.Debug("multihash error", "chunkdatalen", len(chunkdata), "multihashboundary", multihashboundary)
- return nil, 0, 0, "", nil, false, errors.New("Corrupt multihash data")
- }
- ismultihash = true
- } else {
- intdatalength = int(datalength)
- }
- data = make([]byte, intdatalength)
- copy(data, chunkdata[cursor:cursor+intdatalength])
-
- // omit signatures if we have no validator
- var signature *Signature
- cursor += intdatalength
- if h.signer != nil {
- sigdata := chunkdata[cursor : cursor+signatureLength]
- if len(sigdata) > 0 {
- signature = &Signature{}
- copy(signature[:], sigdata)
- }
- }
-
- return signature, period, version, name, data, ismultihash, nil
-}
-
-// Adds an actual data update
-//
-// Uses the data currently loaded in the resources map entry.
-// It is the caller's responsibility to make sure that this data is not stale.
-//
-// A resource update cannot span chunks, and thus has max length 4096
-func (h *Handler) UpdateMultihash(ctx context.Context, name string, data []byte) (storage.Address, error) {
- // \TODO perhaps this check should be in newUpdateChunk()
- if _, _, err := multihash.GetMultihashLength(data); err != nil {
- return nil, NewError(ErrNothingToReturn, err.Error())
- }
- return h.update(ctx, name, data, true)
-}
-
-func (h *Handler) Update(ctx context.Context, name string, data []byte) (storage.Address, error) {
- return h.update(ctx, name, data, false)
-}
-
-// create and commit an update
-func (h *Handler) update(ctx context.Context, name string, data []byte, multihash bool) (storage.Address, error) {
-
- // zero-length updates are bogus
- if len(data) == 0 {
- return nil, NewError(ErrInvalidValue, "I refuse to waste swarm space for updates with empty values, amigo (data length is 0)")
- }
-
- // we can't update anything without a store
- if h.chunkStore == nil {
- return nil, NewError(ErrInit, "Call Handler.SetStore() before updating")
- }
-
- // signature length is 0 if we are not using them
- var signaturelength int
- if h.signer != nil {
- signaturelength = signatureLength
- }
-
- // get the cached information
- nameHash := ens.EnsNode(name)
- nameHashHex := nameHash.Hex()
- rsrc := h.get(nameHashHex)
- if rsrc == nil {
- return nil, NewError(ErrNotFound, fmt.Sprintf(" object '%s' not in index", name))
- } else if !rsrc.isSynced() {
- return nil, NewError(ErrNotSynced, " object not in sync")
- }
-
- // an update can be only one chunk long; data length less header and signature data
- // 12 = length of header and data length fields (2xuint16) plus period and frequency value fields (2xuint32)
- datalimit := h.chunkSize() - int64(signaturelength-len(name)-12)
- if int64(len(data)) > datalimit {
- return nil, NewError(ErrDataOverflow, fmt.Sprintf("Data overflow: %d / %d bytes", len(data), datalimit))
- }
-
- // get our blockheight at this time and the next block of the update period
- currentblock, err := h.getBlock(ctx, name)
- if err != nil {
- return nil, NewError(ErrIO, fmt.Sprintf("Could not get block height: %v", err))
- }
- nextperiod, err := getNextPeriod(rsrc.startBlock, currentblock, rsrc.frequency)
- if err != nil {
- return nil, err
- }
-
- // if we already have an update for this block then increment version
- // resource object MUST be in sync for version to be correct, but we checked this earlier in the method already
- var version uint32
- if h.hasUpdate(nameHashHex, nextperiod) {
- version = rsrc.version
- }
- version++
-
- // calculate the chunk key
- key := h.resourceHash(nextperiod, version, rsrc.nameHash)
-
- // if we have a signing function, sign the update
- // \TODO this code should probably be consolidated with corresponding code in New()
- var signature *Signature
- if h.signer != nil {
- // sign the data hash with the key
- digest := h.keyDataHash(key, data)
- sig, err := h.signer.Sign(digest)
- if err != nil {
- return nil, NewError(ErrInvalidSignature, fmt.Sprintf("Sign fail: %v", err))
- }
- signature = &sig
-
- // get the address of the signer (which also checks that it's a valid signature)
- addr, err := getAddressFromDataSig(digest, *signature)
- if err != nil {
- return nil, NewError(ErrInvalidSignature, fmt.Sprintf("Invalid data/signature: %v", err))
- }
- if h.signer != nil {
- // check if the signer has access to update
- ok, err := h.checkAccess(name, addr)
- if err != nil {
- return nil, NewError(ErrIO, fmt.Sprintf("Access check fail: %v", err))
- } else if !ok {
- return nil, NewError(ErrUnauthorized, fmt.Sprintf("Address %x does not have access to update %s", addr, name))
- }
- }
- }
-
- // a datalength field set to 0 means the content is a multihash
- var datalength int
- if !multihash {
- datalength = len(data)
- }
- chunk := newUpdateChunk(key, signature, nextperiod, version, name, data, datalength)
-
- // send the chunk
- h.chunkStore.Put(ctx, chunk)
- log.Trace("resource update", "name", name, "key", key, "currentblock", currentblock, "lastperiod", nextperiod, "version", version, "data", chunk.SData, "multihash", multihash)
-
- // update our resources map entry and return the new key
- rsrc.lastPeriod = nextperiod
- rsrc.version = version
- rsrc.data = make([]byte, len(data))
- copy(rsrc.data, data)
- return key, nil
-}
-
-// Closes the datastore.
-// Always call this at shutdown to avoid data corruption.
-func (h *Handler) Close() {
- h.chunkStore.Close()
-}
-
-// gets the current block height
-func (h *Handler) getBlock(ctx context.Context, name string) (uint64, error) {
- blockheader, err := h.headerGetter.HeaderByNumber(ctx, name, nil)
- if err != nil {
- return 0, err
- }
- return blockheader.Number.Uint64(), nil
-}
-
-// Calculate the period index (aka major version number) from a given block number
-func (h *Handler) BlockToPeriod(name string, blocknumber uint64) (uint32, error) {
- return getNextPeriod(h.resources[name].startBlock, blocknumber, h.resources[name].frequency)
-}
-
-// Calculate the block number from a given period index (aka major version number)
-func (h *Handler) PeriodToBlock(name string, period uint32) uint64 {
- return h.resources[name].startBlock + (uint64(period) * h.resources[name].frequency)
-}
-
-// Retrieves the resource index value for the given nameHash
-func (h *Handler) get(nameHash string) *resource {
- h.resourceLock.RLock()
- defer h.resourceLock.RUnlock()
- rsrc := h.resources[nameHash]
- return rsrc
-}
-
-// Sets the resource index value for the given nameHash
-func (h *Handler) set(nameHash string, rsrc *resource) {
- h.resourceLock.Lock()
- defer h.resourceLock.Unlock()
- h.resources[nameHash] = rsrc
-}
-
-// used for chunk keys
-func (h *Handler) resourceHash(period uint32, version uint32, namehash common.Hash) storage.Address {
- // format is: hash(period|version|namehash)
- hasher := h.hashPool.Get().(storage.SwarmHash)
- defer h.hashPool.Put(hasher)
- hasher.Reset()
- b := make([]byte, 4)
- binary.LittleEndian.PutUint32(b, period)
- hasher.Write(b)
- binary.LittleEndian.PutUint32(b, version)
- hasher.Write(b)
- hasher.Write(namehash[:])
- return hasher.Sum(nil)
-}
-
-// Checks if we already have an update on this resource, according to the value in the current state of the resource index
-func (h *Handler) hasUpdate(nameHash string, period uint32) bool {
- return h.resources[nameHash].lastPeriod == period
-}
-
-func getAddressFromDataSig(datahash common.Hash, signature Signature) (common.Address, error) {
- pub, err := crypto.SigToPub(datahash.Bytes(), signature[:])
- if err != nil {
- return common.Address{}, err
- }
- return crypto.PubkeyToAddress(*pub), nil
-}
-
-// create an update chunk
-func newUpdateChunk(addr storage.Address, signature *Signature, period uint32, version uint32, name string, data []byte, datalength int) *storage.Chunk {
-
- // no signatures if no validator
- var signaturelength int
- if signature != nil {
- signaturelength = signatureLength
- }
-
- // prepend version and period to allow reverse lookups
- headerlength := len(name) + 4 + 4
-
- actualdatalength := len(data)
- chunk := storage.NewChunk(addr, nil)
- chunk.SData = make([]byte, 4+signaturelength+headerlength+actualdatalength) // initial 4 are uint16 length descriptors for headerlength and datalength
-
- // data header length does NOT include the header length prefix bytes themselves
- cursor := 0
- binary.LittleEndian.PutUint16(chunk.SData[cursor:], uint16(headerlength))
- cursor += 2
-
- // data length
- binary.LittleEndian.PutUint16(chunk.SData[cursor:], uint16(datalength))
- cursor += 2
-
- // header = period + version + name
- binary.LittleEndian.PutUint32(chunk.SData[cursor:], period)
- cursor += 4
-
- binary.LittleEndian.PutUint32(chunk.SData[cursor:], version)
- cursor += 4
-
- namebytes := []byte(name)
- copy(chunk.SData[cursor:], namebytes)
- cursor += len(namebytes)
-
- // add the data
- copy(chunk.SData[cursor:], data)
-
- // if signature is present it's the last item in the chunk data
- if signature != nil {
- cursor += actualdatalength
- copy(chunk.SData[cursor:], signature[:])
- }
-
- chunk.Size = int64(len(chunk.SData))
- return chunk
-}
-
-// Helper function to calculate the next update period number from the current block, start block and frequency
+// Helper function to calculate the next update period number from the current time, start time and frequency
func getNextPeriod(start uint64, current uint64, frequency uint64) (uint32, error) {
if current < start {
- return 0, NewError(ErrInvalidValue, fmt.Sprintf("given current block value %d < start block %d", current, start))
- }
- blockdiff := current - start
- period := blockdiff / frequency
- return uint32(period + 1), nil
-}
-
-// ToSafeName is a helper function to create an valid idna of a given resource update name
-func ToSafeName(name string) (string, error) {
- return idna.ToASCII(name)
-}
-
-// check that name identifiers contain valid bytes
-// Strings created using ToSafeName() should satisfy this check
-func isSafeName(name string) bool {
- if name == "" {
- return false
- }
- validname, err := idna.ToASCII(name)
- if err != nil {
- return false
- }
- return validname == name
-}
-
-func NewTestHandler(datadir string, params *HandlerParams) (*Handler, error) {
- path := filepath.Join(datadir, DbDirName)
- rh, err := NewHandler(params)
- if err != nil {
- return nil, fmt.Errorf("resource handler create fail: %v", err)
+ return 0, NewErrorf(ErrInvalidValue, "given current time value %d < start time %d", current, start)
}
- localstoreparams := storage.NewDefaultLocalStoreParams()
- localstoreparams.Init(path)
- localStore, err := storage.NewLocalStore(localstoreparams, nil)
- if err != nil {
- return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err)
+ if frequency == 0 {
+ return 0, NewError(ErrInvalidValue, "frequency is 0")
}
- localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(resourceHash)))
- localStore.Validators = append(localStore.Validators, rh)
- netStore := storage.NewNetStore(localStore, nil)
- rh.SetStore(netStore)
- return rh, nil
+ timeDiff := current - start
+ period := timeDiff / frequency
+ return uint32(period + 1), nil
}
diff --git a/swarm/storage/mru/resource_sign.go b/swarm/storage/mru/resource_sign.go
index c6185a3bb..a9f7cb629 100644
--- a/swarm/storage/mru/resource_sign.go
+++ b/swarm/storage/mru/resource_sign.go
@@ -23,20 +23,44 @@ import (
"github.com/ethereum/go-ethereum/crypto"
)
-// Signs resource updates
+const signatureLength = 65
+
+// Signature is an alias for a static byte array with the size of a signature
+type Signature [signatureLength]byte
+
+// Signer signs Mutable Resource update payloads
type Signer interface {
Sign(common.Hash) (Signature, error)
+ Address() common.Address
}
+// GenericSigner implements the Signer interface
+// It is the vanilla signer that probably should be used in most cases
type GenericSigner struct {
PrivKey *ecdsa.PrivateKey
+ address common.Address
}
-func (self *GenericSigner) Sign(data common.Hash) (signature Signature, err error) {
- signaturebytes, err := crypto.Sign(data.Bytes(), self.PrivKey)
+// NewGenericSigner builds a signer that will sign everything with the provided private key
+func NewGenericSigner(privKey *ecdsa.PrivateKey) *GenericSigner {
+ return &GenericSigner{
+ PrivKey: privKey,
+ address: crypto.PubkeyToAddress(privKey.PublicKey),
+ }
+}
+
+// Sign signs the supplied data
+// It wraps the ethereum crypto.Sign() method
+func (s *GenericSigner) Sign(data common.Hash) (signature Signature, err error) {
+ signaturebytes, err := crypto.Sign(data.Bytes(), s.PrivKey)
if err != nil {
return
}
copy(signature[:], signaturebytes)
return
}
+
+// Address returns the ethereum address derived from the signer's private key
+func (s *GenericSigner) Address() common.Address {
+ return s.address
+}
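For context, the Signer interface above boils down to an ECDSA signature over a 32-byte digest. A minimal, self-contained sketch of the same operation using go-ethereum's crypto package (the key and digest below are throwaway placeholders, not values used anywhere in this change):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Throwaway key standing in for GenericSigner.PrivKey.
	privKey, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	digest := common.BytesToHash([]byte("placeholder digest"))

	// crypto.Sign is what GenericSigner.Sign wraps; it returns a 65-byte r||s||v signature.
	sig, err := crypto.Sign(digest.Bytes(), privKey)
	if err != nil {
		panic(err)
	}
	fmt.Printf("signer %x produced a %d-byte signature\n", crypto.PubkeyToAddress(privKey.PublicKey), len(sig))
}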
diff --git a/swarm/storage/mru/resource_test.go b/swarm/storage/mru/resource_test.go
index 48387d981..95c9eccdf 100644
--- a/swarm/storage/mru/resource_test.go
+++ b/swarm/storage/mru/resource_test.go
@@ -22,21 +22,12 @@ import (
"crypto/rand"
"encoding/binary"
"flag"
- "fmt"
"io/ioutil"
- "math/big"
"os"
- "strings"
"testing"
"time"
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
- "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/contracts/ens"
- "github.com/ethereum/go-ethereum/contracts/ens/contract"
- "github.com/ethereum/go-ethereum/core"
- "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/multihash"
@@ -44,49 +35,96 @@ import (
)
var (
- loglevel = flag.Int("loglevel", 3, "loglevel")
- testHasher = storage.MakeHashFunc(storage.SHA3Hash)()
- zeroAddr = common.Address{}
- startBlock = uint64(4200)
+ loglevel = flag.Int("loglevel", 3, "loglevel")
+ testHasher = storage.MakeHashFunc(resourceHashAlgorithm)()
+ startTime = Timestamp{
+ Time: uint64(4200),
+ }
resourceFrequency = uint64(42)
cleanF func()
- domainName = "føø.bar"
- safeName string
- nameHash common.Hash
+ resourceName = "føø.bar"
hashfunc = storage.MakeHashFunc(storage.DefaultHash)
)
func init() {
- var err error
flag.Parse()
log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true)))))
- safeName, err = ToSafeName(domainName)
- if err != nil {
- panic(err)
- }
- nameHash = ens.EnsNode(safeName)
}
-// simulated backend does not have the blocknumber call
-// so we use this wrapper to fake returning the block count
-type fakeBackend struct {
- *backends.SimulatedBackend
- blocknumber int64
+// simulated timeProvider
+type fakeTimeProvider struct {
+ currentTime uint64
+}
+
+func (f *fakeTimeProvider) Tick() {
+ f.currentTime++
}
-func (f *fakeBackend) Commit() {
- if f.SimulatedBackend != nil {
- f.SimulatedBackend.Commit()
+func (f *fakeTimeProvider) Now() Timestamp {
+ return Timestamp{
+ Time: f.currentTime,
}
- f.blocknumber++
}
-func (f *fakeBackend) HeaderByNumber(context context.Context, name string, bigblock *big.Int) (*types.Header, error) {
- f.blocknumber++
- biggie := big.NewInt(f.blocknumber)
- return &types.Header{
- Number: biggie,
- }, nil
+func TestUpdateChunkSerializationErrorChecking(t *testing.T) {
+
+ // Test that fromChunk fails if the chunk is too small
+ var r SignedResourceUpdate
+ if err := r.fromChunk(storage.ZeroAddr, make([]byte, minimumUpdateDataLength-1)); err == nil {
+ t.Fatalf("Expected fromChunk to fail when chunkData contains less than %d bytes", minimumUpdateDataLength)
+ }
+
+ r = SignedResourceUpdate{}
+ // Test that fromChunk fails when the length header does not match the data array length
+ fakeChunk := make([]byte, 150)
+ binary.LittleEndian.PutUint16(fakeChunk, 44)
+ if err := r.fromChunk(storage.ZeroAddr, fakeChunk); err == nil {
+ t.Fatal("Expected fromChunk to fail when the header length does not match the actual data array passed in")
+ }
+
+ r = SignedResourceUpdate{
+ resourceUpdate: resourceUpdate{
+ updateHeader: updateHeader{
+ UpdateLookup: UpdateLookup{
+
+ rootAddr: make([]byte, 79), // put the wrong length, should be storage.KeyLength
+ },
+ metaHash: nil,
+ multihash: false,
+ },
+ },
+ }
+ _, err := r.toChunk()
+ if err == nil {
+ t.Fatal("Expected newUpdateChunk to fail when rootAddr or metaHash have the wrong length")
+ }
+ r.rootAddr = make([]byte, storage.KeyLength)
+ r.metaHash = make([]byte, storage.KeyLength)
+ _, err = r.toChunk()
+ if err == nil {
+ t.Fatal("Expected newUpdateChunk to fail when there is no data")
+ }
+ r.data = make([]byte, 79) // put some arbitrary length data
+ _, err = r.toChunk()
+ if err == nil {
+ t.Fatal("expected newUpdateChunk to fail when there is no signature", err)
+ }
+
+ alice := newAliceSigner()
+ if err := r.Sign(alice); err != nil {
+ t.Fatalf("error signing:%s", err)
+
+ }
+ _, err = r.toChunk()
+ if err != nil {
+ t.Fatalf("error creating update chunk:%s", err)
+ }
+
+ r.multihash = true
+ r.data[1] = 79 // mess with the multihash, corrupting one byte of it.
+ if err := r.Sign(alice); err == nil {
+ t.Fatal("expected Sign() to fail when an invalid multihash is in data and multihash=true", err)
+ }
}
// check that signature address matches update signer address
@@ -95,21 +133,32 @@ func TestReverse(t *testing.T) {
period := uint32(4)
version := uint32(2)
- // signer containing private key
- signer, err := newTestSigner()
- if err != nil {
- t.Fatal(err)
+ // make fake timeProvider
+ timeProvider := &fakeTimeProvider{
+ currentTime: startTime.Time,
}
+ // signer containing private key
+ signer := newAliceSigner()
+
// set up rpc and create resourcehandler
- rh, _, teardownTest, err := setupTest(nil, nil, signer)
+ _, _, teardownTest, err := setupTest(timeProvider, signer)
if err != nil {
t.Fatal(err)
}
defer teardownTest()
- // generate a hash for block 4200 version 1
- key := rh.resourceHash(period, version, ens.EnsNode(safeName))
+ metadata := ResourceMetadata{
+ Name: resourceName,
+ StartTime: startTime,
+ Frequency: resourceFrequency,
+ Owner: signer.Address(),
+ }
+
+ rootAddr, metaHash, _, err := metadata.serializeAndHash()
+ if err != nil {
+ t.Fatal(err)
+ }
// generate some bogus data for the chunk and sign it
data := make([]byte, 8)
@@ -119,21 +168,42 @@ func TestReverse(t *testing.T) {
}
testHasher.Reset()
testHasher.Write(data)
- digest := rh.keyDataHash(key, data)
- sig, err := rh.signer.Sign(digest)
- if err != nil {
+
+ update := &SignedResourceUpdate{
+ resourceUpdate: resourceUpdate{
+ updateHeader: updateHeader{
+ UpdateLookup: UpdateLookup{
+ period: period,
+ version: version,
+ rootAddr: rootAddr,
+ },
+ metaHash: metaHash,
+ },
+ data: data,
+ },
+ }
+ // generate a hash for t=4200 version 1
+ key := update.UpdateAddr()
+
+ if err = update.Sign(signer); err != nil {
t.Fatal(err)
}
- chunk := newUpdateChunk(key, &sig, period, version, safeName, data, len(data))
+ chunk, err := update.toChunk()
+ if err != nil {
+ t.Fatal(err)
+ }
// check that we can recover the owner account from the update chunk's signature
- checksig, checkperiod, checkversion, checkname, checkdata, _, err := rh.parseUpdate(chunk.SData)
+ var checkUpdate SignedResourceUpdate
+ if err := checkUpdate.fromChunk(chunk.Addr, chunk.SData); err != nil {
+ t.Fatal(err)
+ }
+ checkdigest, err := checkUpdate.GetDigest()
if err != nil {
t.Fatal(err)
}
- checkdigest := rh.keyDataHash(chunk.Addr, checkdata)
- recoveredaddress, err := getAddressFromDataSig(checkdigest, *checksig)
+ recoveredaddress, err := getOwner(checkdigest, *checkUpdate.signature)
if err != nil {
t.Fatalf("Retrieve address from signature fail: %v", err)
}
@@ -147,28 +217,29 @@ func TestReverse(t *testing.T) {
if !bytes.Equal(key[:], chunk.Addr[:]) {
t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Addr)
}
- if period != checkperiod {
- t.Fatalf("Expected period '%d', was '%d'", period, checkperiod)
- }
- if version != checkversion {
- t.Fatalf("Expected version '%d', was '%d'", version, checkversion)
+ if period != checkUpdate.period {
+ t.Fatalf("Expected period '%d', was '%d'", period, checkUpdate.period)
}
- if safeName != checkname {
- t.Fatalf("Expected name '%s', was '%s'", safeName, checkname)
+ if version != checkUpdate.version {
+ t.Fatalf("Expected version '%d', was '%d'", version, checkUpdate.version)
}
- if !bytes.Equal(data, checkdata) {
- t.Fatalf("Expectedn data '%x', was '%x'", data, checkdata)
+ if !bytes.Equal(data, checkUpdate.data) {
+ t.Fatalf("Expectedn data '%x', was '%x'", data, checkUpdate.data)
}
}
// make updates and retrieve them based on periods and versions
-func TestHandler(t *testing.T) {
+func TestResourceHandler(t *testing.T) {
- // make fake backend, set up rpc and create resourcehandler
- backend := &fakeBackend{
- blocknumber: int64(startBlock),
+ // make fake timeProvider
+ timeProvider := &fakeTimeProvider{
+ currentTime: startTime.Time,
}
- rh, datadir, teardownTest, err := setupTest(backend, nil, nil)
+
+ // signer containing private key
+ signer := newAliceSigner()
+
+ rh, datadir, teardownTest, err := setupTest(timeProvider, signer)
if err != nil {
t.Fatal(err)
}
@@ -177,24 +248,45 @@ func TestHandler(t *testing.T) {
// create a new resource
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- rootChunkKey, _, err := rh.New(ctx, safeName, resourceFrequency)
+
+ metadata := &ResourceMetadata{
+ Name: resourceName,
+ Frequency: resourceFrequency,
+ StartTime: Timestamp{Time: timeProvider.Now().Time},
+ Owner: signer.Address(),
+ }
+
+ request, err := NewCreateUpdateRequest(metadata)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = request.Sign(signer)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = rh.New(ctx, request)
if err != nil {
t.Fatal(err)
}
- chunk, err := rh.chunkStore.Get(context.TODO(), storage.Address(rootChunkKey))
+ chunk, err := rh.chunkStore.Get(context.TODO(), storage.Address(request.rootAddr))
if err != nil {
t.Fatal(err)
} else if len(chunk.SData) < 16 {
t.Fatalf("chunk data must be minimum 16 bytes, is %d", len(chunk.SData))
}
- startblocknumber := binary.LittleEndian.Uint64(chunk.SData[2:10])
- chunkfrequency := binary.LittleEndian.Uint64(chunk.SData[10:])
- if startblocknumber != uint64(backend.blocknumber) {
- t.Fatalf("stored block number %d does not match provided block number %d", startblocknumber, backend.blocknumber)
+
+ var recoveredMetadata ResourceMetadata
+
+ err = recoveredMetadata.binaryGet(chunk.SData)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if recoveredMetadata.StartTime.Time != timeProvider.currentTime {
+ t.Fatalf("stored startTime %d does not match provided startTime %d", recoveredMetadata.StartTime.Time, timeProvider.currentTime)
}
- if chunkfrequency != resourceFrequency {
- t.Fatalf("stored frequency %d does not match provided frequency %d", chunkfrequency, resourceFrequency)
+ if recoveredMetadata.Frequency != resourceFrequency {
+ t.Fatalf("stored frequency %d does not match provided frequency %d", recoveredMetadata.Frequency, resourceFrequency)
}
// data for updates:
@@ -205,232 +297,273 @@ func TestHandler(t *testing.T) {
"clyde",
}
- // update halfway to first period
+ // update halfway to first period. period=1, version=1
resourcekey := make(map[string]storage.Address)
- fwdBlocks(int(resourceFrequency/2), backend)
+ fwdClock(int(resourceFrequency/2), timeProvider)
data := []byte(updates[0])
- resourcekey[updates[0]], err = rh.Update(ctx, safeName, data)
+ request.SetData(data, false)
+ if err := request.Sign(signer); err != nil {
+ t.Fatal(err)
+ }
+ resourcekey[updates[0]], err = rh.Update(ctx, &request.SignedResourceUpdate)
if err != nil {
t.Fatal(err)
}
- // update on first period
- fwdBlocks(int(resourceFrequency/2), backend)
+ // update on first period with version = 1 to make it fail since there is already one update with version=1
+ request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if request.version != 2 || request.period != 1 {
+ t.Fatal("Suggested period should be 1 and version should be 2")
+ }
+
+ request.version = 1 // force version 1 instead of 2 to make it fail
data = []byte(updates[1])
- resourcekey[updates[1]], err = rh.Update(ctx, safeName, data)
+ request.SetData(data, false)
+ if err := request.Sign(signer); err != nil {
+ t.Fatal(err)
+ }
+ resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate)
+ if err == nil {
+ t.Fatal("Expected update to fail since this version already exists")
+ }
+
+ // update on second period with version = 1, correct. period=2, version=1
+ fwdClock(int(resourceFrequency/2), timeProvider)
+ request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ request.SetData(data, false)
+ if err := request.Sign(signer); err != nil {
+ t.Fatal(err)
+ }
+ resourcekey[updates[1]], err = rh.Update(ctx, &request.SignedResourceUpdate)
if err != nil {
t.Fatal(err)
}
- // update on second period
- fwdBlocks(int(resourceFrequency), backend)
+ fwdClock(int(resourceFrequency), timeProvider)
+ // Update on third period, with version = 1
+ request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
+ if err != nil {
+ t.Fatal(err)
+ }
data = []byte(updates[2])
- resourcekey[updates[2]], err = rh.Update(ctx, safeName, data)
+ request.SetData(data, false)
+ if err := request.Sign(signer); err != nil {
+ t.Fatal(err)
+ }
+ resourcekey[updates[2]], err = rh.Update(ctx, &request.SignedResourceUpdate)
if err != nil {
t.Fatal(err)
}
- // update just after second period
- fwdBlocks(1, backend)
+ // update just after third period
+ fwdClock(1, timeProvider)
+ request, err = rh.NewUpdateRequest(ctx, request.rootAddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if request.period != 3 || request.version != 2 {
+ t.Fatal("Suggested period should be 3 and version should be 2")
+ }
data = []byte(updates[3])
- resourcekey[updates[3]], err = rh.Update(ctx, safeName, data)
+ request.SetData(data, false)
+
+ if err := request.Sign(signer); err != nil {
+ t.Fatal(err)
+ }
+ resourcekey[updates[3]], err = rh.Update(ctx, &request.SignedResourceUpdate)
if err != nil {
t.Fatal(err)
}
+
time.Sleep(time.Second)
rh.Close()
// check we can retrieve the updates after close
- // it will match on second iteration startblocknumber + (resourceFrequency * 3)
- fwdBlocks(int(resourceFrequency*2)-1, backend)
+ // it will match on second iteration startTime + (resourceFrequency * 3)
+ fwdClock(int(resourceFrequency*2)-1, timeProvider)
- rhparams := &HandlerParams{
- QueryMaxPeriods: &LookupParams{
- Limit: false,
- },
- Signer: nil,
- HeaderGetter: rh.headerGetter,
- }
+ rhparams := &HandlerParams{}
rh2, err := NewTestHandler(datadir, rhparams)
if err != nil {
t.Fatal(err)
}
- rsrc2, err := rh2.Load(context.TODO(), rootChunkKey)
- _, err = rh2.LookupLatest(ctx, nameHash, true, nil)
+
+ rsrc2, err := rh2.Load(context.TODO(), request.rootAddr)
if err != nil {
t.Fatal(err)
}
- // last update should be "clyde", version two, blockheight startblocknumber + (resourcefrequency * 3)
+ _, err = rh2.Lookup(ctx, LookupLatest(request.rootAddr))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // last update should be "clyde", version two, time= startTime + (resourcefrequency * 3)
if !bytes.Equal(rsrc2.data, []byte(updates[len(updates)-1])) {
- t.Fatalf("resource data was %v, expected %v", rsrc2.data, updates[len(updates)-1])
+ t.Fatalf("resource data was %v, expected %v", string(rsrc2.data), updates[len(updates)-1])
}
if rsrc2.version != 2 {
t.Fatalf("resource version was %d, expected 2", rsrc2.version)
}
- if rsrc2.lastPeriod != 3 {
- t.Fatalf("resource period was %d, expected 3", rsrc2.lastPeriod)
+ if rsrc2.period != 3 {
+ t.Fatalf("resource period was %d, expected 3", rsrc2.period)
}
- log.Debug("Latest lookup", "period", rsrc2.lastPeriod, "version", rsrc2.version, "data", rsrc2.data)
+ log.Debug("Latest lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data)
- // specific block, latest version
- rsrc, err := rh2.LookupHistorical(ctx, nameHash, 3, true, rh2.queryMaxPeriods)
+ // specific period, latest version
+ rsrc, err := rh2.Lookup(ctx, LookupLatestVersionInPeriod(request.rootAddr, 3))
if err != nil {
t.Fatal(err)
}
// check data
if !bytes.Equal(rsrc.data, []byte(updates[len(updates)-1])) {
- t.Fatalf("resource data (historical) was %v, expected %v", rsrc2.data, updates[len(updates)-1])
+ t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[len(updates)-1])
}
- log.Debug("Historical lookup", "period", rsrc2.lastPeriod, "version", rsrc2.version, "data", rsrc2.data)
+ log.Debug("Historical lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data)
- // specific block, specific version
- rsrc, err = rh2.LookupVersion(ctx, nameHash, 3, 1, true, rh2.queryMaxPeriods)
+ // specific period, specific version
+ lookupParams := LookupVersion(request.rootAddr, 3, 1)
+ rsrc, err = rh2.Lookup(ctx, lookupParams)
if err != nil {
t.Fatal(err)
}
// check data
if !bytes.Equal(rsrc.data, []byte(updates[2])) {
- t.Fatalf("resource data (historical) was %v, expected %v", rsrc2.data, updates[2])
+ t.Fatalf("resource data (historical) was %v, expected %v", string(rsrc2.data), updates[2])
}
- log.Debug("Specific version lookup", "period", rsrc2.lastPeriod, "version", rsrc2.version, "data", rsrc2.data)
+ log.Debug("Specific version lookup", "period", rsrc2.period, "version", rsrc2.version, "data", rsrc2.data)
// we are now at third update
// check backwards stepping to the first
for i := 1; i >= 0; i-- {
- rsrc, err := rh2.LookupPreviousByName(ctx, safeName, rh2.queryMaxPeriods)
+ rsrc, err := rh2.LookupPrevious(ctx, lookupParams)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(rsrc.data, []byte(updates[i])) {
- t.Fatalf("resource data (previous) was %v, expected %v", rsrc2.data, updates[i])
+ t.Fatalf("resource data (previous) was %v, expected %v", rsrc.data, updates[i])
}
}
// beyond the first should yield an error
- rsrc, err = rh2.LookupPreviousByName(ctx, safeName, rh2.queryMaxPeriods)
+ rsrc, err = rh2.LookupPrevious(ctx, lookupParams)
if err == nil {
- t.Fatalf("expeected previous to fail, returned period %d version %d data %v", rsrc2.lastPeriod, rsrc2.version, rsrc2.data)
+ t.Fatalf("expected previous to fail, returned period %d version %d data %v", rsrc.period, rsrc.version, rsrc.data)
}
}
-// create ENS enabled resource update, with and without valid owner
-func TestENSOwner(t *testing.T) {
+func TestMultihash(t *testing.T) {
+
+ // make fake timeProvider
+ timeProvider := &fakeTimeProvider{
+ currentTime: startTime.Time,
+ }
// signer containing private key
- signer, err := newTestSigner()
+ signer := newAliceSigner()
+
+ // set up rpc and create resourcehandler
+ rh, datadir, teardownTest, err := setupTest(timeProvider, signer)
if err != nil {
t.Fatal(err)
}
+ defer teardownTest()
- // ens address and transact options
- addr := crypto.PubkeyToAddress(signer.PrivKey.PublicKey)
- transactOpts := bind.NewKeyedTransactor(signer.PrivKey)
+ // create a new resource
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
- // set up ENS sim
- domainparts := strings.Split(safeName, ".")
- contractAddr, contractbackend, err := setupENS(addr, transactOpts, domainparts[0], domainparts[1])
- if err != nil {
- t.Fatal(err)
+ metadata := &ResourceMetadata{
+ Name: resourceName,
+ Frequency: resourceFrequency,
+ StartTime: Timestamp{Time: timeProvider.Now().Time},
+ Owner: signer.Address(),
}
- ensClient, err := ens.NewENS(transactOpts, contractAddr, contractbackend)
+ mr, err := NewCreateRequest(metadata)
if err != nil {
t.Fatal(err)
}
-
- // set up rpc and create resourcehandler with ENS sim backend
- rh, _, teardownTest, err := setupTest(contractbackend, ensClient, signer)
+ err = rh.New(ctx, mr)
if err != nil {
t.Fatal(err)
}
- defer teardownTest()
- // create new resource when we are owner = ok
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- _, _, err = rh.New(ctx, safeName, resourceFrequency)
+ // we're naïvely assuming keccak256 for swarm hashes
+ // if it ever changes this test should also change
+ multihashbytes := ens.EnsNode("foo")
+ multihashmulti := multihash.ToMultihash(multihashbytes.Bytes())
if err != nil {
- t.Fatalf("Create resource fail: %v", err)
+ t.Fatal(err)
}
-
- data := []byte("foo")
- // update resource when we are owner = ok
- _, err = rh.Update(ctx, safeName, data)
+ mr.SetData(multihashmulti, true)
+ err = mr.Sign(signer)
if err != nil {
- t.Fatalf("Update resource fail: %v", err)
+ t.Fatal(err)
}
-
- // update resource when we are not owner = !ok
- signertwo, err := newTestSigner()
+ multihashkey, err := rh.Update(ctx, &mr.SignedResourceUpdate)
if err != nil {
t.Fatal(err)
}
- rh.signer = signertwo
- _, err = rh.Update(ctx, safeName, data)
- if err == nil {
- t.Fatalf("Expected resource update fail due to owner mismatch")
- }
-}
-
-func TestMultihash(t *testing.T) {
- // signer containing private key
- signer, err := newTestSigner()
+ sha1bytes := make([]byte, multihash.MultihashLength)
+ sha1multi := multihash.ToMultihash(sha1bytes)
if err != nil {
t.Fatal(err)
}
-
- // make fake backend, set up rpc and create resourcehandler
- backend := &fakeBackend{
- blocknumber: int64(startBlock),
+ mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr)
+ if err != nil {
+ t.Fatal(err)
}
-
- // set up rpc and create resourcehandler
- rh, datadir, teardownTest, err := setupTest(backend, nil, nil)
+ mr.SetData(sha1multi, true)
+ mr.Sign(signer)
if err != nil {
t.Fatal(err)
}
- defer teardownTest()
-
- // create a new resource
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- _, _, err = rh.New(ctx, safeName, resourceFrequency)
+ sha1key, err := rh.Update(ctx, &mr.SignedResourceUpdate)
if err != nil {
t.Fatal(err)
}
- // we're naïvely assuming keccak256 for swarm hashes
- // if it ever changes this test should also change
- multihashbytes := ens.EnsNode("foo")
- multihashmulti := multihash.ToMultihash(multihashbytes.Bytes())
- multihashkey, err := rh.UpdateMultihash(ctx, safeName, multihashmulti)
+ // invalid multihashes
+ mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr)
if err != nil {
t.Fatal(err)
}
-
- sha1bytes := make([]byte, multihash.MultihashLength)
- sha1multi := multihash.ToMultihash(sha1bytes)
- sha1key, err := rh.UpdateMultihash(ctx, safeName, sha1multi)
+ mr.SetData(multihashmulti[1:], true)
+ err = mr.Sign(signer)
if err != nil {
t.Fatal(err)
}
-
- // invalid multihashes
- _, err = rh.UpdateMultihash(ctx, safeName, multihashmulti[1:])
+ _, err = rh.Update(ctx, &mr.SignedResourceUpdate)
if err == nil {
t.Fatalf("Expected update to fail with first byte skipped")
}
- _, err = rh.UpdateMultihash(ctx, safeName, multihashmulti[:len(multihashmulti)-2])
+ mr, err = rh.NewUpdateRequest(ctx, mr.rootAddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mr.SetData(multihashmulti[:len(multihashmulti)-2], true)
+ err = mr.Sign(signer)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = rh.Update(ctx, &mr.SignedResourceUpdate)
if err == nil {
t.Fatalf("Expected update to fail with last byte skipped")
}
- data, err := getUpdateDirect(rh, multihashkey)
+ data, err := getUpdateDirect(rh.Handler, multihashkey)
if err != nil {
t.Fatal(err)
}
@@ -441,7 +574,7 @@ func TestMultihash(t *testing.T) {
if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) {
t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes())
}
- data, err = getUpdateDirect(rh, sha1key)
+ data, err = getUpdateDirect(rh.Handler, sha1key)
if err != nil {
t.Fatal(err)
}
@@ -454,33 +587,48 @@ func TestMultihash(t *testing.T) {
}
rh.Close()
- rhparams := &HandlerParams{
- QueryMaxPeriods: &LookupParams{
- Limit: false,
- },
- Signer: signer,
- HeaderGetter: rh.headerGetter,
- OwnerValidator: rh.ownerValidator,
- }
+ rhparams := &HandlerParams{}
// test with signed data
rh2, err := NewTestHandler(datadir, rhparams)
if err != nil {
t.Fatal(err)
}
- _, _, err = rh2.New(ctx, safeName, resourceFrequency)
+ mr, err = NewCreateRequest(metadata)
if err != nil {
t.Fatal(err)
}
- multihashsignedkey, err := rh2.UpdateMultihash(ctx, safeName, multihashmulti)
+ err = rh2.New(ctx, mr)
if err != nil {
t.Fatal(err)
}
- sha1signedkey, err := rh2.UpdateMultihash(ctx, safeName, sha1multi)
+
+ mr.SetData(multihashmulti, true)
+ err = mr.Sign(signer)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ multihashsignedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ mr, err = rh2.NewUpdateRequest(ctx, mr.rootAddr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mr.SetData(sha1multi, true)
+ err = mr.Sign(signer)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sha1signedkey, err := rh2.Update(ctx, &mr.SignedResourceUpdate)
if err != nil {
t.Fatal(err)
}
- data, err = getUpdateDirect(rh2, multihashsignedkey)
+ data, err = getUpdateDirect(rh2.Handler, multihashsignedkey)
if err != nil {
t.Fatal(err)
}
@@ -491,7 +639,7 @@ func TestMultihash(t *testing.T) {
if !bytes.Equal(multihashdecode, multihashbytes.Bytes()) {
t.Fatalf("Decoded hash '%x' does not match original hash '%x'", multihashdecode, multihashbytes.Bytes())
}
- data, err = getUpdateDirect(rh2, sha1signedkey)
+ data, err = getUpdateDirect(rh2.Handler, sha1signedkey)
if err != nil {
t.Fatal(err)
}
@@ -504,63 +652,95 @@ func TestMultihash(t *testing.T) {
}
}
-func TestChunkValidator(t *testing.T) {
- // signer containing private key
- signer, err := newTestSigner()
- if err != nil {
- t.Fatal(err)
+// TODO: verify testing of signature validation and enforcement
+func TestValidator(t *testing.T) {
+
+ // make fake timeProvider
+ timeProvider := &fakeTimeProvider{
+ currentTime: startTime.Time,
}
- // ens address and transact options
- addr := crypto.PubkeyToAddress(signer.PrivKey.PublicKey)
- transactOpts := bind.NewKeyedTransactor(signer.PrivKey)
+ // signer containing private key. Alice will be the good girl
+ signer := newAliceSigner()
- // set up ENS sim
- domainparts := strings.Split(safeName, ".")
- contractAddr, contractbackend, err := setupENS(addr, transactOpts, domainparts[0], domainparts[1])
- if err != nil {
- t.Fatal(err)
- }
+ // fake signer for false results. Bob will play the bad guy today.
+ falseSigner := newBobSigner()
- ensClient, err := ens.NewENS(transactOpts, contractAddr, contractbackend)
+ // set up sim timeProvider
+ rh, _, teardownTest, err := setupTest(timeProvider, signer)
if err != nil {
t.Fatal(err)
}
+ defer teardownTest()
- // set up rpc and create resourcehandler with ENS sim backend
- rh, _, teardownTest, err := setupTest(contractbackend, ensClient, signer)
+ // create new resource
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ metadata := &ResourceMetadata{
+ Name: resourceName,
+ Frequency: resourceFrequency,
+ StartTime: Timestamp{Time: timeProvider.Now().Time},
+ Owner: signer.Address(),
+ }
+ mr, err := NewCreateRequest(metadata)
if err != nil {
t.Fatal(err)
}
- defer teardownTest()
+ mr.Sign(signer)
- // create new resource when we are owner = ok
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- key, rsrc, err := rh.New(ctx, safeName, resourceFrequency)
+ err = rh.New(ctx, mr)
if err != nil {
t.Fatalf("Create resource fail: %v", err)
}
+ // chunk with address
data := []byte("foo")
- key = rh.resourceHash(1, 1, rsrc.nameHash)
- digest := rh.keyDataHash(key, data)
- sig, err := rh.signer.Sign(digest)
- if err != nil {
+ mr.SetData(data, false)
+ if err := mr.Sign(signer); err != nil {
t.Fatalf("sign fail: %v", err)
}
- chunk := newUpdateChunk(key, &sig, 1, 1, safeName, data, len(data))
+ chunk, err := mr.SignedResourceUpdate.toChunk()
+ if err != nil {
+ t.Fatal(err)
+ }
if !rh.Validate(chunk.Addr, chunk.SData) {
t.Fatal("Chunk validator fail on update chunk")
}
+ // chunk with address made from different publickey
+ if err := mr.Sign(falseSigner); err == nil {
+ t.Fatalf("Expected Sign to fail since we are using a different OwnerAddr: %v", err)
+ }
+
+ // chunk with address made from different publickey
+ mr.metadata.Owner = zeroAddr // set to zero to bypass .Sign() check
+ if err := mr.Sign(falseSigner); err != nil {
+ t.Fatalf("sign fail: %v", err)
+ }
+
+ chunk, err = mr.SignedResourceUpdate.toChunk()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if rh.Validate(chunk.Addr, chunk.SData) {
+ t.Fatal("Chunk validator did not fail on update chunk with false address")
+ }
+
ctx, cancel = context.WithTimeout(context.Background(), time.Second)
defer cancel()
- startBlock, err := rh.getBlock(ctx, safeName)
+
+ metadata = &ResourceMetadata{
+ Name: resourceName,
+ StartTime: TimestampProvider.Now(),
+ Frequency: resourceFrequency,
+ Owner: signer.Address(),
+ }
+ chunk, _, err = metadata.newChunk()
if err != nil {
t.Fatal(err)
}
- chunk = rh.newMetaChunk(safeName, startBlock, resourceFrequency)
+
if !rh.Validate(chunk.Addr, chunk.SData) {
t.Fatal("Chunk validator fail on metadata chunk")
}
@@ -568,8 +748,17 @@ func TestChunkValidator(t *testing.T) {
// tests that the content address validator correctly checks the data
// tests that resource update chunks are passed through content address validator
-// the test checking the resouce update validator internal correctness is found in resource_test.go
-func TestValidator(t *testing.T) {
+// there is some redundancy in this test as it also tests content addressed chunks,
+// which should be evaluated as invalid chunks by this validator
+func TestValidatorInStore(t *testing.T) {
+
+ // make fake timeProvider
+ TimestampProvider = &fakeTimeProvider{
+ currentTime: startTime.Time,
+ }
+
+ // signer containing private key
+ signer := newAliceSigner()
// set up localstore
datadir, err := ioutil.TempDir("", "storage-testresourcevalidator")
@@ -585,9 +774,7 @@ func TestValidator(t *testing.T) {
t.Fatal(err)
}
- // add content address validator and resource validator to validators and check puts
- // bad should fail, good should pass
- store.Validators = append(store.Validators, storage.NewContentAddressValidator(hashfunc))
+ // set up resource handler and add it as a validator to the localstore
rhParams := &HandlerParams{}
rh, err := NewHandler(rhParams)
if err != nil {
@@ -595,73 +782,75 @@ func TestValidator(t *testing.T) {
}
store.Validators = append(store.Validators, rh)
+ // create content addressed chunks, one good, one faulty
chunks := storage.GenerateRandomChunks(storage.DefaultChunkSize, 2)
goodChunk := chunks[0]
badChunk := chunks[1]
badChunk.SData = goodChunk.SData
- key := rh.resourceHash(42, 1, ens.EnsNode("xyzzy.eth"))
- data := []byte("bar")
- uglyChunk := newUpdateChunk(key, nil, 42, 1, "xyzzy.eth", data, len(data))
- storage.PutChunks(store, goodChunk, badChunk, uglyChunk)
- if err := goodChunk.GetErrored(); err != nil {
- t.Fatalf("expected no error on good content address chunk with both validators, but got: %s", err)
+ metadata := &ResourceMetadata{
+ StartTime: startTime,
+ Name: "xyzzy",
+ Frequency: resourceFrequency,
+ Owner: signer.Address(),
}
- if err := badChunk.GetErrored(); err == nil {
- t.Fatal("expected error on bad chunk address with both validators, but got nil")
+
+ rootChunk, metaHash, err := metadata.newChunk()
+ if err != nil {
+ t.Fatal(err)
}
- if err := uglyChunk.GetErrored(); err != nil {
- t.Fatalf("expected no error on resource update chunk with both validators, but got: %s", err)
+ // create a resource update chunk with correct publickey
+ updateLookup := UpdateLookup{
+ period: 42,
+ version: 1,
+ rootAddr: rootChunk.Addr,
}
- // (redundant check)
- // use only resource validator, and check puts
- // bad should fail, good should fail, resource should pass
- store.Validators[0] = store.Validators[1]
- store.Validators = store.Validators[:1]
+ updateAddr := updateLookup.UpdateAddr()
+ data := []byte("bar")
- chunks = storage.GenerateRandomChunks(storage.DefaultChunkSize, 2)
- goodChunk = chunks[0]
- badChunk = chunks[1]
- badChunk.SData = goodChunk.SData
+ r := SignedResourceUpdate{
+ updateAddr: updateAddr,
+ resourceUpdate: resourceUpdate{
+ updateHeader: updateHeader{
+ UpdateLookup: updateLookup,
+ metaHash: metaHash,
+ },
+ data: data,
+ },
+ }
- key = rh.resourceHash(42, 2, ens.EnsNode("xyzzy.eth"))
- data = []byte("baz")
- uglyChunk = newUpdateChunk(key, nil, 42, 2, "xyzzy.eth", data, len(data))
+ r.Sign(signer)
- storage.PutChunks(store, goodChunk, badChunk, uglyChunk)
+ uglyChunk, err := r.toChunk()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // put the chunks in the store and check their error status
+ storage.PutChunks(store, goodChunk)
if goodChunk.GetErrored() == nil {
t.Fatal("expected error on good content address chunk with resource validator only, but got nil")
}
+ storage.PutChunks(store, badChunk)
if badChunk.GetErrored() == nil {
t.Fatal("expected error on bad content address chunk with resource validator only, but got nil")
}
+ storage.PutChunks(store, uglyChunk)
if err := uglyChunk.GetErrored(); err != nil {
t.Fatalf("expected no error on resource update chunk with resource validator only, but got: %s", err)
}
}
-// fast-forward blockheight
-func fwdBlocks(count int, backend *fakeBackend) {
+// fast-forward clock
+func fwdClock(count int, timeProvider *fakeTimeProvider) {
for i := 0; i < count; i++ {
- backend.Commit()
- }
-}
-
-type ensOwnerValidator struct {
- *ens.ENS
-}
-
-func (e ensOwnerValidator) ValidateOwner(name string, address common.Address) (bool, error) {
- addr, err := e.Owner(ens.EnsNode(name))
- if err != nil {
- return false, err
+ timeProvider.Tick()
}
- return address == addr, nil
}
// create rpc and resourcehandler
-func setupTest(backend headerGetter, ensBackend *ens.ENS, signer Signer) (rh *Handler, datadir string, teardown func(), err error) {
+func setupTest(timeProvider timestampProvider, signer Signer) (rh *TestHandler, datadir string, teardown func(), err error) {
var fsClean func()
var rpcClean func()
@@ -683,74 +872,25 @@ func setupTest(backend headerGetter, ensBackend *ens.ENS, signer Signer) (rh *Ha
os.RemoveAll(datadir)
}
- var ov ownerValidator
- if ensBackend != nil {
- ov = ensOwnerValidator{ensBackend}
- }
-
- rhparams := &HandlerParams{
- QueryMaxPeriods: &LookupParams{
- Limit: false,
- },
- Signer: signer,
- HeaderGetter: backend,
- OwnerValidator: ov,
- }
+ TimestampProvider = timeProvider
+ rhparams := &HandlerParams{}
rh, err = NewTestHandler(datadir, rhparams)
return rh, datadir, cleanF, err
}
-// Set up simulated ENS backend for use with ENSHandler tests
-func setupENS(addr common.Address, transactOpts *bind.TransactOpts, sub string, top string) (common.Address, *fakeBackend, error) {
-
- // create the domain hash values to pass to the ENS contract methods
- var tophash [32]byte
- var subhash [32]byte
-
- testHasher.Reset()
- testHasher.Write([]byte(top))
- copy(tophash[:], testHasher.Sum(nil))
- testHasher.Reset()
- testHasher.Write([]byte(sub))
- copy(subhash[:], testHasher.Sum(nil))
-
- // initialize contract backend and deploy
- contractBackend := &fakeBackend{
- SimulatedBackend: backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}),
- }
-
- contractAddress, _, ensinstance, err := contract.DeployENS(transactOpts, contractBackend)
- if err != nil {
- return zeroAddr, nil, fmt.Errorf("can't deploy: %v", err)
- }
-
- // update the registry for the correct owner address
- if _, err = ensinstance.SetOwner(transactOpts, [32]byte{}, addr); err != nil {
- return zeroAddr, nil, fmt.Errorf("can't setowner: %v", err)
- }
- contractBackend.Commit()
-
- if _, err = ensinstance.SetSubnodeOwner(transactOpts, [32]byte{}, tophash, addr); err != nil {
- return zeroAddr, nil, fmt.Errorf("can't register top: %v", err)
- }
- contractBackend.Commit()
-
- if _, err = ensinstance.SetSubnodeOwner(transactOpts, ens.EnsNode(top), subhash, addr); err != nil {
- return zeroAddr, nil, fmt.Errorf("can't register top: %v", err)
- }
- contractBackend.Commit()
+func newAliceSigner() *GenericSigner {
+ privKey, _ := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
+ return NewGenericSigner(privKey)
+}
- return contractAddress, contractBackend, nil
+func newBobSigner() *GenericSigner {
+ privKey, _ := crypto.HexToECDSA("accedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedeaccedecaca")
+ return NewGenericSigner(privKey)
}
-func newTestSigner() (*GenericSigner, error) {
- privKey, err := crypto.GenerateKey()
- if err != nil {
- return nil, err
- }
- return &GenericSigner{
- PrivKey: privKey,
- }, nil
+func newCharlieSigner() *GenericSigner {
+ privKey, _ := crypto.HexToECDSA("facadefacadefacadefacadefacadefacadefacadefacadefacadefacadefaca")
+ return NewGenericSigner(privKey)
}
func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) {
@@ -758,9 +898,9 @@ func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) {
if err != nil {
return nil, err
}
- _, _, _, _, data, _, err := rh.parseUpdate(chunk.SData)
- if err != nil {
+ var r SignedResourceUpdate
+ if err := r.fromChunk(addr, chunk.SData); err != nil {
return nil, err
}
- return data, nil
+ return r.data, nil
}
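The tests above replace the package clock with a hand-driven fake so periods can be fast-forwarded deterministically. A self-contained sketch of that pattern (the type and method names here are illustrative, not the mru package's own):

package main

import "fmt"

// fakeClock mirrors the fakeTimeProvider idea: a counter advanced manually by Tick.
type fakeClock struct{ t uint64 }

func (f *fakeClock) Now() uint64 { return f.t }
func (f *fakeClock) Tick()       { f.t++ }

func main() {
	c := &fakeClock{t: 4200}
	for i := 0; i < 21; i++ { // fast-forward half of a 42-second period, like fwdClock
		c.Tick()
	}
	fmt.Println(c.Now()) // 4221
}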
diff --git a/swarm/storage/mru/signedupdate.go b/swarm/storage/mru/signedupdate.go
new file mode 100644
index 000000000..1c6d02e82
--- /dev/null
+++ b/swarm/storage/mru/signedupdate.go
@@ -0,0 +1,184 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+ "bytes"
+ "hash"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+// SignedResourceUpdate represents a resource update with all the necessary information to prove ownership of the resource
+type SignedResourceUpdate struct {
+ resourceUpdate // actual content that will be put on the chunk, less signature
+ signature *Signature
+ updateAddr storage.Address // resulting chunk address for the update (not serialized, for internal use)
+ binaryData []byte // cached serialization of the update (derived, kept for efficiency/internal use)
+}
+
+// Verify checks that signatures are valid and that the signer owns the resource to be updated
+func (r *SignedResourceUpdate) Verify() (err error) {
+ if len(r.data) == 0 {
+ return NewError(ErrInvalidValue, "Update does not contain data")
+ }
+ if r.signature == nil {
+ return NewError(ErrInvalidSignature, "Missing signature field")
+ }
+
+ digest, err := r.GetDigest()
+ if err != nil {
+ return err
+ }
+
+ // get the address of the signer (which also checks that it's a valid signature)
+ ownerAddr, err := getOwner(digest, *r.signature)
+ if err != nil {
+ return err
+ }
+
+ if !bytes.Equal(r.updateAddr, r.UpdateAddr()) {
+ return NewError(ErrInvalidSignature, "Signature address does not match with ownerAddr")
+ }
+
+ // Check if who signed the resource update really owns the resource
+ if !verifyOwner(ownerAddr, r.metaHash, r.rootAddr) {
+ return NewErrorf(ErrUnauthorized, "signature is valid but signer does not own the resource: %v", err)
+ }
+
+ return nil
+}
+
+// Sign computes the update's digest and attaches the owner's signature so the resource can be validated
+func (r *SignedResourceUpdate) Sign(signer Signer) error {
+
+ r.binaryData = nil //invalidate serialized data
+ digest, err := r.GetDigest() // computes digest and serializes into .binaryData
+ if err != nil {
+ return err
+ }
+
+ signature, err := signer.Sign(digest)
+ if err != nil {
+ return err
+ }
+
+ // Although the Signer interface returns the public address of the signer,
+ // recover it from the signature to see if they match
+ ownerAddress, err := getOwner(digest, signature)
+ if err != nil {
+ return NewError(ErrInvalidSignature, "Error verifying signature")
+ }
+
+ if ownerAddress != signer.Address() { // sanity check to make sure the Signer is declaring the same address used to sign!
+ return NewError(ErrInvalidSignature, "Signer address does not match ownerAddr")
+ }
+
+ r.signature = &signature
+ r.updateAddr = r.UpdateAddr()
+ return nil
+}
+
+// toChunk creates an update chunk from a signed, serialized update.
+func (r *SignedResourceUpdate) toChunk() (*storage.Chunk, error) {
+
+ // Check that the update is signed and serialized
+ // For efficiency, data is serialized during signature and cached in
+ // the binaryData field when computing the signature digest in .getDigest()
+ if r.signature == nil || r.binaryData == nil {
+ return nil, NewError(ErrInvalidSignature, "newUpdateChunk called without a valid signature or payload data. Call .Sign() first.")
+ }
+
+ chunk := storage.NewChunk(r.updateAddr, nil)
+ resourceUpdateLength := r.resourceUpdate.binaryLength()
+ chunk.SData = r.binaryData
+
+ // signature is the last item in the chunk data
+ copy(chunk.SData[resourceUpdateLength:], r.signature[:])
+
+ chunk.Size = int64(len(chunk.SData))
+ return chunk, nil
+}
+
+// fromChunk populates this structure from chunk data. It does not verify the signature is valid.
+func (r *SignedResourceUpdate) fromChunk(updateAddr storage.Address, chunkdata []byte) error {
+ // for update chunk layout see SignedResourceUpdate definition
+
+ //deserialize the resource update portion
+ if err := r.resourceUpdate.binaryGet(chunkdata); err != nil {
+ return err
+ }
+
+ // Extract the signature
+ var signature *Signature
+ cursor := r.resourceUpdate.binaryLength()
+ sigdata := chunkdata[cursor : cursor+signatureLength]
+ if len(sigdata) > 0 {
+ signature = &Signature{}
+ copy(signature[:], sigdata)
+ }
+
+ r.signature = signature
+ r.updateAddr = updateAddr
+ r.binaryData = chunkdata
+
+ return nil
+
+}
+
+// GetDigest creates the resource update digest used in signatures (formerly known as keyDataHash)
+// the serialized payload is cached in .binaryData
+func (r *SignedResourceUpdate) GetDigest() (result common.Hash, err error) {
+ hasher := hashPool.Get().(hash.Hash)
+ defer hashPool.Put(hasher)
+ hasher.Reset()
+ dataLength := r.resourceUpdate.binaryLength()
+ if r.binaryData == nil {
+ r.binaryData = make([]byte, dataLength+signatureLength)
+ if err := r.resourceUpdate.binaryPut(r.binaryData[:dataLength]); err != nil {
+ return result, err
+ }
+ }
+ hasher.Write(r.binaryData[:dataLength]) //everything except the signature.
+
+ return common.BytesToHash(hasher.Sum(nil)), nil
+}
+
+// getOwner extracts the address of the resource update signer
+func getOwner(digest common.Hash, signature Signature) (common.Address, error) {
+ pub, err := crypto.SigToPub(digest.Bytes(), signature[:])
+ if err != nil {
+ return common.Address{}, err
+ }
+ return crypto.PubkeyToAddress(*pub), nil
+}
+
+// verifyOwner checks that the signer of the update actually owns the resource
+// H(ownerAddr, metaHash) is computed. If it matches the rootAddr the update chunk is claiming
+// to update, it is proven that signer of the resource update owns the resource.
+// See metadataHash in metadata.go for a more detailed explanation
+func verifyOwner(ownerAddr common.Address, metaHash []byte, rootAddr storage.Address) bool {
+ hasher := hashPool.Get().(hash.Hash)
+ defer hashPool.Put(hasher)
+ hasher.Reset()
+ hasher.Write(metaHash)
+ hasher.Write(ownerAddr.Bytes())
+ rootAddr2 := hasher.Sum(nil)
+ return bytes.Equal(rootAddr2, rootAddr)
+}
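The ownership check in verifyOwner reduces to recomputing one hash and comparing it with the root address. A sketch of that relation using keccak256 directly (the mru handler actually draws its hasher from hashPool, so treating keccak256 as the hash here is an assumption; the metadata and owner values are placeholders):

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	metaHash := crypto.Keccak256([]byte("placeholder metadata"))
	owner := common.HexToAddress("0x00000000000000000000000000000000deadbeef")

	// rootAddr is derived as H(metaHash || ownerAddr) when the resource is created ...
	rootAddr := crypto.Keccak256(metaHash, owner.Bytes())

	// ... so an update verifies only if the same hash, recomputed from the
	// address recovered from the signature, matches the rootAddr the update points at.
	recomputed := crypto.Keccak256(metaHash, owner.Bytes())
	fmt.Println(bytes.Equal(rootAddr, recomputed)) // true
}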
diff --git a/swarm/storage/mru/testutil.go b/swarm/storage/mru/testutil.go
new file mode 100644
index 000000000..751f51af3
--- /dev/null
+++ b/swarm/storage/mru/testutil.go
@@ -0,0 +1,56 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+const (
+ testDbDirName = "mru"
+)
+
+type TestHandler struct {
+ *Handler
+}
+
+func (t *TestHandler) Close() {
+ t.chunkStore.Close()
+}
+
+// NewTestHandler creates Handler object to be used for testing purposes.
+func NewTestHandler(datadir string, params *HandlerParams) (*TestHandler, error) {
+ path := filepath.Join(datadir, testDbDirName)
+ rh, err := NewHandler(params)
+ if err != nil {
+ return nil, fmt.Errorf("resource handler create fail: %v", err)
+ }
+ localstoreparams := storage.NewDefaultLocalStoreParams()
+ localstoreparams.Init(path)
+ localStore, err := storage.NewLocalStore(localstoreparams, nil)
+ if err != nil {
+ return nil, fmt.Errorf("localstore create fail, path %s: %v", path, err)
+ }
+ localStore.Validators = append(localStore.Validators, storage.NewContentAddressValidator(storage.MakeHashFunc(resourceHashAlgorithm)))
+ localStore.Validators = append(localStore.Validators, rh)
+ netStore := storage.NewNetStore(localStore, nil)
+ rh.SetStore(netStore)
+ return &TestHandler{rh}, nil
+}
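A minimal sketch of how a test in this package might bootstrap the helper above (the temp directory prefix and test name are illustrative):

package mru

import (
	"io/ioutil"
	"os"
	"testing"
)

func TestHandlerBootstrap(t *testing.T) {
	// Throwaway datadir for the underlying LocalStore.
	datadir, err := ioutil.TempDir("", "mru-testhandler")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(datadir)

	// NewTestHandler wires the handler into a NetStore-backed chunk store.
	rh, err := NewTestHandler(datadir, &HandlerParams{})
	if err != nil {
		t.Fatal(err)
	}
	defer rh.Close()
}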
diff --git a/swarm/storage/mru/timestampprovider.go b/swarm/storage/mru/timestampprovider.go
new file mode 100644
index 000000000..f483491aa
--- /dev/null
+++ b/swarm/storage/mru/timestampprovider.go
@@ -0,0 +1,71 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+ "encoding/binary"
+ "time"
+)
+
+// TimestampProvider sets the time source of the mru package
+var TimestampProvider timestampProvider = NewDefaultTimestampProvider()
+
+// Timestamp encodes a point in time as a Unix epoch
+type Timestamp struct {
+ Time uint64 // Unix epoch timestamp, in seconds
+}
+
+// 8 bytes uint64 Time
+const timestampLength = 8
+
+// timestampProvider interface describes a source of timestamp information
+type timestampProvider interface {
+ Now() Timestamp // returns the current timestamp information
+}
+
+// binaryGet populates the timestamp structure from the given byte slice
+func (t *Timestamp) binaryGet(data []byte) error {
+ if len(data) != timestampLength {
+ return NewError(ErrCorruptData, "timestamp data has the wrong size")
+ }
+ t.Time = binary.LittleEndian.Uint64(data[:8])
+ return nil
+}
+
+// binaryPut serializes a Timestamp to a byte slice
+func (t *Timestamp) binaryPut(data []byte) error {
+ if len(data) != timestampLength {
+ return NewError(ErrCorruptData, "timestamp data has the wrong size")
+ }
+ binary.LittleEndian.PutUint64(data, t.Time)
+ return nil
+}
+
+type DefaultTimestampProvider struct {
+}
+
+// NewDefaultTimestampProvider creates a system clock based timestamp provider
+func NewDefaultTimestampProvider() *DefaultTimestampProvider {
+ return &DefaultTimestampProvider{}
+}
+
+// Now returns the current time according to this provider
+func (dtp *DefaultTimestampProvider) Now() Timestamp {
+ return Timestamp{
+ Time: uint64(time.Now().Unix()),
+ }
+}
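Timestamp serialization above is a plain 8-byte little-endian round trip; a tiny sketch of the same encoding with an arbitrary Unix time:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Same layout as Timestamp.binaryPut / binaryGet: 8 bytes, little-endian.
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, 1529574720) // arbitrary Unix time
	fmt.Println(binary.LittleEndian.Uint64(buf))   // 1529574720
}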
diff --git a/swarm/storage/mru/update.go b/swarm/storage/mru/update.go
new file mode 100644
index 000000000..88c4ac4e5
--- /dev/null
+++ b/swarm/storage/mru/update.go
@@ -0,0 +1,147 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+ "encoding/binary"
+ "errors"
+
+ "github.com/ethereum/go-ethereum/swarm/log"
+ "github.com/ethereum/go-ethereum/swarm/multihash"
+)
+
+// resourceUpdate encapsulates the information sent as part of a resource update
+type resourceUpdate struct {
+ updateHeader // metainformation about this resource update
+ data []byte // actual data payload
+}
+
+// Update chunk layout
+// Prefix:
+// 2 bytes updateHeaderLength
+// 2 bytes data length
+const chunkPrefixLength = 2 + 2
+
+// Header: (see updateHeader)
+// Data:
+// data (datalength bytes)
+//
+// Minimum size is Header + 1 (minimum data length, enforced)
+const minimumUpdateDataLength = updateHeaderLength + 1
+const maxUpdateDataLength = chunkSize - signatureLength - updateHeaderLength - chunkPrefixLength
+
+// binaryPut serializes the resource update information into the given slice
+func (r *resourceUpdate) binaryPut(serializedData []byte) error {
+ datalength := len(r.data)
+ if datalength == 0 {
+ return NewError(ErrInvalidValue, "cannot update a resource with no data")
+ }
+
+ if datalength > maxUpdateDataLength {
+ return NewErrorf(ErrInvalidValue, "data is too big (length=%d). Max length=%d", datalength, maxUpdateDataLength)
+ }
+
+ if len(serializedData) != r.binaryLength() {
+ return NewErrorf(ErrInvalidValue, "slice passed to putBinary must be of exact size. Expected %d bytes", r.binaryLength())
+ }
+
+ if r.multihash {
+ if _, _, err := multihash.GetMultihashLength(r.data); err != nil {
+ return NewError(ErrInvalidValue, "Invalid multihash")
+ }
+ }
+
+ // Add prefix: updateHeaderLength and actual data length
+ cursor := 0
+ binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(updateHeaderLength))
+ cursor += 2
+
+ // data length
+ binary.LittleEndian.PutUint16(serializedData[cursor:], uint16(datalength))
+ cursor += 2
+
+ // serialize header (see updateHeader)
+ if err := r.updateHeader.binaryPut(serializedData[cursor : cursor+updateHeaderLength]); err != nil {
+ return err
+ }
+ cursor += updateHeaderLength
+
+ // add the data
+ copy(serializedData[cursor:], r.data)
+ cursor += datalength
+
+ return nil
+}
+
+// binaryLength returns the expected number of bytes this structure will take to encode
+func (r *resourceUpdate) binaryLength() int {
+ return chunkPrefixLength + updateHeaderLength + len(r.data)
+}
+
+// binaryGet populates this instance from the information contained in the passed byte slice
+func (r *resourceUpdate) binaryGet(serializedData []byte) error {
+ if len(serializedData) < minimumUpdateDataLength {
+ return NewErrorf(ErrNothingToReturn, "chunk less than %d bytes cannot be a resource update chunk", minimumUpdateDataLength)
+ }
+ cursor := 0
+ declaredHeaderlength := binary.LittleEndian.Uint16(serializedData[cursor : cursor+2])
+ if declaredHeaderlength != updateHeaderLength {
+ return NewErrorf(ErrCorruptData, "Invalid header length. Expected %d, got %d", updateHeaderLength, declaredHeaderlength)
+ }
+
+ cursor += 2
+ datalength := int(binary.LittleEndian.Uint16(serializedData[cursor : cursor+2]))
+ cursor += 2
+
+ if chunkPrefixLength+updateHeaderLength+datalength+signatureLength != len(serializedData) {
+ return NewError(ErrNothingToReturn, "length specified in header is different than actual chunk size")
+ }
+
+ // at this point we can be satisfied that we have the correct data length to read
+ if err := r.updateHeader.binaryGet(serializedData[cursor : cursor+updateHeaderLength]); err != nil {
+ return err
+ }
+ cursor += updateHeaderLength
+
+ data := serializedData[cursor : cursor+datalength]
+ cursor += datalength
+
+ // if multihash content is indicated we check the validity of the multihash
+ if r.updateHeader.multihash {
+ mhLength, mhHeaderLength, err := multihash.GetMultihashLength(data)
+ if err != nil {
+ log.Error("multihash parse error", "err", err)
+ return err
+ }
+ if datalength != mhLength+mhHeaderLength {
+ log.Debug("multihash error", "datalength", datalength, "mhLength", mhLength, "mhHeaderLength", mhHeaderLength)
+ return errors.New("Corrupt multihash data")
+ }
+ }
+
+ // now that all checks have passed, copy data into structure
+ r.data = make([]byte, datalength)
+ copy(r.data, data)
+
+ return nil
+
+}
+
+// Multihash specifies whether the resource data should be interpreted as multihash
+func (r *resourceUpdate) Multihash() bool {
+ return r.multihash
+}
diff --git a/swarm/storage/mru/update_test.go b/swarm/storage/mru/update_test.go
new file mode 100644
index 000000000..51e9d2fcc
--- /dev/null
+++ b/swarm/storage/mru/update_test.go
@@ -0,0 +1,72 @@
+package mru
+
+import (
+ "bytes"
+ "testing"
+)
+
+const serializedUpdateHex = "0x490034004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf000456c20717565206c6565206d7563686f207920616e6461206d7563686f2c207665206d7563686f20792073616265206d7563686f"
+const serializedUpdateMultihashHex = "0x490022004f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf0011b200102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1c1e1f20"
+
+func getTestResourceUpdate() *resourceUpdate {
+ return &resourceUpdate{
+ updateHeader: *getTestUpdateHeader(false),
+ data: []byte("El que lee mucho y anda mucho, ve mucho y sabe mucho"),
+ }
+}
+
+func getTestResourceUpdateMultihash() *resourceUpdate {
+ return &resourceUpdate{
+ updateHeader: *getTestUpdateHeader(true),
+ data: []byte{0x1b, 0x20, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 28, 30, 31, 32},
+ }
+}
+
+func compareResourceUpdate(a, b *resourceUpdate) bool {
+ return compareUpdateHeader(&a.updateHeader, &b.updateHeader) &&
+ bytes.Equal(a.data, b.data)
+}
+
+func TestResourceUpdateSerializer(t *testing.T) {
+ var serializedUpdateLength = len(serializedUpdateHex)/2 - 1 // hack to calculate the byte length out of the hex representation
+ update := getTestResourceUpdate()
+ serializedUpdate := make([]byte, serializedUpdateLength)
+ if err := update.binaryPut(serializedUpdate); err != nil {
+ t.Fatal(err)
+ }
+ compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateHex)
+
+ // Test fail if update does not contain data
+ update.data = nil
+ if err := update.binaryPut(serializedUpdate); err == nil {
+ t.Fatal("Expected resourceUpdate.binaryPut to fail since update does not contain data")
+ }
+
+ // Test fail if update is too big
+ update.data = make([]byte, 10000)
+ if err := update.binaryPut(serializedUpdate); err == nil {
+ t.Fatal("Expected resourceUpdate.binaryPut to fail since update is too big")
+ }
+
+ // Test fail if passed slice is not of the exact size required for this update
+ update.data = make([]byte, 1)
+ if err := update.binaryPut(serializedUpdate); err == nil {
+ t.Fatal("Expected resourceUpdate.binaryPut to fail since passed slice is not of the appropriate size")
+ }
+
+ // Test serializing a multihash update
+ var serializedUpdateMultihashLength = len(serializedUpdateMultihashHex)/2 - 1 // hack to calculate the byte length out of the hex representation
+ update = getTestResourceUpdateMultihash()
+ serializedUpdate = make([]byte, serializedUpdateMultihashLength)
+ if err := update.binaryPut(serializedUpdate); err != nil {
+ t.Fatal(err)
+ }
+ compareByteSliceToExpectedHex(t, "serializedUpdate", serializedUpdate, serializedUpdateMultihashHex)
+
+ // mess with the multihash to test it fails with a wrong multihash error
+ update.data[1] = 79
+ if err := update.binaryPut(serializedUpdate); err == nil {
+ t.Fatal("Expected resourceUpdate.binaryPut to fail since data contains an invalid multihash")
+ }
+
+}
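A quick runnable illustration of the len(hex)/2 - 1 calculation used twice above: every byte is two hex characters and the -1 cancels the two-character 0x prefix. hexutil is an existing go-ethereum package; the sample constant is arbitrary.

    package main

    import (
    	"fmt"

    	"github.com/ethereum/go-ethereum/common/hexutil"
    )

    func main() {
    	const h = "0x490034004f" // any 0x-prefixed hex string
    	decoded, err := hexutil.Decode(h)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(len(h)/2-1, len(decoded)) // both print 5
    }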
diff --git a/swarm/storage/mru/updateheader.go b/swarm/storage/mru/updateheader.go
new file mode 100644
index 000000000..3ac20c189
--- /dev/null
+++ b/swarm/storage/mru/updateheader.go
@@ -0,0 +1,88 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package mru
+
+import (
+ "github.com/ethereum/go-ethereum/swarm/storage"
+)
+
+// updateHeader models the non-payload components of a Resource Update
+type updateHeader struct {
+ UpdateLookup // UpdateLookup contains the information required to locate this resource (components of the search key used to find it)
+ multihash bool // Whether the data in this Resource Update should be interpreted as multihash
+ metaHash []byte // SHA3 hash of the metadata chunk (less ownerAddr). Used to prove ownership of the resource.
+}
+
+const metaHashLength = storage.KeyLength
+
+// updateLookupLength bytes
+// 32 bytes metaHash
+// 1 byte flags (multihash bool for now)
+const updateHeaderLength = updateLookupLength + 1 + metaHashLength
+
+// binaryPut serializes the resource header information into the given slice
+func (h *updateHeader) binaryPut(serializedData []byte) error {
+ if len(serializedData) != updateHeaderLength {
+ return NewErrorf(ErrInvalidValue, "Incorrect slice size to serialize updateHeaderLength. Expected %d, got %d", updateHeaderLength, len(serializedData))
+ }
+ if len(h.metaHash) != metaHashLength {
+ return NewError(ErrInvalidValue, "updateHeader.binaryPut called without metaHash set")
+ }
+ if err := h.UpdateLookup.binaryPut(serializedData[:updateLookupLength]); err != nil {
+ return err
+ }
+ cursor := updateLookupLength
+ copy(serializedData[cursor:], h.metaHash[:metaHashLength])
+ cursor += metaHashLength
+
+ var flags byte
+ if h.multihash {
+ flags |= 0x01
+ }
+
+ serializedData[cursor] = flags
+ cursor++
+
+ return nil
+}
+
+// binaryLength returns the expected size of this structure when serialized
+func (h *updateHeader) binaryLength() int {
+ return updateHeaderLength
+}
+
+// binaryGet restores the current updateHeader instance from the information contained in the passed slice
+func (h *updateHeader) binaryGet(serializedData []byte) error {
+ if len(serializedData) != updateHeaderLength {
+ return NewErrorf(ErrInvalidValue, "Incorrect slice size to read updateHeaderLength. Expected %d, got %d", updateHeaderLength, len(serializedData))
+ }
+
+ if err := h.UpdateLookup.binaryGet(serializedData[:updateLookupLength]); err != nil {
+ return err
+ }
+ cursor := updateLookupLength
+ h.metaHash = make([]byte, metaHashLength)
+ copy(h.metaHash, serializedData[cursor:cursor+metaHashLength])
+ cursor += metaHashLength
+
+ flags := serializedData[cursor]
+ cursor++
+
+ h.multihash = flags&0x01 != 0
+
+ return nil
+}
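As a side note, a minimal sketch of the single-byte flags field serialized above: bit 0 carries the multihash indicator and the remaining bits are currently unused. The helper names are illustrative only, not part of this change.

    package main

    import "fmt"

    // packFlags mirrors the write side of the flags byte in updateHeader.binaryPut:
    // bit 0 carries the multihash indicator, the remaining bits stay zero.
    func packFlags(multihash bool) byte {
    	var flags byte
    	if multihash {
    		flags |= 0x01
    	}
    	return flags
    }

    // unpackFlags mirrors the read side in updateHeader.binaryGet.
    func unpackFlags(flags byte) bool {
    	return flags&0x01 != 0
    }

    func main() {
    	fmt.Println(unpackFlags(packFlags(true)), unpackFlags(packFlags(false))) // true false
    }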
diff --git a/swarm/storage/mru/updateheader_test.go b/swarm/storage/mru/updateheader_test.go
new file mode 100644
index 000000000..b1f505989
--- /dev/null
+++ b/swarm/storage/mru/updateheader_test.go
@@ -0,0 +1,64 @@
+package mru
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common/hexutil"
+)
+
+const serializedUpdateHeaderMultihashHex = "0x4f000000da070000fb0ed7efa696bdb0b54cd75554cc3117ffc891454317df7dd6fefad978e2f2fbf74a10ce8f26ffc8bfaa07c3031a34b2c61f517955e7deb1592daccf96c69cf001"
+
+func getTestUpdateHeader(multihash bool) (header *updateHeader) {
+ _, metaHash, _, _ := getTestMetadata().serializeAndHash()
+ return &updateHeader{
+ UpdateLookup: *getTestUpdateLookup(),
+ multihash: multihash,
+ metaHash: metaHash,
+ }
+}
+
+func compareUpdateHeader(a, b *updateHeader) bool {
+ return compareUpdateLookup(&a.UpdateLookup, &b.UpdateLookup) &&
+ a.multihash == b.multihash &&
+ bytes.Equal(a.metaHash, b.metaHash)
+}
+
+func TestUpdateHeaderSerializer(t *testing.T) {
+ header := getTestUpdateHeader(true)
+ serializedHeader := make([]byte, updateHeaderLength)
+ if err := header.binaryPut(serializedHeader); err != nil {
+ t.Fatal(err)
+ }
+ compareByteSliceToExpectedHex(t, "serializedHeader", serializedHeader, serializedUpdateHeaderMultihashHex)
+
+ // trigger incorrect slice length error by passing a slice that is 1 byte too big
+ if err := header.binaryPut(make([]byte, updateHeaderLength+1)); err == nil {
+ t.Fatal("Expected updateHeader.binaryPut to fail since supplied slice is of incorrect length")
+ }
+
+ // trigger invalid metaHash error
+ header.metaHash = nil
+ if err := header.binaryPut(serializedHeader); err == nil {
+ t.Fatal("Expected updateHeader.binaryPut to fail metaHash is of incorrect length")
+ }
+}
+
+func TestUpdateHeaderDeserializer(t *testing.T) {
+ originalUpdate := getTestUpdateHeader(true)
+ serializedData, _ := hexutil.Decode(serializedUpdateHeaderMultihashHex)
+ var retrievedUpdate updateHeader
+ if err := retrievedUpdate.binaryGet(serializedData); err != nil {
+ t.Fatal(err)
+ }
+ if !compareUpdateHeader(originalUpdate, &retrievedUpdate) {
+ t.Fatalf("Expected deserialized structure to equal the original")
+ }
+
+ // mess with source slice to test length checks
+ serializedData = []byte{1, 2, 3}
+ if err := retrievedUpdate.binaryGet(serializedData); err == nil {
+ t.Fatal("Expected retrievedUpdate.binaryGet, since passed slice is too small")
+ }
+
+}
diff --git a/swarm/swarm.go b/swarm/swarm.go
index bf8bcdbd5..db7d2dfed 100644
--- a/swarm/swarm.go
+++ b/swarm/swarm.go
@@ -192,25 +192,8 @@ func NewSwarm(config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err e
self.fileStore = storage.NewFileStore(netStore, self.config.FileStoreParams)
var resourceHandler *mru.Handler
- rhparams := &mru.HandlerParams{
- // TODO: config parameter to set limits
- QueryMaxPeriods: &mru.LookupParams{
- Limit: false,
- },
- Signer: &mru.GenericSigner{
- PrivKey: self.privateKey,
- },
- }
- if resolver != nil {
- resolver.SetNameHash(ens.EnsNode)
- // Set HeaderGetter and OwnerValidator interfaces to resolver only if it is not nil.
- rhparams.HeaderGetter = resolver
- rhparams.OwnerValidator = resolver
- } else {
- log.Warn("No ETH API specified, resource updates will use block height approximation")
- // TODO: blockestimator should use saved values derived from last time ethclient was connected
- rhparams.HeaderGetter = mru.NewBlockEstimator()
- }
+ rhparams := &mru.HandlerParams{}
+
resourceHandler, err = mru.NewHandler(rhparams)
if err != nil {
return nil, err
diff --git a/swarm/testutil/http.go b/swarm/testutil/http.go
index debf0b14b..238f78308 100644
--- a/swarm/testutil/http.go
+++ b/swarm/testutil/http.go
@@ -17,15 +17,12 @@
package testutil
import (
- "context"
"io/ioutil"
- "math/big"
"net/http"
"net/http/httptest"
"os"
"testing"
- "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/mru"
@@ -35,16 +32,17 @@ type TestServer interface {
ServeHTTP(http.ResponseWriter, *http.Request)
}
-type fakeBackend struct {
- blocknumber int64
+// simulated timeProvider
+type fakeTimeProvider struct {
+ currentTime uint64
}
-func (f *fakeBackend) HeaderByNumber(context context.Context, _ string, bigblock *big.Int) (*types.Header, error) {
- f.blocknumber++
- biggie := big.NewInt(f.blocknumber)
- return &types.Header{
- Number: biggie,
- }, nil
+func (f *fakeTimeProvider) Tick() {
+ f.currentTime++
+}
+
+func (f *fakeTimeProvider) Now() mru.Timestamp {
+ return mru.Timestamp{Time: f.currentTime}
}
func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *TestSwarmServer {
@@ -68,24 +66,25 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *Tes
if err != nil {
t.Fatal(err)
}
- rhparams := &mru.HandlerParams{
- QueryMaxPeriods: &mru.LookupParams{},
- HeaderGetter: &fakeBackend{
- blocknumber: 42,
- },
+
+ fakeTimeProvider := &fakeTimeProvider{
+ currentTime: 42,
}
+ mru.TimestampProvider = fakeTimeProvider
+ rhparams := &mru.HandlerParams{}
rh, err := mru.NewTestHandler(resourceDir, rhparams)
if err != nil {
t.Fatal(err)
}
- a := api.NewAPI(fileStore, nil, rh)
+ a := api.NewAPI(fileStore, nil, rh.Handler)
srv := httptest.NewServer(serverFunc(a))
return &TestSwarmServer{
- Server: srv,
- FileStore: fileStore,
- dir: dir,
- Hasher: storage.MakeHashFunc(storage.DefaultHash)(),
+ Server: srv,
+ FileStore: fileStore,
+ dir: dir,
+ Hasher: storage.MakeHashFunc(storage.DefaultHash)(),
+ timestampProvider: fakeTimeProvider,
cleanup: func() {
srv.Close()
rh.Close()
@@ -97,12 +96,17 @@ func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer) *Tes
type TestSwarmServer struct {
*httptest.Server
- Hasher storage.SwarmHash
- FileStore *storage.FileStore
- dir string
- cleanup func()
+ Hasher storage.SwarmHash
+ FileStore *storage.FileStore
+ dir string
+ cleanup func()
+ timestampProvider *fakeTimeProvider
}
func (t *TestSwarmServer) Close() {
t.cleanup()
}
+
+func (t *TestSwarmServer) GetCurrentTime() mru.Timestamp {
+ return t.timestampProvider.Now()
+}
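Finally, a hedged usage sketch of the simulated clock introduced here. It is only valid inside the testutil package, since the timestampProvider field is unexported, and the helper name is hypothetical rather than part of this change.

    package testutil

    import "github.com/ethereum/go-ethereum/swarm/storage/mru"

    // advanceClock is a hypothetical helper showing how a test in this package
    // could drive the fake clock: Tick bumps the simulated time by one unit and
    // GetCurrentTime reports it as an mru.Timestamp.
    func advanceClock(srv *TestSwarmServer) mru.Timestamp {
    	srv.timestampProvider.Tick()
    	return srv.GetCurrentTime() // Timestamp.Time is now one higher than before
    }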