path: root/cmd
author     ethersphere <thesw@rm.eth>  2018-06-20 20:06:27 +0800
committer  ethersphere <thesw@rm.eth>  2018-06-22 03:10:31 +0800
commit     e187711c6545487d4cac3701f0f506bb536234e2 (patch)
tree       d2f6150f70b84b36e49a449082aeda267b4b9046 /cmd
parent     574378edb50c907b532946a1d4654dbd6701b20a (diff)
swarm: network rewrite merge
Diffstat (limited to 'cmd')
-rw-r--r--  cmd/p2psim/main.go                          5
-rw-r--r--  cmd/swarm/config.go                       109
-rw-r--r--  cmd/swarm/config_test.go                  129
-rw-r--r--  cmd/swarm/db.go                            28
-rw-r--r--  cmd/swarm/download.go                      85
-rw-r--r--  cmd/swarm/export_test.go                  139
-rw-r--r--  cmd/swarm/fs.go                           127
-rw-r--r--  cmd/swarm/fs_test.go                      234
-rw-r--r--  cmd/swarm/hash.go                           6
-rw-r--r--  cmd/swarm/main.go                         321
-rw-r--r--  cmd/swarm/manifest.go                      14
-rw-r--r--  cmd/swarm/run_test.go                     132
-rw-r--r--  cmd/swarm/swarm-smoke/main.go             101
-rw-r--r--  cmd/swarm/swarm-smoke/upload_and_sync.go  184
-rw-r--r--  cmd/swarm/upload.go                         9
-rw-r--r--  cmd/swarm/upload_test.go                  243
16 files changed, 1561 insertions, 305 deletions
diff --git a/cmd/p2psim/main.go b/cmd/p2psim/main.go
index 0c8ed038d..d32c29863 100644
--- a/cmd/p2psim/main.go
+++ b/cmd/p2psim/main.go
@@ -275,9 +275,8 @@ func createNode(ctx *cli.Context) error {
if len(ctx.Args()) != 0 {
return cli.ShowCommandHelp(ctx, ctx.Command.Name)
}
- config := &adapters.NodeConfig{
- Name: ctx.String("name"),
- }
+ config := adapters.RandomNodeConfig()
+ config.Name = ctx.String("name")
if key := ctx.String("key"); key != "" {
privKey, err := crypto.HexToECDSA(key)
if err != nil {
diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go
index adac772ba..64c37a0b5 100644
--- a/cmd/swarm/config.go
+++ b/cmd/swarm/config.go
@@ -24,6 +24,7 @@ import (
"reflect"
"strconv"
"strings"
+ "time"
"unicode"
cli "gopkg.in/urfave/cli.v1"
@@ -37,6 +38,8 @@ import (
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
)
+const SWARM_VERSION = "0.3"
+
var (
//flag definition for the dumpconfig command
DumpConfigCommand = cli.Command{
@@ -58,19 +61,25 @@ var (
//constants for environment variables
const (
- SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
- SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT"
- SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR"
- SWARM_ENV_PORT = "SWARM_PORT"
- SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID"
- SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE"
- SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
- SWARM_ENV_SYNC_ENABLE = "SWARM_SYNC_ENABLE"
- SWARM_ENV_ENS_API = "SWARM_ENS_API"
- SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
- SWARM_ENV_CORS = "SWARM_CORS"
- SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES"
- GETH_ENV_DATADIR = "GETH_DATADIR"
+ SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
+ SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT"
+ SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR"
+ SWARM_ENV_PORT = "SWARM_PORT"
+ SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID"
+ SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE"
+ SWARM_ENV_SWAP_API = "SWARM_SWAP_API"
+ SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE"
+ SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY"
+ SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK"
+ SWARM_ENV_ENS_API = "SWARM_ENS_API"
+ SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR"
+ SWARM_ENV_CORS = "SWARM_CORS"
+ SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES"
+ SWARM_ENV_PSS_ENABLE = "SWARM_PSS_ENABLE"
+ SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH"
+ SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY"
+ SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
+ GETH_ENV_DATADIR = "GETH_DATADIR"
)
// These settings ensure that TOML keys use the same names as Go struct fields.
@@ -92,10 +101,8 @@ var tomlSettings = toml.Config{
//before booting the swarm node, build the configuration
func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) {
- //check for deprecated flags
- checkDeprecated(ctx)
//start by creating a default config
- config = bzzapi.NewDefaultConfig()
+ config = bzzapi.NewConfig()
//first load settings from config file (if provided)
config, err = configFileOverride(config, ctx)
if err != nil {
@@ -168,7 +175,7 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" {
if id, _ := strconv.Atoi(networkid); id != 0 {
- currentConfig.NetworkId = uint64(id)
+ currentConfig.NetworkID = uint64(id)
}
}
@@ -191,12 +198,20 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
currentConfig.SwapEnabled = true
}
- if ctx.GlobalIsSet(SwarmSyncEnabledFlag.Name) {
- currentConfig.SyncEnabled = true
+ if ctx.GlobalIsSet(SwarmSyncDisabledFlag.Name) {
+ currentConfig.SyncEnabled = false
+ }
+
+ if d := ctx.GlobalDuration(SwarmSyncUpdateDelay.Name); d > 0 {
+ currentConfig.SyncUpdateDelay = d
}
- currentConfig.SwapApi = ctx.GlobalString(SwarmSwapAPIFlag.Name)
- if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
+ if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) {
+ currentConfig.DeliverySkipCheck = true
+ }
+
+ currentConfig.SwapAPI = ctx.GlobalString(SwarmSwapAPIFlag.Name)
+ if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
}
@@ -209,10 +224,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
currentConfig.EnsAPIs = ensAPIs
}
- if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" {
- currentConfig.EnsRoot = common.HexToAddress(ensaddr)
- }
-
if cors := ctx.GlobalString(CorsStringFlag.Name); cors != "" {
currentConfig.Cors = cors
}
@@ -221,6 +232,18 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name)
}
+ if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
+ currentConfig.LocalStoreParams.ChunkDbPath = storePath
+ }
+
+ if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
+ currentConfig.LocalStoreParams.DbCapacity = storeCapacity
+ }
+
+ if storeCacheCapacity := ctx.GlobalUint(SwarmStoreCacheCapacity.Name); storeCacheCapacity != 0 {
+ currentConfig.LocalStoreParams.CacheCapacity = storeCacheCapacity
+ }
+
return currentConfig
}
@@ -239,7 +262,7 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" {
if id, _ := strconv.Atoi(networkid); id != 0 {
- currentConfig.NetworkId = uint64(id)
+ currentConfig.NetworkID = uint64(id)
}
}
@@ -262,17 +285,29 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
}
}
- if syncenable := os.Getenv(SWARM_ENV_SYNC_ENABLE); syncenable != "" {
- if sync, err := strconv.ParseBool(syncenable); err != nil {
- currentConfig.SyncEnabled = sync
+ if syncdisable := os.Getenv(SWARM_ENV_SYNC_DISABLE); syncdisable != "" {
+ if sync, err := strconv.ParseBool(syncdisable); err != nil {
+ currentConfig.SyncEnabled = !sync
+ }
+ }
+
+ if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" {
+ if skipCheck, err := strconv.ParseBool(v); err != nil {
+ currentConfig.DeliverySkipCheck = skipCheck
+ }
+ }
+
+ if v := os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" {
+ if d, err := time.ParseDuration(v); err != nil {
+ currentConfig.SyncUpdateDelay = d
}
}
if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {
- currentConfig.SwapApi = swapapi
+ currentConfig.SwapAPI = swapapi
}
- if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
+ if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
}
@@ -312,18 +347,6 @@ func dumpConfig(ctx *cli.Context) error {
return nil
}
-//deprecated flags checked here
-func checkDeprecated(ctx *cli.Context) {
- // exit if the deprecated --ethapi flag is set
- if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
- utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
- }
- // warn if --ens-api flag is set
- if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" {
- log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.")
- }
-}
-
//validate configuration parameters
func validateConfig(cfg *bzzapi.Config) (err error) {
for _, ensAPI := range cfg.EnsAPIs {
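
For orientation (not part of the commit): the new environment overrides above rely only on standard-library parsing. A minimal, self-contained sketch of that pattern, applying a value only when it parses cleanly; the variable names are illustrative:

    package main

    import (
        "fmt"
        "os"
        "strconv"
        "time"
    )

    func main() {
        // mirrors the SWARM_SYNC_DISABLE / SWARM_ENV_SYNC_UPDATE_DELAY handling above
        syncEnabled := true
        if v := os.Getenv("SWARM_SYNC_DISABLE"); v != "" {
            if disable, err := strconv.ParseBool(v); err == nil {
                syncEnabled = !disable
            }
        }

        var syncUpdateDelay time.Duration
        if v := os.Getenv("SWARM_ENV_SYNC_UPDATE_DELAY"); v != "" {
            if d, err := time.ParseDuration(v); err == nil {
                syncUpdateDelay = d
            }
        }

        fmt.Println("sync enabled:", syncEnabled, "sync update delay:", syncUpdateDelay)
    }
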
diff --git a/cmd/swarm/config_test.go b/cmd/swarm/config_test.go
index 9bf584f50..d5011e3a7 100644
--- a/cmd/swarm/config_test.go
+++ b/cmd/swarm/config_test.go
@@ -34,7 +34,7 @@ import (
func TestDumpConfig(t *testing.T) {
swarm := runSwarm(t, "dumpconfig")
- defaultConf := api.NewDefaultConfig()
+ defaultConf := api.NewConfig()
out, err := tomlSettings.Marshal(&defaultConf)
if err != nil {
t.Fatal(err)
@@ -43,7 +43,7 @@ func TestDumpConfig(t *testing.T) {
swarm.ExpectExit()
}
-func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
+func TestConfigFailsSwapEnabledNoSwapApi(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -55,7 +55,7 @@ func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
swarm.ExpectExit()
}
-func TestFailsNoBzzAccount(t *testing.T) {
+func TestConfigFailsNoBzzAccount(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -66,7 +66,7 @@ func TestFailsNoBzzAccount(t *testing.T) {
swarm.ExpectExit()
}
-func TestCmdLineOverrides(t *testing.T) {
+func TestConfigCmdLineOverrides(t *testing.T) {
dir, err := ioutil.TempDir("", "bzztest")
if err != nil {
t.Fatal(err)
@@ -85,9 +85,10 @@ func TestCmdLineOverrides(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
- fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
+ fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
fmt.Sprintf("--%s", CorsStringFlag.Name), "*",
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
+ fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
"--datadir", dir,
"--ipcpath", conf.IPCPath,
@@ -120,12 +121,16 @@ func TestCmdLineOverrides(t *testing.T) {
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
}
- if info.NetworkId != 42 {
- t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkId)
+ if info.NetworkID != 42 {
+ t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkID)
}
- if !info.SyncEnabled {
- t.Fatal("Expected Sync to be enabled, but is false")
+ if info.SyncEnabled {
+ t.Fatal("Expected Sync to be disabled, but is true")
+ }
+
+ if !info.DeliverySkipCheck {
+ t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
}
if info.Cors != "*" {
@@ -135,7 +140,7 @@ func TestCmdLineOverrides(t *testing.T) {
node.Shutdown()
}
-func TestFileOverrides(t *testing.T) {
+func TestConfigFileOverrides(t *testing.T) {
// assign ports
httpPort, err := assignTCPPort()
@@ -145,16 +150,16 @@ func TestFileOverrides(t *testing.T) {
//create a config file
//first, create a default conf
- defaultConf := api.NewDefaultConfig()
+ defaultConf := api.NewConfig()
//change some values in order to test if they have been loaded
- defaultConf.SyncEnabled = true
- defaultConf.NetworkId = 54
+ defaultConf.SyncEnabled = false
+ defaultConf.DeliverySkipCheck = true
+ defaultConf.NetworkID = 54
defaultConf.Port = httpPort
- defaultConf.StoreParams.DbCapacity = 9000000
- defaultConf.ChunkerParams.Branches = 64
- defaultConf.HiveParams.CallInterval = 6000000000
+ defaultConf.DbCapacity = 9000000
+ defaultConf.HiveParams.KeepAliveInterval = 6000000000
defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
- defaultConf.SyncParams.KeyBufferSize = 512
+ //defaultConf.SyncParams.KeyBufferSize = 512
//create a TOML string
out, err := tomlSettings.Marshal(&defaultConf)
if err != nil {
@@ -215,38 +220,38 @@ func TestFileOverrides(t *testing.T) {
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
}
- if info.NetworkId != 54 {
- t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+ if info.NetworkID != 54 {
+ t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
}
- if !info.SyncEnabled {
- t.Fatal("Expected Sync to be enabled, but is false")
+ if info.SyncEnabled {
+ t.Fatal("Expected Sync to be disabled, but is true")
}
- if info.StoreParams.DbCapacity != 9000000 {
- t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+ if !info.DeliverySkipCheck {
+ t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
}
- if info.ChunkerParams.Branches != 64 {
- t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
+ if info.DbCapacity != 9000000 {
+ t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
}
- if info.HiveParams.CallInterval != 6000000000 {
- t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
+ if info.HiveParams.KeepAliveInterval != 6000000000 {
+ t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
}
if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
}
- if info.SyncParams.KeyBufferSize != 512 {
- t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
- }
+ // if info.SyncParams.KeyBufferSize != 512 {
+ // t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
+ // }
node.Shutdown()
}
-func TestEnvVars(t *testing.T) {
+func TestConfigEnvVars(t *testing.T) {
// assign ports
httpPort, err := assignTCPPort()
if err != nil {
@@ -257,7 +262,8 @@ func TestEnvVars(t *testing.T) {
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmPortFlag.EnvVar, httpPort))
envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmNetworkIdFlag.EnvVar, "999"))
envVars = append(envVars, fmt.Sprintf("%s=%s", CorsStringFlag.EnvVar, "*"))
- envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncEnabledFlag.EnvVar, "true"))
+ envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncDisabledFlag.EnvVar, "true"))
+ envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmDeliverySkipCheckFlag.EnvVar, "true"))
dir, err := ioutil.TempDir("", "bzztest")
if err != nil {
@@ -326,23 +332,27 @@ func TestEnvVars(t *testing.T) {
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
}
- if info.NetworkId != 999 {
- t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkId)
+ if info.NetworkID != 999 {
+ t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkID)
}
if info.Cors != "*" {
t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors)
}
- if !info.SyncEnabled {
- t.Fatal("Expected Sync to be enabled, but is false")
+ if info.SyncEnabled {
+ t.Fatal("Expected Sync to be disabled, but is true")
+ }
+
+ if !info.DeliverySkipCheck {
+ t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
}
node.Shutdown()
cmd.Process.Kill()
}
-func TestCmdLineOverridesFile(t *testing.T) {
+func TestConfigCmdLineOverridesFile(t *testing.T) {
// assign ports
httpPort, err := assignTCPPort()
@@ -352,26 +362,27 @@ func TestCmdLineOverridesFile(t *testing.T) {
//create a config file
//first, create a default conf
- defaultConf := api.NewDefaultConfig()
+ defaultConf := api.NewConfig()
//change some values in order to test if they have been loaded
- defaultConf.SyncEnabled = false
- defaultConf.NetworkId = 54
+ defaultConf.SyncEnabled = true
+ defaultConf.NetworkID = 54
defaultConf.Port = "8588"
- defaultConf.StoreParams.DbCapacity = 9000000
- defaultConf.ChunkerParams.Branches = 64
- defaultConf.HiveParams.CallInterval = 6000000000
+ defaultConf.DbCapacity = 9000000
+ defaultConf.HiveParams.KeepAliveInterval = 6000000000
defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
- defaultConf.SyncParams.KeyBufferSize = 512
+ //defaultConf.SyncParams.KeyBufferSize = 512
//create a TOML file
out, err := tomlSettings.Marshal(&defaultConf)
if err != nil {
t.Fatalf("Error creating TOML file in TestFileOverride: %v", err)
}
//write file
- f, err := ioutil.TempFile("", "testconfig.toml")
+ fname := "testconfig.toml"
+ f, err := ioutil.TempFile("", fname)
if err != nil {
t.Fatalf("Error writing TOML file in TestFileOverride: %v", err)
}
+ defer os.Remove(fname)
//write file
_, err = f.WriteString(string(out))
if err != nil {
@@ -392,7 +403,7 @@ func TestCmdLineOverridesFile(t *testing.T) {
flags := []string{
fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "77",
fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
- fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
+ fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
"--ens-api", "",
@@ -427,33 +438,29 @@ func TestCmdLineOverridesFile(t *testing.T) {
t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
}
- if info.NetworkId != expectNetworkId {
- t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkId)
+ if info.NetworkID != expectNetworkId {
+ t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkID)
}
- if !info.SyncEnabled {
- t.Fatal("Expected Sync to be enabled, but is false")
+ if info.SyncEnabled {
+ t.Fatal("Expected Sync to be disabled, but is true")
}
- if info.StoreParams.DbCapacity != 9000000 {
- t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+ if info.LocalStoreParams.DbCapacity != 9000000 {
+ t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.LocalStoreParams.DbCapacity)
}
- if info.ChunkerParams.Branches != 64 {
- t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
- }
-
- if info.HiveParams.CallInterval != 6000000000 {
- t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
+ if info.HiveParams.KeepAliveInterval != 6000000000 {
+ t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
}
if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
}
- if info.SyncParams.KeyBufferSize != 512 {
- t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
- }
+ // if info.SyncParams.KeyBufferSize != 512 {
+ // t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
+ // }
node.Shutdown()
}
diff --git a/cmd/swarm/db.go b/cmd/swarm/db.go
index dfd2d069b..fe03f2d16 100644
--- a/cmd/swarm/db.go
+++ b/cmd/swarm/db.go
@@ -23,6 +23,7 @@ import (
"path/filepath"
"github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/storage"
"gopkg.in/urfave/cli.v1"
@@ -30,11 +31,11 @@ import (
func dbExport(ctx *cli.Context) {
args := ctx.Args()
- if len(args) != 2 {
- utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to write the tar archive to, - for stdout)")
+ if len(args) != 3 {
+ utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key")
}
- store, err := openDbStore(args[0])
+ store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
@@ -62,11 +63,11 @@ func dbExport(ctx *cli.Context) {
func dbImport(ctx *cli.Context) {
args := ctx.Args()
- if len(args) != 2 {
- utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to read the tar archive from, - for stdin)")
+ if len(args) != 3 {
+ utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key")
}
- store, err := openDbStore(args[0])
+ store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
@@ -94,11 +95,11 @@ func dbImport(ctx *cli.Context) {
func dbClean(ctx *cli.Context) {
args := ctx.Args()
- if len(args) != 1 {
- utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database)")
+ if len(args) != 2 {
+ utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database) and the base key")
}
- store, err := openDbStore(args[0])
+ store, err := openLDBStore(args[0], common.Hex2Bytes(args[1]))
if err != nil {
utils.Fatalf("error opening local chunk database: %s", err)
}
@@ -107,10 +108,13 @@ func dbClean(ctx *cli.Context) {
store.Cleanup()
}
-func openDbStore(path string) (*storage.DbStore, error) {
+func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
return nil, fmt.Errorf("invalid chunkdb path: %s", err)
}
- hash := storage.MakeHashFunc("SHA3")
- return storage.NewDbStore(path, hash, 10000000, 0)
+
+ storeparams := storage.NewDefaultStoreParams()
+ ldbparams := storage.NewLDBStoreParams(storeparams, path)
+ ldbparams.BaseKey = basekey
+ return storage.NewLDBStore(ldbparams)
}
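
Illustrative sketch (not part of the commit): the extra base-key argument that `db export`, `db import` and `db clean` now take is the node's bzz key without its 0x prefix, which is how the export/import test below derives it. A minimal sketch that reads it from a running node over RPC, assuming a placeholder IPC path:

    package main

    import (
        "fmt"
        "strings"

        "github.com/ethereum/go-ethereum/rpc"
        "github.com/ethereum/go-ethereum/swarm"
    )

    // baseKeyArg fetches bzz_info from a running node and strips the 0x prefix,
    // producing the extra argument expected by the db commands above.
    func baseKeyArg(ipcEndpoint string) (string, error) {
        client, err := rpc.Dial(ipcEndpoint)
        if err != nil {
            return "", err
        }
        defer client.Close()

        var info swarm.Info
        if err := client.Call(&info, "bzz_info"); err != nil {
            return "", err
        }
        return strings.TrimPrefix(info.BzzKey, "0x"), nil
    }

    func main() {
        key, err := baseKeyArg("/path/to/bzzd.ipc") // placeholder path
        if err != nil {
            panic(err)
        }
        fmt.Println("base key for swarm db export/import:", key)
    }
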
diff --git a/cmd/swarm/download.go b/cmd/swarm/download.go
new file mode 100644
index 000000000..c2418f744
--- /dev/null
+++ b/cmd/swarm/download.go
@@ -0,0 +1,85 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+package main
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/swarm/api"
+ swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+ "gopkg.in/urfave/cli.v1"
+)
+
+func download(ctx *cli.Context) {
+ log.Debug("downloading content using swarm down")
+ args := ctx.Args()
+ dest := "."
+
+ switch len(args) {
+ case 0:
+ utils.Fatalf("Usage: swarm down [options] <bzz locator> [<destination path>]")
+ case 1:
+ log.Trace(fmt.Sprintf("swarm down: no destination path - assuming working dir"))
+ default:
+ log.Trace(fmt.Sprintf("destination path arg: %s", args[1]))
+ if absDest, err := filepath.Abs(args[1]); err == nil {
+ dest = absDest
+ } else {
+ utils.Fatalf("could not get download path: %v", err)
+ }
+ }
+
+ var (
+ bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
+ isRecursive = ctx.Bool(SwarmRecursiveFlag.Name)
+ client = swarm.NewClient(bzzapi)
+ )
+
+ if fi, err := os.Stat(dest); err == nil {
+ if isRecursive && !fi.Mode().IsDir() {
+ utils.Fatalf("destination path is not a directory!")
+ }
+ } else {
+ if !os.IsNotExist(err) {
+ utils.Fatalf("could not stat path: %v", err)
+ }
+ }
+
+ uri, err := api.Parse(args[0])
+ if err != nil {
+ utils.Fatalf("could not parse uri argument: %v", err)
+ }
+
+ // assume behaviour according to --recursive switch
+ if isRecursive {
+ if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil {
+ utils.Fatalf("encoutered an error while downloading directory: %v", err)
+ }
+ } else {
+ // we are downloading a file
+ log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path))
+
+ err := client.DownloadFile(uri.Addr, uri.Path, dest)
+ if err != nil {
+ utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err)
+ }
+ }
+}
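
Not part of the commit: the new `swarm down` command is a thin wrapper over the HTTP client calls visible above. A minimal sketch of the same calls, assuming the default local gateway and a placeholder manifest hash:

    package main

    import (
        "github.com/ethereum/go-ethereum/cmd/utils"
        "github.com/ethereum/go-ethereum/swarm/api"
        swarm "github.com/ethereum/go-ethereum/swarm/api/client"
    )

    func main() {
        client := swarm.NewClient("http://127.0.0.1:8500") // default --bzzapi endpoint

        manifestHash := "0000000000000000000000000000000000000000000000000000000000000000" // placeholder
        uri, err := api.Parse("bzz:/" + manifestHash + "/index.html")
        if err != nil {
            utils.Fatalf("could not parse uri: %v", err)
        }

        // single-file download, mirroring the non-recursive branch above;
        // DownloadDirectory would be used with --recursive
        if err := client.DownloadFile(uri.Addr, uri.Path, "./downloads"); err != nil {
            utils.Fatalf("download failed: %v", err)
        }
    }
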
diff --git a/cmd/swarm/export_test.go b/cmd/swarm/export_test.go
new file mode 100644
index 000000000..525538ad7
--- /dev/null
+++ b/cmd/swarm/export_test.go
@@ -0,0 +1,139 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/rand"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/swarm"
+)
+
+// TestCLISwarmExportImport perform the following test:
+// 1. runs swarm node
+// 2. uploads a random file
+// 3. runs an export of the local datastore
+// 4. runs a second swarm node
+// 5. imports the exported datastore
+// 6. fetches the uploaded random file from the second node
+func TestCLISwarmExportImport(t *testing.T) {
+ cluster := newTestCluster(t, 1)
+
+ // generate random 10mb file
+ f, cleanup := generateRandomFile(t, 10000000)
+ defer cleanup()
+
+ // upload the file with 'swarm up' and expect a hash
+ up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", f.Name())
+ _, matches := up.ExpectRegexp(`[a-f\d]{64}`)
+ up.ExpectExit()
+ hash := matches[0]
+
+ var info swarm.Info
+ if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
+ t.Fatal(err)
+ }
+
+ cluster.Stop()
+ defer cluster.Cleanup()
+
+ // generate an export.tar
+ exportCmd := runSwarm(t, "db", "export", info.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info.BzzKey, "0x"))
+ exportCmd.ExpectExit()
+
+ // start second cluster
+ cluster2 := newTestCluster(t, 1)
+
+ var info2 swarm.Info
+ if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
+ t.Fatal(err)
+ }
+
+ // stop second cluster, so that we close LevelDB
+ cluster2.Stop()
+ defer cluster2.Cleanup()
+
+ // import the export.tar
+ importCmd := runSwarm(t, "db", "import", info2.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
+ importCmd.ExpectExit()
+
+ // spin second cluster back up
+ cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))
+
+ // try to fetch imported file
+ res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + hash)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if res.StatusCode != 200 {
+ t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
+ }
+
+ // compare downloaded file with the generated random file
+ mustEqualFiles(t, f, res.Body)
+}
+
+func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
+ h := md5.New()
+ upLen, err := io.Copy(h, up)
+ if err != nil {
+ t.Fatal(err)
+ }
+ upHash := h.Sum(nil)
+ h.Reset()
+ downLen, err := io.Copy(h, down)
+ if err != nil {
+ t.Fatal(err)
+ }
+ downHash := h.Sum(nil)
+
+ if !bytes.Equal(upHash, downHash) || upLen != downLen {
+ t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one mp5=%x (length %v)", downHash, downLen, upHash, upLen)
+ }
+}
+
+func generateRandomFile(t *testing.T, size int) (f *os.File, teardown func()) {
+ // create a tmp file
+ tmp, err := ioutil.TempFile("", "swarm-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // callback for tmp file cleanup
+ teardown = func() {
+ tmp.Close()
+ os.Remove(tmp.Name())
+ }
+
+ // write 10mb random data to file
+ buf := make([]byte, 10000000)
+ _, err = rand.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ioutil.WriteFile(tmp.Name(), buf, 0755)
+
+ return tmp, teardown
+}
diff --git a/cmd/swarm/fs.go b/cmd/swarm/fs.go
new file mode 100644
index 000000000..0124586cf
--- /dev/null
+++ b/cmd/swarm/fs.go
@@ -0,0 +1,127 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/ethereum/go-ethereum/swarm/fuse"
+ "gopkg.in/urfave/cli.v1"
+)
+
+func mount(cliContext *cli.Context) {
+ args := cliContext.Args()
+ if len(args) < 2 {
+ utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> <file name>")
+ }
+
+ client, err := dialRPC(cliContext)
+ if err != nil {
+ utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
+ }
+ defer client.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ mf := &fuse.MountInfo{}
+ mountPoint, err := filepath.Abs(filepath.Clean(args[1]))
+ if err != nil {
+ utils.Fatalf("error expanding path for mount point: %v", err)
+ }
+ err = client.CallContext(ctx, mf, "swarmfs_mount", args[0], mountPoint)
+ if err != nil {
+ utils.Fatalf("had an error calling the RPC endpoint while mounting: %v", err)
+ }
+}
+
+func unmount(cliContext *cli.Context) {
+ args := cliContext.Args()
+
+ if len(args) < 1 {
+ utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount path>")
+ }
+ client, err := dialRPC(cliContext)
+ if err != nil {
+ utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
+ }
+ defer client.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ mf := fuse.MountInfo{}
+ err = client.CallContext(ctx, &mf, "swarmfs_unmount", args[0])
+ if err != nil {
+ utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
+ }
+ fmt.Printf("%s\n", mf.LatestManifest) //print the latest manifest hash for user reference
+}
+
+func listMounts(cliContext *cli.Context) {
+ client, err := dialRPC(cliContext)
+ if err != nil {
+ utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
+ }
+ defer client.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ mf := []fuse.MountInfo{}
+ err = client.CallContext(ctx, &mf, "swarmfs_listmounts")
+ if err != nil {
+ utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
+ }
+ if len(mf) == 0 {
+ fmt.Print("Could not found any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n")
+ } else {
+ fmt.Printf("Found %d swarmfs mount(s):\n", len(mf))
+ for i, mountInfo := range mf {
+ fmt.Printf("%d:\n", i)
+ fmt.Printf("\tMount point: %s\n", mountInfo.MountPoint)
+ fmt.Printf("\tLatest Manifest: %s\n", mountInfo.LatestManifest)
+ fmt.Printf("\tStart Manifest: %s\n", mountInfo.StartManifest)
+ }
+ }
+}
+
+func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
+ var endpoint string
+
+ if ctx.IsSet(utils.IPCPathFlag.Name) {
+ endpoint = ctx.String(utils.IPCPathFlag.Name)
+ } else {
+ utils.Fatalf("swarm ipc endpoint not specified")
+ }
+
+ if endpoint == "" {
+ endpoint = node.DefaultIPCEndpoint(clientIdentifier)
+ } else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
+ // Backwards compatibility with geth < 1.5 which required
+ // these prefixes.
+ endpoint = endpoint[4:]
+ }
+ return rpc.Dial(endpoint)
+}
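
Not part of the commit: the fs subcommands above are wrappers around RPC calls on the node's bzzd.ipc endpoint. A minimal sketch of one such call, listing mounts the way `swarm fs list` does; the IPC path is a placeholder:

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/ethereum/go-ethereum/rpc"
        "github.com/ethereum/go-ethereum/swarm/fuse"
    )

    func main() {
        client, err := rpc.Dial("/path/to/bzzd.ipc") // placeholder IPC path
        if err != nil {
            panic(err)
        }
        defer client.Close()

        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        mounts := []fuse.MountInfo{}
        if err := client.CallContext(ctx, &mounts, "swarmfs_listmounts"); err != nil {
            panic(err)
        }
        for _, m := range mounts {
            fmt.Printf("%s -> %s\n", m.MountPoint, m.LatestManifest)
        }
    }
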
diff --git a/cmd/swarm/fs_test.go b/cmd/swarm/fs_test.go
new file mode 100644
index 000000000..25705c0a4
--- /dev/null
+++ b/cmd/swarm/fs_test.go
@@ -0,0 +1,234 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ colorable "github.com/mattn/go-colorable"
+)
+
+func init() {
+ log.PrintOrigins(true)
+ log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+}
+
+type testFile struct {
+ filePath string
+ content string
+}
+
+// TestCLISwarmFs is a high-level test of swarmfs
+func TestCLISwarmFs(t *testing.T) {
+ cluster := newTestCluster(t, 3)
+ defer cluster.Shutdown()
+
+ // create a tmp dir
+ mountPoint, err := ioutil.TempDir("", "swarm-test")
+ log.Debug("swarmfs cli test", "1st mount", mountPoint)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(mountPoint)
+
+ handlingNode := cluster.Nodes[0]
+ mhash := doUploadEmptyDir(t, handlingNode)
+ log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
+
+ mount := runSwarm(t, []string{
+ "fs",
+ "mount",
+ "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
+ mhash,
+ mountPoint,
+ }...)
+ mount.ExpectExit()
+
+ filesToAssert := []*testFile{}
+
+ dirPath, err := createDirInDir(mountPoint, "testSubDir")
+ if err != nil {
+ t.Fatal(err)
+ }
+ dirPath2, err := createDirInDir(dirPath, "AnotherTestSubDir")
+
+ dummyContent := "somerandomtestcontentthatshouldbeasserted"
+ dirs := []string{
+ mountPoint,
+ dirPath,
+ dirPath2,
+ }
+ files := []string{"f1.tmp", "f2.tmp"}
+ for _, d := range dirs {
+ for _, entry := range files {
+ tFile, err := createTestFileInPath(d, entry, dummyContent)
+ if err != nil {
+ t.Fatal(err)
+ }
+ filesToAssert = append(filesToAssert, tFile)
+ }
+ }
+ if len(filesToAssert) != len(dirs)*len(files) {
+ t.Fatalf("should have %d files to assert now, got %d", len(dirs)*len(files), len(filesToAssert))
+ }
+ hashRegexp := `[a-f\d]{64}`
+ log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
+
+ unmount := runSwarm(t, []string{
+ "fs",
+ "unmount",
+ "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
+ mountPoint,
+ }...)
+ _, matches := unmount.ExpectRegexp(hashRegexp)
+ unmount.ExpectExit()
+
+ hash := matches[0]
+ if hash == mhash {
+ t.Fatal("this should not be equal")
+ }
+ log.Debug("swarmfs cli test: asserting no files in mount point")
+
+ //check that there's nothing in the mount folder
+ filesInDir, err := ioutil.ReadDir(mountPoint)
+ if err != nil {
+ t.Fatalf("had an error reading the directory: %v", err)
+ }
+
+ if len(filesInDir) != 0 {
+ t.Fatal("there shouldn't be anything here")
+ }
+
+ secondMountPoint, err := ioutil.TempDir("", "swarm-test")
+ log.Debug("swarmfs cli test", "2nd mount point at", secondMountPoint)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(secondMountPoint)
+
+ log.Debug("swarmfs cli test: remounting at second mount point", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
+
+ //remount, check files
+ newMount := runSwarm(t, []string{
+ "fs",
+ "mount",
+ "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
+ hash, // the latest hash
+ secondMountPoint,
+ }...)
+
+ newMount.ExpectExit()
+ time.Sleep(1 * time.Second)
+
+ filesInDir, err = ioutil.ReadDir(secondMountPoint)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(filesInDir) == 0 {
+ t.Fatal("there should be something here")
+ }
+
+ log.Debug("swarmfs cli test: traversing file tree to see it matches previous mount")
+
+ for _, file := range filesToAssert {
+ file.filePath = strings.Replace(file.filePath, mountPoint, secondMountPoint, -1)
+ fileBytes, err := ioutil.ReadFile(file.filePath)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(fileBytes, bytes.NewBufferString(file.content).Bytes()) {
+ t.Fatal("this should be equal")
+ }
+ }
+
+ log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
+
+ unmountSec := runSwarm(t, []string{
+ "fs",
+ "unmount",
+ "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
+ secondMountPoint,
+ }...)
+
+ _, matches = unmountSec.ExpectRegexp(hashRegexp)
+ unmountSec.ExpectExit()
+
+ if matches[0] != hash {
+ t.Fatal("these should be equal - no changes made")
+ }
+}
+
+func doUploadEmptyDir(t *testing.T, node *testNode) string {
+ // create a tmp dir
+ tmpDir, err := ioutil.TempDir("", "swarm-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ hashRegexp := `[a-f\d]{64}`
+
+ flags := []string{
+ "--bzzapi", node.URL,
+ "--recursive",
+ "up",
+ tmpDir}
+
+ log.Info("swarmfs cli test: uploading dir with 'swarm up'")
+ up := runSwarm(t, flags...)
+ _, matches := up.ExpectRegexp(hashRegexp)
+ up.ExpectExit()
+ hash := matches[0]
+ log.Info("swarmfs cli test: dir uploaded", "hash", hash)
+ return hash
+}
+
+func createDirInDir(createInDir string, dirToCreate string) (string, error) {
+ fullpath := filepath.Join(createInDir, dirToCreate)
+ err := os.MkdirAll(fullpath, 0777)
+ if err != nil {
+ return "", err
+ }
+ return fullpath, nil
+}
+
+func createTestFileInPath(dir, filename, content string) (*testFile, error) {
+ tFile := &testFile{}
+ filePath := filepath.Join(dir, filename)
+ if file, err := os.Create(filePath); err == nil {
+ tFile.content = content
+ tFile.filePath = filePath
+
+ _, err = io.WriteString(file, content)
+ if err != nil {
+ return nil, err
+ }
+ file.Close()
+ }
+
+ return tFile, nil
+}
diff --git a/cmd/swarm/hash.go b/cmd/swarm/hash.go
index 792e8d0d7..c82456b3c 100644
--- a/cmd/swarm/hash.go
+++ b/cmd/swarm/hash.go
@@ -38,11 +38,11 @@ func hash(ctx *cli.Context) {
defer f.Close()
stat, _ := f.Stat()
- chunker := storage.NewTreeChunker(storage.NewChunkerParams())
- key, err := chunker.Split(f, stat.Size(), nil, nil, nil)
+ fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())
+ addr, _, err := fileStore.Store(f, stat.Size(), false)
if err != nil {
utils.Fatalf("%v\n", err)
} else {
- fmt.Printf("%v\n", key)
+ fmt.Printf("%v\n", addr)
}
}
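
Not part of the commit: `swarm hash` now goes through the FileStore API instead of the tree chunker. A minimal sketch of the same call path over an in-memory chunk store, hashing a byte slice instead of a file:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ethereum/go-ethereum/swarm/storage"
    )

    func main() {
        data := []byte("hello swarm")
        fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())

        // false: no encryption, matching the hash command above
        addr, _, err := fileStore.Store(bytes.NewReader(data), int64(len(data)), false)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%v\n", addr)
    }
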
diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go
index 360020b77..9877e9150 100644
--- a/cmd/swarm/main.go
+++ b/cmd/swarm/main.go
@@ -34,7 +34,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/console"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
@@ -49,6 +48,22 @@ import (
)
const clientIdentifier = "swarm"
+const helpTemplate = `NAME:
+{{.HelpName}} - {{.Usage}}
+
+USAGE:
+{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
+
+CATEGORY:
+{{.Category}}{{end}}{{if .Description}}
+
+DESCRIPTION:
+{{.Description}}{{end}}{{if .VisibleFlags}}
+
+OPTIONS:
+{{range .VisibleFlags}}{{.}}
+{{end}}{{end}}
+`
var (
gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
@@ -87,10 +102,6 @@ var (
Usage: "Network identifier (integer, default 3=swarm testnet)",
EnvVar: SWARM_ENV_NETWORK_ID,
}
- SwarmConfigPathFlag = cli.StringFlag{
- Name: "bzzconfig",
- Usage: "DEPRECATED: please use --config path/to/TOML-file",
- }
SwarmSwapEnabledFlag = cli.BoolFlag{
Name: "swap",
Usage: "Swarm SWAP enabled (default false)",
@@ -101,10 +112,20 @@ var (
Usage: "URL of the Ethereum API provider to use to settle SWAP payments",
EnvVar: SWARM_ENV_SWAP_API,
}
- SwarmSyncEnabledFlag = cli.BoolTFlag{
- Name: "sync",
- Usage: "Swarm Syncing enabled (default true)",
- EnvVar: SWARM_ENV_SYNC_ENABLE,
+ SwarmSyncDisabledFlag = cli.BoolTFlag{
+ Name: "nosync",
+ Usage: "Disable swarm syncing",
+ EnvVar: SWARM_ENV_SYNC_DISABLE,
+ }
+ SwarmSyncUpdateDelay = cli.DurationFlag{
+ Name: "sync-update-delay",
+ Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)",
+ EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
+ }
+ SwarmDeliverySkipCheckFlag = cli.BoolFlag{
+ Name: "delivery-skip-check",
+ Usage: "Skip chunk delivery check (default false)",
+ EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK,
}
EnsAPIFlag = cli.StringSliceFlag{
Name: "ens-api",
@@ -116,7 +137,7 @@ var (
Usage: "Swarm HTTP endpoint",
Value: "http://127.0.0.1:8500",
}
- SwarmRecursiveUploadFlag = cli.BoolFlag{
+ SwarmRecursiveFlag = cli.BoolFlag{
Name: "recursive",
Usage: "Upload directories recursively",
}
@@ -136,20 +157,29 @@ var (
Name: "mime",
Usage: "force mime type",
}
+ SwarmEncryptedFlag = cli.BoolFlag{
+ Name: "encrypt",
+ Usage: "use encrypted upload",
+ }
CorsStringFlag = cli.StringFlag{
Name: "corsdomain",
Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
EnvVar: SWARM_ENV_CORS,
}
-
- // the following flags are deprecated and should be removed in the future
- DeprecatedEthAPIFlag = cli.StringFlag{
- Name: "ethapi",
- Usage: "DEPRECATED: please use --ens-api and --swap-api",
+ SwarmStorePath = cli.StringFlag{
+ Name: "store.path",
+ Usage: "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)",
+ EnvVar: SWARM_ENV_STORE_PATH,
}
- DeprecatedEnsAddrFlag = cli.StringFlag{
- Name: "ens-addr",
- Usage: "DEPRECATED: ENS contract address, please use --ens-api with contract address according to its format",
+ SwarmStoreCapacity = cli.Uint64Flag{
+ Name: "store.size",
+ Usage: "Number of chunks (5M is roughly 20-25GB) (default 5000000)",
+ EnvVar: SWARM_ENV_STORE_CAPACITY,
+ }
+ SwarmStoreCacheCapacity = cli.UintFlag{
+ Name: "store.cache.size",
+ Usage: "Number of recent chunks cached in memory (default 5000)",
+ EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
}
)
@@ -180,91 +210,130 @@ func init() {
app.Copyright = "Copyright 2013-2016 The go-ethereum Authors"
app.Commands = []cli.Command{
{
- Action: version,
- Name: "version",
- Usage: "Print version numbers",
- ArgsUsage: " ",
- Description: `
-The output of this command is supposed to be machine-readable.
-`,
+ Action: version,
+ CustomHelpTemplate: helpTemplate,
+ Name: "version",
+ Usage: "Print version numbers",
+ Description: "The output of this command is supposed to be machine-readable",
},
{
- Action: upload,
- Name: "up",
- Usage: "upload a file or directory to swarm using the HTTP API",
- ArgsUsage: " <file>",
- Description: `
-"upload a file or directory to swarm using the HTTP API and prints the root hash",
-`,
+ Action: upload,
+ CustomHelpTemplate: helpTemplate,
+ Name: "up",
+ Usage: "uploads a file or directory to swarm using the HTTP API",
+ ArgsUsage: "<file>",
+ Flags: []cli.Flag{SwarmEncryptedFlag},
+ Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash",
},
{
- Action: list,
- Name: "ls",
- Usage: "list files and directories contained in a manifest",
- ArgsUsage: " <manifest> [<prefix>]",
- Description: `
-Lists files and directories contained in a manifest.
-`,
+ Action: list,
+ CustomHelpTemplate: helpTemplate,
+ Name: "ls",
+ Usage: "list files and directories contained in a manifest",
+ ArgsUsage: "<manifest> [<prefix>]",
+ Description: "Lists files and directories contained in a manifest",
},
{
- Action: hash,
- Name: "hash",
- Usage: "print the swarm hash of a file or directory",
- ArgsUsage: " <file>",
- Description: `
-Prints the swarm hash of file or directory.
-`,
+ Action: hash,
+ CustomHelpTemplate: helpTemplate,
+ Name: "hash",
+ Usage: "print the swarm hash of a file or directory",
+ ArgsUsage: "<file>",
+ Description: "Prints the swarm hash of file or directory",
},
{
- Name: "manifest",
- Usage: "update a MANIFEST",
- ArgsUsage: "manifest COMMAND",
+ Action: download,
+ Name: "down",
+ Flags: []cli.Flag{SwarmRecursiveFlag},
+ Usage: "downloads a swarm manifest or a file inside a manifest",
+ ArgsUsage: " <uri> [<dir>]",
Description: `
-Updates a MANIFEST by adding/removing/updating the hash of a path.
+Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries.
`,
+ },
+
+ {
+ Name: "manifest",
+ CustomHelpTemplate: helpTemplate,
+ Usage: "perform operations on swarm manifests",
+ ArgsUsage: "COMMAND",
+ Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
Subcommands: []cli.Command{
{
- Action: add,
- Name: "add",
- Usage: "add a new path to the manifest",
- ArgsUsage: "<MANIFEST> <path> <hash> [<content-type>]",
- Description: `
-Adds a new path to the manifest
-`,
+ Action: add,
+ CustomHelpTemplate: helpTemplate,
+ Name: "add",
+ Usage: "add a new path to the manifest",
+ ArgsUsage: "<MANIFEST> <path> <hash> [<content-type>]",
+ Description: "Adds a new path to the manifest",
},
{
- Action: update,
- Name: "update",
- Usage: "update the hash for an already existing path in the manifest",
- ArgsUsage: "<MANIFEST> <path> <newhash> [<newcontent-type>]",
- Description: `
-Update the hash for an already existing path in the manifest
-`,
+ Action: update,
+ CustomHelpTemplate: helpTemplate,
+ Name: "update",
+ Usage: "update the hash for an already existing path in the manifest",
+ ArgsUsage: "<MANIFEST> <path> <newhash> [<newcontent-type>]",
+ Description: "Update the hash for an already existing path in the manifest",
},
{
- Action: remove,
- Name: "remove",
- Usage: "removes a path from the manifest",
- ArgsUsage: "<MANIFEST> <path>",
- Description: `
-Removes a path from the manifest
-`,
+ Action: remove,
+ CustomHelpTemplate: helpTemplate,
+ Name: "remove",
+ Usage: "removes a path from the manifest",
+ ArgsUsage: "<MANIFEST> <path>",
+ Description: "Removes a path from the manifest",
},
},
},
{
- Name: "db",
- Usage: "manage the local chunk database",
- ArgsUsage: "db COMMAND",
- Description: `
-Manage the local chunk database.
-`,
+ Name: "fs",
+ CustomHelpTemplate: helpTemplate,
+ Usage: "perform FUSE operations",
+ ArgsUsage: "fs COMMAND",
+ Description: "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operation you must reference the correct path to bzzd.ipc in order to communicate with the node",
Subcommands: []cli.Command{
{
- Action: dbExport,
- Name: "export",
- Usage: "export a local chunk database as a tar archive (use - to send to stdout)",
- ArgsUsage: "<chunkdb> <file>",
+ Action: mount,
+ CustomHelpTemplate: helpTemplate,
+ Name: "mount",
+ Flags: []cli.Flag{utils.IPCPathFlag},
+ Usage: "mount a swarm hash to a mount point",
+ ArgsUsage: "swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>",
+ Description: "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+ },
+ {
+ Action: unmount,
+ CustomHelpTemplate: helpTemplate,
+ Name: "unmount",
+ Flags: []cli.Flag{utils.IPCPathFlag},
+ Usage: "unmount a swarmfs mount",
+ ArgsUsage: "swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>",
+ Description: "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+ },
+ {
+ Action: listMounts,
+ CustomHelpTemplate: helpTemplate,
+ Name: "list",
+ Flags: []cli.Flag{utils.IPCPathFlag},
+ Usage: "list swarmfs mounts",
+ ArgsUsage: "swarm fs list --ipcpath <path to bzzd.ipc>",
+ Description: "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
+ },
+ },
+ },
+ {
+ Name: "db",
+ CustomHelpTemplate: helpTemplate,
+ Usage: "manage the local chunk database",
+ ArgsUsage: "db COMMAND",
+ Description: "Manage the local chunk database",
+ Subcommands: []cli.Command{
+ {
+ Action: dbExport,
+ CustomHelpTemplate: helpTemplate,
+ Name: "export",
+ Usage: "export a local chunk database as a tar archive (use - to send to stdout)",
+ ArgsUsage: "<chunkdb> <file>",
Description: `
Export a local chunk database as a tar archive (use - to send to stdout).
@@ -277,10 +346,11 @@ pv(1) tool to get a progress bar:
`,
},
{
- Action: dbImport,
- Name: "import",
- Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
- ArgsUsage: "<chunkdb> <file>",
+ Action: dbImport,
+ CustomHelpTemplate: helpTemplate,
+ Name: "import",
+ Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
+ ArgsUsage: "<chunkdb> <file>",
Description: `
Import chunks from a tar archive into a local chunk database (use - to read from stdin).
@@ -293,27 +363,16 @@ pv(1) tool to get a progress bar:
`,
},
{
- Action: dbClean,
- Name: "clean",
- Usage: "remove corrupt entries from a local chunk database",
- ArgsUsage: "<chunkdb>",
- Description: `
-Remove corrupt entries from a local chunk database.
-`,
+ Action: dbClean,
+ CustomHelpTemplate: helpTemplate,
+ Name: "clean",
+ Usage: "remove corrupt entries from a local chunk database",
+ ArgsUsage: "<chunkdb>",
+ Description: "Remove corrupt entries from a local chunk database",
},
},
},
- {
- Action: func(ctx *cli.Context) {
- utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.")
- },
- Name: "cleandb",
- Usage: "DEPRECATED: use 'swarm db clean'",
- ArgsUsage: " ",
- Description: `
-DEPRECATED: use 'swarm db clean'.
-`,
- },
+
// See config.go
DumpConfigCommand,
}
@@ -339,10 +398,11 @@ DEPRECATED: use 'swarm db clean'.
CorsStringFlag,
EnsAPIFlag,
SwarmTomlConfigPathFlag,
- SwarmConfigPathFlag,
SwarmSwapEnabledFlag,
SwarmSwapAPIFlag,
- SwarmSyncEnabledFlag,
+ SwarmSyncDisabledFlag,
+ SwarmSyncUpdateDelay,
+ SwarmDeliverySkipCheckFlag,
SwarmListenAddrFlag,
SwarmPortFlag,
SwarmAccountFlag,
@@ -350,15 +410,24 @@ DEPRECATED: use 'swarm db clean'.
ChequebookAddrFlag,
// upload flags
SwarmApiFlag,
- SwarmRecursiveUploadFlag,
+ SwarmRecursiveFlag,
SwarmWantManifestFlag,
SwarmUploadDefaultPath,
SwarmUpFromStdinFlag,
SwarmUploadMimeType,
- //deprecated flags
- DeprecatedEthAPIFlag,
- DeprecatedEnsAddrFlag,
- }
+ // storage flags
+ SwarmStorePath,
+ SwarmStoreCapacity,
+ SwarmStoreCacheCapacity,
+ }
+ rpcFlags := []cli.Flag{
+ utils.WSEnabledFlag,
+ utils.WSListenAddrFlag,
+ utils.WSPortFlag,
+ utils.WSApiFlag,
+ utils.WSAllowedOriginsFlag,
+ }
+ app.Flags = append(app.Flags, rpcFlags...)
app.Flags = append(app.Flags, debug.Flags...)
app.Flags = append(app.Flags, swarmmetrics.Flags...)
app.Before = func(ctx *cli.Context) error {
@@ -383,16 +452,12 @@ func main() {
}
func version(ctx *cli.Context) error {
- fmt.Println(strings.Title(clientIdentifier))
- fmt.Println("Version:", params.Version)
+ fmt.Println("Version:", SWARM_VERSION)
if gitCommit != "" {
fmt.Println("Git Commit:", gitCommit)
}
- fmt.Println("Network Id:", ctx.GlobalInt(utils.NetworkIdFlag.Name))
fmt.Println("Go Version:", runtime.Version())
fmt.Println("OS:", runtime.GOOS)
- fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
- fmt.Printf("GOROOT=%s\n", runtime.GOROOT())
return nil
}
@@ -405,6 +470,10 @@ func bzzd(ctx *cli.Context) error {
}
cfg := defaultNodeConfig
+
+ //pss operates on ws
+ cfg.WSModules = append(cfg.WSModules, "pss")
+
//geth only supports --datadir via command line
//in order to be consistent within swarm, if we pass --datadir via environment variable
//or via config file, we get the same directory for geth and swarm
@@ -421,7 +490,7 @@ func bzzd(ctx *cli.Context) error {
//due to overriding behavior
initSwarmNode(bzzconfig, stack, ctx)
//register BZZ as node.Service in the ethereum node
- registerBzzService(bzzconfig, ctx, stack)
+ registerBzzService(bzzconfig, stack)
//start the node
utils.StartNode(stack)
@@ -439,7 +508,7 @@ func bzzd(ctx *cli.Context) error {
bootnodes := strings.Split(bzzconfig.BootNodes, ",")
injectBootnodes(stack.Server(), bootnodes)
} else {
- if bzzconfig.NetworkId == 3 {
+ if bzzconfig.NetworkID == 3 {
injectBootnodes(stack.Server(), testbetBootNodes)
}
}
@@ -448,21 +517,11 @@ func bzzd(ctx *cli.Context) error {
return nil
}
-func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) {
-
+func registerBzzService(bzzconfig *bzzapi.Config, stack *node.Node) {
//define the swarm service boot function
- boot := func(ctx *node.ServiceContext) (node.Service, error) {
- var swapClient *ethclient.Client
- var err error
- if bzzconfig.SwapApi != "" {
- log.Info("connecting to SWAP API", "url", bzzconfig.SwapApi)
- swapClient, err = ethclient.Dial(bzzconfig.SwapApi)
- if err != nil {
- return nil, fmt.Errorf("error connecting to SWAP API %s: %s", bzzconfig.SwapApi, err)
- }
- }
-
- return swarm.NewSwarm(ctx, swapClient, bzzconfig)
+ boot := func(_ *node.ServiceContext) (node.Service, error) {
+ // In production, mockStore must be always nil.
+ return swarm.NewSwarm(bzzconfig, nil)
}
//register within the ethereum node
if err := stack.Register(boot); err != nil {
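
Not part of the commit: registerBzzService now only hands the node a constructor closure. A generic sketch of that node.Service constructor pattern with a hypothetical no-op service (the methods shown are assumed to match the node.Service interface):

    package main

    import (
        "github.com/ethereum/go-ethereum/node"
        "github.com/ethereum/go-ethereum/p2p"
        "github.com/ethereum/go-ethereum/rpc"
    )

    // dummyService is a hypothetical no-op service used only to illustrate the pattern.
    type dummyService struct{}

    func (s *dummyService) Protocols() []p2p.Protocol  { return nil }
    func (s *dummyService) APIs() []rpc.API            { return nil }
    func (s *dummyService) Start(srv *p2p.Server) error { return nil }
    func (s *dummyService) Stop() error                { return nil }

    func main() {
        stack, err := node.New(&node.Config{})
        if err != nil {
            panic(err)
        }
        // register a boot function, just as registerBzzService does for swarm.NewSwarm
        boot := func(_ *node.ServiceContext) (node.Service, error) {
            return &dummyService{}, nil
        }
        if err := stack.Register(boot); err != nil {
            panic(err)
        }
    }
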
diff --git a/cmd/swarm/manifest.go b/cmd/swarm/manifest.go
index 41a69a5d0..82166edf6 100644
--- a/cmd/swarm/manifest.go
+++ b/cmd/swarm/manifest.go
@@ -131,13 +131,13 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
longestPathEntry = api.ManifestEntry{}
)
- mroot, err := client.DownloadManifest(mhash)
+ mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil {
utils.Fatalf("Manifest download failed: %v", err)
}
//TODO: check if the "hash" to add is valid and present in swarm
- _, err = client.DownloadManifest(hash)
+ _, _, err = client.DownloadManifest(hash)
if err != nil {
utils.Fatalf("Hash to add is not present: %v", err)
}
@@ -180,7 +180,7 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
mroot.Entries = append(mroot.Entries, newEntry)
}
- newManifestHash, err := client.UploadManifest(mroot)
+ newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
if err != nil {
utils.Fatalf("Manifest upload failed: %v", err)
}
@@ -197,7 +197,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
longestPathEntry = api.ManifestEntry{}
)
- mroot, err := client.DownloadManifest(mhash)
+ mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil {
utils.Fatalf("Manifest download failed: %v", err)
}
@@ -257,7 +257,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
mroot = newMRoot
}
- newManifestHash, err := client.UploadManifest(mroot)
+ newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
if err != nil {
utils.Fatalf("Manifest upload failed: %v", err)
}
@@ -273,7 +273,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
longestPathEntry = api.ManifestEntry{}
)
- mroot, err := client.DownloadManifest(mhash)
+ mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil {
utils.Fatalf("Manifest download failed: %v", err)
}
@@ -323,7 +323,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
mroot = newMRoot
}
- newManifestHash, err := client.UploadManifest(mroot)
+ newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
if err != nil {
utils.Fatalf("Manifest upload failed: %v", err)
}
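The manifest commands now carry the encryption flag through the round trip: DownloadManifest reports whether the manifest is encrypted, and UploadManifest must be told so the updated manifest is stored the same way it was retrieved. A minimal sketch of that flow using the client API visible in this diff; addEntrySketch and its parameters are illustrative, and the ManifestEntry fields follow the values addEntryToManifest works with:

    package sketch

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/swarm/api"
        swarm "github.com/ethereum/go-ethereum/swarm/api/client"
    )

    // addEntrySketch downloads a manifest, appends an entry, and re-uploads it
    // with the same encryption setting it was retrieved with.
    func addEntrySketch(bzzapi, mhash, hash, entryPath, ctype string) (string, error) {
        client := swarm.NewClient(bzzapi)

        mroot, isEncrypted, err := client.DownloadManifest(mhash)
        if err != nil {
            return "", fmt.Errorf("manifest download failed: %v", err)
        }

        mroot.Entries = append(mroot.Entries, api.ManifestEntry{
            Hash:        hash,
            Path:        entryPath,
            ContentType: ctype,
        })

        // re-upload with the same encryption setting
        return client.UploadManifest(mroot, isEncrypted)
    }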
diff --git a/cmd/swarm/run_test.go b/cmd/swarm/run_test.go
index 594cfa55c..a70c4686d 100644
--- a/cmd/swarm/run_test.go
+++ b/cmd/swarm/run_test.go
@@ -81,6 +81,7 @@ type testCluster struct {
//
// When starting more than one node, they are connected together using the
// admin SetPeer RPC method.
+
func newTestCluster(t *testing.T, size int) *testCluster {
cluster := &testCluster{}
defer func() {
@@ -96,18 +97,7 @@ func newTestCluster(t *testing.T, size int) *testCluster {
cluster.TmpDir = tmpdir
// start the nodes
- cluster.Nodes = make([]*testNode, 0, size)
- for i := 0; i < size; i++ {
- dir := filepath.Join(cluster.TmpDir, fmt.Sprintf("swarm%02d", i))
- if err := os.Mkdir(dir, 0700); err != nil {
- t.Fatal(err)
- }
-
- node := newTestNode(t, dir)
- node.Name = fmt.Sprintf("swarm%02d", i)
-
- cluster.Nodes = append(cluster.Nodes, node)
- }
+ cluster.StartNewNodes(t, size)
if size == 1 {
return cluster
@@ -145,14 +135,51 @@ func (c *testCluster) Shutdown() {
os.RemoveAll(c.TmpDir)
}
+func (c *testCluster) Stop() {
+ for _, node := range c.Nodes {
+ node.Shutdown()
+ }
+}
+
+func (c *testCluster) StartNewNodes(t *testing.T, size int) {
+ c.Nodes = make([]*testNode, 0, size)
+ for i := 0; i < size; i++ {
+ dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
+ if err := os.Mkdir(dir, 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ node := newTestNode(t, dir)
+ node.Name = fmt.Sprintf("swarm%02d", i)
+
+ c.Nodes = append(c.Nodes, node)
+ }
+}
+
+func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) {
+ c.Nodes = make([]*testNode, 0, size)
+ for i := 0; i < size; i++ {
+ dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
+ node := existingTestNode(t, dir, bzzaccount)
+ node.Name = fmt.Sprintf("swarm%02d", i)
+
+ c.Nodes = append(c.Nodes, node)
+ }
+}
+
+func (c *testCluster) Cleanup() {
+ os.RemoveAll(c.TmpDir)
+}
+
type testNode struct {
- Name string
- Addr string
- URL string
- Enode string
- Dir string
- Client *rpc.Client
- Cmd *cmdtest.TestCmd
+ Name string
+ Addr string
+ URL string
+ Enode string
+ Dir string
+ IpcPath string
+ Client *rpc.Client
+ Cmd *cmdtest.TestCmd
}
const testPassphrase = "swarm-test-passphrase"
@@ -181,6 +208,72 @@ func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accoun
return conf, account
}
+func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
+ conf, _ := getTestAccount(t, dir)
+ node := &testNode{Dir: dir}
+
+ // use a unique IPCPath when running tests on Windows
+ if runtime.GOOS == "windows" {
+ conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount)
+ }
+
+ // assign ports
+ httpPort, err := assignTCPPort()
+ if err != nil {
+ t.Fatal(err)
+ }
+ p2pPort, err := assignTCPPort()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // start the node
+ node.Cmd = runSwarm(t,
+ "--port", p2pPort,
+ "--nodiscover",
+ "--datadir", dir,
+ "--ipcpath", conf.IPCPath,
+ "--ens-api", "",
+ "--bzzaccount", bzzaccount,
+ "--bzznetworkid", "321",
+ "--bzzport", httpPort,
+ "--verbosity", "6",
+ )
+ node.Cmd.InputLine(testPassphrase)
+ defer func() {
+ if t.Failed() {
+ node.Shutdown()
+ }
+ }()
+
+ // wait for the node to start
+ for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
+ node.Client, err = rpc.Dial(conf.IPCEndpoint())
+ if err == nil {
+ break
+ }
+ }
+ if node.Client == nil {
+ t.Fatal(err)
+ }
+
+ // load info
+ var info swarm.Info
+ if err := node.Client.Call(&info, "bzz_info"); err != nil {
+ t.Fatal(err)
+ }
+ node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
+ node.URL = "http://" + node.Addr
+
+ var nodeInfo p2p.NodeInfo
+ if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
+ t.Fatal(err)
+ }
+ node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
+
+ return node
+}
+
func newTestNode(t *testing.T, dir string) *testNode {
conf, account := getTestAccount(t, dir)
@@ -239,6 +332,7 @@ func newTestNode(t *testing.T, dir string) *testNode {
t.Fatal(err)
}
node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
+ node.IpcPath = conf.IPCPath
return node
}
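Together with existingTestNode, the new Stop, StartExistingNodes and Cleanup helpers let a test restart nodes on their existing data directories instead of wiping them. A minimal sketch of that cycle; restartWithSameData is hypothetical, would live in the same package as run_test.go, and bzzaccount must be the account the nodes were originally started with:

    // restartWithSameData stops the running swarm processes but keeps their data
    // directories, then relaunches them via StartExistingNodes under the same account.
    func restartWithSameData(t *testing.T, cluster *testCluster, bzzaccount string) {
        size := len(cluster.Nodes)
        cluster.Stop()
        cluster.StartExistingNodes(t, size, bzzaccount)
    }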
diff --git a/cmd/swarm/swarm-smoke/main.go b/cmd/swarm/swarm-smoke/main.go
new file mode 100644
index 000000000..87bc39816
--- /dev/null
+++ b/cmd/swarm/swarm-smoke/main.go
@@ -0,0 +1,101 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "os"
+ "sort"
+
+ "github.com/ethereum/go-ethereum/log"
+ colorable "github.com/mattn/go-colorable"
+
+ cli "gopkg.in/urfave/cli.v1"
+)
+
+var (
+ endpoints []string
+ includeLocalhost bool
+ cluster string
+ scheme string
+ filesize int
+ from int
+ to int
+)
+
+func main() {
+ log.PrintOrigins(true)
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+
+ app := cli.NewApp()
+ app.Name = "smoke-test"
+ app.Usage = ""
+
+ app.Flags = []cli.Flag{
+ cli.StringFlag{
+ Name: "cluster-endpoint",
+ Value: "testing",
+ Usage: "cluster to point to (open or testing)",
+ Destination: &cluster,
+ },
+ cli.IntFlag{
+ Name: "cluster-from",
+ Value: 8501,
+ Usage: "swarm node (from)",
+ Destination: &from,
+ },
+ cli.IntFlag{
+ Name: "cluster-to",
+ Value: 8512,
+ Usage: "swarm node (to)",
+ Destination: &to,
+ },
+ cli.StringFlag{
+ Name: "cluster-scheme",
+ Value: "http",
+ Usage: "http or https",
+ Destination: &scheme,
+ },
+ cli.BoolFlag{
+ Name: "include-localhost",
+ Usage: "whether to include localhost:8500 as an endpoint",
+ Destination: &includeLocalhost,
+ },
+ cli.IntFlag{
+ Name: "filesize",
+ Value: 1,
+ Usage: "file size for generated random file in MB",
+ Destination: &filesize,
+ },
+ }
+
+ app.Commands = []cli.Command{
+ {
+ Name: "upload_and_sync",
+ Aliases: []string{"c"},
+ Usage: "upload and sync",
+ Action: cliUploadAndSync,
+ },
+ }
+
+ sort.Sort(cli.FlagsByName(app.Flags))
+ sort.Sort(cli.CommandsByName(app.Commands))
+
+ err := app.Run(os.Args)
+ if err != nil {
+ log.Error(err.Error())
+ }
+}
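Assuming the binary is built from this directory as swarm-smoke, an invocation against the default testing cluster looks like the line below; the global flags must precede the command, per urfave/cli:

    swarm-smoke --cluster-endpoint=testing --cluster-from=8501 --cluster-to=8512 --filesize=1 upload_and_sync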
diff --git a/cmd/swarm/swarm-smoke/upload_and_sync.go b/cmd/swarm/swarm-smoke/upload_and_sync.go
new file mode 100644
index 000000000..7f9051e7f
--- /dev/null
+++ b/cmd/swarm/swarm-smoke/upload_and_sync.go
@@ -0,0 +1,184 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "os/exec"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/pborman/uuid"
+
+ cli "gopkg.in/urfave/cli.v1"
+)
+
+func generateEndpoints(scheme string, cluster string, from int, to int) {
+ for port := from; port <= to; port++ {
+ endpoints = append(endpoints, fmt.Sprintf("%s://%v.%s.swarm-gateways.net", scheme, port, cluster))
+ }
+
+ if includeLocalhost {
+ endpoints = append(endpoints, "http://localhost:8500")
+ }
+}
+
+func cliUploadAndSync(c *cli.Context) error {
+ defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size", filesize) }(time.Now())
+
+ generateEndpoints(scheme, cluster, from, to)
+
+ log.Info("uploading to " + endpoints[0] + " and syncing")
+
+ f, cleanup := generateRandomFile(filesize * 1000000)
+ defer cleanup()
+
+ hash, err := upload(f, endpoints[0])
+ if err != nil {
+ log.Error(err.Error())
+ return err
+ }
+
+ fhash, err := digest(f)
+ if err != nil {
+ log.Error(err.Error())
+ return err
+ }
+
+ log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))
+
+ if filesize < 10 {
+ time.Sleep(15 * time.Second)
+ } else {
+ time.Sleep(2 * time.Duration(filesize) * time.Second)
+ }
+
+ wg := sync.WaitGroup{}
+ for _, endpoint := range endpoints {
+ endpoint := endpoint
+ ruid := uuid.New()[:8]
+ wg.Add(1)
+ go func(endpoint string, ruid string) {
+ for {
+ err := fetch(hash, endpoint, fhash, ruid)
+ if err != nil {
+ continue
+ }
+
+ wg.Done()
+ return
+ }
+ }(endpoint, ruid)
+ }
+ wg.Wait()
+ log.Info("all endpoints synced random file successfully")
+
+ return nil
+}
+
+// fetch downloads the requested `hash` from the `endpoint` and compares its digest with `original`
+func fetch(hash string, endpoint string, original []byte, ruid string) error {
+ log.Trace("sleeping", "ruid", ruid)
+ time.Sleep(1 * time.Second)
+
+ log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
+ res, err := http.Get(endpoint + "/bzz:/" + hash + "/")
+ if err != nil {
+ log.Warn(err.Error(), "ruid", ruid)
+ return err
+ }
+ log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)
+
+ if res.StatusCode != 200 {
+ err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode)
+ log.Warn(err.Error(), "ruid", ruid)
+ return err
+ }
+
+ defer res.Body.Close()
+
+ rdigest, err := digest(res.Body)
+ if err != nil {
+ log.Warn(err.Error(), "ruid", ruid)
+ return err
+ }
+
+ if !bytes.Equal(rdigest, original) {
+ err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original)
+ log.Warn(err.Error(), "ruid", ruid)
+ return err
+ }
+
+ log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength)
+
+ return nil
+}
+
+// upload uploads the file `f` to `endpoint` via the `swarm up` command
+func upload(f *os.File, endpoint string) (string, error) {
+ var out bytes.Buffer
+ cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name())
+ cmd.Stdout = &out
+ err := cmd.Run()
+ if err != nil {
+ return "", err
+ }
+ hash := strings.TrimRight(out.String(), "\r\n")
+ return hash, nil
+}
+
+func digest(r io.Reader) ([]byte, error) {
+ h := md5.New()
+ _, err := io.Copy(h, r)
+ if err != nil {
+ return nil, err
+ }
+ return h.Sum(nil), nil
+}
+
+// generateRandomFile creates a temporary file of the requested byte size filled with random data
+func generateRandomFile(size int) (f *os.File, teardown func()) {
+ // create a tmp file
+ tmp, err := ioutil.TempFile("", "swarm-test")
+ if err != nil {
+ panic(err)
+ }
+
+ // callback for tmp file cleanup
+ teardown = func() {
+ tmp.Close()
+ os.Remove(tmp.Name())
+ }
+
+ buf := make([]byte, size)
+ _, err = rand.Read(buf)
+ if err != nil {
+ panic(err)
+ }
+ if err := ioutil.WriteFile(tmp.Name(), buf, 0755); err != nil {
+ panic(err)
+ }
+
+ return tmp, teardown
+}
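The core of the check in fetch and digest is an md5 comparison between a local file and an HTTP response body. A self-contained sketch of the same technique using only the standard library; checkFetched, localPath and url are illustrative names:

    package sketch

    import (
        "bytes"
        "crypto/md5"
        "fmt"
        "io"
        "net/http"
        "os"
    )

    // checkFetched md5-hashes a local file and the body served at url and
    // returns an error if the two digests differ.
    func checkFetched(localPath, url string) error {
        f, err := os.Open(localPath)
        if err != nil {
            return err
        }
        defer f.Close()

        want := md5.New()
        if _, err := io.Copy(want, f); err != nil {
            return err
        }

        res, err := http.Get(url)
        if err != nil {
            return err
        }
        defer res.Body.Close()
        if res.StatusCode != http.StatusOK {
            return fmt.Errorf("expected status 200, got %v", res.StatusCode)
        }

        got := md5.New()
        if _, err := io.Copy(got, res.Body); err != nil {
            return err
        }

        if !bytes.Equal(got.Sum(nil), want.Sum(nil)) {
            return fmt.Errorf("remote digest %x does not match local digest %x", got.Sum(nil), want.Sum(nil))
        }
        return nil
    }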
diff --git a/cmd/swarm/upload.go b/cmd/swarm/upload.go
index 9f4c525bb..8ba0e7c5f 100644
--- a/cmd/swarm/upload.go
+++ b/cmd/swarm/upload.go
@@ -40,12 +40,13 @@ func upload(ctx *cli.Context) {
args := ctx.Args()
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
- recursive = ctx.GlobalBool(SwarmRecursiveUploadFlag.Name)
+ recursive = ctx.GlobalBool(SwarmRecursiveFlag.Name)
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name)
fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name)
mimeType = ctx.GlobalString(SwarmUploadMimeType.Name)
client = swarm.NewClient(bzzapi)
+ toEncrypt = ctx.Bool(SwarmEncryptedFlag.Name)
file string
)
@@ -76,7 +77,7 @@ func upload(ctx *cli.Context) {
utils.Fatalf("Error opening file: %s", err)
}
defer f.Close()
- hash, err := client.UploadRaw(f, f.Size)
+ hash, err := client.UploadRaw(f, f.Size, toEncrypt)
if err != nil {
utils.Fatalf("Upload failed: %s", err)
}
@@ -97,7 +98,7 @@ func upload(ctx *cli.Context) {
if !recursive {
return "", errors.New("Argument is a directory and recursive upload is disabled")
}
- return client.UploadDirectory(file, defaultPath, "")
+ return client.UploadDirectory(file, defaultPath, "", toEncrypt)
}
} else {
doUpload = func() (string, error) {
@@ -110,7 +111,7 @@ func upload(ctx *cli.Context) {
mimeType = detectMimeType(file)
}
f.ContentType = mimeType
- return client.Upload(f, "")
+ return client.Upload(f, "", toEncrypt)
}
}
hash, err := doUpload()
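For callers using the Go client directly rather than the CLI, toEncrypt is simply the last argument of Upload. A minimal sketch based on the client calls visible in this diff; uploadEncryptedSketch and the octet-stream content type are illustrative:

    package sketch

    import (
        "fmt"

        swarm "github.com/ethereum/go-ethereum/swarm/api/client"
    )

    // uploadEncryptedSketch mirrors the CLI path: open a local file, set its
    // content type, and upload it with encryption enabled (toEncrypt = true).
    func uploadEncryptedSketch(bzzapi, filePath string) (string, error) {
        client := swarm.NewClient(bzzapi)

        f, err := swarm.Open(filePath)
        if err != nil {
            return "", fmt.Errorf("error opening file: %v", err)
        }
        defer f.Close()
        f.ContentType = "application/octet-stream"

        // an empty manifest hash means a new manifest is created for the upload
        return client.Upload(f, "", true)
    }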
diff --git a/cmd/swarm/upload_test.go b/cmd/swarm/upload_test.go
index df7fc216a..2afc9b3a1 100644
--- a/cmd/swarm/upload_test.go
+++ b/cmd/swarm/upload_test.go
@@ -17,60 +17,259 @@
package main
import (
+ "bytes"
+ "flag"
+ "fmt"
"io"
"io/ioutil"
"net/http"
"os"
+ "path"
+ "path/filepath"
+ "strings"
"testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/log"
+ swarm "github.com/ethereum/go-ethereum/swarm/api/client"
+ colorable "github.com/mattn/go-colorable"
)
+var loglevel = flag.Int("loglevel", 3, "verbosity of logs")
+
+func init() {
+ log.PrintOrigins(true)
+ log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
+}
+
// TestCLISwarmUp tests that running 'swarm up' makes the resulting file
// available from all nodes via the HTTP API
func TestCLISwarmUp(t *testing.T) {
- // start 3 node cluster
- t.Log("starting 3 node cluster")
+ testCLISwarmUp(false, t)
+}
+func TestCLISwarmUpRecursive(t *testing.T) {
+ testCLISwarmUpRecursive(false, t)
+}
+
+// TestCLISwarmUpEncrypted tests that running 'swarm up --encrypt' makes the resulting file
+// available from all nodes via the HTTP API
+func TestCLISwarmUpEncrypted(t *testing.T) {
+ testCLISwarmUp(true, t)
+}
+func TestCLISwarmUpEncryptedRecursive(t *testing.T) {
+ testCLISwarmUpRecursive(true, t)
+}
+
+func testCLISwarmUp(toEncrypt bool, t *testing.T) {
+ log.Info("starting 3 node cluster")
cluster := newTestCluster(t, 3)
defer cluster.Shutdown()
// create a tmp file
tmp, err := ioutil.TempFile("", "swarm-test")
- assertNil(t, err)
+ if err != nil {
+ t.Fatal(err)
+ }
defer tmp.Close()
defer os.Remove(tmp.Name())
- _, err = io.WriteString(tmp, "data")
- assertNil(t, err)
+ // write data to file
+ data := "notsorandomdata"
+ _, err = io.WriteString(tmp, data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ hashRegexp := `[a-f\d]{64}`
+ flags := []string{
+ "--bzzapi", cluster.Nodes[0].URL,
+ "up",
+ tmp.Name()}
+ if toEncrypt {
+ hashRegexp = `[a-f\d]{128}`
+ flags = []string{
+ "--bzzapi", cluster.Nodes[0].URL,
+ "up",
+ "--encrypt",
+ tmp.Name()}
+ }
// upload the file with 'swarm up' and expect a hash
- t.Log("uploading file with 'swarm up'")
- up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", tmp.Name())
- _, matches := up.ExpectRegexp(`[a-f\d]{64}`)
+ log.Info("uploading file with 'swarm up'")
+ up := runSwarm(t, flags...)
+ _, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
hash := matches[0]
- t.Logf("file uploaded with hash %s", hash)
+ log.Info("file uploaded", "hash", hash)
// get the file from the HTTP API of each node
for _, node := range cluster.Nodes {
- t.Logf("getting file from %s", node.Name)
+ log.Info("getting file from node", "node", node.Name)
+
res, err := http.Get(node.URL + "/bzz:/" + hash)
- assertNil(t, err)
- assertHTTPResponse(t, res, http.StatusOK, "data")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+
+ reply, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.StatusCode != 200 {
+ t.Fatalf("expected HTTP status 200, got %s", res.Status)
+ }
+ if string(reply) != data {
+ t.Fatalf("expected HTTP body %q, got %q", data, reply)
+ }
+ log.Debug("verifying uploaded file using `swarm down`")
+ //try to get the content with `swarm down`
+ tmpDownload, err := ioutil.TempDir("", "swarm-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ tmpDownload = path.Join(tmpDownload, "tmpfile.tmp")
+ defer os.RemoveAll(tmpDownload)
+
+ bzzLocator := "bzz:/" + hash
+ flags = []string{
+ "--bzzapi", cluster.Nodes[0].URL,
+ "down",
+ bzzLocator,
+ tmpDownload,
+ }
+
+ down := runSwarm(t, flags...)
+ down.ExpectExit()
+
+ fi, err := os.Stat(tmpDownload)
+ if err != nil {
+ t.Fatalf("could not stat path: %v", err)
+ }
+
+ switch mode := fi.Mode(); {
+ case mode.IsRegular():
+ downloadedBytes, err := ioutil.ReadFile(tmpDownload)
+ if err != nil {
+ t.Fatalf("had an error reading the downloaded file: %v", err)
+ }
+ if !bytes.Equal(downloadedBytes, bytes.NewBufferString(data).Bytes()) {
+ t.Fatalf("retrieved data and posted data not equal!")
+ }
+
+ default:
+ t.Fatalf("expected to download regular file, got %s", fi.Mode())
+ }
+ }
+
+ timeout := 2 * time.Second
+ httpClient := http.Client{
+ Timeout: timeout,
+ }
+
+ // try to trigger a client timeout by requesting a non-existent hash from each node
+ for _, node := range cluster.Nodes {
+ _, err := httpClient.Get(node.URL + "/bzz:/1023e8bae0f70be7d7b5f74343088ba408a218254391490c85ae16278e230340")
+ // the short client timeout avoids waiting for netstore's 60 second per-request timeout
+ if err != nil && !strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") {
+ t.Fatal(err)
+ }
+ // this is disabled since it takes 60s due to netstore timeout
+ // if res.StatusCode != 404 {
+ // t.Fatalf("expected HTTP status 404, got %s", res.Status)
+ // }
}
}
-func assertNil(t *testing.T, err error) {
+func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
+ fmt.Println("starting 3 node cluster")
+ cluster := newTestCluster(t, 3)
+ defer cluster.Shutdown()
+
+ tmpUploadDir, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
}
-}
+ defer os.RemoveAll(tmpUploadDir)
+ // create tmp files
+ data := "notsorandomdata"
+ for _, path := range []string{"tmp1", "tmp2"} {
+ if err := ioutil.WriteFile(filepath.Join(tmpUploadDir, path), bytes.NewBufferString(data).Bytes(), 0644); err != nil {
+ t.Fatal(err)
+ }
+ }
-func assertHTTPResponse(t *testing.T, res *http.Response, expectedStatus int, expectedBody string) {
- defer res.Body.Close()
- if res.StatusCode != expectedStatus {
- t.Fatalf("expected HTTP status %d, got %s", expectedStatus, res.Status)
+ hashRegexp := `[a-f\d]{64}`
+ flags := []string{
+ "--bzzapi", cluster.Nodes[0].URL,
+ "--recursive",
+ "up",
+ tmpUploadDir}
+ if toEncrypt {
+ hashRegexp = `[a-f\d]{128}`
+ flags = []string{
+ "--bzzapi", cluster.Nodes[0].URL,
+ "--recursive",
+ "up",
+ "--encrypt",
+ tmpUploadDir}
}
- data, err := ioutil.ReadAll(res.Body)
- assertNil(t, err)
- if string(data) != expectedBody {
- t.Fatalf("expected HTTP body %q, got %q", expectedBody, data)
+ // upload the directory with 'swarm up' and expect a hash
+ log.Info("uploading directory with 'swarm up'")
+ up := runSwarm(t, flags...)
+ _, matches := up.ExpectRegexp(hashRegexp)
+ up.ExpectExit()
+ hash := matches[0]
+ log.Info("dir uploaded", "hash", hash)
+
+ // get the file from the HTTP API of each node
+ for _, node := range cluster.Nodes {
+ log.Info("getting file from node", "node", node.Name)
+ //try to get the content with `swarm down`
+ tmpDownload, err := ioutil.TempDir("", "swarm-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDownload)
+ bzzLocator := "bzz:/" + hash
+ flagss := []string{
+ "--bzzapi", cluster.Nodes[0].URL,
+ "down",
+ "--recursive",
+ bzzLocator,
+ tmpDownload,
+ }
+
+ fmt.Println("downloading from swarm with recursive")
+ down := runSwarm(t, flagss...)
+ down.ExpectExit()
+
+ files, err := ioutil.ReadDir(tmpDownload)
+ if err != nil {
+ t.Fatalf("could not list files in %v: %v", tmpDownload, err)
+ }
+ for _, v := range files {
+ fi, err := os.Stat(path.Join(tmpDownload, v.Name()))
+ if err != nil {
+ t.Fatalf("got an error: %v", err)
+ }
+
+ switch mode := fi.Mode(); {
+ case mode.IsRegular():
+ if file, err := swarm.Open(path.Join(tmpDownload, v.Name())); err != nil {
+ t.Fatalf("encountered an error opening the file returned from the CLI: %v", err)
+ } else {
+ ff := make([]byte, len(data))
+ if _, err := io.ReadFull(file, ff); err != nil {
+ t.Fatalf("error reading downloaded file: %v", err)
+ }
+ buf := bytes.NewBufferString(data)
+
+ if !bytes.Equal(ff, buf.Bytes()) {
+ t.Fatalf("retrieved data and posted data not equal!")
+ }
+ }
+ default:
+ t.Fatalf("this shouldnt happen")
+ }
+ }
}
}
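The tests above map onto the following manual CLI flow against a running node; localhost:8500 and the placeholder hashes are illustrative, and the flag placement follows the invocations used in the tests:

    swarm --bzzapi http://localhost:8500 up --encrypt ./file.txt
    swarm --bzzapi http://localhost:8500 down bzz:/<hash> ./file-copy
    swarm --bzzapi http://localhost:8500 --recursive up ./some-dir
    swarm --bzzapi http://localhost:8500 down --recursive bzz:/<dir-hash> ./some-dir-copy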