aboutsummaryrefslogtreecommitdiffstats
path: root/swarm
diff options
context:
space:
mode:
authorferhat elmas <elmas.ferhat@gmail.com>2017-11-08 18:45:52 +0800
committerFelix Lange <fjl@users.noreply.github.com>2017-11-08 18:45:52 +0800
commit9619a610248e9630968ba1d9be8e214b645c9c55 (patch)
tree5844cfbee9a0ac5750dcc803cea461f75969f74e /swarm
parentbfdc0fa3622d7c3b421d2f5a6dda5746be41bfde (diff)
downloaddexon-9619a610248e9630968ba1d9be8e214b645c9c55.tar.gz
dexon-9619a610248e9630968ba1d9be8e214b645c9c55.tar.zst
dexon-9619a610248e9630968ba1d9be8e214b645c9c55.zip
all: gofmt -w -s (#15419)
Diffstat (limited to 'swarm')
-rw-r--r--swarm/api/client/client_test.go38
-rw-r--r--swarm/storage/chunker.go15
-rw-r--r--swarm/storage/pyramid.go4
3 files changed, 27 insertions, 30 deletions
diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go
index edf385dd0..c1d144e37 100644
--- a/swarm/api/client/client_test.go
+++ b/swarm/api/client/client_test.go
@@ -244,25 +244,25 @@ func TestClientFileList(t *testing.T) {
}
tests := map[string][]string{
- "": []string{"dir1/", "dir2/", "file1.txt", "file2.txt"},
- "file": []string{"file1.txt", "file2.txt"},
- "file1": []string{"file1.txt"},
- "file2.txt": []string{"file2.txt"},
- "file12": []string{},
- "dir": []string{"dir1/", "dir2/"},
- "dir1": []string{"dir1/"},
- "dir1/": []string{"dir1/file3.txt", "dir1/file4.txt"},
- "dir1/file": []string{"dir1/file3.txt", "dir1/file4.txt"},
- "dir1/file3.txt": []string{"dir1/file3.txt"},
- "dir1/file34": []string{},
- "dir2/": []string{"dir2/dir3/", "dir2/dir4/", "dir2/file5.txt"},
- "dir2/file": []string{"dir2/file5.txt"},
- "dir2/dir": []string{"dir2/dir3/", "dir2/dir4/"},
- "dir2/dir3/": []string{"dir2/dir3/file6.txt"},
- "dir2/dir4/": []string{"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
- "dir2/dir4/file": []string{"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
- "dir2/dir4/file7.txt": []string{"dir2/dir4/file7.txt"},
- "dir2/dir4/file78": []string{},
+ "": {"dir1/", "dir2/", "file1.txt", "file2.txt"},
+ "file": {"file1.txt", "file2.txt"},
+ "file1": {"file1.txt"},
+ "file2.txt": {"file2.txt"},
+ "file12": {},
+ "dir": {"dir1/", "dir2/"},
+ "dir1": {"dir1/"},
+ "dir1/": {"dir1/file3.txt", "dir1/file4.txt"},
+ "dir1/file": {"dir1/file3.txt", "dir1/file4.txt"},
+ "dir1/file3.txt": {"dir1/file3.txt"},
+ "dir1/file34": {},
+ "dir2/": {"dir2/dir3/", "dir2/dir4/", "dir2/file5.txt"},
+ "dir2/file": {"dir2/file5.txt"},
+ "dir2/dir": {"dir2/dir3/", "dir2/dir4/"},
+ "dir2/dir3/": {"dir2/dir3/file6.txt"},
+ "dir2/dir4/": {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
+ "dir2/dir4/file": {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
+ "dir2/dir4/file7.txt": {"dir2/dir4/file7.txt"},
+ "dir2/dir4/file78": {},
}
for prefix, expected := range tests {
actual := ls(prefix)
diff --git a/swarm/storage/chunker.go b/swarm/storage/chunker.go
index 0454828b9..8c0d62cbe 100644
--- a/swarm/storage/chunker.go
+++ b/swarm/storage/chunker.go
@@ -50,7 +50,6 @@ data_{i} := size(subtree_{i}) || key_{j} || key_{j+1} .... || key_{j+n-1}
The underlying hash function is configurable
*/
-
/*
Tree chunker is a concrete implementation of data chunking.
This chunker works in a simple way, it builds a tree out of the document so that each node either represents a chunk of real data or a chunk of data representing a branching non-leaf node of the tree. In particular each such non-leaf chunk represents a concatenation of the hash of its respective children. This scheme simultaneously guarantees data integrity as well as self addressing. Abstract nodes are transparent since their represented size component is strictly greater than their maximum data size, since they encode a subtree.
@@ -61,17 +60,17 @@ The hashing itself does use extra copies and allocation though, since it does ne
var (
errAppendOppNotSuported = errors.New("Append operation not supported")
- errOperationTimedOut = errors.New("operation timed out")
+ errOperationTimedOut = errors.New("operation timed out")
)
type TreeChunker struct {
branches int64
hashFunc SwarmHasher
// calculated
- hashSize int64 // self.hashFunc.New().Size()
- chunkSize int64 // hashSize* branches
- workerCount int64 // the number of worker routines used
- workerLock sync.RWMutex // lock for the worker count
+ hashSize int64 // self.hashFunc.New().Size()
+ chunkSize int64 // hashSize* branches
+ workerCount int64 // the number of worker routines used
+ workerLock sync.RWMutex // lock for the worker count
}
func NewTreeChunker(params *ChunkerParams) (self *TreeChunker) {
@@ -124,7 +123,6 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
panic("chunker must be initialised")
}
-
jobC := make(chan *hashJob, 2*ChunkProcessors)
wg := &sync.WaitGroup{}
errC := make(chan error)
@@ -164,7 +162,6 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
close(errC)
}()
-
defer close(quitC)
select {
case err := <-errC:
@@ -172,7 +169,7 @@ func (self *TreeChunker) Split(data io.Reader, size int64, chunkC chan *Chunk, s
return nil, err
}
case <-time.NewTimer(splitTimeout).C:
- return nil,errOperationTimedOut
+ return nil, errOperationTimedOut
}
return key, nil
diff --git a/swarm/storage/pyramid.go b/swarm/storage/pyramid.go
index 631ab52b8..42b83583d 100644
--- a/swarm/storage/pyramid.go
+++ b/swarm/storage/pyramid.go
@@ -123,7 +123,7 @@ type PyramidChunker struct {
hashSize int64
branches int64
workerCount int64
- workerLock sync.RWMutex
+ workerLock sync.RWMutex
}
func NewPyramidChunker(params *ChunkerParams) (self *PyramidChunker) {
@@ -634,4 +634,4 @@ func (self *PyramidChunker) enqueueDataChunk(chunkData []byte, size uint64, pare
return pkey
-} \ No newline at end of file
+}