path: root/swarm/storage
author    Lewis Marshall <lewis@lmars.net>  2017-07-31 19:23:44 +0800
committer Felix Lange <fjl@users.noreply.github.com>  2017-07-31 19:23:44 +0800
commit    e9b850805eebc55ea8486323a1a7861b9b554430 (patch)
tree      c5df8c23899053824c49aa0390309e415a33da51 /swarm/storage
parent    53f3460ab5b94875edf90c8b0f5da46b0c104321 (diff)
cmd/swarm: support exporting, importing chunk db (#14868)
Diffstat (limited to 'swarm/storage')
-rw-r--r--  swarm/storage/dbstore.go  82
1 file changed, 82 insertions(+), 0 deletions(-)
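
For orientation, here is a minimal sketch of how the Export and Import methods added in the diff below might be driven from calling code. Only DbStore.Export(io.Writer) and DbStore.Import(io.Reader) come from this commit; the helper names (exportChunks, importChunks), the package name, and the use of plain files are illustrative assumptions.

	// Package chunkdump is a hypothetical wrapper around the new
	// DbStore.Export/Import methods; it is not part of this commit.
	package chunkdump

	import (
		"os"

		"github.com/ethereum/go-ethereum/swarm/storage"
	)

	// exportChunks streams every chunk in the store into a tar archive at
	// path and returns the number of chunks written.
	func exportChunks(s *storage.DbStore, path string) (int64, error) {
		f, err := os.Create(path)
		if err != nil {
			return 0, err
		}
		defer f.Close()
		// Export writes the tar stream (and its trailer, via the deferred
		// tar.Writer.Close in the diff below) before returning.
		return s.Export(f)
	}

	// importChunks reads chunks back into the store from a tar archive at
	// path and returns the number of chunks read.
	func importChunks(s *storage.DbStore, path string) (int64, error) {
		f, err := os.Open(path)
		if err != nil {
			return 0, err
		}
		defer f.Close()
		return s.Import(f)
	}

Because both methods take plain io.Writer/io.Reader values, the same helpers could just as well stream to or from a network connection or a compressed pipe instead of a file.
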
diff --git a/swarm/storage/dbstore.go b/swarm/storage/dbstore.go
index 31ff5b64e..076113084 100644
--- a/swarm/storage/dbstore.go
+++ b/swarm/storage/dbstore.go
@@ -23,9 +23,13 @@
package storage
import (
+ "archive/tar"
"bytes"
"encoding/binary"
+ "encoding/hex"
"fmt"
+ "io"
+ "io/ioutil"
"sync"
"github.com/ethereum/go-ethereum/log"
@@ -260,6 +264,84 @@ func (s *DbStore) collectGarbage(ratio float32) {
s.db.Put(keyGCPos, s.gcPos)
}
+// Export writes all chunks from the store to a tar archive, returning the
+// number of chunks written.
+func (s *DbStore) Export(out io.Writer) (int64, error) {
+ tw := tar.NewWriter(out)
+ defer tw.Close()
+
+ it := s.db.NewIterator()
+ defer it.Release()
+ var count int64
+ for ok := it.Seek([]byte{kpIndex}); ok; ok = it.Next() {
+ key := it.Key()
+ if (key == nil) || (key[0] != kpIndex) {
+ break
+ }
+
+ var index dpaDBIndex
+ decodeIndex(it.Value(), &index)
+
+ data, err := s.db.Get(getDataKey(index.Idx))
+ if err != nil {
+ log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key[:], err))
+ continue
+ }
+
+ hdr := &tar.Header{
+ Name: hex.EncodeToString(key[1:]),
+ Mode: 0644,
+ Size: int64(len(data)),
+ }
+ if err := tw.WriteHeader(hdr); err != nil {
+ return count, err
+ }
+ if _, err := tw.Write(data); err != nil {
+ return count, err
+ }
+ count++
+ }
+
+ return count, nil
+}
+
+// Import reads chunks into the store from a tar archive, returning the number
+// of chunks read.
+func (s *DbStore) Import(in io.Reader) (int64, error) {
+ tr := tar.NewReader(in)
+
+ var count int64
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return count, err
+ }
+
+ if len(hdr.Name) != 64 {
+ log.Warn("ignoring non-chunk file", "name", hdr.Name)
+ continue
+ }
+
+ key, err := hex.DecodeString(hdr.Name)
+ if err != nil {
+ log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err)
+ continue
+ }
+
+ data, err := ioutil.ReadAll(tr)
+ if err != nil {
+ return count, err
+ }
+
+ s.Put(&Chunk{Key: key, SData: data})
+ count++
+ }
+
+ return count, nil
+}
+
func (s *DbStore) Cleanup() {
//Iterates over the database and checks that there are no faulty chunks
it := s.db.NewIterator()