author    Anton Evangelatov <anton.evangelatov@gmail.com>  2018-09-12 20:39:45 +0800
committer Balint Gabor <balint.g@gmail.com>                2018-09-12 20:39:45 +0800
commit    ff3a5d24d2e40fd66f7813173e9cfc31144f3c53 (patch)
tree      50ea3c47731722196270329b4c50df1a390e3075 /swarm
parent    bfce00385f1c8dab222b7ddab6c336177a5ae731 (diff)
swarm/storage: remove redundant increments for dataIdx and entryCnt (#17484)
* swarm/storage: remove redundant increments for dataIdx and entryCnt
* swarm/storage: add Delete to LDBStore
* swarm/storage: wait for garbage collection
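
For context on the new API: the exported Delete wraps the index lookup that the tests previously performed by hand. A minimal usage sketch, assuming the test helpers (newLDBStore, the Chunk fields) that appear in the diff below; chunk construction is elided:

    ldb, cleanup := newLDBStore(t) // test helper from ldbstore_test.go
    defer cleanup()
    ldb.setCapacity(uint64(10))

    // Store a chunk and wait until it is persisted (chunk setup elided;
    // see the existing tests in this package).
    ldb.Put(context.TODO(), chunk)
    <-chunk.dbStoredC

    // New in this change: delete directly by address instead of resolving
    // the index key and calling the unexported delete.
    ldb.Delete(chunk.Addr)

The tests in ldbstore_test.go switch to this single call in place of the getIndexKey/tryAccessIdx/delete sequence.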
Diffstat (limited to 'swarm')
-rw-r--r--  swarm/storage/ldbstore.go       25
-rw-r--r--  swarm/storage/ldbstore_test.go  41
2 files changed, 35 insertions, 31 deletions
diff --git a/swarm/storage/ldbstore.go b/swarm/storage/ldbstore.go
index bd3501ea2..675b5de01 100644
--- a/swarm/storage/ldbstore.go
+++ b/swarm/storage/ldbstore.go
@@ -147,13 +147,10 @@ func NewLDBStore(params *LDBStoreParams) (s *LDBStore, err error) {
}
data, _ := s.db.Get(keyEntryCnt)
s.entryCnt = BytesToU64(data)
- s.entryCnt++
data, _ = s.db.Get(keyAccessCnt)
s.accessCnt = BytesToU64(data)
- s.accessCnt++
data, _ = s.db.Get(keyDataIdx)
s.dataIdx = BytesToU64(data)
- s.dataIdx++
return s, nil
}
@@ -251,6 +248,8 @@ func decodeOldData(data []byte, chunk *Chunk) {
}
func (s *LDBStore) collectGarbage(ratio float32) {
+ log.Trace("collectGarbage", "ratio", ratio)
+
metrics.GetOrRegisterCounter("ldbstore.collectgarbage", nil).Inc(1)
it := s.db.NewIterator()
@@ -492,6 +491,18 @@ func (s *LDBStore) ReIndex() {
log.Warn(fmt.Sprintf("Found %v errors out of %v entries", errorsFound, total))
}
+func (s *LDBStore) Delete(addr Address) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ ikey := getIndexKey(addr)
+
+ var indx dpaDBIndex
+ s.tryAccessIdx(ikey, &indx)
+
+ s.delete(indx.Idx, ikey, s.po(addr))
+}
+
func (s *LDBStore) delete(idx uint64, idxKey []byte, po uint8) {
metrics.GetOrRegisterCounter("ldbstore.delete", nil).Inc(1)
@@ -517,8 +528,8 @@ func (s *LDBStore) CurrentBucketStorageIndex(po uint8) uint64 {
}
func (s *LDBStore) Size() uint64 {
- s.lock.Lock()
- defer s.lock.Unlock()
+ s.lock.RLock()
+ defer s.lock.RUnlock()
return s.entryCnt
}
@@ -602,21 +613,23 @@ mainLoop:
}
close(c)
for e > s.capacity {
+ log.Trace("for >", "e", e, "s.capacity", s.capacity)
// Collect garbage in a separate goroutine
// to be able to interrupt this loop by s.quit.
done := make(chan struct{})
go func() {
s.collectGarbage(gcArrayFreeRatio)
+ log.Trace("collectGarbage closing done")
close(done)
}()
- e = s.entryCnt
select {
case <-s.quit:
s.lock.Unlock()
break mainLoop
case <-done:
}
+ e = s.entryCnt
}
s.lock.Unlock()
}
diff --git a/swarm/storage/ldbstore_test.go b/swarm/storage/ldbstore_test.go
index 5ee88baa5..9694a724e 100644
--- a/swarm/storage/ldbstore_test.go
+++ b/swarm/storage/ldbstore_test.go
@@ -27,8 +27,8 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/chunk"
- "github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
@@ -209,7 +209,7 @@ func testIterator(t *testing.T, mock bool) {
for poc = 0; poc <= 255; poc++ {
err := db.SyncIterator(0, uint64(chunkkeys.Len()), uint8(poc), func(k Address, n uint64) bool {
log.Trace(fmt.Sprintf("Got key %v number %d poc %d", k, n, uint8(poc)))
- chunkkeys_results[n-1] = k
+ chunkkeys_results[n] = k
i++
return true
})
@@ -324,12 +324,12 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) {
log.Info("got back chunk", "chunk", ret)
}
- if ldb.entryCnt != uint64(n+1) {
- t.Fatalf("expected entryCnt to be equal to %v, but got %v", n+1, ldb.entryCnt)
+ if ldb.entryCnt != uint64(n) {
+ t.Fatalf("expected entryCnt to be equal to %v, but got %v", n, ldb.entryCnt)
}
- if ldb.accessCnt != uint64(2*n+1) {
- t.Fatalf("expected accessCnt to be equal to %v, but got %v", n+1, ldb.accessCnt)
+ if ldb.accessCnt != uint64(2*n) {
+ t.Fatalf("expected accessCnt to be equal to %v, but got %v", 2*n, ldb.accessCnt)
}
}
@@ -362,7 +362,7 @@ func TestLDBStoreCollectGarbage(t *testing.T) {
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
// wait for garbage collection to kick in on the responsible actor
- time.Sleep(5 * time.Second)
+ time.Sleep(1 * time.Second)
var missing int
for i := 0; i < n; i++ {
@@ -416,14 +416,7 @@ func TestLDBStoreAddRemove(t *testing.T) {
for i := 0; i < n; i++ {
// delete all even index chunks
if i%2 == 0 {
-
- key := chunks[i].Addr
- ikey := getIndexKey(key)
-
- var indx dpaDBIndex
- ldb.tryAccessIdx(ikey, &indx)
-
- ldb.delete(indx.Idx, ikey, ldb.po(key))
+ ldb.Delete(chunks[i].Addr)
}
}
@@ -452,12 +445,12 @@ func TestLDBStoreAddRemove(t *testing.T) {
// TestLDBStoreRemoveThenCollectGarbage tests that we can delete chunks and that we can trigger garbage collection
func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
- capacity := 10
+ capacity := 11
ldb, cleanup := newLDBStore(t)
ldb.setCapacity(uint64(capacity))
- n := 7
+ n := 11
chunks := []*Chunk{}
for i := 0; i < capacity; i++ {
@@ -477,13 +470,7 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
// delete all chunks
for i := 0; i < n; i++ {
- key := chunks[i].Addr
- ikey := getIndexKey(key)
-
- var indx dpaDBIndex
- ldb.tryAccessIdx(ikey, &indx)
-
- ldb.delete(indx.Idx, ikey, ldb.po(key))
+ ldb.Delete(chunks[i].Addr)
}
log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt)
@@ -491,9 +478,10 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
cleanup()
ldb, cleanup = newLDBStore(t)
+ capacity = 10
ldb.setCapacity(uint64(capacity))
- n = 10
+ n = 11
for i := 0; i < n; i++ {
ldb.Put(context.TODO(), chunks[i])
@@ -504,6 +492,9 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) {
<-chunks[i].dbStoredC
}
+ // wait for garbage collection
+ time.Sleep(1 * time.Second)
+
// expect for first chunk to be missing, because it has the smallest access value
idx := 0
ret, err := ldb.Get(context.TODO(), chunks[idx].Addr)