author    obscuren <geffobscura@gmail.com>    2015-06-19 22:20:13 +0800
committer Jeffrey Wilcke <geffobscura@gmail.com>    2015-06-30 00:51:48 +0800
commit    c850c41ec108484a613600df103dbc55fe209a98 (patch)
tree      ca576ea7fd983a6960556a1fd57a31c6c1d5658f
parent    76821d167acd7da15e13b23beeceb6779138ffe5 (diff)
trie: Implemented a batch write approach for flushing
-rw-r--r--  trie/cache.go | 26
1 file changed, 18 insertions(+), 8 deletions(-)
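The change mirrors every Put into an in-memory leveldb.Batch so that Flush can hand the whole batch to LevelDB in one atomic write instead of issuing one backend Put per node. For context only (not part of the commit), a minimal standalone sketch of the goleveldb batch API being relied on; the database path and keys are placeholders:

// Standalone sketch of the batched-write pattern this commit adopts.
// Only the goleveldb API is used; "/tmp/trie-demo" is an illustrative path.
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	// Open (or create) a LevelDB database.
	db, err := leveldb.OpenFile("/tmp/trie-demo", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Accumulate writes in memory instead of hitting the database per key.
	batch := new(leveldb.Batch)
	batch.Put([]byte("key-1"), []byte("value-1"))
	batch.Put([]byte("key-2"), []byte("value-2"))

	// A single Write applies the whole batch atomically, which is the point
	// of the change: Flush becomes one write instead of N individual puts.
	if err := db.Write(batch, nil); err != nil {
		log.Fatal("db write err:", err)
	}
}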
diff --git a/trie/cache.go b/trie/cache.go
index 4c76c6cba..cb805d2f2 100644
--- a/trie/cache.go
+++ b/trie/cache.go
@@ -1,6 +1,11 @@
 package trie
 
-import "github.com/ethereum/go-ethereum/logger/glog"
+import (
+	"github.com/ethereum/go-ethereum/compression/rle"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/syndtr/goleveldb/leveldb"
+)
 
 type Backend interface {
 	Get([]byte) ([]byte, error)
@@ -8,12 +13,13 @@ type Backend interface {
 }
 
 type Cache struct {
+	batch   *leveldb.Batch
 	store   map[string][]byte
 	backend Backend
 }
 
 func NewCache(backend Backend) *Cache {
-	return &Cache{make(map[string][]byte), backend}
+	return &Cache{new(leveldb.Batch), make(map[string][]byte), backend}
 }
 
 func (self *Cache) Get(key []byte) []byte {
@@ -26,19 +32,23 @@ func (self *Cache) Get(key []byte) []byte {
 }
 
 func (self *Cache) Put(key []byte, data []byte) {
+	// write the data to the ldb batch
+	self.batch.Put(key, rle.Compress(data))
 	self.store[string(key)] = data
 }
 
+// Flush flushes the trie to the backing layer. If this is a leveldb instance
+// we'll use a batched write, otherwise we'll use regular put.
func (self *Cache) Flush() {
-	for k, v := range self.store {
-		if err := self.backend.Put([]byte(k), v); err != nil {
+	if db, ok := self.backend.(*ethdb.LDBDatabase); ok {
+		if err := db.LDB().Write(self.batch, nil); err != nil {
 			glog.Fatal("db write err:", err)
 		}
+	} else {
+		for k, v := range self.store {
+			self.backend.Put([]byte(k), v)
+		}
 	}
-
-	// This will eventually grow too large. We'd could
-	// do a make limit on storage and push out not-so-popular nodes.
-	//self.Reset()
 }
 
 func (self *Cache) Copy() *Cache {
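As a hedged illustration of the dispatch pattern Flush uses above (not the actual go-ethereum code): the cache writes through a generic Backend interface, but takes the batched fast path when a type assertion reveals a backend that can expose its raw *leveldb.DB. In this sketch, Backend, levelBackend, and flush are hypothetical stand-ins for the real types (ethdb.LDBDatabase in the diff); only the goleveldb calls are real API.

// Package cachesketch sketches the batched-flush-with-fallback pattern.
package cachesketch

import "github.com/syndtr/goleveldb/leveldb"

// Backend mirrors the minimal interface the trie cache writes through.
type Backend interface {
	Get([]byte) ([]byte, error)
	Put([]byte, []byte) error
}

// levelBackend is a hypothetical backend that can hand out its underlying
// *leveldb.DB, playing the role of ethdb.LDBDatabase in the diff above.
type levelBackend struct{ db *leveldb.DB }

func (b *levelBackend) Get(k []byte) ([]byte, error) { return b.db.Get(k, nil) }
func (b *levelBackend) Put(k, v []byte) error        { return b.db.Put(k, v, nil) }
func (b *levelBackend) LDB() *leveldb.DB             { return b.db }

// flush writes pending data either as one atomic batch (fast path when the
// backend is LevelDB-backed) or key by key through the generic interface.
func flush(backend Backend, batch *leveldb.Batch, store map[string][]byte) error {
	if db, ok := backend.(*levelBackend); ok {
		// One atomic write for the whole batch.
		return db.LDB().Write(batch, nil)
	}
	// Portable fallback: one Put per cached node.
	for k, v := range store {
		if err := backend.Put([]byte(k), v); err != nil {
			return err
		}
	}
	return nil
}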