author    Jeffrey Wilcke <geffobscura@gmail.com>  2015-06-30 04:42:13 +0800
committer Jeffrey Wilcke <geffobscura@gmail.com>  2015-06-30 06:36:25 +0800
commit    992e4f83cb05946fa53132a9184d4ac3f38b62bc
tree      b35a10b7a41c5e1f03300c7afb51f8a76202dbf8
parent    a8ebf756c7ea9e2d516d57aae6ee504e9acb21d9
core: replaced BlockCache with lru.Cache
Diffstat (limited to 'core')
-rw-r--r--  core/chain_manager.go      | 43
-rw-r--r--  core/chain_manager_test.go |  2
2 files changed, 25 insertions(+), 20 deletions(-)
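
The diff below swaps the hand-rolled BlockCache for the generic lru.Cache. Assuming the imported lru package is hashicorp's golang-lru (the import line is not part of the hunks shown), a minimal sketch of the API surface the new code relies on — New, Add, Get, Contains, Remove, Keys and Len — looks like this; the keys and values here are placeholders, not code from the commit:

// Minimal sketch of the lru.Cache API the new chain manager code uses,
// assuming the imported package is github.com/hashicorp/golang-lru.
// Keys and values are placeholders, not code from the commit.
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// lru.New errors only on a non-positive size, hence the discarded
	// error in NewChainManager and InsertChain below.
	cache, _ := lru.New(2)

	cache.Add("a", 1) // Add replaces BlockCache.Push
	cache.Add("b", 2)

	// Get returns (value, found); values come back as interface{}.
	if v, ok := cache.Get("a"); ok {
		fmt.Println("a =", v.(int))
	}

	fmt.Println(cache.Contains("b")) // Contains replaces BlockCache.Has
	cache.Remove("b")                // Remove replaces BlockCache.Delete

	// Keys plus Get replaces BlockCache.Each for iteration, as in
	// procFutureBlocks and flushQueuedBlocks.
	for _, k := range cache.Keys() {
		if v, ok := cache.Get(k); ok {
			fmt.Println(k, v, cache.Len())
		}
	}
}
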
diff --git a/core/chain_manager.go b/core/chain_manager.go
index daae24709..fc1922b3b 100644
--- a/core/chain_manager.go
+++ b/core/chain_manager.go
@@ -109,9 +109,9 @@ type ChainManager struct {
transState *state.StateDB
txState *state.ManagedState
- cache *lru.Cache // cache is the LRU caching
- futureBlocks *BlockCache // future blocks are blocks added for later processing
- pendingBlocks *BlockCache // pending blocks contain blocks not yet written to the db
+ cache *lru.Cache // cache is the LRU caching
+ futureBlocks *lru.Cache // future blocks are blocks added for later processing
+ pendingBlocks *lru.Cache // pending blocks contain blocks not yet written to the db
quit chan struct{}
// procInterrupt must be atomically called
@@ -158,7 +158,7 @@ func NewChainManager(genesis *types.Block, blockDb, stateDb common.Database, pow
// Take ownership of this particular state
bc.txState = state.ManageState(bc.State().Copy())
- bc.futureBlocks = NewBlockCache(maxFutureBlocks)
+ bc.futureBlocks, _ = lru.New(maxFutureBlocks)
bc.makeCache()
go bc.update()
@@ -390,7 +390,7 @@ func (bc *ChainManager) HasBlock(hash common.Hash) bool {
}
if bc.pendingBlocks != nil {
- if block := bc.pendingBlocks.Get(hash); block != nil {
+ if _, exist := bc.pendingBlocks.Get(hash); exist {
return true
}
}
@@ -426,8 +426,8 @@ func (self *ChainManager) GetBlock(hash common.Hash) *types.Block {
}
if self.pendingBlocks != nil {
- if block := self.pendingBlocks.Get(hash); block != nil {
- return block
+ if block, _ := self.pendingBlocks.Get(hash); block != nil {
+ return block.(*types.Block)
}
}
@@ -510,10 +510,11 @@ type queueEvent struct {
}
func (self *ChainManager) procFutureBlocks() {
- var blocks []*types.Block
- self.futureBlocks.Each(func(i int, block *types.Block) {
- blocks = append(blocks, block)
- })
+ blocks := make([]*types.Block, self.futureBlocks.Len())
+ for i, hash := range self.futureBlocks.Keys() {
+ block, _ := self.futureBlocks.Get(hash)
+ blocks[i] = block.(*types.Block)
+ }
if len(blocks) > 0 {
types.BlockBy(types.Number).Sort(blocks)
self.InsertChain(blocks)
@@ -521,13 +522,16 @@ func (self *ChainManager) procFutureBlocks() {
}
func (self *ChainManager) enqueueForWrite(block *types.Block) {
- self.pendingBlocks.Push(block)
+ self.pendingBlocks.Add(block.Hash(), block)
}
func (self *ChainManager) flushQueuedBlocks() {
db, batchWrite := self.blockDb.(*ethdb.LDBDatabase)
batch := new(leveldb.Batch)
- self.pendingBlocks.Each(func(i int, block *types.Block) {
+ for _, key := range self.pendingBlocks.Keys() {
+ b, _ := self.pendingBlocks.Get(key)
+ block := b.(*types.Block)
+
enc, _ := rlp.EncodeToBytes((*types.StorageBlock)(block))
key := append(blockHashPre, block.Hash().Bytes()...)
if batchWrite {
@@ -535,7 +539,8 @@ func (self *ChainManager) flushQueuedBlocks() {
} else {
self.blockDb.Put(key, enc)
}
- })
+ }
+
if batchWrite {
db.LDB().Write(batch, nil)
}
@@ -588,7 +593,7 @@ func (self *ChainManager) WriteBlock(block *types.Block) (status writeStatus, er
self.enqueueForWrite(block)
self.mu.Unlock()
// Delete from future blocks
- self.futureBlocks.Delete(block.Hash())
+ self.futureBlocks.Remove(block.Hash())
return
}
@@ -602,7 +607,7 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
self.chainmu.Lock()
defer self.chainmu.Unlock()
- self.pendingBlocks = NewBlockCache(len(chain))
+ self.pendingBlocks, _ = lru.New(len(chain))
// A queued approach to delivering events. This is generally
// faster than direct delivery and requires much less mutex
@@ -669,13 +674,13 @@ func (self *ChainManager) InsertChain(chain types.Blocks) (int, error) {
return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max)
}
- self.futureBlocks.Push(block)
+ self.futureBlocks.Add(block.Hash(), block)
stats.queued++
continue
}
- if IsParentErr(err) && self.futureBlocks.Has(block.ParentHash()) {
- self.futureBlocks.Push(block)
+ if IsParentErr(err) && self.futureBlocks.Contains(block.ParentHash()) {
+ self.futureBlocks.Add(block.Hash(), block)
stats.queued++
continue
}
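
One consequence visible in the GetBlock and procFutureBlocks hunks above: lru.Cache stores interface{} values rather than *types.Block, so every read now needs the two-value Get followed by a type assertion. A small self-contained sketch of that pattern, again assuming github.com/hashicorp/golang-lru; the block type and getBlock helper are hypothetical stand-ins, not code from the commit:

// Sketch of the two-value Get plus type-assertion pattern introduced above.
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

// block stands in for *types.Block.
type block struct{ number uint64 }

// getBlock follows the same pattern as ChainManager.GetBlock: a miss returns
// nil, a hit must be asserted back to the concrete type because the cache
// stores interface{} values.
func getBlock(cache *lru.Cache, hash string) *block {
	if v, ok := cache.Get(hash); ok {
		return v.(*block)
	}
	return nil
}

func main() {
	cache, _ := lru.New(16)
	cache.Add("0xabc", &block{number: 1})
	fmt.Println(getBlock(cache, "0xabc").number) // 1
	fmt.Println(getBlock(cache, "0xdef") == nil) // true
}
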
diff --git a/core/chain_manager_test.go b/core/chain_manager_test.go
index 11a326ae4..8b3ea9e85 100644
--- a/core/chain_manager_test.go
+++ b/core/chain_manager_test.go
@@ -393,7 +393,7 @@ func chm(genesis *types.Block, db common.Database) *ChainManager {
var eventMux event.TypeMux
bc := &ChainManager{blockDb: db, stateDb: db, genesisBlock: genesis, eventMux: &eventMux, pow: FakePow{}}
bc.cache, _ = lru.New(100)
- bc.futureBlocks = NewBlockCache(100)
+ bc.futureBlocks, _ = lru.New(100)
bc.processor = bproc{}
bc.ResetWithGenesisBlock(genesis)
bc.txState = state.ManageState(bc.State())