// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// memory storage layer for the package blockhash
package storage

import (
	"sync"

	lru "github.com/hashicorp/golang-lru"
)
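
// MemStore is an in-memory storage layer that caches frequently requested
// chunks and keeps track of outgoing chunk requests awaiting delivery.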
type MemStore struct {
	cache    *lru.Cache
	requests *lru.Cache
	mu       sync.RWMutex
	disabled bool
}

// NewMemStore instantiates a MemStore cache. A record of all outgoing requests
// for chunks that should later be delivered by peer nodes is kept in the
// `requests` LRU cache, and all frequently requested chunks are kept in the
// `cache` LRU cache.
//
// The capacity of the `requests` LRU cache should ideally never be reached,
// which is why, for the time being, it should be initialised with the same
// value as the LDBStore capacity.
func NewMemStore(params *StoreParams, _ *LDBStore) (m *MemStore) {
	if params.CacheCapacity == 0 {
		return &MemStore{
			disabled: true,
		}
	}

	onEvicted := func(key interface{}, value interface{}) {
		v := value.(*Chunk)
		<-v.dbStoredC
	}
	c, err := lru.NewWithEvict(int(params.CacheCapacity), onEvicted)
	if err != nil {
		panic(err)
	}

	requestEvicted := func(key interface{}, value interface{}) {
		// the error log is temporarily removed until we figure out the problem, as it is too spammy
		//log.Error("evict called on outgoing request")
	}
	r, err := lru.NewWithEvict(int(params.ChunkRequestsCacheCapacity), requestEvicted)
	if err != nil {
		panic(err)
	}

	return &MemStore{
		cache:    c,
		requests: r,
	}
}
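
// Get retrieves a chunk from the MemStore by its address. Chunks belonging to
// pending outgoing requests are checked first; otherwise the chunk cache is
// consulted. ErrChunkNotFound is returned if the chunk is not known, or if the
// store is disabled.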
func (m *MemStore) Get(addr Address) (*Chunk, error) {
	if m.disabled {
		return nil, ErrChunkNotFound
	}

	m.mu.RLock()
	defer m.mu.RUnlock()

	r, ok := m.requests.Get(string(addr))
	// it is a request
	if ok {
		return r.(*Chunk), nil
	}

	// it is not a request
	c, ok := m.cache.Get(string(addr))
	if !ok {
		return nil, ErrChunkNotFound
	}
	return c.(*Chunk), nil
}
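
// Put adds a chunk to the MemStore. A chunk carrying an open request channel
// (ReqC) is kept in the requests cache until the request is fulfilled, at
// which point it is moved to the chunk cache unless it errored; a chunk
// without a request channel is added to the chunk cache directly.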
func (m *MemStore) Put(c *Chunk) {
	if m.disabled {
		return
	}

	m.mu.Lock()
	defer m.mu.Unlock()

	// it is a request
	if c.ReqC != nil {
		select {
		case <-c.ReqC:
			if c.GetErrored() != nil {
				m.requests.Remove(string(c.Addr))
				return
			}
			m.cache.Add(string(c.Addr), c)
			m.requests.Remove(string(c.Addr))
		default:
			m.requests.Add(string(c.Addr), c)
		}
		return
	}

	// it is not a request
	m.cache.Add(string(c.Addr), c)
	m.requests.Remove(string(c.Addr))
}
func (m *MemStore) setCapacity(n int) {
	if n <= 0 {
		m.disabled = true
	} else {
		onEvicted := func(key interface{}, value interface{}) {
			v := value.(*Chunk)
			<-v.dbStoredC
		}
		c, err := lru.NewWithEvict(n, onEvicted)
		if err != nil {
			panic(err)
		}

		r, err := lru.New(defaultChunkRequestsCacheCapacity)
		if err != nil {
			panic(err)
		}

		// assign through the pointer so the caller's MemStore is updated;
		// assigning to the local variable m would have no effect
		*m = MemStore{
			cache:    c,
			requests: r,
		}
	}
}
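
// Close is a no-op: the MemStore holds no resources that need to be released.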
func (m *MemStore) Close() {}