path: root/consensus/ethash/algorithm_go1.8.go
author    Felix Lange <fjl@users.noreply.github.com>  2018-01-23 18:05:30 +0800
committer Péter Szilágyi <peterke@gmail.com>          2018-01-23 18:05:30 +0800
commit    924065e19d08cc7e6af0b3a5b5b1ef3785b79bd4 (patch)
tree      9aefabb6b4c375971570555486ce5b172660661d /consensus/ethash/algorithm_go1.8.go
parent    5d4267911a7791bfa60f275a97347372fbf0ce99 (diff)
consensus/ethash: improve cache/dataset handling (#15864)
* consensus/ethash: add maxEpoch constant

* consensus/ethash: improve cache/dataset handling

  There are two fixes in this commit:

  Unmap the memory through a finalizer like the libethash wrapper did. The
  release logic was incorrect and freed the memory while it was being used,
  leading to crashes like in #14495 or #14943.

  Track caches and datasets using simplelru instead of reinventing LRU logic.
  This should make it easier to see whether it's correct.

* consensus/ethash: restore 'future item' logic in lru

* consensus/ethash: use mmap even in test mode

  This makes it possible to shorten the time taken for TestCacheFileEvict.

* consensus/ethash: shuffle func calc*Size comments around

* consensus/ethash: ensure future cache/dataset is in the lru cache

* consensus/ethash: add issue link to the new test

* consensus/ethash: fix vet

* consensus/ethash: fix test

* consensus: tiny issue + nitpick fixes
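Note: the finalizer-based release described in the commit message can be illustrated with a minimal sketch. This is not code from the commit; the cache type, its release hook, and the sizes used here are assumptions made purely to show the pattern: runtime.SetFinalizer defers unmapping until the garbage collector proves nothing still references the cache, so the memory cannot be freed while a verifier is reading it. The second fix, tracking caches and datasets with simplelru, lives elsewhere in the same commit and is not part of the file shown below.

package main

import (
	"fmt"
	"runtime"
	"time"
)

// cache is a hypothetical stand-in for an ethash cache backed by an mmap'd
// file; data would point into the mapping and release would call munmap.
type cache struct {
	data    []byte
	release func()
}

// newCache ties unmapping to reachability: the finalizer runs only after the
// GC has proven that nothing references c anymore, so the memory can never be
// released while a verification is still using it.
func newCache(size int) *cache {
	c := &cache{data: make([]byte, size)} // stand-in for the mmap'd region
	c.release = func() { fmt.Printf("released %d bytes\n", size) }
	runtime.SetFinalizer(c, func(c *cache) { c.release() })
	return c
}

func main() {
	c := newCache(16 << 20)
	_ = c.data[0] // the cache stays "mapped" while it is reachable

	c = nil                            // drop the last reference
	runtime.GC()                       // the finalizer becomes eligible to run
	time.Sleep(100 * time.Millisecond) // give the finalizer goroutine a chance
}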
Diffstat (limited to 'consensus/ethash/algorithm_go1.8.go')
-rw-r--r--  consensus/ethash/algorithm_go1.8.go  |  34
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/consensus/ethash/algorithm_go1.8.go b/consensus/ethash/algorithm_go1.8.go
index d691b758f..975fdffe5 100644
--- a/consensus/ethash/algorithm_go1.8.go
+++ b/consensus/ethash/algorithm_go1.8.go
@@ -20,17 +20,20 @@ package ethash
 
 import "math/big"
 
-// cacheSize calculates and returns the size of the ethash verification cache that
-// belongs to a certain block number. The cache size grows linearly, however, we
-// always take the highest prime below the linearly growing threshold in order to
-// reduce the risk of accidental regularities leading to cyclic behavior.
+// cacheSize returns the size of the ethash verification cache that belongs to a certain
+// block number.
 func cacheSize(block uint64) uint64 {
-	// If we have a pre-generated value, use that
 	epoch := int(block / epochLength)
-	if epoch < len(cacheSizes) {
+	if epoch < maxEpoch {
 		return cacheSizes[epoch]
 	}
-	// No known cache size, calculate manually (sanity branch only)
+	return calcCacheSize(epoch)
+}
+
+// calcCacheSize calculates the cache size for epoch. The cache size grows linearly,
+// however, we always take the highest prime below the linearly growing threshold in order
+// to reduce the risk of accidental regularities leading to cyclic behavior.
+func calcCacheSize(epoch int) uint64 {
 	size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
 	for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
 		size -= 2 * hashBytes
@@ -38,17 +41,20 @@ func cacheSize(block uint64) uint64 {
 	return size
 }
 
-// datasetSize calculates and returns the size of the ethash mining dataset that
-// belongs to a certain block number. The dataset size grows linearly, however, we
-// always take the highest prime below the linearly growing threshold in order to
-// reduce the risk of accidental regularities leading to cyclic behavior.
+// datasetSize returns the size of the ethash mining dataset that belongs to a certain
+// block number.
 func datasetSize(block uint64) uint64 {
-	// If we have a pre-generated value, use that
 	epoch := int(block / epochLength)
-	if epoch < len(datasetSizes) {
+	if epoch < maxEpoch {
 		return datasetSizes[epoch]
 	}
-	// No known dataset size, calculate manually (sanity branch only)
+	return calcDatasetSize(epoch)
+}
+
+// calcDatasetSize calculates the dataset size for epoch. The dataset size grows linearly,
+// however, we always take the highest prime below the linearly growing threshold in order
+// to reduce the risk of accidental regularities leading to cyclic behavior.
+func calcDatasetSize(epoch int) uint64 {
 	size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes
 	for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
 		size -= 2 * mixBytes
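For reference, the calc*Size helpers introduced by this diff can be exercised on their own. The sketch below repeats the loop from the patch; the constant values are the standard ethash parameters and are restated here only so the snippet compiles standalone (in go-ethereum they are defined alongside the rest of the ethash implementation, not in this file).

package main

import (
	"fmt"
	"math/big"
)

// Standard ethash parameters, restated so the sketch is self-contained.
const (
	hashBytes          = 64      // length of a hash in bytes
	mixBytes           = 128     // width of the mix
	cacheInitBytes     = 1 << 24 // bytes in cache at genesis
	cacheGrowthBytes   = 1 << 17 // cache growth per epoch
	datasetInitBytes   = 1 << 30 // bytes in dataset at genesis
	datasetGrowthBytes = 1 << 23 // dataset growth per epoch
)

// calcCacheSize mirrors the loop from the diff: start at the linear threshold
// and step down by whole hashes until size/hashBytes is prime.
func calcCacheSize(epoch int) uint64 {
	size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
	for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // always accurate for n < 2^64
		size -= 2 * hashBytes
	}
	return size
}

// calcDatasetSize does the same with the dataset parameters.
func calcDatasetSize(epoch int) uint64 {
	size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes
	for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // always accurate for n < 2^64
		size -= 2 * mixBytes
	}
	return size
}

func main() {
	// Epoch 0 (blocks 0..29999): roughly 16 MiB of cache and 1 GiB of dataset.
	fmt.Println(calcCacheSize(0), calcDatasetSize(0))
}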