path: root/pow/ethash_algo.go
author     Péter Szilágyi <peterke@gmail.com>          2017-03-06 21:00:20 +0800
committer  Felix Lange <fjl@users.noreply.github.com>  2017-03-09 22:50:14 +0800
commit     df72e20cc521b43092b9e3cc684836d4d673e126 (patch)
tree       f158ca0f28a404f1798133daeddb8af9b0bbe920 /pow/ethash_algo.go
parent     023670f6bafcfed28c01857da215217a5dadfaa1 (diff)
pow: only support prime calculations on Go 1.8 and above
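The helpers removed below depend on big.Int.ProbablyPrime being exact, and Go 1.8 is the first release whose ProbablyPrime adds a Baillie-PSW pass on top of the Miller-Rabin rounds, making it 100% accurate for all inputs below 2^64. The companion file that picks the functions up is not part of this diff; as a rough sketch (file name and build-tag placement are assumptions, the body is taken from the deletion below), the relocated cache helper would look like:

// ethash_algo_go1.8.go (hypothetical file name, not shown in this diff):
// the prime-based size calculation, compiled only on Go 1.8 and above.

// +build go1.8

package pow

import "math/big"

// cacheSize calculates and returns the size of the ethash verification cache
// that belongs to a certain block number, capping the linearly growing
// threshold to the highest prime multiple of hashBytes below it.
func cacheSize(block uint64) uint64 {
	// If we have a pre-generated value, use that
	epoch := int(block / epochLength)
	if epoch < len(cacheSizes) {
		return cacheSizes[epoch]
	}
	// No known cache size, calculate manually (sanity branch only)
	size := uint64(cacheInitBytes + cacheGrowthBytes*epoch - hashBytes)
	for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Exact below 2^64 on Go 1.8+
		size -= 2 * hashBytes
	}
	return size
}

datasetSize would presumably move the same way, with a !go1.8 counterpart falling back to the pre-generated lookup tables alone.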
Diffstat (limited to 'pow/ethash_algo.go')
-rw-r--r--  pow/ethash_algo.go | 37
1 file changed, 0 insertions(+), 37 deletions(-)
diff --git a/pow/ethash_algo.go b/pow/ethash_algo.go
index f6d05880a..d3fac8d5b 100644
--- a/pow/ethash_algo.go
+++ b/pow/ethash_algo.go
@@ -19,7 +19,6 @@ package pow
 import (
 	"encoding/binary"
 	"io"
-	"math/big"
 	"runtime"
 	"sync"
 	"sync/atomic"
@@ -45,42 +44,6 @@ const (
 	loopAccesses = 64 // Number of accesses in hashimoto loop
 )
 
-// cacheSize calculates and returns the size of the ethash verification cache that
-// belongs to a certain block number. The cache size grows linearly, however, we
-// always take the highest prime below the linearly growing threshold in order to
-// reduce the risk of accidental regularities leading to cyclic behavior.
-func cacheSize(block uint64) uint64 {
-	// If we have a pre-generated value, use that
-	epoch := int(block / epochLength)
-	if epoch < len(cacheSizes) {
-		return cacheSizes[epoch]
-	}
-	// No known cache size, calculate manually (sanity branch only)
-	size := uint64(cacheInitBytes + cacheGrowthBytes*epoch - hashBytes)
-	for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
-		size -= 2 * hashBytes
-	}
-	return size
-}
-
-// datasetSize calculates and returns the size of the ethash mining dataset that
-// belongs to a certain block number. The dataset size grows linearly, however, we
-// always take the highest prime below the linearly growing threshold in order to
-// reduce the risk of accidental regularities leading to cyclic behavior.
-func datasetSize(block uint64) uint64 {
-	// If we have a pre-generated value, use that
-	epoch := int(block / epochLength)
-	if epoch < len(datasetSizes) {
-		return datasetSizes[epoch]
-	}
-	// No known dataset size, calculate manually (sanity branch only)
-	size := uint64(datasetInitBytes + datasetGrowthBytes*epoch - mixBytes)
-	for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
-		size -= 2 * mixBytes
-	}
-	return size
-}
-
 // seedHash is the seed to use for generating a verification cache and the mining
 // dataset.
 func seedHash(block uint64) []byte {
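For a concrete feel of the prime-capping loop, here is a self-contained sketch that recomputes the epoch-0 cache size (the ethash constants are inlined from the spec; the main function and the worked-out values in the comments are illustrative, not part of the commit):

package main

import (
	"fmt"
	"math/big"
)

const (
	epochLength      = 30000   // blocks per ethash epoch
	hashBytes        = 64      // hash length in bytes
	cacheInitBytes   = 1 << 24 // bytes in cache at genesis
	cacheGrowthBytes = 1 << 17 // cache growth per epoch
)

// cacheSize mirrors the deleted helper: start at the linear-growth threshold
// for the block's epoch and step down until size/hashBytes is prime.
func cacheSize(block uint64) uint64 {
	size := uint64(cacheInitBytes + cacheGrowthBytes*(block/epochLength) - hashBytes)
	for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) {
		size -= 2 * hashBytes
	}
	return size
}

func main() {
	// Epoch 0: the threshold is 2^24 - 64 = 16777152 (262143 elements);
	// 262143 and 262141 are composite, 262139 is prime, so the result is
	// 262139 * 64 = 16776896, the first entry of the pre-generated
	// cacheSizes table.
	fmt.Println(cacheSize(0))
}

ProbablyPrime(1) matches the original code; on Go 1.8 and above the argument barely matters for inputs below 2^64, since the unconditional Baillie-PSW pass already decides those exactly.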