Diffstat (limited to 'pow/ethash_algo_go1.8.go')
-rw-r--r-- | pow/ethash_algo_go1.8.go | 57 |
1 file changed, 57 insertions, 0 deletions
diff --git a/pow/ethash_algo_go1.8.go b/pow/ethash_algo_go1.8.go
new file mode 100644
index 000000000..cac96cd5e
--- /dev/null
+++ b/pow/ethash_algo_go1.8.go
@@ -0,0 +1,57 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// +build go1.8
+
+package pow
+
+import "math/big"
+
+// cacheSize calculates and returns the size of the ethash verification cache that
+// belongs to a certain block number. The cache size grows linearly, however, we
+// always take the highest prime below the linearly growing threshold in order to
+// reduce the risk of accidental regularities leading to cyclic behavior.
+func cacheSize(block uint64) uint64 {
+	// If we have a pre-generated value, use that
+	epoch := int(block / epochLength)
+	if epoch < len(cacheSizes) {
+		return cacheSizes[epoch]
+	}
+	// No known cache size, calculate manually (sanity branch only)
+	size := uint64(cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes)
+	for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
+		size -= 2 * hashBytes
+	}
+	return size
+}
+
+// datasetSize calculates and returns the size of the ethash mining dataset that
+// belongs to a certain block number. The dataset size grows linearly, however, we
+// always take the highest prime below the linearly growing threshold in order to
+// reduce the risk of accidental regularities leading to cyclic behavior.
+func datasetSize(block uint64) uint64 {
+	// If we have a pre-generated value, use that
+	epoch := int(block / epochLength)
+	if epoch < len(datasetSizes) {
+		return datasetSizes[epoch]
+	}
+	// No known dataset size, calculate manually (sanity branch only)
+	size := uint64(datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes)
+	for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
+		size -= 2 * mixBytes
+	}
+	return size
+}
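
For reference, below is a minimal, standalone sketch of the manual fallback branch used by both functions. The constants are the ethash spec parameters (epochLength = 30000, hashBytes = 64, mixBytes = 128, cacheInitBytes = 2^24, cacheGrowthBytes = 2^17, datasetInitBytes = 2^30, datasetGrowthBytes = 2^23) and the helper names calcCacheSize/calcDatasetSize are assumptions for illustration; the real package takes its constants and the pre-generated cacheSizes/datasetSizes tables from its own definitions.

// sizes_sketch.go -- illustrative sketch of the manual fallback in
// cacheSize/datasetSize. All constants are the ethash spec parameters,
// assumed here for the example; the real package defines its own.
package main

import (
	"fmt"
	"math/big"
)

const (
	epochLength        = 30000   // blocks per ethash epoch
	hashBytes          = 64      // hash length in bytes
	mixBytes           = 128     // width of the dataset mix
	cacheInitBytes     = 1 << 24 // cache size at epoch 0
	cacheGrowthBytes   = 1 << 17 // cache growth per epoch
	datasetInitBytes   = 1 << 30 // dataset size at epoch 0
	datasetGrowthBytes = 1 << 23 // dataset growth per epoch
)

// calcCacheSize mirrors the manual branch of cacheSize: start just below the
// linear threshold and step down by 2*hashBytes until size/hashBytes is prime.
func calcCacheSize(epoch int) uint64 {
	size := uint64(cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes)
	for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) {
		size -= 2 * hashBytes
	}
	return size
}

// calcDatasetSize does the same for the mining dataset, in mixBytes units.
func calcDatasetSize(epoch int) uint64 {
	size := uint64(datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes)
	for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) {
		size -= 2 * mixBytes
	}
	return size
}

func main() {
	// Epoch 0 should yield 16776896 (= 64 * 262139) and 1073739904
	// (= 128 * 8388593), matching the first entries of the pre-generated
	// cacheSizes/datasetSizes tables in the real package.
	fmt.Println(calcCacheSize(0), calcDatasetSize(0))
}

The +build go1.8 constraint matters here: Go 1.8 added a Baillie-PSW pass to math/big's ProbablyPrime, which is what backs the "Always accurate for n < 2^64" comment in the loops above. Earlier Go versions only run Miller-Rabin rounds, so a build for them would presumably need a companion file with a more conservative fallback.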