author     Péter Szilágyi <peterke@gmail.com>          2017-03-06 21:00:20 +0800
committer  Felix Lange <fjl@users.noreply.github.com>  2017-03-09 22:50:14 +0800
commit     df72e20cc521b43092b9e3cc684836d4d673e126 (patch)
tree       f158ca0f28a404f1798133daeddb8af9b0bbe920
parent     023670f6bafcfed28c01857da215217a5dadfaa1 (diff)
pow: only support prime calculations on Go 1.8 and above
-rw-r--r--  pow/ethash.go                   8
-rw-r--r--  pow/ethash_algo.go             37
-rw-r--r--  pow/ethash_algo_go1.7.go       47
-rw-r--r--  pow/ethash_algo_go1.8.go       57
-rw-r--r--  pow/ethash_algo_go1.8_test.go  46
-rw-r--r--  pow/ethash_algo_test.go        25
6 files changed, 157 insertions, 63 deletions
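
Background for the Go 1.8 requirement (a hedged sketch, not part of the commit): since Go 1.8, math/big's ProbablyPrime is documented to be 100% accurate for inputs below 2^64, which is what allows the size calculators below to step down from a linearly growing threshold to the highest prime using a single Miller-Rabin round. The following standalone program illustrates that walk; the constants stand in for cacheInitBytes, cacheGrowthBytes and hashBytes and are chosen here for illustration only.

// +build go1.8

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Illustrative stand-ins for cacheInitBytes, cacheGrowthBytes and hashBytes.
	const (
		initBytes   = 1 << 24 // starting cache size in bytes
		growthBytes = 1 << 17 // cache growth per epoch in bytes
		hashBytes   = 64      // bytes in a single hash
	)
	epoch := 3000 // a hypothetical epoch beyond the pre-generated lookup table

	// Walk down in steps of 2*hashBytes until size/hashBytes is prime.
	size := uint64(initBytes + growthBytes*epoch - hashBytes)
	for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // deterministic below 2^64 on Go 1.8+
		size -= 2 * hashBytes
	}
	fmt.Println("highest prime-sized cache below the threshold:", size)
}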
diff --git a/pow/ethash.go b/pow/ethash.go
index 602f9324f..9dfeedb78 100644
--- a/pow/ethash.go
+++ b/pow/ethash.go
@@ -35,6 +35,7 @@ import (
)
var (
+ ErrNonceOutOfRange = errors.New("nonce out of range")
ErrInvalidDifficulty = errors.New("non-positive difficulty")
ErrInvalidMixDigest = errors.New("invalid mix digest")
ErrInvalidPoW = errors.New("pow difficulty invalid")
@@ -174,13 +175,18 @@ func NewSharedEthash() PoW {
// Verify implements PoW, checking whether the given block satisfies the PoW
// difficulty requirements.
func (ethash *Ethash) Verify(block Block) error {
+ // Sanity check that the block number is below the lookup table size (60M blocks)
+ number := block.NumberU64()
+ if number/epochLength >= uint64(len(cacheSizes)) {
+ // Go < 1.8 cannot calculate new cache/dataset sizes (no fast prime check)
+ return ErrNonceOutOfRange
+ }
// Ensure we have a valid difficulty for the block
difficulty := block.Difficulty()
if difficulty.Sign() <= 0 {
return ErrInvalidDifficulty
}
// Recompute the digest and PoW value and verify against the block
- number := block.NumberU64()
cache := ethash.cache(number)
size := datasetSize(number)
diff --git a/pow/ethash_algo.go b/pow/ethash_algo.go
index f6d05880a..d3fac8d5b 100644
--- a/pow/ethash_algo.go
+++ b/pow/ethash_algo.go
@@ -19,7 +19,6 @@ package pow
import (
"encoding/binary"
"io"
- "math/big"
"runtime"
"sync"
"sync/atomic"
@@ -45,42 +44,6 @@ const (
loopAccesses = 64 // Number of accesses in hashimoto loop
)
-// cacheSize calculates and returns the size of the ethash verification cache that
-// belongs to a certain block number. The cache size grows linearly, however, we
-// always take the highest prime below the linearly growing threshold in order to
-// reduce the risk of accidental regularities leading to cyclic behavior.
-func cacheSize(block uint64) uint64 {
- // If we have a pre-generated value, use that
- epoch := int(block / epochLength)
- if epoch < len(cacheSizes) {
- return cacheSizes[epoch]
- }
- // No known cache size, calculate manually (sanity branch only)
- size := uint64(cacheInitBytes + cacheGrowthBytes*epoch - hashBytes)
- for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
- size -= 2 * hashBytes
- }
- return size
-}
-
-// datasetSize calculates and returns the size of the ethash mining dataset that
-// belongs to a certain block number. The dataset size grows linearly, however, we
-// always take the highest prime below the linearly growing threshold in order to
-// reduce the risk of accidental regularities leading to cyclic behavior.
-func datasetSize(block uint64) uint64 {
- // If we have a pre-generated value, use that
- epoch := int(block / epochLength)
- if epoch < len(datasetSizes) {
- return datasetSizes[epoch]
- }
- // No known dataset size, calculate manually (sanity branch only)
- size := uint64(datasetInitBytes + datasetGrowthBytes*epoch - mixBytes)
- for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
- size -= 2 * mixBytes
- }
- return size
-}
-
// seedHash is the seed to use for generating a verification cache and the mining
// dataset.
func seedHash(block uint64) []byte {
diff --git a/pow/ethash_algo_go1.7.go b/pow/ethash_algo_go1.7.go
new file mode 100644
index 000000000..ce05b3bb0
--- /dev/null
+++ b/pow/ethash_algo_go1.7.go
@@ -0,0 +1,47 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// +build !go1.8
+
+package pow
+
+// cacheSize calculates and returns the size of the ethash verification cache that
+// belongs to a certain block number. The cache size grows linearly, however, we
+// always take the highest prime below the linearly growing threshold in order to
+// reduce the risk of accidental regularities leading to cyclic behavior.
+func cacheSize(block uint64) uint64 {
+ // If we have a pre-generated value, use that
+ epoch := int(block / epochLength)
+ if epoch < len(cacheSizes) {
+ return cacheSizes[epoch]
+ }
+ // We don't have a way to verify primes fast before Go 1.8
+ panic("fast prime testing unsupported in Go < 1.8")
+}
+
+// datasetSize calculates and returns the size of the ethash mining dataset that
+// belongs to a certain block number. The dataset size grows linearly, however, we
+// always take the highest prime below the linearly growing threshold in order to
+// reduce the risk of accidental regularities leading to cyclic behavior.
+func datasetSize(block uint64) uint64 {
+ // If we have a pre-generated value, use that
+ epoch := int(block / epochLength)
+ if epoch < len(datasetSizes) {
+ return datasetSizes[epoch]
+ }
+ // We don't have a way to verify primes fast before Go 1.8
+ panic("fast prime testing unsupported in Go < 1.8")
+}
diff --git a/pow/ethash_algo_go1.8.go b/pow/ethash_algo_go1.8.go
new file mode 100644
index 000000000..cac96cd5e
--- /dev/null
+++ b/pow/ethash_algo_go1.8.go
@@ -0,0 +1,57 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// +build go1.8
+
+package pow
+
+import "math/big"
+
+// cacheSize calculates and returns the size of the ethash verification cache that
+// belongs to a certain block number. The cache size grows linearly, however, we
+// always take the highest prime below the linearly growing threshold in order to
+// reduce the risk of accidental regularities leading to cyclic behavior.
+func cacheSize(block uint64) uint64 {
+ // If we have a pre-generated value, use that
+ epoch := int(block / epochLength)
+ if epoch < len(cacheSizes) {
+ return cacheSizes[epoch]
+ }
+ // No known cache size, calculate manually (sanity branch only)
+ size := uint64(cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes)
+ for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
+ size -= 2 * hashBytes
+ }
+ return size
+}
+
+// datasetSize calculates and returns the size of the ethash mining dataset that
+// belongs to a certain block number. The dataset size grows linearly, however, we
+// always take the highest prime below the linearly growing threshold in order to
+// reduce the risk of accidental regularities leading to cyclic behavior.
+func datasetSize(block uint64) uint64 {
+ // If we have a pre-generated value, use that
+ epoch := int(block / epochLength)
+ if epoch < len(datasetSizes) {
+ return datasetSizes[epoch]
+ }
+ // No known dataset size, calculate manually (sanity branch only)
+ size := uint64(datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes)
+ for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
+ size -= 2 * mixBytes
+ }
+ return size
+}
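
The "Always accurate for n < 2^64" remarks above rely on a documented Go 1.8 change: ProbablyPrime now runs a Baillie-PSW test in addition to the requested Miller-Rabin rounds and is guaranteed exact for inputs below 2^64. A small cross-check against naive trial division (purely illustrative, not part of the commit) makes that concrete:

// +build go1.8

package main

import (
	"fmt"
	"math/big"
)

// trialDivision is a naive primality test used only to sanity-check ProbablyPrime.
func trialDivision(n uint64) bool {
	if n < 2 {
		return false
	}
	for d := uint64(2); d*d <= n; d++ {
		if n%d == 0 {
			return false
		}
	}
	return true
}

func main() {
	for n := uint64(2); n < 100000; n++ {
		if new(big.Int).SetUint64(n).ProbablyPrime(1) != trialDivision(n) {
			fmt.Println("mismatch at", n)
			return
		}
	}
	fmt.Println("ProbablyPrime(1) matches trial division for all n < 100000")
}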
diff --git a/pow/ethash_algo_go1.8_test.go b/pow/ethash_algo_go1.8_test.go
new file mode 100644
index 000000000..57e0b0b7a
--- /dev/null
+++ b/pow/ethash_algo_go1.8_test.go
@@ -0,0 +1,46 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// +build go1.8
+
+package pow
+
+import "testing"
+
+// Tests whether the cache and dataset size calculators work correctly by cross
+// checking the hard coded lookup tables against the values they generate.
+func TestSizeCalculations(t *testing.T) {
+ var tests []uint64
+
+ // Verify all the cache sizes from the lookup table
+ defer func(sizes []uint64) { cacheSizes = sizes }(cacheSizes)
+ tests, cacheSizes = cacheSizes, []uint64{}
+
+ for i, test := range tests {
+ if size := cacheSize(uint64(i*epochLength) + 1); size != test {
+ t.Errorf("cache %d: cache size mismatch: have %d, want %d", i, size, test)
+ }
+ }
+ // Verify all the dataset sizes from the lookup table
+ defer func(sizes []uint64) { datasetSizes = sizes }(datasetSizes)
+ tests, datasetSizes = datasetSizes, []uint64{}
+
+ for i, test := range tests {
+ if size := datasetSize(uint64(i*epochLength) + 1); size != test {
+ t.Errorf("dataset %d: dataset size mismatch: have %d, want %d", i, size, test)
+ }
+ }
+}
diff --git a/pow/ethash_algo_test.go b/pow/ethash_algo_test.go
index 253ddfa72..32e115db9 100644
--- a/pow/ethash_algo_test.go
+++ b/pow/ethash_algo_test.go
@@ -23,31 +23,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
)
-// Tests whether the dataset size calculator work correctly by cross checking the
-// hard coded lookup table with the value generated by it.
-func TestSizeCalculations(t *testing.T) {
- var tests []uint64
-
- // Verify all the cache sizes from the lookup table
- defer func(sizes []uint64) { cacheSizes = sizes }(cacheSizes)
- tests, cacheSizes = cacheSizes, []uint64{}
-
- for i, test := range tests {
- if size := cacheSize(uint64(i*epochLength) + 1); size != test {
- t.Errorf("cache %d: cache size mismatch: have %d, want %d", i, size, test)
- }
- }
- // Verify all the dataset sizes from the lookup table
- defer func(sizes []uint64) { datasetSizes = sizes }(datasetSizes)
- tests, datasetSizes = datasetSizes, []uint64{}
-
- for i, test := range tests {
- if size := datasetSize(uint64(i*epochLength) + 1); size != test {
- t.Errorf("dataset %d: dataset size mismatch: have %d, want %d", i, size, test)
- }
- }
-}
-
// Tests that verification caches can be correctly generated.
func TestCacheGeneration(t *testing.T) {
tests := []struct {