author    Péter Szilágyi <peterke@gmail.com>  2017-03-08 02:05:54 +0800
committer Felix Lange <fjl@users.noreply.github.com>  2017-03-09 22:50:14 +0800
commit    5c8fa6ae1a42813e7aec477bd68d98f66f85e0b8
tree      19e34d409d1a999079e007698f33766973676bc8 /pow
parent    b7d93500f13e3054c81196273ebf676ad8ecb5ba
crypto, pow, vendor: hash optimizations, mmap ethash
Diffstat (limited to 'pow')
-rw-r--r--  pow/ethash.go            310
-rw-r--r--  pow/ethash_algo.go       124
-rw-r--r--  pow/ethash_algo_test.go  136
3 files changed, 403 insertions, 167 deletions
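
This change replaces the old stream-based cache and DAG handling with memory-mapped files of native-order uint32s: data is generated straight into a memory-mapped temporary file and atomically renamed into place, so concurrent processes can share a single on-disk copy and later runs load it with one mmap call instead of regenerating or re-parsing it. Because the dumps are stored in machine byte order, the file names gain an endianness suffix. A small illustration of the naming scheme introduced below (the values are placeholders, not output of this code):

    // A little endian host looks for, and creates, e.g. <dir>/cache-R<rev>-<seedhex>.le
    path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x.%s", algorithmRevision, seed, "le"))
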
diff --git a/pow/ethash.go b/pow/ethash.go
index 0af1904b6..dbe8ff077 100644
--- a/pow/ethash.go
+++ b/pow/ethash.go
@@ -17,20 +17,21 @@
package pow
import (
- "bufio"
"bytes"
"errors"
"fmt"
- "io/ioutil"
"math"
"math/big"
"math/rand"
"os"
"path/filepath"
+ "reflect"
+ "strconv"
"sync"
"time"
+ "unsafe"
- "github.com/ethereum/go-ethereum/common"
+ mmap "github.com/edsrzf/mmap-go"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
metrics "github.com/rcrowley/go-metrics"
@@ -57,10 +58,89 @@ var (
dumpMagic = hexutil.MustDecode("0xfee1deadbaddcafe")
)
+// isLittleEndian returns whether the local system is running in little endian
+// byte order.
+func isLittleEndian() bool {
+ n := uint32(0x01020304)
+ return *(*byte)(unsafe.Pointer(&n)) == 0x04
+}
+
+// memoryMap tries to memory map a file of uint32s for read only access.
+func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
+ file, err := os.OpenFile(path, os.O_RDONLY, 0644)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ mem, buffer, err := memoryMapFile(file, false)
+ if err != nil {
+ file.Close()
+ return nil, nil, nil, err
+ }
+ return file, mem, buffer, err
+}
+
+// memoryMapFile tries to memory map an already opened file descriptor.
+func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
+ // Try to memory map the file
+ flag := mmap.RDONLY
+ if write {
+ flag = mmap.RDWR
+ }
+ mem, err := mmap.Map(file, flag, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ // Yay, we managed to memory map the file, here be dragons
+ header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
+ header.Len /= 4
+ header.Cap /= 4
+
+ return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
+}
+
+// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
+// access, fill it with the data from a generator and then move it into the final
+// path requested.
+func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
+ // Ensure the data folder exists
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+ return nil, nil, nil, err
+ }
+ // Create a huge temporary empty file to fill with data
+ temp := path + "." + strconv.Itoa(rand.Int())
+
+ dump, err := os.Create(temp)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ if err = dump.Truncate(int64(size)); err != nil {
+ dump.Close()
+ return nil, nil, nil, err
+ }
+ // Memory map the file for writing and fill it with the generator
+ mem, buffer, err := memoryMapFile(dump, true)
+ if err != nil {
+ dump.Close()
+ return nil, nil, nil, err
+ }
+ generator(buffer)
+
+ if err := mem.Flush(); err != nil {
+ mem.Unmap()
+ dump.Close()
+ return nil, nil, nil, err
+ }
+ os.Rename(temp, path)
+ return dump, mem, buffer, nil
+}
+
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
- epoch uint64 // Epoch for which this cache is relevant
- cache []uint32 // The actual cache data content
+ epoch uint64 // Epoch for which this cache is relevant
+
+ dump *os.File // File descriptor of the memory mapped cache
+ mmap mmap.MMap // Memory map itself to unmap before releasing
+
+ cache []uint32 // The actual cache data content (may be memory mapped)
used time.Time // Timestamp of the last use for smarter eviction
once sync.Once // Ensures the cache is generated only once
lock sync.Mutex // Ensures thread safety for updating the usage time
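
The "here be dragons" conversion above reinterprets the mmap'd byte buffer as a []uint32 via reflect.SliceHeader: both slices share the same backing memory, so nothing is copied, which is also why the dumps must be written in host byte order. A self-contained sketch of the trick, written against the Go of this era (before unsafe.Slice existed):

    package main

    import (
        "fmt"
        "reflect"
        "unsafe"
    )

    // bytesToUint32s reinterprets a byte slice as a uint32 slice without copying,
    // mirroring memoryMapFile above. len(buf) must be a multiple of 4, and the
    // caller must keep buf (here, the mapping) alive while the result is in use.
    func bytesToUint32s(buf []byte) []uint32 {
        header := *(*reflect.SliceHeader)(unsafe.Pointer(&buf))
        header.Len /= 4
        header.Cap /= 4
        return *(*[]uint32)(unsafe.Pointer(&header))
    }

    func main() {
        raw := []byte{0x01, 0, 0, 0, 0x02, 0, 0, 0}
        fmt.Println(bytesToUint32s(raw)) // prints [1 2] on a little endian host
    }
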
@@ -71,57 +151,72 @@ func (c *cache) generate(dir string, limit int, test bool) {
c.once.Do(func() {
// If we have a testing cache, generate and return
if test {
- rawCache := generateCache(1024, seedHash(c.epoch*epochLength+1))
- c.cache = prepare(1024, bytes.NewReader(rawCache))
+ c.cache = make([]uint32, 1024/4)
+ generateCache(c.cache, c.epoch, seedHash(c.epoch*epochLength+1))
return
}
- // Full cache generation is needed, check cache dir for existing data
+ // If we don't store anything on disk, generate and return
size := cacheSize(c.epoch*epochLength + 1)
seed := seedHash(c.epoch*epochLength + 1)
- path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x", algorithmRevision, seed))
- logger := log.New("seed", hexutil.Bytes(seed))
+ if dir == "" {
+ c.cache = make([]uint32, size/4)
+ generateCache(c.cache, c.epoch, seed)
+ return
+ }
+ // Disk storage is needed, this will get fancy
+ endian := "le"
+ if !isLittleEndian() {
+ endian = "be"
+ }
+ path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x.%s", algorithmRevision, seed, endian))
+ logger := log.New("epoch", c.epoch)
+
+ // Try to load the file from disk and memory map it
+ var err error
+ c.dump, c.mmap, c.cache, err = memoryMap(path)
+ if err == nil {
+ logger.Debug("Loaded old ethash cache from disk")
+ return
+ }
+ logger.Debug("Failed to load old ethash cache", "err", err)
- if dir != "" {
- dump, err := os.Open(path)
- if err == nil {
- logger.Info("Loading ethash cache from disk")
- start := time.Now()
- c.cache = prepare(size, bufio.NewReader(dump))
- logger.Info("Loaded ethash cache from disk", "elapsed", common.PrettyDuration(time.Since(start)))
+ // No previous cache available, create a new cache file to fill
+ c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
+ if err != nil {
+ logger.Error("Failed to generate mapped ethash cache", "err", err)
- dump.Close()
- return
- }
+ c.cache = make([]uint32, size/4)
+ generateCache(c.cache, c.epoch, seed)
}
- // No previous disk cache was available, generate on the fly
- rawCache := generateCache(size, seed)
- c.cache = prepare(size, bytes.NewReader(rawCache))
-
- // If a cache directory is given, attempt to serialize for next time
- if dir != "" {
- // Store the ethash cache to disk
- start := time.Now()
- if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
- logger.Error("Failed to create ethash cache dir", "err", err)
- } else if err := ioutil.WriteFile(path, rawCache, os.ModePerm); err != nil {
- logger.Error("Failed to write ethash cache to disk", "err", err)
- } else {
- logger.Info("Stored ethash cache to disk", "elapsed", common.PrettyDuration(time.Since(start)))
- }
- // Iterate over all previous instances and delete old ones
- for ep := int(c.epoch) - limit; ep >= 0; ep-- {
- seed := seedHash(uint64(ep)*epochLength + 1)
- path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x", algorithmRevision, seed))
- os.Remove(path)
- }
+ // Iterate over all previous instances and delete old ones
+ for ep := int(c.epoch) - limit; ep >= 0; ep-- {
+ seed := seedHash(uint64(ep)*epochLength + 1)
+ path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x.%s", algorithmRevision, seed, endian))
+ os.Remove(path)
}
})
}
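
generate thus tries three strategies in order: memory map an existing dump, generate a fresh dump through a memory-mapped temporary file, and only as a last resort fall back to a plain in-memory slice. For illustration, a hedged sketch of driving memoryMapAndGenerate directly from inside the package (path, size and generator are invented):

    // Fill a 1 KiB demo file with a counter and keep it mapped.
    dump, mem, buffer, err := memoryMapAndGenerate("/tmp/ethash-demo", 1024, func(buffer []uint32) {
        for i := range buffer {
            buffer[i] = uint32(i) // writes land directly in the mapped file
        }
    })
    if err == nil {
        _ = buffer[42]  // reads alias the file contents
        mem.Unmap()     // unmap before closing, as release below does
        dump.Close()
    }
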
+// release closes any open file handles and memory maps.
+func (c *cache) release() {
+ if c.mmap != nil {
+ c.mmap.Unmap()
+ c.mmap = nil
+ }
+ if c.dump != nil {
+ c.dump.Close()
+ c.dump = nil
+ }
+}
+
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
- epoch uint64 // Epoch for which this cache is relevant
+ epoch uint64 // Epoch for which this dataset is relevant
+
+ dump *os.File // File descriptor of the memory mapped dataset
+ mmap mmap.MMap // Memory map itself to unmap before releasing
+
dataset []uint32 // The actual dataset content (may be memory mapped)
used time.Time // Timestamp of the last use for smarter eviction
once sync.Once // Ensures the cache is generated only once
@@ -129,78 +224,91 @@ type dataset struct {
}
// generate ensures that the dataset content is generated before use.
-func (d *dataset) generate(dir string, limit int, test bool, discard bool) {
+func (d *dataset) generate(dir string, limit int, test bool) {
d.once.Do(func() {
// If we have a testing dataset, generate and return
if test {
- rawCache := generateCache(1024, seedHash(d.epoch*epochLength+1))
- intCache := prepare(1024, bytes.NewReader(rawCache))
+ cache := make([]uint32, 1024/4)
+ generateCache(cache, d.epoch, seedHash(d.epoch*epochLength+1))
- rawDataset := generateDataset(32*1024, intCache)
- d.dataset = prepare(32*1024, bytes.NewReader(rawDataset))
+ d.dataset = make([]uint32, 32*1024/4)
+ generateDataset(d.dataset, d.epoch, cache)
return
}
- // Full dataset generation is needed, check dataset dir for existing data
+ // If we don't store anything on disk, generate and return
csize := cacheSize(d.epoch*epochLength + 1)
dsize := datasetSize(d.epoch*epochLength + 1)
seed := seedHash(d.epoch*epochLength + 1)
- path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x", algorithmRevision, seed))
- logger := log.New("seed", hexutil.Bytes(seed))
-
- if dir != "" {
- dump, err := os.Open(path)
- if err == nil {
- if !discard {
- logger.Info("Loading ethash DAG from disk")
- start := time.Now()
- d.dataset = prepare(dsize, bufio.NewReader(dump))
- logger.Info("Loaded ethash DAG from disk", "elapsed", common.PrettyDuration(time.Since(start)))
- }
- dump.Close()
- return
- }
+ if dir == "" {
+ cache := make([]uint32, csize/4)
+ generateCache(cache, d.epoch, seed)
+
+ d.dataset = make([]uint32, dsize/4)
+ generateDataset(d.dataset, d.epoch, cache)
+ return
+ }
- // No previous disk dataset was available, generate on the fly
- rawCache := generateCache(csize, seed)
- intCache := prepare(csize, bytes.NewReader(rawCache))
+ // Disk storage is needed, this will get fancy
+ endian := "le"
+ if !isLittleEndian() {
+ endian = "be"
+ }
+ path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x.%s", algorithmRevision, seed, endian))
+ logger := log.New("epoch", d.epoch)
+
+ // Try to load the file from disk and memory map it
+ var err error
+ d.dump, d.mmap, d.dataset, err = memoryMap(path)
+ if err == nil {
+ logger.Debug("Loaded old ethash dataset from disk")
+ return
+ }
+ logger.Debug("Failed to load old ethash dataset", "err", err)
+
+ // No previous dataset available, create a new dataset file to fill
+ cache := make([]uint32, csize/4)
+ generateCache(cache, d.epoch, seed)
- rawDataset := generateDataset(dsize, intCache)
- if !discard {
- d.dataset = prepare(dsize, bytes.NewReader(rawDataset))
+ d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
+ if err != nil {
+ logger.Error("Failed to generate mapped ethash dataset", "err", err)
+
+ d.dataset = make([]uint32, dsize/4)
+ generateDataset(d.dataset, d.epoch, cache)
}
- // If a dataset directory is given, attempt to serialize for next time
- if dir != "" {
- // Store the ethash dataset to disk
- start := time.Now()
- if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
- logger.Error("Failed to create ethash DAG dir", "err", err)
- } else if err := ioutil.WriteFile(path, rawDataset, os.ModePerm); err != nil {
- logger.Error("Failed to write ethash DAG to disk", "err", err)
- } else {
- logger.Info("Stored ethash DAG to disk", "elapsed", common.PrettyDuration(time.Since(start)))
- }
- // Iterate over all previous instances and delete old ones
- for ep := int(d.epoch) - limit; ep >= 0; ep-- {
- seed := seedHash(uint64(ep)*epochLength + 1)
- path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x", algorithmRevision, seed))
- os.Remove(path)
- }
+ // Iterate over all previous instances and delete old ones
+ for ep := int(d.epoch) - limit; ep >= 0; ep-- {
+ seed := seedHash(uint64(ep)*epochLength + 1)
+ path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x.%s", algorithmRevision, seed, endian))
+ os.Remove(path)
}
})
}
+// release closes any open file handles and memory maps.
+func (d *dataset) release() {
+ if d.mmap != nil {
+ d.mmap.Unmap()
+ d.mmap = nil
+ }
+ if d.dump != nil {
+ d.dump.Close()
+ d.dump = nil
+ }
+}
+
// MakeCache generates a new ethash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
c := cache{epoch: block/epochLength + 1}
c.generate(dir, math.MaxInt32, false)
+ c.release()
}
// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
d := dataset{epoch: block/epochLength + 1}
- d.generate(dir, math.MaxInt32, false, true)
+ d.generate(dir, math.MaxInt32, false)
+ d.release()
}
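
MakeCache and MakeDataset now release their mappings once generation finishes: the mmap and file handle are dropped, but the dump stays on disk for later runs to map back in. A usage sketch (the directory is invented; the block number is borrowed from the test added below):

    // Pre-generate the verification cache and full DAG for one mainnet block.
    MakeCache(3311058, "/tmp/ethash")   // writes cache-R<rev>-<seed>.<le|be>
    MakeDataset(3311058, "/tmp/ethash") // writes full-R<rev>-<seed>.<le|be>
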
// Ethash is a PoW data structure implementing the ethash algorithm.
@@ -318,22 +426,26 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
}
}
delete(ethash.caches, evict.epoch)
+ evict.release()
- log.Debug("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
+ log.Trace("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
}
// If we have the new cache pre-generated, use that, otherwise create a new one
if ethash.fcache != nil && ethash.fcache.epoch == epoch {
- log.Debug("Using pre-generated cache", "epoch", epoch)
+ log.Trace("Using pre-generated cache", "epoch", epoch)
current, ethash.fcache = ethash.fcache, nil
} else {
- log.Debug("Requiring new ethash cache", "epoch", epoch)
+ log.Trace("Requiring new ethash cache", "epoch", epoch)
current = &cache{epoch: epoch}
}
ethash.caches[epoch] = current
// If we just used up the future cache, or need a refresh, regenerate
if ethash.fcache == nil || ethash.fcache.epoch <= epoch {
- log.Debug("Requiring new future ethash cache", "epoch", epoch+1)
+ if ethash.fcache != nil {
+ ethash.fcache.release()
+ }
+ log.Trace("Requiring new future ethash cache", "epoch", epoch+1)
future = &cache{epoch: epoch + 1}
ethash.fcache = future
}
@@ -418,23 +530,27 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
}
}
delete(ethash.datasets, evict.epoch)
+ evict.release()
- log.Debug("Evicted ethash dataset", "epoch", evict.epoch, "used", evict.used)
+ log.Trace("Evicted ethash dataset", "epoch", evict.epoch, "used", evict.used)
}
// If we have the new cache pre-generated, use that, otherwise create a new one
if ethash.fdataset != nil && ethash.fdataset.epoch == epoch {
- log.Debug("Using pre-generated dataset", "epoch", epoch)
+ log.Trace("Using pre-generated dataset", "epoch", epoch)
current = &dataset{epoch: ethash.fdataset.epoch} // Reload from disk
ethash.fdataset = nil
} else {
- log.Debug("Requiring new ethash dataset", "epoch", epoch)
+ log.Trace("Requiring new ethash dataset", "epoch", epoch)
current = &dataset{epoch: epoch}
}
ethash.datasets[epoch] = current
// If we just used up the future dataset, or need a refresh, regenerate
if ethash.fdataset == nil || ethash.fdataset.epoch <= epoch {
- log.Debug("Requiring new future ethash dataset", "epoch", epoch+1)
+ if ethash.fdataset != nil {
+ ethash.fdataset.release()
+ }
+ log.Trace("Requiring new future ethash dataset", "epoch", epoch+1)
future = &dataset{epoch: epoch + 1}
ethash.fdataset = future
}
@@ -443,7 +559,7 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
ethash.lock.Unlock()
// Wait for generation finish, bump the timestamp and finalize the cache
- current.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester, false)
+ current.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
current.lock.Lock()
current.used = time.Now()
@@ -451,7 +567,7 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
// If we exhausted the future dataset, now's a good time to regenerate it
if future != nil {
- go future.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester, true) // Discard results from memorys
+ go future.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
}
return current.dataset
}
diff --git a/pow/ethash_algo.go b/pow/ethash_algo.go
index d3fac8d5b..ace482b93 100644
--- a/pow/ethash_algo.go
+++ b/pow/ethash_algo.go
@@ -18,15 +18,17 @@ package pow
import (
"encoding/binary"
- "io"
+ "hash"
+ "reflect"
"runtime"
"sync"
"sync/atomic"
"time"
+ "unsafe"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
)
@@ -44,6 +46,22 @@ const (
loopAccesses = 64 // Number of accesses in hashimoto loop
)
+// hasher is a repetitive hasher allowing the same hash data structures to be
+// reused between hash runs instead of requiring new ones to be created.
+type hasher func(dest []byte, data []byte)
+
+// makeHasher creates a repetitive hasher, allowing the same hash data structures
+// to be reused between hash runs instead of requiring new ones to be created.
+//
+// The returned function is not thread safe!
+func makeHasher(h hash.Hash) hasher {
+ return func(dest []byte, data []byte) {
+ h.Write(data)
+ h.Sum(dest[:0])
+ h.Reset()
+ }
+}
+
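
makeHasher amortizes one hash state over many invocations: h.Sum(dest[:0]) appends the digest into dest's backing array (so dest needs capacity for a full digest) and h.Reset readies the state for the next call. A minimal sketch, assuming the vendored sha3 package imported above:

    // One reusable Keccak-256 state; per the comment above, not thread safe.
    keccak256 := makeHasher(sha3.NewKeccak256())

    out := make([]byte, 32)
    keccak256(out, []byte("first data"))  // out = Keccak256("first data")
    keccak256(out, out)                   // in-place rehashing, as seedHash does
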
// seedHash is the seed to use for generating a verification cache and the mining
// dataset.
func seedHash(block uint64) []byte {
@@ -51,9 +69,9 @@ func seedHash(block uint64) []byte {
if block < epochLength {
return seed
}
- keccak256 := crypto.Keccak256Hasher()
+ keccak256 := makeHasher(sha3.NewKeccak256())
for i := 0; i < int(block/epochLength); i++ {
- seed = keccak256(seed)
+ keccak256(seed, seed)
}
return seed
}
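
seedHash starts from 32 zero bytes and applies one Keccak-256 round per elapsed epoch, so epoch 0 keeps the all-zero seed and epoch 1 hashes it exactly once. That single round yields the constant that the test change below stops hardcoding:

    seed := seedHash(epochLength + 1) // first block of epoch 1
    fmt.Printf("%x\n", seed)
    // 290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563
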
@@ -63,17 +81,30 @@ func seedHash(block uint64) []byte {
// memory, then performing two passes of Sergio Demian Lerner's RandMemoHash
// algorithm from Strict Memory Hard Hashing Functions (2014). The output is a
// set of 524288 64-byte values.
-func generateCache(size uint64, seed []byte) []byte {
+//
+// This method places the result into dest in machine byte order.
+func generateCache(dest []uint32, epoch uint64, seed []byte) {
// Print some debug logs to allow analysis on low end devices
- logger := log.New("seed", hexutil.Bytes(seed))
- logger.Debug("Generating ethash verification cache")
+ logger := log.New("epoch", epoch)
start := time.Now()
defer func() {
- logger.Info("Generated ethash verification cache", "elapsed", common.PrettyDuration(time.Since(start)))
+ elapsed := time.Since(start)
+
+ logFn := logger.Debug
+ if elapsed > 3*time.Second {
+ logFn = logger.Info
+ }
+ logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
}()
+ // Convert our destination slice to a byte buffer
+ header := *(*reflect.SliceHeader)(unsafe.Pointer(&dest))
+ header.Len *= 4
+ header.Cap *= 4
+ cache := *(*[]byte)(unsafe.Pointer(&header))
// Calculate the number of theoretical rows (we'll store in one buffer nonetheless)
+ size := uint64(len(cache))
rows := int(size) / hashBytes
// Start a monitoring goroutine to report progress on low end devices
@@ -93,13 +124,12 @@ func generateCache(size uint64, seed []byte) []byte {
}
}()
// Create a hasher to reuse between invocations
- keccak512 := crypto.Keccak512Hasher()
+ keccak512 := makeHasher(sha3.NewKeccak512())
// Sequentially produce the initial dataset
- cache := make([]byte, size)
- copy(cache, keccak512(seed))
+ keccak512(cache, seed)
for offset := uint64(hashBytes); offset < size; offset += hashBytes {
- copy(cache[offset:], keccak512(cache[offset-hashBytes:offset]))
+ keccak512(cache[offset:], cache[offset-hashBytes:offset])
atomic.AddUint32(&progress, 1)
}
// Use a low-round version of randmemohash
@@ -113,26 +143,31 @@ func generateCache(size uint64, seed []byte) []byte {
xorOff = (binary.LittleEndian.Uint32(cache[dstOff:]) % uint32(rows)) * hashBytes
)
xorBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
- copy(cache[dstOff:], keccak512(temp))
+ keccak512(cache[dstOff:], temp)
atomic.AddUint32(&progress, 1)
}
}
- return cache
+ // Swap the byte order on big endian systems and return
+ if !isLittleEndian() {
+ swap(cache)
+ }
+}
+
+// swap changes the byte order of the buffer assuming a uint32 representation.
+func swap(buffer []byte) {
+ for i := 0; i < len(buffer); i += 4 {
+ binary.BigEndian.PutUint32(buffer[i:], binary.LittleEndian.Uint32(buffer[i:]))
+ }
}
// prepare converts an ethash cache or dataset from a byte slice into the internal
// int representation. All ethash methods work with ints to avoid constant byte to
// int conversions as well as to handle both little and big endian systems.
-func prepare(size uint64, r io.Reader) []uint32 {
- ints := make([]uint32, size/4)
-
- buffer := make([]byte, 4)
- for i := 0; i < len(ints); i++ {
- io.ReadFull(r, buffer)
- ints[i] = binary.LittleEndian.Uint32(buffer)
+func prepare(dest []uint32, src []byte) {
+ for i := 0; i < len(dest); i++ {
+ dest[i] = binary.LittleEndian.Uint32(src[i*4:])
}
- return ints
}
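
prepare now decodes from an in-memory byte slice rather than an io.Reader, and swap is its in-place companion for big endian hosts: generated data is laid out as little endian uint32s, then byte-swapped when the machine disagrees so that native reads stay consistent. A short sketch of both:

    src := []byte{0x01, 0, 0, 0, 0xff, 0, 0, 0} // two little endian uint32s
    dest := make([]uint32, 2)
    prepare(dest, src) // dest == [1 255] on any host; prepare always reads LE

    swap(src) // src is now {0, 0, 0, 0x01, 0, 0, 0, 0xff}: each word reversed
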
// fnv is an algorithm inspired by the FNV hash, which in some cases is used as
@@ -152,7 +187,7 @@ func fnvHash(mix []uint32, data []uint32) {
// generateDatasetItem combines data from 256 pseudorandomly selected cache nodes,
// and hashes that to compute a single dataset node.
-func generateDatasetItem(cache []uint32, index uint32, keccak512 crypto.Hasher) []byte {
+func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte {
// Calculate the number of theoretical rows (we use one buffer nonetheless)
rows := uint32(len(cache) / hashWords)
@@ -163,7 +198,7 @@ func generateDatasetItem(cache []uint32, index uint32, keccak512 crypto.Hasher)
for i := 1; i < hashWords; i++ {
binary.LittleEndian.PutUint32(mix[i*4:], cache[(index%rows)*hashWords+uint32(i)])
}
- mix = keccak512(mix)
+ keccak512(mix, mix)
// Convert the mix to uint32s to avoid constant bit shifting
intMix := make([]uint32, hashWords)
@@ -179,22 +214,39 @@ func generateDatasetItem(cache []uint32, index uint32, keccak512 crypto.Hasher)
for i, val := range intMix {
binary.LittleEndian.PutUint32(mix[i*4:], val)
}
- return keccak512(mix)
+ keccak512(mix, mix)
+ return mix
}
// generateDataset generates the entire ethash dataset for mining.
-func generateDataset(size uint64, cache []uint32) []byte {
+//
+// This method places the result into dest in machine byte order.
+func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
// Print some debug logs to allow analysis on low end devices
- logger := log.New("size", size)
- logger.Debug("Generating ethash dataset")
+ logger := log.New("epoch", epoch)
defer func(start time.Time) {
- logger.Debug("Generated ethash dataset", "elapsed", common.PrettyDuration(time.Since(start)))
+ elapsed := time.Since(start)
+
+ logFn := logger.Debug
+ if elapsed > 3*time.Second {
+ logFn = logger.Info
+ }
+ logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
}(time.Now())
+ // Figure out whether the bytes need to be swapped for the machine
+ swapped := !isLittleEndian()
+
+ // Convert our destination slice to a byte buffer
+ header := *(*reflect.SliceHeader)(unsafe.Pointer(&dest))
+ header.Len *= 4
+ header.Cap *= 4
+ dataset := *(*[]byte)(unsafe.Pointer(&header))
+
// Generate the dataset on many goroutines since it takes a while
- dataset := make([]byte, size)
threads := runtime.NumCPU()
+ size := uint64(len(dataset))
var pend sync.WaitGroup
pend.Add(threads)
@@ -205,7 +257,7 @@ func generateDataset(size uint64, cache []uint32) []byte {
defer pend.Done()
// Create a hasher to reuse between invocations
- keccak512 := crypto.Keccak512Hasher()
+ keccak512 := makeHasher(sha3.NewKeccak512())
// Calculate the data segment this thread should generate
batch := uint32(size / hashBytes / uint64(threads))
@@ -217,7 +269,11 @@ func generateDataset(size uint64, cache []uint32) []byte {
// Calculate the dataset segment
percent := uint32(size / hashBytes / 100)
for index := start; index < limit; index++ {
- copy(dataset[index*hashBytes:], generateDatasetItem(cache, index, keccak512))
+ item := generateDatasetItem(cache, index, keccak512)
+ if swapped {
+ swap(item)
+ }
+ copy(dataset[index*hashBytes:], item)
if status := atomic.AddUint32(&progress, 1); status%percent == 0 {
logger.Info("Generating DAG in progress", "percentage", uint64(status*100)/(size/hashBytes))
@@ -227,8 +283,6 @@ func generateDataset(size uint64, cache []uint32) []byte {
}
// Wait for all the generators to finish and return
pend.Wait()
-
- return dataset
}
// hashimoto aggregates data from the full dataset in order to produce our final
@@ -277,7 +331,7 @@ func hashimoto(hash []byte, nonce uint64, size uint64, lookup func(index uint32)
// in-memory cache) in order to produce our final value for a particular header
// hash and nonce.
func hashimotoLight(size uint64, cache []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
- keccak512 := crypto.Keccak512Hasher()
+ keccak512 := makeHasher(sha3.NewKeccak512())
lookup := func(index uint32) []uint32 {
rawData := generateDatasetItem(cache, index, keccak512)
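
hashimotoLight never materializes the DAG: its lookup closure synthesizes each 64-byte dataset item on demand from the cache, trading CPU for the gigabyte of memory that full verification needs. The hunk's context ends mid-closure above; a hypothetical completion, using the patch's own prepare helper for the byte-to-uint32 step:

    lookup := func(index uint32) []uint32 {
        rawData := generateDatasetItem(cache, index, keccak512)

        data := make([]uint32, len(rawData)/4)
        prepare(data, rawData)
        return data
    }
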
diff --git a/pow/ethash_algo_test.go b/pow/ethash_algo_test.go
index 32e115db9..c881874ff 100644
--- a/pow/ethash_algo_test.go
+++ b/pow/ethash_algo_test.go
@@ -18,21 +18,28 @@ package pow
import (
"bytes"
+ "io/ioutil"
+ "math/big"
+ "os"
+ "reflect"
+ "sync"
"testing"
+ "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
)
// Tests that verification caches can be correctly generated.
func TestCacheGeneration(t *testing.T) {
tests := []struct {
size uint64
- seed []byte
+ epoch uint64
cache []byte
}{
{
- size: 1024,
- seed: make([]byte, 32),
+ size: 1024,
+ epoch: 0,
cache: hexutil.MustDecode("0x" +
"7ce2991c951f7bf4c4c1bb119887ee07871eb5339d7b97b8588e85c742de90e5bafd5bbe6ce93a134fb6be9ad3e30db99d9528a2ea7846833f52e9ca119b6b54" +
"8979480c46e19972bd0738779c932c1b43e665a2fd3122fc3ddb2691f353ceb0ed3e38b8f51fd55b6940290743563c9f8fa8822e611924657501a12aafab8a8d" +
@@ -52,8 +59,8 @@ func TestCacheGeneration(t *testing.T) {
"845f64fd8324bb85312979dead74f764c9677aab89801ad4f927f1c00f12e28f22422bb44200d1969d9ab377dd6b099dc6dbc3222e9321b2c1e84f8e2f07731c"),
},
{
- size: 1024,
- seed: hexutil.MustDecode("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"),
+ size: 1024,
+ epoch: 1,
cache: hexutil.MustDecode("0x" +
"1f56855d59cc5a085720899b4377a0198f1abe948d85fe5820dc0e346b7c0931b9cde8e541d751de3b2b3275d0aabfae316209d5879297d8bd99f8a033c9d4df" +
"35add1029f4e6404a022d504fb8023e42989aba985a65933b0109c7218854356f9284983c9e7de97de591828ae348b63d1fc78d8db58157344d4e06530ffd422" +
@@ -74,22 +81,28 @@ func TestCacheGeneration(t *testing.T) {
},
}
for i, tt := range tests {
- if cache := generateCache(tt.size, tt.seed); !bytes.Equal(cache, tt.cache) {
- t.Errorf("cache %d: content mismatch: have %x, want %x", i, cache, tt.cache)
+ cache := make([]uint32, tt.size/4)
+ generateCache(cache, tt.epoch, seedHash(tt.epoch*epochLength+1))
+
+ want := make([]uint32, tt.size/4)
+ prepare(want, tt.cache)
+
+ if !reflect.DeepEqual(cache, want) {
+ t.Errorf("cache %d: content mismatch: have %x, want %x", i, cache, want)
}
}
}
func TestDatasetGeneration(t *testing.T) {
tests := []struct {
+ epoch uint64
cacheSize uint64
- cacheSeed []byte
datasetSize uint64
dataset []byte
}{
{
+ epoch: 0,
cacheSize: 1024,
- cacheSeed: make([]byte, 32),
datasetSize: 32 * 1024,
dataset: hexutil.MustDecode("0x" +
"4bc09fbd530a041dd2ec296110a29e8f130f179c59d223f51ecce3126e8b0c74326abc2f32ccd9d7f976bd0944e3ccf8479db39343cbbffa467046ca97e2da63" +
@@ -608,11 +621,17 @@ func TestDatasetGeneration(t *testing.T) {
},
}
for i, tt := range tests {
- rawCache := generateCache(tt.cacheSize, tt.cacheSeed)
- cache := prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
+ cache := make([]uint32, tt.cacheSize/4)
+ generateCache(cache, tt.epoch, seedHash(tt.epoch*epochLength+1))
+
+ dataset := make([]uint32, tt.datasetSize/4)
+ generateDataset(dataset, tt.epoch, cache)
- if dataset := generateDataset(tt.datasetSize, cache); !bytes.Equal(dataset, tt.dataset) {
- t.Errorf("dataset %d: content mismatch: have %x, want %x", i, dataset, tt.dataset)
+ want := make([]uint32, tt.datasetSize/4)
+ prepare(want, tt.dataset)
+
+ if !reflect.DeepEqual(dataset, want) {
+ t.Errorf("dataset %d: content mismatch: have %x, want %x", i, dataset, want)
}
}
}
@@ -621,12 +640,12 @@ func TestDatasetGeneration(t *testing.T) {
// datasets.
func TestHashimoto(t *testing.T) {
// Create the verification cache and mining dataset
- var (
- rawCache = generateCache(1024, make([]byte, 32))
- cache = prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
- rawDataset = generateDataset(32*1024, cache)
- dataset = prepare(uint64(len(rawDataset)), bytes.NewReader(rawDataset))
- )
+ cache := make([]uint32, 1024/4)
+ generateCache(cache, 0, make([]byte, 32))
+
+ dataset := make([]uint32, 32*1024/4)
+ generateDataset(dataset, 0, cache)
+
// Create a block to verify
hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
nonce := uint64(0)
@@ -650,31 +669,77 @@ func TestHashimoto(t *testing.T) {
}
}
+// Tests that caches generated on disk may be done concurrently.
+func TestConcurrentDiskCacheGeneration(t *testing.T) {
+ // Create a temp folder to generate the caches into
+ cachedir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatalf("Failed to create temporary cache dir: %v", err)
+ }
+ defer os.RemoveAll(cachedir)
+
+ // Define a heavy enough block, one from mainnet should do
+ block := types.NewBlockWithHeader(&types.Header{
+ Number: big.NewInt(3311058),
+ ParentHash: common.HexToHash("0xd783efa4d392943503f28438ad5830b2d5964696ffc285f338585e9fe0a37a05"),
+ UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
+ Coinbase: common.HexToAddress("0xc0ea08a2d404d3172d2add29a45be56da40e2949"),
+ Root: common.HexToHash("0x77d14e10470b5850332524f8cd6f69ad21f070ce92dca33ab2858300242ef2f1"),
+ TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
+ ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
+ Difficulty: big.NewInt(167925187834220),
+ GasLimit: big.NewInt(4015682),
+ GasUsed: big.NewInt(0),
+ Time: big.NewInt(1488928920),
+ Extra: []byte("www.bw.com"),
+ MixDigest: common.HexToHash("0x3e140b0784516af5e5ec6730f2fb20cca22f32be399b9e4ad77d32541f798cd0"),
+ Nonce: types.EncodeNonce(0xf400cd0006070c49),
+ })
+ // Simulate multiple processes sharing the same datadir
+ var pend sync.WaitGroup
+
+ for i := 0; i < 3; i++ {
+ pend.Add(1)
+
+ go func(idx int) {
+ defer pend.Done()
+
+ ethash := NewFullEthash(cachedir, 0, 1, "", 0, 0)
+ if err := ethash.Verify(block); err != nil {
+ t.Errorf("proc %d: block verification failed: %v", idx, err)
+ }
+ }(i)
+ }
+ pend.Wait()
+}
+
// Benchmarks the cache generation performance.
func BenchmarkCacheGeneration(b *testing.B) {
for i := 0; i < b.N; i++ {
- generateCache(cacheSize(1), make([]byte, 32))
+ cache := make([]uint32, cacheSize(1)/4)
+ generateCache(cache, 0, make([]byte, 32))
}
}
// Benchmarks the dataset (small) generation performance.
func BenchmarkSmallDatasetGeneration(b *testing.B) {
- rawCache := generateCache(65536, make([]byte, 32))
- cache := prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
+ cache := make([]uint32, 65536/4)
+ generateCache(cache, 0, make([]byte, 32))
b.ResetTimer()
for i := 0; i < b.N; i++ {
- generateDataset(32*65536, cache)
+ dataset := make([]uint32, 32*65536/4)
+ generateDataset(dataset, 0, cache)
}
}
// Benchmarks the light verification performance.
func BenchmarkHashimotoLight(b *testing.B) {
- var (
- rawCache = generateCache(cacheSize(1), make([]byte, 32))
- cache = prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
- hash = hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
- )
+ cache := make([]uint32, cacheSize(1)/4)
+ generateCache(cache, 0, make([]byte, 32))
+
+ hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
+
b.ResetTimer()
for i := 0; i < b.N; i++ {
hashimotoLight(datasetSize(1), cache, hash, 0)
@@ -683,13 +748,14 @@ func BenchmarkHashimotoLight(b *testing.B) {
// Benchmarks the full (small) verification performance.
func BenchmarkHashimotoFullSmall(b *testing.B) {
- var (
- rawCache = generateCache(65536, make([]byte, 32))
- cache = prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
- rawDataset = generateDataset(32*65536, cache)
- dataset = prepare(uint64(len(rawDataset)), bytes.NewReader(rawDataset))
- hash = hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
- )
+ cache := make([]uint32, 65536/4)
+ generateCache(cache, 0, make([]byte, 32))
+
+ dataset := make([]uint32, 32*65536/4)
+ generateDataset(dataset, 0, cache)
+
+ hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
+
b.ResetTimer()
for i := 0; i < b.N; i++ {
hashimotoFull(32*65536, dataset, hash, 0)