Diffstat (limited to 'pow')
-rw-r--r--   pow/ethash.go        38
-rw-r--r--   pow/ethash_algo.go   15
2 files changed, 32 insertions, 21 deletions
diff --git a/pow/ethash.go b/pow/ethash.go
index dbe8ff077..1e577a587 100644
--- a/pow/ethash.go
+++ b/pow/ethash.go
@@ -32,12 +32,12 @@ import (
 	"unsafe"
 
 	mmap "github.com/edsrzf/mmap-go"
-	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/log"
 	metrics "github.com/rcrowley/go-metrics"
 )
 
 var (
+	ErrInvalidDumpMagic  = errors.New("invalid dump magic")
 	ErrNonceOutOfRange   = errors.New("nonce out of range")
 	ErrInvalidDifficulty = errors.New("non-positive difficulty")
 	ErrInvalidMixDigest  = errors.New("invalid mix digest")
@@ -55,7 +55,7 @@ var (
 	algorithmRevision = 23
 
 	// dumpMagic is a dataset dump header to sanity check a data dump.
-	dumpMagic = hexutil.MustDecode("0xfee1deadbaddcafe")
+	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
 )
 
 // isLittleEndian returns whether the local system is running in little or big
@@ -76,7 +76,14 @@ func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
 		file.Close()
 		return nil, nil, nil, err
 	}
-	return file, mem, buffer, err
+	for i, magic := range dumpMagic {
+		if buffer[i] != magic {
+			mem.Unmap()
+			file.Close()
+			return nil, nil, nil, ErrInvalidDumpMagic
+		}
+	}
+	return file, mem, buffer[len(dumpMagic):], err
 }
 
 // memoryMapFile tries to memory map an already opened file descriptor.
@@ -113,7 +120,7 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	if err = dump.Truncate(int64(size)); err != nil {
+	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
 		return nil, nil, nil, err
 	}
 	// Memory map the file for writing and fill it with the generator
@@ -122,7 +129,10 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint
 		dump.Close()
 		return nil, nil, nil, err
 	}
-	generator(buffer)
+	copy(buffer, dumpMagic)
+
+	data := buffer[len(dumpMagic):]
+	generator(data)
 
 	if err := mem.Flush(); err != nil {
 		mem.Unmap()
@@ -130,7 +140,7 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint
 		return nil, nil, nil, err
 	}
 	os.Rename(temp, path)
-	return dump, mem, buffer, nil
+	return dump, mem, data, nil
 }
 
 // cache wraps an ethash cache with some metadata to allow easier concurrent use.
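Note on the memoryMap/memoryMapAndGenerate hunks above: every cache and dataset dump now begins with the two dumpMagic words, memoryMap rejects files without that prefix via ErrInvalidDumpMagic, and callers only ever see the payload slice that follows the prefix. Below is a minimal standalone sketch of that file layout, using encoding/binary and an explicit little-endian order for simplicity instead of the package's mmapped, native-order []uint32 view; writeDump and readDump are illustrative names, not part of the patch.

// Sketch of the dump layout introduced above: len(dumpMagic) uint32 words,
// then the payload. The real code writes through a memory-mapped []uint32
// view in host byte order; encoding/binary and little-endian are used here
// only to keep the example self-contained.
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"os"
)

var (
	dumpMagic           = []uint32{0xbaddcafe, 0xfee1dead}
	errInvalidDumpMagic = errors.New("invalid dump magic")
)

// writeDump stores the magic prefix followed by the payload words.
func writeDump(path string, payload []uint32) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	if err := binary.Write(f, binary.LittleEndian, dumpMagic); err != nil {
		return err
	}
	return binary.Write(f, binary.LittleEndian, payload)
}

// readDump rejects files without the magic prefix and returns the payload.
func readDump(path string, words int) ([]uint32, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	prefix := make([]uint32, len(dumpMagic))
	if err := binary.Read(f, binary.LittleEndian, prefix); err != nil {
		return nil, err
	}
	for i, magic := range dumpMagic {
		if prefix[i] != magic {
			return nil, errInvalidDumpMagic
		}
	}
	payload := make([]uint32, words)
	if err := binary.Read(f, binary.LittleEndian, payload); err != nil {
		return nil, err
	}
	return payload, nil
}

func main() {
	if err := writeDump("cache.dump", []uint32{1, 2, 3}); err != nil {
		panic(err)
	}
	payload, err := readDump("cache.dump", 3)
	if err != nil {
		panic(err)
	}
	fmt.Println(payload) // [1 2 3]
}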
@@ -165,11 +175,11 @@ func (c *cache) generate(dir string, limit int, test bool) {
 			return
 		}
 		// Disk storage is needed, this will get fancy
-		endian := "le"
+		var endian string
 		if !isLittleEndian() {
-			endian = "be"
+			endian = ".be"
 		}
-		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x.%s", algorithmRevision, seed, endian))
+		path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
 		logger := log.New("epoch", c.epoch)
 
 		// Try to load the file from disk and memory map it
@@ -192,7 +202,7 @@ func (c *cache) generate(dir string, limit int, test bool) {
 		// Iterate over all previous instances and delete old ones
 		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
 			seed := seedHash(uint64(ep)*epochLength + 1)
-			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x.%s", algorithmRevision, seed, endian))
+			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
 			os.Remove(path)
 		}
 	})
@@ -249,11 +259,11 @@ func (d *dataset) generate(dir string, limit int, test bool) {
 			generateDataset(d.dataset, d.epoch, cache)
 		}
 		// Disk storage is needed, this will get fancy
-		endian := "le"
+		var endian string
 		if !isLittleEndian() {
-			endian = "be"
+			endian = ".be"
 		}
-		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x.%s", algorithmRevision, seed, endian))
+		path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
 		logger := log.New("epoch", d.epoch)
 
 		// Try to load the file from disk and memory map it
@@ -279,7 +289,7 @@ func (d *dataset) generate(dir string, limit int, test bool) {
 		// Iterate over all previous instances and delete old ones
 		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
 			seed := seedHash(uint64(ep)*epochLength + 1)
-			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x.%s", algorithmRevision, seed, endian))
+			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
 			os.Remove(path)
 		}
 	})
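The cache.generate and dataset.generate hunks above also change dump file naming: only the first 8 bytes of the seed are encoded (16 hex characters), and the endianness marker is now a ".be" suffix added only on big-endian hosts, so little-endian machines get no suffix at all. A small self-contained sketch of the resulting names, with a placeholder all-zero seed and a common byte-order probe standing in for the package's isLittleEndian helper:

package main

import (
	"fmt"
	"path/filepath"
	"unsafe"
)

const algorithmRevision = 23 // same revision constant as in ethash.go

// isLittleEndian probes the host byte order, in the spirit of the package helper.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}

func main() {
	// Placeholder seed; the package derives it from seedHash(epoch*epochLength + 1).
	seed := make([]byte, 32)

	endian := "" // little-endian hosts now get no suffix at all
	if !isLittleEndian() {
		endian = ".be" // big-endian dumps are marked explicitly, dot included
	}
	name := fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian)
	fmt.Println(filepath.Join("/tmp/ethash", name)) // /tmp/ethash/cache-R23-0000000000000000 on little-endian hosts
}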
diff --git a/pow/ethash_algo.go b/pow/ethash_algo.go
index ace482b93..3737cc5d7 100644
--- a/pow/ethash_algo.go
+++ b/pow/ethash_algo.go
@@ -225,7 +225,8 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 	// Print some debug logs to allow analysis on low end devices
 	logger := log.New("epoch", epoch)
 
-	defer func(start time.Time) {
+	start := time.Now()
+	defer func() {
 		elapsed := time.Since(start)
 
 		logFn := logger.Debug
@@ -233,7 +234,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 			logFn = logger.Info
 		}
 		logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
-	}(time.Now())
+	}()
 
 	// Figure out whether the bytes need to be swapped for the machine
 	swapped := !isLittleEndian()
@@ -260,15 +261,15 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 			keccak512 := makeHasher(sha3.NewKeccak512())
 
 			// Calculate the data segment this thread should generate
-			batch := uint32(size / hashBytes / uint64(threads))
-			start := uint32(id) * batch
-			limit := start + batch
+			batch := uint32((size + hashBytes*uint64(threads) - 1) / (hashBytes * uint64(threads)))
+			first := uint32(id) * batch
+			limit := first + batch
 			if limit > uint32(size/hashBytes) {
 				limit = uint32(size / hashBytes)
 			}
 			// Calculate the dataset segment
 			percent := uint32(size / hashBytes / 100)
-			for index := start; index < limit; index++ {
+			for index := first; index < limit; index++ {
 				item := generateDatasetItem(cache, index, keccak512)
 				if swapped {
 					swap(item)
@@ -276,7 +277,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 				copy(dataset[index*hashBytes:], item)
 
 				if status := atomic.AddUint32(&progress, 1); status%percent == 0 {
-					logger.Info("Generating DAG in progress", "percentage", uint64(status*100)/(size/hashBytes))
+					logger.Info("Generating DAG in progress", "percentage", uint64(status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
 				}
 			}
 		}(i)
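In generateDataset above, the per-thread batch switches from truncating division to the rounded-up (size + hashBytes*threads - 1) / (hashBytes * threads), so the tail of the dataset is no longer skipped when the row count is not a multiple of the thread count; with the old truncation, 1000 rows split over 7 threads would cover only 7*142 = 994 of them. The existing clamp against size/hashBytes keeps the last thread from overshooting, and start is hoisted out of the deferred closure so the periodic progress log can also report elapsed time. A standalone sketch of the partitioning, with arbitrary rows and threads values:

// Sketch of ceiling-division work splitting with a clamp, the scheme the patch
// uses for DAG rows. rows stands in for size/hashBytes; values are arbitrary.
package main

import "fmt"

func main() {
	const (
		rows    = 1000
		threads = 7
	)
	covered := make([]int, rows)

	batch := (rows + threads - 1) / threads // round up instead of truncating
	for id := 0; id < threads; id++ {
		first := id * batch
		limit := first + batch
		if limit > rows {
			limit = rows // clamp the final thread to the end of the dataset
		}
		for index := first; index < limit; index++ {
			covered[index]++
		}
	}
	ok := true
	for index, n := range covered {
		if n != 1 {
			ok = false
			fmt.Println("row", index, "covered", n, "times")
		}
	}
	fmt.Println("every row covered exactly once:", ok) // true
}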