author     Péter Szilágyi <peterke@gmail.com>  2018-08-15 18:50:16 +0800
committer  Péter Szilágyi <peterke@gmail.com>  2018-08-15 19:38:39 +0800
commit     d8541a9f99c58d97ba4908c3a768e518f28d2441 (patch)
tree       8da3166d1c102fbc56f17267461b9ce0dcbf72e4 /consensus/ethash
parent     e598ae5c010a9bc445fb3f106db9ae712e1a326e (diff)
consensus/ethash: use DAGs for remote mining, generate async
Diffstat (limited to 'consensus/ethash')
-rw-r--r--  consensus/ethash/consensus.go  50
-rw-r--r--  consensus/ethash/ethash.go     46
-rw-r--r--  consensus/ethash/sealer.go     11
3 files changed, 83 insertions, 24 deletions
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index e18a06d52..86fd997ae 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -461,6 +461,13 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
// VerifySeal implements consensus.Engine, checking whether the given block satisfies
// the PoW difficulty requirements.
func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
+ return ethash.verifySeal(chain, header, false)
+}
+
+// verifySeal checks whether a block satisfies the PoW difficulty requirements,
+// either using the usual ethash cache for it, or alternatively using a full DAG
+// to make remote mining fast.
+func (ethash *Ethash) verifySeal(chain consensus.ChainReader, header *types.Header, fulldag bool) error {
// If we're running a fake PoW, accept any seal as valid
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
time.Sleep(ethash.fakeDelay)
@@ -471,25 +478,48 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head
}
// If we're running a shared PoW, delegate verification to it
if ethash.shared != nil {
- return ethash.shared.VerifySeal(chain, header)
+ return ethash.shared.verifySeal(chain, header, fulldag)
}
// Ensure that we have a valid difficulty for the block
if header.Difficulty.Sign() <= 0 {
return errInvalidDifficulty
}
- // Recompute the digest and PoW value and verify against the header
+ // Recompute the digest and PoW values
number := header.Number.Uint64()
- cache := ethash.cache(number)
- size := datasetSize(number)
- if ethash.config.PowMode == ModeTest {
- size = 32 * 1024
+ var (
+ digest []byte
+ result []byte
+ )
+ // If fast-but-heavy PoW verification was requested, use an ethash dataset
+ if fulldag {
+ dataset := ethash.dataset(number, true)
+ if dataset.generated() {
+ digest, result = hashimotoFull(dataset.dataset, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
+
+ // Datasets are unmapped in a finalizer. Ensure that the dataset stays alive
+ // until after the call to hashimotoFull so it's not unmapped while being used.
+ runtime.KeepAlive(dataset)
+ } else {
+ // Dataset not yet generated, don't hang, use a cache instead
+ fulldag = false
+ }
}
- digest, result := hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
- // Caches are unmapped in a finalizer. Ensure that the cache stays live
- // until after the call to hashimotoLight so it's not unmapped while being used.
- runtime.KeepAlive(cache)
+ // If slow-but-light PoW verification was requested (or DAG not yet ready), use an ethash cache
+ if !fulldag {
+ cache := ethash.cache(number)
+
+ size := datasetSize(number)
+ if ethash.config.PowMode == ModeTest {
+ size = 32 * 1024
+ }
+ digest, result = hashimotoLight(size, cache.cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
+ // Caches are unmapped in a finalizer. Ensure that the cache stays alive
+ // until after the call to hashimotoLight so it's not unmapped while being used.
+ runtime.KeepAlive(cache)
+ }
+ // Verify the calculated values against the ones provided in the header
if !bytes.Equal(header.MixDigest[:], digest) {
return errInvalidMixDigest
}
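
The control flow added above is a two-tier check: use the full in-memory DAG when it has already finished generating, otherwise fall back to the light verification cache so the call never blocks on DAG generation. Below is a self-contained sketch of that fallback, with toy stand-ins for hashimotoFull/hashimotoLight and for the real dataset struct; it illustrates the shape of the branch, not the actual ethash API.

package main

import (
	"fmt"
	"sync/atomic"
)

// Toy stand-in for the ethash dataset: only the atomic "generated" flag matters here.
type dag struct{ done uint32 }

func (d *dag) generated() bool { return atomic.LoadUint32(&d.done) == 1 }

// Stand-ins for hashimotoFull / hashimotoLight.
func verifyFull(header string) string  { return "full-dag:" + header }
func verifyLight(header string) string { return "light-cache:" + header }

// verify mirrors the fulldag branch in verifySeal: take the fast path only if
// the DAG is already resident, otherwise use the cache instead of waiting.
func verify(d *dag, header string, fulldag bool) string {
	if fulldag && d.generated() {
		return verifyFull(header)
	}
	return verifyLight(header)
}

func main() {
	d := &dag{}
	fmt.Println(verify(d, "block#1", true)) // DAG still generating: light cache
	atomic.StoreUint32(&d.done, 1)          // background generation completed
	fmt.Println(verify(d, "block#2", true)) // DAG ready: full verification
}
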
diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go
index 19c94deb6..d98c3371c 100644
--- a/consensus/ethash/ethash.go
+++ b/consensus/ethash/ethash.go
@@ -29,6 +29,7 @@ import (
"runtime"
"strconv"
"sync"
+ "sync/atomic"
"time"
"unsafe"
@@ -281,6 +282,7 @@ type dataset struct {
mmap mmap.MMap // Memory map itself to unmap before releasing
dataset []uint32 // The actual cache data content
once sync.Once // Ensures the cache is generated only once
+ done uint32 // Atomic flag to determine generation status
}
// newDataset creates a new ethash mining dataset and returns it as a plain Go
@@ -292,6 +294,9 @@ func newDataset(epoch uint64) interface{} {
// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, test bool) {
d.once.Do(func() {
+ // Mark the dataset generated after we're done. This is needed for remote
+ // miners to tell whether the full DAG is ready for fast verification.
+ defer atomic.StoreUint32(&d.done, 1)
+
csize := cacheSize(d.epoch*epochLength + 1)
dsize := datasetSize(d.epoch*epochLength + 1)
seed := seedHash(d.epoch*epochLength + 1)
@@ -306,6 +311,8 @@ func (d *dataset) generate(dir string, limit int, test bool) {
d.dataset = make([]uint32, dsize/4)
generateDataset(d.dataset, d.epoch, cache)
+
+ return
}
// Disk storage is needed, this will get fancy
var endian string
@@ -348,6 +355,13 @@ func (d *dataset) generate(dir string, limit int, test bool) {
})
}
+// generated returns whether this particular dataset finished generating already
+// or not (it may not have been started at all). This is useful for remote miners
+// to default to verification caches instead of blocking on DAG generations.
+func (d *dataset) generated() bool {
+ return atomic.LoadUint32(&d.done) == 1
+}
+
// finalizer closes any file handlers and memory maps open.
func (d *dataset) finalizer() {
if d.mmap != nil {
@@ -589,20 +603,34 @@ func (ethash *Ethash) cache(block uint64) *cache {
// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
-func (ethash *Ethash) dataset(block uint64) *dataset {
+//
+// If async is specified, not only the future but the current DAG is also
+// generated on a background thread.
+func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
+ // Retrieve the requested ethash dataset
epoch := block / epochLength
currentI, futureI := ethash.datasets.get(epoch)
current := currentI.(*dataset)
- // Wait for generation finish.
- current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
-
- // If we need a new future dataset, now's a good time to regenerate it.
- if futureI != nil {
- future := futureI.(*dataset)
- go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
+ // If async is specified, generate everything in a background thread
+ if async && !current.generated() {
+ go func() {
+ current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
+
+ if futureI != nil {
+ future := futureI.(*dataset)
+ future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
+ }
+ }()
+ } else {
+ // Either blocking generation was requested, or already done
+ current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
+
+ if futureI != nil {
+ future := futureI.(*dataset)
+ go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.PowMode == ModeTest)
+ }
}
-
return current
}
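
The dataset generation above combines sync.Once with an atomic done flag, set via defer so it also fires on the early in-memory return, letting callers ask "is it ready?" without blocking. A minimal runnable sketch of that pattern, with a sleep and a slice allocation standing in for the expensive DAG build (durations and sizes are placeholders, not the real ethash values):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type dataset struct {
	once sync.Once
	done uint32 // atomic flag mirroring the "done" field in the diff
	data []uint32
}

func (d *dataset) generate() {
	d.once.Do(func() {
		defer atomic.StoreUint32(&d.done, 1) // mark generated when finished
		time.Sleep(50 * time.Millisecond)    // stands in for the expensive DAG build
		d.data = make([]uint32, 1024)
	})
}

func (d *dataset) generated() bool { return atomic.LoadUint32(&d.done) == 1 }

// request mirrors Ethash.dataset(block, async): kick generation off in the
// background when async is set and the DAG isn't ready, block otherwise.
func request(d *dataset, async bool) *dataset {
	if async && !d.generated() {
		go d.generate()
	} else {
		d.generate()
	}
	return d
}

func main() {
	d := request(&dataset{}, true)
	fmt.Println("ready immediately?", d.generated()) // false: building in background
	time.Sleep(100 * time.Millisecond)
	fmt.Println("ready later?", d.generated()) // true once the goroutine finishes
}
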
diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go
index 03d848473..c3b2c86d1 100644
--- a/consensus/ethash/sealer.go
+++ b/consensus/ethash/sealer.go
@@ -114,7 +114,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
hash = header.HashNoNonce().Bytes()
target = new(big.Int).Div(two256, header.Difficulty)
number = header.Number.Uint64()
- dataset = ethash.dataset(number)
+ dataset = ethash.dataset(number, false)
)
// Start generating random nonces until we abort or find a good one
var (
@@ -233,21 +233,22 @@ func (ethash *Ethash) remote(notify []string) {
log.Info("Work submitted but none pending", "hash", hash)
return false
}
-
// Verify the correctness of the submitted result.
header := block.Header()
header.Nonce = nonce
header.MixDigest = mixDigest
- if err := ethash.VerifySeal(nil, header); err != nil {
- log.Warn("Invalid proof-of-work submitted", "hash", hash, "err", err)
+
+ start := time.Now()
+ if err := ethash.verifySeal(nil, header, true); err != nil {
+ log.Warn("Invalid proof-of-work submitted", "hash", hash, "elapsed", time.Since(start), "err", err)
return false
}
-
// Make sure the result channel is created.
if ethash.resultCh == nil {
log.Warn("Ethash result channel is empty, submitted mining result is rejected")
return false
}
+ log.Trace("Verified correct proof-of-work", "hash", hash, "elapsed", time.Since(start))
+ // Solution seems to be valid, return to the miner and notify acceptance.
select {
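
The sealer change above swaps VerifySeal for the DAG-backed verifySeal and times the check on both outcomes. A small runnable sketch of that shape, using a stand-in verify callback in place of the real ethash engine and the standard log package in place of go-ethereum's logger:

package main

import (
	"errors"
	"log"
	"time"
)

// handleSubmission mirrors the submitted-work path: verify with the full DAG
// (fulldag=true) and report the elapsed time on both failure and success.
func handleSubmission(verify func(fulldag bool) error) bool {
	start := time.Now()
	if err := verify(true); err != nil {
		log.Printf("Invalid proof-of-work submitted: elapsed=%v err=%v", time.Since(start), err)
		return false
	}
	log.Printf("Verified correct proof-of-work: elapsed=%v", time.Since(start))
	return true
}

func main() {
	handleSubmission(func(bool) error { return nil })                              // accepted
	handleSubmission(func(bool) error { return errors.New("invalid mix digest") }) // rejected
}
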