author     Jimmy Hu <jimmy.hu@dexon.org>            2019-03-21 08:37:33 +0800
committer  Wei-Ning Huang <w@byzantine-lab.io>      2019-06-13 18:11:44 +0800
commit     d82cacf5ac51ef695b921a9c2683c38c779d1050 (patch)
tree       720005698e986d8eba31b0a98960f3efc1760aa6 /vendor
parent     ba1eb31880ea5fae9a8e99b21ac165360e0ea30e (diff)
core/vm: remove round from addDKG functions (#279)
* vendor: sync to latest core
* core/vm: remove addDKG functions
* core: fix conflict
Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go            |   7
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go                |   6
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go      | 345
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go                | 138
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/db/interfaces.go            |  12
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/db/level-db.go              | 338
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/db/memory.go                |  65
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/dkg-tsig-protocol.go        |  91
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go               |   8
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go         |  23
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go             |   9
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go      |  20
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-based-config.go |   2
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-event.go        |  29
-rw-r--r--  vendor/vendor.json                                                                  |  48
15 files changed, 925 insertions, 216 deletions
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go
index 0e39fa52a..14aa3857b 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go
@@ -432,6 +432,13 @@ func (mgr *agreementMgr) baRoutineForOneRound(
var nextHeight uint64
var nextTime time.Time
for {
+ // Make sure we are stoppable.
+ select {
+ case <-mgr.ctx.Done():
+ breakLoop = true
+ return
+ default:
+ }
nextHeight, nextTime = mgr.bcModule.nextBlock()
if isStop(oldPos) && nextHeight == 0 {
break
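The hunk above adds a non-blocking ctx.Done() check so the BA loop can stop promptly on shutdown. A minimal, runnable sketch of the same pattern; pollNextBlock is a hypothetical stand-in for bcModule.nextBlock, not a name from this repo:

package main

import (
	"context"
	"fmt"
)

// pollNextBlock is a hypothetical stand-in for bcModule.nextBlock.
func pollNextBlock() (height uint64) { return 0 }

func baRoutine(ctx context.Context) {
	for {
		// Make sure the loop is stoppable: a non-blocking peek at
		// ctx.Done() before each iteration's real work.
		select {
		case <-ctx.Done():
			return
		default:
		}
		if pollNextBlock() == 0 {
			break // nothing more to do
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	go func() { baRoutine(ctx); close(done) }()
	cancel()
	<-done
	fmt.Println("routine stopped")
}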
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go
index 43fddd0a0..b0c773429 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go
@@ -533,6 +533,10 @@ func (a *agreement) done() <-chan struct{} {
func (a *agreement) confirmed() bool {
a.lock.RLock()
defer a.lock.RUnlock()
+ return a.confirmedNoLock()
+}
+
+func (a *agreement) confirmedNoLock() bool {
return a.hasOutput
}
@@ -556,6 +560,8 @@ func (a *agreement) processBlock(block *types.Block) error {
receivedTime: time.Now().UTC(),
})
return nil
+ } else if a.confirmedNoLock() {
+ return nil
}
if b, exist := a.data.blocks[block.ProposerID]; exist {
if b.Hash != block.Hash {
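The confirmed/confirmedNoLock split above is the standard Go idiom for letting a caller that already holds the mutex reuse a check without self-deadlocking. A self-contained sketch under simplified assumptions (field names borrowed from the diff, everything else trimmed):

package main

import (
	"fmt"
	"sync"
)

type agreement struct {
	lock      sync.RWMutex
	hasOutput bool
}

// confirmed is the lock-taking entry point for external callers.
func (a *agreement) confirmed() bool {
	a.lock.RLock()
	defer a.lock.RUnlock()
	return a.confirmedNoLock()
}

// confirmedNoLock is for code paths that already hold a.lock; calling
// confirmed() from such a path would deadlock against the held write lock.
func (a *agreement) confirmedNoLock() bool {
	return a.hasOutput
}

func (a *agreement) processBlock() {
	a.lock.Lock()
	defer a.lock.Unlock()
	if a.confirmedNoLock() {
		return // already confirmed; drop late blocks silently
	}
	a.hasOutput = true
}

func main() {
	a := &agreement{}
	a.processBlock()
	fmt.Println(a.confirmed()) // true
}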
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go
index 5c3226053..084f9d0c9 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/configuration-chain.go
@@ -38,18 +38,39 @@ var (
"tsig is already running")
ErrDKGNotReady = fmt.Errorf(
"DKG is not ready")
+ ErrSkipButNoError = fmt.Errorf(
+ "skip but no error")
+ ErrDKGAborted = fmt.Errorf(
+ "DKG is aborted")
)
+// ErrMismatchDKG represents a failed attempt to run the DKG protocol
+// because the registered DKG protocol is mismatched, in terms of round
+// and resetCount.
+type ErrMismatchDKG struct {
+ expectRound, expectReset uint64
+ actualRound, actualReset uint64
+}
+
+func (e ErrMismatchDKG) Error() string {
+ return fmt.Sprintf(
+ "mismatch DKG, abort running: expect(%d %d) actual(%d %d)",
+ e.expectRound, e.expectReset, e.actualRound, e.actualReset)
+}
+
+type dkgStepFn func(round uint64, reset uint64) (chan<- struct{}, error)
+
type configurationChain struct {
ID types.NodeID
recv dkgReceiver
gov Governance
dkg *dkgProtocol
+ dkgRunPhases []dkgStepFn
logger common.Logger
dkgLock sync.RWMutex
dkgSigner map[uint64]*dkgShareSecret
npks map[uint64]*typesDKG.NodePublicKeys
dkgResult sync.RWMutex
+ abortDKGCh chan chan<- struct{}
tsig map[common.Hash]*tsigProtocol
tsigTouched map[common.Hash]struct{}
tsigReady *sync.Cond
@@ -70,13 +91,14 @@ func newConfigurationChain(
cache *utils.NodeSetCache,
dbInst db.Database,
logger common.Logger) *configurationChain {
- return &configurationChain{
+ configurationChain := &configurationChain{
ID: ID,
recv: recv,
gov: gov,
logger: logger,
dkgSigner: make(map[uint64]*dkgShareSecret),
npks: make(map[uint64]*typesDKG.NodePublicKeys),
+ abortDKGCh: make(chan chan<- struct{}, 1),
tsig: make(map[common.Hash]*tsigProtocol),
tsigTouched: make(map[common.Hash]struct{}),
tsigReady: sync.NewCond(&sync.Mutex{}),
@@ -84,14 +106,66 @@ func newConfigurationChain(
db: dbInst,
pendingPsig: make(map[common.Hash][]*typesDKG.PartialSignature),
}
+ configurationChain.initDKGPhasesFunc()
+ return configurationChain
+}
+
+func (cc *configurationChain) abortDKG(round, reset uint64) {
+ cc.dkgLock.Lock()
+ defer cc.dkgLock.Unlock()
+ if cc.dkg != nil {
+ cc.abortDKGNoLock(round, reset)
+ }
+}
+
+func (cc *configurationChain) abortDKGNoLock(round, reset uint64) bool {
+ if cc.dkg.round > round ||
+ (cc.dkg.round == round && cc.dkg.reset >= reset) {
+ cc.logger.Error("newer DKG already is registered",
+ "round", round,
+ "reset", reset)
+ return false
+ }
+ cc.logger.Error("Previous DKG is not finished",
+ "round", round,
+ "reset", reset,
+ "previous-round", cc.dkg.round,
+ "previous-reset", cc.dkg.reset)
+ // Abort DKG routine in previous round.
+ aborted := make(chan struct{}, 1)
+ cc.logger.Error("Aborting DKG in previous round",
+ "round", round,
+ "previous-round", cc.dkg.round)
+ cc.dkgLock.Unlock()
+ // Notify current running DKG protocol to abort.
+ cc.abortDKGCh <- aborted
+ // Wait for current running DKG protocol aborting.
+ <-aborted
+ cc.logger.Error("Previous DKG aborted",
+ "round", round,
+ "reset", reset)
+ cc.dkgLock.Lock()
+ return true
}
func (cc *configurationChain) registerDKG(round, reset uint64, threshold int) {
cc.dkgLock.Lock()
defer cc.dkgLock.Unlock()
if cc.dkg != nil {
- cc.logger.Error("Previous DKG is not finished")
- // TODO(mission): return here and fix CI failure.
+ // Make sure we only proceed when cc.dkg is nil.
+ if !cc.abortDKGNoLock(round, reset) {
+ return
+ }
+ if cc.dkg != nil {
+ // This panic is raised only when there are multiple attempts to
+ // register a DKG protocol at the same time.
+ panic(ErrMismatchDKG{
+ expectRound: round,
+ expectReset: reset,
+ actualRound: cc.dkg.round,
+ actualReset: cc.dkg.reset,
+ })
+ }
}
dkgSet, err := cc.cache.GetDKGSet(round)
if err != nil {
@@ -101,65 +175,77 @@ func (cc *configurationChain) registerDKG(round, reset uint64, threshold int) {
cc.dkgSet = dkgSet
cc.pendingPrvShare = make(map[types.NodeID]*typesDKG.PrivateShare)
cc.mpkReady = false
- cc.dkg = newDKGProtocol(
- cc.ID,
- cc.recv,
- round,
- reset,
- threshold)
- // TODO(mission): should keep DKG resetCount along with DKG private share.
- err = cc.db.PutOrUpdateDKGMasterPrivateShares(round, *cc.dkg.prvShares)
+ cc.dkg, err = recoverDKGProtocol(cc.ID, cc.recv, round, reset, cc.db)
if err != nil {
- cc.logger.Error("Error put or update DKG master private shares", "error",
- err)
- return
+ panic(err)
}
+ if cc.dkg == nil {
+ cc.dkg = newDKGProtocol(
+ cc.ID,
+ cc.recv,
+ round,
+ reset,
+ threshold)
+
+ err = cc.db.PutOrUpdateDKGProtocol(cc.dkg.toDKGProtocolInfo())
+ if err != nil {
+ cc.logger.Error("Error put or update DKG protocol", "error",
+ err)
+ return
+ }
+ }
+
go func() {
ticker := newTicker(cc.gov, round, TickerDKG)
defer ticker.Stop()
<-ticker.Tick()
cc.dkgLock.Lock()
defer cc.dkgLock.Unlock()
- cc.dkg.proposeMPKReady()
+ if cc.dkg != nil && cc.dkg.round == round && cc.dkg.reset == reset {
+ cc.dkg.proposeMPKReady()
+ }
}()
}
-func (cc *configurationChain) runDKG(round, reset uint64) error {
- // Check if corresponding DKG signer is ready.
- if _, _, err := cc.getDKGInfo(round); err == nil {
- return nil
- }
- cc.dkgLock.Lock()
- defer cc.dkgLock.Unlock()
- if cc.dkg == nil ||
- cc.dkg.round < round ||
+func (cc *configurationChain) runDKGPhaseOne(round uint64, reset uint64) (
+ abortCh chan<- struct{}, err error) {
+ if cc.dkg.round < round ||
(cc.dkg.round == round && cc.dkg.reset < reset) {
- return ErrDKGNotRegistered
+ err = ErrDKGNotRegistered
+ return
}
if cc.dkg.round != round || cc.dkg.reset != reset {
cc.logger.Warn("DKG canceled", "round", round, "reset", reset)
- return nil
+ err = ErrSkipButNoError
+ return
}
cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round)
if cc.gov.IsDKGFinal(round) {
cc.logger.Warn("DKG already final", "round", round)
- return nil
+ err = ErrSkipButNoError
+ return
}
cc.logger.Debug("Calling Governance.IsDKGMPKReady", "round", round)
- for !cc.gov.IsDKGMPKReady(round) {
- cc.logger.Debug("DKG MPKs are not ready yet. Try again later...",
- "nodeID", cc.ID.String()[:6],
- "round", round,
- "reset", reset)
+ for abortCh == nil && !cc.gov.IsDKGMPKReady(round) {
cc.dkgLock.Unlock()
- time.Sleep(500 * time.Millisecond)
+ cc.logger.Debug("DKG MPKs are not ready yet. Try again later...",
+ "nodeID", cc.ID,
+ "round", round)
+ select {
+ case abortCh = <-cc.abortDKGCh:
+ err = ErrDKGAborted
+ case <-time.After(500 * time.Millisecond):
+ }
cc.dkgLock.Lock()
}
- ticker := newTicker(cc.gov, round, TickerDKG)
- defer ticker.Stop()
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
+ return
+}
+
+func (cc *configurationChain) runDKGPhaseTwoAndThree(
+ round uint64, reset uint64) (chan<- struct{}, error) {
// Check if this node successfully joins the protocol.
cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
mpks := cc.gov.DKGMasterPublicKeys(round)
@@ -174,7 +260,7 @@ func (cc *configurationChain) runDKG(round, reset uint64) error {
cc.logger.Warn("Failed to join DKG protocol",
"round", round,
"reset", reset)
- return nil
+ return nil, ErrSkipButNoError
}
// Phase 2(T = 0): Exchange DKG secret key share.
if err := cc.dkg.processMasterPublicKeys(mpks); err != nil {
@@ -184,6 +270,13 @@ func (cc *configurationChain) runDKG(round, reset uint64) error {
"error", err)
}
cc.mpkReady = true
+ // The time to process private share might be long, check aborting before
+ // get into that loop.
+ select {
+ case abortCh := <-cc.abortDKGCh:
+ return abortCh, ErrDKGAborted
+ default:
+ }
for _, prvShare := range cc.pendingPrvShare {
if err := cc.dkg.processPrivateShare(prvShare); err != nil {
cc.logger.Error("Failed to process private share",
@@ -192,16 +285,18 @@ func (cc *configurationChain) runDKG(round, reset uint64) error {
"error", err)
}
}
+
// Phase 3(T = 0~λ): Propose complaint.
// Propose complaint is done in `processMasterPublicKeys`.
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
+ return nil, nil
+}
+
+func (cc *configurationChain) runDKGPhaseFour() {
// Phase 4(T = λ): Propose nack complaints.
cc.dkg.proposeNackComplaints()
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
+}
+
+func (cc *configurationChain) runDKGPhaseFiveAndSix(round uint64, reset uint64) {
// Phase 5(T = 2λ): Propose Anti nack complaint.
cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
complaints := cc.gov.DKGComplaints(round)
@@ -211,35 +306,43 @@ func (cc *configurationChain) runDKG(round, reset uint64) error {
"reset", reset,
"error", err)
}
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
+
// Phase 6(T = 3λ): Rebroadcast anti nack complaint.
// Rebroadcast is done in `processPrivateShare`.
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
+}
+
+func (cc *configurationChain) runDKGPhaseSeven(complaints []*typesDKG.Complaint) {
// Phase 7(T = 4λ): Enforce complaints and nack complaints.
cc.dkg.enforceNackComplaints(complaints)
// Enforce complaint is done in `processPrivateShare`.
+}
+
+func (cc *configurationChain) runDKGPhaseEight() {
// Phase 8(T = 5λ): DKG finalize.
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
cc.dkg.proposeFinalize()
+}
+
+func (cc *configurationChain) runDKGPhaseNine(round uint64, reset uint64) (
+ abortCh chan<- struct{}, err error) {
// Phase 9(T = 6λ): DKG is ready.
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
// Normally, IsDKGFinal would return true here. Use this in case of
// unexpected network fluctuation, to ensure the robustness of the DKG protocol.
cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round)
- for !cc.gov.IsDKGFinal(round) {
+ for abortCh == nil && !cc.gov.IsDKGFinal(round) {
+ cc.dkgLock.Unlock()
cc.logger.Debug("DKG is not ready yet. Try again later...",
"nodeID", cc.ID.String()[:6],
"round", round,
"reset", reset)
- time.Sleep(500 * time.Millisecond)
+ select {
+ case abortCh = <-cc.abortDKGCh:
+ err = ErrDKGAborted
+ case <-time.After(500 * time.Millisecond):
+ }
+ cc.dkgLock.Lock()
+ }
+ if abortCh != nil {
+ return
}
cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
@@ -248,7 +351,7 @@ func (cc *configurationChain) runDKG(round, reset uint64) error {
cc.gov.DKGComplaints(round),
cc.dkg.threshold)
if err != nil {
- return err
+ return
}
qualifies := ""
for nID := range npks.QualifyNodeIDs {
@@ -264,20 +367,136 @@ func (cc *configurationChain) runDKG(round, reset uint64) error {
cc.logger.Warn("Self is not in Qualify Nodes",
"round", round,
"reset", reset)
- return nil
+ return
}
signer, err := cc.dkg.recoverShareSecret(npks.QualifyIDs)
if err != nil {
- return err
+ return
}
// Save private shares to DB.
if err = cc.db.PutDKGPrivateKey(round, *signer.privateKey); err != nil {
- return err
+ return
}
cc.dkgResult.Lock()
defer cc.dkgResult.Unlock()
cc.dkgSigner[round] = signer
cc.npks[round] = npks
+ return
+}
+
+func (cc *configurationChain) runTick(ticker Ticker) (abortCh chan<- struct{}) {
+ cc.dkgLock.Unlock()
+ defer cc.dkgLock.Lock()
+ select {
+ case abortCh = <-cc.abortDKGCh:
+ case <-ticker.Tick():
+ }
+ return
+}
+
+func (cc *configurationChain) initDKGPhasesFunc() {
+ cc.dkgRunPhases = []dkgStepFn{
+ func(round uint64, reset uint64) (chan<- struct{}, error) {
+ return cc.runDKGPhaseOne(round, reset)
+ },
+ func(round uint64, reset uint64) (chan<- struct{}, error) {
+ return cc.runDKGPhaseTwoAndThree(round, reset)
+ },
+ func(round uint64, reset uint64) (chan<- struct{}, error) {
+ cc.runDKGPhaseFour()
+ return nil, nil
+ },
+ func(round uint64, reset uint64) (chan<- struct{}, error) {
+ cc.runDKGPhaseFiveAndSix(round, reset)
+ return nil, nil
+ },
+ func(round uint64, reset uint64) (chan<- struct{}, error) {
+ complaints := cc.gov.DKGComplaints(round)
+ cc.runDKGPhaseSeven(complaints)
+ return nil, nil
+ },
+ func(round uint64, reset uint64) (chan<- struct{}, error) {
+ cc.runDKGPhaseEight()
+ return nil, nil
+ },
+ func(round uint64, reset uint64) (chan<- struct{}, error) {
+ return cc.runDKGPhaseNine(round, reset)
+ },
+ }
+}
+
+func (cc *configurationChain) runDKG(round uint64, reset uint64) (err error) {
+ // Check if corresponding DKG signer is ready.
+ if _, _, err = cc.getDKGInfo(round); err == nil {
+ return ErrSkipButNoError
+ }
+ cc.dkgLock.Lock()
+ defer cc.dkgLock.Unlock()
+ var (
+ ticker Ticker
+ abortCh chan<- struct{}
+ )
+ defer func() {
+ if ticker != nil {
+ ticker.Stop()
+ }
+ // We still hold cc.dkgLock here; reset cc.dkg to nil when done.
+ cc.dkg = nil
+ if abortCh == nil {
+ select {
+ case abortCh = <-cc.abortDKGCh:
+ // The previous DKG finishes its job, don't overwrite its error
+ // with "aborted" here.
+ default:
+ }
+ }
+ if abortCh != nil {
+ abortCh <- struct{}{}
+ }
+ }()
+ tickStartAt := 1
+
+ if cc.dkg == nil {
+ return ErrDKGNotRegistered
+ }
+ if cc.dkg.round != round || cc.dkg.reset != reset {
+ return ErrMismatchDKG{
+ expectRound: round,
+ expectReset: reset,
+ actualRound: cc.dkg.round,
+ actualReset: cc.dkg.reset,
+ }
+ }
+
+ for i := cc.dkg.step; i < len(cc.dkgRunPhases); i++ {
+ if i >= tickStartAt && ticker == nil {
+ ticker = newTicker(cc.gov, round, TickerDKG)
+ }
+
+ if ticker != nil {
+ if abortCh = cc.runTick(ticker); abortCh != nil {
+ return
+ }
+ }
+
+ switch abortCh, err = cc.dkgRunPhases[i](round, reset); err {
+ case ErrSkipButNoError, nil:
+ cc.dkg.step = i + 1
+ err = cc.db.PutOrUpdateDKGProtocol(cc.dkg.toDKGProtocolInfo())
+ if err != nil {
+ return fmt.Errorf("put or update DKG protocol error: %v", err)
+ }
+ default:
+ return
+ }
+ }
+
return nil
}
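The abort path above rides on abortDKGCh, a buffered channel that carries an acknowledgment channel: the aborter sends one in, the running DKG loop picks it up, and signals on it once fully unwound, so abort() can block until the old run has actually stopped. A minimal sketch of that handshake under simplified assumptions (no locks; the worker type is hypothetical):

package main

import (
	"fmt"
	"time"
)

type worker struct {
	// abortCh carries an acknowledgment channel from the aborter.
	abortCh chan chan<- struct{}
}

func (w *worker) run() {
	var ack chan<- struct{}
	defer func() {
		if ack != nil {
			ack <- struct{}{} // report back: fully unwound
		}
	}()
	for ack == nil {
		select {
		case ack = <-w.abortCh: // abort requested
		case <-time.After(10 * time.Millisecond):
			// one slice of protocol work per tick
		}
	}
}

// abort asks the running routine to stop and blocks until it has.
func (w *worker) abort() {
	ack := make(chan struct{}, 1)
	w.abortCh <- ack
	<-ack
}

func main() {
	w := &worker{abortCh: make(chan chan<- struct{}, 1)}
	go w.run()
	w.abort()
	fmt.Println("previous run aborted")
}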
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
index e0a6753a9..8f8002b67 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
@@ -260,8 +260,8 @@ CleanChannelLoop:
if block.Position.Height > changeNotaryHeight &&
block.Position.Round <= currentRound {
panic(fmt.Errorf(
- "round not switch when confirmig: %s, %d, should switch at %d",
- block, currentRound, changeNotaryHeight))
+ "round not switch when confirmig: %s, %d, should switch at %d, %s",
+ block, currentRound, changeNotaryHeight, newPos))
}
recv.restartNotary <- newPos
}
@@ -302,7 +302,7 @@ func (recv *consensusDKGReceiver) ProposeDKGComplaint(
}
recv.logger.Debug("Calling Governace.AddDKGComplaint",
"complaint", complaint)
- recv.gov.AddDKGComplaint(complaint.Round, complaint)
+ recv.gov.AddDKGComplaint(complaint)
}
// ProposeDKGMasterPublicKey propose a DKGMasterPublicKey.
@@ -313,7 +313,7 @@ func (recv *consensusDKGReceiver) ProposeDKGMasterPublicKey(
return
}
recv.logger.Debug("Calling Governance.AddDKGMasterPublicKey", "key", mpk)
- recv.gov.AddDKGMasterPublicKey(mpk.Round, mpk)
+ recv.gov.AddDKGMasterPublicKey(mpk)
}
// ProposeDKGPrivateShare propose a DKGPrivateShare.
@@ -362,7 +362,7 @@ func (recv *consensusDKGReceiver) ProposeDKGMPKReady(ready *typesDKG.MPKReady) {
return
}
recv.logger.Debug("Calling Governance.AddDKGMPKReady", "ready", ready)
- recv.gov.AddDKGMPKReady(ready.Round, ready)
+ recv.gov.AddDKGMPKReady(ready)
}
// ProposeDKGFinalize propose a DKGFinalize message.
@@ -372,7 +372,7 @@ func (recv *consensusDKGReceiver) ProposeDKGFinalize(final *typesDKG.Finalize) {
return
}
recv.logger.Debug("Calling Governance.AddDKGFinalize", "final", final)
- recv.gov.AddDKGFinalize(final.Round, final)
+ recv.gov.AddDKGFinalize(final)
}
// Consensus implements DEXON Consensus algorithm.
@@ -563,11 +563,6 @@ func newConsensusForRound(
logger: logger,
}
cfgModule := newConfigurationChain(ID, recv, gov, nodeSetCache, db, logger)
- dkg, err := recoverDKGProtocol(ID, recv, initRound, utils.GetDKGThreshold(initConfig), db)
- if err != nil {
- panic(err)
- }
- cfgModule.dkg = dkg
recv.cfgModule = cfgModule
appModule := app
if usingNonBlocking {
@@ -598,6 +593,7 @@ func newConsensusForRound(
processBlockChan: make(chan *types.Block, 1024),
}
con.ctx, con.ctxCancel = context.WithCancel(context.Background())
+ var err error
if con.roundEvent, err = utils.NewRoundEvent(con.ctx, gov, logger, initRound,
initRoundBeginHeight, initBlockHeight, ConfigRoundShift); err != nil {
panic(err)
@@ -634,8 +630,40 @@ func (con *Consensus) prepare(
panic("not implemented yet")
}
}
+ // Measure elapsed time for each handler of round events.
+ elapse := func(what string, lastE utils.RoundEventParam) func() {
+ start := time.Now()
+ con.logger.Info("handle round event",
+ "what", what,
+ "event", lastE)
+ return func() {
+ con.logger.Info("finish round event",
+ "what", what,
+ "event", lastE,
+ "elapse", time.Since(start))
+ }
+ }
+ // Register a round event handler to purge the cached node set. To make
+ // sure every module sees the up-to-date node set, this action must be
+ // taken first.
+ con.roundEvent.Register(func(evts []utils.RoundEventParam) {
+ defer elapse("purge node set", evts[len(evts)-1])()
+ for _, e := range evts {
+ if e.Reset == 0 {
+ continue
+ }
+ con.nodeSetCache.Purge(e.Round + 1)
+ }
+ })
+ // Register round event handler to abort previous running DKG if any.
+ con.roundEvent.Register(func(evts []utils.RoundEventParam) {
+ e := evts[len(evts)-1]
+ defer elapse("abort DKG", e)()
+ con.cfgModule.abortDKG(e.Round+1, e.Reset)
+ })
// Register round event handler to update BA and BC modules.
con.roundEvent.Register(func(evts []utils.RoundEventParam) {
+ defer elapse("append config", evts[len(evts)-1])()
// Always updates newer configs to the later modules first in the flow.
if err := con.bcModule.notifyRoundEvents(evts); err != nil {
panic(err)
@@ -647,11 +675,62 @@ func (con *Consensus) prepare(
}
}
})
+ // Register a round event handler to reset DKG if the DKG set for the
+ // next round failed to set up.
+ con.roundEvent.Register(func(evts []utils.RoundEventParam) {
+ e := evts[len(evts)-1]
+ defer elapse("reset DKG", e)()
+ nextRound := e.Round + 1
+ if nextRound < DKGDelayRound {
+ return
+ }
+ curDKGSet, err := con.nodeSetCache.GetDKGSet(e.Round)
+ if err != nil {
+ con.logger.Error("Error getting DKG set when proposing CRS",
+ "round", e.Round,
+ "error", err)
+ return
+ }
+ if _, exist := curDKGSet[con.ID]; !exist {
+ return
+ }
+ isDKGValid := func() bool {
+ nextConfig := utils.GetConfigWithPanic(con.gov, nextRound,
+ con.logger)
+ if !con.gov.IsDKGFinal(nextRound) {
+ con.logger.Error("Next DKG is not final, reset it",
+ "round", e.Round,
+ "reset", e.Reset)
+ return false
+ }
+ if _, err := typesDKG.NewGroupPublicKey(
+ nextRound,
+ con.gov.DKGMasterPublicKeys(nextRound),
+ con.gov.DKGComplaints(nextRound),
+ utils.GetDKGThreshold(nextConfig)); err != nil {
+ con.logger.Error("Next DKG failed to prepare, reset it",
+ "round", e.Round,
+ "reset", e.Reset,
+ "error", err)
+ return false
+ }
+ return true
+ }
+ con.event.RegisterHeight(e.NextDKGResetHeight(), func(uint64) {
+ if isDKGValid() {
+ return
+ }
+ // Abort any previously running DKG protocol instance.
+ con.cfgModule.abortDKG(nextRound, e.Reset)
+ con.runCRS(e.Round, utils.Rehash(e.CRS, uint(e.Reset+1)), true)
+ })
+ })
// Register round event handler to propose new CRS.
con.roundEvent.Register(func(evts []utils.RoundEventParam) {
// We don't have to propose new CRS during DKG reset, the reset of DKG
// would be done by the DKG set in previous round.
e := evts[len(evts)-1]
+ defer elapse("propose CRS", e)()
if e.Reset != 0 || e.Round < DKGDelayRound {
return
}
@@ -671,13 +750,14 @@ func (con *Consensus) prepare(
con.logger.Debug("CRS already proposed", "round", e.Round+1)
return
}
- con.runCRS(e.Round, e.CRS)
+ con.runCRS(e.Round, e.CRS, false)
})
}
})
// Touch nodeSetCache for next round.
con.roundEvent.Register(func(evts []utils.RoundEventParam) {
e := evts[len(evts)-1]
+ defer elapse("touch node set cache", e)()
if e.Reset != 0 {
return
}
@@ -706,6 +786,7 @@ func (con *Consensus) prepare(
// Trigger round validation method for next period.
con.roundEvent.Register(func(evts []utils.RoundEventParam) {
e := evts[len(evts)-1]
+ defer elapse("next round", e)()
// Register a routine to trigger round events.
con.event.RegisterHeight(e.NextRoundValidationHeight(), func(
blockHeight uint64) {
@@ -715,7 +796,9 @@ func (con *Consensus) prepare(
con.event.RegisterHeight(e.NextDKGRegisterHeight(), func(uint64) {
nextRound := e.Round + 1
if nextRound < DKGDelayRound {
- con.logger.Info("Skip runDKG for round", "round", nextRound)
+ con.logger.Info("Skip runDKG for round",
+ "round", nextRound,
+ "reset", e.Reset)
return
}
// Normally, gov.CRS would return non-nil. Use this in case of
@@ -723,21 +806,27 @@ func (con *Consensus) prepare(
if !checkWithCancel(
con.ctx, 500*time.Millisecond, checkCRS(nextRound)) {
con.logger.Debug("unable to prepare CRS for DKG set",
- "round", nextRound)
+ "round", nextRound,
+ "reset", e.Reset)
return
}
nextDkgSet, err := con.nodeSetCache.GetDKGSet(nextRound)
if err != nil {
con.logger.Error("Error getting DKG set for next round",
"round", nextRound,
+ "reset", e.Reset,
"error", err)
return
}
if _, exist := nextDkgSet[con.ID]; !exist {
- con.logger.Info("Not selected as DKG set", "round", nextRound)
+ con.logger.Info("Not selected as DKG set",
+ "round", nextRound,
+ "reset", e.Reset)
return
}
- con.logger.Info("Selected as DKG set", "round", nextRound)
+ con.logger.Info("Selected as DKG set",
+ "round", nextRound,
+ "reset", e.Reset)
nextConfig := utils.GetConfigWithPanic(con.gov, nextRound,
con.logger)
con.cfgModule.registerDKG(nextRound, e.Reset, utils.GetDKGThreshold(
@@ -825,7 +914,7 @@ func (con *Consensus) runDKG(round, reset uint64, config *types.Config) {
}()
}
-func (con *Consensus) runCRS(round uint64, hash common.Hash) {
+func (con *Consensus) runCRS(round uint64, hash common.Hash, reset bool) {
// Start running next round CRS.
psig, err := con.cfgModule.preparePartialSignature(round, hash)
if err != nil {
@@ -845,10 +934,17 @@ func (con *Consensus) runCRS(round uint64, hash common.Hash) {
if err != nil {
con.logger.Error("Failed to run CRS Tsig", "error", err)
} else {
- con.logger.Debug("Calling Governance.ProposeCRS",
- "round", round+1,
- "crs", hex.EncodeToString(crs))
- con.gov.ProposeCRS(round+1, crs)
+ if reset {
+ con.logger.Debug("Calling Governance.ResetDKG",
+ "round", round+1,
+ "crs", hex.EncodeToString(crs))
+ con.gov.ResetDKG(crs)
+ } else {
+ con.logger.Debug("Calling Governance.ProposeCRS",
+ "round", round+1,
+ "crs", hex.EncodeToString(crs))
+ con.gov.ProposeCRS(round+1, crs)
+ }
}
}
}
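The elapse helper above times each round-event handler by pairing an immediate "start" log with a deferred "finish" closure. A standalone sketch of the idiom (logging details simplified; handler name illustrative):

package main

import (
	"log"
	"time"
)

// elapse logs the start of a handler and returns a closure that logs the
// elapsed time; call it as `defer elapse("what")()` at the top of a handler.
func elapse(what string) func() {
	start := time.Now()
	log.Printf("handle round event: %s", what)
	return func() {
		log.Printf("finish round event: %s elapse=%s", what, time.Since(start))
	}
}

func purgeNodeSet() {
	defer elapse("purge node set")()
	time.Sleep(5 * time.Millisecond) // the handler's actual work
}

func main() {
	purgeNodeSet()
}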
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/db/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/db/interfaces.go
index e95861112..2c32ebb6e 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/db/interfaces.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/db/interfaces.go
@@ -50,12 +50,12 @@ var (
// ErrDKGPrivateKeyDoesNotExist raised when the DKG private key of the
// requested round does not exists.
ErrDKGPrivateKeyDoesNotExist = errors.New("dkg private key does not exists")
- // ErrDKGMasterPrivateSharesExists raised when attempting to save DKG master private shares
+ // ErrDKGProtocolExists raised when attempting to save a DKG protocol
// that is already saved.
- ErrDKGMasterPrivateSharesExists = errors.New("dkg master private shares exists")
- // ErrDKGMasterPrivateSharesDoesNotExist raised when the DKG master private shares of the
+ ErrDKGProtocolExists = errors.New("dkg protocol exists")
+ // ErrDKGProtocolDoesNotExist raised when the DKG protocol of the
// requested round does not exist.
- ErrDKGMasterPrivateSharesDoesNotExist = errors.New("dkg master private shares does not exists")
+ ErrDKGProtocolDoesNotExist = errors.New("dkg protocol does not exists")
)
// Database is the interface for a Database.
@@ -82,7 +82,7 @@ type Reader interface {
// DKG Private Key related methods.
HasDKGPrivateKey(round uint64) (bool, error)
GetDKGPrivateKey(round uint64) (dkg.PrivateKey, error)
- GetDKGMasterPrivateShares(round uint64) (shares dkg.PrivateKeyShares, err error)
+ GetDKGProtocol() (dkgProtocol DKGProtocolInfo, err error)
}
// Writer defines the interface for writing blocks into DB.
@@ -91,7 +91,7 @@ type Writer interface {
PutBlock(block types.Block) error
PutCompactionChainTipInfo(common.Hash, uint64) error
PutDKGPrivateKey(uint64, dkg.PrivateKey) error
- PutOrUpdateDKGMasterPrivateShares(round uint64, shares dkg.PrivateKeyShares) error
+ PutOrUpdateDKGProtocol(dkgProtocol DKGProtocolInfo) error
}
// BlockIterator defines an iterator on blocks hold
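The Reader/Writer change replaces the per-round master-private-shares accessors with a single DKG protocol snapshot. A hedged sketch of how an in-memory implementation of the new pair might behave (DKGProtocolInfo reduced to a Step field for illustration; the real type is defined in the level-db.go diff below):

package main

import (
	"errors"
	"fmt"
)

// DKGProtocolInfo is trimmed to one field for this sketch.
type DKGProtocolInfo struct{ Step uint64 }

var ErrDKGProtocolDoesNotExist = errors.New("dkg protocol does not exist")

// memDB keeps a single protocol snapshot, not one per round.
type memDB struct{ info *DKGProtocolInfo }

func (m *memDB) GetDKGProtocol() (DKGProtocolInfo, error) {
	if m.info == nil {
		return DKGProtocolInfo{}, ErrDKGProtocolDoesNotExist
	}
	return *m.info, nil
}

func (m *memDB) PutOrUpdateDKGProtocol(info DKGProtocolInfo) error {
	m.info = &info // overwrite in place
	return nil
}

func main() {
	db := &memDB{}
	if _, err := db.GetDKGProtocol(); err != nil {
		fmt.Println("no saved run:", err)
	}
	_ = db.PutOrUpdateDKGProtocol(DKGProtocolInfo{Step: 3})
	info, _ := db.GetDKGProtocol()
	fmt.Println("resume from step", info.Step)
}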
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/db/level-db.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/db/level-db.go
index efa1fecbc..88f5801fc 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/db/level-db.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/db/level-db.go
@@ -19,6 +19,7 @@ package db
import (
"encoding/binary"
+ "io"
"github.com/syndtr/goleveldb/leveldb"
@@ -29,10 +30,10 @@ import (
)
var (
- blockKeyPrefix = []byte("b-")
- compactionChainTipInfoKey = []byte("cc-tip")
- dkgPrivateKeyKeyPrefix = []byte("dkg-prvs")
- dkgMasterPrivateSharesPrefix = []byte("dkg-master-private-shares")
+ blockKeyPrefix = []byte("b-")
+ compactionChainTipInfoKey = []byte("cc-tip")
+ dkgPrivateKeyKeyPrefix = []byte("dkg-prvs")
+ dkgProtocolInfoKeyPrefix = []byte("dkg-protocol-info")
)
type compactionChainTipInfo struct {
@@ -40,6 +41,301 @@ type compactionChainTipInfo struct {
Hash common.Hash `json:"hash"`
}
+// DKGProtocolInfo is a serializable snapshot of a DKG protocol instance.
+type DKGProtocolInfo struct {
+ ID types.NodeID
+ Round uint64
+ Threshold uint64
+ IDMap NodeIDToDKGID
+ MpkMap NodeIDToPubShares
+ MasterPrivateShare dkg.PrivateKeyShares
+ IsMasterPrivateShareEmpty bool
+ PrvShares dkg.PrivateKeyShares
+ IsPrvSharesEmpty bool
+ PrvSharesReceived NodeID
+ NodeComplained NodeID
+ AntiComplaintReceived NodeIDToNodeIDs
+ Step uint64
+ Reset uint64
+}
+
+// Equal compares with the target DKGProtocolInfo.
+func (info *DKGProtocolInfo) Equal(target *DKGProtocolInfo) bool {
+ if !info.ID.Equal(target.ID) ||
+ info.Round != target.Round ||
+ info.Threshold != target.Threshold ||
+ info.IsMasterPrivateShareEmpty != target.IsMasterPrivateShareEmpty ||
+ info.IsPrvSharesEmpty != target.IsPrvSharesEmpty ||
+ info.Step != target.Step ||
+ info.Reset != target.Reset ||
+ !info.MasterPrivateShare.Equal(&target.MasterPrivateShare) ||
+ !info.PrvShares.Equal(&target.PrvShares) {
+ return false
+ }
+
+ if len(info.IDMap) != len(target.IDMap) {
+ return false
+ }
+ for k, v := range info.IDMap {
+ tV, exist := target.IDMap[k]
+ if !exist {
+ return false
+ }
+
+ if !v.IsEqual(&tV) {
+ return false
+ }
+ }
+
+ if len(info.MpkMap) != len(target.MpkMap) {
+ return false
+ }
+ for k, v := range info.MpkMap {
+ tV, exist := target.MpkMap[k]
+ if !exist {
+ return false
+ }
+
+ if !v.Equal(tV) {
+ return false
+ }
+ }
+
+ if len(info.PrvSharesReceived) != len(target.PrvSharesReceived) {
+ return false
+ }
+ for k := range info.PrvSharesReceived {
+ _, exist := target.PrvSharesReceived[k]
+ if !exist {
+ return false
+ }
+ }
+
+ if len(info.NodeComplained) != len(target.NodeComplained) {
+ return false
+ }
+ for k := range info.NodeComplained {
+ _, exist := target.NodeComplained[k]
+ if !exist {
+ return false
+ }
+ }
+
+ if len(info.AntiComplaintReceived) != len(target.AntiComplaintReceived) {
+ return false
+ }
+ for k, v := range info.AntiComplaintReceived {
+ tV, exist := target.AntiComplaintReceived[k]
+ if !exist {
+ return false
+ }
+
+ if len(v) != len(tV) {
+ return false
+ }
+ for kk := range v {
+ _, exist := tV[kk]
+ if !exist {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// NodeIDToNodeIDs maps a NodeID to a set of NodeIDs.
+type NodeIDToNodeIDs map[types.NodeID]map[types.NodeID]struct{}
+
+// EncodeRLP implements rlp.Encoder
+func (m NodeIDToNodeIDs) EncodeRLP(w io.Writer) error {
+ var allBytes [][][]byte
+ for k, v := range m {
+ kBytes, err := k.MarshalText()
+ if err != nil {
+ return err
+ }
+ allBytes = append(allBytes, [][]byte{kBytes})
+
+ var vBytes [][]byte
+ for subK := range v {
+ bytes, err := subK.MarshalText()
+ if err != nil {
+ return err
+ }
+ vBytes = append(vBytes, bytes)
+ }
+ allBytes = append(allBytes, vBytes)
+ }
+
+ return rlp.Encode(w, allBytes)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (m *NodeIDToNodeIDs) DecodeRLP(s *rlp.Stream) error {
+ *m = make(NodeIDToNodeIDs)
+ var dec [][][]byte
+ if err := s.Decode(&dec); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(dec); i += 2 {
+ key := types.NodeID{}
+ err := key.UnmarshalText(dec[i][0])
+ if err != nil {
+ return err
+ }
+
+ valueMap := map[types.NodeID]struct{}{}
+ for _, v := range dec[i+1] {
+ value := types.NodeID{}
+ err := value.UnmarshalText(v)
+ if err != nil {
+ return err
+ }
+
+ valueMap[value] = struct{}{}
+ }
+
+ (*m)[key] = valueMap
+ }
+
+ return nil
+}
+
+// NodeID is a set of NodeIDs.
+type NodeID map[types.NodeID]struct{}
+
+// EncodeRLP implements rlp.Encoder
+func (m NodeID) EncodeRLP(w io.Writer) error {
+ var allBytes [][]byte
+ for k := range m {
+ kBytes, err := k.MarshalText()
+ if err != nil {
+ return err
+ }
+ allBytes = append(allBytes, kBytes)
+ }
+
+ return rlp.Encode(w, allBytes)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (m *NodeID) DecodeRLP(s *rlp.Stream) error {
+ *m = make(NodeID)
+ var dec [][]byte
+ if err := s.Decode(&dec); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(dec); i++ {
+ key := types.NodeID{}
+ err := key.UnmarshalText(dec[i])
+ if err != nil {
+ return err
+ }
+
+ (*m)[key] = struct{}{}
+ }
+
+ return nil
+}
+
+// NodeIDToPubShares maps a NodeID to its PublicKeyShares.
+type NodeIDToPubShares map[types.NodeID]*dkg.PublicKeyShares
+
+// EncodeRLP implements rlp.Encoder
+func (m NodeIDToPubShares) EncodeRLP(w io.Writer) error {
+ var allBytes [][]byte
+ for k, v := range m {
+ kBytes, err := k.MarshalText()
+ if err != nil {
+ return err
+ }
+ allBytes = append(allBytes, kBytes)
+
+ bytes, err := rlp.EncodeToBytes(v)
+ if err != nil {
+ return err
+ }
+ allBytes = append(allBytes, bytes)
+ }
+
+ return rlp.Encode(w, allBytes)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (m *NodeIDToPubShares) DecodeRLP(s *rlp.Stream) error {
+ *m = make(NodeIDToPubShares)
+ var dec [][]byte
+ if err := s.Decode(&dec); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(dec); i += 2 {
+ key := types.NodeID{}
+ err := key.UnmarshalText(dec[i])
+ if err != nil {
+ return err
+ }
+
+ value := dkg.PublicKeyShares{}
+ err = rlp.DecodeBytes(dec[i+1], &value)
+ if err != nil {
+ return err
+ }
+
+ (*m)[key] = &value
+ }
+
+ return nil
+}
+
+// NodeIDToDKGID maps a NodeID to its DKG ID.
+type NodeIDToDKGID map[types.NodeID]dkg.ID
+
+// EncodeRLP implements rlp.Encoder
+func (m NodeIDToDKGID) EncodeRLP(w io.Writer) error {
+ var allBytes [][]byte
+ for k, v := range m {
+ kBytes, err := k.MarshalText()
+ if err != nil {
+ return err
+ }
+ allBytes = append(allBytes, kBytes)
+ allBytes = append(allBytes, v.GetLittleEndian())
+ }
+
+ return rlp.Encode(w, allBytes)
+}
+
+// DecodeRLP implements rlp.Decoder
+func (m *NodeIDToDKGID) DecodeRLP(s *rlp.Stream) error {
+ *m = make(NodeIDToDKGID)
+ var dec [][]byte
+ if err := s.Decode(&dec); err != nil {
+ return err
+ }
+
+ for i := 0; i < len(dec); i += 2 {
+ key := types.NodeID{}
+ err := key.UnmarshalText(dec[i])
+ if err != nil {
+ return err
+ }
+
+ value := dkg.ID{}
+ err = value.SetLittleEndian(dec[i+1])
+ if err != nil {
+ return err
+ }
+
+ (*m)[key] = value
+ }
+
+ return nil
+}
+
// LevelDBBackedDB is a leveldb backed DB implementation.
type LevelDBBackedDB struct {
db *leveldb.DB
@@ -189,11 +485,6 @@ func (lvl *LevelDBBackedDB) HasDKGPrivateKey(round uint64) (bool, error) {
return lvl.db.Has(lvl.getDKGPrivateKeyKey(round), nil)
}
-// HasDKGMasterPrivateSharesKey check existence of DKG master private shares of one round.
-func (lvl *LevelDBBackedDB) HasDKGMasterPrivateSharesKey(round uint64) (bool, error) {
- return lvl.db.Has(lvl.getDKGMasterPrivateSharesKey(round), nil)
-}
-
// GetDKGPrivateKey get DKG private key of one round.
func (lvl *LevelDBBackedDB) GetDKGPrivateKey(round uint64) (
prv dkg.PrivateKey, err error) {
@@ -227,30 +518,28 @@ func (lvl *LevelDBBackedDB) PutDKGPrivateKey(
lvl.getDKGPrivateKeyKey(round), marshaled, nil)
}
-// GetDKGMasterPrivateShares get DKG master private shares of one round.
-func (lvl *LevelDBBackedDB) GetDKGMasterPrivateShares(round uint64) (
- shares dkg.PrivateKeyShares, err error) {
- queried, err := lvl.db.Get(lvl.getDKGMasterPrivateSharesKey(round), nil)
+// GetDKGProtocol get DKG protocol.
+func (lvl *LevelDBBackedDB) GetDKGProtocol() (
+ info DKGProtocolInfo, err error) {
+ queried, err := lvl.db.Get(lvl.getDKGProtocolInfoKey(), nil)
if err != nil {
if err == leveldb.ErrNotFound {
- err = ErrDKGMasterPrivateSharesDoesNotExist
+ err = ErrDKGProtocolDoesNotExist
}
return
}
- err = rlp.DecodeBytes(queried, &shares)
+ err = rlp.DecodeBytes(queried, &info)
return
}
-// PutOrUpdateDKGMasterPrivateShares save DKG master private shares of one round.
-func (lvl *LevelDBBackedDB) PutOrUpdateDKGMasterPrivateShares(
- round uint64, shares dkg.PrivateKeyShares) error {
- marshaled, err := rlp.EncodeToBytes(&shares)
+// PutOrUpdateDKGProtocol save DKG protocol.
+func (lvl *LevelDBBackedDB) PutOrUpdateDKGProtocol(info DKGProtocolInfo) error {
+ marshaled, err := rlp.EncodeToBytes(&info)
if err != nil {
return err
}
- return lvl.db.Put(
- lvl.getDKGMasterPrivateSharesKey(round), marshaled, nil)
+ return lvl.db.Put(lvl.getDKGProtocolInfoKey(), marshaled, nil)
}
func (lvl *LevelDBBackedDB) getBlockKey(hash common.Hash) (ret []byte) {
@@ -269,9 +558,8 @@ func (lvl *LevelDBBackedDB) getDKGPrivateKeyKey(
return
}
-func (lvl *LevelDBBackedDB) getDKGMasterPrivateSharesKey(round uint64) (ret []byte) {
- ret = make([]byte, len(dkgMasterPrivateSharesPrefix)+8)
- copy(ret, dkgMasterPrivateSharesPrefix)
- binary.LittleEndian.PutUint64(ret[len(dkgMasterPrivateSharesPrefix):], round)
+func (lvl *LevelDBBackedDB) getDKGProtocolInfoKey() (ret []byte) {
+ ret = make([]byte, len(dkgProtocolInfoKeyPrefix)+8)
+ copy(ret, dkgProtocolInfoKeyPrefix)
return
}
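Since RLP has no native map encoding, the new map types above implement rlp.Encoder/rlp.Decoder by flattening the map into a list of byte strings. A compact, runnable sketch of the same technique for a simple string set, assuming the upstream go-ethereum rlp package (the vendored fork exposes the same interface):

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ethereum/go-ethereum/rlp"
)

// StringSet mirrors the diff's pattern: a map flattened to a list of keys.
type StringSet map[string]struct{}

// EncodeRLP implements rlp.Encoder.
func (s StringSet) EncodeRLP(w io.Writer) error {
	var all [][]byte
	for k := range s {
		all = append(all, []byte(k))
	}
	return rlp.Encode(w, all)
}

// DecodeRLP implements rlp.Decoder.
func (s *StringSet) DecodeRLP(st *rlp.Stream) error {
	*s = make(StringSet)
	var dec [][]byte
	if err := st.Decode(&dec); err != nil {
		return err
	}
	for _, k := range dec {
		(*s)[string(k)] = struct{}{}
	}
	return nil
}

func main() {
	in := StringSet{"a": {}, "b": {}}
	var buf bytes.Buffer
	if err := rlp.Encode(&buf, in); err != nil {
		panic(err)
	}
	var out StringSet
	if err := rlp.DecodeBytes(buf.Bytes(), &out); err != nil {
		panic(err)
	}
	fmt.Println(len(out) == len(in)) // true
}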
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/db/memory.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/db/memory.go
index 548e41e90..971f758d5 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/db/memory.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/db/memory.go
@@ -42,27 +42,26 @@ func (seq *blockSeqIterator) NextBlock() (types.Block, error) {
// MemBackedDB is a memory backed DB implementation.
type MemBackedDB struct {
- blocksLock sync.RWMutex
- blockHashSequence common.Hashes
- blocksByHash map[common.Hash]*types.Block
- compactionChainTipLock sync.RWMutex
- compactionChainTipHash common.Hash
- compactionChainTipHeight uint64
- dkgPrivateKeysLock sync.RWMutex
- dkgPrivateKeys map[uint64]*dkg.PrivateKey
- dkgMasterPrivateSharesLock sync.RWMutex
- dkgMasterPrivateShares map[uint64]*dkg.PrivateKeyShares
- persistantFilePath string
+ blocksLock sync.RWMutex
+ blockHashSequence common.Hashes
+ blocksByHash map[common.Hash]*types.Block
+ compactionChainTipLock sync.RWMutex
+ compactionChainTipHash common.Hash
+ compactionChainTipHeight uint64
+ dkgPrivateKeysLock sync.RWMutex
+ dkgPrivateKeys map[uint64]*dkg.PrivateKey
+ dkgProtocolLock sync.RWMutex
+ dkgProtocolInfo *DKGProtocolInfo
+ persistantFilePath string
}
// NewMemBackedDB initialize a memory-backed database.
func NewMemBackedDB(persistantFilePath ...string) (
dbInst *MemBackedDB, err error) {
dbInst = &MemBackedDB{
- blockHashSequence: common.Hashes{},
- blocksByHash: make(map[common.Hash]*types.Block),
- dkgPrivateKeys: make(map[uint64]*dkg.PrivateKey),
- dkgMasterPrivateShares: make(map[uint64]*dkg.PrivateKeyShares),
+ blockHashSequence: common.Hashes{},
+ blocksByHash: make(map[common.Hash]*types.Block),
+ dkgPrivateKeys: make(map[uint64]*dkg.PrivateKey),
}
if len(persistantFilePath) == 0 || len(persistantFilePath[0]) == 0 {
return
@@ -200,31 +199,23 @@ func (m *MemBackedDB) PutDKGPrivateKey(
return nil
}
-// HasDKGMasterPrivateShares check existence of DKG master private shares of one round.
-func (m *MemBackedDB) HasDKGMasterPrivateShares(round uint64) (bool, error) {
- m.dkgMasterPrivateSharesLock.RLock()
- defer m.dkgMasterPrivateSharesLock.RUnlock()
- _, exists := m.dkgMasterPrivateShares[round]
- return exists, nil
-}
-
-// GetDKGMasterPrivateShares get DKG master private shares of one round.
-func (m *MemBackedDB) GetDKGMasterPrivateShares(round uint64) (
- dkg.PrivateKeyShares, error) {
- m.dkgMasterPrivateSharesLock.RLock()
- defer m.dkgMasterPrivateSharesLock.RUnlock()
- if shares, exists := m.dkgMasterPrivateShares[round]; exists {
- return *shares, nil
+// GetDKGProtocol get DKG protocol.
+func (m *MemBackedDB) GetDKGProtocol() (
+ DKGProtocolInfo, error) {
+ m.dkgProtocolLock.RLock()
+ defer m.dkgProtocolLock.RUnlock()
+ if m.dkgProtocolInfo == nil {
+ return DKGProtocolInfo{}, ErrDKGProtocolDoesNotExist
}
- return dkg.PrivateKeyShares{}, ErrDKGMasterPrivateSharesDoesNotExist
+
+ return *m.dkgProtocolInfo, nil
}
-// PutOrUpdateDKGMasterPrivateShares save DKG master private shares of one round.
-func (m *MemBackedDB) PutOrUpdateDKGMasterPrivateShares(
- round uint64, shares dkg.PrivateKeyShares) error {
- m.dkgMasterPrivateSharesLock.Lock()
- defer m.dkgMasterPrivateSharesLock.Unlock()
- m.dkgMasterPrivateShares[round] = &shares
+// PutOrUpdateDKGProtocol save DKG protocol.
+func (m *MemBackedDB) PutOrUpdateDKGProtocol(dkgProtocol DKGProtocolInfo) error {
+ m.dkgProtocolLock.Lock()
+ defer m.dkgProtocolLock.Unlock()
+ m.dkgProtocolInfo = &dkgProtocol
return nil
}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/dkg-tsig-protocol.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/dkg-tsig-protocol.go
index 82da6dc20..50c3a0bff 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/dkg-tsig-protocol.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/dkg-tsig-protocol.go
@@ -118,6 +118,62 @@ type dkgProtocol struct {
nodeComplained map[types.NodeID]struct{}
// Complaint[from][to]'s anti is saved to antiComplaint[from][to].
antiComplaintReceived map[types.NodeID]map[types.NodeID]struct{}
+ // The completed step in `runDKG`.
+ step int
+}
+
+func (d *dkgProtocol) convertFromInfo(info db.DKGProtocolInfo) {
+ d.ID = info.ID
+ d.idMap = info.IDMap
+ d.round = info.Round
+ d.threshold = int(info.Threshold)
+ d.mpkMap = info.MpkMap
+ d.prvSharesReceived = info.PrvSharesReceived
+ d.nodeComplained = info.NodeComplained
+ d.antiComplaintReceived = info.AntiComplaintReceived
+ d.step = int(info.Step)
+ d.reset = info.Reset
+ if info.IsMasterPrivateShareEmpty {
+ d.masterPrivateShare = nil
+ } else {
+ d.masterPrivateShare = &info.MasterPrivateShare
+ }
+
+ if info.IsPrvSharesEmpty {
+ d.prvShares = nil
+ } else {
+ d.prvShares = &info.PrvShares
+ }
+}
+
+func (d *dkgProtocol) toDKGProtocolInfo() db.DKGProtocolInfo {
+ info := db.DKGProtocolInfo{
+ ID: d.ID,
+ Round: d.round,
+ Threshold: uint64(d.threshold),
+ IDMap: d.idMap,
+ MpkMap: d.mpkMap,
+ PrvSharesReceived: d.prvSharesReceived,
+ NodeComplained: d.nodeComplained,
+ AntiComplaintReceived: d.antiComplaintReceived,
+ Step: uint64(d.step),
+ Reset: d.reset,
+ }
+
+ if d.masterPrivateShare != nil {
+ info.MasterPrivateShare = *d.masterPrivateShare
+ } else {
+ info.IsMasterPrivateShareEmpty = true
+ }
+
+ if d.prvShares != nil {
+ info.PrvShares = *d.prvShares
+ } else {
+ info.IsPrvSharesEmpty = true
+ }
+
+ return info
}
type dkgShareSecret struct {
@@ -197,33 +253,26 @@ func recoverDKGProtocol(
ID types.NodeID,
recv dkgReceiver,
round uint64,
- threshold int,
+ reset uint64,
coreDB db.Database) (*dkgProtocol, error) {
- shares, err := coreDB.GetDKGMasterPrivateShares(round)
+ dkgProtocolInfo, err := coreDB.GetDKGProtocol()
if err != nil {
- if err == db.ErrDKGMasterPrivateSharesDoesNotExist {
+ if err == db.ErrDKGProtocolDoesNotExist {
return nil, nil
}
return nil, err
}
- // TODO(mission): taken resetCount into consideration, we should keep
- // reset count of private shares from DB, and use it to init
- // DKG protocol instance.
- reset := uint64(0)
- return &dkgProtocol{
- ID: ID,
- recv: recv,
- round: round,
- reset: reset,
- threshold: threshold,
- idMap: make(map[types.NodeID]dkg.ID),
- mpkMap: make(map[types.NodeID]*dkg.PublicKeyShares),
- masterPrivateShare: &shares,
- prvShares: dkg.NewEmptyPrivateKeyShares(),
- prvSharesReceived: make(map[types.NodeID]struct{}),
- nodeComplained: make(map[types.NodeID]struct{}),
- antiComplaintReceived: make(map[types.NodeID]map[types.NodeID]struct{}),
- }, nil
+
+ dkgProtocol := dkgProtocol{
+ recv: recv,
+ }
+ dkgProtocol.convertFromInfo(dkgProtocolInfo)
+
+ if dkgProtocol.ID != ID || dkgProtocol.round != round || dkgProtocol.reset != reset {
+ return nil, nil
+ }
+
+ return &dkgProtocol, nil
}
func (d *dkgProtocol) processMasterPublicKeys(
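With the persisted step field above, recoverDKGProtocol can hand runDKG a protocol instance that resumes from the last completed phase. A minimal sketch of that resume loop, with tickers and abort handling omitted and hypothetical names throughout:

package main

import "fmt"

// phase is one resumable step of a multi-phase protocol.
type phase func() error

// runFrom executes phases starting at step, persisting progress after each
// completed phase via save; a restarted process passes the saved step back
// in and skips finished work.
func runFrom(step int, phases []phase, save func(step int) error) error {
	for i := step; i < len(phases); i++ {
		if err := phases[i](); err != nil {
			return err
		}
		if err := save(i + 1); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	saved := 0
	phases := []phase{
		func() error { fmt.Println("phase 1"); return nil },
		func() error { fmt.Println("phase 2"); return nil },
		func() error { fmt.Println("phase 3"); return nil },
	}
	_ = runFrom(saved, phases, func(s int) error { saved = s; return nil })
	fmt.Println("next start step:", saved)
}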
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go
index 707563f03..06838e019 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go
@@ -122,25 +122,25 @@ type Governance interface {
//// DKG-related methods.
// AddDKGComplaint adds a DKGComplaint.
- AddDKGComplaint(round uint64, complaint *typesDKG.Complaint)
+ AddDKGComplaint(complaint *typesDKG.Complaint)
// DKGComplaints gets all the DKGComplaints of round.
DKGComplaints(round uint64) []*typesDKG.Complaint
// AddDKGMasterPublicKey adds a DKGMasterPublicKey.
- AddDKGMasterPublicKey(round uint64, masterPublicKey *typesDKG.MasterPublicKey)
+ AddDKGMasterPublicKey(masterPublicKey *typesDKG.MasterPublicKey)
// DKGMasterPublicKeys gets all the DKGMasterPublicKey of round.
DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey
// AddDKGMPKReady adds a DKG ready message.
- AddDKGMPKReady(round uint64, ready *typesDKG.MPKReady)
+ AddDKGMPKReady(ready *typesDKG.MPKReady)
// IsDKGMPKReady checks if DKG's master public key preparation is ready.
IsDKGMPKReady(round uint64) bool
// AddDKGFinalize adds a DKG finalize message.
- AddDKGFinalize(round uint64, final *typesDKG.Finalize)
+ AddDKGFinalize(final *typesDKG.Finalize)
// IsDKGFinal checks if DKG is final.
IsDKGFinal(round uint64) bool
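The round parameter disappears from the AddDKG* family because each payload (Complaint, MasterPublicKey, MPKReady, Finalize) already carries its Round. A hedged before/after sketch with a stub governance, Complaint trimmed to its Round field:

package main

import "fmt"

// Complaint is reduced to its Round field; the real typesDKG.Complaint
// carries the round inside the payload, which makes a separate round
// argument redundant.
type Complaint struct{ Round uint64 }

type stubGov struct{ complaints map[uint64][]*Complaint }

// AddDKGComplaint now reads the round from the payload itself.
func (g *stubGov) AddDKGComplaint(c *Complaint) {
	g.complaints[c.Round] = append(g.complaints[c.Round], c)
}

func main() {
	g := &stubGov{complaints: map[uint64][]*Complaint{}}
	c := &Complaint{Round: 7}
	g.AddDKGComplaint(c) // was: g.AddDKGComplaint(c.Round, c)
	fmt.Println(len(g.complaints[7]))
}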
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go
index f2f8f9e66..2eeee9d07 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/syncer/consensus.go
@@ -168,6 +168,16 @@ func (con *Consensus) assureBuffering() {
}
// Make sure con.roundEvt stopped before stopping con.agreementModule.
con.waitGroup.Add(1)
+ // Register a round event handler to reset the node set cache; this
+ // handler should have the highest priority.
+ con.roundEvt.Register(func(evts []utils.RoundEventParam) {
+ for _, e := range evts {
+ if e.Reset == 0 {
+ continue
+ }
+ con.nodeSetCache.Purge(e.Round + 1)
+ }
+ })
// Register a round event handler to notify CRS to agreementModule.
con.roundEvt.Register(func(evts []utils.RoundEventParam) {
con.waitGroup.Add(1)
@@ -263,11 +273,14 @@ func (con *Consensus) ForceSync(skip bool) {
con.setupConfigsUntilRound(block.Position.Round + core.ConfigRoundShift - 1)
con.syncedLastBlock = &block
con.stopBuffering()
- con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver(
- context.Background(), con.network.ReceiveChan(),
- func(msg interface{}) {
- con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg)
- })
+ // We might call stopBuffering without calling assureBuffering.
+ if con.dummyCancel == nil {
+ con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver(
+ context.Background(), con.network.ReceiveChan(),
+ func(msg interface{}) {
+ con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg)
+ })
+ }
con.syncedSkipNext = skip
}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go
index 7532d299e..8be503fe3 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/crypto.go
@@ -323,3 +323,12 @@ func VerifyDKGFinalizeSignature(
}
return true, nil
}
+
+// Rehash hashes the hash again and again and again...
+func Rehash(hash common.Hash, count uint) common.Hash {
+ result := hash
+ for i := uint(0); i < count; i++ {
+ result = crypto.Keccak256Hash(result[:])
+ }
+ return result
+}
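Rehash is what consensus.go above uses to derive a replacement CRS on a DKG reset, calling utils.Rehash(e.CRS, uint(e.Reset+1)). A standalone sketch of the same iteration, assuming Keccak-256 via x/crypto's legacy Keccak (the repo uses its own crypto.Keccak256Hash):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// keccak256 stands in for the repo's crypto.Keccak256Hash.
func keccak256(data []byte) []byte {
	h := sha3.NewLegacyKeccak256()
	h.Write(data)
	return h.Sum(nil)
}

// rehash applies Keccak-256 count times, like utils.Rehash above.
func rehash(hash []byte, count uint) []byte {
	result := hash
	for i := uint(0); i < count; i++ {
		result = keccak256(result)
	}
	return result
}

func main() {
	crs := make([]byte, 32) // stand-in for the round's CRS
	// Reset r derives its fresh CRS deterministically as rehash(CRS, r+1).
	fmt.Printf("%x\n", rehash(crs, 2))
}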
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go
index e09120d9a..00901237d 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/nodeset-cache.go
@@ -59,6 +59,9 @@ type NodeSetCacheInterface interface {
}
// NodeSetCache caches node set information.
+//
+// NOTE: this module doesn't handle DKG resetting and can only be used along
+// with utils.RoundEvent.
type NodeSetCache struct {
lock sync.RWMutex
nsIntf NodeSetCacheInterface
@@ -165,6 +168,23 @@ func (cache *NodeSetCache) GetLeaderNode(pos types.Position) (
return IDs.leaderNode[pos.Height], nil
}
+// Purge evicts the cached node set of a specific round.
+func (cache *NodeSetCache) Purge(rID uint64) {
+ cache.lock.Lock()
+ defer cache.lock.Unlock()
+ nIDs, exist := cache.rounds[rID]
+ if !exist {
+ return
+ }
+ for nID := range nIDs.nodeSet.IDs {
+ rec := cache.keyPool[nID]
+ if rec.refCnt--; rec.refCnt == 0 {
+ delete(cache.keyPool, nID)
+ }
+ }
+ delete(cache.rounds, rID)
+}
+
// Touch updates the internal cache of round.
func (cache *NodeSetCache) Touch(round uint64) (err error) {
_, err = cache.update(round)
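Purge above decrements refcounts in the shared key pool and evicts entries that hit zero before dropping the round's record. A reduced sketch of the same bookkeeping, using string IDs instead of node keys:

package main

import "fmt"

// Per-round node sets share entries in a key pool; purging a round
// decrements each member's refcount and deletes entries reaching zero.
type cache struct {
	rounds  map[uint64][]string // round -> node IDs
	keyPool map[string]int      // node ID -> refcount
}

func (c *cache) purge(round uint64) {
	ids, ok := c.rounds[round]
	if !ok {
		return
	}
	for _, id := range ids {
		if c.keyPool[id]--; c.keyPool[id] == 0 {
			delete(c.keyPool, id)
		}
	}
	delete(c.rounds, round)
}

func main() {
	c := &cache{
		rounds:  map[uint64][]string{1: {"a", "b"}, 2: {"b"}},
		keyPool: map[string]int{"a": 1, "b": 2},
	}
	c.purge(1)
	fmt.Println(c.keyPool) // map[b:1]
}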
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-based-config.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-based-config.go
index 3219a1379..4c83d046b 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-based-config.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-based-config.go
@@ -90,7 +90,7 @@ func (c *RoundBasedConfig) RoundEndHeight() uint64 {
return c.roundEndHeight
}
-// AppendTo a config in previous round.
+// AppendTo a config from previous round.
func (c *RoundBasedConfig) AppendTo(other RoundBasedConfig) {
if c.roundID != other.roundID+1 {
panic(fmt.Errorf("round IDs of configs not continuous: %d %d",
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-event.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-event.go
index 1ce877dda..3536a27b3 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-event.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/utils/round-event.go
@@ -93,6 +93,13 @@ func (e RoundEventParam) NextDKGRegisterHeight() uint64 {
return e.BeginHeight + e.Config.RoundLength/2
}
+func (e RoundEventParam) String() string {
+ return fmt.Sprintf("roundEvtParam{Round:%d Reset:%d Height:%d}",
+ e.Round,
+ e.Reset,
+ e.BeginHeight)
+}
+
// roundEventFn defines the fingerprint of handlers of round events.
type roundEventFn func([]RoundEventParam)
@@ -135,6 +142,7 @@ type RoundEvent struct {
roundShift uint64
ctx context.Context
ctxCancel context.CancelFunc
+ retryInterval time.Duration
}
// NewRoundEvent creates a RoundEvent instance.
@@ -144,16 +152,17 @@ func NewRoundEvent(parentCtx context.Context, gov governanceAccessor,
roundShift uint64) (*RoundEvent, error) {
// We need to generate valid ending block height of this round (taken
// DKG reset count into consideration).
+ initConfig := GetConfigWithPanic(gov, initRound, logger)
e := &RoundEvent{
gov: gov,
logger: logger,
lastTriggeredRound: initRound,
roundShift: roundShift,
+ retryInterval: initConfig.LambdaBA,
}
e.ctx, e.ctxCancel = context.WithCancel(parentCtx)
e.config = RoundBasedConfig{}
- e.config.SetupRoundBasedFields(initRound, GetConfigWithPanic(
- gov, initRound, logger))
+ e.config.SetupRoundBasedFields(initRound, initConfig)
e.config.SetRoundBeginHeight(initRoundBeginHeight)
// Make sure the DKG reset count in current governance can cover the initial
// block height.
@@ -175,6 +184,8 @@ func NewRoundEvent(parentCtx context.Context, gov governanceAccessor,
// Register a handler to be called when new round is confirmed or new DKG reset
// is detected.
+//
+// Handlers registered earlier have higher priority.
func (e *RoundEvent) Register(h roundEventFn) {
e.lock.Lock()
defer e.lock.Unlock()
@@ -223,13 +234,13 @@ func (e *RoundEvent) ValidateNextRound(blockHeight uint64) {
h(events)
}
}()
- startRound := e.lastTriggeredRound
+ var (
+ dkgFailed, triggered bool
+ param RoundEventParam
+ beginHeight = blockHeight
+ startRound = e.lastTriggeredRound
+ )
for {
- var (
- dkgFailed, triggered bool
- param RoundEventParam
- beginHeight = blockHeight
- )
for {
param, dkgFailed, triggered = e.check(beginHeight, startRound,
dkgFailed)
@@ -245,7 +256,7 @@ func (e *RoundEvent) ValidateNextRound(blockHeight uint64) {
select {
case <-e.ctx.Done():
return
- case <-time.After(500 * time.Millisecond):
+ case <-time.After(e.retryInterval):
}
}
}
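The retry loop now waits retryInterval, seeded from the round's LambdaBA, instead of a hardcoded 500 ms, and stays cancellable through the event's context. A generic sketch of that poll-with-interval idiom (retryUntil is a hypothetical helper, not from this repo):

package main

import (
	"context"
	"fmt"
	"time"
)

// retryUntil polls check every interval until it succeeds or ctx is done;
// the interval is injected by the caller rather than hardcoded.
func retryUntil(ctx context.Context, interval time.Duration, check func() bool) bool {
	for {
		if check() {
			return true
		}
		select {
		case <-ctx.Done():
			return false
		case <-time.After(interval):
		}
	}
}

func main() {
	n := 0
	ok := retryUntil(context.Background(), 10*time.Millisecond, func() bool {
		n++
		return n >= 3
	})
	fmt.Println(ok, n) // true 3
}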
diff --git a/vendor/vendor.json b/vendor/vendor.json
index a9ccdbf29..e602bcf83 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -141,16 +141,16 @@
{
"checksumSHA1": "8EuKVkP1v/w5fRuuvUaXX5k/F+I=",
"path": "github.com/dexon-foundation/dexon-consensus/common",
- "revision": "4b40c1b8990d2a371a77018feea32d038163f2ec",
- "revisionTime": "2019-03-17T01:56:23Z",
+ "revision": "672d245243b6b85040f96e6638628b86975e9a1b",
+ "revisionTime": "2019-03-20T15:03:36Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
- "checksumSHA1": "Mww2DjHBDmD5YUzUSIwJTa53BIg=",
+ "checksumSHA1": "0DGA7q0IqImUaB6ooQKS8UWrzAM=",
"path": "github.com/dexon-foundation/dexon-consensus/core",
- "revision": "4b40c1b8990d2a371a77018feea32d038163f2ec",
- "revisionTime": "2019-03-17T01:56:23Z",
+ "revision": "672d245243b6b85040f96e6638628b86975e9a1b",
+ "revisionTime": "2019-03-20T15:03:36Z",
"version": "single-chain",
"versionExact": "single-chain"
},
@@ -165,64 +165,64 @@
{
"checksumSHA1": "tQSbYCu5P00lUhKsx3IbBZCuSLY=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto",
- "revision": "4b40c1b8990d2a371a77018feea32d038163f2ec",
- "revisionTime": "2019-03-17T01:56:23Z",
+ "revision": "672d245243b6b85040f96e6638628b86975e9a1b",
+ "revisionTime": "2019-03-20T15:03:36Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
"checksumSHA1": "kC/Tu4is9+jABI/EdvEv7VxwvEo=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg",
- "revision": "4b40c1b8990d2a371a77018feea32d038163f2ec",
- "revisionTime": "2019-03-17T01:56:23Z",
+ "revision": "672d245243b6b85040f96e6638628b86975e9a1b",
+ "revisionTime": "2019-03-20T15:03:36Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
"checksumSHA1": "BhLKK8RveoLaeXc9UyUKMwQqchU=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa",
- "revision": "4b40c1b8990d2a371a77018feea32d038163f2ec",
- "revisionTime": "2019-03-17T01:56:23Z",
+ "revision": "672d245243b6b85040f96e6638628b86975e9a1b",
+ "revisionTime": "2019-03-20T15:03:36Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
- "checksumSHA1": "dQOZYmiikmjWhwkUJc0QmCJnO9o=",
+ "checksumSHA1": "b99zZvbWvBimv1NiPGGF1yQ4dKY=",
"path": "github.com/dexon-foundation/dexon-consensus/core/db",
- "revision": "4b40c1b8990d2a371a77018feea32d038163f2ec",
- "revisionTime": "2019-03-17T01:56:23Z",
+ "revision": "672d245243b6b85040f96e6638628b86975e9a1b",
+ "revisionTime": "2019-03-20T15:03:36Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
- "checksumSHA1": "H0+GIDijBmoic/0HSTZBUwEij5A=",
+ "checksumSHA1": "6gVpBAk9bPqgUo+HkIp2zFz9aF4=",
"path": "github.com/dexon-foundation/dexon-consensus/core/syncer",
- "revision": "4b40c1b8990d2a371a77018feea32d038163f2ec",
- "revisionTime": "2019-03-17T01:56:23Z",
+ "revision": "672d245243b6b85040f96e6638628b86975e9a1b",
+ "revisionTime": "2019-03-20T15:03:36Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
"checksumSHA1": "id8imcgp3SqYhIx0k3Chd0VZrUQ=",
"path": "github.com/dexon-foundation/dexon-consensus/core/types",
- "revision": "4b40c1b8990d2a371a77018feea32d038163f2ec",
- "revisionTime": "2019-03-17T01:56:23Z",
+ "revision": "672d245243b6b85040f96e6638628b86975e9a1b",
+ "revisionTime": "2019-03-20T15:03:36Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
"checksumSHA1": "yoVRmvJDCp/1jSfY7wMt2LBQ9e8=",
"path": "github.com/dexon-foundation/dexon-consensus/core/types/dkg",
- "revision": "4b40c1b8990d2a371a77018feea32d038163f2ec",
- "revisionTime": "2019-03-17T01:56:23Z",
+ "revision": "672d245243b6b85040f96e6638628b86975e9a1b",
+ "revisionTime": "2019-03-20T15:03:36Z",
"version": "single-chain",
"versionExact": "single-chain"
},
{
- "checksumSHA1": "yoUeXa3YR9zZloqS9M08Ts8Ak1A=",
+ "checksumSHA1": "GGbVDVOkB+cxRyRTHRdLfU8+gnk=",
"path": "github.com/dexon-foundation/dexon-consensus/core/utils",
- "revision": "4b40c1b8990d2a371a77018feea32d038163f2ec",
- "revisionTime": "2019-03-17T01:56:23Z",
+ "revision": "672d245243b6b85040f96e6638628b86975e9a1b",
+ "revisionTime": "2019-03-20T15:03:36Z",
"version": "single-chain",
"versionExact": "single-chain"
},