author    Jimmy Hu <jimmy.hu@dexon.org>           2018-12-06 13:36:20 +0800
committer Wei-Ning Huang <w@dexon.org>            2019-03-12 12:19:09 +0800
commit    4b1789f7e96c19a516c1068a9d1b019262cb5741 (patch)
tree      a1dbfb97af185eea996ede82f59eb58a1cc51841
parent    af2d4af8f03af72683ba23963b0c432effe7feb6 (diff)
vendor: sync to latest core and fix conflict (#79)
-rw-r--r--  dex/backend.go                                                                3
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/common/logger.go         32
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go    63
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/compaction-chain.go 38
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go       195
-rw-r--r--  vendor/vendor.json                                                           40
6 files changed, 272 insertions, 99 deletions
diff --git a/dex/backend.go b/dex/backend.go
index 4e7def8e4..8153dc2ed 100644
--- a/dex/backend.go
+++ b/dex/backend.go
@@ -23,7 +23,6 @@ import (
dexCore "github.com/dexon-foundation/dexon-consensus/core"
coreEcdsa "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa"
- coreTypes "github.com/dexon-foundation/dexon-consensus/core/types"
"github.com/dexon-foundation/dexon/accounts"
"github.com/dexon-foundation/dexon/consensus"
@@ -251,7 +250,7 @@ func (s *Dexon) Stop() error {
func (s *Dexon) StartProposing() error {
// TODO: Run with the latest confirmed block in compaction chain.
- s.consensus.Run(&coreTypes.Block{})
+ s.consensus.Run()
return nil
}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/common/logger.go b/vendor/github.com/dexon-foundation/dexon-consensus/common/logger.go
index 2eb1e2bd0..29eac3595 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/common/logger.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/common/logger.go
@@ -85,3 +85,35 @@ func (logger *SimpleLogger) Warn(msg string, ctx ...interface{}) {
func (logger *SimpleLogger) Error(msg string, ctx ...interface{}) {
log.Println(composeVargs(msg, ctx)...)
}
+
+// CustomLogger logs everything.
+type CustomLogger struct {
+ logger *log.Logger
+}
+
+// NewCustomLogger creates a new custom logger.
+func NewCustomLogger(logger *log.Logger) *CustomLogger {
+ return &CustomLogger{
+ logger: logger,
+ }
+}
+
+// Debug implements Logger interface.
+func (logger *CustomLogger) Debug(msg string, ctx ...interface{}) {
+ logger.logger.Println(composeVargs(msg, ctx)...)
+}
+
+// Info implements Logger interface.
+func (logger *CustomLogger) Info(msg string, ctx ...interface{}) {
+ logger.logger.Println(composeVargs(msg, ctx)...)
+}
+
+// Warn implements Logger interface.
+func (logger *CustomLogger) Warn(msg string, ctx ...interface{}) {
+ logger.logger.Println(composeVargs(msg, ctx)...)
+}
+
+// Error implements Logger interface.
+func (logger *CustomLogger) Error(msg string, ctx ...interface{}) {
+ logger.logger.Println(composeVargs(msg, ctx)...)
+}
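
For illustration only, a minimal sketch of how a caller might wire the new CustomLogger to a standard-library *log.Logger; the writer, prefix, and flags chosen here are assumptions, not part of this change:

package main

import (
	"log"
	"os"

	"github.com/dexon-foundation/dexon-consensus/common"
)

func main() {
	// Back the consensus logger with a stderr *log.Logger; CustomLogger
	// forwards Debug/Info/Warn/Error to that logger's Println.
	backing := log.New(os.Stderr, "[consensus] ", log.LstdFlags)
	var logger common.Logger = common.NewCustomLogger(backing)
	logger.Info("node started", "round", uint64(0))
}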
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go
index 6f50bfc16..57fb5c549 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement-mgr.go
@@ -89,11 +89,13 @@ type agreementMgr struct {
lattice *Lattice
ctx context.Context
lastEndTime time.Time
+ initRound uint64
configs []*agreementMgrConfig
baModules []*agreement
waitGroup sync.WaitGroup
pendingVotes map[uint64][]*types.Vote
pendingBlocks map[uint64][]*types.Block
+ isRunning bool
// This lock should be used when attempting to:
// - add a new baModule.
@@ -106,7 +108,8 @@ type agreementMgr struct {
lock sync.RWMutex
}
-func newAgreementMgr(con *Consensus, dMoment time.Time) *agreementMgr {
+func newAgreementMgr(con *Consensus, initRound uint64,
+ initRoundBeginTime time.Time) *agreementMgr {
return &agreementMgr{
con: con,
ID: con.ID,
@@ -118,7 +121,33 @@ func newAgreementMgr(con *Consensus, dMoment time.Time) *agreementMgr {
auth: con.authModule,
lattice: con.lattice,
ctx: con.ctx,
- lastEndTime: dMoment,
+ initRound: initRound,
+ lastEndTime: initRoundBeginTime,
+ }
+}
+
+func (mgr *agreementMgr) getConfig(round uint64) *agreementMgrConfig {
+ mgr.lock.RLock()
+ defer mgr.lock.RUnlock()
+ if round < mgr.initRound {
+ panic(ErrRoundOutOfRange)
+ }
+ roundIndex := round - mgr.initRound
+ if roundIndex >= uint64(len(mgr.configs)) {
+ return nil
+ }
+ return mgr.configs[roundIndex]
+}
+
+func (mgr *agreementMgr) run() {
+ mgr.lock.Lock()
+ defer mgr.lock.Unlock()
+ if mgr.isRunning {
+ return
+ }
+ mgr.isRunning = true
+ for i := uint32(0); i < uint32(len(mgr.baModules)); i++ {
+ go mgr.runBA(mgr.initRound, i)
}
}
@@ -126,8 +155,7 @@ func (mgr *agreementMgr) appendConfig(
round uint64, config *types.Config, crs common.Hash) (err error) {
mgr.lock.Lock()
defer mgr.lock.Unlock()
- // TODO(mission): initiate this module from some round > 0.
- if round != uint64(len(mgr.configs)) {
+ if round != uint64(len(mgr.configs))+mgr.initRound {
return ErrRoundNotIncreasing
}
newConfig := &agreementMgrConfig{
@@ -156,7 +184,9 @@ func (mgr *agreementMgr) appendConfig(
// Hacky way to make agreement module self contained.
recv.agreementModule = agrModule
mgr.baModules = append(mgr.baModules, agrModule)
- go mgr.runBA(round, i)
+ if mgr.isRunning {
+ go mgr.runBA(round, i)
+ }
}
return nil
}
@@ -169,7 +199,8 @@ func (mgr *agreementMgr) processVote(v *types.Vote) error {
mgr.logger.Error("Process vote for unknown chain to BA",
"position", &v.Position,
"baChain", len(mgr.baModules),
- "baRound", len(mgr.configs))
+ "baRound", len(mgr.configs),
+ "initRound", mgr.initRound)
return utils.ErrInvalidChainID
}
return mgr.baModules[v.Position.ChainID].processVote(v)
@@ -182,7 +213,8 @@ func (mgr *agreementMgr) processBlock(b *types.Block) error {
mgr.logger.Error("Process block for unknown chain to BA",
"position", &b.Position,
"baChain", len(mgr.baModules),
- "baRound", len(mgr.configs))
+ "baRound", len(mgr.configs),
+ "initRound", mgr.initRound)
return utils.ErrInvalidChainID
}
return mgr.baModules[b.Position.ChainID].processBlock(b)
@@ -196,7 +228,8 @@ func (mgr *agreementMgr) processAgreementResult(
mgr.logger.Error("Process unknown result for unknown chain to BA",
"position", &result.Position,
"baChain", len(mgr.baModules),
- "baRound", len(mgr.configs))
+ "baRound", len(mgr.configs),
+ "initRound", mgr.initRound)
return utils.ErrInvalidChainID
}
agreement := mgr.baModules[result.Position.ChainID]
@@ -218,8 +251,8 @@ func (mgr *agreementMgr) processAgreementResult(
nIDs := nodes.GetSubSet(
int(mgr.gov.Configuration(result.Position.Round).NotarySetSize),
types.NewNotarySetTarget(crs, result.Position.ChainID))
- for _, vote := range result.Votes {
- agreement.processVote(&vote)
+ for key := range result.Votes {
+ agreement.processVote(&result.Votes[key])
}
agreement.restart(nIDs, result.Position, crs)
}
@@ -273,15 +306,7 @@ func (mgr *agreementMgr) runBA(initRound uint64, chainID uint32) {
// Wait until the configuration for next round is ready.
var config *agreementMgrConfig
for {
- config = func() *agreementMgrConfig {
- mgr.lock.RLock()
- defer mgr.lock.RUnlock()
- if nextRound < uint64(len(mgr.configs)) {
- return mgr.configs[nextRound]
- }
- return nil
- }()
- if config != nil {
+ if config = mgr.getConfig(nextRound); config != nil {
break
} else {
mgr.logger.Info("round is not ready", "round", nextRound)
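
The net effect of these agreement-mgr changes is that construction, configuration, and start-up become three separate steps that can begin at any initRound, not only round 0. A rough within-package sketch of the intended ordering (mirroring how consensus.go below drives it; error handling elided):

// configs[0] corresponds to initRound, configs[1] to initRound+1, and so on.
mgr := newAgreementMgr(con, initRound, initRoundBeginTime)
if err := mgr.appendConfig(initRound, initCfg, initCRS); err != nil {
	// e.g. ErrRoundNotIncreasing if rounds are appended out of order
}
// run() is idempotent: it flips isRunning once and launches one runBA
// goroutine per known chain, each starting from initRound.
mgr.run()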
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/compaction-chain.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/compaction-chain.go
index 89ba978d0..dcd99f497 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/compaction-chain.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/compaction-chain.go
@@ -82,7 +82,6 @@ func (cc *compactionChain) init(initBlock *types.Block) {
if initBlock.Finalization.Height == 0 {
cc.chainUnsynced = cc.gov.Configuration(uint64(0)).NumChains
}
- cc.pendingBlocks = append(cc.pendingBlocks, initBlock)
}
func (cc *compactionChain) registerBlock(block *types.Block) {
@@ -111,7 +110,7 @@ func (cc *compactionChain) blockRegisteredNoLock(
}
func (cc *compactionChain) processBlock(block *types.Block) error {
- prevBlock := cc.lastBlock()
+ prevBlock := cc.lastDeliveredBlock()
if prevBlock == nil {
return ErrNotInitiazlied
}
@@ -125,7 +124,7 @@ func (cc *compactionChain) processBlock(block *types.Block) error {
}
func (cc *compactionChain) extractBlocks() []*types.Block {
- prevBlock := cc.lastBlock()
+ prevBlock := cc.lastDeliveredBlock()
// Check if we're synced.
if !func() bool {
@@ -138,9 +137,6 @@ func (cc *compactionChain) extractBlocks() []*types.Block {
if prevBlock.Finalization.Height == 0 {
return cc.chainUnsynced == 0
}
- if prevBlock.Hash != cc.pendingBlocks[0].Hash {
- return false
- }
return true
}() {
return []*types.Block{}
@@ -148,14 +144,12 @@ func (cc *compactionChain) extractBlocks() []*types.Block {
deliveringBlocks := make([]*types.Block, 0)
cc.lock.Lock()
defer cc.lock.Unlock()
- // cc.pendingBlocks[0] will not be popped and will equal to cc.prevBlock.
- for len(cc.pendingBlocks) > 1 &&
- (len(cc.blockRandomness[cc.pendingBlocks[1].Hash]) != 0 ||
- cc.pendingBlocks[1].Position.Round == 0) {
+ var block *types.Block
+ for len(cc.pendingBlocks) > 0 &&
+ (len(cc.blockRandomness[cc.pendingBlocks[0].Hash]) != 0 ||
+ cc.pendingBlocks[0].Position.Round == 0) {
delete(cc.blocks, cc.pendingBlocks[0].Hash)
- cc.pendingBlocks = cc.pendingBlocks[1:]
-
- block := cc.pendingBlocks[0]
+ block, cc.pendingBlocks = cc.pendingBlocks[0], cc.pendingBlocks[1:]
block.Finalization.ParentHash = prevBlock.Hash
block.Finalization.Height = prevBlock.Finalization.Height + 1
if block.Position.Round != 0 {
@@ -165,9 +159,7 @@ func (cc *compactionChain) extractBlocks() []*types.Block {
deliveringBlocks = append(deliveringBlocks, block)
prevBlock = block
}
-
cc.prevBlock = prevBlock
-
return deliveringBlocks
}
@@ -190,7 +182,7 @@ func (cc *compactionChain) verifyRandomness(
}
func (cc *compactionChain) processFinalizedBlock(block *types.Block) error {
- if block.Finalization.Height <= cc.lastBlock().Finalization.Height {
+ if block.Finalization.Height <= cc.lastDeliveredBlock().Finalization.Height {
return nil
}
// Block of round 0 should not have randomness.
@@ -246,8 +238,20 @@ func (cc *compactionChain) purgePending() {
}
}
-func (cc *compactionChain) lastBlock() *types.Block {
+// lastDeliveredBlock returns the last delivered block, or the one used to
+// initialize this module.
+func (cc *compactionChain) lastDeliveredBlock() *types.Block {
cc.lock.RLock()
defer cc.lock.RUnlock()
return cc.prevBlock
}
+
+// lastPendingBlock returns the last pending block.
+func (cc *compactionChain) lastPendingBlock() *types.Block {
+ cc.lock.RLock()
+ defer cc.lock.RUnlock()
+ if len(cc.pendingBlocks) > 0 {
+ return cc.pendingBlocks[0]
+ }
+ return nil
+}
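
After this change the init block is no longer kept as a sentinel in pendingBlocks: prevBlock (exposed via lastDeliveredBlock) tracks the finalized tip, while pendingBlocks holds only blocks still awaiting randomness and delivery. A small within-package sketch of the two accessors, assuming cc is an initialized *compactionChain:

delivered := cc.lastDeliveredBlock() // finalized tip; the init block right after init()
pending := cc.lastPendingBlock()     // oldest block still awaiting delivery, or nil if none
if pending != nil {
	// The next extractBlocks call would assign it delivered.Finalization.Height + 1.
	_ = delivered.Finalization.Height + 1
}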
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
index 253c9a59f..bfe893cd5 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
@@ -58,6 +58,8 @@ var (
"incorrect vote proposer")
ErrCRSNotReady = fmt.Errorf(
"CRS not ready")
+ ErrConfigurationNotReady = fmt.Errorf(
+ "Configuration not ready")
)
// consensusBAReceiver implements agreementReceiver.
@@ -338,11 +340,10 @@ type Consensus struct {
toSyncer *totalOrderingSyncer
// Interfaces.
- db blockdb.BlockDatabase
- app Application
- gov Governance
- network Network
- tickerObj Ticker
+ db blockdb.BlockDatabase
+ app Application
+ gov Governance
+ network Network
// Misc.
dMoment time.Time
@@ -367,15 +368,19 @@ func NewConsensus(
logger common.Logger) *Consensus {
// TODO(w): load latest blockHeight from DB, and use config at that height.
- var round uint64
- logger.Debug("Calling Governance.Configuration", "round", round)
- config := gov.Configuration(round)
nodeSetCache := utils.NewNodeSetCache(gov)
- logger.Debug("Calling Governance.CRS", "round", round)
// Setup auth module.
authModule := NewAuthenticator(prv)
// Check if the application implement Debug interface.
debugApp, _ := app.(Debug)
+ // Get configuration for genesis round.
+ var round uint64
+ logger.Debug("Calling Governance.Configuration", "round", round)
+ config := gov.Configuration(round)
+ if config == nil {
+ logger.Error("Unable to get configuration", "round", round)
+ return nil
+ }
// Init lattice.
lattice := NewLattice(
dMoment, round, config, authModule, app, debugApp, db, logger)
@@ -405,7 +410,6 @@ func NewConsensus(
gov: gov,
db: db,
network: network,
- tickerObj: newTicker(gov, round, TickerBA),
baConfirmedBlock: make(map[common.Hash]chan<- *types.Block),
dkgReady: sync.NewCond(&sync.Mutex{}),
cfgModule: cfgModule,
@@ -416,12 +420,100 @@ func NewConsensus(
logger: logger,
}
con.ctx, con.ctxCancel = context.WithCancel(context.Background())
- con.baMgr = newAgreementMgr(con, dMoment)
+ con.baMgr = newAgreementMgr(con, round, dMoment)
+ if err := con.prepare(&types.Block{}); err != nil {
+ panic(err)
+ }
return con
}
-// Run starts running DEXON Consensus.
-func (con *Consensus) Run(initBlock *types.Block) {
+// NewConsensusFromSyncer constructs a Consensus instance from information
+// provided from syncer.
+//
+// You need to provide the initial block for this newly created Consensus
+// instance to bootstrap with. A proper choice is the last finalized block you
+// delivered to syncer.
+func NewConsensusFromSyncer(
+ initBlock *types.Block,
+ initRoundBeginTime time.Time,
+ app Application,
+ gov Governance,
+ db blockdb.BlockDatabase,
+ networkModule Network,
+ prv crypto.PrivateKey,
+ latticeModule *Lattice,
+ blocks []*types.Block,
+ randomnessResults []*types.BlockRandomnessResult,
+ logger common.Logger) (*Consensus, error) {
+ // Setup the cache for node sets.
+ nodeSetCache := utils.NewNodeSetCache(gov)
+ // Setup auth module.
+ authModule := NewAuthenticator(prv)
+ // Init configuration chain.
+ ID := types.NewNodeID(prv.PublicKey())
+ recv := &consensusDKGReceiver{
+ ID: ID,
+ gov: gov,
+ authModule: authModule,
+ nodeSetCache: nodeSetCache,
+ network: networkModule,
+ logger: logger,
+ }
+ cfgModule := newConfigurationChain(
+ ID,
+ recv,
+ gov,
+ nodeSetCache,
+ logger)
+ recv.cfgModule = cfgModule
+ // Setup Consensus instance.
+ con := &Consensus{
+ ID: ID,
+ ccModule: newCompactionChain(gov),
+ lattice: latticeModule,
+ app: app,
+ gov: gov,
+ db: db,
+ network: networkModule,
+ baConfirmedBlock: make(map[common.Hash]chan<- *types.Block),
+ dkgReady: sync.NewCond(&sync.Mutex{}),
+ cfgModule: cfgModule,
+ dMoment: initRoundBeginTime,
+ nodeSetCache: nodeSetCache,
+ authModule: authModule,
+ event: common.NewEvent(),
+ logger: logger,
+ }
+ con.ctx, con.ctxCancel = context.WithCancel(context.Background())
+ con.baMgr = newAgreementMgr(con, initBlock.Position.Round, initRoundBeginTime)
+ // Bootstrap the consensus instance.
+ if err := con.prepare(initBlock); err != nil {
+ return nil, err
+ }
+ // Dump all BA-confirmed blocks to the consensus instance.
+ for _, b := range blocks {
+ con.app.BlockConfirmed(*b)
+ con.ccModule.registerBlock(b)
+ if err := con.processBlock(b); err != nil {
+ return nil, err
+ }
+ }
+ // Dump all randomness result to the consensus instance.
+ for _, r := range randomnessResults {
+ if err := con.ProcessBlockRandomnessResult(r); err != nil {
+ con.logger.Error("failed to process randomness result when syncing",
+ "result", r)
+ continue
+ }
+ }
+ return con, nil
+}
+
+// prepare the Consensus instance to be ready for blocks after 'initBlock'.
+// 'initBlock' could be either:
+// - an empty block
+// - the last finalized block
+func (con *Consensus) prepare(initBlock *types.Block) error {
// The block passed from full node should be delivered already or known by
// full node. We don't have to notify it.
con.roundToNotify = initBlock.Position.Round + 1
@@ -430,36 +522,33 @@ func (con *Consensus) Run(initBlock *types.Block) {
initConfig := con.gov.Configuration(initRound)
// Setup context.
con.ccModule.init(initBlock)
- // TODO(jimmy-dexon): change AppendConfig to add config for specific round.
- for i := uint64(0); i <= initRound+1; i++ {
- con.logger.Debug("Calling Governance.Configuration", "round", i)
- cfg := con.gov.Configuration(i)
- // 0 round is already given to core.Lattice module when constructing.
- if i > 0 {
- if err := con.lattice.AppendConfig(i, cfg); err != nil {
- panic(err)
- }
- }
- // Corresponding CRS might not be ready for next round to initRound.
- if i < initRound+1 {
- con.logger.Debug("Calling Governance.CRS", "round", i)
- crs := con.gov.CRS(i)
- if (crs == common.Hash{}) {
- panic(ErrCRSNotReady)
- }
- if err := con.baMgr.appendConfig(i, cfg, crs); err != nil {
- panic(err)
- }
- }
+ // Setup agreementMgr module.
+ con.logger.Debug("Calling Governance.Configuration", "round", initRound)
+ initCfg := con.gov.Configuration(initRound)
+ if initCfg == nil {
+ return ErrConfigurationNotReady
}
+ con.logger.Debug("Calling Governance.CRS", "round", initRound)
+ initCRS := con.gov.CRS(initRound)
+ if (initCRS == common.Hash{}) {
+ return ErrCRSNotReady
+ }
+ if err := con.baMgr.appendConfig(initRound, initCfg, initCRS); err != nil {
+ return err
+ }
+ // Setup lattice module.
+ initPlusOneCfg := con.gov.Configuration(initRound + 1)
+ if initPlusOneCfg == nil {
+ return ErrConfigurationNotReady
+ }
+ if err := con.lattice.AppendConfig(initRound+1, initPlusOneCfg); err != nil {
+ return err
+ }
+ // Register events.
dkgSet, err := con.nodeSetCache.GetDKGSet(initRound)
if err != nil {
- panic(err)
+ return err
}
- con.logger.Debug("Calling Network.ReceiveChan")
- go con.processMsg(con.network.ReceiveChan())
- // Sleep until dMoment come.
- time.Sleep(con.dMoment.Sub(time.Now().UTC()))
if _, exist := dkgSet[con.ID]; exist {
con.logger.Info("Selected as DKG set", "round", initRound)
con.cfgModule.registerDKG(initRound, int(initConfig.DKGSetSize)/3+1)
@@ -469,6 +558,18 @@ func (con *Consensus) Run(initBlock *types.Block) {
})
}
con.initialRound(con.dMoment, initRound, initConfig)
+ return nil
+}
+
+// Run starts running DEXON Consensus.
+func (con *Consensus) Run() {
+ // Launch BA routines.
+ con.baMgr.run()
+ // Launch network handler.
+ con.logger.Debug("Calling Network.ReceiveChan")
+ go con.processMsg(con.network.ReceiveChan())
+ // Sleep until dMoment come.
+ time.Sleep(con.dMoment.Sub(time.Now().UTC()))
// Block until done.
select {
case <-con.ctx.Done():
@@ -510,6 +611,15 @@ func (con *Consensus) runCRS(round uint64) {
con.logger.Info("CRS already proposed", "round", round+1)
return
}
+ con.logger.Debug("Calling Governance.IsDKGFinal to check if ready to run CRS",
+ "round", round)
+ for !con.gov.IsDKGFinal(round) {
+ con.logger.Debug("DKG is not ready for running CRS. Retry later...",
+ "round", round)
+ time.Sleep(500 * time.Millisecond)
+ }
+ // Wait some time for DKG to recover private share.
+ time.Sleep(100 * time.Millisecond)
// Start running next round CRS.
con.logger.Debug("Calling Governance.CRS", "round", round)
psig, err := con.cfgModule.preparePartialSignature(round, con.gov.CRS(round))
@@ -705,6 +815,8 @@ MessageLoop:
case *types.BlockRandomnessResult:
if err := con.ProcessBlockRandomnessResult(val); err != nil {
con.logger.Error("Failed to process block randomness result",
+ "hash", val.BlockHash.String()[:6],
+ "position", &val.Position,
"error", err)
}
case *typesDKG.PrivateShare:
@@ -906,8 +1018,9 @@ func (con *Consensus) processBlock(block *types.Block) (err error) {
go con.event.NotifyTime(b.Finalization.Timestamp)
}
deliveredBlocks = con.ccModule.extractBlocks()
- con.logger.Debug("Last block in compaction chain",
- "block", con.ccModule.lastBlock())
+ con.logger.Debug("Last blocks in compaction chain",
+ "delivered", con.ccModule.lastDeliveredBlock(),
+ "pending", con.ccModule.lastPendingBlock())
for _, b := range deliveredBlocks {
if err = con.db.Update(*b); err != nil {
panic(err)
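
To show how the two entry points now fit together, here is an illustrative sketch of handing over from a syncer to a full node; every variable (lastDelivered, roundBeginTime, pendingBlocks, pendingRandomness, and so on) is a hypothetical placeholder for state the caller already holds, not an API defined by this patch:

con, err := core.NewConsensusFromSyncer(
	lastDelivered,     // last finalized block delivered through the syncer
	roundBeginTime,    // begin time of lastDelivered's round
	app, gov, db, network, prv,
	lattice,           // lattice module carried over from the syncer
	pendingBlocks,     // BA-confirmed blocks not yet processed
	pendingRandomness, // randomness results not yet processed
	logger)
if err != nil {
	return err
}
// NewConsensus(...) callers get the same bootstrap via prepare(&types.Block{});
// in both cases Run() only starts the BA routines and the network handler.
go con.Run()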
diff --git a/vendor/vendor.json b/vendor/vendor.json
index a515fb204..0eab5926c 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -103,58 +103,58 @@
"versionExact": "dev"
},
{
- "checksumSHA1": "JQjsCP961LUqOQ9GeYq2rTtrP/I=",
+ "checksumSHA1": "65L1yf+f0OCiLFniljqfRxVdsQA=",
"path": "github.com/dexon-foundation/dexon-consensus/common",
- "revision": "81c3d2d4446b5daee09529f58bc17cad3284edbf",
- "revisionTime": "2018-11-30T09:28:22Z"
+ "revision": "56e872f84131348adbc0861afb3554bba4a8e5db",
+ "revisionTime": "2018-12-05T06:29:54Z"
},
{
- "checksumSHA1": "Y/CtabyOPE1ifc0ZScQzsDLdwB0=",
+ "checksumSHA1": "YWywKLu6YqxtRpXbCls4lRxjgPo=",
"path": "github.com/dexon-foundation/dexon-consensus/core",
- "revision": "81c3d2d4446b5daee09529f58bc17cad3284edbf",
- "revisionTime": "2018-11-30T09:28:22Z"
+ "revision": "56e872f84131348adbc0861afb3554bba4a8e5db",
+ "revisionTime": "2018-12-05T06:29:54Z"
},
{
"checksumSHA1": "v4fKR7uhoyufi6hAVO44cFEb+tY=",
"path": "github.com/dexon-foundation/dexon-consensus/core/blockdb",
- "revision": "81c3d2d4446b5daee09529f58bc17cad3284edbf",
- "revisionTime": "2018-11-30T09:28:22Z"
+ "revision": "56e872f84131348adbc0861afb3554bba4a8e5db",
+ "revisionTime": "2018-12-05T06:29:54Z"
},
{
"checksumSHA1": "tQSbYCu5P00lUhKsx3IbBZCuSLY=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto",
- "revision": "81c3d2d4446b5daee09529f58bc17cad3284edbf",
- "revisionTime": "2018-11-30T09:28:22Z"
+ "revision": "56e872f84131348adbc0861afb3554bba4a8e5db",
+ "revisionTime": "2018-12-05T06:29:54Z"
},
{
"checksumSHA1": "p2jOAulavUU2xyj018pYPHlj8XA=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg",
- "revision": "81c3d2d4446b5daee09529f58bc17cad3284edbf",
- "revisionTime": "2018-11-30T09:28:22Z"
+ "revision": "56e872f84131348adbc0861afb3554bba4a8e5db",
+ "revisionTime": "2018-12-05T06:29:54Z"
},
{
"checksumSHA1": "6Pf6caC8LTNCI7IflFmglKYnxYo=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa",
- "revision": "81c3d2d4446b5daee09529f58bc17cad3284edbf",
- "revisionTime": "2018-11-30T09:28:22Z"
+ "revision": "56e872f84131348adbc0861afb3554bba4a8e5db",
+ "revisionTime": "2018-12-05T06:29:54Z"
},
{
"checksumSHA1": "Z079qQV+aQV9A3kSJ0LbFjx5VO4=",
"path": "github.com/dexon-foundation/dexon-consensus/core/types",
- "revision": "81c3d2d4446b5daee09529f58bc17cad3284edbf",
- "revisionTime": "2018-11-30T09:28:22Z"
+ "revision": "56e872f84131348adbc0861afb3554bba4a8e5db",
+ "revisionTime": "2018-12-05T06:29:54Z"
},
{
"checksumSHA1": "Sn3PAYsblIXmr7gVKDzxnoBPku4=",
"path": "github.com/dexon-foundation/dexon-consensus/core/types/dkg",
- "revision": "81c3d2d4446b5daee09529f58bc17cad3284edbf",
- "revisionTime": "2018-11-30T09:28:22Z"
+ "revision": "56e872f84131348adbc0861afb3554bba4a8e5db",
+ "revisionTime": "2018-12-05T06:29:54Z"
},
{
"checksumSHA1": "pE0L1qyJ7Jyir1SQ6jEsj8U+83U=",
"path": "github.com/dexon-foundation/dexon-consensus/core/utils",
- "revision": "81c3d2d4446b5daee09529f58bc17cad3284edbf",
- "revisionTime": "2018-11-30T09:28:22Z"
+ "revision": "56e872f84131348adbc0861afb3554bba4a8e5db",
+ "revisionTime": "2018-12-05T06:29:54Z"
},
{
"checksumSHA1": "TAkwduKZqLyimyTPPWIllZWYFuE=",