path: root/vendor
author    Wei-Ning Huang <w@dexon.org>  2018-11-06 22:46:19 +0800
committer Wei-Ning Huang <w@dexon.org>  2019-03-12 12:19:09 +0800
commit    49e89d337a4a45df40abba8b9b11cfd1a76cf4ad (patch)
tree      1b9a7a650943bb0cda95a38d2bda36be5c6f157a /vendor
parent    fb06ccc049ebaffabae928e8234b949d3bb53865 (diff)
vendor: sync to latest core
Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/blockpool.go       26
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go      149
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/lattice-data.go    90
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/lattice.go        171
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/nodeset-cache.go    6
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go    3
-rw-r--r--  vendor/vendor.json                                                          36
7 files changed, 259 insertions, 222 deletions
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/blockpool.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/blockpool.go
index 261966719..7861a73f2 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/blockpool.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/blockpool.go
@@ -23,11 +23,10 @@ import (
"github.com/dexon-foundation/dexon-consensus/core/types"
)
-// blockPool is a slice of heap of blocks, indexed by chainID,
-// and the heap is sorted based on heights of blocks.
+// blockPool is a slice of heaps of blocks, indexed by chainID, and each heap
+// is sorted by block height.
type blockPool []types.ByPosition
-// newBlockPool constructs a blockPool.
func newBlockPool(chainNum uint32) (pool blockPool) {
pool = make(blockPool, chainNum)
for _, p := range pool {
@@ -36,12 +35,12 @@ func newBlockPool(chainNum uint32) (pool blockPool) {
return
}
-// resize the pool if new chain is added.
func (p *blockPool) resize(num uint32) {
if uint32(len(*p)) >= num {
+ // Do nothing if the original size is larger.
return
}
- newPool := make([]types.ByPosition, num)
+ newPool := make(blockPool, num)
copy(newPool, *p)
for i := uint32(len(*p)); i < num; i++ {
newChain := types.ByPosition{}
@@ -51,25 +50,20 @@ func (p *blockPool) resize(num uint32) {
*p = newPool
}
-// addBlock adds a block into pending set and make sure these
-// blocks are sorted by height.
+// addBlock adds a block into the pool and keeps blocks sorted by height.
func (p blockPool) addBlock(b *types.Block) {
heap.Push(&p[b.Position.ChainID], b)
}
-// purgeBlocks purge blocks of that chain with less-or-equal height.
-// NOTE: we won't check the validity of 'chainID', the caller should
-// be sure what he is expecting.
+// purgeBlocks purges blocks of a specified chain with less-or-equal heights.
+// NOTE: "chainID" is not checked here; this should be ensured by the caller.
func (p blockPool) purgeBlocks(chainID uint32, height uint64) {
- for {
- if len(p[chainID]) == 0 || p[chainID][0].Position.Height > height {
- break
- }
+ for len(p[chainID]) > 0 && p[chainID][0].Position.Height <= height {
heap.Pop(&p[chainID])
}
}
-// tip get the blocks with lowest height of the chain if any.
+// tip returns the block with the lowest height of a chain, or nil if the chain is empty.
func (p blockPool) tip(chainID uint32) *types.Block {
if len(p[chainID]) == 0 {
return nil
@@ -77,7 +71,7 @@ func (p blockPool) tip(chainID uint32) *types.Block {
return p[chainID][0]
}
-// removeTip removes block with lowest height of the specified chain.
+// removeTip removes block with lowest height of a specified chain.
func (p blockPool) removeTip(chainID uint32) {
if len(p[chainID]) > 0 {
heap.Pop(&p[chainID])
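
The rewrite above folds purgeBlocks' break condition into the for statement. As a reference point, here is a minimal standalone sketch of the same per-chain heap pool pattern; the block and byHeight types are hypothetical stand-ins for types.Block and types.ByPosition, not the actual dexon-consensus types:

package main

import (
	"container/heap"
	"fmt"
)

// block is a hypothetical stand-in for types.Block with only the fields this
// sketch needs.
type block struct {
	chainID uint32
	height  uint64
}

// byHeight is a min-heap of blocks ordered by height (stand-in for
// types.ByPosition).
type byHeight []*block

func (h byHeight) Len() int            { return len(h) }
func (h byHeight) Less(i, j int) bool  { return h[i].height < h[j].height }
func (h byHeight) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *byHeight) Push(x interface{}) { *h = append(*h, x.(*block)) }
func (h *byHeight) Pop() interface{} {
	old := *h
	n := len(old)
	b := old[n-1]
	*h = old[:n-1]
	return b
}

// pool mirrors blockPool: one heap per chain, indexed by chainID.
type pool []byHeight

func (p pool) addBlock(b *block) { heap.Push(&p[b.chainID], b) }

// purgeBlocks pops blocks of the given chain whose height is less than or
// equal to height, using the same condition-in-for-statement loop as above.
func (p pool) purgeBlocks(chainID uint32, height uint64) {
	for len(p[chainID]) > 0 && p[chainID][0].height <= height {
		heap.Pop(&p[chainID])
	}
}

// tip returns the lowest block of a chain, or nil if the chain has no blocks.
func (p pool) tip(chainID uint32) *block {
	if len(p[chainID]) == 0 {
		return nil
	}
	return p[chainID][0]
}

func main() {
	p := make(pool, 1)
	for _, h := range []uint64{3, 1, 2} {
		p.addBlock(&block{chainID: 0, height: h})
	}
	p.purgeBlocks(0, 2)
	fmt.Println(p.tip(0).height) // 3
}

Because heap.Push keeps each chain's heap ordered, the purge loop only ever needs to inspect the heap root.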
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
index 56c757b0d..09bc0a873 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
@@ -105,8 +105,9 @@ func (recv *consensusBAReceiver) ConfirmBlock(
hash common.Hash, votes map[types.NodeID]*types.Vote) {
var block *types.Block
if (hash == common.Hash{}) {
+ aID := recv.agreementModule.agreementID()
recv.consensus.logger.Info("Empty block is confirmed",
- "position", recv.agreementModule.agreementID())
+ "position", &aID)
var err error
block, err = recv.consensus.proposeEmptyBlock(recv.chainID)
if err != nil {
@@ -267,9 +268,8 @@ func (recv *consensusDKGReceiver) ProposeDKGFinalize(final *typesDKG.Finalize) {
// Consensus implements DEXON Consensus algorithm.
type Consensus struct {
// Node Info.
- ID types.NodeID
- authModule *Authenticator
- currentConfig *types.Config
+ ID types.NodeID
+ authModule *Authenticator
// BA.
baModules []*agreement
@@ -330,7 +330,7 @@ func NewConsensus(
debugApp, _ := app.(Debug)
// Init lattice.
lattice := NewLattice(
- dMoment, config, authModule, app, debugApp, db, logger)
+ dMoment, round, config, authModule, app, debugApp, db, logger)
// Init configuration chain.
ID := types.NewNodeID(prv.PublicKey())
recv := &consensusDKGReceiver{
@@ -351,7 +351,6 @@ func NewConsensus(
// Construct Consensus instance.
con := &Consensus{
ID: ID,
- currentConfig: config,
ccModule: newCompactionChain(gov),
lattice: lattice,
app: app,
@@ -406,32 +405,39 @@ func (con *Consensus) Run(initBlock *types.Block) {
con.logger.Debug("Calling Governance.NotifyRoundHeight for genesis rounds",
"block", initBlock)
notifyGenesisRounds(initBlock, con.gov)
+ initRound := initBlock.Position.Round
+ con.logger.Debug("Calling Governance.Configuration", "round", initRound)
+ initConfig := con.gov.Configuration(initRound)
// Setup context.
con.ctx, con.ctxCancel = context.WithCancel(context.Background())
con.ccModule.init(initBlock)
// TODO(jimmy-dexon): change AppendConfig to add config for specific round.
- for i := uint64(0); i < initBlock.Position.Round; i++ {
+ for i := uint64(0); i <= initRound; i++ {
con.logger.Debug("Calling Governance.Configuration", "round", i+1)
cfg := con.gov.Configuration(i + 1)
if err := con.lattice.AppendConfig(i+1, cfg); err != nil {
panic(err)
}
}
+ dkgSet, err := con.nodeSetCache.GetDKGSet(initRound)
+ if err != nil {
+ panic(err)
+ }
con.logger.Debug("Calling Network.ReceiveChan")
go con.processMsg(con.network.ReceiveChan())
// Sleep until dMoment come.
time.Sleep(con.dMoment.Sub(time.Now().UTC()))
- con.cfgModule.registerDKG(con.round, int(con.currentConfig.DKGSetSize)/3+1)
- con.event.RegisterTime(con.dMoment.Add(con.currentConfig.RoundInterval/4),
- func(time.Time) {
- con.runDKGTSIG(con.round)
- })
- round1 := uint64(1)
- con.logger.Debug("Calling Governance.Configuration", "round", round1)
- con.lattice.AppendConfig(round1, con.gov.Configuration(round1))
- con.initialRound(con.dMoment)
- ticks := make([]chan struct{}, 0, con.currentConfig.NumChains)
- for i := uint32(0); i < con.currentConfig.NumChains; i++ {
+ if _, exist := dkgSet[con.ID]; exist {
+ con.logger.Info("Selected as DKG set", "round", initRound)
+ con.cfgModule.registerDKG(initRound, int(initConfig.DKGSetSize)/3+1)
+ con.event.RegisterTime(con.dMoment.Add(initConfig.RoundInterval/4),
+ func(time.Time) {
+ con.runDKGTSIG(initRound, initConfig)
+ })
+ }
+ con.initialRound(con.dMoment, initRound, initConfig)
+ ticks := make([]chan struct{}, 0, initConfig.NumChains)
+ for i := uint32(0); i < initConfig.NumChains; i++ {
tick := make(chan struct{})
ticks = append(ticks, tick)
go con.runBA(i, tick)
@@ -470,8 +476,9 @@ BALoop:
select {
case newNotary := <-recv.restartNotary:
if newNotary {
+ configForNewRound := con.gov.Configuration(recv.round)
recv.changeNotaryTime =
- recv.changeNotaryTime.Add(con.currentConfig.RoundInterval)
+ recv.changeNotaryTime.Add(configForNewRound.RoundInterval)
nodes, err := con.nodeSetCache.GetNodeSet(recv.round)
if err != nil {
panic(err)
@@ -481,7 +488,7 @@ BALoop:
con.logger.Debug("Calling Governance.Configuration",
"round", recv.round)
nIDs = nodes.GetSubSet(
- int(con.gov.Configuration(recv.round).NotarySetSize),
+ int(configForNewRound.NotarySetSize),
types.NewNotarySetTarget(crs, chainID))
}
nextPos := con.lattice.NextPosition(chainID)
@@ -492,7 +499,7 @@ BALoop:
if agreement.pullVotes() {
pos := agreement.agreementID()
con.logger.Debug("Calling Network.PullVotes for syncing votes",
- "position", pos)
+ "position", &pos)
con.network.PullVotes(pos)
}
err := agreement.nextState()
@@ -519,7 +526,7 @@ BALoop:
}
// runDKGTSIG starts running DKG+TSIG protocol.
-func (con *Consensus) runDKGTSIG(round uint64) {
+func (con *Consensus) runDKGTSIG(round uint64, config *types.Config) {
con.dkgReady.L.Lock()
defer con.dkgReady.L.Unlock()
if con.dkgRunning != 0 {
@@ -535,7 +542,7 @@ func (con *Consensus) runDKGTSIG(round uint64) {
con.dkgRunning = 2
DKGTime := time.Now().Sub(startTime)
if DKGTime.Nanoseconds() >=
- con.currentConfig.RoundInterval.Nanoseconds()/2 {
+ config.RoundInterval.Nanoseconds()/2 {
con.logger.Warn("Your computer cannot finish DKG on time!",
"nodeID", con.ID.String())
}
@@ -575,11 +582,10 @@ func (con *Consensus) runDKGTSIG(round uint64) {
}()
}
-func (con *Consensus) runCRS() {
+func (con *Consensus) runCRS(round uint64) {
// Start running next round CRS.
- con.logger.Debug("Calling Governance.CRS", "round", con.round)
- psig, err := con.cfgModule.preparePartialSignature(
- con.round, con.gov.CRS(con.round))
+ con.logger.Debug("Calling Governance.CRS", "round", round)
+ psig, err := con.cfgModule.preparePartialSignature(round, con.gov.CRS(round))
if err != nil {
con.logger.Error("Failed to prepare partial signature", "error", err)
} else if err = con.authModule.SignDKGPartialSignature(psig); err != nil {
@@ -592,62 +598,89 @@ func (con *Consensus) runCRS() {
"round", psig.Round,
"hash", psig.Hash)
con.network.BroadcastDKGPartialSignature(psig)
- con.logger.Debug("Calling Governance.CRS", "round", con.round)
- crs, err := con.cfgModule.runCRSTSig(con.round, con.gov.CRS(con.round))
+ con.logger.Debug("Calling Governance.CRS", "round", round)
+ crs, err := con.cfgModule.runCRSTSig(round, con.gov.CRS(round))
if err != nil {
con.logger.Error("Failed to run CRS Tsig", "error", err)
} else {
con.logger.Debug("Calling Governance.ProposeCRS",
- "round", con.round+1,
+ "round", round+1,
"crs", hex.EncodeToString(crs))
- con.gov.ProposeCRS(con.round+1, crs)
+ con.gov.ProposeCRS(round+1, crs)
}
}
}
-func (con *Consensus) initialRound(startTime time.Time) {
+func (con *Consensus) initialRound(
+ startTime time.Time, round uint64, config *types.Config) {
select {
case <-con.ctx.Done():
return
default:
}
- con.logger.Debug("Calling Governance.Configuration", "round", con.round)
- con.currentConfig = con.gov.Configuration(con.round)
+ curDkgSet, err := con.nodeSetCache.GetDKGSet(round)
+ if err != nil {
+ con.logger.Error("Error getting DKG set", "round", round, "error", err)
+ curDkgSet = make(map[types.NodeID]struct{})
+ }
+ if _, exist := curDkgSet[con.ID]; exist {
+ con.event.RegisterTime(startTime.Add(config.RoundInterval/2),
+ func(time.Time) {
+ go func() {
+ con.runCRS(round)
+ }()
+ })
+ }
- con.event.RegisterTime(startTime.Add(con.currentConfig.RoundInterval/2),
+ con.event.RegisterTime(startTime.Add(config.RoundInterval/2+config.LambdaDKG),
func(time.Time) {
- go func() {
- con.runCRS()
- ticker := newTicker(con.gov, con.round, TickerDKG)
- <-ticker.Tick()
+ go func(nextRound uint64) {
// Normally, gov.CRS would return non-nil. Use this for in case of
// unexpected network fluctuation and ensure the robustness.
- for (con.gov.CRS(con.round+1) == common.Hash{}) {
+ for (con.gov.CRS(nextRound) == common.Hash{}) {
con.logger.Info("CRS is not ready yet. Try again later...",
"nodeID", con.ID)
time.Sleep(500 * time.Millisecond)
}
+ nextDkgSet, err := con.nodeSetCache.GetDKGSet(nextRound)
+ if err != nil {
+ con.logger.Error("Error getting DKG set",
+ "round", nextRound,
+ "error", err)
+ return
+ }
+ if _, exist := nextDkgSet[con.ID]; !exist {
+ return
+ }
+ con.logger.Info("Selected as DKG set", "round", nextRound)
con.cfgModule.registerDKG(
- con.round+1, int(con.currentConfig.DKGSetSize/3)+1)
- }()
- })
- con.event.RegisterTime(startTime.Add(con.currentConfig.RoundInterval*2/3),
- func(time.Time) {
- func() {
- con.dkgReady.L.Lock()
- defer con.dkgReady.L.Unlock()
- con.dkgRunning = 0
- }()
- con.runDKGTSIG(con.round + 1)
+ nextRound, int(config.DKGSetSize/3)+1)
+ con.event.RegisterTime(
+ startTime.Add(config.RoundInterval*2/3),
+ func(time.Time) {
+ func() {
+ con.dkgReady.L.Lock()
+ defer con.dkgReady.L.Unlock()
+ con.dkgRunning = 0
+ }()
+ con.logger.Debug("Calling Governance.Configuration",
+ "round", nextRound)
+ nextConfig := con.gov.Configuration(nextRound)
+ con.runDKGTSIG(nextRound, nextConfig)
+ })
+ }(round + 1)
})
- con.event.RegisterTime(startTime.Add(con.currentConfig.RoundInterval),
+ con.event.RegisterTime(startTime.Add(config.RoundInterval),
func(time.Time) {
// Change round.
- con.round++
+ nextRound := round + 1
con.logger.Debug("Calling Governance.Configuration",
- "round", con.round+1)
- con.lattice.AppendConfig(con.round+1, con.gov.Configuration(con.round+1))
- con.initialRound(startTime.Add(con.currentConfig.RoundInterval))
+ "round", nextRound)
+ nextConfig := con.gov.Configuration(nextRound)
+ con.lattice.AppendConfig(nextRound, nextConfig)
+ con.initialRound(
+ startTime.Add(config.RoundInterval), nextRound, nextConfig)
+ con.round = nextRound
})
}
@@ -812,7 +845,7 @@ func (con *Consensus) ProcessAgreementResult(
agreement := con.baModules[rand.Position.ChainID]
aID := agreement.agreementID()
if rand.Position.Newer(&aID) {
- con.logger.Info("Syncing BA", "position", rand.Position)
+ con.logger.Info("Syncing BA", "position", &rand.Position)
nodes, err := con.nodeSetCache.GetNodeSet(rand.Position.Round)
if err != nil {
return err
@@ -913,7 +946,7 @@ func (con *Consensus) ProcessBlockRandomnessResult(
}
con.logger.Debug("Calling Network.BroadcastRandomnessResult",
"hash", rand.BlockHash,
- "position", rand.Position,
+ "position", &rand.Position,
"randomness", hex.EncodeToString(rand.Randomness))
con.network.BroadcastRandomnessResult(rand)
if err := con.ccModule.processBlockRandomnessResult(rand); err != nil {
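
The consensus.go changes drop the cached currentConfig field in favour of fetching the configuration for an explicit round, and only register for DKG when the node appears in that round's DKG set. Below is a minimal sketch of that pattern; the nodeID, config, governance and dkgSets types are hypothetical stand-ins for types.NodeID, types.Config, the Governance interface and nodeSetCache:

package main

import "fmt"

// nodeID and config are hypothetical stand-ins for types.NodeID and
// types.Config.
type nodeID string

type config struct {
	DKGSetSize uint32
}

// governance returns the configuration for an explicit round, replacing the
// cached currentConfig field removed in the diff.
type governance struct {
	configs map[uint64]*config
}

func (g *governance) Configuration(round uint64) *config { return g.configs[round] }

// dkgSets plays the role of nodeSetCache.GetDKGSet in this sketch.
type dkgSets map[uint64]map[nodeID]struct{}

func (s dkgSets) GetDKGSet(round uint64) map[nodeID]struct{} { return s[round] }

// maybeRegisterDKG mirrors the pattern in Run/initialRound: look up the DKG
// set for the round and only proceed when this node is a member; the
// DKGSetSize/3+1 threshold comes from the diff.
func maybeRegisterDKG(self nodeID, round uint64, gov *governance, sets dkgSets) {
	if _, exist := sets.GetDKGSet(round)[self]; !exist {
		return
	}
	cfg := gov.Configuration(round)
	threshold := int(cfg.DKGSetSize)/3 + 1
	fmt.Printf("selected as DKG set for round %d, threshold %d\n", round, threshold)
}

func main() {
	gov := &governance{configs: map[uint64]*config{0: {DKGSetSize: 7}}}
	sets := dkgSets{0: {"n1": {}, "n2": {}}}
	maybeRegisterDKG("n1", 0, gov, sets) // selected, threshold 3
	maybeRegisterDKG("n9", 0, gov, sets) // not selected: no-op
}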
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice-data.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice-data.go
index 3f8c42bb5..e671c6385 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice-data.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice-data.go
@@ -82,22 +82,26 @@ func (config *latticeDataConfig) fromConfig(roundID uint64, cfg *types.Config) {
config.setupRoundBasedFields(roundID, cfg)
}
-// Check if timestamp of a block is valid according to a reference time.
+// isValidBlockTime checks if timestamp of a block is valid according to a
+// reference time.
func (config *latticeDataConfig) isValidBlockTime(
b *types.Block, ref time.Time) bool {
return !b.Timestamp.Before(ref.Add(config.minBlockTimeInterval))
}
-// isValidGenesisBlockTime check if a timestamp is valid for a genesis block.
+// isValidGenesisBlockTime checks if a timestamp is valid for a genesis block.
func (config *latticeDataConfig) isValidGenesisBlockTime(b *types.Block) bool {
return !b.Timestamp.Before(config.roundBeginTime)
}
// newGenesisLatticeDataConfig constructs a latticeDataConfig instance.
func newGenesisLatticeDataConfig(
- dMoment time.Time, config *types.Config) *latticeDataConfig {
+ dMoment time.Time,
+ round uint64,
+ config *types.Config) *latticeDataConfig {
+
c := &latticeDataConfig{}
- c.fromConfig(0, config)
+ c.fromConfig(round, config)
c.setRoundBeginTime(dMoment)
return c
}
@@ -113,7 +117,7 @@ func newLatticeDataConfig(
// latticeData is a module for storing lattice.
type latticeData struct {
- // we need blockdb to read blocks purged from cache in memory.
+ // BlockDB for reading blocks that were purged from the in-memory cache.
db blockdb.Reader
// chains stores chains' blocks and other info.
chains []*chainStatus
@@ -123,9 +127,10 @@ type latticeData struct {
configs []*latticeDataConfig
}
-// newLatticeData creates a new latticeData struct.
+// newLatticeData creates a new latticeData instance.
func newLatticeData(
db blockdb.Reader, genesisConfig *latticeDataConfig) (data *latticeData) {
+
data = &latticeData{
db: db,
chains: make([]*chainStatus, genesisConfig.numChains),
@@ -163,7 +168,7 @@ func (data *latticeData) checkAckingRelations(b *types.Block) error {
if lastAckPos != nil && !bAck.Position.Newer(lastAckPos) {
return ErrDoubleAck
}
- // Check if ack two blocks on the same chain. This would need
+ // Check if it acks two blocks on the same chain. This would need
// to check after we replace map with slice for acks.
if _, acked := acksByChainID[bAck.Position.ChainID]; acked {
return ErrDuplicatedAckOnOneChain
@@ -174,8 +179,8 @@ func (data *latticeData) checkAckingRelations(b *types.Block) error {
}
func (data *latticeData) sanityCheck(b *types.Block) error {
- // TODO(mission): Check if its proposer is in validator set somewhere,
- // lattice doesn't have to know about node set.
+ // TODO(mission): Check if its proposer is in the validator set somewhere
+ // else; lattice has no knowledge about the node set.
config := data.getConfig(b.Position.Round)
if config == nil {
return ErrInvalidRoundID
@@ -264,8 +269,8 @@ func (data *latticeData) sanityCheck(b *types.Block) error {
return nil
}
-// addBlock processes block, it does sanity check, inserts block into
-// lattice and deletes blocks which will not be used.
+// addBlock processes a block: it performs sanity check, inserts the block
+// into the lattice and deletes blocks which will not be used.
func (data *latticeData) addBlock(
block *types.Block) (deliverable []*types.Block, err error) {
var (
@@ -287,9 +292,8 @@ func (data *latticeData) addBlock(
bAck.Position.Clone()
}
- // Extract blocks that deliverable to total ordering.
- // A block is deliverable to total ordering iff:
- // - All its acking blocks are delivered to total ordering.
+ // Extract deliverable blocks to total ordering. A block is deliverable to
+ // total ordering iff all its acking blocks were delivered to total ordering.
for {
updated = false
for _, status := range data.chains {
@@ -308,8 +312,7 @@ func (data *latticeData) addBlock(
return
}
// Check if this block is outputed or not.
- idx := data.chains[bAck.Position.ChainID].findBlock(
- &bAck.Position)
+ idx := data.chains[bAck.Position.ChainID].findBlock(&bAck.Position)
var ok bool
if idx == -1 {
// Either the block is delivered or not added to chain yet.
@@ -344,12 +347,10 @@ func (data *latticeData) addBlock(
}
// addFinalizedBlock processes block for syncing internal data.
-func (data *latticeData) addFinalizedBlock(
- block *types.Block) (err error) {
+func (data *latticeData) addFinalizedBlock(block *types.Block) (err error) {
var bAck *types.Block
chain := data.chains[block.Position.ChainID]
- if chain.tip != nil && chain.tip.Position.Height >=
- block.Position.Height {
+ if chain.tip != nil && chain.tip.Position.Height >= block.Position.Height {
return
}
chain.nextOutputIndex = 0
@@ -367,13 +368,13 @@ func (data *latticeData) addFinalizedBlock(
return
}
-// prepareBlock helps to setup fields of block based on its ChainID and Round,
+// prepareBlock sets up fields of a block based on its ChainID and Round,
// including:
-// - Acks
-// - Timestamp
-// - ParentHash and Height from parent block. If there is no valid parent block
-// (ex. Newly added chain or bootstrap ), these fields would be setup as
-// genesis block.
+// - Acks
+// - Timestamp
+// - ParentHash and Height from parent block. If there is no valid parent block
+// (e.g. a newly added chain or bootstrap), these fields should be set up as in a
+// genesis block.
func (data *latticeData) prepareBlock(b *types.Block) error {
var (
minTimestamp time.Time
@@ -385,7 +386,7 @@ func (data *latticeData) prepareBlock(b *types.Block) error {
if config = data.getConfig(b.Position.Round); config == nil {
return ErrUnknownRoundID
}
- // When this chain is illegal in this round, reject it.
+ // If chainID is illegal in this round, reject it.
if b.Position.ChainID >= config.numChains {
return ErrInvalidChainID
}
@@ -459,13 +460,13 @@ func (data *latticeData) prepareBlock(b *types.Block) error {
return nil
}
-// prepareEmptyBlock helps to setup fields of block based on its ChainID.
+// prepareEmptyBlock sets up fields of a block based on its ChainID.
// including:
-// - Acks only acking its parent
-// - Timestamp with parent.Timestamp + minBlockProposeInterval
-// - ParentHash and Height from parent block. If there is no valid parent block
-// (ex. Newly added chain or bootstrap ), these fields would be setup as
-// genesis block.
+// - Acks only acking its parent
+// - Timestamp with parent.Timestamp + minBlockProposeInterval
+// - ParentHash and Height from parent block. If there is no valid parent block
+// (e.g. a newly added chain or bootstrap), these fields would be set up as in a
+// genesis block.
func (data *latticeData) prepareEmptyBlock(b *types.Block) {
// emptyBlock has no proposer.
b.ProposerID = types.NodeID{}
@@ -497,7 +498,7 @@ func (data *latticeData) prepareEmptyBlock(b *types.Block) {
}
// TODO(mission): make more abstraction for this method.
-// nextHeight returns the next height for the chain.
+// nextHeight returns the next height of a chain.
func (data *latticeData) nextPosition(chainID uint32) types.Position {
return data.chains[chainID].nextPosition()
}
@@ -522,7 +523,7 @@ func (data *latticeData) purgeBlocks(blocks []*types.Block) error {
return ErrPurgedBlockNotFound
}
delete(data.blockByHash, b.Hash)
- // blocks would be purged in ascending order in position.
+ // Blocks are purged in ascending order by position.
if err := data.chains[b.Position.ChainID].purgeBlock(b); err != nil {
return err
}
@@ -532,18 +533,19 @@ func (data *latticeData) purgeBlocks(blocks []*types.Block) error {
// getConfig get configuration for lattice-data by round ID.
func (data *latticeData) getConfig(round uint64) (config *latticeDataConfig) {
- if round >= uint64(len(data.configs)) {
+ r := data.configs[0].roundID
+ if round < r || round >= r+uint64(len(data.configs)) {
return
}
- return data.configs[round]
+ return data.configs[round-r]
}
-// appendConfig appends a configuration for upcoming round. When you append
-// a config for round R, next time you can only append the config for round R+1.
+// appendConfig appends a configuration for an upcoming round. Appended rounds
+// must be consecutive.
func (data *latticeData) appendConfig(
round uint64, config *types.Config) (err error) {
- // Make sure caller knows which round this config belongs to.
- if round != uint64(len(data.configs)) {
+ // Check that the config's round increases by exactly 1.
+ if round != uint64(len(data.configs))+data.configs[0].roundID {
return ErrRoundNotIncreasing
}
// Set round beginning time.
@@ -584,7 +586,7 @@ type chainStatus struct {
}
// findBlock finds index of block in current pending blocks on this chain.
-// -1 means not found.
+// Returns -1 if not found.
func (s *chainStatus) findBlock(pos *types.Position) (idx int) {
idx = sort.Search(len(s.blocks), func(i int) bool {
return s.blocks[i].Position.Newer(pos) ||
@@ -628,8 +630,8 @@ func (s *chainStatus) nextPosition() types.Position {
}
}
-// purgeBlock purge a block from cache, make sure this block already
-// persists to blockdb.
+// purgeBlock purges a block from the cache; the caller must make sure the
+// block is already saved in blockdb.
func (s *chainStatus) purgeBlock(b *types.Block) error {
if b.Hash != s.blocks[0].Hash || s.nextOutputIndex <= 0 {
return ErrPurgeNotDeliveredBlock
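
getConfig and appendConfig now index the config slice relative to the first stored round ID instead of assuming configs start at round 0. Here is a small sketch of that offset-indexed lookup, using a hypothetical roundConfig type in place of latticeDataConfig:

package main

import (
	"errors"
	"fmt"
)

// roundConfig is a hypothetical stand-in for latticeDataConfig, keeping only
// the round ID this sketch needs.
type roundConfig struct {
	roundID uint64
}

var errRoundNotIncreasing = errors.New("round not increasing")

type lattice struct {
	configs []*roundConfig
}

// getConfig indexes configs by round minus the first stored round, as in the
// diff, so old configs can be dropped without breaking the lookup.
func (l *lattice) getConfig(round uint64) *roundConfig {
	base := l.configs[0].roundID
	if round < base || round >= base+uint64(len(l.configs)) {
		return nil
	}
	return l.configs[round-base]
}

// appendConfig only accepts the next consecutive round.
func (l *lattice) appendConfig(round uint64, cfg *roundConfig) error {
	if round != l.configs[0].roundID+uint64(len(l.configs)) {
		return errRoundNotIncreasing
	}
	l.configs = append(l.configs, cfg)
	return nil
}

func main() {
	l := &lattice{configs: []*roundConfig{{roundID: 3}}}
	fmt.Println(l.getConfig(2) == nil)                       // true, before the first stored round
	fmt.Println(l.appendConfig(4, &roundConfig{roundID: 4})) // <nil>
	fmt.Println(l.appendConfig(6, &roundConfig{roundID: 6})) // round not increasing
}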
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice.go
index af9c3c42f..db13e7eba 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/lattice.go
@@ -49,16 +49,18 @@ type Lattice struct {
// NewLattice constructs an Lattice instance.
func NewLattice(
dMoment time.Time,
+ round uint64,
cfg *types.Config,
authModule *Authenticator,
app Application,
debug Debug,
db blockdb.BlockDatabase,
- logger common.Logger) (s *Lattice) {
+ logger common.Logger) *Lattice {
+
// Create genesis latticeDataConfig.
- dataConfig := newGenesisLatticeDataConfig(dMoment, cfg)
+ dataConfig := newGenesisLatticeDataConfig(dMoment, round, cfg)
toConfig := newGenesisTotalOrderingConfig(dMoment, cfg)
- s = &Lattice{
+ return &Lattice{
authModule: authModule,
app: app,
debug: debug,
@@ -68,51 +70,50 @@ func NewLattice(
ctModule: newConsensusTimestamp(dMoment, 0, cfg.NumChains),
logger: logger,
}
- return
}
-// PrepareBlock setup block's field based on current lattice status.
-func (s *Lattice) PrepareBlock(
+// PrepareBlock sets up a block's fields based on current lattice status.
+func (l *Lattice) PrepareBlock(
b *types.Block, proposeTime time.Time) (err error) {
- s.lock.RLock()
- defer s.lock.RUnlock()
+ l.lock.RLock()
+ defer l.lock.RUnlock()
b.Timestamp = proposeTime
- if err = s.data.prepareBlock(b); err != nil {
+ if err = l.data.prepareBlock(b); err != nil {
return
}
- s.logger.Debug("Calling Application.PreparePayload", "position", b.Position)
- if b.Payload, err = s.app.PreparePayload(b.Position); err != nil {
+ l.logger.Debug("Calling Application.PreparePayload", "position", b.Position)
+ if b.Payload, err = l.app.PreparePayload(b.Position); err != nil {
return
}
- s.logger.Debug("Calling Application.PrepareWitness",
+ l.logger.Debug("Calling Application.PrepareWitness",
"height", b.Witness.Height)
- if b.Witness, err = s.app.PrepareWitness(b.Witness.Height); err != nil {
+ if b.Witness, err = l.app.PrepareWitness(b.Witness.Height); err != nil {
return
}
- if err = s.authModule.SignBlock(b); err != nil {
+ if err = l.authModule.SignBlock(b); err != nil {
return
}
return
}
-// PrepareEmptyBlock setup block's field based on current lattice status.
-func (s *Lattice) PrepareEmptyBlock(b *types.Block) (err error) {
- s.lock.RLock()
- defer s.lock.RUnlock()
- s.data.prepareEmptyBlock(b)
+// PrepareEmptyBlock sets up a block's fields based on current lattice status.
+func (l *Lattice) PrepareEmptyBlock(b *types.Block) (err error) {
+ l.lock.RLock()
+ defer l.lock.RUnlock()
+ l.data.prepareEmptyBlock(b)
if b.Hash, err = hashBlock(b); err != nil {
return
}
return
}
-// SanityCheck check if a block is valid.
+// SanityCheck checks the validity of a block.
//
-// If some acking blocks don't exists, Lattice would help to cache this block
-// and retry when lattice updated in Lattice.ProcessBlock.
-func (s *Lattice) SanityCheck(b *types.Block) (err error) {
+// If any acking block of this block does not exist, Lattice caches this block
+// and retries when Lattice.ProcessBlock is called.
+func (l *Lattice) SanityCheck(b *types.Block) (err error) {
if b.IsEmpty() {
// Only need to verify block's hash.
var hash common.Hash
@@ -124,7 +125,7 @@ func (s *Lattice) SanityCheck(b *types.Block) (err error) {
}
} else {
// Verify block's signature.
- if err = s.authModule.VerifyBlock(b); err != nil {
+ if err = l.authModule.VerifyBlock(b); err != nil {
return
}
}
@@ -139,13 +140,13 @@ func (s *Lattice) SanityCheck(b *types.Block) (err error) {
}
}
if err = func() (err error) {
- s.lock.RLock()
- defer s.lock.RUnlock()
- if err = s.data.sanityCheck(b); err != nil {
+ l.lock.RLock()
+ defer l.lock.RUnlock()
+ if err = l.data.sanityCheck(b); err != nil {
if _, ok := err.(*ErrAckingBlockNotExists); ok {
err = ErrRetrySanityCheckLater
}
- s.logger.Error("Sanity Check failed", "error", err)
+ l.logger.Error("Sanity Check failed", "error", err)
return
}
return
@@ -153,8 +154,8 @@ func (s *Lattice) SanityCheck(b *types.Block) (err error) {
return
}
// Verify data in application layer.
- s.logger.Debug("Calling Application.VerifyBlock", "block", b)
- switch s.app.VerifyBlock(b) {
+ l.logger.Debug("Calling Application.VerifyBlock", "block", b)
+ switch l.app.VerifyBlock(b) {
case types.VerifyInvalidBlock:
err = ErrInvalidBlock
case types.VerifyRetryLater:
@@ -163,31 +164,33 @@ func (s *Lattice) SanityCheck(b *types.Block) (err error) {
return
}
-// addBlockToLattice adds a block into lattice, and deliver blocks with the acks
-// already delivered.
+// addBlockToLattice adds a block into lattice, and delivers blocks with the
+// acks already delivered.
//
-// NOTE: assume the block passed sanity check.
-func (s *Lattice) addBlockToLattice(
+// NOTE: input block should pass sanity check.
+func (l *Lattice) addBlockToLattice(
input *types.Block) (outputBlocks []*types.Block, err error) {
- if tip := s.data.chains[input.Position.ChainID].tip; tip != nil {
+
+ if tip := l.data.chains[input.Position.ChainID].tip; tip != nil {
if !input.Position.Newer(&tip.Position) {
return
}
}
- s.pool.addBlock(input)
- // Replay tips in pool to check their validity.
+ l.pool.addBlock(input)
+ // Replay tips in the pool to check their validity before moving blocks from
+ // the pool to the lattice.
for {
hasOutput := false
- for i := uint32(0); i < uint32(len(s.pool)); i++ {
+ for i := uint32(0); i < uint32(len(l.pool)); i++ {
var tip *types.Block
- if tip = s.pool.tip(i); tip == nil {
+ if tip = l.pool.tip(i); tip == nil {
continue
}
- err = s.data.sanityCheck(tip)
+ err = l.data.sanityCheck(tip)
if err == nil {
var output []*types.Block
- if output, err = s.data.addBlock(tip); err != nil {
- s.logger.Error("Sanity Check failed", "error", err)
+ if output, err = l.data.addBlock(tip); err != nil {
+ l.logger.Error("Sanity Check failed", "error", err)
continue
}
hasOutput = true
@@ -197,7 +200,7 @@ func (s *Lattice) addBlockToLattice(
err = nil
continue
}
- s.pool.removeTip(i)
+ l.pool.removeTip(i)
}
if !hasOutput {
break
@@ -206,13 +209,13 @@ func (s *Lattice) addBlockToLattice(
for _, b := range outputBlocks {
// TODO(jimmy-dexon): change this name of classic DEXON algorithm.
- if s.debug != nil {
- s.debug.StronglyAcked(b.Hash)
+ if l.debug != nil {
+ l.debug.StronglyAcked(b.Hash)
}
- s.logger.Debug("Calling Application.BlockConfirmed", "block", input)
- s.app.BlockConfirmed(*b.Clone())
+ l.logger.Debug("Calling Application.BlockConfirmed", "block", input)
+ l.app.BlockConfirmed(*b.Clone())
// Purge blocks in pool with the same chainID and lower height.
- s.pool.purgeBlocks(b.Position.ChainID, b.Position.Height)
+ l.pool.purgeBlocks(b.Position.ChainID, b.Position.Height)
}
return
@@ -223,7 +226,7 @@ func (s *Lattice) addBlockToLattice(
// would be returned, too.
//
// NOTE: assume the block passed sanity check.
-func (s *Lattice) ProcessBlock(
+func (l *Lattice) ProcessBlock(
input *types.Block) (delivered []*types.Block, err error) {
var (
b *types.Block
@@ -232,10 +235,10 @@ func (s *Lattice) ProcessBlock(
deliveredMode uint32
)
- s.lock.Lock()
- defer s.lock.Unlock()
+ l.lock.Lock()
+ defer l.lock.Unlock()
- if inLattice, err = s.addBlockToLattice(input); err != nil {
+ if inLattice, err = l.addBlockToLattice(input); err != nil {
return
}
@@ -245,7 +248,7 @@ func (s *Lattice) ProcessBlock(
// Perform total ordering for each block added to lattice.
for _, b = range inLattice {
- toDelivered, deliveredMode, err = s.toModule.processBlock(b)
+ toDelivered, deliveredMode, err = l.toModule.processBlock(b)
if err != nil {
// All errors from total ordering is serious, should panic.
panic(err)
@@ -257,11 +260,11 @@ func (s *Lattice) ProcessBlock(
for idx := range toDelivered {
hashes[idx] = toDelivered[idx].Hash
}
- if s.debug != nil {
- s.debug.TotalOrderingDelivered(hashes, deliveredMode)
+ if l.debug != nil {
+ l.debug.TotalOrderingDelivered(hashes, deliveredMode)
}
- // Perform timestamp generation.
- if err = s.ctModule.processBlocks(toDelivered); err != nil {
+ // Run the consensus timestamp module.
+ if err = l.ctModule.processBlocks(toDelivered); err != nil {
return
}
delivered = append(delivered, toDelivered...)
@@ -269,49 +272,47 @@ func (s *Lattice) ProcessBlock(
return
}
-// NextPosition returns expected position of incoming block for that chain.
-func (s *Lattice) NextPosition(chainID uint32) types.Position {
- s.lock.RLock()
- defer s.lock.RUnlock()
-
- return s.data.nextPosition(chainID)
+// NextPosition returns expected position of incoming block for specified chain.
+func (l *Lattice) NextPosition(chainID uint32) types.Position {
+ l.lock.RLock()
+ defer l.lock.RUnlock()
+ return l.data.nextPosition(chainID)
}
-// PurgeBlocks from cache of blocks in memory, this is called when the caller
-// make sure those blocks are saved to db.
-func (s *Lattice) PurgeBlocks(blocks []*types.Block) error {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- return s.data.purgeBlocks(blocks)
+// PurgeBlocks purges cached blocks from memory; it is called after the caller
+// has made sure those blocks are saved in db.
+func (l *Lattice) PurgeBlocks(blocks []*types.Block) error {
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ return l.data.purgeBlocks(blocks)
}
-// AppendConfig add new configs for upcoming rounds. If you add a config for
-// round R, next time you can only add the config for round R+1.
-func (s *Lattice) AppendConfig(round uint64, config *types.Config) (err error) {
- s.lock.Lock()
- defer s.lock.Unlock()
+// AppendConfig adds a new config for an upcoming round. If a config of round r
+// is added, only the config for round r+1 may be added next.
+func (l *Lattice) AppendConfig(round uint64, config *types.Config) (err error) {
+ l.lock.Lock()
+ defer l.lock.Unlock()
- s.pool.resize(config.NumChains)
- if err = s.data.appendConfig(round, config); err != nil {
+ l.pool.resize(config.NumChains)
+ if err = l.data.appendConfig(round, config); err != nil {
return
}
- if err = s.toModule.appendConfig(round, config); err != nil {
+ if err = l.toModule.appendConfig(round, config); err != nil {
return
}
- if err = s.ctModule.appendConfig(round, config); err != nil {
+ if err = l.ctModule.appendConfig(round, config); err != nil {
return
}
return
}
// ProcessFinalizedBlock is used for syncing lattice data.
-func (s *Lattice) ProcessFinalizedBlock(input *types.Block) {
- defer func() { s.retryAdd = true }()
- s.lock.Lock()
- defer s.lock.Unlock()
- if err := s.data.addFinalizedBlock(input); err != nil {
+func (l *Lattice) ProcessFinalizedBlock(b *types.Block) {
+ defer func() { l.retryAdd = true }()
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ if err := l.data.addFinalizedBlock(b); err != nil {
panic(err)
}
- s.pool.purgeBlocks(input.Position.ChainID, input.Position.Height)
+ l.pool.purgeBlocks(b.Position.ChainID, b.Position.Height)
}
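
addBlockToLattice keeps replaying the pool's per-chain tips and moving every tip whose acked blocks are already delivered, until a full pass produces no output. Below is a schematic sketch of just that fixed-point loop shape, with a hypothetical item type and a plain dependency check standing in for the real sanity check and delivery logic:

package main

import "fmt"

// item is a hypothetical stand-in for a block: an ID plus the IDs it acks.
type item struct {
	id   int
	deps []int
}

// drain mirrors the addBlockToLattice loop shape: keep scanning the tips of
// every chain, deliver each tip whose dependencies are all delivered, and stop
// once a full pass produces no output.
func drain(chains [][]*item, delivered map[int]bool) (out []int) {
	for {
		hasOutput := false
		for i := range chains {
			if len(chains[i]) == 0 {
				continue
			}
			tip := chains[i][0]
			ready := true
			for _, d := range tip.deps {
				if !delivered[d] {
					ready = false
					break
				}
			}
			if !ready {
				continue
			}
			delivered[tip.id] = true
			out = append(out, tip.id)
			chains[i] = chains[i][1:]
			hasOutput = true
		}
		if !hasOutput {
			break
		}
	}
	return
}

func main() {
	chains := [][]*item{
		{{id: 2, deps: []int{1}}}, // chain 0: block 2 acks block 1
		{{id: 1}},                 // chain 1: block 1 acks nothing
	}
	fmt.Println(drain(chains, map[int]bool{})) // [1 2]
}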
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/nodeset-cache.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/nodeset-cache.go
index bf7b88d89..26e3d55f9 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/nodeset-cache.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/nodeset-cache.go
@@ -177,6 +177,11 @@ func (cache *NodeSetCache) update(
err = ErrRoundNotReady
return
}
+ crs := cache.nsIntf.CRS(round)
+ if (crs == common.Hash{}) {
+ err = ErrRoundNotReady
+ return
+ }
// Cache new round.
nodeSet := types.NewNodeSet()
for _, key := range keySet {
@@ -192,7 +197,6 @@ func (cache *NodeSetCache) update(
}
}
cfg := cache.nsIntf.Configuration(round)
- crs := cache.nsIntf.CRS(round)
nIDs = &sets{
nodeSet: nodeSet,
notarySet: make([]map[types.NodeID]struct{}, cfg.NumChains),
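
The nodeset-cache.go change fetches the CRS before doing any caching work, so a round whose CRS is still the zero hash fails fast with ErrRoundNotReady. A minimal sketch of that guard, with a hypothetical crsSource standing in for the governance interface behind the cache:

package main

import (
	"errors"
	"fmt"
)

// hash is a hypothetical stand-in for common.Hash.
type hash [32]byte

var errRoundNotReady = errors.New("round is not ready")

// crsSource plays the role of the governance interface behind NodeSetCache in
// this sketch.
type crsSource map[uint64]hash

func (c crsSource) CRS(round uint64) hash { return c[round] }

// update sketches the reordering in the diff: the CRS is fetched and checked
// against the zero hash before any caching work, so an unready round is never
// cached with an empty CRS.
func update(src crsSource, round uint64) (hash, error) {
	crs := src.CRS(round)
	if crs == (hash{}) {
		return hash{}, errRoundNotReady
	}
	// ... build and cache the node set for this round using crs ...
	return crs, nil
}

func main() {
	src := crsSource{1: {0x01}}
	_, err := update(src, 2)
	fmt.Println(err) // round is not ready
	crs, _ := update(src, 1)
	fmt.Println(crs[0]) // 1
}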
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go
index 3222b3c2f..89dfef3fd 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/types/nodeset.go
@@ -100,6 +100,9 @@ func (ns *NodeSet) Clone() *NodeSet {
// GetSubSet returns the subset of given target.
func (ns *NodeSet) GetSubSet(
size int, target SubSetTarget) map[NodeID]struct{} {
+ if size == 0 {
+ return make(map[NodeID]struct{})
+ }
h := rankHeap{}
idx := 0
for nID := range ns.IDs {
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 9f8d288d1..b0f8c0717 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -105,50 +105,50 @@
{
"checksumSHA1": "ev84RyegNbt2Pr/sK26LK9LoQNI=",
"path": "github.com/dexon-foundation/dexon-consensus/common",
- "revision": "f3e03e18d815d92493d3a85ecb3a0048247ecf71",
- "revisionTime": "2018-11-05T10:36:13Z"
+ "revision": "3714ebf2f1054d9984d37b89cf17e885a5856532",
+ "revisionTime": "2018-11-06T08:53:19Z"
},
{
- "checksumSHA1": "mpmQy/NAsKliUPVFaUEW9+vsTe8=",
+ "checksumSHA1": "NxDI5JSfgv3GWC4/0WbIHlYDUVM=",
"path": "github.com/dexon-foundation/dexon-consensus/core",
- "revision": "f3e03e18d815d92493d3a85ecb3a0048247ecf71",
- "revisionTime": "2018-11-05T10:36:13Z"
+ "revision": "3714ebf2f1054d9984d37b89cf17e885a5856532",
+ "revisionTime": "2018-11-06T08:53:19Z"
},
{
"checksumSHA1": "vNsaBvsrXJF+W6K5DCLpgy1rUZY=",
"path": "github.com/dexon-foundation/dexon-consensus/core/blockdb",
- "revision": "f3e03e18d815d92493d3a85ecb3a0048247ecf71",
- "revisionTime": "2018-11-05T10:36:13Z"
+ "revision": "3714ebf2f1054d9984d37b89cf17e885a5856532",
+ "revisionTime": "2018-11-06T08:53:19Z"
},
{
"checksumSHA1": "tQSbYCu5P00lUhKsx3IbBZCuSLY=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto",
- "revision": "f3e03e18d815d92493d3a85ecb3a0048247ecf71",
- "revisionTime": "2018-11-05T10:36:13Z"
+ "revision": "3714ebf2f1054d9984d37b89cf17e885a5856532",
+ "revisionTime": "2018-11-06T08:53:19Z"
},
{
"checksumSHA1": "p2jOAulavUU2xyj018pYPHlj8XA=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg",
- "revision": "f3e03e18d815d92493d3a85ecb3a0048247ecf71",
- "revisionTime": "2018-11-05T10:36:13Z"
+ "revision": "3714ebf2f1054d9984d37b89cf17e885a5856532",
+ "revisionTime": "2018-11-06T08:53:19Z"
},
{
"checksumSHA1": "6Pf6caC8LTNCI7IflFmglKYnxYo=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa",
- "revision": "f3e03e18d815d92493d3a85ecb3a0048247ecf71",
- "revisionTime": "2018-11-05T10:36:13Z"
+ "revision": "3714ebf2f1054d9984d37b89cf17e885a5856532",
+ "revisionTime": "2018-11-06T08:53:19Z"
},
{
- "checksumSHA1": "RlxtJw5enzIY+6PNEyl1q48qKFg=",
+ "checksumSHA1": "o7RiigeU3kIDbZ96Gh95/h/7yZo=",
"path": "github.com/dexon-foundation/dexon-consensus/core/types",
- "revision": "f3e03e18d815d92493d3a85ecb3a0048247ecf71",
- "revisionTime": "2018-11-05T10:36:13Z"
+ "revision": "3714ebf2f1054d9984d37b89cf17e885a5856532",
+ "revisionTime": "2018-11-06T08:53:19Z"
},
{
"checksumSHA1": "ovChyW9OfDGnk/7CDAR+A5vJymc=",
"path": "github.com/dexon-foundation/dexon-consensus/core/types/dkg",
- "revision": "f3e03e18d815d92493d3a85ecb3a0048247ecf71",
- "revisionTime": "2018-11-05T10:36:13Z"
+ "revision": "3714ebf2f1054d9984d37b89cf17e885a5856532",
+ "revisionTime": "2018-11-06T08:53:19Z"
},
{
"checksumSHA1": "TAkwduKZqLyimyTPPWIllZWYFuE=",