Diffstat (limited to 'vendor')
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/common/event.go  102
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/common/logger.go  87
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/common/types.go  107
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/common/utils.go  14
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement-state.go  170
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement.go  492
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/authenticator.go  148
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/interfaces.go  70
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/level-db.go  127
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/memory.go  179
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockpool.go  85
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/compaction-chain.go  264
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/configuration-chain.go  312
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus-timestamp.go  139
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus.go  1026
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto.go  264
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/constant.go  26
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/dkg.go  560
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/utils.go  71
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa/ecdsa.go  133
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/interfaces.go  48
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/utils.go  80
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/dkg-tsig-protocol.go  578
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/interfaces.go  145
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice-data.go  645
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice.go  317
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/leader-selector.go  136
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/negative-ack.go  211
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/nodeset-cache.go  233
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/nonblocking.go  164
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/round-based-config.go  50
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/ticker.go  77
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/total-ordering-syncer.go  174
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/total-ordering.go  1355
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block-randomness.go  43
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block.go  341
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/config.go  109
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/dkg/dkg.go  194
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/node.go  56
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/nodeset.go  145
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/position.go  74
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/vote.go  65
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus-core/core/utils.go  161
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go  7
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go  26
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go  3
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go  13
-rw-r--r--  vendor/github.com/dexon-foundation/dexon-consensus/core/nonblocking.go  16
-rw-r--r--  vendor/vendor.json  34
49 files changed, 48 insertions, 9828 deletions
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/common/event.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/common/event.go
deleted file mode 100644
index 6c6bf49d4..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/common/event.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package common
-
-import (
- "container/heap"
- "sync"
- "time"
-)
-
-type timeEventFn func(time.Time)
-
-type timeEvent struct {
- t time.Time
- fn timeEventFn
-}
-
-// timeEvents implements a Min-Heap structure.
-type timeEvents []timeEvent
-
-func (h timeEvents) Len() int { return len(h) }
-func (h timeEvents) Less(i, j int) bool { return h[i].t.Before(h[j].t) }
-func (h timeEvents) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
-func (h *timeEvents) Push(x interface{}) {
- *h = append(*h, x.(timeEvent))
-}
-func (h *timeEvents) Pop() interface{} {
- old := *h
- n := len(old)
- x := old[n-1]
- *h = old[0 : n-1]
- return x
-}
-
-// Event implements the Observer pattern.
-type Event struct {
- timeEvents timeEvents
- timeEventsLock sync.Mutex
-}
-
-// NewEvent creates a new event instance.
-func NewEvent() *Event {
- te := timeEvents{}
- heap.Init(&te)
- return &Event{
- timeEvents: te,
- }
-}
-
-// RegisterTime to get notified on and after specific time.
-func (e *Event) RegisterTime(t time.Time, fn timeEventFn) {
- e.timeEventsLock.Lock()
- defer e.timeEventsLock.Unlock()
- heap.Push(&e.timeEvents, timeEvent{
- t: t,
- fn: fn,
- })
-}
-
-// NotifyTime and trigger function callback.
-func (e *Event) NotifyTime(t time.Time) {
- fns := func() (fns []timeEventFn) {
- e.timeEventsLock.Lock()
- defer e.timeEventsLock.Unlock()
- if len(e.timeEvents) == 0 {
- return
- }
- for !t.Before(e.timeEvents[0].t) {
- te := heap.Pop(&e.timeEvents).(timeEvent)
- fns = append(fns, te.fn)
- if len(e.timeEvents) == 0 {
- return
- }
- }
- return
- }()
- for _, fn := range fns {
- fn(t)
- }
-}
-
-// Reset clears all pending events
-func (e *Event) Reset() {
- e.timeEventsLock.Lock()
- defer e.timeEventsLock.Unlock()
- e.timeEvents = timeEvents{}
-}
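The removed common.Event scheduler keeps a min-heap of timed callbacks: RegisterTime queues a callback, and NotifyTime fires every callback whose registered time is not after the notified time. A minimal usage sketch, assuming the vendored import path above:

package main

import (
    "fmt"
    "time"

    "github.com/dexon-foundation/dexon-consensus-core/common"
)

func main() {
    e := common.NewEvent()
    now := time.Now()
    // The callback fires once NotifyTime is called with an equal or later time.
    e.RegisterTime(now.Add(time.Second), func(t time.Time) {
        fmt.Println("fired at", t)
    })
    e.NotifyTime(now)                      // too early, nothing fires
    e.NotifyTime(now.Add(2 * time.Second)) // pops and runs the callback
}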
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/common/logger.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/common/logger.go
deleted file mode 100644
index 2eb1e2bd0..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/common/logger.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package common
-
-import "log"
-
-// Logger defines the way to receive logs from a Consensus instance.
-// NOTE: parameters in 'ctx' should be paired as key-value mappings. For example,
-// to log an error with message:
-// logger.Error("some message", "error", err)
-// which is similar to loggers with context:
-// logger.Error("some message", map[string]interface{}{
-// "error": err,
-// })
-type Logger interface {
- // Debug, Info, Warn and Error log messages at the corresponding level.
- Debug(msg string, ctx ...interface{})
- Info(msg string, ctx ...interface{})
- Warn(msg string, ctx ...interface{})
- Error(msg string, ctx ...interface{})
-}
-
-// NullLogger logs nothing.
-type NullLogger struct{}
-
-// Debug implements Logger interface.
-func (logger *NullLogger) Debug(msg string, ctx ...interface{}) {
-}
-
-// Info implements Logger interface.
-func (logger *NullLogger) Info(msg string, ctx ...interface{}) {
-}
-
-// Warn implements Logger interface.
-func (logger *NullLogger) Warn(msg string, ctx ...interface{}) {
-}
-
-// Error implements Logger interface.
-func (logger *NullLogger) Error(msg string, ctx ...interface{}) {
-}
-
-// SimpleLogger logs everything.
-type SimpleLogger struct{}
-
-// composeVargs packs (msg, ctx...) into a slice that can be passed to log.Println
-func composeVargs(msg string, ctxs []interface{}) []interface{} {
- args := []interface{}{msg}
- for _, c := range ctxs {
- args = append(args, c)
- }
- return args
-}
-
-// Debug implements Logger interface.
-func (logger *SimpleLogger) Debug(msg string, ctx ...interface{}) {
- log.Println(composeVargs(msg, ctx)...)
-}
-
-// Info implements Logger interface.
-func (logger *SimpleLogger) Info(msg string, ctx ...interface{}) {
- log.Println(composeVargs(msg, ctx)...)
-}
-
-// Warn implements Logger interface.
-func (logger *SimpleLogger) Warn(msg string, ctx ...interface{}) {
- log.Println(composeVargs(msg, ctx)...)
-}
-
-// Error implements Logger interface.
-func (logger *SimpleLogger) Error(msg string, ctx ...interface{}) {
- log.Println(composeVargs(msg, ctx)...)
-}
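As the Logger comment above notes, context arguments are flat key/value pairs. A short sketch wiring the bundled SimpleLogger (import path assumed from the vendored package):

package main

import "github.com/dexon-foundation/dexon-consensus-core/common"

func main() {
    var logger common.Logger = &common.SimpleLogger{}
    // Context arguments are passed as alternating keys and values.
    logger.Info("round started", "round", 3, "chainID", 0)
    logger.Error("verification failed", "error", "incorrect signature")
}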
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/common/types.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/common/types.go
deleted file mode 100644
index a5dfab10e..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/common/types.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package common
-
-import (
- "bytes"
- "encoding/hex"
- "sort"
- "time"
-)
-
-const (
- // HashLength is the length of a hash in DEXON.
- HashLength = 32
-)
-
-// Hash is the basic hash type in DEXON.
-type Hash [HashLength]byte
-
-func (h Hash) String() string {
- return hex.EncodeToString([]byte(h[:]))
-}
-
-// Bytes returns the hash as a slice of bytes.
-func (h Hash) Bytes() []byte {
- return h[:]
-}
-
-// Equal compares if two hashes are the same.
-func (h Hash) Equal(hp Hash) bool {
- return h == hp
-}
-
-// Less compares if current hash is lesser.
-func (h Hash) Less(hp Hash) bool {
- return bytes.Compare(h[:], hp[:]) < 0
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-func (h Hash) MarshalText() ([]byte, error) {
- result := make([]byte, hex.EncodedLen(HashLength))
- hex.Encode(result, h[:])
- return result, nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-func (h *Hash) UnmarshalText(text []byte) error {
- _, err := hex.Decode(h[:], text)
- return err
-}
-
-// Hashes is for sorting hashes.
-type Hashes []Hash
-
-func (hs Hashes) Len() int { return len(hs) }
-func (hs Hashes) Less(i, j int) bool { return hs[i].Less(hs[j]) }
-func (hs Hashes) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] }
-
-// SortedHashes is a slice of hashes sorted in ascending order.
-type SortedHashes Hashes
-
-// NewSortedHashes converts a slice of hashes to a sorted one. It's a
-// firewall to prevent us from assigning unsorted hashes to a variable
-// declared as SortedHashes directly.
-func NewSortedHashes(hs Hashes) SortedHashes {
- sort.Sort(hs)
- return SortedHashes(hs)
-}
-
-// ByTime implements sort.Interface for time.Time.
-type ByTime []time.Time
-
-func (t ByTime) Len() int { return len(t) }
-func (t ByTime) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
-func (t ByTime) Less(i, j int) bool { return t[i].Before(t[j]) }
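A brief sketch of the hash helpers above: Less is a lexicographic byte comparison, and NewSortedHashes sorts a Hashes slice in ascending order (import path assumed):

package main

import (
    "fmt"

    "github.com/dexon-foundation/dexon-consensus-core/common"
)

func main() {
    a := common.Hash{0x01}
    b := common.Hash{0x02}
    fmt.Println(a.Less(b)) // true: byte-wise comparison

    sorted := common.NewSortedHashes(common.Hashes{b, a})
    fmt.Println(sorted[0].Equal(a)) // true after ascending sort
}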
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/common/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/common/utils.go
deleted file mode 100644
index 7e89c059d..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/common/utils.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package common
-
-import (
- "math/rand"
-)
-
-// NewRandomHash returns a random Hash-like value.
-func NewRandomHash() Hash {
- x := Hash{}
- for i := 0; i < HashLength; i++ {
- x[i] = byte(rand.Int() % 256)
- }
- return x
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement-state.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement-state.go
deleted file mode 100644
index 77569d549..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement-state.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "fmt"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-// Errors for agreement state module.
-var (
- ErrNoEnoughVoteInPrepareState = fmt.Errorf("no enough vote in prepare state")
- ErrNoEnoughVoteInAckState = fmt.Errorf("no enough vote in ack state")
-)
-
-// agreementStateType is the state of agreement
-type agreementStateType int
-
-// agreementStateType enum.
-const (
- stateInitial agreementStateType = iota
- statePreCommit
- stateCommit
- stateForward
- statePullVote
-)
-
-var nullBlockHash = common.Hash{}
-var skipBlockHash common.Hash
-
-func init() {
- for idx := range skipBlockHash {
- skipBlockHash[idx] = 0xff
- }
-}
-
-type agreementState interface {
- state() agreementStateType
- nextState() (agreementState, error)
- clocks() int
-}
-
-//----- InitialState -----
-type initialState struct {
- a *agreementData
-}
-
-func newInitialState(a *agreementData) *initialState {
- return &initialState{a: a}
-}
-
-func (s *initialState) state() agreementStateType { return stateInitial }
-func (s *initialState) clocks() int { return 0 }
-func (s *initialState) nextState() (agreementState, error) {
- hash := s.a.recv.ProposeBlock()
- s.a.lock.Lock()
- defer s.a.lock.Unlock()
- s.a.recv.ProposeVote(&types.Vote{
- Type: types.VoteInit,
- BlockHash: hash,
- Period: s.a.period,
- })
- return newPreCommitState(s.a), nil
-}
-
-//----- PreCommitState -----
-type preCommitState struct {
- a *agreementData
-}
-
-func newPreCommitState(a *agreementData) *preCommitState {
- return &preCommitState{a: a}
-}
-
-func (s *preCommitState) state() agreementStateType { return statePreCommit }
-func (s *preCommitState) clocks() int { return 2 }
-func (s *preCommitState) nextState() (agreementState, error) {
- s.a.lock.RLock()
- defer s.a.lock.RUnlock()
- hash := s.a.lockValue
- if hash == nullBlockHash {
- hash = s.a.leader.leaderBlockHash()
- }
- s.a.recv.ProposeVote(&types.Vote{
- Type: types.VotePreCom,
- BlockHash: hash,
- Period: s.a.period,
- })
- return newCommitState(s.a), nil
-}
-
-//----- CommitState -----
-type commitState struct {
- a *agreementData
-}
-
-func newCommitState(a *agreementData) *commitState {
- return &commitState{a: a}
-}
-
-func (s *commitState) state() agreementStateType { return stateCommit }
-func (s *commitState) clocks() int { return 2 }
-func (s *commitState) nextState() (agreementState, error) {
- hash, ok := s.a.countVote(s.a.period, types.VotePreCom)
- s.a.lock.Lock()
- defer s.a.lock.Unlock()
- if ok && hash != skipBlockHash {
- s.a.lockValue = hash
- s.a.lockRound = s.a.period
- } else {
- hash = skipBlockHash
- }
- s.a.recv.ProposeVote(&types.Vote{
- Type: types.VoteCom,
- BlockHash: hash,
- Period: s.a.period,
- })
- return newForwardState(s.a), nil
-}
-
-// ----- ForwardState -----
-type forwardState struct {
- a *agreementData
-}
-
-func newForwardState(a *agreementData) *forwardState {
- return &forwardState{a: a}
-}
-
-func (s *forwardState) state() agreementStateType { return stateForward }
-func (s *forwardState) clocks() int { return 4 }
-
-func (s *forwardState) nextState() (agreementState, error) {
- return newPullVoteState(s.a), nil
-}
-
-// ----- PullVoteState -----
-// pullVoteState is a special state to uphold the consensus algorithm's
-// assumption that every vote will eventually arrive at all nodes.
-type pullVoteState struct {
- a *agreementData
-}
-
-func newPullVoteState(a *agreementData) *pullVoteState {
- return &pullVoteState{a: a}
-}
-
-func (s *pullVoteState) state() agreementStateType { return statePullVote }
-func (s *pullVoteState) clocks() int { return 4 }
-
-func (s *pullVoteState) nextState() (agreementState, error) {
- return s, nil
-}
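The states above are consumed by a clock-driven loop: each state is held for state.clocks() ticks before nextState() advances it. A hedged, package-internal sketch of such a driver (the real loop lives in agreement.go/consensus.go; the tick channel here is illustrative):

// runState is illustrative only: wait the number of ticks the state asks for,
// then advance to whatever nextState returns.
func runState(s agreementState, tick <-chan struct{}) (agreementState, error) {
    for i := 0; i < s.clocks(); i++ {
        <-tick
    }
    return s.nextState()
}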
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement.go
deleted file mode 100644
index 8741baf10..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/agreement.go
+++ /dev/null
@@ -1,492 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "fmt"
- "math"
- "sync"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-// Errors for agreement module.
-var (
- ErrNotInNotarySet = fmt.Errorf("not in notary set")
- ErrIncorrectVoteSignature = fmt.Errorf("incorrect vote signature")
-)
-
-// ErrFork for fork error in agreement.
-type ErrFork struct {
- nID types.NodeID
- old, new common.Hash
-}
-
-func (e *ErrFork) Error() string {
- return fmt.Sprintf("fork is found for %s, old %s, new %s",
- e.nID.String(), e.old, e.new)
-}
-
-// ErrForkVote for fork vote error in agreement.
-type ErrForkVote struct {
- nID types.NodeID
- old, new *types.Vote
-}
-
-func (e *ErrForkVote) Error() string {
- return fmt.Sprintf("fork vote is found for %s, old %s, new %s",
- e.nID.String(), e.old, e.new)
-}
-
-func newVoteListMap() []map[types.NodeID]*types.Vote {
- listMap := make([]map[types.NodeID]*types.Vote, types.MaxVoteType)
- for idx := range listMap {
- listMap[idx] = make(map[types.NodeID]*types.Vote)
- }
- return listMap
-}
-
-// agreementReceiver is the interface receiving agreement event.
-type agreementReceiver interface {
- ProposeVote(vote *types.Vote)
- ProposeBlock() common.Hash
- // ConfirmBlock is called with the lock held. Users can safely access all data
- // within the agreement module.
- ConfirmBlock(common.Hash, map[types.NodeID]*types.Vote)
- PullBlocks(common.Hashes)
-}
-
-type pendingBlock struct {
- block *types.Block
- receivedTime time.Time
-}
-
-type pendingVote struct {
- vote *types.Vote
- receivedTime time.Time
-}
-
-// agreementData is the data for agreementState.
-type agreementData struct {
- recv agreementReceiver
-
- ID types.NodeID
- leader *leaderSelector
- lockValue common.Hash
- lockRound uint64
- period uint64
- requiredVote int
- votes map[uint64][]map[types.NodeID]*types.Vote
- lock sync.RWMutex
- blocks map[types.NodeID]*types.Block
- blocksLock sync.Mutex
-}
-
-// agreement is the agreement protocol described in the Crypto Shuffle Algorithm.
-type agreement struct {
- state agreementState
- data *agreementData
- aID types.Position
- notarySet map[types.NodeID]struct{}
- hasOutput bool
- lock sync.RWMutex
- pendingBlock []pendingBlock
- pendingVote []pendingVote
- candidateBlock map[common.Hash]*types.Block
- fastForward chan uint64
- authModule *Authenticator
-}
-
-// newAgreement creates an agreement instance.
-func newAgreement(
- ID types.NodeID,
- recv agreementReceiver,
- notarySet map[types.NodeID]struct{},
- leader *leaderSelector,
- authModule *Authenticator) *agreement {
- agreement := &agreement{
- data: &agreementData{
- recv: recv,
- ID: ID,
- leader: leader,
- },
- candidateBlock: make(map[common.Hash]*types.Block),
- fastForward: make(chan uint64, 1),
- authModule: authModule,
- }
- agreement.stop()
- return agreement
-}
-
-// restart the agreement
-func (a *agreement) restart(
- notarySet map[types.NodeID]struct{}, aID types.Position) {
-
- func() {
- a.lock.Lock()
- defer a.lock.Unlock()
- a.data.lock.Lock()
- defer a.data.lock.Unlock()
- a.data.blocksLock.Lock()
- defer a.data.blocksLock.Unlock()
- a.data.votes = make(map[uint64][]map[types.NodeID]*types.Vote)
- a.data.votes[1] = newVoteListMap()
- a.data.period = 1
- a.data.blocks = make(map[types.NodeID]*types.Block)
- a.data.requiredVote = len(notarySet)/3*2 + 1
- a.data.leader.restart()
- a.data.lockValue = nullBlockHash
- a.data.lockRound = 1
- a.fastForward = make(chan uint64, 1)
- a.hasOutput = false
- a.state = newInitialState(a.data)
- a.notarySet = notarySet
- a.candidateBlock = make(map[common.Hash]*types.Block)
- a.aID = *aID.Clone()
- }()
-
- if isStop(aID) {
- return
- }
-
- expireTime := time.Now().Add(-10 * time.Second)
- replayBlock := make([]*types.Block, 0)
- func() {
- a.lock.Lock()
- defer a.lock.Unlock()
- newPendingBlock := make([]pendingBlock, 0)
- for _, pending := range a.pendingBlock {
- if aID.Newer(&pending.block.Position) {
- continue
- } else if pending.block.Position == aID {
- replayBlock = append(replayBlock, pending.block)
- } else if pending.receivedTime.After(expireTime) {
- newPendingBlock = append(newPendingBlock, pending)
- }
- }
- a.pendingBlock = newPendingBlock
- }()
-
- replayVote := make([]*types.Vote, 0)
- func() {
- a.lock.Lock()
- defer a.lock.Unlock()
- newPendingVote := make([]pendingVote, 0)
- for _, pending := range a.pendingVote {
- if aID.Newer(&pending.vote.Position) {
- continue
- } else if pending.vote.Position == aID {
- replayVote = append(replayVote, pending.vote)
- } else if pending.receivedTime.After(expireTime) {
- newPendingVote = append(newPendingVote, pending)
- }
- }
- a.pendingVote = newPendingVote
- }()
-
- for _, block := range replayBlock {
- a.processBlock(block)
- }
-
- for _, vote := range replayVote {
- a.processVote(vote)
- }
-}
-
-func (a *agreement) stop() {
- a.restart(make(map[types.NodeID]struct{}), types.Position{
- ChainID: math.MaxUint32,
- })
-}
-
-func isStop(aID types.Position) bool {
- return aID.ChainID == math.MaxUint32
-}
-
-// clocks returns how many clock ticks this state requires.
-func (a *agreement) clocks() int {
- return a.state.clocks()
-}
-
-// pullVotes returns if current agreement requires more votes to continue.
-func (a *agreement) pullVotes() bool {
- return a.state.state() == statePullVote
-}
-
-// agreementID returns the current agreementID.
-func (a *agreement) agreementID() types.Position {
- a.lock.RLock()
- defer a.lock.RUnlock()
- return a.aID
-}
-
-// nextState is called at the specific clock time.
-func (a *agreement) nextState() (err error) {
- a.state, err = a.state.nextState()
- return
-}
-
-func (a *agreement) sanityCheck(vote *types.Vote) error {
- if _, exist := a.notarySet[vote.ProposerID]; !exist {
- return ErrNotInNotarySet
- }
- ok, err := verifyVoteSignature(vote)
- if err != nil {
- return err
- }
- if !ok {
- return ErrIncorrectVoteSignature
- }
- return nil
-}
-
-func (a *agreement) checkForkVote(vote *types.Vote) error {
- if err := func() error {
- a.data.lock.RLock()
- defer a.data.lock.RUnlock()
- if votes, exist := a.data.votes[vote.Period]; exist {
- if oldVote, exist := votes[vote.Type][vote.ProposerID]; exist {
- if vote.BlockHash != oldVote.BlockHash {
- return &ErrForkVote{vote.ProposerID, oldVote, vote}
- }
- }
- }
- return nil
- }(); err != nil {
- return err
- }
- return nil
-}
-
-// prepareVote prepares a vote.
-func (a *agreement) prepareVote(vote *types.Vote) (err error) {
- vote.Position = a.agreementID()
- err = a.authModule.SignVote(vote)
- return
-}
-
-// processVote is the entry point for processing Vote.
-func (a *agreement) processVote(vote *types.Vote) error {
- a.lock.Lock()
- defer a.lock.Unlock()
- if err := a.sanityCheck(vote); err != nil {
- return err
- }
- if vote.Position != a.aID {
- // Agreement module has stopped.
- if !isStop(a.aID) {
- if a.aID.Newer(&vote.Position) {
- return nil
- }
- }
- a.pendingVote = append(a.pendingVote, pendingVote{
- vote: vote,
- receivedTime: time.Now().UTC(),
- })
- return nil
- }
- if err := a.checkForkVote(vote); err != nil {
- return err
- }
-
- a.data.lock.Lock()
- defer a.data.lock.Unlock()
- if _, exist := a.data.votes[vote.Period]; !exist {
- a.data.votes[vote.Period] = newVoteListMap()
- }
- a.data.votes[vote.Period][vote.Type][vote.ProposerID] = vote
- if !a.hasOutput && vote.Type == types.VoteCom {
- if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok &&
- hash != skipBlockHash {
- a.hasOutput = true
- a.data.recv.ConfirmBlock(hash,
- a.data.votes[vote.Period][types.VoteCom])
- return nil
- }
- } else if a.hasOutput {
- return nil
- }
-
- // Check if the agreement requires fast-forwarding.
- if len(a.fastForward) > 0 {
- return nil
- }
- if vote.Type == types.VotePreCom {
- if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok &&
- hash != skipBlockHash {
- // Condition 1.
- if a.data.period >= vote.Period && vote.Period > a.data.lockRound &&
- vote.BlockHash != a.data.lockValue {
- a.data.lockValue = hash
- a.data.lockRound = vote.Period
- a.fastForward <- a.data.period + 1
- return nil
- }
- // Condition 2.
- if vote.Period > a.data.period {
- a.data.lockValue = hash
- a.data.lockRound = vote.Period
- a.fastForward <- vote.Period
- return nil
- }
- }
- }
- // Condition 3.
- if vote.Type == types.VoteCom && vote.Period >= a.data.period &&
- len(a.data.votes[vote.Period][types.VoteCom]) >= a.data.requiredVote {
- hashes := common.Hashes{}
- addPullBlocks := func(voteType types.VoteType) {
- for _, vote := range a.data.votes[vote.Period][voteType] {
- if vote.BlockHash == nullBlockHash || vote.BlockHash == skipBlockHash {
- continue
- }
- if _, found := a.findCandidateBlockNoLock(vote.BlockHash); !found {
- hashes = append(hashes, vote.BlockHash)
- }
- }
- }
- addPullBlocks(types.VoteInit)
- addPullBlocks(types.VotePreCom)
- addPullBlocks(types.VoteCom)
- if len(hashes) > 0 {
- a.data.recv.PullBlocks(hashes)
- }
- a.fastForward <- vote.Period + 1
- return nil
- }
- return nil
-}
-
-func (a *agreement) done() <-chan struct{} {
- a.lock.Lock()
- defer a.lock.Unlock()
- a.data.lock.Lock()
- defer a.data.lock.Unlock()
- ch := make(chan struct{}, 1)
- if a.hasOutput {
- ch <- struct{}{}
- } else {
- select {
- case period := <-a.fastForward:
- if period <= a.data.period {
- break
- }
- a.data.setPeriod(period)
- a.state = newPreCommitState(a.data)
- ch <- struct{}{}
- default:
- }
- }
- return ch
-}
-
-// processBlock is the entry point for processing Block.
-func (a *agreement) processBlock(block *types.Block) error {
- a.lock.Lock()
- defer a.lock.Unlock()
- a.data.blocksLock.Lock()
- defer a.data.blocksLock.Unlock()
-
- if block.Position != a.aID {
- // Agreement module has stopped.
- if !isStop(a.aID) {
- if a.aID.Newer(&block.Position) {
- return nil
- }
- }
- a.pendingBlock = append(a.pendingBlock, pendingBlock{
- block: block,
- receivedTime: time.Now().UTC(),
- })
- return nil
- }
- if b, exist := a.data.blocks[block.ProposerID]; exist {
- if b.Hash != block.Hash {
- return &ErrFork{block.ProposerID, b.Hash, block.Hash}
- }
- return nil
- }
- if err := a.data.leader.processBlock(block); err != nil {
- return err
- }
- a.data.blocks[block.ProposerID] = block
- a.addCandidateBlockNoLock(block)
- return nil
-}
-
-func (a *agreement) addCandidateBlock(block *types.Block) {
- a.lock.Lock()
- defer a.lock.Unlock()
- a.addCandidateBlockNoLock(block)
-}
-
-func (a *agreement) addCandidateBlockNoLock(block *types.Block) {
- a.candidateBlock[block.Hash] = block
-}
-
-func (a *agreement) findCandidateBlock(hash common.Hash) (*types.Block, bool) {
- a.lock.RLock()
- defer a.lock.RUnlock()
- return a.findCandidateBlockNoLock(hash)
-}
-
-func (a *agreement) findCandidateBlockNoLock(
- hash common.Hash) (*types.Block, bool) {
- b, e := a.candidateBlock[hash]
- return b, e
-}
-func (a *agreementData) countVote(period uint64, voteType types.VoteType) (
- blockHash common.Hash, ok bool) {
- a.lock.RLock()
- defer a.lock.RUnlock()
- return a.countVoteNoLock(period, voteType)
-}
-
-func (a *agreementData) countVoteNoLock(
- period uint64, voteType types.VoteType) (blockHash common.Hash, ok bool) {
- votes, exist := a.votes[period]
- if !exist {
- return
- }
- candidate := make(map[common.Hash]int)
- for _, vote := range votes[voteType] {
- if _, exist := candidate[vote.BlockHash]; !exist {
- candidate[vote.BlockHash] = 0
- }
- candidate[vote.BlockHash]++
- }
- for candidateHash, votes := range candidate {
- if votes >= a.requiredVote {
- blockHash = candidateHash
- ok = true
- return
- }
- }
- return
-}
-
-func (a *agreementData) setPeriod(period uint64) {
- for i := a.period + 1; i <= period; i++ {
- if _, exist := a.votes[i]; !exist {
- a.votes[i] = newVoteListMap()
- }
- }
- a.period = period
-}
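restart above sets requiredVote to len(notarySet)/3*2 + 1, i.e. a two-thirds-plus-one quorum of the notary set, which countVoteNoLock then uses as its threshold. A tiny sketch of that rule:

// requiredVotes mirrors the quorum rule used in agreement.restart above.
func requiredVotes(notarySetSize int) int {
    return notarySetSize/3*2 + 1 // e.g. 4 -> 3, 7 -> 5, 10 -> 7
}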
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/authenticator.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/authenticator.go
deleted file mode 100644
index 5d176cfee..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/authenticator.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
- "github.com/dexon-foundation/dexon-consensus/core/types"
- typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
-)
-
-// Authenticator verifies the data owner.
-type Authenticator struct {
- prvKey crypto.PrivateKey
- pubKey crypto.PublicKey
- proposerID types.NodeID
-}
-
-// NewAuthenticator constructs an Authenticator instance.
-func NewAuthenticator(prvKey crypto.PrivateKey) (auth *Authenticator) {
- auth = &Authenticator{
- prvKey: prvKey,
- pubKey: prvKey.PublicKey(),
- }
- auth.proposerID = types.NewNodeID(auth.pubKey)
- return
-}
-
-// SignBlock signs a types.Block.
-func (au *Authenticator) SignBlock(b *types.Block) (err error) {
- b.ProposerID = au.proposerID
- b.PayloadHash = crypto.Keccak256Hash(b.Payload)
- if b.Hash, err = hashBlock(b); err != nil {
- return
- }
- if b.Signature, err = au.prvKey.Sign(b.Hash); err != nil {
- return
- }
- return
-}
-
-// SignVote signs a types.Vote.
-func (au *Authenticator) SignVote(v *types.Vote) (err error) {
- v.ProposerID = au.proposerID
- v.Signature, err = au.prvKey.Sign(hashVote(v))
- return
-}
-
-// SignCRS signs CRS signature of types.Block.
-func (au *Authenticator) SignCRS(b *types.Block, crs common.Hash) (err error) {
- if b.ProposerID != au.proposerID {
- err = ErrInvalidProposerID
- return
- }
- b.CRSSignature, err = au.prvKey.Sign(hashCRS(b, crs))
- return
-}
-
-// SignDKGComplaint signs a DKG complaint.
-func (au *Authenticator) SignDKGComplaint(
- complaint *typesDKG.Complaint) (err error) {
- complaint.ProposerID = au.proposerID
- complaint.Signature, err = au.prvKey.Sign(hashDKGComplaint(complaint))
- return
-}
-
-// SignDKGMasterPublicKey signs a DKG master public key.
-func (au *Authenticator) SignDKGMasterPublicKey(
- mpk *typesDKG.MasterPublicKey) (err error) {
- mpk.ProposerID = au.proposerID
- mpk.Signature, err = au.prvKey.Sign(hashDKGMasterPublicKey(mpk))
- return
-}
-
-// SignDKGPrivateShare signs a DKG private share.
-func (au *Authenticator) SignDKGPrivateShare(
- prvShare *typesDKG.PrivateShare) (err error) {
- prvShare.ProposerID = au.proposerID
- prvShare.Signature, err = au.prvKey.Sign(hashDKGPrivateShare(prvShare))
- return
-}
-
-// SignDKGPartialSignature signs a DKG partial signature.
-func (au *Authenticator) SignDKGPartialSignature(
- pSig *typesDKG.PartialSignature) (err error) {
- pSig.ProposerID = au.proposerID
- pSig.Signature, err = au.prvKey.Sign(hashDKGPartialSignature(pSig))
- return
-}
-
-// SignDKGFinalize signs a DKG finalize message.
-func (au *Authenticator) SignDKGFinalize(
- final *typesDKG.Finalize) (err error) {
- final.ProposerID = au.proposerID
- final.Signature, err = au.prvKey.Sign(hashDKGFinalize(final))
- return
-}
-
-// VerifyBlock verifies the signature of types.Block.
-func (au *Authenticator) VerifyBlock(b *types.Block) (err error) {
- payloadHash := crypto.Keccak256Hash(b.Payload)
- if payloadHash != b.PayloadHash {
- err = ErrIncorrectHash
- return
- }
- hash, err := hashBlock(b)
- if err != nil {
- return
- }
- if hash != b.Hash {
- err = ErrIncorrectHash
- return
- }
- pubKey, err := crypto.SigToPub(b.Hash, b.Signature)
- if err != nil {
- return
- }
- if !b.ProposerID.Equal(types.NewNodeID(pubKey)) {
- err = ErrIncorrectSignature
- return
- }
- return
-}
-
-// VerifyVote verifies the signature of types.Vote.
-func (au *Authenticator) VerifyVote(v *types.Vote) (bool, error) {
- return verifyVoteSignature(v)
-}
-
-// VerifyCRS verifies the CRS signature of types.Block.
-func (au *Authenticator) VerifyCRS(b *types.Block, crs common.Hash) (bool, error) {
- return verifyCRSSignature(b, crs)
-}
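A sketch of signing and verifying a vote with the Authenticator above. It assumes the vendored core/crypto/ecdsa package (listed in the diffstat but not shown in this diff) exposes a NewPrivateKey constructor:

package main

import (
    "fmt"

    "github.com/dexon-foundation/dexon-consensus-core/core"
    "github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa"
    "github.com/dexon-foundation/dexon-consensus-core/core/types"
)

func main() {
    prv, err := ecdsa.NewPrivateKey() // assumed constructor, not shown in this diff
    if err != nil {
        panic(err)
    }
    auth := core.NewAuthenticator(prv)

    vote := &types.Vote{Type: types.VoteCom}
    if err := auth.SignVote(vote); err != nil { // fills ProposerID and Signature
        panic(err)
    }
    ok, err := auth.VerifyVote(vote)
    fmt.Println(ok, err) // true <nil> when the signature round-trips
}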
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/interfaces.go
deleted file mode 100644
index c85630775..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/interfaces.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package blockdb
-
-import (
- "errors"
- "fmt"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-var (
- // ErrBlockExists is the error when the block exists.
- ErrBlockExists = errors.New("block exists")
- // ErrBlockDoesNotExist is the error when the block does not exist.
- ErrBlockDoesNotExist = errors.New("block does not exist")
- // ErrIterationFinished is the error to check if the iteration is finished.
- ErrIterationFinished = errors.New("iteration finished")
- // ErrEmptyPath is the error when the required path is empty.
- ErrEmptyPath = fmt.Errorf("empty path")
- // ErrClosed is the error when using DB after it's closed.
- ErrClosed = fmt.Errorf("db closed")
- // ErrNotImplemented is the error that some interface is not implemented.
- ErrNotImplemented = fmt.Errorf("not implemented")
-)
-
-// BlockDatabase is the interface for a BlockDatabase.
-type BlockDatabase interface {
- Reader
- Writer
-
- // Close allows database implementation able to
- // release resource when finishing.
- Close() error
-}
-
-// Reader defines the interface for reading blocks from the DB.
-type Reader interface {
- Has(hash common.Hash) bool
- Get(hash common.Hash) (types.Block, error)
- GetAll() (BlockIterator, error)
-}
-
-// Writer defines the interface for writing blocks into DB.
-type Writer interface {
- Update(block types.Block) error
- Put(block types.Block) error
-}
-
-// BlockIterator defines an iterator over blocks held
-// in a DB.
-type BlockIterator interface {
- Next() (types.Block, error)
-}
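A small sketch of consuming the Reader and BlockIterator interfaces above; iteration ends when Next returns ErrIterationFinished (import path assumed, backed here by the in-memory implementation from this package):

package main

import (
    "fmt"

    "github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
)

// dumpAll drains every block held by a BlockDatabase.
func dumpAll(db blockdb.BlockDatabase) error {
    iter, err := db.GetAll()
    if err != nil {
        return err
    }
    for {
        block, err := iter.Next()
        if err == blockdb.ErrIterationFinished {
            return nil
        }
        if err != nil {
            return err
        }
        fmt.Println(block.Hash)
    }
}

func main() {
    db, err := blockdb.NewMemBackedBlockDB()
    if err != nil {
        panic(err)
    }
    defer db.Close()
    if err := dumpAll(db); err != nil {
        panic(err)
    }
}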
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/level-db.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/level-db.go
deleted file mode 100644
index 76730fc9c..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/level-db.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package blockdb
-
-import (
- "encoding/json"
-
- "github.com/syndtr/goleveldb/leveldb"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-// LevelDBBackedBlockDB is a leveldb backed BlockDB implementation.
-type LevelDBBackedBlockDB struct {
- db *leveldb.DB
-}
-
-// NewLevelDBBackedBlockDB initializes a leveldb-backed block database.
-func NewLevelDBBackedBlockDB(
- path string) (lvl *LevelDBBackedBlockDB, err error) {
-
- db, err := leveldb.OpenFile(path, nil)
- if err != nil {
- return
- }
- lvl = &LevelDBBackedBlockDB{db: db}
- return
-}
-
-// Close implements the Closer interface, releasing allocated resources.
-func (lvl *LevelDBBackedBlockDB) Close() error {
- return lvl.db.Close()
-}
-
-// Has implements the Reader.Has method.
-func (lvl *LevelDBBackedBlockDB) Has(hash common.Hash) bool {
- exists, err := lvl.db.Has([]byte(hash[:]), nil)
- if err != nil {
- // TODO(missionliao): Modify the interface to return error.
- panic(err)
- }
- return exists
-}
-
-// Get implements the Reader.Get method.
-func (lvl *LevelDBBackedBlockDB) Get(
- hash common.Hash) (block types.Block, err error) {
-
- queried, err := lvl.db.Get([]byte(hash[:]), nil)
- if err != nil {
- if err == leveldb.ErrNotFound {
- err = ErrBlockDoesNotExist
- }
- return
- }
- err = json.Unmarshal(queried, &block)
- if err != nil {
- return
- }
- return
-}
-
-// Update implements the Writer.Update method.
-func (lvl *LevelDBBackedBlockDB) Update(block types.Block) (err error) {
- // NOTE: we don't handle changes of the block hash (and they
- // should not happen).
- marshaled, err := json.Marshal(&block)
- if err != nil {
- return
- }
-
- if !lvl.Has(block.Hash) {
- err = ErrBlockDoesNotExist
- return
- }
- err = lvl.db.Put(
- []byte(block.Hash[:]),
- marshaled,
- nil)
- if err != nil {
- return
- }
- return
-}
-
-// Put implements the Writer.Put method.
-func (lvl *LevelDBBackedBlockDB) Put(block types.Block) (err error) {
- marshaled, err := json.Marshal(&block)
- if err != nil {
- return
- }
- if lvl.Has(block.Hash) {
- err = ErrBlockExists
- return
- }
- err = lvl.db.Put(
- []byte(block.Hash[:]),
- marshaled,
- nil)
- if err != nil {
- return
- }
- return
-}
-
-// GetAll implements Reader.GetAll method, which allows callers
-// to retrieve all blocks in DB.
-func (lvl *LevelDBBackedBlockDB) GetAll() (BlockIterator, error) {
- // TODO (mission): Implement this part via goleveldb's iterator.
- return nil, ErrNotImplemented
-}
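A sketch of a basic Put/Get round-trip against the leveldb-backed store above; the on-disk path is illustrative:

package main

import (
    "fmt"

    "github.com/dexon-foundation/dexon-consensus-core/common"
    "github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
    "github.com/dexon-foundation/dexon-consensus-core/core/types"
)

func main() {
    db, err := blockdb.NewLevelDBBackedBlockDB("/tmp/block.db") // illustrative path
    if err != nil {
        panic(err)
    }
    defer db.Close()

    block := types.Block{Hash: common.NewRandomHash()}
    if err := db.Put(block); err != nil {
        panic(err)
    }
    stored, err := db.Get(block.Hash)
    fmt.Println(stored.Hash == block.Hash, err) // true <nil>
}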
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/memory.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/memory.go
deleted file mode 100644
index 760646e10..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockdb/memory.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package blockdb
-
-import (
- "encoding/json"
- "io/ioutil"
- "os"
- "sync"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-type seqIterator struct {
- idx int
- db *MemBackedBlockDB
-}
-
-func (seq *seqIterator) Next() (types.Block, error) {
- curIdx := seq.idx
- seq.idx++
- return seq.db.getByIndex(curIdx)
-}
-
-// MemBackedBlockDB is a memory backed BlockDB implementation.
-type MemBackedBlockDB struct {
- blocksMutex sync.RWMutex
- blockHashSequence common.Hashes
- blocksByHash map[common.Hash]*types.Block
- persistantFilePath string
-}
-
-// NewMemBackedBlockDB initializes a memory-backed block database.
-func NewMemBackedBlockDB(persistantFilePath ...string) (db *MemBackedBlockDB, err error) {
- db = &MemBackedBlockDB{
- blockHashSequence: common.Hashes{},
- blocksByHash: make(map[common.Hash]*types.Block),
- }
- if len(persistantFilePath) == 0 || len(persistantFilePath[0]) == 0 {
- return
- }
- db.persistantFilePath = persistantFilePath[0]
- buf, err := ioutil.ReadFile(db.persistantFilePath)
- if err != nil {
- if !os.IsNotExist(err) {
- // Something unexpected happened.
- return
- }
- // It's expected behavior that the file may not exist; we should not
- // report an error for it.
- err = nil
- return
- }
-
- // Init this instance from the file content; it's a temporary way
- // to export those private fields for JSON encoding.
- toLoad := struct {
- Sequence common.Hashes
- ByHash map[common.Hash]*types.Block
- }{}
- err = json.Unmarshal(buf, &toLoad)
- if err != nil {
- return
- }
- db.blockHashSequence = toLoad.Sequence
- db.blocksByHash = toLoad.ByHash
- return
-}
-
-// Has returns whether or not the DB has a block identified with the hash.
-func (m *MemBackedBlockDB) Has(hash common.Hash) bool {
- m.blocksMutex.RLock()
- defer m.blocksMutex.RUnlock()
-
- _, ok := m.blocksByHash[hash]
- return ok
-}
-
-// Get returns a block given a hash.
-func (m *MemBackedBlockDB) Get(hash common.Hash) (types.Block, error) {
- m.blocksMutex.RLock()
- defer m.blocksMutex.RUnlock()
-
- return m.internalGet(hash)
-}
-
-func (m *MemBackedBlockDB) internalGet(hash common.Hash) (types.Block, error) {
- b, ok := m.blocksByHash[hash]
- if !ok {
- return types.Block{}, ErrBlockDoesNotExist
- }
- return *b, nil
-}
-
-// Put inserts a new block into the database.
-func (m *MemBackedBlockDB) Put(block types.Block) error {
- if m.Has(block.Hash) {
- return ErrBlockExists
- }
-
- m.blocksMutex.Lock()
- defer m.blocksMutex.Unlock()
-
- m.blockHashSequence = append(m.blockHashSequence, block.Hash)
- m.blocksByHash[block.Hash] = &block
- return nil
-}
-
-// Update updates a block in the database.
-func (m *MemBackedBlockDB) Update(block types.Block) error {
- m.blocksMutex.Lock()
- defer m.blocksMutex.Unlock()
-
- m.blocksByHash[block.Hash] = &block
- return nil
-}
-
-// Close implements the Closer interface, releasing allocated resources.
-func (m *MemBackedBlockDB) Close() (err error) {
- // Save the internal state to a JSON file. It's a temporary way
- // to dump private fields via JSON encoding.
- if len(m.persistantFilePath) == 0 {
- return
- }
-
- m.blocksMutex.RLock()
- defer m.blocksMutex.RUnlock()
-
- toDump := struct {
- Sequence common.Hashes
- ByHash map[common.Hash]*types.Block
- }{
- Sequence: m.blockHashSequence,
- ByHash: m.blocksByHash,
- }
-
- // Dump to JSON.
- buf, err := json.Marshal(&toDump)
- if err != nil {
- return
- }
-
- err = ioutil.WriteFile(m.persistantFilePath, buf, 0644)
- return
-}
-
-func (m *MemBackedBlockDB) getByIndex(idx int) (types.Block, error) {
- m.blocksMutex.RLock()
- defer m.blocksMutex.RUnlock()
-
- if idx >= len(m.blockHashSequence) {
- return types.Block{}, ErrIterationFinished
- }
-
- hash := m.blockHashSequence[idx]
- return m.internalGet(hash)
-}
-
-// GetAll implements the Reader.GetAll method, which allows callers
-// to retrieve all blocks in the DB.
-func (m *MemBackedBlockDB) GetAll() (BlockIterator, error) {
- return &seqIterator{db: m}, nil
-}
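A sketch of the optional persistence behaviour above: when NewMemBackedBlockDB receives a file path, an existing file is loaded on open and Close serialises the stored blocks back to it as JSON (the path here is illustrative):

package main

import (
    "github.com/dexon-foundation/dexon-consensus-core/common"
    "github.com/dexon-foundation/dexon-consensus-core/core/blockdb"
    "github.com/dexon-foundation/dexon-consensus-core/core/types"
)

func main() {
    // Illustrative path; a missing file is not an error on open.
    db, err := blockdb.NewMemBackedBlockDB("/tmp/blocks.json")
    if err != nil {
        panic(err)
    }
    if err := db.Put(types.Block{Hash: common.NewRandomHash()}); err != nil {
        panic(err)
    }
    // Close dumps the in-memory state to the JSON file.
    if err := db.Close(); err != nil {
        panic(err)
    }
}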
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockpool.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockpool.go
deleted file mode 100644
index 261966719..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/blockpool.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "container/heap"
-
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-// blockPool is a slice of heap of blocks, indexed by chainID,
-// and the heap is sorted based on heights of blocks.
-type blockPool []types.ByPosition
-
-// newBlockPool constructs a blockPool.
-func newBlockPool(chainNum uint32) (pool blockPool) {
- pool = make(blockPool, chainNum)
- for _, p := range pool {
- heap.Init(&p)
- }
- return
-}
-
-// resize the pool if new chain is added.
-func (p *blockPool) resize(num uint32) {
- if uint32(len(*p)) >= num {
- return
- }
- newPool := make([]types.ByPosition, num)
- copy(newPool, *p)
- for i := uint32(len(*p)); i < num; i++ {
- newChain := types.ByPosition{}
- heap.Init(&newChain)
- newPool[i] = newChain
- }
- *p = newPool
-}
-
-// addBlock adds a block into the pending set and makes sure these
-// blocks are sorted by height.
-func (p blockPool) addBlock(b *types.Block) {
- heap.Push(&p[b.Position.ChainID], b)
-}
-
-// purgeBlocks purges blocks of the given chain with height less than or equal
-// to the given height.
-// NOTE: the validity of 'chainID' is not checked; callers must make sure it is
-// within range.
-func (p blockPool) purgeBlocks(chainID uint32, height uint64) {
- for {
- if len(p[chainID]) == 0 || p[chainID][0].Position.Height > height {
- break
- }
- heap.Pop(&p[chainID])
- }
-}
-
-// tip gets the block with the lowest height in the chain, if any.
-func (p blockPool) tip(chainID uint32) *types.Block {
- if len(p[chainID]) == 0 {
- return nil
- }
- return p[chainID][0]
-}
-
-// removeTip removes block with lowest height of the specified chain.
-func (p blockPool) removeTip(chainID uint32) {
- if len(p[chainID]) > 0 {
- heap.Pop(&p[chainID])
- }
-}
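A package-internal sketch of the per-chain heap above: blocks are pushed with addBlock, and the lowest-height block is read with tip and discarded with removeTip (illustrative only, since blockPool is unexported):

// drainChain is illustrative: pop one chain's blocks in ascending height order.
func drainChain(pool blockPool, chainID uint32) (out []*types.Block) {
    for {
        tip := pool.tip(chainID)
        if tip == nil {
            return
        }
        out = append(out, tip)
        pool.removeTip(chainID)
    }
}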
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/compaction-chain.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/compaction-chain.go
deleted file mode 100644
index 65592f1d0..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/compaction-chain.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "container/heap"
- "fmt"
- "sync"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-// Errors for compaction chain module.
-var (
- ErrBlockNotRegistered = fmt.Errorf(
- "block not registered")
- ErrNotInitiazlied = fmt.Errorf(
- "not initialized")
-)
-
-type finalizedBlockHeap = types.ByFinalizationHeight
-
-type compactionChain struct {
- gov Governance
- chainUnsynced uint32
- tsigVerifier *TSigVerifierCache
- blocks map[common.Hash]*types.Block
- pendingBlocks []*types.Block
- pendingFinalizedBlocks *finalizedBlockHeap
- lock sync.RWMutex
- prevBlock *types.Block
-}
-
-func newCompactionChain(gov Governance) *compactionChain {
- pendingFinalizedBlocks := &finalizedBlockHeap{}
- heap.Init(pendingFinalizedBlocks)
- return &compactionChain{
- gov: gov,
- tsigVerifier: NewTSigVerifierCache(gov, 7),
- blocks: make(map[common.Hash]*types.Block),
- pendingFinalizedBlocks: pendingFinalizedBlocks,
- }
-}
-
-func (cc *compactionChain) init(initBlock *types.Block) {
- cc.lock.Lock()
- defer cc.lock.Unlock()
- cc.prevBlock = initBlock
- cc.pendingBlocks = []*types.Block{}
- if initBlock.Finalization.Height == 0 {
- cc.chainUnsynced = cc.gov.Configuration(uint64(0)).NumChains
- cc.pendingBlocks = append(cc.pendingBlocks, initBlock)
- }
-}
-
-func (cc *compactionChain) registerBlock(block *types.Block) {
- if cc.blockRegistered(block.Hash) {
- return
- }
- cc.lock.Lock()
- defer cc.lock.Unlock()
- cc.blocks[block.Hash] = block
-}
-
-func (cc *compactionChain) blockRegistered(hash common.Hash) (exist bool) {
- cc.lock.RLock()
- defer cc.lock.RUnlock()
- _, exist = cc.blocks[hash]
- return
-}
-
-func (cc *compactionChain) processBlock(block *types.Block) error {
- prevBlock := cc.lastBlock()
- if prevBlock == nil {
- return ErrNotInitiazlied
- }
- cc.lock.Lock()
- defer cc.lock.Unlock()
- if prevBlock.Finalization.Height == 0 && block.Position.Height == 0 {
- cc.chainUnsynced--
- }
- cc.pendingBlocks = append(cc.pendingBlocks, block)
- return nil
-}
-
-func (cc *compactionChain) extractBlocks() []*types.Block {
- prevBlock := cc.lastBlock()
-
- // Check if we're synced.
- if !func() bool {
- cc.lock.RLock()
- defer cc.lock.RUnlock()
- if len(cc.pendingBlocks) == 0 {
- return false
- }
- // Finalization.Height == 0 is syncing from bootstrap.
- if prevBlock.Finalization.Height == 0 {
- return cc.chainUnsynced == 0
- }
- if prevBlock.Hash != cc.pendingBlocks[0].Hash {
- return false
- }
- return true
- }() {
- return []*types.Block{}
- }
- deliveringBlocks := make([]*types.Block, 0)
- cc.lock.Lock()
- defer cc.lock.Unlock()
- // cc.pendingBlocks[0] will not be popped and will equal cc.prevBlock.
- for len(cc.pendingBlocks) > 1 &&
- (len(cc.pendingBlocks[1].Finalization.Randomness) != 0 ||
- cc.pendingBlocks[1].Position.Round == 0) {
- delete(cc.blocks, cc.pendingBlocks[0].Hash)
- cc.pendingBlocks = cc.pendingBlocks[1:]
-
- block := cc.pendingBlocks[0]
- block.Finalization.ParentHash = prevBlock.Hash
- block.Finalization.Height = prevBlock.Finalization.Height + 1
- deliveringBlocks = append(deliveringBlocks, block)
- prevBlock = block
- }
-
- cc.prevBlock = prevBlock
-
- return deliveringBlocks
-}
-
-func (cc *compactionChain) processFinalizedBlock(block *types.Block) {
- if block.Finalization.Height <= cc.lastBlock().Finalization.Height {
- return
- }
-
- // Block of round 0 should not have randomness.
- if block.Position.Round == 0 && len(block.Finalization.Randomness) != 0 {
- return
- }
-
- cc.lock.Lock()
- defer cc.lock.Unlock()
- heap.Push(cc.pendingFinalizedBlocks, block)
-
- return
-}
-
-func (cc *compactionChain) extractFinalizedBlocks() []*types.Block {
- prevBlock := cc.lastBlock()
-
- blocks := func() []*types.Block {
- cc.lock.Lock()
- defer cc.lock.Unlock()
- blocks := []*types.Block{}
- prevHeight := prevBlock.Finalization.Height
- for cc.pendingFinalizedBlocks.Len() != 0 {
- tip := (*cc.pendingFinalizedBlocks)[0]
- // Pop blocks that are already confirmed.
- if tip.Finalization.Height <= prevBlock.Finalization.Height {
- heap.Pop(cc.pendingFinalizedBlocks)
- continue
- }
-			// Since we haven't verified the finalized block yet,
-			// it may belong to a fork.
- if tip.Finalization.Height == prevHeight ||
- tip.Finalization.Height == prevHeight+1 {
- prevHeight = tip.Finalization.Height
- blocks = append(blocks, tip)
- heap.Pop(cc.pendingFinalizedBlocks)
- } else {
- break
- }
- }
- return blocks
- }()
- toPending := []*types.Block{}
- confirmed := []*types.Block{}
- for _, b := range blocks {
- if b.Hash == prevBlock.Hash &&
- b.Finalization.Height == prevBlock.Finalization.Height {
- continue
- }
- round := b.Position.Round
- if round != 0 {
- // Randomness is not available at round 0.
- v, ok, err := cc.tsigVerifier.UpdateAndGet(round)
- if err != nil {
- continue
- }
- if !ok {
- toPending = append(toPending, b)
- continue
- }
- if ok := v.VerifySignature(b.Hash, crypto.Signature{
- Type: "bls",
- Signature: b.Finalization.Randomness}); !ok {
- continue
- }
- }
- // Fork resolution: choose block with smaller hash.
- if prevBlock.Finalization.Height == b.Finalization.Height {
- //TODO(jimmy-dexon): remove this panic after test.
- if true {
- // workaround to `go vet` error
- panic(fmt.Errorf(
- "forked finalized block %s,%s", prevBlock.Hash, b.Hash))
- }
- if b.Hash.Less(prevBlock.Hash) {
- confirmed = confirmed[:len(confirmed)-1]
- } else {
- continue
- }
- }
- if b.Finalization.Height-prevBlock.Finalization.Height > 1 {
- toPending = append(toPending, b)
- continue
- }
- confirmed = append(confirmed, b)
- prevBlock = b
- }
- cc.lock.Lock()
- defer cc.lock.Unlock()
- if len(confirmed) != 0 && cc.prevBlock.Finalization.Height == 0 {
- // Pop the initBlock inserted when bootstrapping.
- cc.pendingBlocks = cc.pendingBlocks[1:]
- }
- cc.prevBlock = prevBlock
- for _, b := range toPending {
- heap.Push(cc.pendingFinalizedBlocks, b)
- }
- return confirmed
-}
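Finalized blocks can arrive out of order while syncing, so they are parked in a heap keyed by finalization height and only popped once the heights become consecutive. A minimal container/heap sketch of that ordering, using bare heights in place of types.ByFinalizationHeight:

package main

import (
	"container/heap"
	"fmt"
)

// byHeight is a toy min-heap keyed by finalization height.
type byHeight []uint64

func (h byHeight) Len() int            { return len(h) }
func (h byHeight) Less(i, j int) bool  { return h[i] < h[j] }
func (h byHeight) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *byHeight) Push(x interface{}) { *h = append(*h, x.(uint64)) }
func (h *byHeight) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func main() {
	pending := &byHeight{}
	heap.Init(pending)
	for _, height := range []uint64{7, 5, 9, 6} { // arrives out of order
		heap.Push(pending, height)
	}
	prev := uint64(4)
	// Pop only consecutive heights; anything behind a gap stays pending.
	for pending.Len() > 0 && (*pending)[0] == prev+1 {
		prev = heap.Pop(pending).(uint64)
		fmt.Println("deliver height", prev) // 5, 6, 7; 9 waits for 8
	}
}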
-
-func (cc *compactionChain) processBlockRandomnessResult(
- rand *types.BlockRandomnessResult) error {
- if !cc.blockRegistered(rand.BlockHash) {
- return ErrBlockNotRegistered
- }
- cc.lock.Lock()
- defer cc.lock.Unlock()
- cc.blocks[rand.BlockHash].Finalization.Randomness = rand.Randomness
- return nil
-}
-
-func (cc *compactionChain) lastBlock() *types.Block {
- cc.lock.RLock()
- defer cc.lock.RUnlock()
- return cc.prevBlock
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/configuration-chain.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/configuration-chain.go
deleted file mode 100644
index 5e5a587cb..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/configuration-chain.go
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "fmt"
- "sync"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
- "github.com/dexon-foundation/dexon-consensus/core/types"
- typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
-)
-
-// Errors for configuration chain.
-var (
- ErrDKGNotRegistered = fmt.Errorf(
- "not yet registered in DKG protocol")
- ErrTSigAlreadyRunning = fmt.Errorf(
- "tsig is already running")
- ErrDKGNotReady = fmt.Errorf(
- "DKG is not ready")
-)
-
-type configurationChain struct {
- ID types.NodeID
- recv dkgReceiver
- gov Governance
- dkg *dkgProtocol
- logger common.Logger
- dkgLock sync.RWMutex
- dkgSigner map[uint64]*dkgShareSecret
- gpk map[uint64]*DKGGroupPublicKey
- dkgResult sync.RWMutex
- tsig map[common.Hash]*tsigProtocol
- tsigTouched map[common.Hash]struct{}
- tsigReady *sync.Cond
- // TODO(jimmy-dexon): add timeout to pending psig.
- pendingPsig map[common.Hash][]*typesDKG.PartialSignature
- prevHash common.Hash
-}
-
-func newConfigurationChain(
- ID types.NodeID,
- recv dkgReceiver,
- gov Governance,
- logger common.Logger) *configurationChain {
- return &configurationChain{
- ID: ID,
- recv: recv,
- gov: gov,
- logger: logger,
- dkgSigner: make(map[uint64]*dkgShareSecret),
- gpk: make(map[uint64]*DKGGroupPublicKey),
- tsig: make(map[common.Hash]*tsigProtocol),
- tsigTouched: make(map[common.Hash]struct{}),
- tsigReady: sync.NewCond(&sync.Mutex{}),
- pendingPsig: make(map[common.Hash][]*typesDKG.PartialSignature),
- }
-}
-
-func (cc *configurationChain) registerDKG(round uint64, threshold int) {
- cc.dkgLock.Lock()
- defer cc.dkgLock.Unlock()
- cc.dkg = newDKGProtocol(
- cc.ID,
- cc.recv,
- round,
- threshold)
-}
-
-func (cc *configurationChain) runDKG(round uint64) error {
- cc.dkgLock.Lock()
- defer cc.dkgLock.Unlock()
- if cc.dkg == nil || cc.dkg.round != round {
- return ErrDKGNotRegistered
- }
- if func() bool {
- cc.dkgResult.RLock()
- defer cc.dkgResult.RUnlock()
- _, exist := cc.gpk[round]
- return exist
- }() {
- return nil
- }
-
- ticker := newTicker(cc.gov, round, TickerDKG)
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
- // Phase 2(T = 0): Exchange DKG secret key share.
- cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
- cc.dkg.processMasterPublicKeys(cc.gov.DKGMasterPublicKeys(round))
- // Phase 3(T = 0~λ): Propose complaint.
- // Propose complaint is done in `processMasterPublicKeys`.
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
- // Phase 4(T = λ): Propose nack complaints.
- cc.dkg.proposeNackComplaints()
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
-	// Phase 5(T = 2λ): Propose anti-nack complaints.
- cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
- cc.dkg.processNackComplaints(cc.gov.DKGComplaints(round))
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
-	// Phase 6(T = 3λ): Rebroadcast anti-nack complaints.
- // Rebroadcast is done in `processPrivateShare`.
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
- // Phase 7(T = 4λ): Enforce complaints and nack complaints.
- cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
- cc.dkg.enforceNackComplaints(cc.gov.DKGComplaints(round))
- // Enforce complaint is done in `processPrivateShare`.
- // Phase 8(T = 5λ): DKG finalize.
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
- cc.dkg.proposeFinalize()
- // Phase 9(T = 6λ): DKG is ready.
- cc.dkgLock.Unlock()
- <-ticker.Tick()
- cc.dkgLock.Lock()
-	// Normally, IsDKGFinal would return true here. This loop guards against
-	// unexpected network fluctuation and ensures the robustness of the DKG protocol.
- cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round)
- for !cc.gov.IsDKGFinal(round) {
- cc.logger.Info("DKG is not ready yet. Try again later...",
- "nodeID", cc.ID)
- time.Sleep(500 * time.Millisecond)
- }
- cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
- cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
- gpk, err := NewDKGGroupPublicKey(round,
- cc.gov.DKGMasterPublicKeys(round),
- cc.gov.DKGComplaints(round),
- cc.dkg.threshold)
- if err != nil {
- return err
- }
- signer, err := cc.dkg.recoverShareSecret(gpk.qualifyIDs)
- if err != nil {
- return err
- }
- qualifies := ""
- for nID := range gpk.qualifyNodeIDs {
- qualifies += fmt.Sprintf("%s ", nID.String()[:6])
- }
- cc.logger.Info("Qualify Nodes",
- "nodeID", cc.ID,
- "round", round,
- "count", len(gpk.qualifyIDs),
- "qualifies", qualifies)
- cc.dkgResult.Lock()
- defer cc.dkgResult.Unlock()
- cc.dkgSigner[round] = signer
- cc.gpk[round] = gpk
- return nil
-}
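runDKG walks through the DKG phases one ticker interval (λ) at a time, releasing dkgLock while it waits for each tick. A rough sketch of that pacing with a plain time.Ticker and a shortened λ; the real code derives its ticker from the governance configuration:

package main

import (
	"fmt"
	"time"
)

func main() {
	lambda := 50 * time.Millisecond // stand-in for λ from the round config
	phases := []string{
		"exchange master public keys / secret shares",
		"propose nack complaints",
		"propose anti-nack complaints",
		"rebroadcast anti-nack complaints",
		"enforce complaints",
		"finalize",
	}
	ticker := time.NewTicker(lambda)
	defer ticker.Stop()
	for _, phase := range phases {
		<-ticker.C // each phase starts on a tick boundary
		fmt.Println("phase:", phase)
	}
}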
-
-func (cc *configurationChain) preparePartialSignature(
- round uint64, hash common.Hash) (*typesDKG.PartialSignature, error) {
- signer, exist := func() (*dkgShareSecret, bool) {
- cc.dkgResult.RLock()
- defer cc.dkgResult.RUnlock()
- signer, exist := cc.dkgSigner[round]
- return signer, exist
- }()
- if !exist {
- return nil, ErrDKGNotReady
- }
- return &typesDKG.PartialSignature{
- ProposerID: cc.ID,
- Round: round,
- Hash: hash,
- PartialSignature: signer.sign(hash),
- }, nil
-}
-
-func (cc *configurationChain) touchTSigHash(hash common.Hash) (first bool) {
- cc.tsigReady.L.Lock()
- defer cc.tsigReady.L.Unlock()
- _, exist := cc.tsigTouched[hash]
- cc.tsigTouched[hash] = struct{}{}
- return !exist
-}
-
-func (cc *configurationChain) untouchTSigHash(hash common.Hash) {
- cc.tsigReady.L.Lock()
- defer cc.tsigReady.L.Unlock()
- delete(cc.tsigTouched, hash)
-}
-
-func (cc *configurationChain) runTSig(
- round uint64, hash common.Hash) (
- crypto.Signature, error) {
- gpk, exist := func() (*DKGGroupPublicKey, bool) {
- cc.dkgResult.RLock()
- defer cc.dkgResult.RUnlock()
- gpk, exist := cc.gpk[round]
- return gpk, exist
- }()
- if !exist {
- return crypto.Signature{}, ErrDKGNotReady
- }
- cc.tsigReady.L.Lock()
- defer cc.tsigReady.L.Unlock()
- if _, exist := cc.tsig[hash]; exist {
- return crypto.Signature{}, ErrTSigAlreadyRunning
- }
- cc.tsig[hash] = newTSigProtocol(gpk, hash)
- pendingPsig := cc.pendingPsig[hash]
- delete(cc.pendingPsig, hash)
- go func() {
- for _, psig := range pendingPsig {
- if err := cc.processPartialSignature(psig); err != nil {
- cc.logger.Error("failed to process partial signature",
- "nodeID", cc.ID,
- "error", err)
- }
- }
- }()
- var signature crypto.Signature
- var err error
- for func() bool {
- signature, err = cc.tsig[hash].signature()
- return err == ErrNotEnoughtPartialSignatures
- }() {
- // TODO(jimmy-dexon): add a timeout here.
- cc.tsigReady.Wait()
- }
- delete(cc.tsig, hash)
- if err != nil {
- return crypto.Signature{}, err
- }
- return signature, nil
-}
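runTSig blocks on tsigReady until processPartialSignature has fed enough shares into the protocol, re-checking the condition after every Broadcast. The sketch below shows the same sync.Cond wait/broadcast pattern with a plain counter standing in for the tsig protocol:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var (
		ready     = sync.NewCond(&sync.Mutex{})
		collected = 0
		threshold = 3
	)
	// Producers: each partial signature bumps the counter and wakes waiters.
	for i := 0; i < threshold; i++ {
		go func(i int) {
			time.Sleep(time.Duration(i+1) * 10 * time.Millisecond)
			ready.L.Lock()
			collected++
			ready.L.Unlock()
			ready.Broadcast()
		}(i)
	}
	// Consumer: wait until the threshold is reached, re-checking after each wake-up.
	ready.L.Lock()
	for collected < threshold {
		ready.Wait()
	}
	ready.L.Unlock()
	fmt.Println("combined signature ready with", collected, "shares")
}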
-
-func (cc *configurationChain) runBlockTSig(
- round uint64, hash common.Hash) (crypto.Signature, error) {
- sig, err := cc.runTSig(round, hash)
- if err != nil {
- return crypto.Signature{}, err
- }
- cc.logger.Info("Block TSIG",
- "nodeID", cc.ID,
- "round", round,
- "signature", sig)
- return sig, nil
-}
-
-func (cc *configurationChain) runCRSTSig(
- round uint64, crs common.Hash) ([]byte, error) {
- sig, err := cc.runTSig(round, crs)
- cc.logger.Info("CRS",
- "nodeID", cc.ID,
- "round", round+1,
- "signature", sig)
- return sig.Signature[:], err
-}
-
-func (cc *configurationChain) processPrivateShare(
- prvShare *typesDKG.PrivateShare) error {
- cc.dkgLock.Lock()
- defer cc.dkgLock.Unlock()
- if cc.dkg == nil {
- return nil
- }
- return cc.dkg.processPrivateShare(prvShare)
-}
-
-func (cc *configurationChain) processPartialSignature(
- psig *typesDKG.PartialSignature) error {
- cc.tsigReady.L.Lock()
- defer cc.tsigReady.L.Unlock()
- if _, exist := cc.tsig[psig.Hash]; !exist {
- ok, err := verifyDKGPartialSignatureSignature(psig)
- if err != nil {
- return err
- }
- if !ok {
- return ErrIncorrectPartialSignatureSignature
- }
- cc.pendingPsig[psig.Hash] = append(cc.pendingPsig[psig.Hash], psig)
- return nil
- }
- if err := cc.tsig[psig.Hash].processPartialSignature(psig); err != nil {
- return err
- }
- cc.tsigReady.Broadcast()
- return nil
-}
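Partial signatures may arrive before runTSig has created the protocol instance for their hash; they are verified, buffered in pendingPsig, and drained once the instance exists. A toy sketch of that buffer-then-drain pattern, with maps of strings and ints standing in for the real types:

package main

import "fmt"

type psig struct {
	hash  string
	share int
}

func main() {
	pending := map[string][]psig{}
	running := map[string][]int{} // hash -> collected shares

	process := func(p psig) {
		if _, ok := running[p.hash]; !ok {
			// No tsig running for this hash yet: buffer the share.
			pending[p.hash] = append(pending[p.hash], p)
			return
		}
		running[p.hash] = append(running[p.hash], p.share)
	}

	process(psig{"h1", 1}) // arrives early, gets buffered
	process(psig{"h1", 2})

	// runTSig starts: create the instance and drain the buffer.
	running["h1"] = nil
	for _, p := range pending["h1"] {
		running["h1"] = append(running["h1"], p.share)
	}
	delete(pending, "h1")

	fmt.Println(running["h1"]) // [1 2]
}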
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus-timestamp.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus-timestamp.go
deleted file mode 100644
index 9750a74c3..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus-timestamp.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "errors"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-// consensusTimestamp is for the Consensus Timestamp Algorithm.
-type consensusTimestamp struct {
- chainTimestamps []time.Time
-
- // This part keeps configs for each round.
- numChainsOfRounds []uint32
- numChainsBase uint64
-
- // dMoment represents the genesis time.
- dMoment time.Time
- // lastTimestamp represents previous assigned consensus timestamp.
- lastTimestamp time.Time
-}
-
-var (
-	// ErrTimestampNotIncrease would be reported if the timestamp is not strictly
- // increasing on the same chain.
- ErrTimestampNotIncrease = errors.New("timestamp is not increasing")
- // ErrNoRoundConfig for no round config found.
- ErrNoRoundConfig = errors.New("no round config found")
- // ErrConsensusTimestampRewind would be reported if the generated timestamp
-	// is rewound.
- ErrConsensusTimestampRewind = errors.New("consensus timestamp rewind")
-)
-
-// newConsensusTimestamp creates a consensusTimestamp object.
-func newConsensusTimestamp(
- dMoment time.Time, round uint64, numChains uint32) *consensusTimestamp {
- ts := make([]time.Time, 0, numChains)
- for i := uint32(0); i < numChains; i++ {
- ts = append(ts, dMoment)
- }
- return &consensusTimestamp{
- numChainsOfRounds: []uint32{numChains},
- numChainsBase: round,
- dMoment: dMoment,
- chainTimestamps: ts,
- }
-}
-
-// appendConfig appends a configuration for an upcoming round. When you append
-// a config for round R, next time you can only append the config for round R+1.
-func (ct *consensusTimestamp) appendConfig(
- round uint64, config *types.Config) error {
-
- if round != uint64(len(ct.numChainsOfRounds))+ct.numChainsBase {
- return ErrRoundNotIncreasing
- }
- ct.numChainsOfRounds = append(ct.numChainsOfRounds, config.NumChains)
- return nil
-}
-
-func (ct *consensusTimestamp) resizeChainTimestamps(numChain uint32) {
- l := uint32(len(ct.chainTimestamps))
- if numChain > l {
- for i := l; i < numChain; i++ {
- ct.chainTimestamps = append(ct.chainTimestamps, ct.dMoment)
- }
- } else if numChain < l {
- ct.chainTimestamps = ct.chainTimestamps[:numChain]
- }
-}
-
-// processBlocks is the entry function.
-func (ct *consensusTimestamp) processBlocks(blocks []*types.Block) (err error) {
- for _, block := range blocks {
-		// Blocks from different rounds may interleave if no configuration change
-		// occurs, and the interleaving is limited to one round: round r can only
-		// interleave with r-1 and r+1.
- round := block.Position.Round
- if ct.numChainsBase == round || ct.numChainsBase+1 == round {
- // Normal case, no need to modify chainTimestamps.
- } else if ct.numChainsBase+2 == round {
- if len(ct.numChainsOfRounds) < 2 {
- return ErrNoRoundConfig
- }
- ct.numChainsBase++
- ct.numChainsOfRounds = ct.numChainsOfRounds[1:]
- if ct.numChainsOfRounds[0] > ct.numChainsOfRounds[1] {
-				ct.resizeChainTimestamps(ct.numChainsOfRounds[0])
- } else {
-				ct.resizeChainTimestamps(ct.numChainsOfRounds[1])
- }
- } else {
- // Error if round < base or round > base + 2.
- return ErrInvalidRoundID
- }
- ts := ct.chainTimestamps[:ct.numChainsOfRounds[round-ct.numChainsBase]]
- if block.Finalization.Timestamp, err = getMedianTime(ts); err != nil {
- return
- }
- if block.Timestamp.Before(ct.chainTimestamps[block.Position.ChainID]) {
- return ErrTimestampNotIncrease
- }
- ct.chainTimestamps[block.Position.ChainID] = block.Timestamp
- if block.Finalization.Timestamp.Before(ct.lastTimestamp) {
- block.Finalization.Timestamp = ct.lastTimestamp
- } else {
- ct.lastTimestamp = block.Finalization.Timestamp
- }
- }
- return
-}
-
-func (ct *consensusTimestamp) isSynced() bool {
- numChain := ct.numChainsOfRounds[0]
- for i := uint32(0); i < numChain; i++ {
- if ct.chainTimestamps[i].Equal(ct.dMoment) {
- return false
- }
- }
- return true
-}
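processBlocks assigns each delivered block the median of the per-chain tip timestamps (via getMedianTime, defined elsewhere in this package) and never lets the result move backwards. A standalone sketch of those two steps, assuming a simple sort-based median:

package main

import (
	"fmt"
	"sort"
	"time"
)

// medianTime is an illustrative stand-in for the package's getMedianTime.
func medianTime(ts []time.Time) time.Time {
	sorted := append([]time.Time(nil), ts...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].Before(sorted[j]) })
	return sorted[len(sorted)/2]
}

func main() {
	base := time.Date(2018, 10, 1, 0, 0, 0, 0, time.UTC)
	chainTips := []time.Time{
		base.Add(1 * time.Second),
		base.Add(5 * time.Second),
		base.Add(3 * time.Second),
	}
	consensusTime := medianTime(chainTips) // base+3s
	last := base.Add(4 * time.Second)
	if consensusTime.Before(last) {
		consensusTime = last // never rewind the consensus timestamp
	}
	fmt.Println(consensusTime) // clamped to base+4s
}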
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus.go
deleted file mode 100644
index 7e6934f45..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/consensus.go
+++ /dev/null
@@ -1,1026 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "context"
- "encoding/hex"
- "fmt"
- "sync"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/blockdb"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
- "github.com/dexon-foundation/dexon-consensus/core/types"
- typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
-)
-
-// Errors for consensus core.
-var (
- ErrProposerNotInNodeSet = fmt.Errorf(
- "proposer is not in node set")
- ErrIncorrectHash = fmt.Errorf(
- "hash of block is incorrect")
- ErrIncorrectSignature = fmt.Errorf(
- "signature of block is incorrect")
- ErrGenesisBlockNotEmpty = fmt.Errorf(
- "genesis block should be empty")
- ErrUnknownBlockProposed = fmt.Errorf(
- "unknown block is proposed")
- ErrIncorrectAgreementResultPosition = fmt.Errorf(
- "incorrect agreement result position")
- ErrNotEnoughVotes = fmt.Errorf(
- "not enought votes")
- ErrIncorrectVoteBlockHash = fmt.Errorf(
- "incorrect vote block hash")
- ErrIncorrectVoteType = fmt.Errorf(
- "incorrect vote type")
- ErrIncorrectVotePosition = fmt.Errorf(
- "incorrect vote position")
- ErrIncorrectVoteProposer = fmt.Errorf(
- "incorrect vote proposer")
- ErrIncorrectBlockRandomnessResult = fmt.Errorf(
- "incorrect block randomness result")
-)
-
-// consensusBAReceiver implements agreementReceiver.
-type consensusBAReceiver struct {
- // TODO(mission): consensus would be replaced by lattice and network.
- consensus *Consensus
- agreementModule *agreement
- chainID uint32
- changeNotaryTime time.Time
- round uint64
- restartNotary chan bool
-}
-
-func (recv *consensusBAReceiver) ProposeVote(vote *types.Vote) {
- go func() {
- if err := recv.agreementModule.prepareVote(vote); err != nil {
- recv.consensus.logger.Error("Failed to prepare vote", "error", err)
- return
- }
- if err := recv.agreementModule.processVote(vote); err != nil {
- recv.consensus.logger.Error("Failed to process vote", "error", err)
- return
- }
- recv.consensus.logger.Debug("Calling Network.BroadcastVote",
- "vote", vote)
- recv.consensus.network.BroadcastVote(vote)
- }()
-}
-
-func (recv *consensusBAReceiver) ProposeBlock() common.Hash {
- block := recv.consensus.proposeBlock(recv.chainID, recv.round)
- if block == nil {
- recv.consensus.logger.Error("unable to propose block")
- return nullBlockHash
- }
- if err := recv.consensus.preProcessBlock(block); err != nil {
- recv.consensus.logger.Error("Failed to pre-process block", "error", err)
- return common.Hash{}
- }
- recv.consensus.logger.Debug("Calling Network.BroadcastBlock", "block", block)
- recv.consensus.network.BroadcastBlock(block)
- return block.Hash
-}
-
-func (recv *consensusBAReceiver) ConfirmBlock(
- hash common.Hash, votes map[types.NodeID]*types.Vote) {
- var block *types.Block
- if (hash == common.Hash{}) {
- recv.consensus.logger.Info("Empty block is confirmed",
- "position", recv.agreementModule.agreementID())
- var err error
- block, err = recv.consensus.proposeEmptyBlock(recv.chainID)
- if err != nil {
- recv.consensus.logger.Error("Propose empty block failed", "error", err)
- return
- }
- } else {
- var exist bool
- block, exist = recv.consensus.baModules[recv.chainID].
- findCandidateBlockNoLock(hash)
- if !exist {
- recv.consensus.logger.Error("Unknown block confirmed",
- "hash", hash,
- "chainID", recv.chainID)
- ch := make(chan *types.Block)
- func() {
- recv.consensus.lock.Lock()
- defer recv.consensus.lock.Unlock()
- recv.consensus.baConfirmedBlock[hash] = ch
- }()
- recv.consensus.network.PullBlocks(common.Hashes{hash})
- go func() {
- block = <-ch
- recv.consensus.logger.Info("Receive unknown block",
- "hash", hash,
- "chainID", recv.chainID)
- recv.agreementModule.addCandidateBlock(block)
- recv.ConfirmBlock(block.Hash, votes)
- }()
- return
- }
- }
- recv.consensus.ccModule.registerBlock(block)
- voteList := make([]types.Vote, 0, len(votes))
- for _, vote := range votes {
- if vote.BlockHash != hash {
- continue
- }
- voteList = append(voteList, *vote)
- }
- result := &types.AgreementResult{
- BlockHash: block.Hash,
- Position: block.Position,
- Votes: voteList,
- }
- recv.consensus.logger.Debug("Calling Network.BroadcastAgreementResult",
- "result", result)
- recv.consensus.network.BroadcastAgreementResult(result)
- if err := recv.consensus.processBlock(block); err != nil {
- recv.consensus.logger.Error("Failed to process block", "error", err)
- return
- }
- if block.Timestamp.After(recv.changeNotaryTime) {
- recv.round++
- recv.restartNotary <- true
- } else {
- recv.restartNotary <- false
- }
-}
-
-func (recv *consensusBAReceiver) PullBlocks(hashes common.Hashes) {
- recv.consensus.logger.Debug("Calling Network.PullBlocks", "hashes", hashes)
- recv.consensus.network.PullBlocks(hashes)
-}
-
-// consensusDKGReceiver implements dkgReceiver.
-type consensusDKGReceiver struct {
- ID types.NodeID
- gov Governance
- authModule *Authenticator
- nodeSetCache *NodeSetCache
- cfgModule *configurationChain
- network Network
- logger common.Logger
-}
-
-// ProposeDKGComplaint proposes a DKGComplaint.
-func (recv *consensusDKGReceiver) ProposeDKGComplaint(
- complaint *typesDKG.Complaint) {
- if err := recv.authModule.SignDKGComplaint(complaint); err != nil {
- recv.logger.Error("Failed to sign DKG complaint", "error", err)
- return
- }
- recv.logger.Debug("Calling Governace.AddDKGComplaint",
- "complaint", complaint)
- recv.gov.AddDKGComplaint(complaint.Round, complaint)
-}
-
-// ProposeDKGMasterPublicKey proposes a DKGMasterPublicKey.
-func (recv *consensusDKGReceiver) ProposeDKGMasterPublicKey(
- mpk *typesDKG.MasterPublicKey) {
- if err := recv.authModule.SignDKGMasterPublicKey(mpk); err != nil {
- recv.logger.Error("Failed to sign DKG master public key", "error", err)
- return
- }
- recv.logger.Debug("Calling Governance.AddDKGMasterPublicKey", "key", mpk)
- recv.gov.AddDKGMasterPublicKey(mpk.Round, mpk)
-}
-
-// ProposeDKGPrivateShare proposes a DKGPrivateShare.
-func (recv *consensusDKGReceiver) ProposeDKGPrivateShare(
- prv *typesDKG.PrivateShare) {
- if err := recv.authModule.SignDKGPrivateShare(prv); err != nil {
- recv.logger.Error("Failed to sign DKG private share", "error", err)
- return
- }
- receiverPubKey, exists := recv.nodeSetCache.GetPublicKey(prv.ReceiverID)
- if !exists {
- recv.logger.Error("Public key for receiver not found",
- "receiver", prv.ReceiverID.String()[:6])
- return
- }
- if prv.ReceiverID == recv.ID {
- go func() {
- if err := recv.cfgModule.processPrivateShare(prv); err != nil {
- recv.logger.Error("Failed to process self private share", "prvShare", prv)
- }
- }()
- } else {
- recv.logger.Debug("Calling Network.SendDKGPrivateShare",
- "receiver", hex.EncodeToString(receiverPubKey.Bytes()))
- recv.network.SendDKGPrivateShare(receiverPubKey, prv)
- }
-}
-
-// ProposeDKGAntiNackComplaint proposes a DKGPrivateShare as an anti-nack complaint.
-func (recv *consensusDKGReceiver) ProposeDKGAntiNackComplaint(
- prv *typesDKG.PrivateShare) {
- if prv.ProposerID == recv.ID {
- if err := recv.authModule.SignDKGPrivateShare(prv); err != nil {
- recv.logger.Error("Failed sign DKG private share", "error", err)
- return
- }
- }
- recv.logger.Debug("Calling Network.BroadcastDKGPrivateShare", "share", prv)
- recv.network.BroadcastDKGPrivateShare(prv)
-}
-
-// ProposeDKGFinalize proposes a DKGFinalize message.
-func (recv *consensusDKGReceiver) ProposeDKGFinalize(final *typesDKG.Finalize) {
- if err := recv.authModule.SignDKGFinalize(final); err != nil {
- recv.logger.Error("Faield to sign DKG finalize", "error", err)
- return
- }
- recv.logger.Debug("Calling Governance.AddDKGFinalize", "final", final)
- recv.gov.AddDKGFinalize(final.Round, final)
-}
-
-// Consensus implements DEXON Consensus algorithm.
-type Consensus struct {
- // Node Info.
- ID types.NodeID
- authModule *Authenticator
- currentConfig *types.Config
-
- // BA.
- baModules []*agreement
- receivers []*consensusBAReceiver
- baConfirmedBlock map[common.Hash]chan<- *types.Block
-
- // DKG.
- dkgRunning int32
- dkgReady *sync.Cond
- cfgModule *configurationChain
-
- // Dexon consensus v1's modules.
- lattice *Lattice
- ccModule *compactionChain
-
- // Interfaces.
- db blockdb.BlockDatabase
- app Application
- gov Governance
- network Network
- tickerObj Ticker
-
- // Misc.
- dMoment time.Time
- nodeSetCache *NodeSetCache
- round uint64
- roundToNotify uint64
- lock sync.RWMutex
- ctx context.Context
- ctxCancel context.CancelFunc
- event *common.Event
- logger common.Logger
-}
-
-// NewConsensus constructs a Consensus instance.
-func NewConsensus(
- dMoment time.Time,
- app Application,
- gov Governance,
- db blockdb.BlockDatabase,
- network Network,
- prv crypto.PrivateKey,
- logger common.Logger) *Consensus {
-
- // TODO(w): load latest blockHeight from DB, and use config at that height.
- var (
- round uint64
-		// Rounds 0 and 1 are decided at the beginning.
- roundToNotify = round + 2
- )
- logger.Debug("Calling Governance.Configuration", "round", round)
- config := gov.Configuration(round)
- nodeSetCache := NewNodeSetCache(gov)
- logger.Debug("Calling Governance.CRS", "round", round)
- crs := gov.CRS(round)
-	// Set up acking using information returned from Governance.
- nodes, err := nodeSetCache.GetNodeSet(round)
- if err != nil {
- panic(err)
- }
- // Setup auth module.
- authModule := NewAuthenticator(prv)
-	// Check if the application implements the Debug interface.
- debugApp, _ := app.(Debug)
- // Init lattice.
- lattice := NewLattice(
- dMoment, config, authModule, app, debugApp, db, logger)
- // Init configuration chain.
- ID := types.NewNodeID(prv.PublicKey())
- recv := &consensusDKGReceiver{
- ID: ID,
- gov: gov,
- authModule: authModule,
- nodeSetCache: nodeSetCache,
- network: network,
- logger: logger,
- }
- cfgModule := newConfigurationChain(
- ID,
- recv,
- gov,
- logger)
- recv.cfgModule = cfgModule
- // Construct Consensus instance.
- con := &Consensus{
- ID: ID,
- currentConfig: config,
- ccModule: newCompactionChain(gov),
- lattice: lattice,
- app: app,
- gov: gov,
- db: db,
- network: network,
- tickerObj: newTicker(gov, round, TickerBA),
- baConfirmedBlock: make(map[common.Hash]chan<- *types.Block),
- dkgReady: sync.NewCond(&sync.Mutex{}),
- cfgModule: cfgModule,
- dMoment: dMoment,
- nodeSetCache: nodeSetCache,
- authModule: authModule,
- event: common.NewEvent(),
- logger: logger,
- roundToNotify: roundToNotify,
- }
-
- validLeader := func(block *types.Block) bool {
- return lattice.SanityCheck(block) == nil
- }
-
- con.baModules = make([]*agreement, config.NumChains)
- con.receivers = make([]*consensusBAReceiver, config.NumChains)
- for i := uint32(0); i < config.NumChains; i++ {
- chainID := i
- recv := &consensusBAReceiver{
- consensus: con,
- chainID: chainID,
- restartNotary: make(chan bool, 1),
- }
- agreementModule := newAgreement(
- con.ID,
- recv,
- nodes.IDs,
- newLeaderSelector(crs, validLeader),
- con.authModule,
- )
-		// Hacky way to make the agreement module self-contained.
- recv.agreementModule = agreementModule
- recv.changeNotaryTime = dMoment
- con.baModules[chainID] = agreementModule
- con.receivers[chainID] = recv
- }
- return con
-}
-
-// Run starts running DEXON Consensus.
-func (con *Consensus) Run(initBlock *types.Block) {
- // Setup context.
- con.ctx, con.ctxCancel = context.WithCancel(context.Background())
- con.ccModule.init(initBlock)
- // TODO(jimmy-dexon): change AppendConfig to add config for specific round.
- for i := uint64(0); i < initBlock.Position.Round; i++ {
- con.logger.Debug("Calling Governance.Configuration", "round", i+1)
- cfg := con.gov.Configuration(i + 1)
- if err := con.lattice.AppendConfig(i+1, cfg); err != nil {
- panic(err)
- }
- }
- con.logger.Debug("Calling Network.ReceiveChan")
- go con.processMsg(con.network.ReceiveChan())
-	// Sleep until dMoment comes.
- time.Sleep(con.dMoment.Sub(time.Now().UTC()))
- con.cfgModule.registerDKG(con.round, int(con.currentConfig.DKGSetSize)/3+1)
- con.event.RegisterTime(con.dMoment.Add(con.currentConfig.RoundInterval/4),
- func(time.Time) {
- con.runDKGTSIG(con.round)
- })
- round1 := uint64(1)
- con.logger.Debug("Calling Governance.Configuration", "round", round1)
- con.lattice.AppendConfig(round1, con.gov.Configuration(round1))
- con.initialRound(con.dMoment)
- ticks := make([]chan struct{}, 0, con.currentConfig.NumChains)
- for i := uint32(0); i < con.currentConfig.NumChains; i++ {
- tick := make(chan struct{})
- ticks = append(ticks, tick)
- go con.runBA(i, tick)
- }
-
- // Reset ticker.
- <-con.tickerObj.Tick()
- <-con.tickerObj.Tick()
- for {
- <-con.tickerObj.Tick()
- for _, tick := range ticks {
- go func(tick chan struct{}) { tick <- struct{}{} }(tick)
- }
- }
-}
-
-func (con *Consensus) runBA(chainID uint32, tick <-chan struct{}) {
- // TODO(jimmy-dexon): move this function inside agreement.
- agreement := con.baModules[chainID]
- recv := con.receivers[chainID]
- recv.restartNotary <- true
- nIDs := make(map[types.NodeID]struct{})
- // Reset ticker
- <-tick
-BALoop:
- for {
- select {
- case <-con.ctx.Done():
- break BALoop
- default:
- }
- select {
- case newNotary := <-recv.restartNotary:
- if newNotary {
- recv.changeNotaryTime =
- recv.changeNotaryTime.Add(con.currentConfig.RoundInterval)
- nodes, err := con.nodeSetCache.GetNodeSet(recv.round)
- if err != nil {
- panic(err)
- }
- con.logger.Debug("Calling Governance.Configuration",
- "round", recv.round)
- con.logger.Debug("Calling Governance.CRS", "round", recv.round)
- nIDs = nodes.GetSubSet(
- int(con.gov.Configuration(recv.round).NotarySetSize),
- types.NewNotarySetTarget(con.gov.CRS(recv.round), chainID))
- }
- nextPos := con.lattice.NextPosition(chainID)
- nextPos.Round = recv.round
- agreement.restart(nIDs, nextPos)
- default:
- }
- if agreement.pullVotes() {
- pos := agreement.agreementID()
- con.logger.Debug("Calling Network.PullVotes for syncing votes",
- "position", pos)
- con.network.PullVotes(pos)
- }
- err := agreement.nextState()
- if err != nil {
- con.logger.Error("Failed to proceed to next state",
- "nodeID", con.ID.String(),
- "error", err)
- break BALoop
- }
- for i := 0; i < agreement.clocks(); i++ {
- // Priority select for agreement.done().
- select {
- case <-agreement.done():
- continue BALoop
- default:
- }
- select {
- case <-agreement.done():
- continue BALoop
- case <-tick:
- }
- }
- }
-}
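The inner loop gives agreement.done() priority over the tick channel by polling it in a non-blocking select before the blocking one. A minimal sketch of that priority-select idiom:

package main

import "fmt"

// step mirrors the loop body above: agreement completion takes priority
// over the tick, even when both channels are ready.
func step(done, tick chan struct{}) string {
	select {
	case <-done:
		return "agreement done"
	default:
	}
	select {
	case <-done:
		return "agreement done"
	case <-tick:
		return "tick"
	}
}

func main() {
	done := make(chan struct{}, 1)
	tick := make(chan struct{}, 1)
	tick <- struct{}{}
	fmt.Println(step(done, tick)) // only a tick pending: "tick"

	done <- struct{}{}
	tick <- struct{}{}
	fmt.Println(step(done, tick)) // both ready: "agreement done" wins
}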
-
-// runDKGTSIG starts running DKG+TSIG protocol.
-func (con *Consensus) runDKGTSIG(round uint64) {
- con.dkgReady.L.Lock()
- defer con.dkgReady.L.Unlock()
- if con.dkgRunning != 0 {
- return
- }
- con.dkgRunning = 1
- go func() {
- startTime := time.Now().UTC()
- defer func() {
- con.dkgReady.L.Lock()
- defer con.dkgReady.L.Unlock()
- con.dkgReady.Broadcast()
- con.dkgRunning = 2
- DKGTime := time.Now().Sub(startTime)
- if DKGTime.Nanoseconds() >=
- con.currentConfig.RoundInterval.Nanoseconds()/2 {
- con.logger.Warn("Your computer cannot finish DKG on time!",
- "nodeID", con.ID.String())
- }
- }()
- if err := con.cfgModule.runDKG(round); err != nil {
- panic(err)
- }
- nodes, err := con.nodeSetCache.GetNodeSet(round)
- if err != nil {
- panic(err)
- }
- con.logger.Debug("Calling Governance.Configuration", "round", round)
- hash := HashConfigurationBlock(
- nodes.IDs,
- con.gov.Configuration(round),
- common.Hash{},
- con.cfgModule.prevHash)
- psig, err := con.cfgModule.preparePartialSignature(
- round, hash)
- if err != nil {
- panic(err)
- }
- if err = con.authModule.SignDKGPartialSignature(psig); err != nil {
- panic(err)
- }
- if err = con.cfgModule.processPartialSignature(psig); err != nil {
- panic(err)
- }
- con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
- "proposer", psig.ProposerID,
- "round", psig.Round,
- "hash", psig.Hash)
- con.network.BroadcastDKGPartialSignature(psig)
- if _, err = con.cfgModule.runBlockTSig(round, hash); err != nil {
- panic(err)
- }
- }()
-}
-
-func (con *Consensus) runCRS() {
- // Start running next round CRS.
- con.logger.Debug("Calling Governance.CRS", "round", con.round)
- psig, err := con.cfgModule.preparePartialSignature(
- con.round, con.gov.CRS(con.round))
- if err != nil {
- con.logger.Error("Failed to prepare partial signature", "error", err)
- } else if err = con.authModule.SignDKGPartialSignature(psig); err != nil {
- con.logger.Error("Failed to sign DKG partial signature", "error", err)
- } else if err = con.cfgModule.processPartialSignature(psig); err != nil {
- con.logger.Error("Failed to process partial signature", "error", err)
- } else {
- con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
- "proposer", psig.ProposerID,
- "round", psig.Round,
- "hash", psig.Hash)
- con.network.BroadcastDKGPartialSignature(psig)
- con.logger.Debug("Calling Governance.CRS", "round", con.round)
- crs, err := con.cfgModule.runCRSTSig(con.round, con.gov.CRS(con.round))
- if err != nil {
- con.logger.Error("Failed to run CRS Tsig", "error", err)
- } else {
- con.logger.Debug("Calling Governance.ProposeCRS",
- "round", con.round+1,
- "crs", hex.EncodeToString(crs))
- con.gov.ProposeCRS(con.round+1, crs)
- }
- }
-}
-
-func (con *Consensus) initialRound(startTime time.Time) {
- select {
- case <-con.ctx.Done():
- return
- default:
- }
- con.logger.Debug("Calling Governance.Configuration", "round", con.round)
- con.currentConfig = con.gov.Configuration(con.round)
-
- con.event.RegisterTime(startTime.Add(con.currentConfig.RoundInterval/2),
- func(time.Time) {
- go func() {
- con.runCRS()
- ticker := newTicker(con.gov, con.round, TickerDKG)
- <-ticker.Tick()
-				// Normally, gov.CRS would return non-nil here. This loop guards against
-				// unexpected network fluctuation and ensures robustness.
- for (con.gov.CRS(con.round+1) == common.Hash{}) {
- con.logger.Info("CRS is not ready yet. Try again later...",
- "nodeID", con.ID)
- time.Sleep(500 * time.Millisecond)
- }
- con.cfgModule.registerDKG(
- con.round+1, int(con.currentConfig.DKGSetSize/3)+1)
- }()
- })
- con.event.RegisterTime(startTime.Add(con.currentConfig.RoundInterval*2/3),
- func(time.Time) {
- func() {
- con.dkgReady.L.Lock()
- defer con.dkgReady.L.Unlock()
- con.dkgRunning = 0
- }()
- con.runDKGTSIG(con.round + 1)
- })
- con.event.RegisterTime(startTime.Add(con.currentConfig.RoundInterval),
- func(time.Time) {
- // Change round.
- con.round++
- con.logger.Debug("Calling Governance.Configuration",
- "round", con.round+1)
- con.lattice.AppendConfig(con.round+1, con.gov.Configuration(con.round+1))
- con.initialRound(startTime.Add(con.currentConfig.RoundInterval))
- })
-}
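initialRound schedules three callbacks per round: run the CRS TSIG at half the round interval, reset and start DKG+TSIG for the next round at two thirds of it, and advance the round at the full interval, re-registering itself each time. A rough sketch of that schedule using time.AfterFunc in place of the library's common.Event:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const roundInterval = 300 * time.Millisecond // stand-in for config.RoundInterval
	start := time.Now()
	var wg sync.WaitGroup
	wg.Add(3)
	at := func(offset time.Duration, what string) {
		time.AfterFunc(offset, func() {
			defer wg.Done()
			fmt.Printf("%6v: %s\n",
				time.Since(start).Round(10*time.Millisecond), what)
		})
	}
	at(roundInterval/2, "run CRS + register next round's DKG")
	at(roundInterval*2/3, "run DKG+TSIG for next round")
	at(roundInterval, "advance round, schedule next round's events")
	wg.Wait()
}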
-
-// Stop the Consensus core.
-func (con *Consensus) Stop() {
- for _, a := range con.baModules {
- a.stop()
- }
- con.event.Reset()
- con.ctxCancel()
-}
-
-func (con *Consensus) processMsg(msgChan <-chan interface{}) {
-MessageLoop:
- for {
- var msg interface{}
- select {
- case msg = <-msgChan:
- case <-con.ctx.Done():
- return
- }
-
- switch val := msg.(type) {
- case *types.Block:
- if ch, exist := func() (chan<- *types.Block, bool) {
- con.lock.RLock()
- defer con.lock.RUnlock()
- ch, e := con.baConfirmedBlock[val.Hash]
- return ch, e
- }(); exist {
- if err := con.lattice.SanityCheck(val); err != nil {
- if err == ErrRetrySanityCheckLater {
- err = nil
- } else {
- con.logger.Error("SanityCheck failed", "error", err)
- continue MessageLoop
- }
- }
- func() {
- con.lock.Lock()
- defer con.lock.Unlock()
-					// In case the block is delivered multiple times.
- if _, exist := con.baConfirmedBlock[val.Hash]; !exist {
- return
- }
- delete(con.baConfirmedBlock, val.Hash)
- ch <- val
- }()
- } else if val.IsFinalized() {
- // For sync mode.
- if err := con.processFinalizedBlock(val); err != nil {
- con.logger.Error("Failed to process finalized block",
- "error", err)
- }
- } else {
- if err := con.preProcessBlock(val); err != nil {
- con.logger.Error("Failed to pre process block",
- "error", err)
- }
- }
- case *types.Vote:
- if err := con.ProcessVote(val); err != nil {
- con.logger.Error("Failed to process vote",
- "error", err)
- }
- case *types.AgreementResult:
- if err := con.ProcessAgreementResult(val); err != nil {
- con.logger.Error("Failed to process agreement result",
- "error", err)
- }
- case *types.BlockRandomnessResult:
- if err := con.ProcessBlockRandomnessResult(val); err != nil {
- con.logger.Error("Failed to process block randomness result",
- "error", err)
- }
- case *typesDKG.PrivateShare:
- if err := con.cfgModule.processPrivateShare(val); err != nil {
- con.logger.Error("Failed to process private share",
- "error", err)
- }
-
- case *typesDKG.PartialSignature:
- if err := con.cfgModule.processPartialSignature(val); err != nil {
- con.logger.Error("Failed to process partial signature",
- "error", err)
- }
- }
- }
-}
-
-func (con *Consensus) proposeBlock(chainID uint32, round uint64) *types.Block {
- block := &types.Block{
- Position: types.Position{
- ChainID: chainID,
- Round: round,
- },
- }
- if err := con.prepareBlock(block, time.Now().UTC()); err != nil {
- con.logger.Error("Failed to prepare block", "error", err)
- return nil
- }
- return block
-}
-
-func (con *Consensus) proposeEmptyBlock(
- chainID uint32) (*types.Block, error) {
- block := &types.Block{
- Position: types.Position{
- ChainID: chainID,
- },
- }
- if err := con.lattice.PrepareEmptyBlock(block); err != nil {
- return nil, err
- }
- return block, nil
-}
-
-// ProcessVote is the entry point to submit one vote to a Consensus instance.
-func (con *Consensus) ProcessVote(vote *types.Vote) (err error) {
- v := vote.Clone()
- err = con.baModules[v.Position.ChainID].processVote(v)
- return err
-}
-
-// ProcessAgreementResult processes the randomness request.
-func (con *Consensus) ProcessAgreementResult(
- rand *types.AgreementResult) error {
- // Sanity Check.
- notarySet, err := con.nodeSetCache.GetNotarySet(
- rand.Position.Round, rand.Position.ChainID)
- if err != nil {
- return err
- }
- if len(rand.Votes) < len(notarySet)/3*2+1 {
- return ErrNotEnoughVotes
- }
- if len(rand.Votes) > len(notarySet) {
- return ErrIncorrectVoteProposer
- }
- for _, vote := range rand.Votes {
- if vote.BlockHash != rand.BlockHash {
- return ErrIncorrectVoteBlockHash
- }
- if vote.Type != types.VoteCom {
- return ErrIncorrectVoteType
- }
- if vote.Position != rand.Position {
- return ErrIncorrectVotePosition
- }
- if _, exist := notarySet[vote.ProposerID]; !exist {
- return ErrIncorrectVoteProposer
- }
- ok, err := verifyVoteSignature(&vote)
- if err != nil {
- return err
- }
- if !ok {
- return ErrIncorrectVoteSignature
- }
- }
- // Syncing BA Module.
- agreement := con.baModules[rand.Position.ChainID]
- aID := agreement.agreementID()
- if rand.Position.Newer(&aID) {
- con.logger.Info("Syncing BA", "position", rand.Position)
- nodes, err := con.nodeSetCache.GetNodeSet(rand.Position.Round)
- if err != nil {
- return err
- }
- con.logger.Debug("Calling Network.PullBlocks for syncing BA",
- "hash", rand.BlockHash)
- con.network.PullBlocks(common.Hashes{rand.BlockHash})
- nIDs := nodes.GetSubSet(
- int(con.gov.Configuration(rand.Position.Round).NotarySetSize),
- types.NewNotarySetTarget(
- con.gov.CRS(rand.Position.Round), rand.Position.ChainID))
- for _, vote := range rand.Votes {
- agreement.processVote(&vote)
- }
- agreement.restart(nIDs, rand.Position)
- }
- // Calculating randomness.
- if rand.Position.Round == 0 {
- return nil
- }
- if !con.ccModule.blockRegistered(rand.BlockHash) {
- return nil
- }
- if DiffUint64(con.round, rand.Position.Round) > 1 {
- return nil
- }
- // Sanity check done.
- if !con.cfgModule.touchTSigHash(rand.BlockHash) {
- return nil
- }
- con.logger.Debug("Calling Network.BroadcastAgreementResult", "result", rand)
- con.network.BroadcastAgreementResult(rand)
- dkgSet, err := con.nodeSetCache.GetDKGSet(rand.Position.Round)
- if err != nil {
- return err
- }
- if _, exist := dkgSet[con.ID]; !exist {
- return nil
- }
- psig, err := con.cfgModule.preparePartialSignature(rand.Position.Round, rand.BlockHash)
- if err != nil {
- return err
- }
- if err = con.authModule.SignDKGPartialSignature(psig); err != nil {
- return err
- }
- if err = con.cfgModule.processPartialSignature(psig); err != nil {
- return err
- }
- con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
- "proposer", psig.ProposerID,
- "round", psig.Round,
- "hash", psig.Hash)
- con.network.BroadcastDKGPartialSignature(psig)
- go func() {
- tsig, err := con.cfgModule.runTSig(rand.Position.Round, rand.BlockHash)
- if err != nil {
- if err != ErrTSigAlreadyRunning {
- con.logger.Error("Faield to run TSIG", "error", err)
- }
- return
- }
- result := &types.BlockRandomnessResult{
- BlockHash: rand.BlockHash,
- Position: rand.Position,
- Randomness: tsig.Signature,
- }
- if err := con.ProcessBlockRandomnessResult(result); err != nil {
- con.logger.Error("Failed to process randomness result",
- "error", err)
- return
- }
- }()
- return nil
-}
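The sanity check above accepts an agreement result only when it carries at least len(notarySet)/3*2+1 votes and no more votes than there are notaries; for a notary set of 10 nodes that is 10/3*2+1 = 7 votes. A tiny sketch of the same integer arithmetic:

package main

import "fmt"

func enoughVotes(votes, notarySetSize int) bool {
	// Same integer arithmetic as the check above: more than 2/3 of the notary set.
	return votes >= notarySetSize/3*2+1 && votes <= notarySetSize
}

func main() {
	fmt.Println(enoughVotes(6, 10))  // false: threshold is 7
	fmt.Println(enoughVotes(7, 10))  // true
	fmt.Println(enoughVotes(11, 10)) // false: more votes than notaries
}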
-
-// ProcessBlockRandomnessResult processes the randomness result.
-func (con *Consensus) ProcessBlockRandomnessResult(
- rand *types.BlockRandomnessResult) error {
- if rand.Position.Round == 0 {
- return nil
- }
- if !con.ccModule.blockRegistered(rand.BlockHash) {
- return nil
- }
- round := rand.Position.Round
- v, ok, err := con.ccModule.tsigVerifier.UpdateAndGet(round)
- if err != nil {
- return err
- }
- if !ok {
- return nil
- }
- if !v.VerifySignature(
- rand.BlockHash, crypto.Signature{Signature: rand.Randomness}) {
- return ErrIncorrectBlockRandomnessResult
- }
- con.logger.Debug("Calling Network.BroadcastRandomnessResult",
- "hash", rand.BlockHash,
- "position", rand.Position,
- "randomness", hex.EncodeToString(rand.Randomness))
- con.network.BroadcastRandomnessResult(rand)
- if err := con.ccModule.processBlockRandomnessResult(rand); err != nil {
- if err != ErrBlockNotRegistered {
- return err
- }
- }
- return nil
-}
-
-// preProcessBlock performs Byzantine Agreement on the block.
-func (con *Consensus) preProcessBlock(b *types.Block) (err error) {
- if err = con.lattice.SanityCheck(b); err != nil {
- if err != ErrRetrySanityCheckLater {
- return
- }
- }
- if err = con.baModules[b.Position.ChainID].processBlock(b); err != nil {
- return err
- }
- return
-}
-
-// deliverBlock delivers a block to the application layer.
-func (con *Consensus) deliverBlock(b *types.Block) {
- // TODO(mission): clone types.FinalizationResult
- con.logger.Debug("Calling Application.BlockDelivered", "block", b)
- con.app.BlockDelivered(b.Hash, b.Finalization)
- if b.Position.Round+2 == con.roundToNotify {
-		// Only the first delivered block of that round
-		// triggers this notification.
- con.gov.NotifyRoundHeight(
- con.roundToNotify, b.Finalization.Height)
- con.roundToNotify++
- }
-}
-
-// processBlock is the entry point to submit one block to a Consensus instance.
-func (con *Consensus) processBlock(block *types.Block) (err error) {
- if err = con.db.Put(*block); err != nil && err != blockdb.ErrBlockExists {
- return
- }
- con.lock.Lock()
- defer con.lock.Unlock()
-	// Blocks processed by the lattice can be out-of-order, but the output of the
-	// lattice (deliveredBlocks) cannot.
- deliveredBlocks, err := con.lattice.ProcessBlock(block)
- if err != nil {
- return
- }
- // Pass delivered blocks to compaction chain.
- for _, b := range deliveredBlocks {
- if err = con.ccModule.processBlock(b); err != nil {
- return
- }
- go con.event.NotifyTime(b.Finalization.Timestamp)
- }
- deliveredBlocks = con.ccModule.extractBlocks()
- for _, b := range deliveredBlocks {
- if err = con.db.Update(*b); err != nil {
- panic(err)
- }
- con.cfgModule.untouchTSigHash(b.Hash)
- con.deliverBlock(b)
- }
- if err = con.lattice.PurgeBlocks(deliveredBlocks); err != nil {
- return
- }
- return
-}
-
-// processFinalizedBlock is the entry point for syncing blocks.
-func (con *Consensus) processFinalizedBlock(block *types.Block) (err error) {
- if err = con.lattice.SanityCheck(block); err != nil {
- return
- }
- con.ccModule.processFinalizedBlock(block)
- for {
- confirmed := con.ccModule.extractFinalizedBlocks()
- if len(confirmed) == 0 {
- break
- }
- if err = con.lattice.ctModule.processBlocks(confirmed); err != nil {
- return
- }
- for _, b := range confirmed {
- if err = con.db.Put(*b); err != nil {
- if err != blockdb.ErrBlockExists {
- return
- }
- err = nil
- }
- con.deliverBlock(b)
- }
- }
- return
-}
-
-// prepareBlock sets up header fields of the block based on its ProposerID.
-func (con *Consensus) prepareBlock(b *types.Block,
- proposeTime time.Time) (err error) {
- if err = con.lattice.PrepareBlock(b, proposeTime); err != nil {
- return
- }
- // TODO(mission): decide CRS by block's round, which could be determined by
- // block's info (ex. position, timestamp).
- con.logger.Debug("Calling Governance.CRS", "round", 0)
- if err = con.authModule.SignCRS(b, con.gov.CRS(0)); err != nil {
- return
- }
- return
-}
-
-// PrepareGenesisBlock sets up header fields for the genesis block.
-func (con *Consensus) PrepareGenesisBlock(b *types.Block,
- proposeTime time.Time) (err error) {
- if err = con.prepareBlock(b, proposeTime); err != nil {
- return
- }
- if len(b.Payload) != 0 {
- err = ErrGenesisBlockNotEmpty
- return
- }
- return
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto.go
deleted file mode 100644
index 2b7f7a7fc..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "encoding/binary"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
- "github.com/dexon-foundation/dexon-consensus/core/types"
- typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
-)
-
-func hashWitness(witness *types.Witness) (common.Hash, error) {
- binaryHeight := make([]byte, 8)
- binary.LittleEndian.PutUint64(binaryHeight, witness.Height)
- return crypto.Keccak256Hash(
- binaryHeight,
- witness.Data), nil
-}
-
-func hashBlock(block *types.Block) (common.Hash, error) {
- hashPosition := hashPosition(block.Position)
- // Handling Block.Acks.
- binaryAcks := make([][]byte, len(block.Acks))
- for idx, ack := range block.Acks {
- binaryAcks[idx] = ack[:]
- }
- hashAcks := crypto.Keccak256Hash(binaryAcks...)
- binaryTimestamp, err := block.Timestamp.UTC().MarshalBinary()
- if err != nil {
- return common.Hash{}, err
- }
- binaryWitness, err := hashWitness(&block.Witness)
- if err != nil {
- return common.Hash{}, err
- }
-
- hash := crypto.Keccak256Hash(
- block.ProposerID.Hash[:],
- block.ParentHash[:],
- hashPosition[:],
- hashAcks[:],
- binaryTimestamp[:],
- block.PayloadHash[:],
- binaryWitness[:])
- return hash, nil
-}
-
-func verifyBlockSignature(pubkey crypto.PublicKey,
- block *types.Block, sig crypto.Signature) (bool, error) {
- hash, err := hashBlock(block)
- if err != nil {
- return false, err
- }
- return pubkey.VerifySignature(hash, sig), nil
-}
-
-func hashVote(vote *types.Vote) common.Hash {
- binaryPeriod := make([]byte, 8)
- binary.LittleEndian.PutUint64(binaryPeriod, vote.Period)
-
- hashPosition := hashPosition(vote.Position)
-
- hash := crypto.Keccak256Hash(
- vote.ProposerID.Hash[:],
- vote.BlockHash[:],
- binaryPeriod,
- hashPosition[:],
- []byte{byte(vote.Type)},
- )
- return hash
-}
-
-func verifyVoteSignature(vote *types.Vote) (bool, error) {
- hash := hashVote(vote)
- pubKey, err := crypto.SigToPub(hash, vote.Signature)
- if err != nil {
- return false, err
- }
- if vote.ProposerID != types.NewNodeID(pubKey) {
- return false, nil
- }
- return true, nil
-}
-
-func hashCRS(block *types.Block, crs common.Hash) common.Hash {
- hashPos := hashPosition(block.Position)
- return crypto.Keccak256Hash(crs[:], hashPos[:])
-}
-
-func verifyCRSSignature(block *types.Block, crs common.Hash) (
- bool, error) {
- hash := hashCRS(block, crs)
- pubKey, err := crypto.SigToPub(hash, block.CRSSignature)
- if err != nil {
- return false, err
- }
- if block.ProposerID != types.NewNodeID(pubKey) {
- return false, nil
- }
- return true, nil
-}
-
-func hashPosition(position types.Position) common.Hash {
- binaryChainID := make([]byte, 4)
- binary.LittleEndian.PutUint32(binaryChainID, position.ChainID)
-
- binaryHeight := make([]byte, 8)
- binary.LittleEndian.PutUint64(binaryHeight, position.Height)
-
- return crypto.Keccak256Hash(
- binaryChainID,
- binaryHeight,
- )
-}
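Every hash helper in this file follows the same recipe: encode each fixed-width field little-endian, concatenate, and Keccak256 the result. The sketch below reproduces the position-hash recipe with golang.org/x/crypto/sha3's legacy Keccak-256 standing in for this package's crypto.Keccak256Hash:

package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// hashPosition mirrors the recipe above: little-endian chainID (4 bytes)
// and height (8 bytes), concatenated and hashed with Keccak-256.
func hashPosition(chainID uint32, height uint64) [32]byte {
	binaryChainID := make([]byte, 4)
	binary.LittleEndian.PutUint32(binaryChainID, chainID)
	binaryHeight := make([]byte, 8)
	binary.LittleEndian.PutUint64(binaryHeight, height)

	h := sha3.NewLegacyKeccak256()
	h.Write(binaryChainID)
	h.Write(binaryHeight)
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

func main() {
	fmt.Printf("%x\n", hashPosition(2, 42))
}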
-
-func hashDKGPrivateShare(prvShare *typesDKG.PrivateShare) common.Hash {
- binaryRound := make([]byte, 8)
- binary.LittleEndian.PutUint64(binaryRound, prvShare.Round)
-
- return crypto.Keccak256Hash(
- prvShare.ProposerID.Hash[:],
- prvShare.ReceiverID.Hash[:],
- binaryRound,
- prvShare.PrivateShare.Bytes(),
- )
-}
-
-func verifyDKGPrivateShareSignature(
- prvShare *typesDKG.PrivateShare) (bool, error) {
- hash := hashDKGPrivateShare(prvShare)
- pubKey, err := crypto.SigToPub(hash, prvShare.Signature)
- if err != nil {
- return false, err
- }
- if prvShare.ProposerID != types.NewNodeID(pubKey) {
- return false, nil
- }
- return true, nil
-}
-
-func hashDKGMasterPublicKey(mpk *typesDKG.MasterPublicKey) common.Hash {
- binaryRound := make([]byte, 8)
- binary.LittleEndian.PutUint64(binaryRound, mpk.Round)
-
- return crypto.Keccak256Hash(
- mpk.ProposerID.Hash[:],
- mpk.DKGID.GetLittleEndian(),
- mpk.PublicKeyShares.MasterKeyBytes(),
- binaryRound,
- )
-}
-
-// VerifyDKGMasterPublicKeySignature verifies DKGMasterPublicKey signature.
-func VerifyDKGMasterPublicKeySignature(
- mpk *typesDKG.MasterPublicKey) (bool, error) {
- hash := hashDKGMasterPublicKey(mpk)
- pubKey, err := crypto.SigToPub(hash, mpk.Signature)
- if err != nil {
- return false, err
- }
- if mpk.ProposerID != types.NewNodeID(pubKey) {
- return false, nil
- }
- return true, nil
-}
-
-func hashDKGComplaint(complaint *typesDKG.Complaint) common.Hash {
- binaryRound := make([]byte, 8)
- binary.LittleEndian.PutUint64(binaryRound, complaint.Round)
-
- hashPrvShare := hashDKGPrivateShare(&complaint.PrivateShare)
-
- return crypto.Keccak256Hash(
- complaint.ProposerID.Hash[:],
- binaryRound,
- hashPrvShare[:],
- )
-}
-
-// VerifyDKGComplaintSignature verifies DKGComplaint signature.
-func VerifyDKGComplaintSignature(
- complaint *typesDKG.Complaint) (bool, error) {
- if complaint.Round != complaint.PrivateShare.Round {
- return false, nil
- }
- hash := hashDKGComplaint(complaint)
- pubKey, err := crypto.SigToPub(hash, complaint.Signature)
- if err != nil {
- return false, err
- }
- if complaint.ProposerID != types.NewNodeID(pubKey) {
- return false, nil
- }
- if !complaint.IsNack() {
- return verifyDKGPrivateShareSignature(&complaint.PrivateShare)
- }
- return true, nil
-}
-
-func hashDKGPartialSignature(psig *typesDKG.PartialSignature) common.Hash {
- binaryRound := make([]byte, 8)
- binary.LittleEndian.PutUint64(binaryRound, psig.Round)
-
- return crypto.Keccak256Hash(
- psig.ProposerID.Hash[:],
- binaryRound,
- psig.Hash[:],
- psig.PartialSignature.Signature[:],
- )
-}
-
-func verifyDKGPartialSignatureSignature(
- psig *typesDKG.PartialSignature) (bool, error) {
- hash := hashDKGPartialSignature(psig)
- pubKey, err := crypto.SigToPub(hash, psig.Signature)
- if err != nil {
- return false, err
- }
- if psig.ProposerID != types.NewNodeID(pubKey) {
- return false, nil
- }
- return true, nil
-}
-
-func hashDKGFinalize(final *typesDKG.Finalize) common.Hash {
- binaryRound := make([]byte, 8)
- binary.LittleEndian.PutUint64(binaryRound, final.Round)
-
- return crypto.Keccak256Hash(
- final.ProposerID.Hash[:],
- binaryRound,
- )
-}
-
-// VerifyDKGFinalizeSignature verifies DKGFinalize signature.
-func VerifyDKGFinalizeSignature(
- final *typesDKG.Finalize) (bool, error) {
- hash := hashDKGFinalize(final)
- pubKey, err := crypto.SigToPub(hash, final.Signature)
- if err != nil {
- return false, err
- }
- if final.ProposerID != types.NewNodeID(pubKey) {
- return false, nil
- }
- return true, nil
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/constant.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/constant.go
deleted file mode 100644
index 3e7ef4574..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/constant.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package dkg
-
-import (
- "github.com/dexon-foundation/bls/ffi/go/bls"
-)
-
-const (
- curve = bls.CurveFp382_2
-)
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/dkg.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/dkg.go
deleted file mode 100644
index 5be16847d..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/dkg.go
+++ /dev/null
@@ -1,560 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package dkg
-
-import (
- "encoding/json"
- "fmt"
- "io"
-
- "github.com/dexon-foundation/bls/ffi/go/bls"
- "github.com/dexon-foundation/dexon/rlp"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
-)
-
-var (
-	// ErrDuplicatedShare is reported when adding a private key share of the same id.
- ErrDuplicatedShare = fmt.Errorf("invalid share")
- // ErrNoIDToRecover is reported when no id is provided for recovering private
- // key.
- ErrNoIDToRecover = fmt.Errorf("no id to recover private key")
- // ErrShareNotFound is reported when the private key share of id is not found
- // when recovering private key.
- ErrShareNotFound = fmt.Errorf("share not found")
-)
-
-const cryptoType = "bls"
-
-var publicKeyLength int
-
-func init() {
- bls.Init(curve)
-
- pubKey := &bls.PublicKey{}
- publicKeyLength = len(pubKey.Serialize())
-}
-
-// PrivateKey represents a private key structure that implements the
-// crypto.PrivateKey interface.
-type PrivateKey struct {
- privateKey bls.SecretKey
- publicKey PublicKey
-}
-
-// EncodeRLP implements rlp.Encoder
-func (prv *PrivateKey) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, prv.Bytes())
-}
-
-// DecodeRLP implements rlp.Decoder
-func (prv *PrivateKey) DecodeRLP(s *rlp.Stream) error {
- var b []byte
- if err := s.Decode(&b); err != nil {
- return err
- }
- return prv.SetBytes(b)
-}
-
-// MarshalJSON implements json.Marshaller.
-func (prv *PrivateKey) MarshalJSON() ([]byte, error) {
- return json.Marshal(&prv.privateKey)
-}
-
-// UnmarshalJSON implements json.Unmarshaller.
-func (prv *PrivateKey) UnmarshalJSON(data []byte) error {
- return json.Unmarshal(data, &prv.privateKey)
-}
-
-// ID is the id for DKG protocol.
-type ID = bls.ID
-
-// IDs is an array of ID.
-type IDs []ID
-
-// PublicKey represents a public key structure that implements the
-// crypto.PublicKey interface.
-type PublicKey struct {
- publicKey bls.PublicKey
-}
-
-// PrivateKeyShares represents private key shares for the DKG protocol.
-type PrivateKeyShares struct {
- shares []PrivateKey
- shareIndex map[ID]int
- masterPrivateKey []bls.SecretKey
-}
-
-// Equal checks equality between two PrivateKeyShares instances.
-func (prvs *PrivateKeyShares) Equal(other *PrivateKeyShares) bool {
- // Check shares.
- if len(prvs.shareIndex) != len(other.shareIndex) {
- return false
- }
- for dID, idx := range prvs.shareIndex {
- otherIdx, exists := other.shareIndex[dID]
- if !exists {
- return false
- }
- if !prvs.shares[idx].privateKey.IsEqual(
- &other.shares[otherIdx].privateKey) {
- return false
- }
- }
- // Check master private keys.
- if len(prvs.masterPrivateKey) != len(other.masterPrivateKey) {
- return false
- }
- for idx, m := range prvs.masterPrivateKey {
- if m.GetHexString() != other.masterPrivateKey[idx].GetHexString() {
- return false
- }
- }
- return true
-}
-
-// PublicKeyShares represents public key shares for the DKG protocol.
-type PublicKeyShares struct {
- shareCaches []PublicKey
- shareCacheIndex map[ID]int
- masterPublicKey []bls.PublicKey
-}
-
-type rlpPublicKeyShares struct {
- ShareCaches [][]byte
- ShareCacheIndexK [][]byte
- ShareCacheIndexV []uint32
- MasterPublicKey [][]byte
-}
-
-// Equal checks equality of two PublicKeyShares instances.
-func (pubs *PublicKeyShares) Equal(other *PublicKeyShares) bool {
- // Check shares.
- for dID, idx := range pubs.shareCacheIndex {
- otherIdx, exists := other.shareCacheIndex[dID]
- if !exists {
- continue
- }
- if !pubs.shareCaches[idx].publicKey.IsEqual(
- &other.shareCaches[otherIdx].publicKey) {
- return false
- }
- }
- // Check master public keys.
- if len(pubs.masterPublicKey) != len(other.masterPublicKey) {
- return false
- }
- for idx, m := range pubs.masterPublicKey {
- if m.GetHexString() != other.masterPublicKey[idx].GetHexString() {
- return false
- }
- }
- return true
-}
-
-// EncodeRLP implements rlp.Encoder
-func (pubs *PublicKeyShares) EncodeRLP(w io.Writer) error {
- var rps rlpPublicKeyShares
- for _, share := range pubs.shareCaches {
- rps.ShareCaches = append(rps.ShareCaches, share.Serialize())
- }
-
- for id, v := range pubs.shareCacheIndex {
- rps.ShareCacheIndexK = append(
- rps.ShareCacheIndexK, id.GetLittleEndian())
- rps.ShareCacheIndexV = append(rps.ShareCacheIndexV, uint32(v))
- }
-
- for _, m := range pubs.masterPublicKey {
- rps.MasterPublicKey = append(rps.MasterPublicKey, m.Serialize())
- }
-
- return rlp.Encode(w, rps)
-}
-
-// DecodeRLP implements rlp.Decoder
-func (pubs *PublicKeyShares) DecodeRLP(s *rlp.Stream) error {
- var dec rlpPublicKeyShares
- if err := s.Decode(&dec); err != nil {
- return err
- }
-
- if len(dec.ShareCacheIndexK) != len(dec.ShareCacheIndexV) {
- return fmt.Errorf("invalid shareIndex")
- }
-
- ps := NewEmptyPublicKeyShares()
- for _, share := range dec.ShareCaches {
- var publicKey PublicKey
- if err := publicKey.Deserialize(share); err != nil {
- return err
- }
- ps.shareCaches = append(ps.shareCaches, publicKey)
- }
-
- for i, k := range dec.ShareCacheIndexK {
- id, err := BytesID(k)
- if err != nil {
- return err
- }
- ps.shareCacheIndex[id] = int(dec.ShareCacheIndexV[i])
- }
-
- for _, k := range dec.MasterPublicKey {
- var key bls.PublicKey
- if err := key.Deserialize(k); err != nil {
- return err
- }
- ps.masterPublicKey = append(ps.masterPublicKey, key)
- }
-
- *pubs = *ps
- return nil
-}
-
-// MarshalJSON implements json.Marshaller.
-func (pubs *PublicKeyShares) MarshalJSON() ([]byte, error) {
- type Alias PublicKeyShares
- data := &struct {
- MasterPublicKeys []*bls.PublicKey `json:"master_public_keys"`
- }{
- make([]*bls.PublicKey, len(pubs.masterPublicKey)),
- }
- for i := range pubs.masterPublicKey {
- data.MasterPublicKeys[i] = &pubs.masterPublicKey[i]
- }
- return json.Marshal(data)
-}
-
-// UnmarshalJSON implements json.Unmarshaller.
-func (pubs *PublicKeyShares) UnmarshalJSON(data []byte) error {
- type Alias PublicKeyShares
- aux := &struct {
- MasterPublicKeys []*bls.PublicKey `json:"master_public_keys"`
- }{}
- if err := json.Unmarshal(data, &aux); err != nil {
- return err
- }
- mpk := make([]bls.PublicKey, len(aux.MasterPublicKeys))
- for i, pk := range aux.MasterPublicKeys {
- mpk[i] = *pk
- }
- pubs.masterPublicKey = mpk
- return nil
-}
-
-// Clone clones every field of PublicKeyShares. This method is mainly for
-// testing purposes and thus panics on error.
-func (pubs *PublicKeyShares) Clone() *PublicKeyShares {
- b, err := rlp.EncodeToBytes(pubs)
- if err != nil {
- panic(err)
- }
- pubsCopy := NewEmptyPublicKeyShares()
- if err := rlp.DecodeBytes(b, pubsCopy); err != nil {
- panic(err)
- }
- return pubsCopy
-}
-
-// NewID creates a new ID structure.
-func NewID(id []byte) ID {
- var blsID bls.ID
- blsID.SetLittleEndian(id)
- return blsID
-}
-
-// BytesID creates a new ID structure.
-// It returns an error if the byte slice is not valid.
-func BytesID(id []byte) (ID, error) {
- var blsID bls.ID
- err := blsID.SetLittleEndian(id)
- return blsID, err
-}
-
-// NewPrivateKey creates a new PrivateKey structure.
-func NewPrivateKey() *PrivateKey {
- var key bls.SecretKey
- key.SetByCSPRNG()
- return &PrivateKey{
- privateKey: key,
- publicKey: *newPublicKey(&key),
- }
-}
-
-// NewPrivateKeyShares creates a DKG private key shares of threshold t.
-func NewPrivateKeyShares(t int) (*PrivateKeyShares, *PublicKeyShares) {
- var prv bls.SecretKey
- prv.SetByCSPRNG()
- msk := prv.GetMasterSecretKey(t)
- mpk := bls.GetMasterPublicKey(msk)
- return &PrivateKeyShares{
- masterPrivateKey: msk,
- shareIndex: make(map[ID]int),
- }, &PublicKeyShares{
- shareCacheIndex: make(map[ID]int),
- masterPublicKey: mpk,
- }
-}
-
-// NewEmptyPrivateKeyShares creates an empty private key shares.
-func NewEmptyPrivateKeyShares() *PrivateKeyShares {
- return &PrivateKeyShares{
- shareIndex: make(map[ID]int),
- }
-}
-
-// SetParticipants sets the DKG participants.
-func (prvs *PrivateKeyShares) SetParticipants(IDs IDs) {
- prvs.shares = make([]PrivateKey, len(IDs))
- prvs.shareIndex = make(map[ID]int, len(IDs))
- for idx, ID := range IDs {
- prvs.shares[idx].privateKey.Set(prvs.masterPrivateKey, &ID)
- prvs.shareIndex[ID] = idx
- }
-}
-
-// AddShare adds a share.
-func (prvs *PrivateKeyShares) AddShare(ID ID, share *PrivateKey) error {
- if idx, exist := prvs.shareIndex[ID]; exist {
- if !share.privateKey.IsEqual(&prvs.shares[idx].privateKey) {
- return ErrDuplicatedShare
- }
- return nil
- }
- prvs.shareIndex[ID] = len(prvs.shares)
- prvs.shares = append(prvs.shares, *share)
- return nil
-}
-
-// RecoverPrivateKey recovers private key from the shares.
-func (prvs *PrivateKeyShares) RecoverPrivateKey(qualifyIDs IDs) (
- *PrivateKey, error) {
- var prv PrivateKey
- if len(qualifyIDs) == 0 {
- return nil, ErrNoIDToRecover
- }
- for i, ID := range qualifyIDs {
- idx, exist := prvs.shareIndex[ID]
- if !exist {
- return nil, ErrShareNotFound
- }
- if i == 0 {
- prv.privateKey = prvs.shares[idx].privateKey
- continue
- }
- prv.privateKey.Add(&prvs.shares[idx].privateKey)
- }
- return &prv, nil
-}
-
-// RecoverPublicKey recovers public key from the shares.
-func (prvs *PrivateKeyShares) RecoverPublicKey(qualifyIDs IDs) (
- *PublicKey, error) {
- var pub PublicKey
- if len(qualifyIDs) == 0 {
- return nil, ErrNoIDToRecover
- }
- for i, ID := range qualifyIDs {
- idx, exist := prvs.shareIndex[ID]
- if !exist {
- return nil, ErrShareNotFound
- }
- if i == 0 {
- pub.publicKey = *prvs.shares[idx].privateKey.GetPublicKey()
- continue
- }
- pub.publicKey.Add(prvs.shares[idx].privateKey.GetPublicKey())
- }
- return &pub, nil
-}
-
-// Share returns the share for the ID.
-func (prvs *PrivateKeyShares) Share(ID ID) (*PrivateKey, bool) {
- idx, exist := prvs.shareIndex[ID]
- if !exist {
- return nil, false
- }
- return &prvs.shares[idx], true
-}
-
-// NewEmptyPublicKeyShares creates an empty public key shares.
-func NewEmptyPublicKeyShares() *PublicKeyShares {
- return &PublicKeyShares{
- shareCacheIndex: make(map[ID]int),
- }
-}
-
-// Share returns the share for the ID.
-func (pubs *PublicKeyShares) Share(ID ID) (*PublicKey, error) {
- idx, exist := pubs.shareCacheIndex[ID]
- if exist {
- return &pubs.shareCaches[idx], nil
- }
- var pk PublicKey
- if err := pk.publicKey.Set(pubs.masterPublicKey, &ID); err != nil {
- return nil, err
- }
- pubs.AddShare(ID, &pk)
- return &pk, nil
-}
-
-// AddShare adds a share.
-func (pubs *PublicKeyShares) AddShare(ID ID, share *PublicKey) error {
- if idx, exist := pubs.shareCacheIndex[ID]; exist {
- if !share.publicKey.IsEqual(&pubs.shareCaches[idx].publicKey) {
- return ErrDuplicatedShare
- }
- return nil
- }
- pubs.shareCacheIndex[ID] = len(pubs.shareCaches)
- pubs.shareCaches = append(pubs.shareCaches, *share)
- return nil
-}
-
-// VerifyPrvShare verifies if the private key share is valid.
-func (pubs *PublicKeyShares) VerifyPrvShare(ID ID, share *PrivateKey) (
- bool, error) {
- var pk bls.PublicKey
- if err := pk.Set(pubs.masterPublicKey, &ID); err != nil {
- return false, err
- }
- return pk.IsEqual(share.privateKey.GetPublicKey()), nil
-}
-
-// VerifyPubShare verifies if the public key share is valid.
-func (pubs *PublicKeyShares) VerifyPubShare(ID ID, share *PublicKey) (
- bool, error) {
- var pk bls.PublicKey
- if err := pk.Set(pubs.masterPublicKey, &ID); err != nil {
- return false, err
- }
- return pk.IsEqual(&share.publicKey), nil
-}
-
-// RecoverPublicKey recovers the public key from the shares.
-func (pubs *PublicKeyShares) RecoverPublicKey(qualifyIDs IDs) (
- *PublicKey, error) {
- var pub PublicKey
- if len(qualifyIDs) == 0 {
- return nil, ErrNoIDToRecover
- }
- for i, ID := range qualifyIDs {
- pk, err := pubs.Share(ID)
- if err != nil {
- return nil, err
- }
- if i == 0 {
- pub.publicKey = pk.publicKey
- continue
- }
- pub.publicKey.Add(&pk.publicKey)
- }
- return &pub, nil
-}
-
-// MasterKeyBytes returns []byte representation of master public key.
-func (pubs *PublicKeyShares) MasterKeyBytes() []byte {
- bytes := make([]byte, 0, len(pubs.masterPublicKey)*publicKeyLength)
- for _, pk := range pubs.masterPublicKey {
- bytes = append(bytes, pk.Serialize()...)
- }
- return bytes
-}
-
-// newPublicKey creates a new PublicKey structure.
-func newPublicKey(prvKey *bls.SecretKey) *PublicKey {
- return &PublicKey{
- publicKey: *prvKey.GetPublicKey(),
- }
-}
-
-// newPublicKeyFromBytes creates a new PublicKey structure
-// from the bytes representation of bls.PublicKey.
-func newPublicKeyFromBytes(b []byte) (*PublicKey, error) {
- var pub PublicKey
- err := pub.publicKey.Deserialize(b)
- return &pub, err
-}
-
-// PublicKey returns the public key associated with this private key.
-func (prv *PrivateKey) PublicKey() crypto.PublicKey {
- return prv.publicKey
-}
-
-// Sign calculates a signature.
-func (prv *PrivateKey) Sign(hash common.Hash) (crypto.Signature, error) {
- msg := string(hash[:])
- sign := prv.privateKey.Sign(msg)
- return crypto.Signature{
- Type: cryptoType,
- Signature: sign.Serialize(),
- }, nil
-}
-
-// Bytes returns []byte representation of private key.
-func (prv *PrivateKey) Bytes() []byte {
- return prv.privateKey.GetLittleEndian()
-}
-
-// SetBytes sets the private key data from its []byte representation.
-func (prv *PrivateKey) SetBytes(bytes []byte) error {
- var key bls.SecretKey
- if err := key.SetLittleEndian(bytes); err != nil {
- return err
- }
- prv.privateKey = key
- prv.publicKey = *newPublicKey(&prv.privateKey)
- return nil
-}
-
-// String returns the string representation of the private key.
-func (prv *PrivateKey) String() string {
- return prv.privateKey.GetHexString()
-}
-
-// VerifySignature checks that the given public key created signature over hash.
-func (pub PublicKey) VerifySignature(
- hash common.Hash, signature crypto.Signature) bool {
- if len(signature.Signature) == 0 {
- return false
- }
- var sig bls.Sign
- if err := sig.Deserialize(signature.Signature[:]); err != nil {
- fmt.Println(err)
- return false
- }
- msg := string(hash[:])
- return sig.Verify(&pub.publicKey, msg)
-}
-
-// Bytes returns []byte representation of public key.
-func (pub PublicKey) Bytes() []byte {
- return pub.publicKey.Serialize()
-}
-
-// Serialize returns the bytes representation of the public key.
-func (pub *PublicKey) Serialize() []byte {
- return pub.publicKey.Serialize()
-}
-
-// Deserialize parses bytes representation of public key.
-func (pub *PublicKey) Deserialize(b []byte) error {
- return pub.publicKey.Deserialize(b)
-}
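
The share API above composes as follows: a dealer builds master shares of threshold t with NewPrivateKeyShares, derives one private share per participant via SetParticipants/Share, and anyone holding the public shares can check a received share with VerifyPrvShare. A small usage sketch under the import paths used by the vendored sources (everything outside the dkg API is illustrative):

package main

import (
	"fmt"

	"github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
)

func main() {
	ids := dkg.IDs{dkg.NewID([]byte{1}), dkg.NewID([]byte{2}), dkg.NewID([]byte{3})}
	// Master shares with threshold 2: any 2 of the 3 shares can later recover keys.
	prvShares, pubShares := dkg.NewPrivateKeyShares(2)
	prvShares.SetParticipants(ids)
	for _, id := range ids {
		share, found := prvShares.Share(id)
		if !found {
			panic("share not found")
		}
		ok, err := pubShares.VerifyPrvShare(id, share)
		fmt.Println(ok, err) // expected: true <nil>
	}
}
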
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/utils.go
deleted file mode 100644
index fa4ad9f05..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg/utils.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package dkg
-
-import (
- "fmt"
-
- "github.com/dexon-foundation/bls/ffi/go/bls"
-
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
-)
-
-// PartialSignature is a partial signature in DKG+TSIG protocol.
-type PartialSignature crypto.Signature
-
-var (
- // ErrEmptySignature is reported if the signature is empty.
- ErrEmptySignature = fmt.Errorf("invalid empty signature")
-)
-
-// RecoverSignature recovers TSIG signature.
-func RecoverSignature(sigs []PartialSignature, signerIDs IDs) (
- crypto.Signature, error) {
- blsSigs := make([]bls.Sign, len(sigs))
- for i, sig := range sigs {
- if len(sig.Signature) == 0 {
- return crypto.Signature{}, ErrEmptySignature
- }
- if err := blsSigs[i].Deserialize([]byte(sig.Signature)); err != nil {
- return crypto.Signature{}, err
- }
- }
- var recoverSig bls.Sign
- if err := recoverSig.Recover(blsSigs, []bls.ID(signerIDs)); err != nil {
- return crypto.Signature{}, err
- }
- return crypto.Signature{
- Type: cryptoType,
- Signature: recoverSig.Serialize()}, nil
-}
-
-// RecoverGroupPublicKey recovers group public key.
-func RecoverGroupPublicKey(pubShares []*PublicKeyShares) *PublicKey {
- var pub *PublicKey
- for _, pubShare := range pubShares {
- pk0 := pubShare.masterPublicKey[0]
- if pub == nil {
- pub = &PublicKey{
- publicKey: pk0,
- }
- } else {
- pub.publicKey.Add(&pk0)
- }
- }
- return pub
-}
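
RecoverSignature interpolates any `threshold` partial signatures over the same hash into one group signature, and RecoverGroupPublicKey yields the key it verifies under. A hedged sketch of that flow for a single dealer, reusing only the dkg and crypto APIs from the files above (variable names are illustrative):

package main

import (
	"fmt"

	"github.com/dexon-foundation/dexon-consensus/core/crypto"
	"github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
)

func main() {
	const threshold = 2
	ids := dkg.IDs{dkg.NewID([]byte{1}), dkg.NewID([]byte{2}), dkg.NewID([]byte{3})}
	prvShares, pubShares := dkg.NewPrivateKeyShares(threshold)
	prvShares.SetParticipants(ids)

	hash := crypto.Keccak256Hash([]byte("agreed payload"))
	// Any `threshold` participants sign the same hash with their private shares.
	psigs := make([]dkg.PartialSignature, 0, threshold)
	signers := make(dkg.IDs, 0, threshold)
	for _, id := range ids[:threshold] {
		share, _ := prvShares.Share(id)
		sig, err := share.Sign(hash)
		if err != nil {
			panic(err)
		}
		psigs = append(psigs, dkg.PartialSignature(sig))
		signers = append(signers, id)
	}
	// Combine the partial signatures and verify under the group public key.
	tsig, err := dkg.RecoverSignature(psigs, signers)
	if err != nil {
		panic(err)
	}
	groupPK := dkg.RecoverGroupPublicKey([]*dkg.PublicKeyShares{pubShares})
	fmt.Println(groupPK.VerifySignature(hash, tsig)) // expected: true
}
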
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa/ecdsa.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa/ecdsa.go
deleted file mode 100644
index 82e4dca4b..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa/ecdsa.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package ecdsa
-
-import (
- "crypto/ecdsa"
-
- dexCrypto "github.com/dexon-foundation/dexon/crypto"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
-)
-
-const cryptoType = "ecdsa"
-
-func init() {
- crypto.RegisterSigToPub(cryptoType, SigToPub)
-}
-
-// PrivateKey represents a private key structure used in geth and implements
-// the crypto.PrivateKey interface.
-type PrivateKey struct {
- privateKey *ecdsa.PrivateKey
-}
-
-// PublicKey represents a public key structure used in geth and implements
-// the crypto.PublicKey interface.
-type PublicKey struct {
- publicKey *ecdsa.PublicKey
-}
-
-// NewPrivateKey creates a new PrivateKey structure.
-func NewPrivateKey() (*PrivateKey, error) {
- key, err := dexCrypto.GenerateKey()
- if err != nil {
- return nil, err
- }
- return &PrivateKey{privateKey: key}, nil
-}
-
-// NewPrivateKeyFromECDSA creates a new PrivateKey structure from
-// ecdsa.PrivateKey.
-func NewPrivateKeyFromECDSA(key *ecdsa.PrivateKey) *PrivateKey {
- return &PrivateKey{privateKey: key}
-}
-
-// NewPublicKeyFromECDSA creates a new PublicKey structure from
-// ecdsa.PublicKey.
-func NewPublicKeyFromECDSA(key *ecdsa.PublicKey) *PublicKey {
- return &PublicKey{publicKey: key}
-}
-
-// NewPublicKeyFromByteSlice constructs a PublicKey instance from
-// a byte slice.
-func NewPublicKeyFromByteSlice(b []byte) (crypto.PublicKey, error) {
- pub, err := dexCrypto.UnmarshalPubkey(b)
- if err != nil {
- return &PublicKey{}, err
- }
- return &PublicKey{publicKey: pub}, nil
-}
-
-// PublicKey returns the public key associated with this private key.
-func (prv *PrivateKey) PublicKey() crypto.PublicKey {
- return NewPublicKeyFromECDSA(&(prv.privateKey.PublicKey))
-}
-
-// Sign calculates an ECDSA signature.
-//
-// This function is susceptible to chosen plaintext attacks that can leak
-// information about the private key that is used for signing. Callers must
-// be aware that the given hash cannot be chosen by an adversary. A common
-// solution is to hash any input before calculating the signature.
-//
-// The produced signature is in the [R || S || V] format where V is 0 or 1.
-func (prv *PrivateKey) Sign(hash common.Hash) (
- sig crypto.Signature, err error) {
- s, err := dexCrypto.Sign(hash[:], prv.privateKey)
- sig = crypto.Signature{
- Type: cryptoType,
- Signature: s,
- }
- return
-}
-
-// VerifySignature checks that the given public key created signature over hash.
-// The public key should be in compressed (33 bytes) or uncompressed (65 bytes)
-// format.
-// The signature should have the 64 byte [R || S] format.
-func (pub *PublicKey) VerifySignature(
- hash common.Hash, signature crypto.Signature) bool {
- sig := signature.Signature
- if len(sig) == 65 {
- // The last byte is for ecrecover.
- sig = sig[:64]
- }
- return dexCrypto.VerifySignature(pub.Bytes(), hash[:], sig)
-}
-
-// Compress encodes a public key to the 33-byte compressed format.
-func (pub *PublicKey) Compress() []byte {
- return dexCrypto.CompressPubkey(pub.publicKey)
-}
-
-// Bytes returns the []byte representation of uncompressed public key. (65 bytes)
-func (pub *PublicKey) Bytes() []byte {
- return dexCrypto.FromECDSAPub(pub.publicKey)
-}
-
-// SigToPub returns the PublicKey that created the given signature.
-func SigToPub(
- hash common.Hash, signature crypto.Signature) (crypto.PublicKey, error) {
- key, err := dexCrypto.SigToPub(hash[:], signature.Signature[:])
- if err != nil {
- return &PublicKey{}, err
- }
- return &PublicKey{publicKey: key}, nil
-}
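
The wrapper above signs a 32-byte hash and yields a 65-byte [R || S || V] signature; VerifySignature strips the recovery byte before checking. A brief round-trip sketch using only the vendored crypto and ecdsa packages (illustrative example code):

package main

import (
	"fmt"

	"github.com/dexon-foundation/dexon-consensus/core/crypto"
	"github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa"
)

func main() {
	prv, err := ecdsa.NewPrivateKey()
	if err != nil {
		panic(err)
	}
	// Hash the message first; Sign expects a 32-byte digest, not raw input.
	hash := crypto.Keccak256Hash([]byte("message to sign"))
	sig, err := prv.Sign(hash)
	if err != nil {
		panic(err)
	}
	fmt.Println(prv.PublicKey().VerifySignature(hash, sig)) // expected: true
}
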
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/interfaces.go
deleted file mode 100644
index f3e01e42c..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/interfaces.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package crypto
-
-import (
- "github.com/dexon-foundation/dexon-consensus/common"
-)
-
-// Signature is the basic signature type in DEXON.
-type Signature struct {
- Type string
- Signature []byte
-}
-
-// PrivateKey describes the asymmetric cryptography interface that interacts
-// with the private key.
-type PrivateKey interface {
-	// PublicKey returns the public key associated with this private key.
- PublicKey() PublicKey
-
- // Sign calculates a signature.
- Sign(hash common.Hash) (Signature, error)
-}
-
-// PublicKey describes the asymmetric cryptography interface that interacts
-// with the public key.
-type PublicKey interface {
- // VerifySignature checks that the given public key created signature over hash.
- VerifySignature(hash common.Hash, signature Signature) bool
-
- // Bytes returns the []byte representation of public key.
- Bytes() []byte
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/utils.go
deleted file mode 100644
index 59e91f5a5..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/crypto/utils.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package crypto
-
-import (
- "encoding/hex"
- "fmt"
-
- "github.com/dexon-foundation/dexon/crypto"
-
- "github.com/dexon-foundation/dexon-consensus/common"
-)
-
-var (
-	// ErrSigToPubTypeNotFound is reported if no sigToPub function is registered for the type.
- ErrSigToPubTypeNotFound = fmt.Errorf("type of sigToPub is not found")
-
-	// ErrSigToPubTypeAlreadyExist is reported if the type is already registered.
- ErrSigToPubTypeAlreadyExist = fmt.Errorf("type of sigToPub is already exist")
-)
-
-// SigToPubFn is a function to recover public key from signature.
-type SigToPubFn func(hash common.Hash, signature Signature) (PublicKey, error)
-
-var sigToPubCB map[string]SigToPubFn
-
-func init() {
- sigToPubCB = make(map[string]SigToPubFn)
-}
-
-// Keccak256Hash calculates and returns the Keccak256 hash of the input data,
-// converting it to an internal Hash data structure.
-func Keccak256Hash(data ...[]byte) (h common.Hash) {
- return common.Hash(crypto.Keccak256Hash(data...))
-}
-
-// Clone returns a deep copy of a signature.
-func (sig Signature) Clone() Signature {
- return Signature{
- Type: sig.Type,
- Signature: sig.Signature[:],
- }
-}
-
-func (sig Signature) String() string {
- return hex.EncodeToString([]byte(sig.Signature[:]))
-}
-
-// RegisterSigToPub registers a sigToPub function of type.
-func RegisterSigToPub(sigType string, sigToPub SigToPubFn) error {
- if _, exist := sigToPubCB[sigType]; exist {
- return ErrSigToPubTypeAlreadyExist
- }
- sigToPubCB[sigType] = sigToPub
- return nil
-}
-
-// SigToPub recovers public key from signature.
-func SigToPub(hash common.Hash, signature Signature) (PublicKey, error) {
- sigToPub, exist := sigToPubCB[signature.Type]
- if !exist {
- return nil, ErrSigToPubTypeNotFound
- }
- return sigToPub(hash, signature)
-}
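
The registry above dispatches on Signature.Type: a backend registers its recover function in init (the ecdsa package registers "ecdsa"), and crypto.SigToPub then picks the right one without the caller knowing the backend. A short sketch of that dispatch (illustrative, not part of the vendored sources):

package main

import (
	"bytes"
	"fmt"

	"github.com/dexon-foundation/dexon-consensus/core/crypto"
	"github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa"
)

func main() {
	prv, err := ecdsa.NewPrivateKey()
	if err != nil {
		panic(err)
	}
	hash := crypto.Keccak256Hash([]byte("payload"))
	sig, err := prv.Sign(hash)
	if err != nil {
		panic(err)
	}
	// SigToPub looks up the registered function by sig.Type ("ecdsa" here)
	// and recovers the signer's public key from the signature.
	pub, err := crypto.SigToPub(hash, sig)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(pub.Bytes(), prv.PublicKey().Bytes())) // expected: true
}
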
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/dkg-tsig-protocol.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/dkg-tsig-protocol.go
deleted file mode 100644
index 6645ecbae..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/dkg-tsig-protocol.go
+++ /dev/null
@@ -1,578 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "fmt"
- "sync"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
- "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
- "github.com/dexon-foundation/dexon-consensus/core/types"
- typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
-)
-
-// Errors for dkg module.
-var (
- ErrNotDKGParticipant = fmt.Errorf(
- "not a DKG participant")
- ErrNotQualifyDKGParticipant = fmt.Errorf(
- "not a qualified DKG participant")
- ErrIDShareNotFound = fmt.Errorf(
- "private share not found for specific ID")
- ErrNotReachThreshold = fmt.Errorf(
- "threshold not reach")
- ErrInvalidThreshold = fmt.Errorf(
- "invalid threshold")
- ErrIncorrectPrivateShareSignature = fmt.Errorf(
- "incorrect private share signature")
- ErrMismatchPartialSignatureHash = fmt.Errorf(
- "mismatch partialSignature hash")
- ErrIncorrectPartialSignatureSignature = fmt.Errorf(
- "incorrect partialSignature signature")
- ErrIncorrectPartialSignature = fmt.Errorf(
- "incorrect partialSignature")
- ErrNotEnoughtPartialSignatures = fmt.Errorf(
- "not enough of partial signatures")
- ErrRoundAlreadyPurged = fmt.Errorf(
- "cache of round already been purged")
-)
-
-type dkgReceiver interface {
- // ProposeDKGComplaint proposes a DKGComplaint.
- ProposeDKGComplaint(complaint *typesDKG.Complaint)
-
-	// ProposeDKGMasterPublicKey proposes a DKGMasterPublicKey.
- ProposeDKGMasterPublicKey(mpk *typesDKG.MasterPublicKey)
-
-	// ProposeDKGPrivateShare proposes a DKGPrivateShare.
- ProposeDKGPrivateShare(prv *typesDKG.PrivateShare)
-
-	// ProposeDKGAntiNackComplaint proposes a DKGPrivateShare as an anti-nack complaint.
- ProposeDKGAntiNackComplaint(prv *typesDKG.PrivateShare)
-
-	// ProposeDKGFinalize proposes a DKGFinalize message.
- ProposeDKGFinalize(final *typesDKG.Finalize)
-}
-
-type dkgProtocol struct {
- ID types.NodeID
- recv dkgReceiver
- round uint64
- threshold int
- idMap map[types.NodeID]dkg.ID
- mpkMap map[types.NodeID]*dkg.PublicKeyShares
- masterPrivateShare *dkg.PrivateKeyShares
- prvShares *dkg.PrivateKeyShares
- prvSharesReceived map[types.NodeID]struct{}
- nodeComplained map[types.NodeID]struct{}
-	// The anti-complaint for Complaint[from][to] is saved to antiComplaint[from][to].
- antiComplaintReceived map[types.NodeID]map[types.NodeID]struct{}
-}
-
-type dkgShareSecret struct {
- privateKey *dkg.PrivateKey
-}
-
-// DKGGroupPublicKey is the result of DKG protocol.
-type DKGGroupPublicKey struct {
- round uint64
- qualifyIDs dkg.IDs
- qualifyNodeIDs map[types.NodeID]struct{}
- idMap map[types.NodeID]dkg.ID
- publicKeys map[types.NodeID]*dkg.PublicKey
- groupPublicKey *dkg.PublicKey
- threshold int
-}
-
-// TSigVerifier is the interface verifying threshold signature.
-type TSigVerifier interface {
- VerifySignature(hash common.Hash, sig crypto.Signature) bool
-}
-
-// TSigVerifierCacheInterface specifies interface used by TSigVerifierCache.
-type TSigVerifierCacheInterface interface {
- // Configuration returns the configuration at a given round.
- // Return the genesis configuration if round == 0.
- Configuration(round uint64) *types.Config
-
- // DKGComplaints gets all the DKGComplaints of round.
- DKGComplaints(round uint64) []*typesDKG.Complaint
-
- // DKGMasterPublicKeys gets all the DKGMasterPublicKey of round.
- DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey
-
- // IsDKGFinal checks if DKG is final.
- IsDKGFinal(round uint64) bool
-}
-
-// TSigVerifierCache is the cache for TSigVerifier.
-type TSigVerifierCache struct {
- intf TSigVerifierCacheInterface
- verifier map[uint64]TSigVerifier
- minRound uint64
- cacheSize int
- lock sync.RWMutex
-}
-
-type tsigProtocol struct {
- groupPublicKey *DKGGroupPublicKey
- hash common.Hash
- sigs map[dkg.ID]dkg.PartialSignature
- threshold int
-}
-
-func newDKGID(ID types.NodeID) dkg.ID {
- return dkg.NewID(ID.Hash[:])
-}
-
-func newDKGProtocol(
- ID types.NodeID,
- recv dkgReceiver,
- round uint64,
- threshold int) *dkgProtocol {
-
- prvShare, pubShare := dkg.NewPrivateKeyShares(threshold)
-
- recv.ProposeDKGMasterPublicKey(&typesDKG.MasterPublicKey{
- ProposerID: ID,
- Round: round,
- DKGID: newDKGID(ID),
- PublicKeyShares: *pubShare,
- })
-
- return &dkgProtocol{
- ID: ID,
- recv: recv,
- round: round,
- threshold: threshold,
- idMap: make(map[types.NodeID]dkg.ID),
- mpkMap: make(map[types.NodeID]*dkg.PublicKeyShares),
- masterPrivateShare: prvShare,
- prvShares: dkg.NewEmptyPrivateKeyShares(),
- prvSharesReceived: make(map[types.NodeID]struct{}),
- nodeComplained: make(map[types.NodeID]struct{}),
- antiComplaintReceived: make(map[types.NodeID]map[types.NodeID]struct{}),
- }
-}
-
-func (d *dkgProtocol) processMasterPublicKeys(
- mpks []*typesDKG.MasterPublicKey) error {
- d.idMap = make(map[types.NodeID]dkg.ID, len(mpks))
- d.mpkMap = make(map[types.NodeID]*dkg.PublicKeyShares, len(mpks))
- d.prvSharesReceived = make(map[types.NodeID]struct{}, len(mpks))
- ids := make(dkg.IDs, len(mpks))
- for i := range mpks {
- nID := mpks[i].ProposerID
- d.idMap[nID] = mpks[i].DKGID
- d.mpkMap[nID] = &mpks[i].PublicKeyShares
- ids[i] = mpks[i].DKGID
- }
- d.masterPrivateShare.SetParticipants(ids)
- for _, mpk := range mpks {
- share, ok := d.masterPrivateShare.Share(mpk.DKGID)
- if !ok {
- return ErrIDShareNotFound
- }
- d.recv.ProposeDKGPrivateShare(&typesDKG.PrivateShare{
- ProposerID: d.ID,
- ReceiverID: mpk.ProposerID,
- Round: d.round,
- PrivateShare: *share,
- })
- }
- return nil
-}
-
-func (d *dkgProtocol) proposeNackComplaints() {
- for nID := range d.mpkMap {
- if _, exist := d.prvSharesReceived[nID]; exist {
- continue
- }
- d.recv.ProposeDKGComplaint(&typesDKG.Complaint{
- ProposerID: d.ID,
- Round: d.round,
- PrivateShare: typesDKG.PrivateShare{
- ProposerID: nID,
- Round: d.round,
- },
- })
- }
-}
-
-func (d *dkgProtocol) processNackComplaints(complaints []*typesDKG.Complaint) (
- err error) {
- for _, complaint := range complaints {
- if !complaint.IsNack() {
- continue
- }
- if complaint.PrivateShare.ProposerID != d.ID {
- continue
- }
- id, exist := d.idMap[complaint.ProposerID]
- if !exist {
- err = ErrNotDKGParticipant
- continue
- }
- share, ok := d.masterPrivateShare.Share(id)
- if !ok {
- err = ErrIDShareNotFound
- continue
- }
- d.recv.ProposeDKGAntiNackComplaint(&typesDKG.PrivateShare{
- ProposerID: d.ID,
- ReceiverID: complaint.ProposerID,
- Round: d.round,
- PrivateShare: *share,
- })
- }
- return
-}
-
-func (d *dkgProtocol) enforceNackComplaints(complaints []*typesDKG.Complaint) {
- for _, complaint := range complaints {
- if !complaint.IsNack() {
- continue
- }
- to := complaint.PrivateShare.ProposerID
- // Do not propose nack complaint to itself.
- if to == d.ID {
- continue
- }
- from := complaint.ProposerID
- // Nack complaint is already proposed.
- if from == d.ID {
- continue
- }
- if _, exist :=
- d.antiComplaintReceived[from][to]; !exist {
- d.recv.ProposeDKGComplaint(&typesDKG.Complaint{
- ProposerID: d.ID,
- Round: d.round,
- PrivateShare: typesDKG.PrivateShare{
- ProposerID: to,
- Round: d.round,
- },
- })
- }
- }
-}
-
-func (d *dkgProtocol) sanityCheck(prvShare *typesDKG.PrivateShare) error {
- if _, exist := d.idMap[prvShare.ProposerID]; !exist {
- return ErrNotDKGParticipant
- }
- ok, err := verifyDKGPrivateShareSignature(prvShare)
- if err != nil {
- return err
- }
- if !ok {
- return ErrIncorrectPrivateShareSignature
- }
- return nil
-}
-
-func (d *dkgProtocol) processPrivateShare(
- prvShare *typesDKG.PrivateShare) error {
- if d.round != prvShare.Round {
- return nil
- }
- receiverID, exist := d.idMap[prvShare.ReceiverID]
-	// The share's receiver is not a DKG participant; ignore the private share.
- if !exist {
- return nil
- }
- if err := d.sanityCheck(prvShare); err != nil {
- return err
- }
- mpk := d.mpkMap[prvShare.ProposerID]
- ok, err := mpk.VerifyPrvShare(receiverID, &prvShare.PrivateShare)
- if err != nil {
- return err
- }
- if prvShare.ReceiverID == d.ID {
- d.prvSharesReceived[prvShare.ProposerID] = struct{}{}
- }
- if !ok {
- if _, exist := d.nodeComplained[prvShare.ProposerID]; exist {
- return nil
- }
- complaint := &typesDKG.Complaint{
- ProposerID: d.ID,
- Round: d.round,
- PrivateShare: *prvShare,
- }
- d.nodeComplained[prvShare.ProposerID] = struct{}{}
- d.recv.ProposeDKGComplaint(complaint)
- } else if prvShare.ReceiverID == d.ID {
- sender := d.idMap[prvShare.ProposerID]
- if err := d.prvShares.AddShare(sender, &prvShare.PrivateShare); err != nil {
- return err
- }
- } else {
- // The prvShare is an anti complaint.
- if _, exist := d.antiComplaintReceived[prvShare.ReceiverID]; !exist {
- d.antiComplaintReceived[prvShare.ReceiverID] =
- make(map[types.NodeID]struct{})
- d.recv.ProposeDKGAntiNackComplaint(prvShare)
- }
- d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID] =
- struct{}{}
- }
- return nil
-}
-
-func (d *dkgProtocol) proposeFinalize() {
- d.recv.ProposeDKGFinalize(&typesDKG.Finalize{
- ProposerID: d.ID,
- Round: d.round,
- })
-}
-
-func (d *dkgProtocol) recoverShareSecret(qualifyIDs dkg.IDs) (
- *dkgShareSecret, error) {
- if len(qualifyIDs) < d.threshold {
- return nil, ErrNotReachThreshold
- }
- prvKey, err := d.prvShares.RecoverPrivateKey(qualifyIDs)
- if err != nil {
- return nil, err
- }
- return &dkgShareSecret{
- privateKey: prvKey,
- }, nil
-}
-
-func (ss *dkgShareSecret) sign(hash common.Hash) dkg.PartialSignature {
-	// DKG sign always succeeds.
- sig, _ := ss.privateKey.Sign(hash)
- return dkg.PartialSignature(sig)
-}
-
-// NewDKGGroupPublicKey creates a DKGGroupPublicKey instance.
-func NewDKGGroupPublicKey(
- round uint64,
- mpks []*typesDKG.MasterPublicKey, complaints []*typesDKG.Complaint,
- threshold int) (
- *DKGGroupPublicKey, error) {
-
- if len(mpks) < threshold {
- return nil, ErrInvalidThreshold
- }
-
-	// Calculate qualified members.
- disqualifyIDs := map[types.NodeID]struct{}{}
- complaintsByID := map[types.NodeID]int{}
- for _, complaint := range complaints {
- if complaint.IsNack() {
- complaintsByID[complaint.PrivateShare.ProposerID]++
- } else {
- disqualifyIDs[complaint.PrivateShare.ProposerID] = struct{}{}
- }
- }
- for nID, num := range complaintsByID {
- if num > threshold {
- disqualifyIDs[nID] = struct{}{}
- }
- }
- qualifyIDs := make(dkg.IDs, 0, len(mpks)-len(disqualifyIDs))
- qualifyNodeIDs := make(map[types.NodeID]struct{})
- mpkMap := make(map[dkg.ID]*typesDKG.MasterPublicKey, cap(qualifyIDs))
- idMap := make(map[types.NodeID]dkg.ID)
- for _, mpk := range mpks {
- if _, exist := disqualifyIDs[mpk.ProposerID]; exist {
- continue
- }
- mpkMap[mpk.DKGID] = mpk
- idMap[mpk.ProposerID] = mpk.DKGID
- qualifyIDs = append(qualifyIDs, mpk.DKGID)
- qualifyNodeIDs[mpk.ProposerID] = struct{}{}
- }
-	// Recover qualified members' public keys.
- pubKeys := make(map[types.NodeID]*dkg.PublicKey, len(qualifyIDs))
- for _, recvID := range qualifyIDs {
- pubShares := dkg.NewEmptyPublicKeyShares()
- for _, id := range qualifyIDs {
- pubShare, err := mpkMap[id].PublicKeyShares.Share(recvID)
- if err != nil {
- return nil, err
- }
- if err := pubShares.AddShare(id, pubShare); err != nil {
- return nil, err
- }
- }
- pubKey, err := pubShares.RecoverPublicKey(qualifyIDs)
- if err != nil {
- return nil, err
- }
- pubKeys[mpkMap[recvID].ProposerID] = pubKey
- }
- // Recover Group Public Key.
- pubShares := make([]*dkg.PublicKeyShares, 0, len(qualifyIDs))
- for _, id := range qualifyIDs {
- pubShares = append(pubShares, &mpkMap[id].PublicKeyShares)
- }
- groupPK := dkg.RecoverGroupPublicKey(pubShares)
- return &DKGGroupPublicKey{
- round: round,
- qualifyIDs: qualifyIDs,
- qualifyNodeIDs: qualifyNodeIDs,
- idMap: idMap,
- publicKeys: pubKeys,
- threshold: threshold,
- groupPublicKey: groupPK,
- }, nil
-}
-
-// VerifySignature verifies if the signature is correct.
-func (gpk *DKGGroupPublicKey) VerifySignature(
- hash common.Hash, sig crypto.Signature) bool {
- return gpk.groupPublicKey.VerifySignature(hash, sig)
-}
-
-// NewTSigVerifierCache creates a TSigVerifierCache instance.
-func NewTSigVerifierCache(
- intf TSigVerifierCacheInterface, cacheSize int) *TSigVerifierCache {
- return &TSigVerifierCache{
- intf: intf,
- verifier: make(map[uint64]TSigVerifier),
- cacheSize: cacheSize,
- }
-}
-
-// UpdateAndGet calls Update and then Get.
-func (tc *TSigVerifierCache) UpdateAndGet(round uint64) (
- TSigVerifier, bool, error) {
- ok, err := tc.Update(round)
- if err != nil {
- return nil, false, err
- }
- if !ok {
- return nil, false, nil
- }
- v, ok := tc.Get(round)
- return v, ok, nil
-}
-
-// Update updates the cache and returns whether the update succeeded.
-func (tc *TSigVerifierCache) Update(round uint64) (bool, error) {
- tc.lock.Lock()
- defer tc.lock.Unlock()
- if round < tc.minRound {
- return false, ErrRoundAlreadyPurged
- }
- if _, exist := tc.verifier[round]; exist {
- return true, nil
- }
- if !tc.intf.IsDKGFinal(round) {
- return false, nil
- }
- gpk, err := NewDKGGroupPublicKey(round,
- tc.intf.DKGMasterPublicKeys(round),
- tc.intf.DKGComplaints(round),
- int(tc.intf.Configuration(round).DKGSetSize/3)+1)
- if err != nil {
- return false, err
- }
- if len(tc.verifier) == 0 {
- tc.minRound = round
- }
- tc.verifier[round] = gpk
- if len(tc.verifier) > tc.cacheSize {
- delete(tc.verifier, tc.minRound)
- }
- for {
- if _, exist := tc.verifier[tc.minRound]; !exist {
- tc.minRound++
- } else {
- break
- }
- }
- return true, nil
-}
-
-// Get returns the TSigVerifier of the round and whether it exists.
-func (tc *TSigVerifierCache) Get(round uint64) (TSigVerifier, bool) {
- tc.lock.RLock()
- defer tc.lock.RUnlock()
- verifier, exist := tc.verifier[round]
- return verifier, exist
-}
-
-func newTSigProtocol(
- gpk *DKGGroupPublicKey,
- hash common.Hash) *tsigProtocol {
- return &tsigProtocol{
- groupPublicKey: gpk,
- hash: hash,
- sigs: make(map[dkg.ID]dkg.PartialSignature, gpk.threshold+1),
- }
-}
-
-func (tsig *tsigProtocol) sanityCheck(psig *typesDKG.PartialSignature) error {
- _, exist := tsig.groupPublicKey.publicKeys[psig.ProposerID]
- if !exist {
- return ErrNotQualifyDKGParticipant
- }
- ok, err := verifyDKGPartialSignatureSignature(psig)
- if err != nil {
- return err
- }
- if !ok {
- return ErrIncorrectPartialSignatureSignature
- }
- if psig.Hash != tsig.hash {
- return ErrMismatchPartialSignatureHash
- }
- return nil
-}
-
-func (tsig *tsigProtocol) processPartialSignature(
- psig *typesDKG.PartialSignature) error {
- if psig.Round != tsig.groupPublicKey.round {
- return nil
- }
- id, exist := tsig.groupPublicKey.idMap[psig.ProposerID]
- if !exist {
- return ErrNotQualifyDKGParticipant
- }
- if err := tsig.sanityCheck(psig); err != nil {
- return err
- }
- pubKey := tsig.groupPublicKey.publicKeys[psig.ProposerID]
- if !pubKey.VerifySignature(
- tsig.hash, crypto.Signature(psig.PartialSignature)) {
- return ErrIncorrectPartialSignature
- }
- tsig.sigs[id] = psig.PartialSignature
- return nil
-}
-
-func (tsig *tsigProtocol) signature() (crypto.Signature, error) {
- if len(tsig.sigs) < tsig.groupPublicKey.threshold {
- return crypto.Signature{}, ErrNotEnoughtPartialSignatures
- }
- ids := make(dkg.IDs, 0, len(tsig.sigs))
- psigs := make([]dkg.PartialSignature, 0, len(tsig.sigs))
- for id, psig := range tsig.sigs {
- ids = append(ids, id)
- psigs = append(psigs, psig)
- }
- return dkg.RecoverSignature(psigs, ids)
-}
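
TSigVerifierCache ties these pieces together for callers: Update builds a DKGGroupPublicKey once the round's DKG is final, caches it, and evicts the oldest round beyond cacheSize. A hedged usage sketch, assuming gov is any value satisfying TSigVerifierCacheInterface; the function and variable names are hypothetical:

package example

import (
	"github.com/dexon-foundation/dexon-consensus/common"
	"github.com/dexon-foundation/dexon-consensus/core"
	"github.com/dexon-foundation/dexon-consensus/core/crypto"
)

// verifyRoundTSig checks a threshold signature for a round via the cache.
func verifyRoundTSig(
	gov core.TSigVerifierCacheInterface, round uint64,
	hash common.Hash, sig crypto.Signature) (bool, error) {
	cache := core.NewTSigVerifierCache(gov, 5) // keep verifiers for at most 5 rounds
	v, ok, err := cache.UpdateAndGet(round)
	if err != nil {
		return false, err // e.g. core.ErrRoundAlreadyPurged
	}
	if !ok {
		return false, nil // DKG for this round is not final yet
	}
	return v.VerifySignature(hash, sig), nil
}

In practice the cache would be created once and shared across calls; it is constructed inline here only to keep the sketch self-contained.
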
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/interfaces.go
deleted file mode 100644
index 75a2fdfcf..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/interfaces.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
- "github.com/dexon-foundation/dexon-consensus/core/types"
- typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
-)
-
-// Application describes the application interface that interacts with DEXON
-// consensus core.
-type Application interface {
- // PreparePayload is called when consensus core is preparing a block.
- PreparePayload(position types.Position) ([]byte, error)
-
-	// PrepareWitness returns witness data at a height no lower than consensusHeight.
- PrepareWitness(consensusHeight uint64) (types.Witness, error)
-
- // VerifyBlock verifies if the block is valid.
- VerifyBlock(block *types.Block) types.BlockVerifyStatus
-
- // BlockConfirmed is called when a block is confirmed and added to lattice.
- BlockConfirmed(block types.Block)
-
-	// BlockDelivered is called when a block is added to the compaction chain.
- BlockDelivered(blockHash common.Hash, result types.FinalizationResult)
-}
-
-// Debug describes the application interface that requires
-// more detailed consensus execution.
-type Debug interface {
- // StronglyAcked is called when a block is strongly acked.
- StronglyAcked(blockHash common.Hash)
-
-	// TotalOrderingDelivered is called when the total ordering algorithm delivers
-	// a set of blocks.
- TotalOrderingDelivered(common.Hashes, uint32)
-}
-
-// Network describes the network interface that interacts with DEXON consensus
-// core.
-type Network interface {
- // PullBlocks tries to pull blocks from the DEXON network.
- PullBlocks(hashes common.Hashes)
-
- // PullVotes tries to pull votes from the DEXON network.
- PullVotes(position types.Position)
-
- // BroadcastVote broadcasts vote to all nodes in DEXON network.
- BroadcastVote(vote *types.Vote)
-
- // BroadcastBlock broadcasts block to all nodes in DEXON network.
- BroadcastBlock(block *types.Block)
-
- // BroadcastAgreementResult broadcasts agreement result to DKG set.
- BroadcastAgreementResult(randRequest *types.AgreementResult)
-
-	// BroadcastRandomnessResult broadcasts the randomness result to the Notary set.
- BroadcastRandomnessResult(randResult *types.BlockRandomnessResult)
-
- // SendDKGPrivateShare sends PrivateShare to a DKG participant.
- SendDKGPrivateShare(pub crypto.PublicKey, prvShare *typesDKG.PrivateShare)
-
- // BroadcastDKGPrivateShare broadcasts PrivateShare to all DKG participants.
- BroadcastDKGPrivateShare(prvShare *typesDKG.PrivateShare)
-
- // BroadcastDKGPartialSignature broadcasts partialSignature to all
- // DKG participants.
- BroadcastDKGPartialSignature(psig *typesDKG.PartialSignature)
-
- // ReceiveChan returns a channel to receive messages from DEXON network.
- ReceiveChan() <-chan interface{}
-}
-
-// Governance specifies the interface to control the governance contract.
-// Note that there are many more methods in the governance contract; this
-// interface only defines those that are required to run the consensus algorithm.
-type Governance interface {
- // Configuration returns the configuration at a given round.
- // Return the genesis configuration if round == 0.
- Configuration(round uint64) *types.Config
-
- // CRS returns the CRS for a given round.
- // Return the genesis CRS if round == 0.
- CRS(round uint64) common.Hash
-
-	// ProposeCRS proposes a CRS of the round.
- ProposeCRS(round uint64, signedCRS []byte)
-
- // NodeSet returns the node set at a given round.
- // Return the genesis node set if round == 0.
- NodeSet(round uint64) []crypto.PublicKey
-
-	// NotifyRoundHeight notifies the governance contract to generate the
-	// configuration for targetRound based on the block at consensusHeight.
- NotifyRoundHeight(targetRound, consensusHeight uint64)
-
- //// DKG-related methods.
-
- // AddDKGComplaint adds a DKGComplaint.
- AddDKGComplaint(round uint64, complaint *typesDKG.Complaint)
-
- // DKGComplaints gets all the DKGComplaints of round.
- DKGComplaints(round uint64) []*typesDKG.Complaint
-
- // AddDKGMasterPublicKey adds a DKGMasterPublicKey.
- AddDKGMasterPublicKey(round uint64, masterPublicKey *typesDKG.MasterPublicKey)
-
- // DKGMasterPublicKeys gets all the DKGMasterPublicKey of round.
- DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey
-
- // AddDKGFinalize adds a DKG finalize message.
- AddDKGFinalize(round uint64, final *typesDKG.Finalize)
-
- // IsDKGFinal checks if DKG is final.
- IsDKGFinal(round uint64) bool
-}
-
-// Ticker defines the capability to tick by interval.
-type Ticker interface {
-	// Tick returns a channel which is triggered at the next tick.
- Tick() <-chan time.Time
-
- // Stop the ticker.
- Stop()
-}
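
Ticker is the only timing dependency the interfaces above require, so a standard-library ticker satisfies it directly. A minimal sketch backed by time.Ticker (the intervalTicker type is hypothetical):

package example

import (
	"time"

	"github.com/dexon-foundation/dexon-consensus/core"
)

// Compile-time check that intervalTicker satisfies core.Ticker.
var _ core.Ticker = (*intervalTicker)(nil)

type intervalTicker struct {
	ticker *time.Ticker
}

func newIntervalTicker(interval time.Duration) *intervalTicker {
	return &intervalTicker{ticker: time.NewTicker(interval)}
}

// Tick returns the channel that fires at the next tick.
func (t *intervalTicker) Tick() <-chan time.Time {
	return t.ticker.C
}

// Stop stops the underlying ticker.
func (t *intervalTicker) Stop() {
	t.ticker.Stop()
}
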
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice-data.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice-data.go
deleted file mode 100644
index 564675730..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice-data.go
+++ /dev/null
@@ -1,645 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "errors"
- "fmt"
- "sort"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/blockdb"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-// Errors for sanity check error.
-var (
- ErrDuplicatedAckOnOneChain = fmt.Errorf("duplicated ack on one chain")
- ErrInvalidChainID = fmt.Errorf("invalid chain id")
- ErrInvalidProposerID = fmt.Errorf("invalid proposer id")
- ErrInvalidWitness = fmt.Errorf("invalid witness data")
- ErrInvalidBlock = fmt.Errorf("invalid block")
- ErrNotAckParent = fmt.Errorf("not ack parent")
- ErrDoubleAck = fmt.Errorf("double ack")
- ErrAcksNotSorted = fmt.Errorf("acks not sorted")
- ErrInvalidBlockHeight = fmt.Errorf("invalid block height")
- ErrAlreadyInLattice = fmt.Errorf("block already in lattice")
- ErrIncorrectBlockTime = fmt.Errorf("block timestamp is incorrect")
- ErrInvalidRoundID = fmt.Errorf("invalid round id")
- ErrUnknownRoundID = fmt.Errorf("unknown round id")
- ErrRoundOutOfRange = fmt.Errorf("round out of range")
- ErrRoundNotSwitch = fmt.Errorf("round not switch")
- ErrNotGenesisBlock = fmt.Errorf("not a genesis block")
- ErrUnexpectedGenesisBlock = fmt.Errorf("unexpected genesis block")
-)
-
-// ErrAckingBlockNotExists is for sanity check error.
-type ErrAckingBlockNotExists struct {
- hash common.Hash
-}
-
-func (e ErrAckingBlockNotExists) Error() string {
- return fmt.Sprintf("acking block %s not exists", e.hash)
-}
-
-// Errors for method usage
-var (
- ErrRoundNotIncreasing = errors.New("round not increasing")
- ErrPurgedBlockNotFound = errors.New("purged block not found")
- ErrPurgeNotDeliveredBlock = errors.New("not purge from head")
-)
-
-// latticeDataConfig is the configuration for latticeData for each round.
-type latticeDataConfig struct {
- roundBasedConfig
- // Number of chains between runs
- numChains uint32
- // Block interval specifies reasonable time difference between
- // parent/child blocks.
- minBlockTimeInterval time.Duration
- maxBlockTimeInterval time.Duration
-}
-
-// Initiate latticeDataConfig from types.Config.
-func (config *latticeDataConfig) fromConfig(roundID uint64, cfg *types.Config) {
- config.numChains = cfg.NumChains
- config.minBlockTimeInterval = cfg.MinBlockInterval
- config.maxBlockTimeInterval = cfg.MaxBlockInterval
- config.setupRoundBasedFields(roundID, cfg)
-}
-
-// Check if timestamp of a block is valid according to a reference time.
-func (config *latticeDataConfig) isValidBlockTime(
- b *types.Block, ref time.Time) bool {
- return !(b.Timestamp.Before(ref.Add(config.minBlockTimeInterval)) ||
- b.Timestamp.After(ref.Add(config.maxBlockTimeInterval)))
-}
-
-// isValidGenesisBlockTime checks if a timestamp is valid for a genesis block.
-func (config *latticeDataConfig) isValidGenesisBlockTime(b *types.Block) bool {
- return !(b.Timestamp.Before(config.roundBeginTime) || b.Timestamp.After(
- config.roundBeginTime.Add(config.maxBlockTimeInterval)))
-}
-
-// newGenesisLatticeDataConfig constructs a latticeDataConfig instance.
-func newGenesisLatticeDataConfig(
- dMoment time.Time, config *types.Config) *latticeDataConfig {
- c := &latticeDataConfig{}
- c.fromConfig(0, config)
- c.setRoundBeginTime(dMoment)
- return c
-}
-
-// newLatticeDataConfig constructs a latticeDataConfig instance.
-func newLatticeDataConfig(
- prev *latticeDataConfig, cur *types.Config) *latticeDataConfig {
- c := &latticeDataConfig{}
- c.fromConfig(prev.roundID+1, cur)
- c.setRoundBeginTime(prev.roundEndTime)
- return c
-}
-
-// latticeData is a module for storing lattice.
-type latticeData struct {
-	// We need blockdb to read blocks purged from the in-memory cache.
- db blockdb.Reader
- // chains stores chains' blocks and other info.
- chains []*chainStatus
- // blockByHash stores blocks, indexed by block hash.
- blockByHash map[common.Hash]*types.Block
- // This stores configuration for each round.
- configs []*latticeDataConfig
-}
-
-// newLatticeData creates a new latticeData struct.
-func newLatticeData(
- db blockdb.Reader, genesisConfig *latticeDataConfig) (data *latticeData) {
- data = &latticeData{
- db: db,
- chains: make([]*chainStatus, genesisConfig.numChains),
- blockByHash: make(map[common.Hash]*types.Block),
- configs: []*latticeDataConfig{genesisConfig},
- }
- for i := range data.chains {
- data.chains[i] = &chainStatus{
- ID: uint32(i),
- blocks: []*types.Block{},
- lastAckPos: make([]*types.Position, genesisConfig.numChains),
- }
- }
- return
-}
-
-func (data *latticeData) checkAckingRelations(b *types.Block) error {
- acksByChainID := make(map[uint32]struct{}, len(data.chains))
- for _, hash := range b.Acks {
- bAck, err := data.findBlock(hash)
- if err != nil {
- if err == blockdb.ErrBlockDoesNotExist {
- return &ErrAckingBlockNotExists{hash}
- }
- return err
- }
-		// Check if it acks blocks from old rounds; the allowed round difference
-		// is 1.
- if DiffUint64(bAck.Position.Round, b.Position.Round) > 1 {
- return ErrRoundOutOfRange
- }
- // Check if it acks older blocks than blocks on the same chain.
- lastAckPos :=
- data.chains[bAck.Position.ChainID].lastAckPos[b.Position.ChainID]
- if lastAckPos != nil && !bAck.Position.Newer(lastAckPos) {
- return ErrDoubleAck
- }
-		// Check if it acks two blocks on the same chain. This needs to be
-		// checked after we replace the map with a slice for acks.
- if _, acked := acksByChainID[bAck.Position.ChainID]; acked {
- return ErrDuplicatedAckOnOneChain
- }
- acksByChainID[bAck.Position.ChainID] = struct{}{}
- }
- return nil
-}
-
-func (data *latticeData) sanityCheck(b *types.Block) error {
-	// TODO(mission): Check if its proposer is in the validator set somewhere;
-	// lattice doesn't have to know about the node set.
- config := data.getConfig(b.Position.Round)
- if config == nil {
- return ErrInvalidRoundID
- }
- // Check if the chain id is valid.
- if b.Position.ChainID >= config.numChains {
- return ErrInvalidChainID
- }
-	// Make sure the parent block has arrived.
- chain := data.chains[b.Position.ChainID]
- chainTip := chain.tip
- if chainTip == nil {
- if !b.ParentHash.Equal(common.Hash{}) {
- return &ErrAckingBlockNotExists{b.ParentHash}
- }
- if !b.IsGenesis() {
- return ErrNotGenesisBlock
- }
- if !config.isValidGenesisBlockTime(b) {
- return ErrIncorrectBlockTime
- }
- return data.checkAckingRelations(b)
- }
- // Check parent block if parent hash is specified.
- if !b.ParentHash.Equal(common.Hash{}) {
- if !b.ParentHash.Equal(chainTip.Hash) {
- return &ErrAckingBlockNotExists{b.ParentHash}
- }
- if !b.IsAcking(b.ParentHash) {
- return ErrNotAckParent
- }
- }
- chainTipConfig := data.getConfig(chainTip.Position.Round)
-	// The round can't be rewound.
- if chainTip.Position.Round > b.Position.Round {
- return ErrInvalidRoundID
- }
- checkTip := false
- if chainTip.Timestamp.After(chainTipConfig.roundEndTime) {
-		// Round switching should happen once chainTip has already passed
-		// the round end time of its round.
- if chainTip.Position.Round == b.Position.Round {
- return ErrRoundNotSwitch
- }
- // The round ID is continuous.
- if b.Position.Round-chainTip.Position.Round == 1 {
- checkTip = true
- } else {
-			// This block should be the genesis block of a new round because
-			// the round ID is not continuous.
- if !b.IsGenesis() {
- return ErrNotGenesisBlock
- }
- if !config.isValidGenesisBlockTime(b) {
- return ErrIncorrectBlockTime
- }
- // TODO(mission): make sure rounds between chainTip and current block
- // don't expect blocks from this chain.
- }
- } else {
- if chainTip.Position.Round != b.Position.Round {
- // Round should not switch.
- return ErrInvalidRoundID
- }
- checkTip = true
- }
-	// Validate the relation with the chain tip when needed.
- if checkTip {
- if b.Position.Height != chainTip.Position.Height+1 {
- return ErrInvalidBlockHeight
- }
- if b.Witness.Height < chainTip.Witness.Height {
- return ErrInvalidWitness
- }
- if !config.isValidBlockTime(b, chainTip.Timestamp) {
- return ErrIncorrectBlockTime
- }
- // Chain tip should be acked.
- if !b.IsAcking(chainTip.Hash) {
- return ErrNotAckParent
- }
- }
- if err := data.checkAckingRelations(b); err != nil {
- return err
- }
- return nil
-}
-
-// addBlock processes a block: it performs sanity checks, inserts the block
-// into the lattice and deletes blocks that will no longer be used.
-func (data *latticeData) addBlock(
- block *types.Block) (deliverable []*types.Block, err error) {
- var (
- bAck *types.Block
- updated bool
- )
- data.chains[block.Position.ChainID].addBlock(block)
- data.blockByHash[block.Hash] = block
- // Update lastAckPos.
- for _, ack := range block.Acks {
- if bAck, err = data.findBlock(ack); err != nil {
- if err == blockdb.ErrBlockDoesNotExist {
- err = nil
- continue
- }
- return
- }
- data.chains[bAck.Position.ChainID].lastAckPos[block.Position.ChainID] =
- bAck.Position.Clone()
- }
-
-	// Extract blocks that are deliverable to total ordering.
-	// A block is deliverable to total ordering iff:
-	// - all the blocks it acks are delivered to total ordering.
- for {
- updated = false
- for _, status := range data.chains {
- if status.nextOutputIndex >= len(status.blocks) {
- continue
- }
- tip := status.blocks[status.nextOutputIndex]
- allAckingBlockDelivered := true
- for _, ack := range tip.Acks {
- if bAck, err = data.findBlock(ack); err != nil {
- if err == blockdb.ErrBlockDoesNotExist {
- err = nil
- allAckingBlockDelivered = false
- break
- }
- return
- }
-				// Check if this block has been output or not.
- idx := data.chains[bAck.Position.ChainID].findBlock(
- &bAck.Position)
- var ok bool
- if idx == -1 {
-					// The block is either already delivered or not yet
-					// added to the chain.
- if out :=
- data.chains[bAck.Position.ChainID].lastOutputPosition; out != nil {
- ok = !out.Older(&bAck.Position)
- } else if ackTip :=
- data.chains[bAck.Position.ChainID].tip; ackTip != nil {
- ok = !ackTip.Position.Older(&bAck.Position)
- }
- } else {
- ok = idx < data.chains[bAck.Position.ChainID].nextOutputIndex
- }
- if ok {
- continue
- }
-				// This acked block exists but has not been delivered yet.
- allAckingBlockDelivered = false
- }
- if allAckingBlockDelivered {
- status.lastOutputPosition = &tip.Position
- status.nextOutputIndex++
- deliverable = append(deliverable, tip)
- updated = true
- }
- }
- if !updated {
- break
- }
- }
- return
-}
-
-// addFinalizedBlock processes a block for syncing internal data.
-func (data *latticeData) addFinalizedBlock(
- block *types.Block) (err error) {
- var bAck *types.Block
- chain := data.chains[block.Position.ChainID]
- if chain.tip != nil && chain.tip.Position.Height >=
- block.Position.Height {
- return
- }
- chain.nextOutputIndex = 0
- chain.blocks = []*types.Block{}
- chain.tip = block
- chain.lastOutputPosition = nil
-	// Update lastAckPos.
- for _, ack := range block.Acks {
- if bAck, err = data.findBlock(ack); err != nil {
- return
- }
- data.chains[bAck.Position.ChainID].lastAckPos[block.Position.ChainID] =
- bAck.Position.Clone()
- }
- return
-}
-
-// prepareBlock helps to set up the fields of a block based on its ChainID and
-// Round, including:
-// - Acks
-// - Timestamp
-// - ParentHash and Height from the parent block. If there is no valid parent
-//   block (e.g. a newly added chain or bootstrap), these fields are set up as
-//   for a genesis block.
-func (data *latticeData) prepareBlock(b *types.Block) error {
- var (
- minTimestamp, maxTimestamp time.Time
- config *latticeDataConfig
- acks common.Hashes
- bindTip bool
- chainTip *types.Block
- )
- if config = data.getConfig(b.Position.Round); config == nil {
- return ErrUnknownRoundID
- }
-	// Reject blocks from a chain that is not valid in this round.
- if b.Position.ChainID >= config.numChains {
- return ErrInvalidChainID
- }
-	// Reset fields to make sure we get this information from the parent block.
- b.Position.Height = 0
- b.ParentHash = common.Hash{}
- // Decide valid timestamp range.
- homeChain := data.chains[b.Position.ChainID]
- if homeChain.tip != nil {
- chainTip = homeChain.tip
- if b.Position.Round < chainTip.Position.Round {
- return ErrInvalidRoundID
- }
- chainTipConfig := data.getConfig(chainTip.Position.Round)
- if chainTip.Timestamp.After(chainTipConfig.roundEndTime) {
- if b.Position.Round == chainTip.Position.Round {
- return ErrRoundNotSwitch
- }
- if b.Position.Round == chainTip.Position.Round+1 {
- bindTip = true
- }
- } else {
- if b.Position.Round != chainTip.Position.Round {
- return ErrInvalidRoundID
- }
- bindTip = true
- }
-		// TODO(mission): find a way to prevent assigning a witness height
-		// from the Jurassic period.
- b.Witness.Height = chainTip.Witness.Height
- }
- // For blocks with continuous round ID, assign timestamp range based on
- // parent block and bound config.
- if bindTip {
- minTimestamp = chainTip.Timestamp.Add(config.minBlockTimeInterval)
- maxTimestamp = chainTip.Timestamp.Add(config.maxBlockTimeInterval)
- // When a chain is removed and added back, the reference block
- // of previous round can't be used as parent block.
- b.ParentHash = chainTip.Hash
- b.Position.Height = chainTip.Position.Height + 1
- } else {
-		// Discontinuous round ID detected; this is a fresh start of a
-		// new round.
- minTimestamp = config.roundBeginTime
- maxTimestamp = config.roundBeginTime.Add(config.maxBlockTimeInterval)
- }
- // Fix timestamp if the given one is invalid.
- if b.Timestamp.Before(minTimestamp) {
- b.Timestamp = minTimestamp
- } else if b.Timestamp.After(maxTimestamp) {
- b.Timestamp = maxTimestamp
- }
- // Setup acks fields.
- for _, status := range data.chains {
- // Check if we can ack latest block on that chain.
- if status.tip == nil {
- continue
- }
- lastAckPos := status.lastAckPos[b.Position.ChainID]
- if lastAckPos != nil && !status.tip.Position.Newer(lastAckPos) {
- // The reference block is already acked.
- continue
- }
- if status.tip.Position.Round > b.Position.Round {
- // Avoid forward acking: acking some block from later rounds.
- continue
- }
- if b.Position.Round > status.tip.Position.Round+1 {
-			// Can't ack blocks that are too old or too new relative to us.
- continue
- }
- acks = append(acks, status.tip.Hash)
- }
- b.Acks = common.NewSortedHashes(acks)
- return nil
-}
-
-// prepareEmptyBlock helps to set up the fields of a block based on its ChainID,
-// including:
-// - Acks acking only its parent
-// - Timestamp set to parent.Timestamp + minBlockTimeInterval
-// - ParentHash and Height from the parent block. If there is no valid parent
-//   block (e.g. a newly added chain or bootstrap), these fields are set up as
-//   for a genesis block.
-func (data *latticeData) prepareEmptyBlock(b *types.Block) {
- // emptyBlock has no proposer.
- b.ProposerID = types.NodeID{}
- var acks common.Hashes
-	// Reset fields to make sure we get this information from the parent block.
- b.Position.Height = 0
- b.Position.Round = 0
- b.ParentHash = common.Hash{}
- b.Timestamp = time.Time{}
- // Decide valid timestamp range.
- homeChain := data.chains[b.Position.ChainID]
- if homeChain.tip != nil {
- chainTip := homeChain.tip
- b.ParentHash = chainTip.Hash
- chainTipConfig := data.getConfig(chainTip.Position.Round)
- if chainTip.Timestamp.After(chainTipConfig.roundEndTime) {
- b.Position.Round = chainTip.Position.Round + 1
- } else {
- b.Position.Round = chainTip.Position.Round
- }
- b.Position.Height = chainTip.Position.Height + 1
- b.Timestamp = chainTip.Timestamp.Add(chainTipConfig.minBlockTimeInterval)
- acks = append(acks, chainTip.Hash)
- }
- b.Acks = common.NewSortedHashes(acks)
-}
-
-// TODO(mission): make more abstraction for this method.
-// nextPosition returns the next position for the chain.
-func (data *latticeData) nextPosition(chainID uint32) types.Position {
- return data.chains[chainID].nextPosition()
-}
-
-// findBlock looks up a block in memory first, then in the db.
-func (data *latticeData) findBlock(h common.Hash) (b *types.Block, err error) {
- if b = data.blockByHash[h]; b != nil {
- return
- }
- var tmpB types.Block
- if tmpB, err = data.db.Get(h); err != nil {
- return
- }
- b = &tmpB
- return
-}
-
-// purgeBlocks purges blocks from cache.
-func (data *latticeData) purgeBlocks(blocks []*types.Block) error {
- for _, b := range blocks {
- if _, exists := data.blockByHash[b.Hash]; !exists {
- return ErrPurgedBlockNotFound
- }
- delete(data.blockByHash, b.Hash)
-		// Blocks are purged in ascending order of position.
- if err := data.chains[b.Position.ChainID].purgeBlock(b); err != nil {
- return err
- }
- }
- return nil
-}
-
-// getConfig gets the configuration for lattice data by round ID.
-func (data *latticeData) getConfig(round uint64) (config *latticeDataConfig) {
- if round >= uint64(len(data.configs)) {
- return
- }
- return data.configs[round]
-}
-
-// appendConfig appends a configuration for an upcoming round. When you append
-// a config for round R, the next config you can append is for round R+1.
-func (data *latticeData) appendConfig(
- round uint64, config *types.Config) (err error) {
- // Make sure caller knows which round this config belongs to.
- if round != uint64(len(data.configs)) {
- return ErrRoundNotIncreasing
- }
- // Set round beginning time.
- newConfig := newLatticeDataConfig(data.configs[len(data.configs)-1], config)
- data.configs = append(data.configs, newConfig)
- // Resize each slice if incoming config contains larger number of chains.
- if uint32(len(data.chains)) < newConfig.numChains {
- count := newConfig.numChains - uint32(len(data.chains))
- for _, status := range data.chains {
- status.lastAckPos = append(
- status.lastAckPos, make([]*types.Position, count)...)
- }
- for i := uint32(len(data.chains)); i < newConfig.numChains; i++ {
- data.chains = append(data.chains, &chainStatus{
- ID: i,
- blocks: []*types.Block{},
- lastAckPos: make([]*types.Position, newConfig.numChains),
- })
- }
- }
- return nil
-}
-
-type chainStatus struct {
- // ID keeps the chainID of this chain status.
- ID uint32
- // blocks stores blocks proposed for this chain, sorted by height.
- blocks []*types.Block
- // tip is the last block on this chain.
- tip *types.Block
-	// lastAckPos caches, for each other chain, the last position on this chain
-	// acked by that chain. Nil means not acked yet.
- lastAckPos []*types.Position
-	// nextOutputIndex is the index of the next block to be output.
- nextOutputIndex int
-	// lastOutputPosition is the position of the last output block.
- lastOutputPosition *types.Position
-}
-
-// findBlock finds the index of a block among the pending blocks on this chain.
-// -1 means not found.
-func (s *chainStatus) findBlock(pos *types.Position) (idx int) {
- idx = sort.Search(len(s.blocks), func(i int) bool {
- return s.blocks[i].Position.Newer(pos) ||
- s.blocks[i].Position.Equal(pos)
- })
- if idx == len(s.blocks) {
- idx = -1
- } else if !s.blocks[idx].Position.Equal(pos) {
- idx = -1
- }
- return idx
-}
-
-// getBlock returns a pending block given its index from the findBlock method.
-func (s *chainStatus) getBlock(idx int) (b *types.Block) {
- if idx < 0 || idx >= len(s.blocks) {
- return
- }
- b = s.blocks[idx]
- return
-}
-
-// addBlock adds a block to pending blocks on this chain.
-func (s *chainStatus) addBlock(b *types.Block) {
- s.blocks = append(s.blocks, b)
- s.tip = b
-}
-
-// TODO(mission): change back to nextHeight.
-// nextPosition returns a valid position for new block in this chain.
-func (s *chainStatus) nextPosition() types.Position {
- if s.tip == nil {
- return types.Position{
- ChainID: s.ID,
- Height: 0,
- }
- }
- return types.Position{
- ChainID: s.ID,
- Height: s.tip.Position.Height + 1,
- }
-}
-
-// purgeBlock purges a block from the cache; the caller must make sure the
-// block has already been persisted to blockdb.
-func (s *chainStatus) purgeBlock(b *types.Block) error {
- if b.Hash != s.blocks[0].Hash || s.nextOutputIndex <= 0 {
- return ErrPurgeNotDeliveredBlock
- }
- s.blocks = s.blocks[1:]
- s.nextOutputIndex--
- return nil
-}
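The delivery rule described in latticeData.addBlock above reduces to: a block may be handed to total ordering only once every block it acks has been delivered. Below is a minimal, self-contained sketch of that rule; it is not part of the vendored code, and the miniBlock/deliverable names are hypothetical.

package main

import "fmt"

// miniBlock is a stripped-down stand-in for types.Block: just a hash and the
// hashes it acks.
type miniBlock struct {
	hash string
	acks []string
}

// deliverable reports whether b may be delivered to total ordering, given the
// set of hashes already delivered.
func deliverable(b miniBlock, delivered map[string]bool) bool {
	for _, a := range b.acks {
		if !delivered[a] {
			return false
		}
	}
	return true
}

func main() {
	delivered := map[string]bool{"A": true}
	b := miniBlock{hash: "B", acks: []string{"A", "C"}}
	fmt.Println(deliverable(b, delivered)) // false: "C" is not delivered yet
	delivered["C"] = true
	fmt.Println(deliverable(b, delivered)) // true
}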
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice.go
deleted file mode 100644
index af9c3c42f..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/lattice.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "fmt"
- "sync"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/blockdb"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-// Errors for sanity checks.
-var (
- ErrRetrySanityCheckLater = fmt.Errorf("retry sanity check later")
-)
-
-// Lattice represents a unit to produce a global ordering from multiple chains.
-type Lattice struct {
- lock sync.RWMutex
- authModule *Authenticator
- app Application
- debug Debug
- pool blockPool
- retryAdd bool
- data *latticeData
- toModule *totalOrdering
- ctModule *consensusTimestamp
- logger common.Logger
-}
-
-// NewLattice constructs a Lattice instance.
-func NewLattice(
- dMoment time.Time,
- cfg *types.Config,
- authModule *Authenticator,
- app Application,
- debug Debug,
- db blockdb.BlockDatabase,
- logger common.Logger) (s *Lattice) {
- // Create genesis latticeDataConfig.
- dataConfig := newGenesisLatticeDataConfig(dMoment, cfg)
- toConfig := newGenesisTotalOrderingConfig(dMoment, cfg)
- s = &Lattice{
- authModule: authModule,
- app: app,
- debug: debug,
- pool: newBlockPool(cfg.NumChains),
- data: newLatticeData(db, dataConfig),
- toModule: newTotalOrdering(toConfig),
- ctModule: newConsensusTimestamp(dMoment, 0, cfg.NumChains),
- logger: logger,
- }
- return
-}
-
-// PrepareBlock sets up a block's fields based on the current lattice status.
-func (s *Lattice) PrepareBlock(
- b *types.Block, proposeTime time.Time) (err error) {
-
- s.lock.RLock()
- defer s.lock.RUnlock()
-
- b.Timestamp = proposeTime
- if err = s.data.prepareBlock(b); err != nil {
- return
- }
- s.logger.Debug("Calling Application.PreparePayload", "position", b.Position)
- if b.Payload, err = s.app.PreparePayload(b.Position); err != nil {
- return
- }
- s.logger.Debug("Calling Application.PrepareWitness",
- "height", b.Witness.Height)
- if b.Witness, err = s.app.PrepareWitness(b.Witness.Height); err != nil {
- return
- }
- if err = s.authModule.SignBlock(b); err != nil {
- return
- }
- return
-}
-
-// PrepareEmptyBlock sets up a block's fields based on the current lattice
-// status.
-func (s *Lattice) PrepareEmptyBlock(b *types.Block) (err error) {
- s.lock.RLock()
- defer s.lock.RUnlock()
- s.data.prepareEmptyBlock(b)
- if b.Hash, err = hashBlock(b); err != nil {
- return
- }
- return
-}
-
-// SanityCheck checks if a block is valid.
-//
-// If some acked blocks don't exist, the Lattice caches this block and retries
-// the check once the lattice is updated in Lattice.ProcessBlock.
-func (s *Lattice) SanityCheck(b *types.Block) (err error) {
- if b.IsEmpty() {
- // Only need to verify block's hash.
- var hash common.Hash
- if hash, err = hashBlock(b); err != nil {
- return
- }
- if b.Hash != hash {
- return ErrInvalidBlock
- }
- } else {
- // Verify block's signature.
- if err = s.authModule.VerifyBlock(b); err != nil {
- return
- }
- }
- // Make sure acks are sorted.
- for i := range b.Acks {
- if i == 0 {
- continue
- }
- if !b.Acks[i-1].Less(b.Acks[i]) {
- err = ErrAcksNotSorted
- return
- }
- }
- if err = func() (err error) {
- s.lock.RLock()
- defer s.lock.RUnlock()
- if err = s.data.sanityCheck(b); err != nil {
- if _, ok := err.(*ErrAckingBlockNotExists); ok {
- err = ErrRetrySanityCheckLater
- }
- s.logger.Error("Sanity Check failed", "error", err)
- return
- }
- return
- }(); err != nil {
- return
- }
- // Verify data in application layer.
- s.logger.Debug("Calling Application.VerifyBlock", "block", b)
- switch s.app.VerifyBlock(b) {
- case types.VerifyInvalidBlock:
- err = ErrInvalidBlock
- case types.VerifyRetryLater:
- err = ErrRetrySanityCheckLater
- }
- return
-}
-
-// addBlockToLattice adds a block into the lattice, and delivers blocks whose
-// acked blocks have all been delivered.
-//
-// NOTE: assume the block passed sanity check.
-func (s *Lattice) addBlockToLattice(
- input *types.Block) (outputBlocks []*types.Block, err error) {
- if tip := s.data.chains[input.Position.ChainID].tip; tip != nil {
- if !input.Position.Newer(&tip.Position) {
- return
- }
- }
- s.pool.addBlock(input)
- // Replay tips in pool to check their validity.
- for {
- hasOutput := false
- for i := uint32(0); i < uint32(len(s.pool)); i++ {
- var tip *types.Block
- if tip = s.pool.tip(i); tip == nil {
- continue
- }
- err = s.data.sanityCheck(tip)
- if err == nil {
- var output []*types.Block
- if output, err = s.data.addBlock(tip); err != nil {
- s.logger.Error("Sanity Check failed", "error", err)
- continue
- }
- hasOutput = true
- outputBlocks = append(outputBlocks, output...)
- }
- if _, ok := err.(*ErrAckingBlockNotExists); ok {
- err = nil
- continue
- }
- s.pool.removeTip(i)
- }
- if !hasOutput {
- break
- }
- }
-
- for _, b := range outputBlocks {
- // TODO(jimmy-dexon): change this name of classic DEXON algorithm.
- if s.debug != nil {
- s.debug.StronglyAcked(b.Hash)
- }
- s.logger.Debug("Calling Application.BlockConfirmed", "block", input)
- s.app.BlockConfirmed(*b.Clone())
- // Purge blocks in pool with the same chainID and lower height.
- s.pool.purgeBlocks(b.Position.ChainID, b.Position.Height)
- }
-
- return
-}
-
-// ProcessBlock adds a block into the lattice and delivers ordered blocks.
-// If any blocks pass the sanity check after this block is added into the
-// lattice, they are returned as well.
-//
-// NOTE: assume the block passed sanity check.
-func (s *Lattice) ProcessBlock(
- input *types.Block) (delivered []*types.Block, err error) {
- var (
- b *types.Block
- inLattice []*types.Block
- toDelivered []*types.Block
- deliveredMode uint32
- )
-
- s.lock.Lock()
- defer s.lock.Unlock()
-
- if inLattice, err = s.addBlockToLattice(input); err != nil {
- return
- }
-
- if len(inLattice) == 0 {
- return
- }
-
- // Perform total ordering for each block added to lattice.
- for _, b = range inLattice {
- toDelivered, deliveredMode, err = s.toModule.processBlock(b)
- if err != nil {
-			// All errors from total ordering are serious; we should panic.
- panic(err)
- }
- if len(toDelivered) == 0 {
- continue
- }
- hashes := make(common.Hashes, len(toDelivered))
- for idx := range toDelivered {
- hashes[idx] = toDelivered[idx].Hash
- }
- if s.debug != nil {
- s.debug.TotalOrderingDelivered(hashes, deliveredMode)
- }
- // Perform timestamp generation.
- if err = s.ctModule.processBlocks(toDelivered); err != nil {
- return
- }
- delivered = append(delivered, toDelivered...)
- }
- return
-}
-
-// NextPosition returns the expected position of the incoming block for that
-// chain.
-func (s *Lattice) NextPosition(chainID uint32) types.Position {
- s.lock.RLock()
- defer s.lock.RUnlock()
-
- return s.data.nextPosition(chainID)
-}
-
-// PurgeBlocks purges blocks from the in-memory cache. It is called once the
-// caller has made sure those blocks are saved to the db.
-func (s *Lattice) PurgeBlocks(blocks []*types.Block) error {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- return s.data.purgeBlocks(blocks)
-}
-
-// AppendConfig adds new configs for upcoming rounds. When you add a config for
-// round R, the next config you can add is for round R+1.
-func (s *Lattice) AppendConfig(round uint64, config *types.Config) (err error) {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- s.pool.resize(config.NumChains)
- if err = s.data.appendConfig(round, config); err != nil {
- return
- }
- if err = s.toModule.appendConfig(round, config); err != nil {
- return
- }
- if err = s.ctModule.appendConfig(round, config); err != nil {
- return
- }
- return
-}
-
-// ProcessFinalizedBlock is used for syncing lattice data.
-func (s *Lattice) ProcessFinalizedBlock(input *types.Block) {
- defer func() { s.retryAdd = true }()
- s.lock.Lock()
- defer s.lock.Unlock()
- if err := s.data.addFinalizedBlock(input); err != nil {
- panic(err)
- }
- s.pool.purgeBlocks(input.Position.ChainID, input.Position.Height)
-}
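The comment on Lattice.SanityCheck above implies a retry protocol for callers: when acked blocks are missing, SanityCheck reports ErrRetrySanityCheckLater and the block should be re-checked after the lattice receives more blocks. A hedged usage sketch follows; handleIncomingBlock is a hypothetical helper, and the import paths are the ones used inside the deleted files.

package example

import (
	"github.com/dexon-foundation/dexon-consensus/core"
	"github.com/dexon-foundation/dexon-consensus/core/types"
)

// handleIncomingBlock checks a block and feeds it into the lattice. Blocks
// hitting ErrRetrySanityCheckLater are expected to be kept by the caller and
// re-checked after more blocks arrive.
func handleIncomingBlock(l *core.Lattice, b *types.Block) (retry bool, err error) {
	switch checkErr := l.SanityCheck(b); checkErr {
	case nil:
		// The block is valid; add it and collect the ordered output.
		delivered, procErr := l.ProcessBlock(b)
		if procErr != nil {
			return false, procErr
		}
		_ = delivered // hand the ordered blocks to the next stage
		return false, nil
	case core.ErrRetrySanityCheckLater:
		// Some acked blocks are missing; retry once the lattice is updated.
		return true, nil
	default:
		return false, checkErr
	}
}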
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/leader-selector.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/leader-selector.go
deleted file mode 100644
index 2be596abc..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/leader-selector.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "fmt"
- "math/big"
- "sync"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-// Errors for leader module.
-var (
- ErrIncorrectCRSSignature = fmt.Errorf("incorrect CRS signature")
-)
-
-type validLeaderFn func(*types.Block) bool
-
-// Some constant values.
-var (
- maxHash *big.Int
- one *big.Rat
-)
-
-func init() {
- hash := make([]byte, common.HashLength)
- for i := range hash {
- hash[i] = 0xff
- }
- maxHash = big.NewInt(0).SetBytes(hash)
- one = big.NewRat(1, 1)
-}
-
-type leaderSelector struct {
- hashCRS common.Hash
- numCRS *big.Int
- minCRSBlock *big.Int
- minBlockHash common.Hash
- pendingBlocks []*types.Block
- validLeader validLeaderFn
- lock sync.Mutex
-}
-
-func newLeaderSelector(
- crs common.Hash, validLeader validLeaderFn) *leaderSelector {
- numCRS := big.NewInt(0)
- numCRS.SetBytes(crs[:])
- return &leaderSelector{
- numCRS: numCRS,
- hashCRS: crs,
- minCRSBlock: maxHash,
- validLeader: validLeader,
- }
-}
-
-func (l *leaderSelector) distance(sig crypto.Signature) *big.Int {
- hash := crypto.Keccak256Hash(sig.Signature[:])
- num := big.NewInt(0)
- num.SetBytes(hash[:])
- num.Abs(num.Sub(l.numCRS, num))
- return num
-}
-
-func (l *leaderSelector) probability(sig crypto.Signature) float64 {
- dis := l.distance(sig)
- prob := big.NewRat(1, 1).SetFrac(dis, maxHash)
- p, _ := prob.Sub(one, prob).Float64()
- return p
-}
-
-func (l *leaderSelector) restart() {
- l.lock.Lock()
- defer l.lock.Unlock()
- l.minCRSBlock = maxHash
- l.minBlockHash = common.Hash{}
- l.pendingBlocks = []*types.Block{}
-}
-
-func (l *leaderSelector) leaderBlockHash() common.Hash {
- l.lock.Lock()
- defer l.lock.Unlock()
- newPendingBlocks := []*types.Block{}
- for _, b := range l.pendingBlocks {
- if l.validLeader(b) {
- l.updateLeader(b)
- } else {
- newPendingBlocks = append(newPendingBlocks, b)
- }
- }
- l.pendingBlocks = newPendingBlocks
- return l.minBlockHash
-}
-
-func (l *leaderSelector) processBlock(block *types.Block) error {
- ok, err := verifyCRSSignature(block, l.hashCRS)
- if err != nil {
- return err
- }
- if !ok {
- return ErrIncorrectCRSSignature
- }
- l.lock.Lock()
- defer l.lock.Unlock()
- if !l.validLeader(block) {
- l.pendingBlocks = append(l.pendingBlocks, block)
- return nil
- }
- l.updateLeader(block)
- return nil
-}
-func (l *leaderSelector) updateLeader(block *types.Block) {
- dist := l.distance(block.CRSSignature)
- cmp := l.minCRSBlock.Cmp(dist)
- if cmp > 0 || (cmp == 0 && block.Hash.Less(l.minBlockHash)) {
- l.minCRSBlock = dist
- l.minBlockHash = block.Hash
- }
-}
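leaderSelector above ranks candidate blocks by the big-integer distance between the round CRS and a hash of the block's CRS signature, and turns that distance into a winning probability of 1 - distance/maxHash. The following self-contained sketch mirrors that arithmetic; sha256 is only a stand-in for the Keccak256 hash used in the real code.

package main

import (
	"crypto/sha256"
	"fmt"
	"math/big"
)

// maxHash is the largest possible hash value, mirroring the constant in the
// leader-selector code above.
var maxHash = new(big.Int).SetBytes(bytesOf(0xff, 32))

func bytesOf(b byte, n int) []byte {
	buf := make([]byte, n)
	for i := range buf {
		buf[i] = b
	}
	return buf
}

// distance mirrors leaderSelector.distance: |crs - H(sig)| as a big integer.
func distance(crs *big.Int, sig []byte) *big.Int {
	h := sha256.Sum256(sig)
	num := new(big.Int).SetBytes(h[:])
	return num.Abs(num.Sub(crs, num))
}

// probability mirrors leaderSelector.probability: 1 - distance/maxHash.
func probability(crs *big.Int, sig []byte) float64 {
	dis := distance(crs, sig)
	prob := new(big.Rat).SetFrac(dis, maxHash)
	p, _ := new(big.Rat).Sub(big.NewRat(1, 1), prob).Float64()
	return p
}

func main() {
	crs := new(big.Int).SetBytes(bytesOf(0xab, 32))
	fmt.Println(probability(crs, []byte("some signature")))
}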
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/negative-ack.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/negative-ack.go
deleted file mode 100644
index 417862912..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/negative-ack.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it and/or
-// modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-type negativeAck struct {
-	// owner is the ID of the proposer itself; it is used when deciding
-	// whether a node should be restricted or not.
- owner types.NodeID
-
- numOfNodes int
-
- // timeDelay and timeExpire are for nack timeout.
- timeDelay time.Duration
- timeExpire time.Duration
-
-	// restricteds stores the nodes which have been restricted and the time
-	// they were restricted.
- restricteds map[types.NodeID]time.Time
-
-	// lastVotes and lockedVotes store the votes for nack. lastVotes[nid1][nid2]
-	// and lockedVotes[nid1][nid2] both mean that nid2 votes against nid1. The
-	// difference is that lockedVotes only works while nid1 is restricted, so
-	// that the votes need to be locked.
- lastVotes map[types.NodeID]map[types.NodeID]struct{}
- lockedVotes map[types.NodeID]map[types.NodeID]struct{}
-
-	// timeDiffs caches the last timestamps. timeDiffs[nid1][nid2] holds the
-	// last updated timestamps that nid1 has seen from nid2.
- timeDiffs map[types.NodeID]map[types.NodeID]map[types.NodeID]time.Time
-}
-
-// newNegativeAck creates a new negativeAck instance.
-func newNegativeAck(nid types.NodeID) *negativeAck {
- n := &negativeAck{
- owner: nid,
- numOfNodes: 0,
- restricteds: make(map[types.NodeID]time.Time),
- lastVotes: make(map[types.NodeID]map[types.NodeID]struct{}),
- lockedVotes: make(map[types.NodeID]map[types.NodeID]struct{}),
- timeDiffs: make(map[types.NodeID]map[types.NodeID]map[types.NodeID]time.Time),
- }
- n.addNode(nid)
- return n
-}
-
-// processNewVote is called when a new "vote" occurs, that is, when a node
-// sees that another 2f + 1 nodes think a node is slow. "nid" is the node which
-// proposed the block whose timestamps cast the vote, and "h" is the node being
-// voted to be nacked.
-func (n *negativeAck) processNewVote(
- nid types.NodeID,
- h types.NodeID,
-) []types.NodeID {
-
- nackeds := []types.NodeID{}
- if _, exist := n.restricteds[h]; exist {
- n.lockedVotes[h][nid] = struct{}{}
- if len(n.lockedVotes[h]) > 2*(n.numOfNodes-1)/3 {
- nackeds = append(nackeds, h)
- delete(n.restricteds, h)
- }
- } else {
- if n.owner == nid {
- n.restrict(h)
- } else {
- n.lastVotes[h][nid] = struct{}{}
- if len(n.lastVotes[h]) > (n.numOfNodes-1)/3 {
- n.restrict(h)
- }
- }
- }
- return nackeds
-}
-
-// processTimestamps processes new timestamps of a block proposed by node nid,
-// and returns the nodes being nacked.
-func (n *negativeAck) processTimestamps(
- nid types.NodeID,
- ts map[types.NodeID]time.Time,
-) []types.NodeID {
-
- n.checkRestrictExpire()
-
- nackeds := []types.NodeID{}
- for h := range n.timeDiffs {
- if n.timeDiffs[nid][h][h].Equal(ts[h]) {
- votes := 0
- for hh := range n.timeDiffs {
- if ts[hh].Sub(n.timeDiffs[nid][h][hh]) >= n.timeDelay {
- votes++
- }
- }
- if votes > 2*((n.numOfNodes-1)/3) {
- n.lastVotes[h][nid] = struct{}{}
- nack := n.processNewVote(nid, h)
- for _, i := range nack {
- nackeds = append(nackeds, i)
- }
- } else {
- delete(n.lastVotes[h], nid)
- }
- } else {
- for hh := range n.timeDiffs {
- n.timeDiffs[nid][h][hh] = ts[hh]
- }
- delete(n.lastVotes[h], nid)
- }
- }
- return nackeds
-}
-
-func (n *negativeAck) checkRestrictExpire() {
- expired := []types.NodeID{}
- now := time.Now()
- for h, t := range n.restricteds {
- if now.Sub(t) >= n.timeExpire {
- expired = append(expired, h)
- }
- }
- for _, h := range expired {
- delete(n.restricteds, h)
- }
-}
-
-func (n *negativeAck) restrict(nid types.NodeID) {
- if _, exist := n.restricteds[nid]; !exist {
- n.restricteds[nid] = time.Now().UTC()
- n.lockedVotes[nid] = map[types.NodeID]struct{}{}
- for h := range n.lastVotes[nid] {
- n.lockedVotes[nid][h] = struct{}{}
- }
- }
-}
-
-func (n *negativeAck) getRestrictedNodes() map[types.NodeID]struct{} {
- n.checkRestrictExpire()
- ret := map[types.NodeID]struct{}{}
- for h := range n.restricteds {
- ret[h] = struct{}{}
- }
- return ret
-}
-
-func (n *negativeAck) setTimeDelay(t time.Duration) {
- n.timeDelay = t
-}
-
-func (n *negativeAck) setTimeExpire(t time.Duration) {
- n.timeExpire = t
-}
-
-func (n *negativeAck) addNode(nid types.NodeID) {
- n.numOfNodes++
- n.lastVotes[nid] = make(map[types.NodeID]struct{})
- n.lockedVotes[nid] = make(map[types.NodeID]struct{})
-
- newTimeDiff := make(map[types.NodeID]map[types.NodeID]time.Time)
- for h := range n.timeDiffs {
- newTimeDiff2 := make(map[types.NodeID]time.Time)
- for hh := range n.timeDiffs {
- newTimeDiff2[hh] = time.Time{}
- }
- newTimeDiff[h] = newTimeDiff2
- }
- n.timeDiffs[nid] = newTimeDiff
- for h := range n.timeDiffs {
- n.timeDiffs[h][nid] = make(map[types.NodeID]time.Time)
- }
-}
-
-func (n *negativeAck) deleteNode(nid types.NodeID) {
- n.numOfNodes--
-
- delete(n.timeDiffs, nid)
-
- for h := range n.lastVotes {
- delete(n.lastVotes[h], nid)
- }
- delete(n.lastVotes, nid)
- delete(n.lockedVotes, nid)
-
- for h := range n.timeDiffs {
- delete(n.timeDiffs[h], nid)
- for hh := range n.timeDiffs[h] {
- delete(n.timeDiffs[h][hh], nid)
- }
- }
-
- delete(n.restricteds, nid)
-}
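The vote thresholds used by negativeAck above follow the usual Byzantine bounds: with N nodes, a node becomes restricted once more than (N-1)/3 nodes vote against it, and it is finally nacked once more than 2*(N-1)/3 locked votes accumulate. A tiny illustration of that arithmetic (the helper names are hypothetical):

package main

import "fmt"

// restrictThreshold and nackThreshold mirror the comparisons in
// negativeAck.processNewVote: a vote count strictly greater than the
// threshold triggers the transition.
func restrictThreshold(n int) int { return (n - 1) / 3 }
func nackThreshold(n int) int     { return 2 * (n - 1) / 3 }

func main() {
	for _, n := range []int{4, 7, 10} {
		fmt.Printf("N=%d: restrict after >%d votes, nack after >%d locked votes\n",
			n, restrictThreshold(n), nackThreshold(n))
	}
}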
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/nodeset-cache.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/nodeset-cache.go
deleted file mode 100644
index bf7b88d89..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/nodeset-cache.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "errors"
- "sync"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-var (
- // ErrRoundNotReady means we got nil config.
- ErrRoundNotReady = errors.New("round is not ready")
-)
-
-type sets struct {
- nodeSet *types.NodeSet
- notarySet []map[types.NodeID]struct{}
- dkgSet map[types.NodeID]struct{}
-}
-
-// NodeSetCacheInterface specifies the interface used by NodeSetCache.
-type NodeSetCacheInterface interface {
- // Configuration returns the configuration at a given round.
- // Return the genesis configuration if round == 0.
- Configuration(round uint64) *types.Config
-
- // CRS returns the CRS for a given round.
- // Return the genesis CRS if round == 0.
- CRS(round uint64) common.Hash
-
- // NodeSet returns the node set at a given round.
- // Return the genesis node set if round == 0.
- NodeSet(round uint64) []crypto.PublicKey
-}
-
-// NodeSetCache caches node set information.
-type NodeSetCache struct {
- lock sync.RWMutex
- nsIntf NodeSetCacheInterface
- rounds map[uint64]*sets
- keyPool map[types.NodeID]*struct {
- pubKey crypto.PublicKey
- refCnt int
- }
-}
-
-// NewNodeSetCache constructs a NodeSetCache instance.
-func NewNodeSetCache(nsIntf NodeSetCacheInterface) *NodeSetCache {
- return &NodeSetCache{
- nsIntf: nsIntf,
- rounds: make(map[uint64]*sets),
- keyPool: make(map[types.NodeID]*struct {
- pubKey crypto.PublicKey
- refCnt int
- }),
- }
-}
-
-// Exists checks if a node is in the node set of that round.
-func (cache *NodeSetCache) Exists(
- round uint64, nodeID types.NodeID) (exists bool, err error) {
-
- nIDs, exists := cache.get(round)
- if !exists {
- if nIDs, err = cache.update(round); err != nil {
- return
- }
- }
- _, exists = nIDs.nodeSet.IDs[nodeID]
- return
-}
-
-// GetPublicKey returns the public key for that node.
-func (cache *NodeSetCache) GetPublicKey(
- nodeID types.NodeID) (key crypto.PublicKey, exists bool) {
-
- cache.lock.RLock()
- defer cache.lock.RUnlock()
-
- rec, exists := cache.keyPool[nodeID]
- if exists {
- key = rec.pubKey
- }
- return
-}
-
-// GetNodeSet returns a copy of the node set of this round.
-func (cache *NodeSetCache) GetNodeSet(
- round uint64) (nIDs *types.NodeSet, err error) {
-
- IDs, exists := cache.get(round)
- if !exists {
- if IDs, err = cache.update(round); err != nil {
- return
- }
- }
- nIDs = IDs.nodeSet.Clone()
- return
-}
-
-// GetNotarySet returns the notary set of this round for the given chain.
-func (cache *NodeSetCache) GetNotarySet(
- round uint64, chainID uint32) (map[types.NodeID]struct{}, error) {
- IDs, err := cache.getOrUpdate(round)
- if err != nil {
- return nil, err
- }
- if chainID >= uint32(len(IDs.notarySet)) {
- return nil, ErrInvalidChainID
- }
- return cache.cloneMap(IDs.notarySet[chainID]), nil
-}
-
-// GetDKGSet returns the DKG set of this round.
-func (cache *NodeSetCache) GetDKGSet(
- round uint64) (map[types.NodeID]struct{}, error) {
- IDs, err := cache.getOrUpdate(round)
- if err != nil {
- return nil, err
- }
- return cache.cloneMap(IDs.dkgSet), nil
-}
-
-func (cache *NodeSetCache) cloneMap(
- nIDs map[types.NodeID]struct{}) map[types.NodeID]struct{} {
- nIDsCopy := make(map[types.NodeID]struct{}, len(nIDs))
- for k := range nIDs {
- nIDsCopy[k] = struct{}{}
- }
- return nIDsCopy
-}
-
-func (cache *NodeSetCache) getOrUpdate(round uint64) (nIDs *sets, err error) {
- s, exists := cache.get(round)
- if !exists {
- if s, err = cache.update(round); err != nil {
- return
- }
- }
- nIDs = s
- return
-}
-
-// update updates the node set for that round.
-//
-// The cache keeps the updated round and the 5 rounds preceding it, and purges
-// rounds outside this range.
-func (cache *NodeSetCache) update(
- round uint64) (nIDs *sets, err error) {
-
- cache.lock.Lock()
- defer cache.lock.Unlock()
-
- // Get the requested round.
- keySet := cache.nsIntf.NodeSet(round)
- if keySet == nil {
- // That round is not ready yet.
- err = ErrRoundNotReady
- return
- }
- // Cache new round.
- nodeSet := types.NewNodeSet()
- for _, key := range keySet {
- nID := types.NewNodeID(key)
- nodeSet.Add(nID)
- if rec, exists := cache.keyPool[nID]; exists {
- rec.refCnt++
- } else {
- cache.keyPool[nID] = &struct {
- pubKey crypto.PublicKey
- refCnt int
- }{key, 1}
- }
- }
- cfg := cache.nsIntf.Configuration(round)
- crs := cache.nsIntf.CRS(round)
- nIDs = &sets{
- nodeSet: nodeSet,
- notarySet: make([]map[types.NodeID]struct{}, cfg.NumChains),
- dkgSet: nodeSet.GetSubSet(
- int(cfg.DKGSetSize), types.NewDKGSetTarget(crs)),
- }
- for i := range nIDs.notarySet {
- nIDs.notarySet[i] = nodeSet.GetSubSet(
- int(cfg.NotarySetSize), types.NewNotarySetTarget(crs, uint32(i)))
- }
-
- cache.rounds[round] = nIDs
- // Purge older rounds.
- for rID, nIDs := range cache.rounds {
- nodeSet := nIDs.nodeSet
- if round-rID <= 5 {
- continue
- }
- for nID := range nodeSet.IDs {
- rec := cache.keyPool[nID]
- if rec.refCnt--; rec.refCnt == 0 {
- delete(cache.keyPool, nID)
- }
- }
- delete(cache.rounds, rID)
- }
- return
-}
-
-func (cache *NodeSetCache) get(
- round uint64) (nIDs *sets, exists bool) {
-
- cache.lock.RLock()
- defer cache.lock.RUnlock()
-
- nIDs, exists = cache.rounds[round]
- return
-}
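NodeSetCacheInterface above is the only dependency NodeSetCache requires, so any governance-like object that can serve a configuration, a CRS and a node set per round can back the cache. The sketch below is a hedged illustration of that wiring; staticGov and notarySetOfRound are hypothetical names, and the import paths match the ones used inside the deleted files.

package example

import (
	"github.com/dexon-foundation/dexon-consensus/common"
	"github.com/dexon-foundation/dexon-consensus/core"
	"github.com/dexon-foundation/dexon-consensus/core/crypto"
	"github.com/dexon-foundation/dexon-consensus/core/types"
)

// staticGov serves a fixed configuration, CRS and node set for every round.
type staticGov struct {
	cfg  *types.Config
	crs  common.Hash
	keys []crypto.PublicKey
}

func (g *staticGov) Configuration(round uint64) *types.Config { return g.cfg }
func (g *staticGov) CRS(round uint64) common.Hash             { return g.crs }
func (g *staticGov) NodeSet(round uint64) []crypto.PublicKey  { return g.keys }

// notarySetOfRound lets NodeSetCache derive and cache the notary set of the
// given round and chain from the static governance data.
func notarySetOfRound(gov *staticGov, round uint64, chainID uint32) (
	map[types.NodeID]struct{}, error) {
	cache := core.NewNodeSetCache(gov)
	return cache.GetNotarySet(round, chainID)
}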
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/nonblocking.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/nonblocking.go
deleted file mode 100644
index fafbd10bb..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/nonblocking.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "fmt"
- "sync"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-type blockConfirmedEvent struct {
- block *types.Block
-}
-
-type stronglyAckedEvent struct {
- blockHash common.Hash
-}
-
-type totalOrderingDeliveredEvent struct {
- blockHashes common.Hashes
- mode uint32
-}
-
-type blockDeliveredEvent struct {
- blockHash common.Hash
- result *types.FinalizationResult
-}
-
-// nonBlocking implements these interfaces and is a decorator for them that
-// makes their methods non-blocking:
-//  - Application
-//  - Debug
-// It also provides non-blocking blockdb updates.
-type nonBlocking struct {
- app Application
- debug Debug
- eventChan chan interface{}
- events []interface{}
- eventsChange *sync.Cond
- running sync.WaitGroup
-}
-
-func newNonBlocking(app Application, debug Debug) *nonBlocking {
- nonBlockingModule := &nonBlocking{
- app: app,
- debug: debug,
- eventChan: make(chan interface{}, 6),
- events: make([]interface{}, 0, 100),
- eventsChange: sync.NewCond(&sync.Mutex{}),
- }
- go nonBlockingModule.run()
- return nonBlockingModule
-}
-
-func (nb *nonBlocking) addEvent(event interface{}) {
- nb.eventsChange.L.Lock()
- defer nb.eventsChange.L.Unlock()
- nb.events = append(nb.events, event)
- nb.eventsChange.Broadcast()
-}
-
-func (nb *nonBlocking) run() {
-	// This goroutine consumes the first event from events and calls the
-	// corresponding methods of Application/Debug/blockdb.
- for {
- var event interface{}
- func() {
- nb.eventsChange.L.Lock()
- defer nb.eventsChange.L.Unlock()
- for len(nb.events) == 0 {
- nb.eventsChange.Wait()
- }
- event = nb.events[0]
- nb.events = nb.events[1:]
- nb.running.Add(1)
- }()
- switch e := event.(type) {
- case stronglyAckedEvent:
- nb.debug.StronglyAcked(e.blockHash)
- case blockConfirmedEvent:
- nb.app.BlockConfirmed(*e.block)
- case totalOrderingDeliveredEvent:
- nb.debug.TotalOrderingDelivered(e.blockHashes, e.mode)
- case blockDeliveredEvent:
- nb.app.BlockDelivered(e.blockHash, *e.result)
- default:
- fmt.Printf("Unknown event %v.", e)
- }
- nb.running.Done()
- nb.eventsChange.Broadcast()
- }
-}
-
-// wait waits until all events in the queue have finished.
-func (nb *nonBlocking) wait() {
- nb.eventsChange.L.Lock()
- defer nb.eventsChange.L.Unlock()
- for len(nb.events) > 0 {
- nb.eventsChange.Wait()
- }
- nb.running.Wait()
-}
-
-// PreparePayload cannot be non-blocking.
-func (nb *nonBlocking) PreparePayload(position types.Position) ([]byte, error) {
- return nb.app.PreparePayload(position)
-}
-
-// PrepareWitness cannot be non-blocking.
-func (nb *nonBlocking) PrepareWitness(height uint64) (types.Witness, error) {
- return nb.app.PrepareWitness(height)
-}
-
-// VerifyBlock cannot be non-blocking.
-func (nb *nonBlocking) VerifyBlock(block *types.Block) types.BlockVerifyStatus {
- return nb.app.VerifyBlock(block)
-}
-
-// BlockConfirmed is called when a block is confirmed and added to lattice.
-func (nb *nonBlocking) BlockConfirmed(block types.Block) {
- nb.addEvent(blockConfirmedEvent{&block})
-}
-
-// StronglyAcked is called when a block is strongly acked.
-func (nb *nonBlocking) StronglyAcked(blockHash common.Hash) {
- if nb.debug != nil {
- nb.addEvent(stronglyAckedEvent{blockHash})
- }
-}
-
-// TotalOrderingDelivered is called when the total ordering algorithm delivers
-// a set of blocks.
-func (nb *nonBlocking) TotalOrderingDelivered(
- blockHashes common.Hashes, mode uint32) {
- if nb.debug != nil {
- nb.addEvent(totalOrderingDeliveredEvent{blockHashes, mode})
- }
-}
-
-// BlockDelivered is called when a block is added to the compaction chain.
-func (nb *nonBlocking) BlockDelivered(
- blockHash common.Hash, result types.FinalizationResult) {
- nb.addEvent(blockDeliveredEvent{
- blockHash: blockHash,
- result: &result,
- })
-}
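nonBlocking above is essentially a single-consumer event queue guarded by a sync.Cond, plus a WaitGroup for in-flight handlers. The sketch below reproduces that structure with hypothetical names and a plain string event type, purely to illustrate the pattern.

package main

import (
	"fmt"
	"sync"
)

// eventQueue queues events without blocking the producer; a single goroutine
// drains them in order, mirroring nonBlocking.run.
type eventQueue struct {
	cond    *sync.Cond
	events  []string
	running sync.WaitGroup
}

func newEventQueue() *eventQueue {
	q := &eventQueue{cond: sync.NewCond(&sync.Mutex{})}
	go q.run()
	return q
}

// push enqueues an event and returns immediately.
func (q *eventQueue) push(e string) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	q.events = append(q.events, e)
	q.cond.Broadcast()
}

// run consumes events one at a time, handling each outside the lock.
func (q *eventQueue) run() {
	for {
		q.cond.L.Lock()
		for len(q.events) == 0 {
			q.cond.Wait()
		}
		e := q.events[0]
		q.events = q.events[1:]
		q.running.Add(1)
		q.cond.L.Unlock()
		fmt.Println("handled", e) // stand-in for calling Application/Debug
		q.running.Done()
		q.cond.Broadcast()
	}
}

// wait blocks until every queued event has been handled, like nonBlocking.wait.
func (q *eventQueue) wait() {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	for len(q.events) > 0 {
		q.cond.Wait()
	}
	q.running.Wait()
}

func main() {
	q := newEventQueue()
	q.push("BlockConfirmed")
	q.push("BlockDelivered")
	q.wait()
}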
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/round-based-config.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/round-based-config.go
deleted file mode 100644
index 580b65e1c..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/round-based-config.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-type roundBasedConfig struct {
- roundID uint64
- // roundBeginTime is the beginning of round, as local time.
- roundBeginTime time.Time
- roundInterval time.Duration
- // roundEndTime is a cache for begin + interval.
- roundEndTime time.Time
-}
-
-func (config *roundBasedConfig) setupRoundBasedFields(
- roundID uint64, cfg *types.Config) {
- config.roundID = roundID
- config.roundInterval = cfg.RoundInterval
-}
-
-func (config *roundBasedConfig) setRoundBeginTime(begin time.Time) {
- config.roundBeginTime = begin
- config.roundEndTime = begin.Add(config.roundInterval)
-}
-
-// isValidLastBlock checks if a block is a valid last block of this round.
-func (config *roundBasedConfig) isValidLastBlock(b *types.Block) bool {
- return b.Position.Round == config.roundID &&
- b.Timestamp.After(config.roundEndTime)
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/ticker.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/ticker.go
deleted file mode 100644
index 3728a79e6..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/ticker.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import "time"
-
-// TickerType is the type of ticker.
-type TickerType int
-
-// TickerType enum.
-const (
- TickerBA TickerType = iota
- TickerDKG
- TickerCRS
-)
-
-// defaultTicker is a wrapper to implement ticker interface based on
-// time.Ticker.
-type defaultTicker struct {
- ticker *time.Ticker
-}
-
-// newDefaultTicker constructs a defaultTicker instance from a given interval.
-func newDefaultTicker(lambda time.Duration) *defaultTicker {
- return &defaultTicker{ticker: time.NewTicker(lambda)}
-}
-
-// Tick implements Tick method of ticker interface.
-func (t *defaultTicker) Tick() <-chan time.Time {
- return t.ticker.C
-}
-
-// Stop implements Stop method of ticker interface.
-func (t *defaultTicker) Stop() {
- t.ticker.Stop()
-}
-
-// newTicker is a helper to set up a ticker from a Governance. If the
-// governance object implements a ticker generator, a ticker from that
-// generator is returned; otherwise a default one is constructed.
-func newTicker(gov Governance, round uint64, tickerType TickerType) (t Ticker) {
- type tickerGenerator interface {
- NewTicker(TickerType) Ticker
- }
-
- if gen, ok := gov.(tickerGenerator); ok {
- t = gen.NewTicker(tickerType)
- }
- if t == nil {
- var duration time.Duration
- switch tickerType {
- case TickerBA:
- duration = gov.Configuration(round).LambdaBA
- case TickerDKG:
- duration = gov.Configuration(round).LambdaDKG
- case TickerCRS:
- duration = gov.Configuration(round).RoundInterval / 2
- }
- t = newDefaultTicker(duration)
- }
- return
-}
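newTicker above relies on an optional-interface probe: the Governance value is type-asserted against a private tickerGenerator interface, and a time.Ticker-based default is used when the assertion fails. A self-contained sketch of that pattern follows; every name below is a stand-in rather than a real core type.

package main

import (
	"fmt"
	"time"
)

// Ticker is a minimal ticker abstraction, analogous to the core Ticker interface.
type Ticker interface {
	Tick() <-chan time.Time
	Stop()
}

type defaultTicker struct{ t *time.Ticker }

func (d *defaultTicker) Tick() <-chan time.Time { return d.t.C }
func (d *defaultTicker) Stop()                  { d.t.Stop() }

// tickerGenerator is the optional capability a governance object may provide.
type tickerGenerator interface {
	NewTicker() Ticker
}

// newTicker prefers a governance-provided ticker and falls back to a
// time.Ticker-based default, mirroring the structure of the helper above.
func newTicker(gov interface{}, fallback time.Duration) Ticker {
	if gen, ok := gov.(tickerGenerator); ok {
		if t := gen.NewTicker(); t != nil {
			return t
		}
	}
	return &defaultTicker{t: time.NewTicker(fallback)}
}

func main() {
	t := newTicker(struct{}{}, 100*time.Millisecond) // no generator: default used
	defer t.Stop()
	fmt.Println("got a ticker:", t != nil)
}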
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/total-ordering-syncer.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/total-ordering-syncer.go
deleted file mode 100644
index aa90a1ded..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/total-ordering-syncer.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "sort"
- "sync"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-type totalOrderingSyncer struct {
- lock sync.RWMutex
-
- numChains uint32
- syncHeight map[uint32]uint64
- syncDeliverySetIdx int
- pendingBlocks []*types.Block
- inPendingBlocks map[common.Hash]struct{}
-
- bootstrapChain map[uint32]struct{}
-
- // Data to restore delivery set.
- pendingDeliveryBlocks []*types.Block
- deliverySet map[int][]*types.Block
- mapToDeliverySet map[common.Hash]int
-}
-
-func newTotalOrderingSyncer(numChains uint32) *totalOrderingSyncer {
- return &totalOrderingSyncer{
- numChains: numChains,
- syncHeight: make(map[uint32]uint64),
- syncDeliverySetIdx: -1,
- inPendingBlocks: make(map[common.Hash]struct{}),
- bootstrapChain: make(map[uint32]struct{}),
- deliverySet: make(map[int][]*types.Block),
- mapToDeliverySet: make(map[common.Hash]int),
- }
-}
-
-func (tos *totalOrderingSyncer) synced() bool {
- tos.lock.RLock()
- defer tos.lock.RUnlock()
- return tos.syncDeliverySetIdx != -1
-}
-
-func (tos *totalOrderingSyncer) processBlock(
- block *types.Block) (delivered []*types.Block) {
- if tos.synced() {
- if tos.syncHeight[block.Position.ChainID] >= block.Position.Height {
- return
- }
- delivered = append(delivered, block)
- return
- }
- tos.lock.Lock()
- defer tos.lock.Unlock()
- tos.inPendingBlocks[block.Hash] = struct{}{}
- tos.pendingBlocks = append(tos.pendingBlocks, block)
- if block.Position.Height == 0 {
- tos.bootstrapChain[block.Position.ChainID] = struct{}{}
- }
- if uint32(len(tos.bootstrapChain)) == tos.numChains {
- // Bootstrap mode.
- delivered = tos.pendingBlocks
- tos.syncDeliverySetIdx = 0
- for i := uint32(0); i < tos.numChains; i++ {
- tos.syncHeight[i] = uint64(0)
- }
- } else {
- maxDeliverySetIdx := -1
- // TODO(jimmy-dexon): below for loop can be optimized.
- PendingBlockLoop:
- for i, block := range tos.pendingBlocks {
- idx, exist := tos.mapToDeliverySet[block.Hash]
- if !exist {
- continue
- }
- deliverySet := tos.deliverySet[idx]
- // Check if all the blocks in deliverySet are in the pendingBlocks.
- for _, dBlock := range deliverySet {
- if _, exist := tos.inPendingBlocks[dBlock.Hash]; !exist {
- continue PendingBlockLoop
- }
- }
- if idx > maxDeliverySetIdx {
- maxDeliverySetIdx = idx
- }
- // Check if all of the chains have delivered.
- for _, dBlock := range deliverySet {
- if h, exist := tos.syncHeight[dBlock.Position.ChainID]; exist {
- if dBlock.Position.Height < h {
- continue
- }
- }
- tos.syncHeight[dBlock.Position.ChainID] = dBlock.Position.Height
- }
- if uint32(len(tos.syncHeight)) != tos.numChains {
- continue
- }
- // Core is fully synced, it can start delivering blocks from idx.
- tos.syncDeliverySetIdx = maxDeliverySetIdx
- delivered = make([]*types.Block, 0, i)
- break
- }
- if tos.syncDeliverySetIdx == -1 {
- return
- }
-		// Generate the blocks to deliver.
- for i := maxDeliverySetIdx; i < len(tos.deliverySet); i++ {
- deliverySet := tos.deliverySet[i]
- sort.Sort(types.ByHash(deliverySet))
- for _, block := range deliverySet {
- if block.Position.Height > tos.syncHeight[block.Position.ChainID] {
- tos.syncHeight[block.Position.ChainID] = block.Position.Height
- }
- delivered = append(delivered, block)
- }
- }
- // Flush remaining blocks.
- for _, block := range tos.pendingBlocks {
- if _, exist := tos.mapToDeliverySet[block.Hash]; exist {
- continue
- }
- if block.Position.Height > tos.syncHeight[block.Position.ChainID] {
- tos.syncHeight[block.Position.ChainID] = block.Position.Height
- }
- delivered = append(delivered, block)
- }
- }
- // Clean internal data model to save memory.
- tos.pendingBlocks = nil
- tos.inPendingBlocks = nil
- tos.bootstrapChain = nil
- tos.pendingDeliveryBlocks = nil
- tos.deliverySet = nil
- tos.mapToDeliverySet = nil
- return
-}
-
-// Finalized blocks should be passed in the order of consensus height.
-func (tos *totalOrderingSyncer) processFinalizedBlock(block *types.Block) {
- tos.lock.Lock()
- defer tos.lock.Unlock()
- if len(tos.pendingDeliveryBlocks) > 0 {
- if block.Hash.Less(
- tos.pendingDeliveryBlocks[len(tos.pendingDeliveryBlocks)-1].Hash) {
- // pendingDeliveryBlocks forms a deliverySet.
- idx := len(tos.deliverySet)
- tos.deliverySet[idx] = tos.pendingDeliveryBlocks
- for _, block := range tos.pendingDeliveryBlocks {
- tos.mapToDeliverySet[block.Hash] = idx
- }
- tos.pendingDeliveryBlocks = []*types.Block{}
- }
- }
- tos.pendingDeliveryBlocks = append(tos.pendingDeliveryBlocks, block)
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/total-ordering.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/total-ordering.go
deleted file mode 100644
index a4778f593..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/total-ordering.go
+++ /dev/null
@@ -1,1355 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "errors"
- "math"
- "sort"
- "sync"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-const (
- infinity uint64 = math.MaxUint64
-)
-
-const (
- // TotalOrderingModeError returns mode error.
- TotalOrderingModeError uint32 = iota
- // TotalOrderingModeNormal returns mode normal.
- TotalOrderingModeNormal
- // TotalOrderingModeEarly returns mode early.
- TotalOrderingModeEarly
- // TotalOrderingModeFlush returns mode flush.
- TotalOrderingModeFlush
-)
-
-var (
-	// ErrNotValidDAG is reported when a block submitted to totalOrdering
-	// doesn't form a valid DAG.
- ErrNotValidDAG = errors.New("not a valid dag")
-	// ErrFutureRoundDelivered means some blocks from later rounds are
-	// delivered; this indicates a program error.
- ErrFutureRoundDelivered = errors.New("future round delivered")
- // ErrBlockFromPastRound means we receive some block from past round.
- ErrBlockFromPastRound = errors.New("block from past round")
- // ErrTotalOrderingHangs means total ordering hangs somewhere.
- ErrTotalOrderingHangs = errors.New("total ordering hangs")
- // ErrForwardAck means a block acking some blocks from newer round.
- ErrForwardAck = errors.New("forward ack")
-	// ErrUnexpected is a general-purpose error for unexpected situations.
- ErrUnexpected = errors.New("unexpected")
-)
-
-// totalOrderingConfig is the configuration for total ordering.
-type totalOrderingConfig struct {
- roundBasedConfig
- // k represents the k in 'k-level total ordering'.
-	// In short, only blocks whose height equals (global minimum height + k)
-	// are taken into consideration.
- k uint64
-	// phi is a constant controlling how strong the leading preceding block
-	// should be.
- phi uint64
-	// numChains is the count of chains.
- numChains uint32
-	// isFlushRequired indicates whether round cutting (flush) is required.
- isFlushRequired bool
-}
-
-func (config *totalOrderingConfig) fromConfig(
- roundID uint64, cfg *types.Config) {
- config.k = uint64(cfg.K)
- config.numChains = cfg.NumChains
- config.phi = uint64(float32(cfg.NumChains-1)*cfg.PhiRatio + 1)
- config.setupRoundBasedFields(roundID, cfg)
-}
-
-func newGenesisTotalOrderingConfig(
- dMoment time.Time, config *types.Config) *totalOrderingConfig {
- c := &totalOrderingConfig{}
- c.fromConfig(0, config)
- c.setRoundBeginTime(dMoment)
- return c
-}
-
-func newTotalOrderingConfig(
- prev *totalOrderingConfig, cur *types.Config) *totalOrderingConfig {
- c := &totalOrderingConfig{}
- c.fromConfig(prev.roundID+1, cur)
- c.setRoundBeginTime(prev.roundEndTime)
- prev.isFlushRequired = c.k != prev.k ||
- c.phi != prev.phi ||
- c.numChains != prev.numChains
- return c
-}
-
-// totalOrderingWinRecord caches on which chains this candidate
-// wins against another candidate, based on their height vectors.
-type totalOrderingWinRecord struct {
- wins []int8
- count uint
-}
-
-func (rec *totalOrderingWinRecord) reset() {
- rec.count = 0
- for idx := range rec.wins {
- rec.wins[idx] = 0
- }
-}
-
-func newTotalOrderingWinRecord(numChains uint32) (
- rec *totalOrderingWinRecord) {
- rec = &totalOrderingWinRecord{}
- rec.reset()
- rec.wins = make([]int8, numChains)
- return
-}
-
-// grade implements the 'grade' potential function described in the white paper.
-func (rec *totalOrderingWinRecord) grade(
- numChains uint32, phi uint64, globalAnsLength uint64) int {
- if uint64(rec.count) >= phi {
- return 1
- } else if uint64(rec.count) < phi-uint64(numChains)+globalAnsLength {
- return 0
- } else {
- return -1
- }
-}
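-
-// Illustrative evaluation of grade (hypothetical values): with numChains = 4,
-// phi = 3 and globalAnsLength = 2, the threshold phi-numChains+globalAnsLength
-// is 1, so count = 0 grades 0, count = 1 or 2 grades -1, and count >= 3
-// grades 1.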
-
-// totalOrderingHeightRecord records two things:
-//  - the minimum height of blocks from that chain acking this block.
-// - the count of blocks from that chain acking this block.
-type totalOrderingHeightRecord struct{ minHeight, count uint64 }
-
-// totalOrderingObjectCache caches objects for reuse.
-// The main target is maps because:
-//  - reusing a map prevents it from growing during usage; when a map grows,
-//    key hashes are recalculated, buckets are reallocated, and values
-//    are copied.
-// However, to reuse a map there is no easy way to erase its content other
-// than iterating its keys and deleting the corresponding entries.
-type totalOrderingObjectCache struct {
- ackedStatus [][]*totalOrderingHeightRecord
- heightVectors [][]uint64
- winRecordContainers [][]*totalOrderingWinRecord
- ackedVectors []map[common.Hash]struct{}
- winRecordPool sync.Pool
- numChains uint32
-}
-
-// newTotalOrderingObjectCache constructs a totalOrderingObjectCache
-// instance.
-func newTotalOrderingObjectCache(numChains uint32) *totalOrderingObjectCache {
- return &totalOrderingObjectCache{
- winRecordPool: sync.Pool{
- New: func() interface{} {
- return newTotalOrderingWinRecord(numChains)
- },
- },
- numChains: numChains,
- }
-}
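-
-// Usage sketch for the object cache (illustrative, hypothetical values):
-//
-//	cache := newTotalOrderingObjectCache(4)
-//	hv := cache.requestHeightVector() // len(hv) == 4, every entry == infinity
-//	// ... use hv while processing a candidate ...
-//	cache.recycleHeightVector(hv)     // hv becomes reusable by later requests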
-
-// resize makes sure the internal storage of this cache can handle the
-// maximum possible numChains in future configs.
-func (cache *totalOrderingObjectCache) resize(numChains uint32) {
- // Basically, everything in cache needs to be cleaned.
- if cache.numChains >= numChains {
- return
- }
- cache.ackedStatus = nil
- cache.heightVectors = nil
- cache.winRecordContainers = nil
- cache.ackedVectors = nil
- cache.numChains = numChains
- cache.winRecordPool = sync.Pool{
- New: func() interface{} {
- return newTotalOrderingWinRecord(numChains)
- },
- }
-}
-
-// requestAckedStatus requests a structure to record acking status of one
-// candidate (or a global view of acking status of pending set).
-func (cache *totalOrderingObjectCache) requestAckedStatus() (
- acked []*totalOrderingHeightRecord) {
- if len(cache.ackedStatus) == 0 {
- acked = make([]*totalOrderingHeightRecord, cache.numChains)
- for idx := range acked {
- acked[idx] = &totalOrderingHeightRecord{count: 0}
- }
- } else {
- acked, cache.ackedStatus =
- cache.ackedStatus[len(cache.ackedStatus)-1],
- cache.ackedStatus[:len(cache.ackedStatus)-1]
- // Reset acked status.
- for idx := range acked {
- acked[idx].count = 0
- }
- }
- return
-}
-
-// recycleAckedStatus recycles the structure used to record acking status.
-func (cache *totalOrderingObjectCache) recycleAckedStatus(
- acked []*totalOrderingHeightRecord) {
-	// If the recycled object does not match the numChains we require,
-	// don't recycle it.
- if uint32(len(acked)) != cache.numChains {
- return
- }
- cache.ackedStatus = append(cache.ackedStatus, acked)
-}
-
-// requestWinRecord requests a totalOrderingWinRecord instance.
-func (cache *totalOrderingObjectCache) requestWinRecord() (
- win *totalOrderingWinRecord) {
- win = cache.winRecordPool.Get().(*totalOrderingWinRecord)
- win.reset()
- return
-}
-
-// recycleWinRecord recycles a totalOrderingWinRecord instance.
-func (cache *totalOrderingObjectCache) recycleWinRecord(
- win *totalOrderingWinRecord) {
- if win == nil {
- return
- }
-	// If the recycled object does not match the numChains we require,
-	// don't recycle it.
- if uint32(len(win.wins)) != cache.numChains {
- return
- }
- cache.winRecordPool.Put(win)
-}
-
-// requestHeightVector requests a structure to record acking heights
-// of one candidate.
-func (cache *totalOrderingObjectCache) requestHeightVector() (hv []uint64) {
- if len(cache.heightVectors) == 0 {
- hv = make([]uint64, cache.numChains)
- } else {
- hv, cache.heightVectors =
- cache.heightVectors[len(cache.heightVectors)-1],
- cache.heightVectors[:len(cache.heightVectors)-1]
- }
- for idx := range hv {
- hv[idx] = infinity
- }
- return
-}
-
-// recycleHeightVector recycles an instance to record acking heights
-// of one candidate.
-func (cache *totalOrderingObjectCache) recycleHeightVector(hv []uint64) {
-	// If the recycled object does not match the numChains we require,
-	// don't recycle it.
- if uint32(len(hv)) != cache.numChains {
- return
- }
- cache.heightVectors = append(cache.heightVectors, hv)
-}
-
-// requestWinRecordContainer requests a container (slice) of totalOrderingWinRecord.
-func (cache *totalOrderingObjectCache) requestWinRecordContainer() (
- con []*totalOrderingWinRecord) {
- if len(cache.winRecordContainers) == 0 {
- con = make([]*totalOrderingWinRecord, cache.numChains)
- } else {
- con, cache.winRecordContainers =
- cache.winRecordContainers[len(cache.winRecordContainers)-1],
- cache.winRecordContainers[:len(cache.winRecordContainers)-1]
- for idx := range con {
- con[idx] = nil
- }
- }
- return
-}
-
-// recycleWinRecordContainer recycles a container (slice) of totalOrderingWinRecord.
-func (cache *totalOrderingObjectCache) recycleWinRecordContainer(
- con []*totalOrderingWinRecord) {
-	// If the recycled object does not match the numChains we require,
-	// don't recycle it.
- if uint32(len(con)) != cache.numChains {
- return
- }
- cache.winRecordContainers = append(cache.winRecordContainers, con)
-}
-
-// requestAckedVector requests an acked vector instance.
-func (cache *totalOrderingObjectCache) requestAckedVector() (
- acked map[common.Hash]struct{}) {
- if len(cache.ackedVectors) == 0 {
- acked = make(map[common.Hash]struct{})
- } else {
- acked, cache.ackedVectors =
- cache.ackedVectors[len(cache.ackedVectors)-1],
- cache.ackedVectors[:len(cache.ackedVectors)-1]
- for k := range acked {
- delete(acked, k)
- }
- }
- return
-}
-
-// recycleAckedVector recycles an acked vector instance.
-func (cache *totalOrderingObjectCache) recycleAckedVector(
- acked map[common.Hash]struct{}) {
- if acked == nil {
- return
- }
- cache.ackedVectors = append(cache.ackedVectors, acked)
-}
-
-// totalOrderingCandidateInfo describes the proceeding status of one candidate,
-// including:
-//  - acked status as height records, which keep 'how many blocks from one
-//    chain ack this candidate'.
-//  - a cached height vector, which holds the valid heights (based on K-level)
-//    used for comparison in the 'grade' function.
-//  - cached results of the grade function against other candidates.
-//
-// Height Record:
-//  When block A acks block B, all blocks proposed by the same proposer as
-//  block A with higher height also ack block B. Therefore, we just need to
-//  record:
-//   - the minimum height of acking blocks from that proposer
-//   - the count of acking blocks from that proposer
-//  to represent how block B is acked by that proposer.
-type totalOrderingCandidateInfo struct {
- ackedStatus []*totalOrderingHeightRecord
- cachedHeightVector []uint64
- winRecords []*totalOrderingWinRecord
- hash common.Hash
-}
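-
-// Illustrative height record (hypothetical values): if the only blocks from
-// chain 2 acking this candidate sit at heights 5, 6 and 7, then
-// ackedStatus[2] holds {minHeight: 5, count: 3}.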
-
-// newTotalOrderingCandidateInfo constructs a totalOrderingCandidateInfo
-// instance.
-func newTotalOrderingCandidateInfo(
- candidateHash common.Hash,
- objCache *totalOrderingObjectCache) *totalOrderingCandidateInfo {
- return &totalOrderingCandidateInfo{
- ackedStatus: objCache.requestAckedStatus(),
- winRecords: objCache.requestWinRecordContainer(),
- hash: candidateHash,
- }
-}
-
-// clean clears information related to another candidate; it should be called
-// when that candidate is selected into the delivery set.
-func (v *totalOrderingCandidateInfo) clean(otherCandidateChainID uint32) {
- v.winRecords[otherCandidateChainID] = nil
-}
-
-// recycle returns objects to the cache for later reuse; this eases the load
-// on Go's GC.
-func (v *totalOrderingCandidateInfo) recycle(
- objCache *totalOrderingObjectCache) {
- if v.winRecords != nil {
- for _, win := range v.winRecords {
- objCache.recycleWinRecord(win)
- }
- objCache.recycleWinRecordContainer(v.winRecords)
- }
- if v.cachedHeightVector != nil {
- objCache.recycleHeightVector(v.cachedHeightVector)
- }
- objCache.recycleAckedStatus(v.ackedStatus)
-}
-
-// addBlock updates the totalOrderingCandidateInfo; it is the caller's duty
-// to make sure the input block actually acks the target block.
-func (v *totalOrderingCandidateInfo) addBlock(b *types.Block) (err error) {
- rec := v.ackedStatus[b.Position.ChainID]
- if rec.count == 0 {
- rec.minHeight = b.Position.Height
- rec.count = 1
- } else {
- if b.Position.Height < rec.minHeight {
- err = ErrNotValidDAG
- return
- }
- rec.count++
- }
- return
-}
-
-// getAckingNodeSetLength generates the Acking Node Set and returns its
-// length. Only block heights no less than
-//
-//    global minimum height + k
-//
-// are taken into consideration. For example:
-//
-//  For some chain X:
-//   - the global minimum acking height = 1,
-//   - k = 1
-//  then only blocks with height >= 2 would be added to the acking node set.
-func (v *totalOrderingCandidateInfo) getAckingNodeSetLength(
- global *totalOrderingCandidateInfo,
- k uint64,
- numChains uint32) (count uint64) {
- var rec *totalOrderingHeightRecord
- for idx, gRec := range global.ackedStatus[:numChains] {
- if gRec.count == 0 {
- continue
- }
- rec = v.ackedStatus[idx]
- if rec.count == 0 {
- continue
- }
-		// This line checks whether these two ranges overlap:
-		//  - [global minimum height + k, infinity)
-		//  - [local minimum height, local minimum height + count - 1]
- if rec.minHeight+rec.count-1 >= gRec.minHeight+k {
- count++
- }
- }
- return
-}
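-
-// Illustrative check (hypothetical values): with gRec.minHeight = 1 and k = 1,
-// the threshold is 2; a candidate record with minHeight = 3 and count = 2
-// covers heights 3..4, so 4 >= 2 and that chain is counted, while a record
-// with minHeight = 0 and count = 1 only covers height 0 and is not counted.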
-
-// updateAckingHeightVector caches the acking height vector.
-//
-// Only blocks whose height equals (global minimum block height + k) are
-// taken into consideration.
-func (v *totalOrderingCandidateInfo) updateAckingHeightVector(
- global *totalOrderingCandidateInfo,
- k uint64,
- dirtyChainIDs []int,
- objCache *totalOrderingObjectCache) {
- var (
- idx int
- gRec, rec *totalOrderingHeightRecord
- )
-	// The reason not to merge the two loops is that iterating over the whole
-	// map is expensive when the chain count is large; iterating over dirty
-	// chains only is cheaper.
-	// TODO(mission): merge the code in this if/else if extracting a function
-	// for the shared part does not degrade performance.
- if v.cachedHeightVector == nil {
- // Generate height vector from scratch.
- v.cachedHeightVector = objCache.requestHeightVector()
- for idx, gRec = range global.ackedStatus {
- if gRec.count <= k {
- continue
- }
- rec = v.ackedStatus[idx]
- if rec.count == 0 {
- v.cachedHeightVector[idx] = infinity
- } else if rec.minHeight <= gRec.minHeight+k {
- // This check is sufficient to make sure the block height:
- //
- // gRec.minHeight + k
- //
- // would be included in this totalOrderingCandidateInfo.
- v.cachedHeightVector[idx] = gRec.minHeight + k
- } else {
- v.cachedHeightVector[idx] = infinity
- }
- }
- } else {
- // Return the cached one, only update dirty fields.
- for _, idx = range dirtyChainIDs {
- gRec = global.ackedStatus[idx]
- if gRec.count == 0 || gRec.count <= k {
- v.cachedHeightVector[idx] = infinity
- continue
- }
- rec = v.ackedStatus[idx]
- if rec.count == 0 {
- v.cachedHeightVector[idx] = infinity
- } else if rec.minHeight <= gRec.minHeight+k {
- v.cachedHeightVector[idx] = gRec.minHeight + k
- } else {
- v.cachedHeightVector[idx] = infinity
- }
- }
- }
- return
-}
-
-// updateWinRecord sets up win records between two candidates.
-func (v *totalOrderingCandidateInfo) updateWinRecord(
- otherChainID uint32,
- other *totalOrderingCandidateInfo,
- dirtyChainIDs []int,
- objCache *totalOrderingObjectCache,
- numChains uint32) {
- var (
- idx int
- height uint64
- )
-	// The reason not to merge the two loops is that iterating over the whole
-	// map is expensive when the chain count is large; iterating over dirty
-	// chains only is cheaper.
-	// TODO(mission): merge the code in this if/else if adding a function won't
-	// affect the performance.
- win := v.winRecords[otherChainID]
- if win == nil {
- win = objCache.requestWinRecord()
- v.winRecords[otherChainID] = win
- for idx, height = range v.cachedHeightVector[:numChains] {
- if height == infinity {
- continue
- }
- if other.cachedHeightVector[idx] == infinity {
- win.wins[idx] = 1
- win.count++
- }
- }
- } else {
- for _, idx = range dirtyChainIDs {
- if v.cachedHeightVector[idx] == infinity {
- if win.wins[idx] == 1 {
- win.wins[idx] = 0
- win.count--
- }
- continue
- }
- if other.cachedHeightVector[idx] == infinity {
- if win.wins[idx] == 0 {
- win.wins[idx] = 1
- win.count++
- }
- } else {
- if win.wins[idx] == 1 {
- win.wins[idx] = 0
- win.count--
- }
- }
- }
- }
-}
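-
-// Illustrative reading of a win record (hypothetical values): if
-// v.cachedHeightVector[2] is finite while other.cachedHeightVector[2] is
-// infinity, then v 'wins' chain 2 against other, so win.wins[2] is set to 1
-// and contributes to win.count.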
-
-// totalOrderingBreakpoint is a record to store the height discontinuity
-// on a chain.
-type totalOrderingBreakpoint struct {
- roundID uint64
-	// lastHeight is the height of the last block in the previous round.
- lastHeight uint64
-}
-
-// totalOrderingGlobalVector keeps the global status of the current pending set.
-type totalOrderingGlobalVector struct {
- // blocks stores all blocks grouped by their proposers and
- // sorted by their block height.
- //
- // TODO(mission): the way we use this slice would make it reallocate
- // frequently.
- blocks [][]*types.Block
-
-	// breakpoints caches, for each chain, the rounds at which block heights
-	// are not continuous. Ex.
-	//  ChainID  Round  Height
-	//        1      0       0
-	//        1      0       1
-	//        1      1       2
-	//        1      1       3
-	//        1      1       4
-	//        1      3       0  <- a breakpoint for round 3 would be cached
-	//                             for chain 1 as (roundID=3, lastHeight=4).
- breakpoints [][]*totalOrderingBreakpoint
-
- // curRound caches the last round ID used to purge breakpoints.
- curRound uint64
-
- // tips records the last seen block for each chain.
- tips []*types.Block
-
-	// cachedCandidateInfo is a totalOrderingCandidateInfo instance, which
-	// serves as the global acking view used by candidates to calculate
-	// their height vectors.
- cachedCandidateInfo *totalOrderingCandidateInfo
-}
-
-func newTotalOrderingGlobalVector(numChains uint32) *totalOrderingGlobalVector {
- return &totalOrderingGlobalVector{
- blocks: make([][]*types.Block, numChains),
- tips: make([]*types.Block, numChains),
- breakpoints: make([][]*totalOrderingBreakpoint, numChains),
- }
-}
-
-func (global *totalOrderingGlobalVector) resize(numChains uint32) {
- if len(global.blocks) >= int(numChains) {
- return
- }
- // Resize blocks.
- newBlocks := make([][]*types.Block, numChains)
- copy(newBlocks, global.blocks)
- global.blocks = newBlocks
- // Resize breakpoints.
- newBreakPoints := make([][]*totalOrderingBreakpoint, numChains)
- copy(newBreakPoints, global.breakpoints)
- global.breakpoints = newBreakPoints
- // Resize tips.
- newTips := make([]*types.Block, numChains)
- copy(newTips, global.tips)
- global.tips = newTips
-}
-
-func (global *totalOrderingGlobalVector) switchRound(roundID uint64) {
- if global.curRound+1 != roundID {
- panic(ErrUnexpected)
- }
- global.curRound = roundID
- for chainID, bs := range global.breakpoints {
- if len(bs) == 0 {
- continue
- }
- if bs[0].roundID == roundID {
- global.breakpoints[chainID] = bs[1:]
- }
- }
-}
-
-func (global *totalOrderingGlobalVector) prepareHeightRecord(
- candidate *types.Block,
- info *totalOrderingCandidateInfo,
- acked map[common.Hash]struct{}) {
- var (
- chainID = candidate.Position.ChainID
- breakpoints = global.breakpoints[chainID]
- breakpoint *totalOrderingBreakpoint
- rec *totalOrderingHeightRecord
- )
- // Setup height record for own chain.
- rec = &totalOrderingHeightRecord{
- minHeight: candidate.Position.Height,
- }
- if len(breakpoints) == 0 {
- rec.count = uint64(len(global.blocks[chainID]))
- } else {
- rec.count = breakpoints[0].lastHeight - candidate.Position.Height + 1
- }
- info.ackedStatus[chainID] = rec
- if acked == nil {
- return
- }
- for idx, blocks := range global.blocks {
- if idx == int(candidate.Position.ChainID) {
- continue
- }
- breakpoint = nil
- if len(global.breakpoints[idx]) > 0 {
- breakpoint = global.breakpoints[idx][0]
- }
- for i, b := range blocks {
- if breakpoint != nil && b.Position.Round >= breakpoint.roundID {
- break
- }
- if _, acked := acked[b.Hash]; !acked {
- continue
- }
-			// If this block acks this candidate, all newer blocks
-			// from the same chain also indirectly ack it.
- rec = info.ackedStatus[idx]
- rec.minHeight = b.Position.Height
- if breakpoint == nil {
- rec.count = uint64(len(blocks) - i)
- } else {
- rec.count = breakpoint.lastHeight - b.Position.Height + 1
- }
- break
- }
- }
-
-}
-
-func (global *totalOrderingGlobalVector) addBlock(
- b *types.Block) (pos int, pending bool, err error) {
- curPosition := b.Position
- tip := global.tips[curPosition.ChainID]
- pos = len(global.blocks[curPosition.ChainID])
- if tip != nil {
-		// Perform a lightweight sanity check based on the tip.
- lastPosition := tip.Position
- if lastPosition.Round > curPosition.Round {
- err = ErrNotValidDAG
- return
- }
- if DiffUint64(lastPosition.Round, curPosition.Round) > 1 {
- if curPosition.Height != 0 {
- err = ErrNotValidDAG
- return
- }
- // Add breakpoint.
- global.breakpoints[curPosition.ChainID] = append(
- global.breakpoints[curPosition.ChainID],
- &totalOrderingBreakpoint{
- roundID: curPosition.Round,
- lastHeight: lastPosition.Height,
- })
- } else {
- if curPosition.Height != lastPosition.Height+1 {
- err = ErrNotValidDAG
- return
- }
- }
- } else {
- if curPosition.Round < global.curRound {
- err = ErrBlockFromPastRound
- return
- }
- if curPosition.Round > global.curRound {
- // Add breakpoint.
- global.breakpoints[curPosition.ChainID] = append(
- global.breakpoints[curPosition.ChainID],
- &totalOrderingBreakpoint{
- roundID: curPosition.Round,
- lastHeight: 0,
- })
- }
- }
- breakpoints := global.breakpoints[b.Position.ChainID]
- pending = len(breakpoints) > 0 && breakpoints[0].roundID <= b.Position.Round
- global.blocks[b.Position.ChainID] = append(
- global.blocks[b.Position.ChainID], b)
- global.tips[b.Position.ChainID] = b
- return
-}
-
-// updateCandidateInfo updates the cached candidate info.
-func (global *totalOrderingGlobalVector) updateCandidateInfo(
- dirtyChainIDs []int, objCache *totalOrderingObjectCache) {
- var (
- idx int
- blocks []*types.Block
- block *types.Block
- info *totalOrderingCandidateInfo
- rec *totalOrderingHeightRecord
- breakpoint *totalOrderingBreakpoint
- )
- if global.cachedCandidateInfo == nil {
- info = newTotalOrderingCandidateInfo(common.Hash{}, objCache)
- for idx, blocks = range global.blocks {
- if len(blocks) == 0 {
- continue
- }
- rec = info.ackedStatus[idx]
- if len(global.breakpoints[idx]) > 0 {
- breakpoint = global.breakpoints[idx][0]
- block = blocks[0]
- if block.Position.Round >= breakpoint.roundID {
- continue
- }
- rec.minHeight = block.Position.Height
- rec.count = breakpoint.lastHeight - block.Position.Height + 1
- } else {
- rec.minHeight = blocks[0].Position.Height
- rec.count = uint64(len(blocks))
- }
- }
- global.cachedCandidateInfo = info
- } else {
- info = global.cachedCandidateInfo
- for _, idx = range dirtyChainIDs {
- blocks = global.blocks[idx]
- if len(blocks) == 0 {
- info.ackedStatus[idx].count = 0
- continue
- }
- rec = info.ackedStatus[idx]
- if len(global.breakpoints[idx]) > 0 {
- breakpoint = global.breakpoints[idx][0]
- block = blocks[0]
- if block.Position.Round >= breakpoint.roundID {
- continue
- }
- rec.minHeight = block.Position.Height
- rec.count = breakpoint.lastHeight - block.Position.Height + 1
- } else {
- rec.minHeight = blocks[0].Position.Height
- rec.count = uint64(len(blocks))
- }
- }
- }
- return
-}
-
-// totalOrdering represent a process unit to handle total ordering
-// for blocks.
-type totalOrdering struct {
-	// pendings stores blocks waiting to be ordered.
- pendings map[common.Hash]*types.Block
-
- // The round of config used when performing total ordering.
- curRound uint64
-
- // duringFlush is a flag to switch the flush mode and normal mode.
- duringFlush bool
-
-	// flushReadyChains records chains whose last block has arrived. Once the
-	// last blocks from all chains in the current config have arrived, we can
-	// perform the flush.
- flushReadyChains map[uint32]struct{}
-
-	// flushed is a map recording which chains are already flushed.
- flushed map[uint32]struct{}
-
-	// globalVector groups all pending blocks by proposer and
-	// sorts them by block height. This structure is helpful when:
-	//
-	//  - building the global height vector
-	//  - picking candidates for the next round
- globalVector *totalOrderingGlobalVector
-
-	// candidates caches results of the potential function while generating
-	// preceding sets.
- candidates []*totalOrderingCandidateInfo
-
-	// acked caches the 'block A is acked by block B' relation by
-	// keeping a record in acked[A.Hash][B.Hash].
- acked map[common.Hash]map[common.Hash]struct{}
-
-	// dirtyChainIDs records which chain IDs should be updated
-	// in all cached status (win records, acking status).
- dirtyChainIDs []int
-
- // objCache caches allocated objects, like map.
- objCache *totalOrderingObjectCache
-
-	// candidateChainMapping keeps a mapping from chain ID to the hash of
-	// the candidate on that chain.
- candidateChainMapping map[uint32]common.Hash
-
- // candidateChainIDs records chain ID of all candidates.
- candidateChainIDs []uint32
-
-	// configs keeps the configuration of each round in a contiguous slice.
- configs []*totalOrderingConfig
-}
-
-// newTotalOrdering constructs a totalOrdering instance.
-func newTotalOrdering(config *totalOrderingConfig) *totalOrdering {
- globalVector := newTotalOrderingGlobalVector(config.numChains)
- objCache := newTotalOrderingObjectCache(config.numChains)
- candidates := make([]*totalOrderingCandidateInfo, config.numChains)
- to := &totalOrdering{
- pendings: make(map[common.Hash]*types.Block),
- globalVector: globalVector,
- dirtyChainIDs: make([]int, 0, config.numChains),
- acked: make(map[common.Hash]map[common.Hash]struct{}),
- objCache: objCache,
- candidateChainMapping: make(map[uint32]common.Hash),
- candidates: candidates,
- candidateChainIDs: make([]uint32, 0, config.numChains),
- curRound: config.roundID,
- }
- to.configs = []*totalOrderingConfig{config}
- return to
-}
-
-// appendConfig adds a new config for an upcoming round. After adding the
-// config for round R, only the config for round R+1 can be added next.
-func (to *totalOrdering) appendConfig(
- round uint64, config *types.Config) error {
- if round != uint64(len(to.configs))+to.configs[0].roundID {
- return ErrRoundNotIncreasing
- }
- to.configs = append(
- to.configs,
- newTotalOrderingConfig(to.configs[len(to.configs)-1], config))
- // Resize internal structures.
- to.globalVector.resize(config.NumChains)
- to.objCache.resize(config.NumChains)
- if int(config.NumChains) > len(to.candidates) {
- newCandidates := make([]*totalOrderingCandidateInfo, config.NumChains)
- copy(newCandidates, to.candidates)
- to.candidates = newCandidates
- }
- return nil
-}
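-
-// Illustrative call sequence (hypothetical): if to.configs currently covers
-// rounds 0..2, then appendConfig(3, cfg) is accepted, while appendConfig(5,
-// cfg) returns ErrRoundNotIncreasing because rounds must be added one by one.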
-
-func (to *totalOrdering) switchRound() {
- to.curRound++
- to.globalVector.switchRound(to.curRound)
-}
-
-// buildBlockRelation populates the acked map according to acking relationships.
-// This function recursively updates all blocks implicitly acked by the
-// input block.
-func (to *totalOrdering) buildBlockRelation(b *types.Block) {
- var (
- curBlock, nextBlock *types.Block
- ack common.Hash
- acked map[common.Hash]struct{}
- exists, alreadyPopulated bool
- toCheck = []*types.Block{b}
- )
- for {
- if len(toCheck) == 0 {
- break
- }
- curBlock, toCheck = toCheck[len(toCheck)-1], toCheck[:len(toCheck)-1]
- if curBlock.Position.Round > b.Position.Round {
-			// It's illegal for a block to ack a block from a future
-			// round; this rule should be guaranteed before delivering to
-			// total ordering.
- panic(ErrForwardAck)
- }
- for _, ack = range curBlock.Acks {
- if acked, exists = to.acked[ack]; !exists {
- acked = to.objCache.requestAckedVector()
- to.acked[ack] = acked
- }
- // This means we've walked this block already.
- if _, alreadyPopulated = acked[b.Hash]; alreadyPopulated {
- continue
- }
- acked[b.Hash] = struct{}{}
- // See if we need to go forward.
- if nextBlock, exists = to.pendings[ack]; !exists {
- continue
- } else {
- toCheck = append(toCheck, nextBlock)
- }
- }
- }
-}
-
-// clean removes a block from the working set. This prevents
-// memory usage from growing indefinitely.
-func (to *totalOrdering) clean(b *types.Block) {
- var (
- h = b.Hash
- chainID = b.Position.ChainID
- )
- to.objCache.recycleAckedVector(to.acked[h])
- delete(to.acked, h)
- delete(to.pendings, h)
- to.candidates[chainID].recycle(to.objCache)
- to.candidates[chainID] = nil
- delete(to.candidateChainMapping, chainID)
- // Remove this candidate from candidate IDs.
- to.candidateChainIDs =
- removeFromSortedUint32Slice(to.candidateChainIDs, chainID)
- // Clear records of this candidate from other candidates.
- for _, idx := range to.candidateChainIDs {
- to.candidates[idx].clean(chainID)
- }
-}
-
-// updateVectors is a helper function to update all cached vectors.
-func (to *totalOrdering) updateVectors(b *types.Block) (pos int, err error) {
- var (
- candidateHash common.Hash
- chainID uint32
- acked bool
- pending bool
- )
- // Update global height vector
- if pos, pending, err = to.globalVector.addBlock(b); err != nil {
- return
- }
- if to.duringFlush {
-		// It makes no sense to calculate potential functions of total ordering
-		// when a flush is about to happen.
- return
- }
- if pending {
-		// The chain of this block contains breakpoints, which means block
-		// heights on it are not continuous. This implementation of the DEXON
-		// total ordering algorithm assumes the heights of blocks in the
-		// working set are continuous.
-		//
-		// To work around this issue, blocks arriving after a breakpoint do
-		// not contribute their information to the current working set.
-		// This mechanism works because we switch rounds by flushing and
-		// resetting the whole working set.
- return
- }
- // Update acking status of candidates.
- for chainID, candidateHash = range to.candidateChainMapping {
- if _, acked = to.acked[candidateHash][b.Hash]; !acked {
- continue
- }
- if err = to.candidates[chainID].addBlock(b); err != nil {
- return
- }
- }
- return
-}
-
-// prepareCandidate is a helper function to
-// build a totalOrderingCandidateInfo for a new candidate.
-func (to *totalOrdering) prepareCandidate(candidate *types.Block) {
- var (
- info = newTotalOrderingCandidateInfo(candidate.Hash, to.objCache)
- chainID = candidate.Position.ChainID
- )
- to.candidates[chainID] = info
- to.candidateChainMapping[chainID] = candidate.Hash
-	// Add the chain ID to the candidate list and keep the list sorted.
- to.candidateChainIDs = append(to.candidateChainIDs, chainID)
- sort.Slice(to.candidateChainIDs, func(i, j int) bool {
- return to.candidateChainIDs[i] < to.candidateChainIDs[j]
- })
- to.globalVector.prepareHeightRecord(
- candidate, info, to.acked[candidate.Hash])
- return
-}
-
-// isAckOnlyPrecedings is a helper function to check if a block
-// only contains acks to already-delivered blocks.
-func (to *totalOrdering) isAckOnlyPrecedings(b *types.Block) bool {
- for _, ack := range b.Acks {
- if _, pending := to.pendings[ack]; pending {
- return false
- }
- }
- return true
-}
-
-// output is a helper function to finish the delivery of
-// the deliverable preceding set.
-func (to *totalOrdering) output(
- precedings map[common.Hash]struct{},
- numChains uint32) (ret []*types.Block) {
- for p := range precedings {
- // Remove the first element from corresponding blockVector.
- b := to.pendings[p]
- chainID := b.Position.ChainID
- // TODO(mission): This way to use slice makes it reallocate frequently.
- to.globalVector.blocks[int(chainID)] =
- to.globalVector.blocks[int(chainID)][1:]
- ret = append(ret, b)
- // Remove block relations.
- to.clean(b)
- to.dirtyChainIDs = append(to.dirtyChainIDs, int(chainID))
- }
- sort.Sort(types.ByHash(ret))
-	// Find new candidates from the tip of each chain in globalVector.
-	// The complexity here is O(N^2 logN).
-	// TODO(mission): only those tips that ack some blocks in
-	//                the delivered set should be checked. This
-	//                improvement relates to the latency introduced by K.
- for chainID, blocks := range to.globalVector.blocks[:numChains] {
- if len(blocks) == 0 {
- continue
- }
- if _, picked := to.candidateChainMapping[uint32(chainID)]; picked {
- continue
- }
- if !to.isAckOnlyPrecedings(blocks[0]) {
- continue
- }
- // Build totalOrderingCandidateInfo for new candidate.
- to.prepareCandidate(blocks[0])
- }
- return ret
-}
-
-// generateDeliverSet would:
-//  - generate the preceding set
-//  - check if the preceding set is deliverable by evaluating the potential
-//    function
-func (to *totalOrdering) generateDeliverSet() (
- delivered map[common.Hash]struct{}, mode uint32) {
- var (
- chainID, otherChainID uint32
- info, otherInfo *totalOrderingCandidateInfo
- precedings = make(map[uint32]struct{})
- cfg = to.configs[to.curRound-to.configs[0].roundID]
- )
- mode = TotalOrderingModeNormal
- to.globalVector.updateCandidateInfo(to.dirtyChainIDs, to.objCache)
- globalInfo := to.globalVector.cachedCandidateInfo
- for _, chainID = range to.candidateChainIDs {
- to.candidates[chainID].updateAckingHeightVector(
- globalInfo, cfg.k, to.dirtyChainIDs, to.objCache)
- }
- // Update winning records for each candidate.
- // TODO(mission): It's not reasonable to
-	//                spawn one goroutine for each candidate; the context
-	//                switch rate would be high.
- var wg sync.WaitGroup
- wg.Add(len(to.candidateChainIDs))
- for _, chainID := range to.candidateChainIDs {
- info = to.candidates[chainID]
- go func(can uint32, canInfo *totalOrderingCandidateInfo) {
- for _, otherChainID := range to.candidateChainIDs {
- if can == otherChainID {
- continue
- }
- canInfo.updateWinRecord(
- otherChainID,
- to.candidates[otherChainID],
- to.dirtyChainIDs,
- to.objCache,
- cfg.numChains)
- }
- wg.Done()
- }(chainID, info)
- }
- wg.Wait()
- // Reset dirty chains.
- to.dirtyChainIDs = to.dirtyChainIDs[:0]
- // TODO(mission): ANS should be bound by current numChains.
- globalAnsLength := globalInfo.getAckingNodeSetLength(
- globalInfo, cfg.k, cfg.numChains)
-CheckNextCandidateLoop:
- for _, chainID = range to.candidateChainIDs {
- info = to.candidates[chainID]
- for _, otherChainID = range to.candidateChainIDs {
- if chainID == otherChainID {
- continue
- }
- otherInfo = to.candidates[otherChainID]
- // TODO(mission): grade should be bound by current numChains.
- if otherInfo.winRecords[chainID].grade(
- cfg.numChains, cfg.phi, globalAnsLength) != 0 {
- continue CheckNextCandidateLoop
- }
- }
- precedings[chainID] = struct{}{}
- }
- if len(precedings) == 0 {
- return
- }
- // internal is a helper function to verify internal stability.
- internal := func() bool {
- var (
- isPreceding, beaten bool
- p uint32
- )
- for _, chainID = range to.candidateChainIDs {
- if _, isPreceding = precedings[chainID]; isPreceding {
- continue
- }
- beaten = false
- for p = range precedings {
- // TODO(mission): grade should be bound by current numChains.
- if beaten = to.candidates[p].winRecords[chainID].grade(
- cfg.numChains, cfg.phi, globalAnsLength) == 1; beaten {
- break
- }
- }
- if !beaten {
- return false
- }
- }
- return true
- }
- // checkAHV is a helper function to verify external stability.
- // It would make sure some preceding block is strong enough
- // to lead the whole preceding set.
- checkAHV := func() bool {
- var (
- height, count uint64
- p uint32
- )
- for p = range precedings {
- count = 0
- info = to.candidates[p]
- for _, height = range info.cachedHeightVector {
- if height != infinity {
- count++
- if count > cfg.phi {
- return true
- }
- }
- }
- }
- return false
- }
- // checkANS is a helper function to verify external stability.
- // It would make sure all preceding blocks are strong enough
- // to be delivered.
- checkANS := func() bool {
- var chainAnsLength uint64
- for p := range precedings {
- // TODO(mission): ANS should be bound by current numChains.
- chainAnsLength = to.candidates[p].getAckingNodeSetLength(
- globalInfo, cfg.k, cfg.numChains)
- if uint64(chainAnsLength) < uint64(cfg.numChains)-cfg.phi {
- return false
- }
- }
- return true
- }
-	// If all chains have proposed enough blocks, we should force
-	// delivery since the whole picture of the DAG is revealed.
- if globalAnsLength != uint64(cfg.numChains) {
- // Check internal stability first.
- if !internal() {
- return
- }
-
-		// The whole picture is not ready; we need to check whether
-		// external stability is met so that we can deliver earlier.
- if checkAHV() && checkANS() {
- mode = TotalOrderingModeEarly
- } else {
- return
- }
- }
- delivered = make(map[common.Hash]struct{})
- for p := range precedings {
- delivered[to.candidates[p].hash] = struct{}{}
- }
- return
-}
-
-// flushBlocks flushes blocks.
-func (to *totalOrdering) flushBlocks(
- b *types.Block) (flushed []*types.Block, mode uint32, err error) {
- cfg := to.configs[to.curRound-to.configs[0].roundID]
- mode = TotalOrderingModeFlush
- if cfg.isValidLastBlock(b) {
- to.flushReadyChains[b.Position.ChainID] = struct{}{}
- }
-	// Do not flush until the last blocks from all chains have arrived.
- if len(to.flushReadyChains) < int(cfg.numChains) {
- return
- }
- if len(to.flushReadyChains) > int(cfg.numChains) {
- // This line should never be reached.
- err = ErrFutureRoundDelivered
- return
- }
- // Dump all blocks in this round.
- for {
- if len(to.flushed) == int(cfg.numChains) {
- break
- }
- // Dump all candidates without checking potential function.
- flushedHashes := make(map[common.Hash]struct{})
- for _, chainID := range to.candidateChainIDs {
- candidateBlock := to.pendings[to.candidates[chainID].hash]
- if candidateBlock.Position.Round > to.curRound {
- continue
- }
- flushedHashes[candidateBlock.Hash] = struct{}{}
- }
- if len(flushedHashes) == 0 {
- err = ErrTotalOrderingHangs
- return
- }
- flushedBlocks := to.output(flushedHashes, cfg.numChains)
- for _, b := range flushedBlocks {
- if !cfg.isValidLastBlock(b) {
- continue
- }
- to.flushed[b.Position.ChainID] = struct{}{}
- }
- flushed = append(flushed, flushedBlocks...)
- }
- // Switch back to normal mode: delivered by DEXON total ordering algorithm.
- to.duringFlush = false
- to.flushed = make(map[uint32]struct{})
- to.flushReadyChains = make(map[uint32]struct{})
- // Clean all cached intermediate stats.
- for idx := range to.candidates {
- if to.candidates[idx] == nil {
- continue
- }
- to.candidates[idx].recycle(to.objCache)
- to.candidates[idx] = nil
- }
- to.dirtyChainIDs = nil
- to.candidateChainMapping = make(map[uint32]common.Hash)
- to.candidateChainIDs = nil
- to.globalVector.cachedCandidateInfo = nil
- to.switchRound()
-	// Force picking new candidates.
- numChains := to.configs[to.curRound-to.configs[0].roundID].numChains
- to.output(map[common.Hash]struct{}{}, numChains)
- return
-}
-
-// deliverBlocks delivers blocks by DEXON total ordering algorithm.
-func (to *totalOrdering) deliverBlocks() (
- delivered []*types.Block, mode uint32, err error) {
- hashes, mode := to.generateDeliverSet()
- cfg := to.configs[to.curRound-to.configs[0].roundID]
- // output precedings
- delivered = to.output(hashes, cfg.numChains)
-	// Check if any block in the delivered set is the last block of its chain
-	// in this round. If so, a flush or round switch is performed.
- for _, b := range delivered {
- if b.Position.Round > to.curRound {
- err = ErrFutureRoundDelivered
- return
- }
- if !cfg.isValidLastBlock(b) {
- continue
- }
- if cfg.isFlushRequired {
- // Switch to flush mode.
- to.duringFlush = true
- to.flushReadyChains = make(map[uint32]struct{})
- to.flushed = make(map[uint32]struct{})
- } else {
- // Switch round directly.
- to.switchRound()
- }
- break
- }
- if to.duringFlush {
- // Make sure last blocks from all chains are marked as 'flushed'.
- for _, b := range delivered {
- if !cfg.isValidLastBlock(b) {
- continue
- }
- to.flushed[b.Position.ChainID] = struct{}{}
- }
- // Some last blocks for the round to be flushed might not be delivered
- // yet.
- for _, tip := range to.globalVector.tips[:cfg.numChains] {
- if tip.Position.Round > to.curRound || cfg.isValidLastBlock(tip) {
- to.flushReadyChains[tip.Position.ChainID] = struct{}{}
- }
- }
- }
- return
-}
-
-// processBlock is the entry point of totalOrdering.
-func (to *totalOrdering) processBlock(
- b *types.Block) ([]*types.Block, uint32, error) {
-	// NOTE: the block 'b' is assumed to be already safe for total ordering,
-	//       i.e. all blocks it acks are in or past the total ordering stage.
- cfg := to.configs[to.curRound-to.configs[0].roundID]
- to.pendings[b.Hash] = b
- to.buildBlockRelation(b)
- pos, err := to.updateVectors(b)
- if err != nil {
- return nil, uint32(0), err
- }
- // Mark the proposer of incoming block as dirty.
- if b.Position.ChainID < cfg.numChains {
- to.dirtyChainIDs = append(to.dirtyChainIDs, int(b.Position.ChainID))
- _, picked := to.candidateChainMapping[b.Position.ChainID]
- if pos == 0 && !picked {
- if to.isAckOnlyPrecedings(b) {
- to.prepareCandidate(b)
- }
- }
- }
- if to.duringFlush {
- return to.flushBlocks(b)
- }
- return to.deliverBlocks()
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block-randomness.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block-randomness.go
deleted file mode 100644
index 1eaa3e398..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block-randomness.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package types
-
-import (
- "fmt"
-
- "github.com/dexon-foundation/dexon-consensus/common"
-)
-
-// AgreementResult describes an agreement result.
-type AgreementResult struct {
- BlockHash common.Hash `json:"block_hash"`
- Position Position `json:"position"`
- Votes []Vote `json:"votes"`
-}
-
-func (r *AgreementResult) String() string {
- return fmt.Sprintf(
- "agreementResult[%s:%s]", r.BlockHash, &r.Position)
-}
-
-// BlockRandomnessResult describes a block randomness result.
-type BlockRandomnessResult struct {
- BlockHash common.Hash `json:"block_hash"`
- Position Position `json:"position"`
- Randomness []byte `json:"randomness"`
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block.go
deleted file mode 100644
index bde07d518..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/block.go
+++ /dev/null
@@ -1,341 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-// TODO(jimmy-dexon): remove comments of WitnessAck before open source.
-
-package types
-
-import (
- "bytes"
- "fmt"
- "io"
- "sort"
- "sync"
- "time"
-
- "github.com/dexon-foundation/dexon/rlp"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
-)
-
-// BlockVerifyStatus is the return code for core.Application.VerifyBlock
-type BlockVerifyStatus int
-
-// Enums for return value of core.Application.VerifyBlock.
-const (
- // VerifyOK: Block is verified.
- VerifyOK BlockVerifyStatus = iota
- // VerifyRetryLater: Block is unable to be verified at this moment.
- // Try again later.
- VerifyRetryLater
- // VerifyInvalidBlock: Block is an invalid one.
- VerifyInvalidBlock
-)
-
-var (
- // blockPool is the blocks cache to reuse allocated blocks.
- blockPool = sync.Pool{
- New: func() interface{} {
- return &Block{}
- },
- }
-)
-
-type rlpTimestamp struct {
- time.Time
-}
-
-func (t *rlpTimestamp) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, uint64(t.UTC().UnixNano()))
-}
-
-func (t *rlpTimestamp) DecodeRLP(s *rlp.Stream) error {
- var nano uint64
- err := s.Decode(&nano)
- if err == nil {
- sec := int64(nano) / 1000000000
- nsec := int64(nano) % 1000000000
- t.Time = time.Unix(sec, nsec).UTC()
- }
- return err
-}
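-
-// Illustrative round trip (hypothetical value): a timestamp of 1.5 seconds
-// past the Unix epoch encodes as nano = 1500000000; decoding recovers
-// sec = 1 and nsec = 500000000, i.e. time.Unix(1, 500000000).UTC().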
-
-// FinalizationResult represents the result of DEXON consensus algorithm.
-type FinalizationResult struct {
- ParentHash common.Hash `json:"parent_hash"`
- Randomness []byte `json:"randomness"`
- Timestamp time.Time `json:"timestamp"`
- Height uint64 `json:"height"`
-}
-
-type rlpFinalizationResult struct {
- ParentHash common.Hash
- Randomness []byte
- Timestamp *rlpTimestamp
- Height uint64
-}
-
-// EncodeRLP implements rlp.Encoder
-func (f *FinalizationResult) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, &rlpFinalizationResult{
- ParentHash: f.ParentHash,
- Randomness: f.Randomness,
- Timestamp: &rlpTimestamp{f.Timestamp},
- Height: f.Height,
- })
-}
-
-// DecodeRLP implements rlp.Decoder
-func (f *FinalizationResult) DecodeRLP(s *rlp.Stream) error {
- var dec rlpFinalizationResult
- err := s.Decode(&dec)
- if err == nil {
- *f = FinalizationResult{
- ParentHash: dec.ParentHash,
- Randomness: dec.Randomness,
- Timestamp: dec.Timestamp.Time,
- Height: dec.Height,
- }
- }
- return err
-}
-
-// Witness represents the consensus information on the compaction chain.
-type Witness struct {
- Height uint64 `json:"height"`
- Data []byte `json:"data"`
-}
-
-// RecycleBlock puts an unused block into the cache, where it might be reused
-// if not garbage collected.
-func RecycleBlock(b *Block) {
- blockPool.Put(b)
-}
-
-// NewBlock initiates a block from the block pool.
-func NewBlock() (b *Block) {
- b = blockPool.Get().(*Block)
- b.Acks = b.Acks[:0]
- return
-}
-
-// Block represents a single event broadcasted on the network.
-type Block struct {
- ProposerID NodeID `json:"proposer_id"`
- ParentHash common.Hash `json:"parent_hash"`
- Hash common.Hash `json:"hash"`
- Position Position `json:"position"`
- Timestamp time.Time `json:"timestamp"`
- Acks common.SortedHashes `json:"acks"`
- Payload []byte `json:"payload"`
- PayloadHash common.Hash `json:"payload_hash"`
- Witness Witness `json:"witness"`
- Finalization FinalizationResult `json:"finalization"`
- Signature crypto.Signature `json:"signature"`
-
- CRSSignature crypto.Signature `json:"crs_signature"`
-}
-
-type rlpBlock struct {
- ProposerID NodeID
- ParentHash common.Hash
- Hash common.Hash
- Position Position
- Timestamp *rlpTimestamp
- Acks common.SortedHashes
- Payload []byte
- PayloadHash common.Hash
- Witness *Witness
- Finalization *FinalizationResult
- Signature crypto.Signature
-
- CRSSignature crypto.Signature
-}
-
-// EncodeRLP implements rlp.Encoder
-func (b *Block) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, rlpBlock{
- ProposerID: b.ProposerID,
- ParentHash: b.ParentHash,
- Hash: b.Hash,
- Position: b.Position,
- Timestamp: &rlpTimestamp{b.Timestamp},
- Acks: b.Acks,
- Payload: b.Payload,
- PayloadHash: b.PayloadHash,
- Witness: &b.Witness,
- Finalization: &b.Finalization,
- Signature: b.Signature,
- CRSSignature: b.CRSSignature,
- })
-}
-
-// DecodeRLP implements rlp.Decoder
-func (b *Block) DecodeRLP(s *rlp.Stream) error {
- var dec rlpBlock
- err := s.Decode(&dec)
- if err == nil {
- *b = Block{
- ProposerID: dec.ProposerID,
- ParentHash: dec.ParentHash,
- Hash: dec.Hash,
- Position: dec.Position,
- Timestamp: dec.Timestamp.Time,
- Acks: dec.Acks,
- Payload: dec.Payload,
- PayloadHash: dec.PayloadHash,
- Witness: *dec.Witness,
- Finalization: *dec.Finalization,
- Signature: dec.Signature,
- CRSSignature: dec.CRSSignature,
- }
- }
- return err
-}
-
-func (b *Block) String() string {
- return fmt.Sprintf("Block(%v:%d:%d)", b.Hash.String()[:6],
- b.Position.ChainID, b.Position.Height)
-}
-
-// Clone returns a deep copy of a block.
-func (b *Block) Clone() (bcopy *Block) {
- bcopy = NewBlock()
- bcopy.ProposerID = b.ProposerID
- bcopy.ParentHash = b.ParentHash
- bcopy.Hash = b.Hash
- bcopy.Position.Round = b.Position.Round
- bcopy.Position.ChainID = b.Position.ChainID
- bcopy.Position.Height = b.Position.Height
- bcopy.Signature = b.Signature.Clone()
- bcopy.CRSSignature = b.CRSSignature.Clone()
- bcopy.Finalization.ParentHash = b.Finalization.ParentHash
- bcopy.Finalization.Timestamp = b.Finalization.Timestamp
- bcopy.Finalization.Height = b.Finalization.Height
- bcopy.Witness.Height = b.Witness.Height
- bcopy.Witness.Data = make([]byte, len(b.Witness.Data))
- copy(bcopy.Witness.Data, b.Witness.Data)
- bcopy.Timestamp = b.Timestamp
- bcopy.Acks = make(common.SortedHashes, len(b.Acks))
- copy(bcopy.Acks, b.Acks)
- bcopy.Payload = make([]byte, len(b.Payload))
- copy(bcopy.Payload, b.Payload)
- bcopy.PayloadHash = b.PayloadHash
- bcopy.Finalization.Randomness = make([]byte, len(b.Finalization.Randomness))
- copy(bcopy.Finalization.Randomness, b.Finalization.Randomness)
- return
-}
-
-// IsGenesis checks if the block is a genesis block.
-func (b *Block) IsGenesis() bool {
- return b.Position.Height == 0 && b.ParentHash == common.Hash{}
-}
-
-// IsFinalized checks if the finalization data is ready.
-func (b *Block) IsFinalized() bool {
- return b.Finalization.Height != 0
-}
-
-// IsEmpty checks if the block is an 'empty block'.
-func (b *Block) IsEmpty() bool {
- return b.ProposerID.Hash == common.Hash{}
-}
-
-// IsAcking checks if the block acks another block, identified by its hash.
-func (b *Block) IsAcking(hash common.Hash) bool {
- idx := sort.Search(len(b.Acks), func(i int) bool {
- return bytes.Compare(b.Acks[i][:], hash[:]) >= 0
- })
- return !(idx == len(b.Acks) || b.Acks[idx] != hash)
-}
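-
-// Note: Acks is a common.SortedHashes slice kept in ascending order, so the
-// sort.Search call above performs a binary search and IsAcking runs in
-// O(log n) time.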
-
-// ByHash is the helper type for sorting a slice of blocks by hash.
-type ByHash []*Block
-
-func (b ByHash) Len() int {
- return len(b)
-}
-
-func (b ByHash) Less(i int, j int) bool {
- return bytes.Compare([]byte(b[i].Hash[:]), []byte(b[j].Hash[:])) == -1
-}
-
-func (b ByHash) Swap(i int, j int) {
- b[i], b[j] = b[j], b[i]
-}
-
-// ByPosition is the helper type for sorting a slice of blocks by position.
-type ByPosition []*Block
-
-// Len implements Len method in sort.Sort interface.
-func (bs ByPosition) Len() int {
- return len(bs)
-}
-
-// Less implements Less method in sort.Sort interface.
-func (bs ByPosition) Less(i int, j int) bool {
- return bs[j].Position.Newer(&bs[i].Position)
-}
-
-// Swap implements Swap method in sort.Sort interface.
-func (bs ByPosition) Swap(i int, j int) {
- bs[i], bs[j] = bs[j], bs[i]
-}
-
-// Push implements Push method in heap interface.
-func (bs *ByPosition) Push(x interface{}) {
- *bs = append(*bs, x.(*Block))
-}
-
-// Pop implements Pop method in heap interface.
-func (bs *ByPosition) Pop() (ret interface{}) {
- n := len(*bs)
- *bs, ret = (*bs)[0:n-1], (*bs)[n-1]
- return
-}
-
-// ByFinalizationHeight is the helper type for sorting a slice of blocks by
-// finalization height.
-type ByFinalizationHeight []*Block
-
-// Len implements Len method in sort.Sort interface.
-func (bs ByFinalizationHeight) Len() int {
- return len(bs)
-}
-
-// Less implements Less method in sort.Sort interface.
-func (bs ByFinalizationHeight) Less(i int, j int) bool {
- return bs[i].Finalization.Height < bs[j].Finalization.Height
-}
-
-// Swap implements Swap method in sort.Sort interface.
-func (bs ByFinalizationHeight) Swap(i int, j int) {
- bs[i], bs[j] = bs[j], bs[i]
-}
-
-// Push implements Push method in heap interface.
-func (bs *ByFinalizationHeight) Push(x interface{}) {
- *bs = append(*bs, x.(*Block))
-}
-
-// Pop implements Pop method in heap interface.
-func (bs *ByFinalizationHeight) Pop() (ret interface{}) {
- n := len(*bs)
- *bs, ret = (*bs)[0:n-1], (*bs)[n-1]
- return
-}
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/config.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/config.go
deleted file mode 100644
index 975eec9cb..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/config.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package types
-
-import (
- "encoding/binary"
- "math"
- "time"
-)
-
-// Config stands for the current configuration parameters.
-type Config struct {
- // Network related.
- NumChains uint32
-
- // Lambda related.
- LambdaBA time.Duration
- LambdaDKG time.Duration
-
- // Total ordering related.
- K int
- PhiRatio float32
-
- // Set related.
- NotarySetSize uint32
- DKGSetSize uint32
-
- // Time related.
- RoundInterval time.Duration
- MinBlockInterval time.Duration
- MaxBlockInterval time.Duration
-}
-
-// Clone returns a copied configuration.
-func (c *Config) Clone() *Config {
- return &Config{
- NumChains: c.NumChains,
- LambdaBA: c.LambdaBA,
- LambdaDKG: c.LambdaDKG,
- K: c.K,
- PhiRatio: c.PhiRatio,
- NotarySetSize: c.NotarySetSize,
- DKGSetSize: c.DKGSetSize,
- RoundInterval: c.RoundInterval,
- MinBlockInterval: c.MinBlockInterval,
- MaxBlockInterval: c.MaxBlockInterval,
- }
-}
-
-// Bytes returns []byte representation of Config.
-func (c *Config) Bytes() []byte {
- binaryNumChains := make([]byte, 4)
- binary.LittleEndian.PutUint32(binaryNumChains, c.NumChains)
-
- binaryLambdaBA := make([]byte, 8)
- binary.LittleEndian.PutUint64(
- binaryLambdaBA, uint64(c.LambdaBA.Nanoseconds()))
- binaryLambdaDKG := make([]byte, 8)
- binary.LittleEndian.PutUint64(
- binaryLambdaDKG, uint64(c.LambdaDKG.Nanoseconds()))
-
- binaryK := make([]byte, 4)
- binary.LittleEndian.PutUint32(binaryK, uint32(c.K))
- binaryPhiRatio := make([]byte, 4)
- binary.LittleEndian.PutUint32(binaryPhiRatio, math.Float32bits(c.PhiRatio))
-
- binaryNotarySetSize := make([]byte, 4)
- binary.LittleEndian.PutUint32(binaryNotarySetSize, c.NotarySetSize)
- binaryDKGSetSize := make([]byte, 4)
- binary.LittleEndian.PutUint32(binaryDKGSetSize, c.DKGSetSize)
-
- binaryRoundInterval := make([]byte, 8)
- binary.LittleEndian.PutUint64(binaryRoundInterval,
- uint64(c.RoundInterval.Nanoseconds()))
- binaryMinBlockInterval := make([]byte, 8)
- binary.LittleEndian.PutUint64(binaryMinBlockInterval,
- uint64(c.MinBlockInterval.Nanoseconds()))
- binaryMaxBlockInterval := make([]byte, 8)
- binary.LittleEndian.PutUint64(binaryMaxBlockInterval,
- uint64(c.MaxBlockInterval.Nanoseconds()))
-
- enc := make([]byte, 0, 40)
- enc = append(enc, binaryNumChains...)
- enc = append(enc, binaryLambdaBA...)
- enc = append(enc, binaryLambdaDKG...)
- enc = append(enc, binaryK...)
- enc = append(enc, binaryPhiRatio...)
- enc = append(enc, binaryNotarySetSize...)
- enc = append(enc, binaryDKGSetSize...)
- enc = append(enc, binaryRoundInterval...)
- enc = append(enc, binaryMinBlockInterval...)
- enc = append(enc, binaryMaxBlockInterval...)
- return enc
-}
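-
-// Note: the encoding above is fixed-width little-endian; the field sizes sum
-// to 4+8+8+4+4+4+4+8+8+8 = 60 bytes, so the initial capacity hint of 40 is
-// only a hint and append grows the slice as needed.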
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/dkg/dkg.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/dkg/dkg.go
deleted file mode 100644
index 4053c5a28..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/dkg/dkg.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package dkg
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
-
- "github.com/dexon-foundation/dexon/rlp"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
- cryptoDKG "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-// PrivateShare describes a secret share in the DKG protocol.
-type PrivateShare struct {
- ProposerID types.NodeID `json:"proposer_id"`
- ReceiverID types.NodeID `json:"receiver_id"`
- Round uint64 `json:"round"`
- PrivateShare cryptoDKG.PrivateKey `json:"private_share"`
- Signature crypto.Signature `json:"signature"`
-}
-
-// Equal checks equality between two PrivateShare instances.
-func (p *PrivateShare) Equal(other *PrivateShare) bool {
- return p.ProposerID.Equal(other.ProposerID) &&
- p.ReceiverID.Equal(other.ReceiverID) &&
- p.Round == other.Round &&
- p.Signature.Type == other.Signature.Type &&
- bytes.Compare(p.Signature.Signature, other.Signature.Signature) == 0 &&
- bytes.Compare(
- p.PrivateShare.Bytes(), other.PrivateShare.Bytes()) == 0
-}
-
-// MasterPublicKey describes a master public key in the DKG protocol.
-type MasterPublicKey struct {
- ProposerID types.NodeID `json:"proposer_id"`
- Round uint64 `json:"round"`
- DKGID cryptoDKG.ID `json:"dkg_id"`
- PublicKeyShares cryptoDKG.PublicKeyShares `json:"public_key_shares"`
- Signature crypto.Signature `json:"signature"`
-}
-
-func (d *MasterPublicKey) String() string {
- return fmt.Sprintf("MasterPublicKey[%s:%d]",
- d.ProposerID.String()[:6],
- d.Round)
-}
-
-// Equal checks equality of two DKG master public keys.
-func (d *MasterPublicKey) Equal(other *MasterPublicKey) bool {
- return d.ProposerID.Equal(other.ProposerID) &&
- d.Round == other.Round &&
- d.DKGID.GetHexString() == other.DKGID.GetHexString() &&
- d.PublicKeyShares.Equal(&other.PublicKeyShares) &&
- d.Signature.Type == other.Signature.Type &&
- bytes.Compare(d.Signature.Signature, other.Signature.Signature) == 0
-}
-
-type rlpMasterPublicKey struct {
- ProposerID types.NodeID
- Round uint64
- DKGID []byte
- PublicKeyShares *cryptoDKG.PublicKeyShares
- Signature crypto.Signature
-}
-
-// EncodeRLP implements rlp.Encoder
-func (d *MasterPublicKey) EncodeRLP(w io.Writer) error {
- return rlp.Encode(w, rlpMasterPublicKey{
- ProposerID: d.ProposerID,
- Round: d.Round,
- DKGID: d.DKGID.GetLittleEndian(),
- PublicKeyShares: &d.PublicKeyShares,
- Signature: d.Signature,
- })
-}
-
-// DecodeRLP implements rlp.Decoder
-func (d *MasterPublicKey) DecodeRLP(s *rlp.Stream) error {
- var dec rlpMasterPublicKey
- if err := s.Decode(&dec); err != nil {
- return err
- }
-
- id, err := cryptoDKG.BytesID(dec.DKGID)
- if err != nil {
- return err
- }
-
- *d = MasterPublicKey{
- ProposerID: dec.ProposerID,
- Round: dec.Round,
- DKGID: id,
- PublicKeyShares: *dec.PublicKeyShares,
- Signature: dec.Signature,
- }
- return err
-}
-
-// NewMasterPublicKey returns a new MasterPublicKey instance.
-func NewMasterPublicKey() *MasterPublicKey {
- return &MasterPublicKey{
- PublicKeyShares: *cryptoDKG.NewEmptyPublicKeyShares(),
- }
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (d *MasterPublicKey) UnmarshalJSON(data []byte) error {
- type innertMasterPublicKey MasterPublicKey
- d.PublicKeyShares = *cryptoDKG.NewEmptyPublicKeyShares()
- return json.Unmarshal(data, (*innertMasterPublicKey)(d))
-}
-
-// Complaint describes a complaint in the DKG protocol.
-type Complaint struct {
- ProposerID types.NodeID `json:"proposer_id"`
- Round uint64 `json:"round"`
- PrivateShare PrivateShare `json:"private_share"`
- Signature crypto.Signature `json:"signature"`
-}
-
-func (c *Complaint) String() string {
- if c.IsNack() {
- return fmt.Sprintf("DKGNackComplaint[%s:%d]%s",
- c.ProposerID.String()[:6], c.Round,
- c.PrivateShare.ProposerID.String()[:6])
- }
- return fmt.Sprintf("Complaint[%s:%d]%v",
- c.ProposerID.String()[:6], c.Round, c.PrivateShare)
-}
-
-// Equal checks equality between two Complaint instances.
-func (c *Complaint) Equal(other *Complaint) bool {
- return c.ProposerID.Equal(other.ProposerID) &&
- c.Round == other.Round &&
- c.PrivateShare.Equal(&other.PrivateShare) &&
- c.Signature.Type == other.Signature.Type &&
- bytes.Compare(c.Signature.Signature, other.Signature.Signature) == 0
-}
-
-// PartialSignature describes a partial signature in the DKG protocol.
-type PartialSignature struct {
- ProposerID types.NodeID `json:"proposer_id"`
- Round uint64 `json:"round"`
- Hash common.Hash `json:"hash"`
- PartialSignature cryptoDKG.PartialSignature `json:"partial_signature"`
- Signature crypto.Signature `json:"signature"`
-}
-
-// Finalize describes a finalize message in the DKG protocol.
-type Finalize struct {
- ProposerID types.NodeID `json:"proposer_id"`
- Round uint64 `json:"round"`
- Signature crypto.Signature `json:"signature"`
-}
-
-func (final *Finalize) String() string {
- return fmt.Sprintf("DKGFinal[%s:%d]",
- final.ProposerID.String()[:6],
- final.Round)
-}
-
-// Equal checks equality of two Finalize instances.
-func (final *Finalize) Equal(other *Finalize) bool {
- return final.ProposerID.Equal(other.ProposerID) &&
- final.Round == other.Round &&
- final.Signature.Type == other.Signature.Type &&
- bytes.Compare(final.Signature.Signature, other.Signature.Signature) == 0
-}
-
-// IsNack returns true if it's a nack complaint in DKG protocol.
-func (c *Complaint) IsNack() bool {
- return len(c.PrivateShare.Signature.Signature) == 0
-}
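
For reference, the removed Complaint type treats an empty inner signature as a nack: a node files a nack complaint when it never received a private share, so there is nothing to countersign. A minimal standalone sketch of that check (hypothetical names, not the vendored API):

package main

import "fmt"

// signature and privateShare mirror just enough of the removed types to show
// the IsNack rule: a nack complaint wraps a share with no signature at all.
type signature struct{ Signature []byte }

type privateShare struct{ Signature signature }

type complaint struct{ PrivateShare privateShare }

// isNack reports whether the complaint is a "nothing was received" complaint.
func (c *complaint) isNack() bool {
	return len(c.PrivateShare.Signature.Signature) == 0
}

func main() {
	nack := &complaint{}
	disputed := &complaint{PrivateShare: privateShare{Signature: signature{Signature: []byte{0x01}}}}
	fmt.Println(nack.isNack(), disputed.isNack()) // true false
}
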
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/node.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/node.go
deleted file mode 100644
index 2c90f65c8..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/node.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package types
-
-import (
- "bytes"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
-)
-
-// NodeID is the ID type for nodes.
-type NodeID struct {
- common.Hash
-}
-
-// NewNodeID returns a NodeID with Hash set to the hash value of
-// public key.
-func NewNodeID(pubKey crypto.PublicKey) NodeID {
- return NodeID{Hash: crypto.Keccak256Hash(pubKey.Bytes()[1:])}
-}
-
-// Equal checks if the hash representation is the same NodeID.
-func (v NodeID) Equal(v2 NodeID) bool {
- return v.Hash == v2.Hash
-}
-
-// NodeIDs implements sort.Interface for NodeID.
-type NodeIDs []NodeID
-
-func (v NodeIDs) Len() int {
- return len(v)
-}
-
-func (v NodeIDs) Less(i int, j int) bool {
- return bytes.Compare([]byte(v[i].Hash[:]), []byte(v[j].Hash[:])) == -1
-}
-
-func (v NodeIDs) Swap(i int, j int) {
- v[i], v[j] = v[j], v[i]
-}
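
The removed NodeIDs type exists only to give node IDs a stable order by their hash bytes. A tiny standalone sketch of the same ordering (hypothetical data, plain byte slices standing in for NodeID.Hash):

package main

import (
	"bytes"
	"fmt"
	"sort"
)

func main() {
	// The order is plain byte-wise comparison, exactly what NodeIDs.Less
	// does with bytes.Compare.
	ids := [][]byte{{0x03, 0x01}, {0x01, 0xff}, {0x02, 0x00}}
	sort.Slice(ids, func(i, j int) bool {
		return bytes.Compare(ids[i], ids[j]) < 0
	})
	fmt.Println(ids) // [[1 255] [2 0] [3 1]]
}
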
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/nodeset.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/nodeset.go
deleted file mode 100644
index 3222b3c2f..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/nodeset.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package types
-
-import (
- "container/heap"
- "encoding/binary"
- "math/big"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
-)
-
-// NodeSet is the node set structure as defined in DEXON consensus core.
-type NodeSet struct {
- IDs map[NodeID]struct{}
-}
-
-// SubSetTarget is the sub set target for GetSubSet().
-type SubSetTarget *big.Int
-
-type subSetTargetType byte
-
-const (
- targetNotarySet subSetTargetType = iota
- targetDKGSet
-)
-
-type nodeRank struct {
- ID NodeID
- rank *big.Int
-}
-
-// rankHeap is a MaxHeap structure.
-type rankHeap []*nodeRank
-
-func (h rankHeap) Len() int { return len(h) }
-func (h rankHeap) Less(i, j int) bool { return h[i].rank.Cmp(h[j].rank) > 0 }
-func (h rankHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
-func (h *rankHeap) Push(x interface{}) {
- *h = append(*h, x.(*nodeRank))
-}
-func (h *rankHeap) Pop() interface{} {
- old := *h
- n := len(old)
- x := old[n-1]
- *h = old[0 : n-1]
- return x
-}
-
-// NewNodeSet creates a new NodeSet instance.
-func NewNodeSet() *NodeSet {
- return &NodeSet{
- IDs: make(map[NodeID]struct{}),
- }
-}
-
-// NewNotarySetTarget is the target for getting Notary Set.
-func NewNotarySetTarget(crs common.Hash, chainID uint32) SubSetTarget {
- binaryChainID := make([]byte, 4)
- binary.LittleEndian.PutUint32(binaryChainID, chainID)
-
- return newTarget(targetNotarySet, crs[:], binaryChainID)
-}
-
-// NewDKGSetTarget is the target for getting DKG Set.
-func NewDKGSetTarget(crs common.Hash) SubSetTarget {
- return newTarget(targetDKGSet, crs[:])
-}
-
-// Add a NodeID to the set.
-func (ns *NodeSet) Add(ID NodeID) {
- ns.IDs[ID] = struct{}{}
-}
-
-// Clone the NodeSet.
-func (ns *NodeSet) Clone() *NodeSet {
- nsCopy := NewNodeSet()
- for ID := range ns.IDs {
- nsCopy.Add(ID)
- }
- return nsCopy
-}
-
-// GetSubSet returns the subset of given target.
-func (ns *NodeSet) GetSubSet(
- size int, target SubSetTarget) map[NodeID]struct{} {
- h := rankHeap{}
- idx := 0
- for nID := range ns.IDs {
- if idx < size {
- h = append(h, newNodeRank(nID, target))
- } else if idx == size {
- heap.Init(&h)
- }
- if idx >= size {
- rank := newNodeRank(nID, target)
- if rank.rank.Cmp(h[0].rank) < 0 {
- h[0] = rank
- heap.Fix(&h, 0)
- }
- }
- idx++
- }
-
- nIDs := make(map[NodeID]struct{}, size)
- for _, rank := range h {
- nIDs[rank.ID] = struct{}{}
- }
-
- return nIDs
-}
-
-func newTarget(targetType subSetTargetType, data ...[]byte) SubSetTarget {
- data = append(data, []byte{byte(targetType)})
- h := crypto.Keccak256Hash(data...)
- num := big.NewInt(0)
- num.SetBytes(h[:])
- return SubSetTarget(num)
-}
-
-func newNodeRank(ID NodeID, target SubSetTarget) *nodeRank {
- num := big.NewInt(0)
- num.SetBytes(ID.Hash[:])
- num.Abs(num.Sub((*big.Int)(target), num))
- return &nodeRank{
- ID: ID,
- rank: num,
- }
-}
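
GetSubSet above ranks every node by the absolute distance between its ID hash and the CRS-derived target, then keeps the `size` smallest ranks using a max-heap so each remaining candidate is compared only against the current worst member. A standalone sketch of that selection (hypothetical names, string IDs hashed with SHA-256 instead of the vendored Keccak):

package main

import (
	"container/heap"
	"crypto/sha256"
	"fmt"
	"math/big"
)

// ranked pairs a node identifier with its distance from the target.
type ranked struct {
	id   string
	rank *big.Int
}

// maxHeap keeps the largest rank (the worst candidate) at the root so it can
// be replaced in O(log n) when a closer node shows up.
type maxHeap []ranked

func (h maxHeap) Len() int            { return len(h) }
func (h maxHeap) Less(i, j int) bool  { return h[i].rank.Cmp(h[j].rank) > 0 }
func (h maxHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *maxHeap) Push(x interface{}) { *h = append(*h, x.(ranked)) }
func (h *maxHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

// newRank hashes the id and ranks it by |target - hash|, as newNodeRank does.
func newRank(id string, target *big.Int) ranked {
	sum := sha256.Sum256([]byte(id))
	n := new(big.Int).SetBytes(sum[:])
	return ranked{id: id, rank: n.Abs(n.Sub(target, n))}
}

// subset keeps the `size` ids closest to the target.
func subset(ids []string, size int, target *big.Int) []string {
	if size <= 0 {
		return nil
	}
	h := maxHeap{}
	for i, id := range ids {
		if i < size {
			h = append(h, newRank(id, target))
			if i == size-1 {
				heap.Init(&h)
			}
			continue
		}
		r := newRank(id, target)
		if r.rank.Cmp(h[0].rank) < 0 { // closer than the current worst member
			h[0] = r
			heap.Fix(&h, 0)
		}
	}
	out := make([]string, 0, len(h))
	for _, r := range h {
		out = append(out, r.id)
	}
	return out
}

func main() {
	target := new(big.Int).SetBytes([]byte("round-42-crs"))
	fmt.Println(subset([]string{"n1", "n2", "n3", "n4", "n5"}, 3, target))
}
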
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/position.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/position.go
deleted file mode 100644
index 404f3035e..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/position.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package types
-
-import (
- "fmt"
-)
-
-// Position describes the position in the block lattice of an entity.
-type Position struct {
- ChainID uint32 `json:"chain_id"`
- Round uint64 `json:"round"`
- Height uint64 `json:"height"`
-}
-
-func (pos *Position) String() string {
- return fmt.Sprintf("pos[%d:%d:%d]", pos.Round, pos.ChainID, pos.Height)
-}
-
-// Equal checks if two positions are equal; it panics when their chainIDs
-// are different.
-func (pos *Position) Equal(other *Position) bool {
- if pos.ChainID != other.ChainID {
- panic(fmt.Errorf("unexpected chainID %d, should be %d",
- other.ChainID, pos.ChainID))
- }
- return pos.Round == other.Round && pos.Height == other.Height
-}
-
-// Newer checks if one block is newer than another one on the same chain.
-// It panics if two blocks on different chains are compared.
-func (pos *Position) Newer(other *Position) bool {
- if pos.ChainID != other.ChainID {
- panic(fmt.Errorf("unexpected chainID %d, should be %d",
- other.ChainID, pos.ChainID))
- }
- return pos.Round > other.Round ||
- (pos.Round == other.Round && pos.Height > other.Height)
-}
-
-// Older checks if one block is older than another one on the same chain.
-// It panics if two blocks on different chains are compared.
-func (pos *Position) Older(other *Position) bool {
- if pos.ChainID != other.ChainID {
- panic(fmt.Errorf("unexpected chainID %d, should be %d",
- other.ChainID, pos.ChainID))
- }
- return pos.Round < other.Round ||
- (pos.Round == other.Round && pos.Height < other.Height)
-}
-
-// Clone a position instance.
-func (pos *Position) Clone() *Position {
- return &Position{
- ChainID: pos.ChainID,
- Round: pos.Round,
- Height: pos.Height,
- }
-}
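
Position ordering above is lexicographic on (Round, Height) and only defined within one chain. A minimal standalone sketch of that comparison (hypothetical names, not the vendored type):

package main

import "fmt"

// position mirrors the (ChainID, Round, Height) triple; comparisons are only
// defined within a single chain.
type position struct {
	ChainID uint32
	Round   uint64
	Height  uint64
}

// newer uses the same lexicographic order as Position.Newer: Round first,
// Height as the tie-breaker.
func (p position) newer(o position) bool {
	if p.ChainID != o.ChainID {
		panic("positions on different chains are not comparable")
	}
	return p.Round > o.Round || (p.Round == o.Round && p.Height > o.Height)
}

func main() {
	a := position{ChainID: 0, Round: 1, Height: 5}
	b := position{ChainID: 0, Round: 2, Height: 0}
	fmt.Println(b.newer(a), a.newer(b)) // true false
}
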
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/vote.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/vote.go
deleted file mode 100644
index 32fb8982d..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/types/vote.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package types
-
-import (
- "fmt"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
-)
-
-// VoteType is the type of vote.
-type VoteType byte
-
-// VoteType enum.
-const (
- VoteInit VoteType = iota
- VotePreCom
- VoteCom
- // Do not add any type below MaxVoteType.
- MaxVoteType
-)
-
-// Vote is the vote structure defined in Crypto Shuffle Algorithm.
-type Vote struct {
- ProposerID NodeID `json:"proposer_id"`
- Type VoteType `json:"type"`
- BlockHash common.Hash `json:"block_hash"`
- Period uint64 `json:"period"`
- Position Position `json:"position"`
- Signature crypto.Signature `json:"signature"`
-}
-
-func (v *Vote) String() string {
- return fmt.Sprintf("Vote[%s:%d:%d](%d:%d):%s",
- v.ProposerID.String()[:6], v.Position.ChainID, v.Position.Height,
- v.Period, v.Type, v.BlockHash.String()[:6])
-}
-
-// Clone returns a deep copy of a vote.
-func (v *Vote) Clone() *Vote {
- return &Vote{
- ProposerID: v.ProposerID,
- Type: v.Type,
- BlockHash: v.BlockHash,
- Period: v.Period,
- Position: v.Position,
- Signature: v.Signature.Clone(),
- }
-}
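
Vote.Clone above clones the signature explicitly because crypto.Signature wraps a byte slice; copying the struct alone would leave both votes sharing the same backing array. A minimal sketch of the difference (hypothetical types, not the vendored API):

package main

import "fmt"

type sig struct{ Bytes []byte }

type vote struct {
	Period uint64
	Sig    sig
}

// clone makes a deep copy: the signature bytes get their own backing array.
func (v *vote) clone() *vote {
	return &vote{
		Period: v.Period,
		Sig:    sig{Bytes: append([]byte(nil), v.Sig.Bytes...)},
	}
}

func main() {
	a := &vote{Period: 1, Sig: sig{Bytes: []byte{0xaa}}}
	shallow := *a     // struct copy still shares a.Sig.Bytes
	deep := a.clone() // independent copy
	a.Sig.Bytes[0] = 0xbb
	fmt.Println(shallow.Sig.Bytes[0], deep.Sig.Bytes[0]) // 187 170
}
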
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/utils.go b/vendor/github.com/dexon-foundation/dexon-consensus-core/core/utils.go
deleted file mode 100644
index 6b9ce634f..000000000
--- a/vendor/github.com/dexon-foundation/dexon-consensus-core/core/utils.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2018 The dexon-consensus Authors
-// This file is part of the dexon-consensus library.
-//
-// The dexon-consensus library is free software: you can redistribute it
-// and/or modify it under the terms of the GNU Lesser General Public License as
-// published by the Free Software Foundation, either version 3 of the License,
-// or (at your option) any later version.
-//
-// The dexon-consensus library is distributed in the hope that it will be
-// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
-// General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the dexon-consensus library. If not, see
-// <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "errors"
- "fmt"
- "os"
- "sort"
- "time"
-
- "github.com/dexon-foundation/dexon-consensus/common"
- "github.com/dexon-foundation/dexon-consensus/core/crypto"
- "github.com/dexon-foundation/dexon-consensus/core/types"
-)
-
-var (
- debug = false
- // ErrEmptyTimestamps would be reported if Block.timestamps is empty.
- ErrEmptyTimestamps = errors.New("timestamp vector should not be empty")
-)
-
-func init() {
- if os.Getenv("DEBUG") != "" {
- debug = true
- }
-}
-
-// Debugf is like fmt.Printf, but only outputs when we are in debug mode.
-func Debugf(format string, args ...interface{}) {
- if debug {
- fmt.Printf(format, args...)
- }
-}
-
-// Debugln is like fmt.Println, but only outputs when we are in debug mode.
-func Debugln(args ...interface{}) {
- if debug {
- fmt.Println(args)
- }
-}
-
-func interpoTime(t1 time.Time, t2 time.Time, sep int) []time.Time {
- if sep == 0 {
- return []time.Time{}
- }
- if t1.After(t2) {
- return interpoTime(t2, t1, sep)
- }
- timestamps := make([]time.Time, sep)
- duration := t2.Sub(t1)
- period := time.Duration(
- (duration.Nanoseconds() / int64(sep+1))) * time.Nanosecond
- prevTime := t1
- for idx := range timestamps {
- prevTime = prevTime.Add(period)
- timestamps[idx] = prevTime
- }
- return timestamps
-}
-
-func getMedianTime(timestamps []time.Time) (t time.Time, err error) {
- if len(timestamps) == 0 {
- err = ErrEmptyTimestamps
- return
- }
- tscopy := make([]time.Time, 0, len(timestamps))
- for _, ts := range timestamps {
- tscopy = append(tscopy, ts)
- }
- sort.Sort(common.ByTime(tscopy))
- if len(tscopy)%2 == 0 {
- t1 := tscopy[len(tscopy)/2-1]
- t2 := tscopy[len(tscopy)/2]
- t = interpoTime(t1, t2, 1)[0]
- } else {
- t = tscopy[len(tscopy)/2]
- }
- return
-}
-
-func removeFromSortedUint32Slice(xs []uint32, x uint32) []uint32 {
- indexToRemove := sort.Search(len(xs), func(idx int) bool {
- return xs[idx] >= x
- })
- if indexToRemove == len(xs) || xs[indexToRemove] != x {
- // This value is not found.
- return xs
- }
- return append(xs[:indexToRemove], xs[indexToRemove+1:]...)
-}
-
-// HashConfigurationBlock returns the hash value of configuration block.
-func HashConfigurationBlock(
- notarySet map[types.NodeID]struct{},
- config *types.Config,
- snapshotHash common.Hash,
- prevHash common.Hash,
-) common.Hash {
- notaryIDs := make(types.NodeIDs, 0, len(notarySet))
- for nID := range notarySet {
- notaryIDs = append(notaryIDs, nID)
- }
- sort.Sort(notaryIDs)
- notarySetBytes := make([]byte, 0, len(notarySet)*len(common.Hash{}))
- for _, nID := range notaryIDs {
- notarySetBytes = append(notarySetBytes, nID.Hash[:]...)
- }
- configBytes := config.Bytes()
-
- return crypto.Keccak256Hash(
- notarySetBytes[:],
- configBytes[:],
- snapshotHash[:],
- prevHash[:],
- )
-}
-
-// VerifyBlock verifies the signature of types.Block.
-func VerifyBlock(b *types.Block) (err error) {
- hash, err := hashBlock(b)
- if err != nil {
- return
- }
- if hash != b.Hash {
- err = ErrIncorrectHash
- return
- }
- pubKey, err := crypto.SigToPub(b.Hash, b.Signature)
- if err != nil {
- return
- }
- if !b.ProposerID.Equal(types.NewNodeID(pubKey)) {
- err = ErrIncorrectSignature
- return
- }
- return
-}
-
-// DiffUint64 calculates difference between two uint64.
-func DiffUint64(a, b uint64) uint64 {
- if a > b {
- return a - b
- }
- return b - a
-}
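
getMedianTime above sorts a copy of the timestamps and, for an even count, interpolates one point between the two middle values, which is their midpoint. A standalone sketch of the same computation (hypothetical names, not the vendored API):

package main

import (
	"fmt"
	"sort"
	"time"
)

// medianTime returns the middle timestamp for an odd count, or the midpoint
// of the two middle timestamps for an even count.
func medianTime(ts []time.Time) (time.Time, error) {
	if len(ts) == 0 {
		return time.Time{}, fmt.Errorf("timestamp vector should not be empty")
	}
	cp := append([]time.Time(nil), ts...)
	sort.Slice(cp, func(i, j int) bool { return cp[i].Before(cp[j]) })
	mid := len(cp) / 2
	if len(cp)%2 == 1 {
		return cp[mid], nil
	}
	// Even count: one interpolated point between the two middle timestamps
	// is duration/2 past the earlier one.
	t1, t2 := cp[mid-1], cp[mid]
	return t1.Add(t2.Sub(t1) / 2), nil
}

func main() {
	base := time.Unix(1000, 0)
	ts := []time.Time{base, base.Add(4 * time.Second), base.Add(2 * time.Second), base.Add(10 * time.Second)}
	m, _ := medianTime(ts)
	fmt.Println(m.Sub(base)) // 3s
}
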
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go
index 8741baf10..d6875bc45 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/agreement.go
@@ -118,7 +118,6 @@ type agreement struct {
func newAgreement(
ID types.NodeID,
recv agreementReceiver,
- notarySet map[types.NodeID]struct{},
leader *leaderSelector,
authModule *Authenticator) *agreement {
agreement := &agreement{
@@ -137,7 +136,7 @@ func newAgreement(
// restart the agreement
func (a *agreement) restart(
- notarySet map[types.NodeID]struct{}, aID types.Position) {
+ notarySet map[types.NodeID]struct{}, aID types.Position, crs common.Hash) {
func() {
a.lock.Lock()
@@ -151,7 +150,7 @@ func (a *agreement) restart(
a.data.period = 1
a.data.blocks = make(map[types.NodeID]*types.Block)
a.data.requiredVote = len(notarySet)/3*2 + 1
- a.data.leader.restart()
+ a.data.leader.restart(crs)
a.data.lockValue = nullBlockHash
a.data.lockRound = 1
a.fastForward = make(chan uint64, 1)
@@ -213,7 +212,7 @@ func (a *agreement) restart(
func (a *agreement) stop() {
a.restart(make(map[types.NodeID]struct{}), types.Position{
ChainID: math.MaxUint32,
- })
+ }, common.Hash{})
}
func isStop(aID types.Position) bool {
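
stop() above reuses restart with a sentinel position (ChainID set to math.MaxUint32) and, after this change, an empty CRS; isStop presumably recognizes that sentinel. A tiny standalone sketch of the sentinel idiom (hypothetical names, not the vendored API):

package main

import (
	"fmt"
	"math"
)

type position struct {
	ChainID uint32
	Round   uint64
	Height  uint64
}

// stopPosition parks the state machine: no real chain ever uses
// math.MaxUint32 as its chain ID.
var stopPosition = position{ChainID: math.MaxUint32}

func isStop(p position) bool {
	return p.ChainID == math.MaxUint32
}

func main() {
	fmt.Println(isStop(stopPosition))         // true
	fmt.Println(isStop(position{ChainID: 0})) // false
}
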
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
index cec3c4f64..29d4aa2c8 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/consensus.go
@@ -315,12 +315,6 @@ func NewConsensus(
config := gov.Configuration(round)
nodeSetCache := NewNodeSetCache(gov)
logger.Debug("Calling Governance.CRS", "round", round)
- crs := gov.CRS(round)
- // Setup acking by information returned from Governace.
- nodes, err := nodeSetCache.GetNodeSet(round)
- if err != nil {
- panic(err)
- }
// Setup auth module.
authModule := NewAuthenticator(prv)
// Check if the application implement Debug interface.
@@ -385,8 +379,7 @@ func NewConsensus(
agreementModule := newAgreement(
con.ID,
recv,
- nodes.IDs,
- newLeaderSelector(crs, validLeader),
+ newLeaderSelector(validLeader),
con.authModule,
)
// Hacky way to make agreement module self contained.
@@ -448,6 +441,7 @@ func (con *Consensus) runBA(chainID uint32, tick <-chan struct{}) {
recv := con.receivers[chainID]
recv.restartNotary <- true
nIDs := make(map[types.NodeID]struct{})
+ crs := common.Hash{}
// Reset ticker
<-tick
BALoop:
@@ -466,16 +460,17 @@ BALoop:
if err != nil {
panic(err)
}
+ con.logger.Debug("Calling Governance.CRS", "round", recv.round)
+ crs = con.gov.CRS(recv.round)
con.logger.Debug("Calling Governance.Configuration",
"round", recv.round)
- con.logger.Debug("Calling Governance.CRS", "round", recv.round)
nIDs = nodes.GetSubSet(
int(con.gov.Configuration(recv.round).NotarySetSize),
- types.NewNotarySetTarget(con.gov.CRS(recv.round), chainID))
+ types.NewNotarySetTarget(crs, chainID))
}
nextPos := con.lattice.NextPosition(chainID)
nextPos.Round = recv.round
- agreement.restart(nIDs, nextPos)
+ agreement.restart(nIDs, nextPos, crs)
default:
}
if agreement.pullVotes() {
@@ -809,14 +804,15 @@ func (con *Consensus) ProcessAgreementResult(
con.logger.Debug("Calling Network.PullBlocks for syncing BA",
"hash", rand.BlockHash)
con.network.PullBlocks(common.Hashes{rand.BlockHash})
+ con.logger.Debug("Calling Governance.CRS", "round", rand.Position.Round)
+ crs := con.gov.CRS(rand.Position.Round)
nIDs := nodes.GetSubSet(
int(con.gov.Configuration(rand.Position.Round).NotarySetSize),
- types.NewNotarySetTarget(
- con.gov.CRS(rand.Position.Round), rand.Position.ChainID))
+ types.NewNotarySetTarget(crs, rand.Position.ChainID))
for _, vote := range rand.Votes {
agreement.processVote(&vote)
}
- agreement.restart(nIDs, rand.Position)
+ agreement.restart(nIDs, rand.Position, crs)
}
// Calculating randomness.
if rand.Position.Round == 0 {
@@ -929,7 +925,7 @@ func (con *Consensus) preProcessBlock(b *types.Block) (err error) {
func (con *Consensus) deliverBlock(b *types.Block) {
// TODO(mission): clone types.FinalizationResult
con.logger.Debug("Calling Application.BlockDelivered", "block", b)
- con.app.BlockDelivered(b.Hash, b.Finalization)
+ con.app.BlockDelivered(b.Hash, b.Position, b.Finalization)
if b.Position.Round+2 == con.roundToNotify {
// Only the first block delivered of that round would
// trigger this notification.
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go
index 75a2fdfcf..3a9c0752a 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/interfaces.go
@@ -42,7 +42,8 @@ type Application interface {
BlockConfirmed(block types.Block)
// BlockDelivered is called when a block is added to the compaction chain.
- BlockDelivered(blockHash common.Hash, result types.FinalizationResult)
+ BlockDelivered(blockHash common.Hash,
+ blockPosition types.Position, result types.FinalizationResult)
}
// Debug describes the application interface that requires
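
Any Application implementation outside this vendor tree now has to accept the extra position argument. A minimal sketch of the updated method on a hypothetical consumer type (only the BlockDelivered shape is taken from the interface above; the rest of core.Application is omitted):

package myapp

import (
	"log"

	"github.com/dexon-foundation/dexon-consensus/common"
	"github.com/dexon-foundation/dexon-consensus/core/types"
)

// App is a hypothetical consumer showing only the new callback shape.
type App struct{}

// BlockDelivered now receives the block's lattice position alongside the
// hash and finalization result.
func (a *App) BlockDelivered(hash common.Hash,
	pos types.Position, result types.FinalizationResult) {
	log.Printf("delivered %s at round=%d chain=%d height=%d",
		hash.String()[:6], pos.Round, pos.ChainID, pos.Height)
}
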
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go
index 2be596abc..08006dbfb 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/leader-selector.go
@@ -59,13 +59,8 @@ type leaderSelector struct {
lock sync.Mutex
}
-func newLeaderSelector(
- crs common.Hash, validLeader validLeaderFn) *leaderSelector {
- numCRS := big.NewInt(0)
- numCRS.SetBytes(crs[:])
+func newLeaderSelector(validLeader validLeaderFn) *leaderSelector {
return &leaderSelector{
- numCRS: numCRS,
- hashCRS: crs,
minCRSBlock: maxHash,
validLeader: validLeader,
}
@@ -86,9 +81,13 @@ func (l *leaderSelector) probability(sig crypto.Signature) float64 {
return p
}
-func (l *leaderSelector) restart() {
+func (l *leaderSelector) restart(crs common.Hash) {
+ numCRS := big.NewInt(0)
+ numCRS.SetBytes(crs[:])
l.lock.Lock()
defer l.lock.Unlock()
+ l.numCRS = numCRS
+ l.hashCRS = crs
l.minCRSBlock = maxHash
l.minBlockHash = common.Hash{}
l.pendingBlocks = []*types.Block{}
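
With this change the CRS-derived values are recomputed on every restart instead of being fixed at construction, which lets a single leaderSelector follow the per-round CRS. A standalone sketch of the same reset pattern (hypothetical names, not the vendored API):

package main

import (
	"fmt"
	"math/big"
	"sync"
)

type hash [32]byte

// selector keeps only round-scoped state; everything derived from the CRS is
// filled in by restart, mirroring the refactor above.
type selector struct {
	lock    sync.Mutex
	numCRS  *big.Int
	hashCRS hash
	minSeen *big.Int
}

func (s *selector) restart(crs hash) {
	num := new(big.Int).SetBytes(crs[:])
	s.lock.Lock()
	defer s.lock.Unlock()
	s.numCRS = num
	s.hashCRS = crs
	s.minSeen = nil // forget the previous round's best candidate
}

func main() {
	s := &selector{}
	s.restart(hash{0x01}) // round N
	fmt.Println(s.numCRS)
	s.restart(hash{0x02}) // round N+1 simply restarts with the new CRS
	fmt.Println(s.numCRS)
}
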
diff --git a/vendor/github.com/dexon-foundation/dexon-consensus/core/nonblocking.go b/vendor/github.com/dexon-foundation/dexon-consensus/core/nonblocking.go
index fafbd10bb..a73331fae 100644
--- a/vendor/github.com/dexon-foundation/dexon-consensus/core/nonblocking.go
+++ b/vendor/github.com/dexon-foundation/dexon-consensus/core/nonblocking.go
@@ -39,8 +39,9 @@ type totalOrderingDeliveredEvent struct {
}
type blockDeliveredEvent struct {
- blockHash common.Hash
- result *types.FinalizationResult
+ blockHash common.Hash
+ blockPosition types.Position
+ result *types.FinalizationResult
}
// nonBlocking implements these interfaces and is a decorator for
@@ -99,7 +100,7 @@ func (nb *nonBlocking) run() {
case totalOrderingDeliveredEvent:
nb.debug.TotalOrderingDelivered(e.blockHashes, e.mode)
case blockDeliveredEvent:
- nb.app.BlockDelivered(e.blockHash, *e.result)
+ nb.app.BlockDelivered(e.blockHash, e.blockPosition, *e.result)
default:
fmt.Printf("Unknown event %v.", e)
}
@@ -155,10 +156,11 @@ func (nb *nonBlocking) TotalOrderingDelivered(
}
// BlockDelivered is called when a block is added to the compaction chain.
-func (nb *nonBlocking) BlockDelivered(
- blockHash common.Hash, result types.FinalizationResult) {
+func (nb *nonBlocking) BlockDelivered(blockHash common.Hash,
+ blockPosition types.Position, result types.FinalizationResult) {
nb.addEvent(blockDeliveredEvent{
- blockHash: blockHash,
- result: &result,
+ blockHash: blockHash,
+ blockPosition: blockPosition,
+ result: &result,
})
}
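
nonBlocking is a decorator that turns the (possibly slow) application callbacks into queued events consumed on a separate goroutine, so the new position field simply rides along in blockDeliveredEvent. A reduced standalone sketch of that pattern (hypothetical names, one event type, a channel instead of the vendored event slice):

package main

import (
	"fmt"
	"sync"
)

type deliveredEvent struct {
	hash   string
	height uint64
}

// nonBlocking queues events and replays them on its own goroutine so the
// caller never waits on the wrapped handler.
type nonBlocking struct {
	events chan deliveredEvent
	wg     sync.WaitGroup
	handle func(deliveredEvent)
}

func newNonBlocking(handle func(deliveredEvent)) *nonBlocking {
	nb := &nonBlocking{events: make(chan deliveredEvent, 16), handle: handle}
	nb.wg.Add(1)
	go func() {
		defer nb.wg.Done()
		for e := range nb.events {
			nb.handle(e)
		}
	}()
	return nb
}

// BlockDelivered returns immediately; the handler runs asynchronously.
func (nb *nonBlocking) BlockDelivered(hash string, height uint64) {
	nb.events <- deliveredEvent{hash: hash, height: height}
}

// wait drains the queue and stops the worker.
func (nb *nonBlocking) wait() {
	close(nb.events)
	nb.wg.Wait()
}

func main() {
	nb := newNonBlocking(func(e deliveredEvent) {
		fmt.Println("delivered", e.hash, "at height", e.height)
	})
	nb.BlockDelivered("abc123", 7)
	nb.wait()
}
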
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 7eec125ec..346ff6f09 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -105,50 +105,50 @@
{
"checksumSHA1": "ev84RyegNbt2Pr/sK26LK9LoQNI=",
"path": "github.com/dexon-foundation/dexon-consensus/common",
- "revision": "117e7b00aeb314e5201a6e82b606385370a86ee4",
- "revisionTime": "2018-11-02T05:42:54Z"
+ "revision": "99e2bca925030368ed485626a58ab9a03d40eda6",
+ "revisionTime": "2018-11-02T08:05:25Z"
},
{
- "checksumSHA1": "0r8fTQxyg8QYnU7bvwuRvy1Vme8=",
+ "checksumSHA1": "Fm4DEad3JPvl3TIUth4knbY35NE=",
"path": "github.com/dexon-foundation/dexon-consensus/core",
- "revision": "117e7b00aeb314e5201a6e82b606385370a86ee4",
- "revisionTime": "2018-11-02T05:42:54Z"
+ "revision": "99e2bca925030368ed485626a58ab9a03d40eda6",
+ "revisionTime": "2018-11-02T08:05:25Z"
},
{
"checksumSHA1": "vNsaBvsrXJF+W6K5DCLpgy1rUZY=",
"path": "github.com/dexon-foundation/dexon-consensus/core/blockdb",
- "revision": "117e7b00aeb314e5201a6e82b606385370a86ee4",
- "revisionTime": "2018-11-02T05:42:54Z"
+ "revision": "99e2bca925030368ed485626a58ab9a03d40eda6",
+ "revisionTime": "2018-11-02T08:05:25Z"
},
{
"checksumSHA1": "tQSbYCu5P00lUhKsx3IbBZCuSLY=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto",
- "revision": "117e7b00aeb314e5201a6e82b606385370a86ee4",
- "revisionTime": "2018-11-02T05:42:54Z"
+ "revision": "99e2bca925030368ed485626a58ab9a03d40eda6",
+ "revisionTime": "2018-11-02T08:05:25Z"
},
{
"checksumSHA1": "p2jOAulavUU2xyj018pYPHlj8XA=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg",
- "revision": "117e7b00aeb314e5201a6e82b606385370a86ee4",
- "revisionTime": "2018-11-02T05:42:54Z"
+ "revision": "99e2bca925030368ed485626a58ab9a03d40eda6",
+ "revisionTime": "2018-11-02T08:05:25Z"
},
{
"checksumSHA1": "6Pf6caC8LTNCI7IflFmglKYnxYo=",
"path": "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa",
- "revision": "117e7b00aeb314e5201a6e82b606385370a86ee4",
- "revisionTime": "2018-11-02T05:42:54Z"
+ "revision": "99e2bca925030368ed485626a58ab9a03d40eda6",
+ "revisionTime": "2018-11-02T08:05:25Z"
},
{
"checksumSHA1": "Zxp6rFW4SLz4ZSITYbyrm/5jHDg=",
"path": "github.com/dexon-foundation/dexon-consensus/core/types",
- "revision": "117e7b00aeb314e5201a6e82b606385370a86ee4",
- "revisionTime": "2018-11-02T05:42:54Z"
+ "revision": "99e2bca925030368ed485626a58ab9a03d40eda6",
+ "revisionTime": "2018-11-02T08:05:25Z"
},
{
"checksumSHA1": "ovChyW9OfDGnk/7CDAR+A5vJymc=",
"path": "github.com/dexon-foundation/dexon-consensus/core/types/dkg",
- "revision": "117e7b00aeb314e5201a6e82b606385370a86ee4",
- "revisionTime": "2018-11-02T05:42:54Z"
+ "revision": "99e2bca925030368ed485626a58ab9a03d40eda6",
+ "revisionTime": "2018-11-02T08:05:25Z"
},
{
"checksumSHA1": "TAkwduKZqLyimyTPPWIllZWYFuE=",