| author | Mission Liao <mission.liao@dexon.org> | 2019-05-06 16:51:06 +0800 |
|---|---|---|
| committer | Mission Liao <mission.liao@dexon.org> | 2019-05-07 15:09:38 +0800 |
| commit | 9d7cc8733bdd9b7a2f1bbcaa2cf7f9a17a29e215 (patch) | |
| tree | fc0eb54fb65e0576e557a8bfb93b81c52defdc4f | |
| parent | 57bdeb708fedb8f73f091f105628880fa27c772e (diff) | |
Copy essential files from dexon-consensus directly
- from 42d585f1e5c9420f15b1d7333e7874a04345cc36@master
- simulation, bin, cmd are not required
96 files changed, 28338 insertions, 0 deletions
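The first file in the diff, dex/consensus/common/event.go, implements a min-heap-backed height observer: a callback registered at height h fires exactly once, as soon as any height >= h is notified, and Reset discards everything still pending. A minimal usage sketch of that API as copied below — the import path shown is the upstream dexon-consensus one used throughout these files; after this copy the in-tree path would differ:

```go
package main

import (
	"fmt"

	"github.com/dexon-foundation/dexon-consensus/common"
)

func main() {
	e := common.NewEvent()
	// The callback receives the height that triggered it, which may be
	// later than the height it was registered at.
	e.RegisterHeight(100, func(h uint64) {
		fmt.Println("chain reached height", h)
	})
	e.NotifyHeight(99)  // below every registered height: nothing fires
	e.NotifyHeight(150) // pops the event and runs the callback with h == 150
	e.NotifyHeight(150) // already popped: nothing fires again
	e.Reset()           // drops any callbacks still pending
}
```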
diff --git a/dex/consensus/common/event.go b/dex/consensus/common/event.go new file mode 100644 index 000000000..4e4e23bf3 --- /dev/null +++ b/dex/consensus/common/event.go @@ -0,0 +1,101 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package common + +import ( + "container/heap" + "sync" +) + +type heightEventFn func(uint64) + +type heightEvent struct { + h uint64 + fn heightEventFn +} + +// heightEvents implements a Min-Heap structure. +type heightEvents []heightEvent + +func (h heightEvents) Len() int { return len(h) } +func (h heightEvents) Less(i, j int) bool { return h[i].h < h[j].h } +func (h heightEvents) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h *heightEvents) Push(x interface{}) { + *h = append(*h, x.(heightEvent)) +} +func (h *heightEvents) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +// Event implements the Observer pattern. +type Event struct { + heightEvents heightEvents + heightEventsLock sync.Mutex +} + +// NewEvent creates a new event instance. +func NewEvent() *Event { + he := heightEvents{} + heap.Init(&he) + return &Event{ + heightEvents: he, + } +} + +// RegisterHeight to get notified on a specific height. +func (e *Event) RegisterHeight(h uint64, fn heightEventFn) { + e.heightEventsLock.Lock() + defer e.heightEventsLock.Unlock() + heap.Push(&e.heightEvents, heightEvent{ + h: h, + fn: fn, + }) +} + +// NotifyHeight and trigger function callback. +func (e *Event) NotifyHeight(h uint64) { + fns := func() (fns []heightEventFn) { + e.heightEventsLock.Lock() + defer e.heightEventsLock.Unlock() + if len(e.heightEvents) == 0 { + return + } + for h >= e.heightEvents[0].h { + he := heap.Pop(&e.heightEvents).(heightEvent) + fns = append(fns, he.fn) + if len(e.heightEvents) == 0 { + return + } + } + return + }() + for _, fn := range fns { + fn(h) + } +} + +// Reset clears all pending event +func (e *Event) Reset() { + e.heightEventsLock.Lock() + defer e.heightEventsLock.Unlock() + e.heightEvents = heightEvents{} +} diff --git a/dex/consensus/common/event_test.go b/dex/consensus/common/event_test.go new file mode 100644 index 000000000..86c497194 --- /dev/null +++ b/dex/consensus/common/event_test.go @@ -0,0 +1,106 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. 
+// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package common + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/suite" +) + +type EventTestSuite struct { + suite.Suite +} + +func (s *EventTestSuite) TestHeightEvent() { + event := NewEvent() + triggered := make(chan int, 100) + trigger := func(id int) func(uint64) { + return func(uint64) { + triggered <- id + } + } + event.RegisterHeight(100, trigger(0)) + event.NotifyHeight(0) + s.Len(triggered, 0) + event.NotifyHeight(150) + s.Len(triggered, 1) + triggered = make(chan int, 100) + event.NotifyHeight(150) + s.Len(triggered, 0) + + event.RegisterHeight(100, trigger(0)) + event.RegisterHeight(100, trigger(0)) + event.RegisterHeight(100, trigger(0)) + event.RegisterHeight(100, trigger(0)) + event.NotifyHeight(150) + s.Len(triggered, 4) + + triggered = make(chan int, 100) + for i := 0; i < 10; i++ { + event.RegisterHeight(uint64(100+i*10), trigger(i)) + } + event.NotifyHeight(130) + s.Require().Len(triggered, 4) + for i := 0; i < 4; i++ { + j := <-triggered + s.Equal(i, j) + } + + event = NewEvent() + triggered = make(chan int, 100) + nums := make([]int, 10) + for i := range nums { + nums[i] = i + } + rand.Shuffle(len(nums), func(i, j int) { + nums[i], nums[j] = nums[j], nums[i] + }) + for _, i := range nums { + event.RegisterHeight(uint64(100+i*10), trigger(i)) + } + event.NotifyHeight(130) + s.Require().Len(triggered, 4) + for i := 0; i < 4; i++ { + j := <-triggered + s.Equal(i, j) + } +} + +func (s *EventTestSuite) TestReset() { + event := NewEvent() + triggered := make(chan int, 100) + trigger := func(id int) func(h uint64) { + return func(uint64) { + triggered <- id + } + } + event.RegisterHeight(100, trigger(0)) + event.RegisterHeight(100, trigger(0)) + event.RegisterHeight(100, trigger(0)) + event.RegisterHeight(100, trigger(0)) + event.RegisterHeight(100, trigger(0)) + event.Reset() + event.NotifyHeight(150) + s.Len(triggered, 0) +} + +func TestEvent(t *testing.T) { + suite.Run(t, new(EventTestSuite)) +} diff --git a/dex/consensus/common/logger.go b/dex/consensus/common/logger.go new file mode 100644 index 000000000..3328e939a --- /dev/null +++ b/dex/consensus/common/logger.go @@ -0,0 +1,134 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package common + +import "log" + +// Logger define the way to receive logs from Consensus instance. 
+// NOTE: parameter in 'ctx' should be paired as key-value mapping. For example, +// to log an error with message: +// logger.Error("some message", "error", err) +// which is similar to loggers with context: +// logger.Error("some message", map[string]interface{}{ +// "error": err, +// }) +type Logger interface { + // Info logs info level logs. + Trace(msg string, ctx ...interface{}) + Debug(msg string, ctx ...interface{}) + Info(msg string, ctx ...interface{}) + Warn(msg string, ctx ...interface{}) + Error(msg string, ctx ...interface{}) +} + +// NullLogger logs nothing. +type NullLogger struct{} + +// Trace implements Logger interface. +func (logger *NullLogger) Trace(msg string, ctx ...interface{}) { +} + +// Debug implements Logger interface. +func (logger *NullLogger) Debug(msg string, ctx ...interface{}) { +} + +// Info implements Logger interface. +func (logger *NullLogger) Info(msg string, ctx ...interface{}) { +} + +// Warn implements Logger interface. +func (logger *NullLogger) Warn(msg string, ctx ...interface{}) { +} + +// Error implements Logger interface. +func (logger *NullLogger) Error(msg string, ctx ...interface{}) { +} + +// SimpleLogger logs everything. +type SimpleLogger struct{} + +// composeVargs makes (msg, ctx...) could be pass to log.Println +func composeVargs(msg string, ctxs []interface{}) []interface{} { + args := []interface{}{msg} + for _, c := range ctxs { + args = append(args, c) + } + return args +} + +// Trace implements Logger interface. +func (logger *SimpleLogger) Trace(msg string, ctx ...interface{}) { + log.Println(composeVargs(msg, ctx)...) +} + +// Debug implements Logger interface. +func (logger *SimpleLogger) Debug(msg string, ctx ...interface{}) { + log.Println(composeVargs(msg, ctx)...) +} + +// Info implements Logger interface. +func (logger *SimpleLogger) Info(msg string, ctx ...interface{}) { + log.Println(composeVargs(msg, ctx)...) +} + +// Warn implements Logger interface. +func (logger *SimpleLogger) Warn(msg string, ctx ...interface{}) { + log.Println(composeVargs(msg, ctx)...) +} + +// Error implements Logger interface. +func (logger *SimpleLogger) Error(msg string, ctx ...interface{}) { + log.Println(composeVargs(msg, ctx)...) +} + +// CustomLogger logs everything. +type CustomLogger struct { + logger *log.Logger +} + +// NewCustomLogger creates a new custom logger. +func NewCustomLogger(logger *log.Logger) *CustomLogger { + return &CustomLogger{ + logger: logger, + } +} + +// Trace implements Logger interface. +func (logger *CustomLogger) Trace(msg string, ctx ...interface{}) { + logger.logger.Println(composeVargs(msg, ctx)...) +} + +// Debug implements Logger interface. +func (logger *CustomLogger) Debug(msg string, ctx ...interface{}) { + logger.logger.Println(composeVargs(msg, ctx)...) +} + +// Info implements Logger interface. +func (logger *CustomLogger) Info(msg string, ctx ...interface{}) { + logger.logger.Println(composeVargs(msg, ctx)...) +} + +// Warn implements Logger interface. +func (logger *CustomLogger) Warn(msg string, ctx ...interface{}) { + logger.logger.Println(composeVargs(msg, ctx)...) +} + +// Error implements Logger interface. +func (logger *CustomLogger) Error(msg string, ctx ...interface{}) { + logger.logger.Println(composeVargs(msg, ctx)...) 
+} diff --git a/dex/consensus/common/types.go b/dex/consensus/common/types.go new file mode 100644 index 000000000..883492bf3 --- /dev/null +++ b/dex/consensus/common/types.go @@ -0,0 +1,90 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package common + +import ( + "bytes" + "encoding/hex" + "sort" + "time" +) + +const ( + // HashLength is the length of a hash in DEXON. + HashLength = 32 +) + +// Hash is the basic hash type in DEXON. +type Hash [HashLength]byte + +func (h Hash) String() string { + return hex.EncodeToString([]byte(h[:])) +} + +// Bytes return the hash as slice of bytes. +func (h Hash) Bytes() []byte { + return h[:] +} + +// Equal compares if two hashes are the same. +func (h Hash) Equal(hp Hash) bool { + return h == hp +} + +// Less compares if current hash is lesser. +func (h Hash) Less(hp Hash) bool { + return bytes.Compare(h[:], hp[:]) < 0 +} + +// MarshalText implements the encoding.TextMarhsaler interface. +func (h Hash) MarshalText() ([]byte, error) { + result := make([]byte, hex.EncodedLen(HashLength)) + hex.Encode(result, h[:]) + return result, nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (h *Hash) UnmarshalText(text []byte) error { + _, err := hex.Decode(h[:], text) + return err +} + +// Hashes is for sorting hashes. +type Hashes []Hash + +func (hs Hashes) Len() int { return len(hs) } +func (hs Hashes) Less(i, j int) bool { return hs[i].Less(hs[j]) } +func (hs Hashes) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] } + +// SortedHashes is a slice of hashes sorted in ascending order. +type SortedHashes Hashes + +// NewSortedHashes converts a slice of hashes to a sorted one. It's a +// firewall to prevent us from assigning unsorted hashes to a variable +// declared as SortedHashes directly. +func NewSortedHashes(hs Hashes) SortedHashes { + sort.Sort(hs) + return SortedHashes(hs) +} + +// ByTime implements sort.Interface for time.Time. +type ByTime []time.Time + +func (t ByTime) Len() int { return len(t) } +func (t ByTime) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t ByTime) Less(i, j int) bool { return t[i].Before(t[j]) } diff --git a/dex/consensus/common/utils.go b/dex/consensus/common/utils.go new file mode 100644 index 000000000..0e847900f --- /dev/null +++ b/dex/consensus/common/utils.go @@ -0,0 +1,41 @@ +package common + +import ( + "math/rand" + "time" +) + +var random *rand.Rand + +func init() { + random = rand.New(rand.NewSource(time.Now().Unix())) +} + +// NewRandomHash returns a random Hash-like value. +func NewRandomHash() Hash { + x := Hash{} + for i := 0; i < HashLength; i++ { + x[i] = byte(random.Int() % 256) + } + return x +} + +// GenerateRandomBytes generates bytes randomly. 
+func GenerateRandomBytes() []byte { + randomness := make([]byte, 32) + _, err := rand.Read(randomness) + if err != nil { + panic(err) + } + return randomness +} + +// CopyBytes copies byte slice. +func CopyBytes(src []byte) (dst []byte) { + if len(src) == 0 { + return + } + dst = make([]byte, len(src)) + copy(dst, src) + return +} diff --git a/dex/consensus/core/agreement-mgr.go b/dex/consensus/core/agreement-mgr.go new file mode 100644 index 000000000..17def6747 --- /dev/null +++ b/dex/consensus/core/agreement-mgr.go @@ -0,0 +1,676 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "context" + "errors" + "math" + "sync" + "time" + + lru "github.com/hashicorp/golang-lru" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/types" + typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" + "github.com/dexon-foundation/dexon-consensus/core/utils" +) + +// Errors returned from BA modules +var ( + ErrPreviousRoundIsNotFinished = errors.New("previous round is not finished") + ErrRoundOutOfRange = errors.New("round out of range") + ErrInvalidBlock = errors.New("invalid block") + ErrNoValidLeader = errors.New("no valid leader") + ErrIncorrectCRSSignature = errors.New("incorrect CRS signature") + ErrBlockTooOld = errors.New("block too old") +) + +const maxResultCache = 100 +const settingLimit = 3 + +// genValidLeader generate a validLeader function for agreement modules. 
+func genValidLeader( + mgr *agreementMgr) validLeaderFn { + return func(block *types.Block, crs common.Hash) (bool, error) { + if block.Timestamp.After(time.Now()) { + return false, nil + } + if block.Position.Round >= DKGDelayRound { + if mgr.recv.npks == nil { + return false, nil + } + if block.Position.Round > mgr.recv.npks.Round { + return false, nil + } + if block.Position.Round < mgr.recv.npks.Round { + return false, ErrBlockTooOld + } + } + if !utils.VerifyCRSSignature(block, crs, mgr.recv.npks) { + return false, ErrIncorrectCRSSignature + } + if err := mgr.bcModule.sanityCheck(block); err != nil { + if err == ErrRetrySanityCheckLater { + return false, nil + } + return false, err + } + mgr.logger.Debug("Calling Application.VerifyBlock", "block", block) + switch mgr.app.VerifyBlock(block) { + case types.VerifyInvalidBlock: + return false, ErrInvalidBlock + case types.VerifyRetryLater: + return false, nil + default: + } + return true, nil + } +} + +type agreementMgrConfig struct { + utils.RoundBasedConfig + + notarySetSize uint32 + lambdaBA time.Duration + crs common.Hash +} + +func (c *agreementMgrConfig) from( + round uint64, config *types.Config, crs common.Hash) { + c.notarySetSize = config.NotarySetSize + c.lambdaBA = config.LambdaBA + c.crs = crs + c.SetupRoundBasedFields(round, config) +} + +func newAgreementMgrConfig(prev agreementMgrConfig, config *types.Config, + crs common.Hash) (c agreementMgrConfig) { + c = agreementMgrConfig{} + c.from(prev.RoundID()+1, config, crs) + c.AppendTo(prev.RoundBasedConfig) + return +} + +type baRoundSetting struct { + round uint64 + dkgSet map[types.NodeID]struct{} + threshold int + ticker Ticker + crs common.Hash +} + +type agreementMgr struct { + // TODO(mission): unbound Consensus instance from this module. + con *Consensus + ID types.NodeID + app Application + gov Governance + network Network + logger common.Logger + cache *utils.NodeSetCache + signer *utils.Signer + bcModule *blockChain + ctx context.Context + configs []agreementMgrConfig + baModule *agreement + recv *consensusBAReceiver + processedBAResult map[types.Position]struct{} + voteFilter *utils.VoteFilter + settingCache *lru.Cache + curRoundSetting *baRoundSetting + waitGroup sync.WaitGroup + isRunning bool + lock sync.RWMutex +} + +func newAgreementMgr(con *Consensus) (mgr *agreementMgr, err error) { + settingCache, _ := lru.New(settingLimit) + mgr = &agreementMgr{ + con: con, + ID: con.ID, + app: con.app, + gov: con.gov, + network: con.network, + logger: con.logger, + cache: con.nodeSetCache, + signer: con.signer, + bcModule: con.bcModule, + ctx: con.ctx, + processedBAResult: make(map[types.Position]struct{}, maxResultCache), + voteFilter: utils.NewVoteFilter(), + settingCache: settingCache, + } + mgr.recv = &consensusBAReceiver{ + consensus: con, + restartNotary: make(chan types.Position, 1), + } + return mgr, nil +} + +func (mgr *agreementMgr) prepare() { + round := mgr.bcModule.tipRound() + agr := newAgreement( + mgr.ID, + mgr.recv, + newLeaderSelector(genValidLeader(mgr), mgr.logger), + mgr.signer, + mgr.logger) + setting := mgr.generateSetting(round) + if setting == nil { + mgr.logger.Warn("Unable to prepare init setting", "round", round) + return + } + mgr.curRoundSetting = setting + agr.notarySet = mgr.curRoundSetting.dkgSet + // Hacky way to make agreement module self contained. 
+ mgr.recv.agreementModule = agr + mgr.baModule = agr + if round >= DKGDelayRound { + if _, exist := setting.dkgSet[mgr.ID]; exist { + mgr.logger.Debug("Preparing signer and npks.", "round", round) + npk, signer, err := mgr.con.cfgModule.getDKGInfo(round, false) + if err != nil { + mgr.logger.Error("Failed to prepare signer and npks.", + "round", round, + "error", err) + } + mgr.logger.Debug("Prepared signer and npks.", + "round", round, "signer", signer != nil, "npks", npk != nil) + } + } + return +} + +func (mgr *agreementMgr) run() { + mgr.lock.Lock() + defer mgr.lock.Unlock() + if mgr.isRunning { + return + } + mgr.isRunning = true + mgr.waitGroup.Add(1) + go func() { + defer mgr.waitGroup.Done() + mgr.runBA(mgr.bcModule.tipRound()) + }() +} + +func (mgr *agreementMgr) calcLeader( + dkgSet map[types.NodeID]struct{}, + crs common.Hash, pos types.Position) ( + types.NodeID, error) { + nodeSet := types.NewNodeSetFromMap(dkgSet) + leader := nodeSet.GetSubSet(1, types.NewNodeLeaderTarget( + crs, pos.Height)) + for nID := range leader { + return nID, nil + } + return types.NodeID{}, ErrNoValidLeader +} + +func (mgr *agreementMgr) config(round uint64) *agreementMgrConfig { + mgr.lock.RLock() + defer mgr.lock.RUnlock() + if round < mgr.configs[0].RoundID() { + panic(ErrRoundOutOfRange) + } + roundIndex := round - mgr.configs[0].RoundID() + if roundIndex >= uint64(len(mgr.configs)) { + return nil + } + return &mgr.configs[roundIndex] +} + +func (mgr *agreementMgr) notifyRoundEvents(evts []utils.RoundEventParam) error { + mgr.lock.Lock() + defer mgr.lock.Unlock() + apply := func(e utils.RoundEventParam) error { + if len(mgr.configs) > 0 { + lastCfg := mgr.configs[len(mgr.configs)-1] + if e.BeginHeight != lastCfg.RoundEndHeight() { + return ErrInvalidBlockHeight + } + if lastCfg.RoundID() == e.Round { + mgr.configs[len(mgr.configs)-1].ExtendLength() + } else if lastCfg.RoundID()+1 == e.Round { + mgr.configs = append(mgr.configs, newAgreementMgrConfig( + lastCfg, e.Config, e.CRS)) + } else { + return ErrInvalidRoundID + } + } else { + c := agreementMgrConfig{} + c.from(e.Round, e.Config, e.CRS) + c.SetRoundBeginHeight(e.BeginHeight) + mgr.configs = append(mgr.configs, c) + } + return nil + } + for _, e := range evts { + if err := apply(e); err != nil { + return err + } + } + return nil +} + +func (mgr *agreementMgr) checkProposer( + round uint64, proposerID types.NodeID) error { + if round == mgr.curRoundSetting.round { + if _, exist := mgr.curRoundSetting.dkgSet[proposerID]; !exist { + return ErrNotInNotarySet + } + } else if round == mgr.curRoundSetting.round+1 { + setting := mgr.generateSetting(round) + if setting == nil { + return ErrConfigurationNotReady + } + if _, exist := setting.dkgSet[proposerID]; !exist { + return ErrNotInNotarySet + } + } + return nil +} + +func (mgr *agreementMgr) processVote(v *types.Vote) (err error) { + if !mgr.recv.isNotary { + return nil + } + if mgr.voteFilter.Filter(v) { + return nil + } + if err := mgr.checkProposer(v.Position.Round, v.ProposerID); err != nil { + return err + } + if err = mgr.baModule.processVote(v); err == nil { + mgr.baModule.updateFilter(mgr.voteFilter) + mgr.voteFilter.AddVote(v) + } + if err == ErrSkipButNoError { + err = nil + } + return +} + +func (mgr *agreementMgr) processBlock(b *types.Block) error { + if err := mgr.checkProposer(b.Position.Round, b.ProposerID); err != nil { + return err + } + return mgr.baModule.processBlock(b) +} + +func (mgr *agreementMgr) touchAgreementResult( + result *types.AgreementResult) (first bool) { + // 
DO NOT LOCK THIS FUNCTION!!!!!!!! YOU WILL REGRET IT!!!!! + if _, exist := mgr.processedBAResult[result.Position]; !exist { + first = true + if len(mgr.processedBAResult) > maxResultCache { + for k := range mgr.processedBAResult { + // Randomly drop one element. + delete(mgr.processedBAResult, k) + break + } + } + mgr.processedBAResult[result.Position] = struct{}{} + } + return +} + +func (mgr *agreementMgr) untouchAgreementResult( + result *types.AgreementResult) { + // DO NOT LOCK THIS FUNCTION!!!!!!!! YOU WILL REGRET IT!!!!! + delete(mgr.processedBAResult, result.Position) +} + +func (mgr *agreementMgr) processAgreementResult( + result *types.AgreementResult) error { + aID := mgr.baModule.agreementID() + if isStop(aID) { + return nil + } + if result.Position == aID && !mgr.baModule.confirmed() { + mgr.logger.Info("Syncing BA", "position", result.Position) + if result.Position.Round >= DKGDelayRound { + return mgr.baModule.processAgreementResult(result) + } + for key := range result.Votes { + if err := mgr.baModule.processVote(&result.Votes[key]); err != nil { + return err + } + } + } else if result.Position.Newer(aID) { + mgr.logger.Info("Fast syncing BA", "position", result.Position) + if result.Position.Round < DKGDelayRound { + mgr.logger.Debug("Calling Network.PullBlocks for fast syncing BA", + "hash", result.BlockHash) + mgr.network.PullBlocks(common.Hashes{result.BlockHash}) + for key := range result.Votes { + if err := mgr.baModule.processVote(&result.Votes[key]); err != nil { + return err + } + } + } + setting := mgr.generateSetting(result.Position.Round) + if setting == nil { + mgr.logger.Warn("unable to get setting", "round", + result.Position.Round) + return ErrConfigurationNotReady + } + mgr.curRoundSetting = setting + leader, err := mgr.calcLeader(setting.dkgSet, setting.crs, result.Position) + if err != nil { + return err + } + mgr.baModule.restart( + setting.dkgSet, setting.threshold, + result.Position, leader, setting.crs) + if result.Position.Round >= DKGDelayRound { + return mgr.baModule.processAgreementResult(result) + } + } + return nil +} + +func (mgr *agreementMgr) processFinalizedBlock(block *types.Block) error { + aID := mgr.baModule.agreementID() + if block.Position.Older(aID) { + return nil + } + mgr.baModule.processFinalizedBlock(block) + return nil +} + +func (mgr *agreementMgr) stop() { + // Stop all running agreement modules. + func() { + mgr.lock.Lock() + defer mgr.lock.Unlock() + mgr.baModule.stop() + }() + // Block until all routines are done. 
+ mgr.waitGroup.Wait() +} + +func (mgr *agreementMgr) generateSetting(round uint64) *baRoundSetting { + if setting, exist := mgr.settingCache.Get(round); exist { + return setting.(*baRoundSetting) + } + curConfig := mgr.config(round) + if curConfig == nil { + return nil + } + var dkgSet map[types.NodeID]struct{} + if round >= DKGDelayRound { + _, qualidifed, err := typesDKG.CalcQualifyNodes( + mgr.gov.DKGMasterPublicKeys(round), + mgr.gov.DKGComplaints(round), + utils.GetDKGThreshold(mgr.gov.Configuration(round)), + ) + if err != nil { + mgr.logger.Error("Failed to get gpk", "round", round, "error", err) + return nil + } + dkgSet = qualidifed + } + if len(dkgSet) == 0 { + var err error + dkgSet, err = mgr.cache.GetNotarySet(round) + if err != nil { + mgr.logger.Error("Failed to get notarySet", "round", round, "error", err) + return nil + } + } + setting := &baRoundSetting{ + crs: curConfig.crs, + dkgSet: dkgSet, + round: round, + threshold: utils.GetBAThreshold(&types.Config{ + NotarySetSize: curConfig.notarySetSize}), + } + mgr.settingCache.Add(round, setting) + return setting +} + +func (mgr *agreementMgr) runBA(initRound uint64) { + // These are round based variables. + var ( + currentRound uint64 + nextRound = initRound + curConfig = mgr.config(initRound) + setting = &baRoundSetting{} + tickDuration time.Duration + ticker Ticker + ) + + // Check if this routine needs to awake in this round and prepare essential + // variables when yes. + checkRound := func() (isDKG bool) { + defer func() { + currentRound = nextRound + nextRound++ + }() + // Wait until the configuartion for next round is ready. + for { + if setting = mgr.generateSetting(nextRound); setting != nil { + break + } else { + mgr.logger.Debug("Round is not ready", "round", nextRound) + time.Sleep(1 * time.Second) + } + } + _, isDKG = setting.dkgSet[mgr.ID] + if isDKG { + mgr.logger.Info("Selected as dkg set", + "ID", mgr.ID, + "round", nextRound) + } else { + mgr.logger.Info("Not selected as dkg set", + "ID", mgr.ID, + "round", nextRound) + } + // Setup ticker + if tickDuration != curConfig.lambdaBA { + if ticker != nil { + ticker.Stop() + } + ticker = newTicker(mgr.gov, nextRound, TickerBA) + tickDuration = curConfig.lambdaBA + } + setting.ticker = ticker + return + } +Loop: + for { + select { + case <-mgr.ctx.Done(): + break Loop + default: + } + mgr.recv.isNotary = checkRound() + mgr.voteFilter = utils.NewVoteFilter() + mgr.voteFilter.Position.Round = currentRound + mgr.recv.emptyBlockHashMap = &sync.Map{} + if currentRound >= DKGDelayRound && mgr.recv.isNotary { + var err error + mgr.recv.npks, mgr.recv.psigSigner, err = + mgr.con.cfgModule.getDKGInfo(currentRound, false) + if err != nil { + mgr.logger.Warn("cannot get dkg info", + "round", currentRound, "error", err) + } + } else { + mgr.recv.npks = nil + mgr.recv.psigSigner = nil + } + // Run BA for this round. 
+ mgr.recv.restartNotary <- types.Position{ + Round: currentRound, + Height: math.MaxUint64, + } + if err := mgr.baRoutineForOneRound(setting); err != nil { + mgr.logger.Error("BA routine failed", + "error", err, + "nodeID", mgr.ID) + break Loop + } + } +} + +func (mgr *agreementMgr) baRoutineForOneRound( + setting *baRoundSetting) (err error) { + agr := mgr.baModule + recv := mgr.recv + oldPos := agr.agreementID() + restart := func(restartPos types.Position) (breakLoop bool, err error) { + if !isStop(restartPos) { + if restartPos.Height+1 >= mgr.config(setting.round).RoundEndHeight() { + for { + select { + case <-mgr.ctx.Done(): + break + default: + } + tipRound := mgr.bcModule.tipRound() + if tipRound > setting.round { + break + } else { + mgr.logger.Debug("Waiting blockChain to change round...", + "curRound", setting.round, + "tipRound", tipRound) + } + time.Sleep(100 * time.Millisecond) + } + // This round is finished. + breakLoop = true + return + } + if restartPos.Older(oldPos) { + // The restartNotary event is triggered by 'BlockConfirmed' + // of some older block. + return + } + } + var nextHeight uint64 + var nextTime time.Time + for { + // Make sure we are stoppable. + select { + case <-mgr.ctx.Done(): + breakLoop = true + return + default: + } + nextHeight, nextTime = mgr.bcModule.nextBlock() + if nextHeight != notReadyHeight { + if isStop(restartPos) { + break + } + if nextHeight > restartPos.Height { + break + } + } + mgr.logger.Debug("BlockChain not ready!!!", + "old", oldPos, "restart", restartPos, "next", nextHeight) + time.Sleep(100 * time.Millisecond) + } + nextPos := types.Position{ + Round: setting.round, + Height: nextHeight, + } + oldPos = nextPos + var leader types.NodeID + leader, err = mgr.calcLeader(setting.dkgSet, setting.crs, nextPos) + if err != nil { + return + } + time.Sleep(nextTime.Sub(time.Now())) + setting.ticker.Restart() + agr.restart(setting.dkgSet, setting.threshold, nextPos, leader, setting.crs) + return + } +Loop: + for { + select { + case <-mgr.ctx.Done(): + break Loop + default: + } + if agr.confirmed() { + // Block until receive restartPos + select { + case restartPos := <-recv.restartNotary: + breakLoop, err := restart(restartPos) + if err != nil { + return err + } + if breakLoop { + break Loop + } + case <-mgr.ctx.Done(): + break Loop + } + } + select { + case restartPos := <-recv.restartNotary: + breakLoop, err := restart(restartPos) + if err != nil { + return err + } + if breakLoop { + break Loop + } + default: + } + if !mgr.recv.isNotary { + select { + case <-setting.ticker.Tick(): + continue Loop + case <-mgr.ctx.Done(): + break Loop + } + } + if err = agr.nextState(); err != nil { + mgr.logger.Error("Failed to proceed to next state", + "nodeID", mgr.ID.String(), + "error", err) + break Loop + } + if agr.pullVotes() { + pos := agr.agreementID() + mgr.logger.Debug("Calling Network.PullVotes for syncing votes", + "position", pos) + mgr.network.PullVotes(pos) + } + for i := 0; i < agr.clocks(); i++ { + // Priority select for agreement.done(). + select { + case <-agr.done(): + continue Loop + default: + } + select { + case <-agr.done(): + continue Loop + case <-setting.ticker.Tick(): + } + } + } + return nil +} diff --git a/dex/consensus/core/agreement-state.go b/dex/consensus/core/agreement-state.go new file mode 100644 index 000000000..0d1ae58bc --- /dev/null +++ b/dex/consensus/core/agreement-state.go @@ -0,0 +1,213 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. 
+// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + + "github.com/dexon-foundation/dexon-consensus/core/types" +) + +// Errors for agreement state module. +var ( + ErrNoEnoughVoteInPrepareState = fmt.Errorf("no enough vote in prepare state") + ErrNoEnoughVoteInAckState = fmt.Errorf("no enough vote in ack state") +) + +// agreementStateType is the state of agreement +type agreementStateType int + +// agreementStateType enum. +const ( + stateFast agreementStateType = iota + stateFastVote + stateInitial + statePreCommit + stateCommit + stateForward + statePullVote + stateSleep +) + +type agreementState interface { + state() agreementStateType + nextState() (agreementState, error) + clocks() int +} + +//----- FastState ----- +type fastState struct { + a *agreementData +} + +func newFastState(a *agreementData) *fastState { + return &fastState{a: a} +} + +func (s *fastState) state() agreementStateType { return stateFast } +func (s *fastState) clocks() int { return 0 } +func (s *fastState) nextState() (agreementState, error) { + if func() bool { + s.a.lock.Lock() + defer s.a.lock.Unlock() + return s.a.isLeader + }() { + hash := s.a.recv.ProposeBlock() + if hash != types.NullBlockHash { + s.a.lock.Lock() + defer s.a.lock.Unlock() + s.a.recv.ProposeVote(types.NewVote(types.VoteFast, hash, s.a.period)) + } + } + return newFastVoteState(s.a), nil +} + +//----- FastVoteState ----- +type fastVoteState struct { + a *agreementData +} + +func newFastVoteState(a *agreementData) *fastVoteState { + return &fastVoteState{a: a} +} + +func (s *fastVoteState) state() agreementStateType { return stateFastVote } +func (s *fastVoteState) clocks() int { return 3 } +func (s *fastVoteState) nextState() (agreementState, error) { + return newInitialState(s.a), nil +} + +//----- InitialState ----- +type initialState struct { + a *agreementData +} + +func newInitialState(a *agreementData) *initialState { + return &initialState{a: a} +} + +func (s *initialState) state() agreementStateType { return stateInitial } +func (s *initialState) clocks() int { return 0 } +func (s *initialState) nextState() (agreementState, error) { + if func() bool { + s.a.lock.Lock() + defer s.a.lock.Unlock() + return !s.a.isLeader + }() { + // Leader already proposed block in fastState. 
+ hash := s.a.recv.ProposeBlock() + s.a.lock.Lock() + defer s.a.lock.Unlock() + s.a.recv.ProposeVote(types.NewVote(types.VoteInit, hash, s.a.period)) + } + return newPreCommitState(s.a), nil +} + +//----- PreCommitState ----- +type preCommitState struct { + a *agreementData +} + +func newPreCommitState(a *agreementData) *preCommitState { + return &preCommitState{a: a} +} + +func (s *preCommitState) state() agreementStateType { return statePreCommit } +func (s *preCommitState) clocks() int { return 2 } +func (s *preCommitState) nextState() (agreementState, error) { + s.a.lock.RLock() + defer s.a.lock.RUnlock() + if s.a.lockValue == types.SkipBlockHash || + s.a.lockValue == types.NullBlockHash { + hash := s.a.leader.leaderBlockHash() + s.a.recv.ProposeVote(types.NewVote(types.VotePreCom, hash, s.a.period)) + } else { + s.a.recv.ProposeVote(types.NewVote( + types.VotePreCom, s.a.lockValue, s.a.period)) + } + return newCommitState(s.a), nil +} + +//----- CommitState ----- +type commitState struct { + a *agreementData +} + +func newCommitState(a *agreementData) *commitState { + return &commitState{a: a} +} + +func (s *commitState) state() agreementStateType { return stateCommit } +func (s *commitState) clocks() int { return 2 } +func (s *commitState) nextState() (agreementState, error) { + s.a.lock.Lock() + defer s.a.lock.Unlock() + s.a.recv.ProposeVote(types.NewVote(types.VoteCom, s.a.lockValue, s.a.period)) + return newForwardState(s.a), nil +} + +// ----- ForwardState ----- +type forwardState struct { + a *agreementData +} + +func newForwardState(a *agreementData) *forwardState { + return &forwardState{a: a} +} + +func (s *forwardState) state() agreementStateType { return stateForward } +func (s *forwardState) clocks() int { return 4 } + +func (s *forwardState) nextState() (agreementState, error) { + return newPullVoteState(s.a), nil +} + +// ----- PullVoteState ----- +// pullVoteState is a special state to ensure the assumption in the consensus +// algorithm that every vote will eventually arrive for all nodes. +type pullVoteState struct { + a *agreementData +} + +func newPullVoteState(a *agreementData) *pullVoteState { + return &pullVoteState{a: a} +} + +func (s *pullVoteState) state() agreementStateType { return statePullVote } +func (s *pullVoteState) clocks() int { return 4 } + +func (s *pullVoteState) nextState() (agreementState, error) { + return s, nil +} + +// ----- SleepState ----- +// sleepState is a special state after BA has output and waits for restart. +type sleepState struct { + a *agreementData +} + +func newSleepState(a *agreementData) *sleepState { + return &sleepState{a: a} +} + +func (s *sleepState) state() agreementStateType { return stateSleep } +func (s *sleepState) clocks() int { return 65536 } + +func (s *sleepState) nextState() (agreementState, error) { + return s, nil +} diff --git a/dex/consensus/core/agreement-state_test.go b/dex/consensus/core/agreement-state_test.go new file mode 100644 index 000000000..32ce8ac88 --- /dev/null +++ b/dex/consensus/core/agreement-state_test.go @@ -0,0 +1,304 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. 
+// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/dexon-foundation/dexon-consensus/core/utils" +) + +type AgreementStateTestSuite struct { + suite.Suite + ID types.NodeID + signers map[types.NodeID]*utils.Signer + voteChan chan *types.Vote + blockChan chan common.Hash + confirmChan chan common.Hash + block map[common.Hash]*types.Block +} + +type agreementStateTestReceiver struct { + s *AgreementStateTestSuite + leader *leaderSelector +} + +func (r *agreementStateTestReceiver) VerifyPartialSignature(*types.Vote) (bool, bool) { + return true, false +} + +func (r *agreementStateTestReceiver) ProposeVote(vote *types.Vote) { + r.s.voteChan <- vote +} + +func (r *agreementStateTestReceiver) ProposeBlock() common.Hash { + block := r.s.proposeBlock(r.leader) + r.s.blockChan <- block.Hash + return block.Hash +} + +func (r *agreementStateTestReceiver) ConfirmBlock(block common.Hash, + _ map[types.NodeID]*types.Vote) { + r.s.confirmChan <- block +} + +func (r *agreementStateTestReceiver) PullBlocks(common.Hashes) {} + +func (r *agreementStateTestReceiver) ReportForkVote(v1, v2 *types.Vote) {} +func (r *agreementStateTestReceiver) ReportForkBlock(b1, b2 *types.Block) {} + +func (s *AgreementStateTestSuite) proposeBlock( + leader *leaderSelector) *types.Block { + block := &types.Block{ + ProposerID: s.ID, + Position: types.Position{Height: types.GenesisHeight}, + Hash: common.NewRandomHash(), + } + s.Require().NoError(s.signers[s.ID].SignCRS(block, leader.hashCRS)) + s.Require().NoError(s.signers[s.ID].SignBlock(block)) + s.block[block.Hash] = block + return block +} + +func (s *AgreementStateTestSuite) prepareVote( + nID types.NodeID, voteType types.VoteType, blockHash common.Hash, + period uint64) ( + vote *types.Vote) { + vote = types.NewVote(voteType, blockHash, period) + s.Require().NoError(s.signers[nID].SignVote(vote)) + return +} + +func (s *AgreementStateTestSuite) SetupTest() { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + s.ID = types.NewNodeID(prvKey.PublicKey()) + s.signers = map[types.NodeID]*utils.Signer{ + s.ID: utils.NewSigner(prvKey), + } + s.voteChan = make(chan *types.Vote, 100) + s.blockChan = make(chan common.Hash, 100) + s.confirmChan = make(chan common.Hash, 100) + s.block = make(map[common.Hash]*types.Block) +} + +func (s *AgreementStateTestSuite) newAgreement(numNode int) *agreement { + logger := &common.NullLogger{} + leader := newLeaderSelector(func(*types.Block, common.Hash) (bool, error) { + return true, nil + }, logger) + notarySet := make(map[types.NodeID]struct{}) + for i := 0; i < numNode-1; i++ { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + nID := types.NewNodeID(prvKey.PublicKey()) + notarySet[nID] = struct{}{} + s.signers[nID] = utils.NewSigner(prvKey) + } + notarySet[s.ID] = struct{}{} + agreement := newAgreement( + s.ID, + &agreementStateTestReceiver{ + s: s, 
+ leader: leader, + }, + leader, + s.signers[s.ID], + logger, + ) + agreement.restart(notarySet, + utils.GetBAThreshold(&types.Config{ + NotarySetSize: uint32(len(notarySet)), + }), + types.Position{Height: types.GenesisHeight}, + types.NodeID{}, common.NewRandomHash()) + return agreement +} + +func (s *AgreementStateTestSuite) TestFastStateLeader() { + a := s.newAgreement(4) + state := newFastState(a.data) + s.Equal(stateFast, state.state()) + s.Equal(0, state.clocks()) + + // Proposing a new block if it's leader. + a.data.period = 1 + a.data.isLeader = true + newState, err := state.nextState() + s.Require().NoError(err) + s.Require().Len(s.blockChan, 1) + proposedBlock := <-s.blockChan + s.NotEqual(common.Hash{}, proposedBlock) + s.Require().NoError(a.processBlock(s.block[proposedBlock])) + s.Require().Len(s.voteChan, 1) + proposedVote := <-s.voteChan + s.Equal(proposedBlock, proposedVote.BlockHash) + s.Equal(types.VoteFast, proposedVote.Type) + s.Equal(stateFastVote, newState.state()) +} + +func (s *AgreementStateTestSuite) TestFastStateNotLeader() { + a := s.newAgreement(4) + state := newFastState(a.data) + s.Equal(stateFast, state.state()) + s.Equal(0, state.clocks()) + + // Not proposing any block if it's not leader. + a.data.period = 1 + a.data.isLeader = false + newState, err := state.nextState() + s.Require().NoError(err) + s.Require().Len(s.blockChan, 0) + s.Equal(stateFastVote, newState.state()) +} + +func (s *AgreementStateTestSuite) TestFastVoteState() { + a := s.newAgreement(4) + state := newFastVoteState(a.data) + s.Equal(stateFastVote, state.state()) + s.Equal(3, state.clocks()) + + // The vote proposed is not implemented inside state. + a.data.period = 1 + newState, err := state.nextState() + s.Require().NoError(err) + s.Require().Len(s.voteChan, 0) + s.Equal(stateInitial, newState.state()) +} + +func (s *AgreementStateTestSuite) TestInitialState() { + a := s.newAgreement(4) + state := newInitialState(a.data) + s.Equal(stateInitial, state.state()) + s.Equal(0, state.clocks()) + + // Proposing a new block. + a.data.period = 1 + newState, err := state.nextState() + s.Require().NoError(err) + s.Require().Len(s.blockChan, 1) + proposedBlock := <-s.blockChan + s.NotEqual(common.Hash{}, proposedBlock) + s.Require().NoError(a.processBlock(s.block[proposedBlock])) + s.Equal(statePreCommit, newState.state()) +} + +func (s *AgreementStateTestSuite) TestPreCommitState() { + a := s.newAgreement(4) + state := newPreCommitState(a.data) + s.Equal(statePreCommit, state.state()) + s.Equal(2, state.clocks()) + + blocks := make([]*types.Block, 3) + for i := range blocks { + blocks[i] = s.proposeBlock(a.data.leader) + prv, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + signer := utils.NewSigner(prv) + blocks[i].ProposerID = types.NewNodeID(prv.PublicKey()) + s.Require().NoError(signer.SignCRS( + blocks[i], a.data.leader.hashCRS)) + s.Require().NoError(signer.SignBlock(blocks[i])) + s.Require().NoError(a.processBlock(blocks[i])) + } + + // If lockvalue == null, propose preCom-vote for the leader block. + a.data.lockValue = types.NullBlockHash + a.data.period = 1 + newState, err := state.nextState() + s.Require().NoError(err) + s.Require().Len(s.voteChan, 1) + vote := <-s.voteChan + s.Equal(types.VotePreCom, vote.Type) + s.NotEqual(types.SkipBlockHash, vote.BlockHash) + s.Equal(stateCommit, newState.state()) + + // If lockvalue == SKIP, propose preCom-vote for the leader block. 
+ a.data.lockValue = types.SkipBlockHash + a.data.period = 2 + newState, err = state.nextState() + s.Require().NoError(err) + s.Require().Len(s.voteChan, 1) + vote = <-s.voteChan + s.Equal(types.VotePreCom, vote.Type) + s.NotEqual(types.SkipBlockHash, vote.BlockHash) + s.Equal(stateCommit, newState.state()) + + // Else, preCom-vote on lockValue. + a.data.period = 3 + hash := common.NewRandomHash() + a.data.lockValue = hash + newState, err = state.nextState() + s.Require().NoError(err) + s.Require().Len(s.voteChan, 1) + vote = <-s.voteChan + s.Equal(types.VotePreCom, vote.Type) + s.Equal(hash, vote.BlockHash) + s.Equal(stateCommit, newState.state()) +} + +func (s *AgreementStateTestSuite) TestCommitState() { + a := s.newAgreement(4) + state := newCommitState(a.data) + s.Equal(stateCommit, state.state()) + s.Equal(2, state.clocks()) + + // Commit on lock value. + a.data.period = 1 + a.data.lockValue = common.NewRandomHash() + newState, err := state.nextState() + s.Require().NoError(err) + s.Require().Len(s.voteChan, 1) + vote := <-s.voteChan + s.Equal(types.VoteCom, vote.Type) + s.Equal(a.data.lockValue, vote.BlockHash) + s.Equal(stateForward, newState.state()) +} + +func (s *AgreementStateTestSuite) TestForwardState() { + a := s.newAgreement(4) + state := newForwardState(a.data) + s.Equal(stateForward, state.state()) + s.Equal(4, state.clocks()) + + newState, err := state.nextState() + s.Require().NoError(err) + s.Require().Len(s.voteChan, 0) + s.Equal(statePullVote, newState.state()) +} + +func (s *AgreementStateTestSuite) TestPullVoteState() { + a := s.newAgreement(4) + state := newPullVoteState(a.data) + s.Equal(statePullVote, state.state()) + s.Equal(4, state.clocks()) + + newState, err := state.nextState() + s.Require().NoError(err) + s.Require().Len(s.voteChan, 0) + s.Equal(statePullVote, newState.state()) +} + +func TestAgreementState(t *testing.T) { + suite.Run(t, new(AgreementStateTestSuite)) +} diff --git a/dex/consensus/core/agreement.go b/dex/consensus/core/agreement.go new file mode 100644 index 000000000..d90afc610 --- /dev/null +++ b/dex/consensus/core/agreement.go @@ -0,0 +1,797 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/dexon-foundation/dexon-consensus/core/utils" +) + +// closedchan is a reusable closed channel. +var closedchan = make(chan struct{}) + +func init() { + close(closedchan) +} + +// Errors for agreement module. 
+var ( + ErrInvalidVote = fmt.Errorf("invalid vote") + ErrNotInNotarySet = fmt.Errorf("not in notary set") + ErrIncorrectVoteSignature = fmt.Errorf("incorrect vote signature") + ErrIncorrectVotePartialSignature = fmt.Errorf("incorrect vote psig") + ErrMismatchBlockPosition = fmt.Errorf("mismatch block position") +) + +// ErrFork for fork error in agreement. +type ErrFork struct { + nID types.NodeID + old, new common.Hash +} + +func (e *ErrFork) Error() string { + return fmt.Sprintf("fork is found for %s, old %s, new %s", + e.nID.String(), e.old, e.new) +} + +// ErrForkVote for fork vote error in agreement. +type ErrForkVote struct { + nID types.NodeID + old, new *types.Vote +} + +func (e *ErrForkVote) Error() string { + return fmt.Sprintf("fork vote is found for %s, old %s, new %s", + e.nID.String(), e.old, e.new) +} + +func newVoteListMap() []map[types.NodeID]*types.Vote { + listMap := make([]map[types.NodeID]*types.Vote, types.MaxVoteType) + for idx := range listMap { + listMap[idx] = make(map[types.NodeID]*types.Vote) + } + return listMap +} + +// agreementReceiver is the interface receiving agreement event. +type agreementReceiver interface { + ProposeVote(vote *types.Vote) + ProposeBlock() common.Hash + // ConfirmBlock is called with lock hold. User can safely use all data within + // agreement module. + ConfirmBlock(common.Hash, map[types.NodeID]*types.Vote) + PullBlocks(common.Hashes) + ReportForkVote(v1, v2 *types.Vote) + ReportForkBlock(b1, b2 *types.Block) + VerifyPartialSignature(vote *types.Vote) (bool, bool) +} + +type pendingBlock struct { + block *types.Block + receivedTime time.Time +} + +type pendingVote struct { + vote *types.Vote + receivedTime time.Time +} + +// agreementData is the data for agreementState. +type agreementData struct { + recv agreementReceiver + + ID types.NodeID + isLeader bool + leader *leaderSelector + lockValue common.Hash + lockIter uint64 + period uint64 + requiredVote int + votes map[uint64][]map[types.NodeID]*types.Vote + lock sync.RWMutex + blocks map[types.NodeID]*types.Block + blocksLock sync.Mutex +} + +// agreement is the agreement protocal describe in the Crypto Shuffle Algorithm. +type agreement struct { + state agreementState + data *agreementData + aID *atomic.Value + doneChan chan struct{} + notarySet map[types.NodeID]struct{} + hasVoteFast bool + hasOutput bool + lock sync.RWMutex + pendingBlock []pendingBlock + pendingVote []pendingVote + pendingAgreementResult map[types.Position]*types.AgreementResult + candidateBlock map[common.Hash]*types.Block + fastForward chan uint64 + signer *utils.Signer + logger common.Logger +} + +// newAgreement creates a agreement instance. 
+func newAgreement( + ID types.NodeID, + recv agreementReceiver, + leader *leaderSelector, + signer *utils.Signer, + logger common.Logger) *agreement { + agreement := &agreement{ + data: &agreementData{ + recv: recv, + ID: ID, + leader: leader, + }, + aID: &atomic.Value{}, + pendingAgreementResult: make(map[types.Position]*types.AgreementResult), + candidateBlock: make(map[common.Hash]*types.Block), + fastForward: make(chan uint64, 1), + signer: signer, + logger: logger, + } + agreement.stop() + return agreement +} + +// restart the agreement +func (a *agreement) restart( + notarySet map[types.NodeID]struct{}, + threshold int, aID types.Position, leader types.NodeID, + crs common.Hash) { + if !func() bool { + a.lock.Lock() + defer a.lock.Unlock() + if !isStop(aID) { + oldAID := a.agreementID() + if !isStop(oldAID) && !aID.Newer(oldAID) { + return false + } + } + a.logger.Debug("Restarting BA", + "notarySet", notarySet, "position", aID, "leader", leader) + a.data.lock.Lock() + defer a.data.lock.Unlock() + a.data.blocksLock.Lock() + defer a.data.blocksLock.Unlock() + a.data.votes = make(map[uint64][]map[types.NodeID]*types.Vote) + a.data.votes[1] = newVoteListMap() + a.data.period = 2 + a.data.blocks = make(map[types.NodeID]*types.Block) + a.data.requiredVote = threshold + a.data.leader.restart(crs) + a.data.lockValue = types.SkipBlockHash + a.data.lockIter = 0 + a.data.isLeader = a.data.ID == leader + if a.doneChan != nil { + close(a.doneChan) + } + a.doneChan = make(chan struct{}) + a.fastForward = make(chan uint64, 1) + a.hasVoteFast = false + a.hasOutput = false + a.state = newFastState(a.data) + a.notarySet = notarySet + a.candidateBlock = make(map[common.Hash]*types.Block) + a.aID.Store(struct { + pos types.Position + leader types.NodeID + }{aID, leader}) + return true + }() { + return + } + + if isStop(aID) { + return + } + + var result *types.AgreementResult + func() { + a.lock.Lock() + defer a.lock.Unlock() + newPendingAgreementResult := make( + map[types.Position]*types.AgreementResult) + for pos, agr := range a.pendingAgreementResult { + if pos.Newer(aID) { + newPendingAgreementResult[pos] = agr + } else if pos == aID { + result = agr + } + } + a.pendingAgreementResult = newPendingAgreementResult + }() + + expireTime := time.Now().Add(-10 * time.Second) + replayBlock := make([]*types.Block, 0) + func() { + a.lock.Lock() + defer a.lock.Unlock() + newPendingBlock := make([]pendingBlock, 0) + for _, pending := range a.pendingBlock { + if aID.Newer(pending.block.Position) { + continue + } else if pending.block.Position == aID { + if result == nil || + result.Position.Round < DKGDelayRound || + result.BlockHash == pending.block.Hash { + replayBlock = append(replayBlock, pending.block) + } + } else if pending.receivedTime.After(expireTime) { + newPendingBlock = append(newPendingBlock, pending) + } + } + a.pendingBlock = newPendingBlock + }() + + replayVote := make([]*types.Vote, 0) + func() { + a.lock.Lock() + defer a.lock.Unlock() + newPendingVote := make([]pendingVote, 0) + for _, pending := range a.pendingVote { + if aID.Newer(pending.vote.Position) { + continue + } else if pending.vote.Position == aID { + if result == nil || result.Position.Round < DKGDelayRound { + replayVote = append(replayVote, pending.vote) + } + } else if pending.receivedTime.After(expireTime) { + newPendingVote = append(newPendingVote, pending) + } + } + a.pendingVote = newPendingVote + }() + + for _, block := range replayBlock { + if err := a.processBlock(block); err != nil { + a.logger.Error("Failed to 
process block when restarting agreement", + "block", block) + } + } + + if result != nil { + if err := a.processAgreementResult(result); err != nil { + a.logger.Error("Failed to process agreement result when retarting", + "result", result) + } + } + + for _, vote := range replayVote { + if err := a.processVote(vote); err != nil { + a.logger.Error("Failed to process vote when restarting agreement", + "vote", vote) + } + } +} + +func (a *agreement) stop() { + a.restart(make(map[types.NodeID]struct{}), int(math.MaxInt32), + types.Position{ + Height: math.MaxUint64, + }, + types.NodeID{}, common.Hash{}) +} + +func isStop(aID types.Position) bool { + return aID.Height == math.MaxUint64 +} + +// clocks returns how many time this state is required. +func (a *agreement) clocks() int { + a.data.lock.RLock() + defer a.data.lock.RUnlock() + scale := int(a.data.period) - 1 + if a.state.state() == stateForward { + scale = 1 + } + if scale < 1 { + // just in case. + scale = 1 + } + // 10 is a magic number derived from many years of experience. + if scale > 10 { + scale = 10 + } + return a.state.clocks() * scale +} + +// pullVotes returns if current agreement requires more votes to continue. +func (a *agreement) pullVotes() bool { + a.data.lock.RLock() + defer a.data.lock.RUnlock() + return a.state.state() == statePullVote || + a.state.state() == stateInitial || + (a.state.state() == statePreCommit && (a.data.period%3) == 0) +} + +// agreementID returns the current agreementID. +func (a *agreement) agreementID() types.Position { + return a.aID.Load().(struct { + pos types.Position + leader types.NodeID + }).pos +} + +// leader returns the current leader. +func (a *agreement) leader() types.NodeID { + return a.aID.Load().(struct { + pos types.Position + leader types.NodeID + }).leader +} + +// nextState is called at the specific clock time. +func (a *agreement) nextState() (err error) { + a.lock.Lock() + defer a.lock.Unlock() + if a.hasOutput { + a.state = newSleepState(a.data) + return + } + a.state, err = a.state.nextState() + return +} + +func (a *agreement) sanityCheck(vote *types.Vote) error { + if vote.Type >= types.MaxVoteType { + return ErrInvalidVote + } + ok, err := utils.VerifyVoteSignature(vote) + if err != nil { + return err + } + if !ok { + return ErrIncorrectVoteSignature + } + if vote.Position.Round != a.agreementID().Round { + // TODO(jimmy): maybe we can verify partial signature at agreement-mgr. + return nil + } + if ok, report := a.data.recv.VerifyPartialSignature(vote); !ok { + if report { + return ErrIncorrectVotePartialSignature + } + return ErrSkipButNoError + } + return nil +} + +func (a *agreement) checkForkVote(vote *types.Vote) ( + alreadyExist bool, err error) { + a.data.lock.RLock() + defer a.data.lock.RUnlock() + if votes, exist := a.data.votes[vote.Period]; exist { + if oldVote, exist := votes[vote.Type][vote.ProposerID]; exist { + alreadyExist = true + if vote.BlockHash != oldVote.BlockHash { + a.data.recv.ReportForkVote(oldVote, vote) + err = &ErrForkVote{vote.ProposerID, oldVote, vote} + return + } + } + } + return +} + +// prepareVote prepares a vote. 
+func (a *agreement) prepareVote(vote *types.Vote) (err error) { + vote.Position = a.agreementID() + err = a.signer.SignVote(vote) + return +} + +func (a *agreement) updateFilter(filter *utils.VoteFilter) { + if isStop(a.agreementID()) { + return + } + a.lock.RLock() + defer a.lock.RUnlock() + a.data.lock.RLock() + defer a.data.lock.RUnlock() + filter.Confirm = a.hasOutput + filter.LockIter = a.data.lockIter + filter.Period = a.data.period + filter.Position.Height = a.agreementID().Height +} + +// processVote is the entry point for processing Vote. +func (a *agreement) processVote(vote *types.Vote) error { + a.lock.Lock() + defer a.lock.Unlock() + if err := a.sanityCheck(vote); err != nil { + return err + } + aID := a.agreementID() + + // Agreement module has stopped. + if isStop(aID) { + // Hacky way to not drop first votes when round just begins. + if vote.Position.Round == aID.Round { + a.pendingVote = append(a.pendingVote, pendingVote{ + vote: vote, + receivedTime: time.Now().UTC(), + }) + return nil + } + return ErrSkipButNoError + } + if vote.Position != aID { + if aID.Newer(vote.Position) { + return nil + } + a.pendingVote = append(a.pendingVote, pendingVote{ + vote: vote, + receivedTime: time.Now().UTC(), + }) + return nil + } + exist, err := a.checkForkVote(vote) + if err != nil { + return err + } + if exist { + return nil + } + + a.data.lock.Lock() + defer a.data.lock.Unlock() + if _, exist := a.data.votes[vote.Period]; !exist { + a.data.votes[vote.Period] = newVoteListMap() + } + if _, exist := a.data.votes[vote.Period][vote.Type][vote.ProposerID]; exist { + return nil + } + a.data.votes[vote.Period][vote.Type][vote.ProposerID] = vote + if !a.hasOutput && + (vote.Type == types.VoteCom || + vote.Type == types.VoteFast || + vote.Type == types.VoteFastCom) { + if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok && + hash != types.SkipBlockHash { + if vote.Type == types.VoteFast { + if !a.hasVoteFast { + if a.state.state() == stateFast || + a.state.state() == stateFastVote { + a.data.recv.ProposeVote( + types.NewVote(types.VoteFastCom, hash, vote.Period)) + a.hasVoteFast = true + + } + if a.data.lockIter == 0 { + a.data.lockValue = hash + a.data.lockIter = 1 + } + } + } else { + a.hasOutput = true + a.data.recv.ConfirmBlock(hash, + a.data.votes[vote.Period][vote.Type]) + if a.doneChan != nil { + close(a.doneChan) + a.doneChan = nil + } + } + return nil + } + } else if a.hasOutput { + return nil + } + + // Check if the agreement requires fast-forwarding. + if len(a.fastForward) > 0 { + return nil + } + if vote.Type == types.VotePreCom { + if vote.Period < a.data.lockIter { + // This PreCom is useless for us. + return nil + } + if hash, ok := a.data.countVoteNoLock(vote.Period, vote.Type); ok && + hash != types.SkipBlockHash { + // Condition 1. + if vote.Period > a.data.lockIter { + a.data.lockValue = hash + a.data.lockIter = vote.Period + } + // Condition 2. + if vote.Period > a.data.period { + a.fastForward <- vote.Period + if a.doneChan != nil { + close(a.doneChan) + a.doneChan = nil + } + return nil + } + } + } + // Condition 3. 
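+	// (With requiredVote = 3, e.g., com-votes at period p for {hashA, hashB,
+	// SKIP} are enough to enter this branch: condition 3 counts com-votes
+	// regardless of hash, treats the split as evidence that period p is
+	// unlikely to decide, pulls any candidate blocks we have not seen, and
+	// fast-forwards to period p+1.)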
+ if vote.Type == types.VoteCom && vote.Period >= a.data.period && + len(a.data.votes[vote.Period][types.VoteCom]) >= a.data.requiredVote { + hashes := common.Hashes{} + addPullBlocks := func(voteType types.VoteType) { + for _, vote := range a.data.votes[vote.Period][voteType] { + if vote.BlockHash == types.NullBlockHash || + vote.BlockHash == types.SkipBlockHash { + continue + } + if _, found := a.findCandidateBlockNoLock(vote.BlockHash); !found { + hashes = append(hashes, vote.BlockHash) + } + } + } + addPullBlocks(types.VotePreCom) + addPullBlocks(types.VoteCom) + if len(hashes) > 0 { + a.data.recv.PullBlocks(hashes) + } + a.fastForward <- vote.Period + 1 + if a.doneChan != nil { + close(a.doneChan) + a.doneChan = nil + } + return nil + } + return nil +} + +func (a *agreement) processFinalizedBlock(block *types.Block) { + a.lock.Lock() + defer a.lock.Unlock() + if a.hasOutput { + return + } + aID := a.agreementID() + if aID.Older(block.Position) { + return + } + a.addCandidateBlockNoLock(block) + a.hasOutput = true + a.data.lock.Lock() + defer a.data.lock.Unlock() + a.data.recv.ConfirmBlock(block.Hash, nil) + if a.doneChan != nil { + close(a.doneChan) + a.doneChan = nil + } +} + +func (a *agreement) processAgreementResult(result *types.AgreementResult) error { + a.lock.Lock() + defer a.lock.Unlock() + aID := a.agreementID() + if result.Position.Older(aID) { + return nil + } else if result.Position.Newer(aID) { + a.pendingAgreementResult[result.Position] = result + return nil + } + if a.hasOutput { + return nil + } + a.data.lock.Lock() + defer a.data.lock.Unlock() + if _, exist := a.findCandidateBlockNoLock(result.BlockHash); !exist { + a.data.recv.PullBlocks(common.Hashes{result.BlockHash}) + } + a.hasOutput = true + a.data.recv.ConfirmBlock(result.BlockHash, nil) + if a.doneChan != nil { + close(a.doneChan) + a.doneChan = nil + } + return nil +} + +func (a *agreement) done() <-chan struct{} { + a.lock.Lock() + defer a.lock.Unlock() + select { + case period := <-a.fastForward: + a.data.lock.Lock() + defer a.data.lock.Unlock() + if period <= a.data.period { + break + } + a.data.setPeriod(period) + a.state = newPreCommitState(a.data) + a.doneChan = make(chan struct{}) + return closedchan + default: + } + if a.doneChan == nil { + return closedchan + } + return a.doneChan +} + +func (a *agreement) confirmed() bool { + a.lock.RLock() + defer a.lock.RUnlock() + return a.confirmedNoLock() +} + +func (a *agreement) confirmedNoLock() bool { + return a.hasOutput +} + +// processBlock is the entry point for processing Block. +func (a *agreement) processBlock(block *types.Block) error { + checkSkip := func() bool { + aID := a.agreementID() + if block.Position != aID { + // Agreement module has stopped. + if !isStop(aID) { + if aID.Newer(block.Position) { + return true + } + } + } + return false + } + if checkSkip() { + return nil + } + if err := utils.VerifyBlockSignature(block); err != nil { + return err + } + + a.lock.Lock() + defer a.lock.Unlock() + a.data.blocksLock.Lock() + defer a.data.blocksLock.Unlock() + aID := a.agreementID() + // a.agreementID might change during lock, so we need to checkSkip again. 
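+	// (checkSkip reads the agreement ID from an atomic.Value without holding
+	// a.lock, so a concurrent restart() may have advanced the position
+	// between the early check above and acquiring these locks — a
+	// double-checked pattern, hence the re-check below.)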
+	if checkSkip() {
+		return nil
+	} else if aID != block.Position {
+		a.pendingBlock = append(a.pendingBlock, pendingBlock{
+			block:        block,
+			receivedTime: time.Now().UTC(),
+		})
+		return nil
+	} else if a.confirmedNoLock() {
+		return nil
+	}
+	if b, exist := a.data.blocks[block.ProposerID]; exist {
+		if b.Hash != block.Hash {
+			a.data.recv.ReportForkBlock(b, block)
+			return &ErrFork{block.ProposerID, b.Hash, block.Hash}
+		}
+		return nil
+	}
+	if err := a.data.leader.processBlock(block); err != nil {
+		return err
+	}
+	a.data.blocks[block.ProposerID] = block
+	a.addCandidateBlockNoLock(block)
+	if block.ProposerID != a.data.ID &&
+		(a.state.state() == stateFast || a.state.state() == stateFastVote) &&
+		block.ProposerID == a.leader() {
+		go func() {
+			for func() bool {
+				if aID != a.agreementID() {
+					return false
+				}
+				a.lock.RLock()
+				defer a.lock.RUnlock()
+				if a.state.state() != stateFast && a.state.state() != stateFastVote {
+					return false
+				}
+				a.data.lock.RLock()
+				defer a.data.lock.RUnlock()
+				a.data.blocksLock.Lock()
+				defer a.data.blocksLock.Unlock()
+				block, exist := a.data.blocks[a.leader()]
+				if !exist {
+					return true
+				}
+				ok, err := a.data.leader.validLeader(block, a.data.leader.hashCRS)
+				if err != nil {
+					a.logger.Error("Error checking validLeader for Fast BA",
+						"error", err, "block", block)
+					return false
+				}
+				if ok {
+					a.data.recv.ProposeVote(
+						types.NewVote(types.VoteFast, block.Hash, a.data.period))
+					return false
+				}
+				return true
+			}() {
+				// TODO(jimmy): retry interval should be related to configurations.
+				time.Sleep(250 * time.Millisecond)
+			}
+		}()
+	}
+	return nil
+}
+
+func (a *agreement) addCandidateBlock(block *types.Block) {
+	a.lock.Lock()
+	defer a.lock.Unlock()
+	a.addCandidateBlockNoLock(block)
+}
+
+func (a *agreement) addCandidateBlockNoLock(block *types.Block) {
+	a.candidateBlock[block.Hash] = block
+}
+
+func (a *agreement) findCandidateBlockNoLock(
+	hash common.Hash) (*types.Block, bool) {
+	b, e := a.candidateBlock[hash]
+	return b, e
+}
+
+// findBlockNoLock finds a block in both the candidate blocks and the pending
+// blocks kept by the leader-selector. A block might be confirmed by others
+// while we can't verify its validity yet.
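+// The candidate map is consulted first; on a miss we fall back to the
+// leader-selector's pending pool, where blocks that have not yet passed the
+// validLeader check are parked (see TestFindBlockInPendingSet).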
+func (a *agreement) findBlockNoLock(hash common.Hash) (*types.Block, bool) {
+	b, e := a.findCandidateBlockNoLock(hash)
+	if !e {
+		b, e = a.data.leader.findPendingBlock(hash)
+	}
+	return b, e
+}
+
+func (a *agreementData) countVote(period uint64, voteType types.VoteType) (
+	blockHash common.Hash, ok bool) {
+	a.lock.RLock()
+	defer a.lock.RUnlock()
+	return a.countVoteNoLock(period, voteType)
+}
+
+func (a *agreementData) countVoteNoLock(
+	period uint64, voteType types.VoteType) (blockHash common.Hash, ok bool) {
+	votes, exist := a.votes[period]
+	if !exist {
+		return
+	}
+	candidate := make(map[common.Hash]int)
+	for _, vote := range votes[voteType] {
+		if _, exist := candidate[vote.BlockHash]; !exist {
+			candidate[vote.BlockHash] = 0
+		}
+		candidate[vote.BlockHash]++
+	}
+	for candidateHash, votes := range candidate {
+		if votes >= a.requiredVote {
+			blockHash = candidateHash
+			ok = true
+			return
+		}
+	}
+	return
+}
+
+func (a *agreementData) setPeriod(period uint64) {
+	for i := a.period + 1; i <= period; i++ {
+		if _, exist := a.votes[i]; !exist {
+			a.votes[i] = newVoteListMap()
+		}
+	}
+	a.period = period
+}
diff --git a/dex/consensus/core/agreement_test.go b/dex/consensus/core/agreement_test.go
new file mode 100644
index 000000000..9332762c3
--- /dev/null
+++ b/dex/consensus/core/agreement_test.go
@@ -0,0 +1,583 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+	"testing"
+	"time"
+
+	"github.com/dexon-foundation/dexon-consensus/common"
+	"github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa"
+	"github.com/dexon-foundation/dexon-consensus/core/types"
+	"github.com/dexon-foundation/dexon-consensus/core/utils"
+	"github.com/stretchr/testify/suite"
+)
+
+// agreementTestReceiver implements core.agreementReceiver.
+type agreementTestReceiver struct {
+	s              *AgreementTestSuite
+	agreementIndex int
+}
+
+func (r *agreementTestReceiver) VerifyPartialSignature(*types.Vote) (bool, bool) {
+	return true, false
+}
+
+func (r *agreementTestReceiver) ProposeVote(vote *types.Vote) {
+	vote.Position = r.s.agreementID
+	r.s.voteChan <- vote
+}
+
+func (r *agreementTestReceiver) ProposeBlock() common.Hash {
+	block := r.s.proposeBlock(
+		r.s.agreement[r.agreementIndex].data.ID,
+		r.s.agreement[r.agreementIndex].data.leader.hashCRS,
+		[]byte{})
+	r.s.blockChan <- block.Hash
+	return block.Hash
+}
+
+func (r *agreementTestReceiver) ConfirmBlock(block common.Hash,
+	_ map[types.NodeID]*types.Vote) {
+	r.s.confirmChan <- block
+}
+
+func (r *agreementTestReceiver) PullBlocks(hashes common.Hashes) {
+	for _, hash := range hashes {
+		r.s.pulledBlocks[hash] = struct{}{}
+	}
+}
+
+// agreementTestForkReporter implements core.forkReporter.
+type agreementTestForkReporter struct { + s *AgreementTestSuite +} + +func (r *agreementTestReceiver) ReportForkVote(v1, v2 *types.Vote) { + r.s.forkVoteChan <- v1.BlockHash + r.s.forkVoteChan <- v2.BlockHash +} + +func (r *agreementTestReceiver) ReportForkBlock(b1, b2 *types.Block) { + r.s.forkBlockChan <- b1.Hash + r.s.forkBlockChan <- b2.Hash +} + +func (s *AgreementTestSuite) proposeBlock( + nID types.NodeID, crs common.Hash, payload []byte) *types.Block { + block := &types.Block{ + ProposerID: nID, + Position: types.Position{Height: types.GenesisHeight}, + Payload: payload, + } + signer, exist := s.signers[block.ProposerID] + s.Require().True(exist) + s.Require().NoError(signer.SignCRS(block, crs)) + s.Require().NoError(signer.SignBlock(block)) + s.block[block.Hash] = block + + return block +} + +type AgreementTestSuite struct { + suite.Suite + ID types.NodeID + signers map[types.NodeID]*utils.Signer + voteChan chan *types.Vote + blockChan chan common.Hash + confirmChan chan common.Hash + forkVoteChan chan common.Hash + forkBlockChan chan common.Hash + block map[common.Hash]*types.Block + pulledBlocks map[common.Hash]struct{} + agreement []*agreement + agreementID types.Position + defaultValidLeader validLeaderFn +} + +func (s *AgreementTestSuite) SetupTest() { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + s.ID = types.NewNodeID(prvKey.PublicKey()) + s.signers = map[types.NodeID]*utils.Signer{ + s.ID: utils.NewSigner(prvKey), + } + s.voteChan = make(chan *types.Vote, 100) + s.blockChan = make(chan common.Hash, 100) + s.confirmChan = make(chan common.Hash, 100) + s.forkVoteChan = make(chan common.Hash, 100) + s.forkBlockChan = make(chan common.Hash, 100) + s.block = make(map[common.Hash]*types.Block) + s.pulledBlocks = make(map[common.Hash]struct{}) + s.agreementID = types.Position{Height: types.GenesisHeight} + s.defaultValidLeader = func(*types.Block, common.Hash) (bool, error) { + return true, nil + } +} + +func (s *AgreementTestSuite) newAgreement( + numNotarySet, leaderIdx int, validLeader validLeaderFn) (*agreement, types.NodeID) { + s.Require().True(leaderIdx < numNotarySet) + logger := &common.NullLogger{} + leader := newLeaderSelector(validLeader, logger) + agreementIdx := len(s.agreement) + var leaderNode types.NodeID + notarySet := make(map[types.NodeID]struct{}) + for i := 0; i < numNotarySet-1; i++ { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + nID := types.NewNodeID(prvKey.PublicKey()) + notarySet[nID] = struct{}{} + s.signers[nID] = utils.NewSigner(prvKey) + if i == leaderIdx-1 { + leaderNode = nID + } + } + if leaderIdx == 0 { + leaderNode = s.ID + } + notarySet[s.ID] = struct{}{} + agreement := newAgreement( + s.ID, + &agreementTestReceiver{ + s: s, + agreementIndex: agreementIdx, + }, + leader, + s.signers[s.ID], + logger, + ) + agreement.restart(notarySet, utils.GetBAThreshold(&types.Config{ + NotarySetSize: uint32(len(notarySet)), + }), s.agreementID, leaderNode, + common.NewRandomHash()) + s.agreement = append(s.agreement, agreement) + return agreement, leaderNode +} + +func (s *AgreementTestSuite) copyVote( + vote *types.Vote, proposer types.NodeID) *types.Vote { + v := vote.Clone() + s.signers[proposer].SignVote(v) + return v +} + +func (s *AgreementTestSuite) prepareVote( + nID types.NodeID, voteType types.VoteType, blockHash common.Hash, + period uint64) ( + vote *types.Vote) { + vote = types.NewVote(voteType, blockHash, period) + vote.Position = types.Position{Height: types.GenesisHeight} + 
s.Require().NoError(s.signers[nID].SignVote(vote))
+	return
+}
+
+func (s *AgreementTestSuite) TestSimpleConfirm() {
+	a, leaderNode := s.newAgreement(4, 0, s.defaultValidLeader)
+	s.Require().Equal(s.ID, leaderNode)
+	// FastState
+	a.nextState()
+	// FastVoteState
+	s.Require().Len(s.blockChan, 1)
+	blockHash := <-s.blockChan
+	block, exist := s.block[blockHash]
+	s.Require().True(exist)
+	s.Require().Equal(s.ID, block.ProposerID)
+	s.Require().NoError(a.processBlock(block))
+	// Wait some time for the goroutine in processBlock to finish.
+	time.Sleep(500 * time.Millisecond)
+	s.Require().Len(s.voteChan, 1)
+	fastVote := <-s.voteChan
+	s.Equal(types.VoteFast, fastVote.Type)
+	s.Equal(blockHash, fastVote.BlockHash)
+	s.Require().Len(s.voteChan, 0)
+	a.nextState()
+	// InitialState
+	a.nextState()
+	// PreCommitState
+	a.nextState()
+	// CommitState
+	s.Require().Len(s.voteChan, 1)
+	vote := <-s.voteChan
+	s.Equal(types.VotePreCom, vote.Type)
+	s.Equal(blockHash, vote.BlockHash)
+	// Fast-votes should be ignored.
+	for nID := range s.signers {
+		v := s.copyVote(fastVote, nID)
+		s.Require().NoError(a.processVote(v))
+	}
+	s.Require().Len(s.voteChan, 0)
+	s.Equal(uint64(1), a.data.lockIter)
+	for nID := range s.signers {
+		v := s.copyVote(vote, nID)
+		s.Require().NoError(a.processVote(v))
+	}
+	a.nextState()
+	// ForwardState
+	s.Require().Len(s.voteChan, 1)
+	vote = <-s.voteChan
+	s.Equal(types.VoteCom, vote.Type)
+	s.Equal(blockHash, vote.BlockHash)
+	s.Equal(blockHash, a.data.lockValue)
+	s.Equal(uint64(2), a.data.lockIter)
+	for nID := range s.signers {
+		v := s.copyVote(vote, nID)
+		s.Require().NoError(a.processVote(v))
+	}
+	// We have enough of Com-Votes.
+	s.Require().Len(s.confirmChan, 1)
+	confirmBlock := <-s.confirmChan
+	s.Equal(blockHash, confirmBlock)
+}
+
+func (s *AgreementTestSuite) TestPartitionOnCommitVote() {
+	a, _ := s.newAgreement(4, -1, s.defaultValidLeader)
+	// FastState
+	a.nextState()
+	// FastVoteState
+	a.nextState()
+	// InitialState
+	a.nextState()
+	// PreCommitState
+	s.Require().Len(s.blockChan, 1)
+	blockHash := <-s.blockChan
+	block, exist := s.block[blockHash]
+	s.Require().True(exist)
+	s.Require().NoError(a.processBlock(block))
+	s.Require().Len(s.voteChan, 1)
+	vote := <-s.voteChan
+	s.Equal(types.VoteInit, vote.Type)
+	s.Equal(blockHash, vote.BlockHash)
+	a.nextState()
+	// CommitState
+	s.Require().Len(s.voteChan, 1)
+	vote = <-s.voteChan
+	s.Equal(types.VotePreCom, vote.Type)
+	s.Equal(blockHash, vote.BlockHash)
+	for nID := range s.signers {
+		v := s.copyVote(vote, nID)
+		s.Require().NoError(a.processVote(v))
+	}
+	a.nextState()
+	// ForwardState
+	s.Require().Len(s.voteChan, 1)
+	vote = <-s.voteChan
+	s.Equal(types.VoteCom, vote.Type)
+	s.Equal(blockHash, vote.BlockHash)
+	s.Equal(blockHash, a.data.lockValue)
+	s.Equal(uint64(2), a.data.lockIter)
+	// PullVoteState
+	a.nextState()
+	s.True(a.pullVotes())
+	s.Require().Len(s.voteChan, 0)
+}
+
+func (s *AgreementTestSuite) TestFastConfirmLeader() {
+	a, leaderNode := s.newAgreement(4, 0, s.defaultValidLeader)
+	s.Require().Equal(s.ID, leaderNode)
+	// FastState
+	a.nextState()
+	// FastVoteState
+	s.Require().Len(s.blockChan, 1)
+	blockHash := <-s.blockChan
+	block, exist := s.block[blockHash]
+	s.Require().True(exist)
+	s.Require().Equal(s.ID, block.ProposerID)
+	s.Require().NoError(a.processBlock(block))
+	// Wait some time for the goroutine in processBlock to finish.
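+	// (The proposer goroutine polls the leader's block every 250ms — see the
+	// retry loop in processBlock — so 500ms gives it at least one pass.)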
+ time.Sleep(500 * time.Millisecond) + s.Require().Len(s.voteChan, 1) + vote := <-s.voteChan + s.Equal(types.VoteFast, vote.Type) + s.Equal(blockHash, vote.BlockHash) + s.Require().Len(s.voteChan, 0) + for nID := range s.signers { + v := s.copyVote(vote, nID) + s.Require().NoError(a.processVote(v)) + } + // We have enough of Fast-Votes. + s.Require().Len(s.voteChan, 1) + vote = <-s.voteChan + s.Equal(types.VoteFastCom, vote.Type) + s.Equal(blockHash, vote.BlockHash) + for nID := range s.signers { + v := s.copyVote(vote, nID) + s.Require().NoError(a.processVote(v)) + } + // We have enough of Fast-ConfirmVotes. + s.Require().Len(s.confirmChan, 1) + confirmBlock := <-s.confirmChan + s.Equal(blockHash, confirmBlock) +} + +func (s *AgreementTestSuite) TestFastConfirmNonLeader() { + a, leaderNode := s.newAgreement(4, 1, s.defaultValidLeader) + s.Require().NotEqual(s.ID, leaderNode) + // FastState + a.nextState() + // FastVoteState + s.Require().Len(s.blockChan, 0) + block := s.proposeBlock(leaderNode, a.data.leader.hashCRS, []byte{}) + s.Require().Equal(leaderNode, block.ProposerID) + s.Require().NoError(a.processBlock(block)) + // Wait some time for go routine in processBlock to finish. + time.Sleep(500 * time.Millisecond) + var vote *types.Vote + select { + case vote = <-s.voteChan: + case <-time.After(500 * time.Millisecond): + s.FailNow("Should propose vote") + } + s.Equal(types.VoteFast, vote.Type) + s.Equal(block.Hash, vote.BlockHash) + for nID := range s.signers { + v := s.copyVote(vote, nID) + s.Require().NoError(a.processVote(v)) + } + // We have enough of Fast-Votes. + s.Require().Len(s.voteChan, 1) + vote = <-s.voteChan + for nID := range s.signers { + v := s.copyVote(vote, nID) + s.Require().NoError(a.processVote(v)) + } + // We have enough of Fast-ConfirmVotes. + s.Require().Len(s.confirmChan, 1) + confirmBlock := <-s.confirmChan + s.Equal(block.Hash, confirmBlock) +} + +func (s *AgreementTestSuite) TestFastForwardCond1() { + votes := 0 + a, _ := s.newAgreement(4, -1, s.defaultValidLeader) + a.data.lockIter = 1 + a.data.period = 3 + hash := common.NewRandomHash() + for nID := range a.notarySet { + vote := s.prepareVote(nID, types.VotePreCom, hash, uint64(2)) + s.Require().NoError(a.processVote(vote)) + if votes++; votes == 3 { + break + } + } + + select { + case <-a.done(): + s.FailNow("Unexpected fast forward.") + default: + } + s.Equal(hash, a.data.lockValue) + s.Equal(uint64(2), a.data.lockIter) + s.Equal(uint64(3), a.data.period) + + // No fast forward if vote.BlockHash == SKIP + a.data.lockIter = 6 + a.data.period = 8 + a.data.lockValue = types.NullBlockHash + for nID := range a.notarySet { + vote := s.prepareVote(nID, types.VotePreCom, types.SkipBlockHash, uint64(7)) + s.Require().NoError(a.processVote(vote)) + } + + select { + case <-a.done(): + s.FailNow("Unexpected fast forward.") + default: + } + + // No fast forward if lockValue == vote.BlockHash. 
+ a.data.lockIter = 11 + a.data.period = 13 + a.data.lockValue = hash + for nID := range a.notarySet { + vote := s.prepareVote(nID, types.VotePreCom, hash, uint64(12)) + s.Require().NoError(a.processVote(vote)) + } + + select { + case <-a.done(): + s.FailNow("Unexpected fast forward.") + default: + } +} + +func (s *AgreementTestSuite) TestFastForwardCond2() { + votes := 0 + a, _ := s.newAgreement(4, -1, s.defaultValidLeader) + a.data.period = 1 + done := a.done() + hash := common.NewRandomHash() + for nID := range a.notarySet { + vote := s.prepareVote(nID, types.VotePreCom, hash, uint64(2)) + s.Require().NoError(a.processVote(vote)) + if votes++; votes == 3 { + break + } + } + + select { + case <-done: + default: + s.FailNow("Expecting fast forward for pending done() call.") + } + select { + case <-a.done(): + default: + s.FailNow("Expecting fast forward.") + } + s.Equal(hash, a.data.lockValue) + s.Equal(uint64(2), a.data.lockIter) + s.Equal(uint64(2), a.data.period) + + // No fast forward if vote.BlockHash == SKIP + a.data.period = 6 + for nID := range a.notarySet { + vote := s.prepareVote(nID, types.VotePreCom, types.SkipBlockHash, uint64(7)) + s.Require().NoError(a.processVote(vote)) + } + + select { + case <-a.done(): + s.FailNow("Unexpected fast forward.") + default: + } +} + +func (s *AgreementTestSuite) TestFastForwardCond3() { + numVotes := 0 + votes := []*types.Vote{} + a, _ := s.newAgreement(4, -1, s.defaultValidLeader) + a.data.period = 1 + done := a.done() + for nID := range a.notarySet { + vote := s.prepareVote(nID, types.VoteCom, common.NewRandomHash(), uint64(2)) + votes = append(votes, vote) + s.Require().NoError(a.processVote(vote)) + if numVotes++; numVotes == 3 { + break + } + } + + select { + case <-done: + default: + s.FailNow("Expecting fast forward for pending done() call.") + } + select { + case <-a.done(): + default: + s.FailNow("Expecting fast forward.") + } + s.Equal(uint64(3), a.data.period) + + s.Len(s.pulledBlocks, 3) + for _, vote := range votes { + _, exist := s.pulledBlocks[vote.BlockHash] + s.True(exist) + } +} + +func (s *AgreementTestSuite) TestDecide() { + votes := 0 + a, _ := s.newAgreement(4, -1, s.defaultValidLeader) + a.data.period = 5 + + // No decide if com-vote on SKIP. + for nID := range a.notarySet { + vote := s.prepareVote(nID, types.VoteCom, types.SkipBlockHash, uint64(2)) + s.Require().NoError(a.processVote(vote)) + if votes++; votes == 3 { + break + } + } + s.Require().Len(s.confirmChan, 0) + + // Normal decide. 
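+	// (Note the decide rule tallies votes per period: three com-votes on the
+	// same hash — the BA threshold these tests use for a 4-node notary set —
+	// confirm the block even though they carry period 3 while the local
+	// period is 5.)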
+ hash := common.NewRandomHash() + for nID := range a.notarySet { + vote := s.prepareVote(nID, types.VoteCom, hash, uint64(3)) + s.Require().NoError(a.processVote(vote)) + if votes++; votes == 3 { + break + } + } + s.Require().Len(s.confirmChan, 1) + confirmBlock := <-s.confirmChan + s.Equal(hash, confirmBlock) +} + +func (s *AgreementTestSuite) TestForkVote() { + a, _ := s.newAgreement(4, -1, s.defaultValidLeader) + a.data.period = 2 + for nID := range a.notarySet { + v01 := s.prepareVote(nID, types.VotePreCom, common.NewRandomHash(), 2) + v02 := s.prepareVote(nID, types.VotePreCom, common.NewRandomHash(), 2) + s.Require().NoError(a.processVote(v01)) + s.Require().IsType(&ErrForkVote{}, a.processVote(v02)) + s.Require().Equal(v01.BlockHash, <-s.forkVoteChan) + s.Require().Equal(v02.BlockHash, <-s.forkVoteChan) + break + } +} + +func (s *AgreementTestSuite) TestForkBlock() { + a, _ := s.newAgreement(4, -1, s.defaultValidLeader) + for nID := range a.notarySet { + b01 := s.proposeBlock(nID, a.data.leader.hashCRS, []byte{1}) + b02 := s.proposeBlock(nID, a.data.leader.hashCRS, []byte{2}) + s.Require().NoError(a.processBlock(b01)) + s.Require().IsType(&ErrFork{}, a.processBlock(b02)) + s.Require().Equal(b01.Hash, <-s.forkBlockChan) + s.Require().Equal(b02.Hash, <-s.forkBlockChan) + } +} + +func (s *AgreementTestSuite) TestFindBlockInPendingSet() { + a, leaderNode := s.newAgreement(4, 0, func(*types.Block, common.Hash) (bool, error) { + return false, nil + }) + block := s.proposeBlock(leaderNode, a.data.leader.hashCRS, []byte{}) + s.Require().NoError(a.processBlock(block)) + // Make sure the block goes to pending pool in leader selector. + block, exist := a.data.leader.findPendingBlock(block.Hash) + s.Require().True(exist) + s.Require().NotNil(block) + // This block is allowed to be found by findBlockNoLock. + block, exist = a.findBlockNoLock(block.Hash) + s.Require().True(exist) + s.Require().NotNil(block) +} + +func (s *AgreementTestSuite) TestConfirmWithBlock() { + a, _ := s.newAgreement(4, -1, s.defaultValidLeader) + block := &types.Block{ + Hash: common.NewRandomHash(), + Position: a.agreementID(), + Randomness: []byte{0x1, 0x2, 0x3, 0x4}, + } + a.processFinalizedBlock(block) + s.Require().Len(s.confirmChan, 1) + confirm := <-s.confirmChan + s.Equal(block.Hash, confirm) + s.True(a.confirmed()) +} + +func TestAgreement(t *testing.T) { + suite.Run(t, new(AgreementTestSuite)) +} diff --git a/dex/consensus/core/blockchain.go b/dex/consensus/core/blockchain.go new file mode 100644 index 000000000..4fae221c7 --- /dev/null +++ b/dex/consensus/core/blockchain.go @@ -0,0 +1,681 @@ +// Copyright 2019 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. 
+ +package core + +import ( + "bytes" + "errors" + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/dexon-foundation/dexon-consensus/core/utils" +) + +// Errors for sanity check error. +var ( + ErrBlockFromOlderPosition = errors.New("block from older position") + ErrNotGenesisBlock = errors.New("not a genesis block") + ErrIsGenesisBlock = errors.New("is a genesis block") + ErrIncorrectParentHash = errors.New("incorrect parent hash") + ErrInvalidBlockHeight = errors.New("invalid block height") + ErrInvalidRoundID = errors.New("invalid round id") + ErrInvalidTimestamp = errors.New("invalid timestamp") + ErrNotFollowTipPosition = errors.New("not follow tip position") + ErrDuplicatedPendingBlock = errors.New("duplicated pending block") + ErrRetrySanityCheckLater = errors.New("retry sanity check later") + ErrRoundNotSwitch = errors.New("round not switch") + ErrIncorrectAgreementResult = errors.New( + "incorrect block randomness result") + ErrMissingRandomness = errors.New("missing block randomness") +) + +const notReadyHeight uint64 = math.MaxUint64 + +type pendingBlockRecord struct { + position types.Position + block *types.Block +} + +type pendingBlockRecords []pendingBlockRecord + +func (pb *pendingBlockRecords) insert(p pendingBlockRecord) error { + idx := sort.Search(len(*pb), func(i int) bool { + return !(*pb)[i].position.Older(p.position) + }) + switch idx { + case len(*pb): + *pb = append(*pb, p) + default: + if (*pb)[idx].position.Equal(p.position) { + // Allow to overwrite pending block record for empty blocks, we may + // need to pull that block from others when its parent is not found + // locally. + if (*pb)[idx].block == nil && p.block != nil { + (*pb)[idx].block = p.block + return nil + } + return ErrDuplicatedPendingBlock + } + // Insert the value to that index. 
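+		// (Standard sorted-slice insertion: grow the slice by one zero
+		// record, shift the tail right with copy, then write p at idx so the
+		// records stay ordered by position.)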
+		*pb = append((*pb), pendingBlockRecord{})
+		copy((*pb)[idx+1:], (*pb)[idx:])
+		(*pb)[idx] = p
+	}
+	return nil
+}
+
+func (pb pendingBlockRecords) searchByHeight(h uint64) (
+	pendingBlockRecord, bool) {
+	idx := sort.Search(len(pb), func(i int) bool {
+		return pb[i].position.Height >= h
+	})
+	if idx == len(pb) || pb[idx].position.Height != h {
+		return pendingBlockRecord{}, false
+	}
+	return pb[idx], true
+}
+
+func (pb pendingBlockRecords) searchByPosition(p types.Position) (
+	pendingBlockRecord, bool) {
+	// Compare against the record's own position: the block field may be nil
+	// for pending empty blocks.
+	idx := sort.Search(len(pb), func(i int) bool {
+		return !pb[i].position.Older(p)
+	})
+	if idx == len(pb) || !pb[idx].position.Equal(p) {
+		return pendingBlockRecord{}, false
+	}
+	return pb[idx], true
+}
+
+type blockChainConfig struct {
+	utils.RoundBasedConfig
+
+	minBlockInterval time.Duration
+}
+
+func (c *blockChainConfig) fromConfig(round uint64, config *types.Config) {
+	c.minBlockInterval = config.MinBlockInterval
+	c.SetupRoundBasedFields(round, config)
+}
+
+func newBlockChainConfig(prev blockChainConfig, config *types.Config) (
+	c blockChainConfig) {
+	c = blockChainConfig{}
+	c.fromConfig(prev.RoundID()+1, config)
+	c.AppendTo(prev.RoundBasedConfig)
+	return
+}
+
+type tsigVerifierGetter interface {
+	UpdateAndGet(uint64) (TSigVerifier, bool, error)
+	Purge(uint64)
+}
+
+type blockChain struct {
+	lock                sync.RWMutex
+	ID                  types.NodeID
+	lastConfirmed       *types.Block
+	lastDelivered       *types.Block
+	signer              *utils.Signer
+	vGetter             tsigVerifierGetter
+	app                 Application
+	logger              common.Logger
+	pendingRandomnesses map[types.Position][]byte
+	configs             []blockChainConfig
+	pendingBlocks       pendingBlockRecords
+	confirmedBlocks     types.BlocksByPosition
+	dMoment             time.Time
+
+	// Do not access this variable except in processAgreementResult.
+	lastPosition types.Position
+}
+
+func newBlockChain(nID types.NodeID, dMoment time.Time, initBlock *types.Block,
+	app Application, vGetter tsigVerifierGetter, signer *utils.Signer,
+	logger common.Logger) *blockChain {
+	return &blockChain{
+		ID:            nID,
+		lastConfirmed: initBlock,
+		lastDelivered: initBlock,
+		signer:        signer,
+		vGetter:       vGetter,
+		app:           app,
+		logger:        logger,
+		dMoment:       dMoment,
+		pendingRandomnesses: make(
+			map[types.Position][]byte),
+	}
+}
+
+func (bc *blockChain) notifyRoundEvents(evts []utils.RoundEventParam) error {
+	bc.lock.Lock()
+	defer bc.lock.Unlock()
+	apply := func(e utils.RoundEventParam) error {
+		if len(bc.configs) > 0 {
+			lastCfg := bc.configs[len(bc.configs)-1]
+			if e.BeginHeight != lastCfg.RoundEndHeight() {
+				return ErrInvalidBlockHeight
+			}
+			if lastCfg.RoundID() == e.Round {
+				bc.configs[len(bc.configs)-1].ExtendLength()
+			} else if lastCfg.RoundID()+1 == e.Round {
+				bc.configs = append(bc.configs, newBlockChainConfig(
+					lastCfg, e.Config))
+			} else {
+				return ErrInvalidRoundID
+			}
+		} else {
+			c := blockChainConfig{}
+			c.fromConfig(e.Round, e.Config)
+			c.SetRoundBeginHeight(e.BeginHeight)
+			if bc.lastConfirmed == nil {
+				if c.RoundID() != 0 {
+					panic(fmt.Errorf(
+						"genesis config should be from round 0, but got %d",
+						c.RoundID()))
+				}
+			} else {
+				if c.RoundID() != bc.lastConfirmed.Position.Round {
+					panic(fmt.Errorf("incompatible config/block round %s %d",
						bc.lastConfirmed, c.RoundID()))
+				}
+				if !c.Contains(bc.lastConfirmed.Position.Height) {
+					panic(fmt.Errorf(
+						"unmatched round-event with block %s %d %d %d",
+						bc.lastConfirmed, e.Round, e.Reset, e.BeginHeight))
+				}
+			}
+			bc.configs = append(bc.configs, c)
+		}
+		return nil
+	}
+	for _, e := range evts {
+		if err := apply(e); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (bc *blockChain) proposeBlock(position types.Position,
+	proposeTime time.Time, isEmpty bool) (b *types.Block, err error) {
+	bc.lock.RLock()
+	defer bc.lock.RUnlock()
+	return bc.prepareBlock(position, proposeTime, isEmpty)
+}
+
+func (bc *blockChain) extractBlocks() (ret []*types.Block) {
+	bc.lock.Lock()
+	defer bc.lock.Unlock()
+	for len(bc.confirmedBlocks) > 0 {
+		c := bc.confirmedBlocks[0]
+		if c.Position.Round >= DKGDelayRound &&
+			len(c.Randomness) == 0 &&
+			!bc.setRandomnessFromPending(c) {
+			break
+		}
+		c, bc.confirmedBlocks = bc.confirmedBlocks[0], bc.confirmedBlocks[1:]
+		ret = append(ret, c)
+		bc.lastDelivered = c
+	}
+	return
+}
+
+func (bc *blockChain) sanityCheck(b *types.Block) error {
+	bc.lock.RLock()
+	defer bc.lock.RUnlock()
+	if bc.lastConfirmed == nil {
+		// It should be a genesis block.
+		if !b.IsGenesis() {
+			return ErrNotGenesisBlock
+		}
+		if b.Timestamp.Before(bc.dMoment.Add(bc.configs[0].minBlockInterval)) {
+			return ErrInvalidTimestamp
+		}
+		return nil
+	}
+	if b.IsGenesis() {
+		return ErrIsGenesisBlock
+	}
+	if b.Position.Height != bc.lastConfirmed.Position.Height+1 {
+		if b.Position.Height > bc.lastConfirmed.Position.Height {
+			return ErrRetrySanityCheckLater
+		}
+		return ErrInvalidBlockHeight
+	}
+	tipConfig := bc.tipConfig()
+	if tipConfig.IsLastBlock(bc.lastConfirmed) {
+		if b.Position.Round != bc.lastConfirmed.Position.Round+1 {
+			return ErrRoundNotSwitch
+		}
+	} else {
+		if b.Position.Round != bc.lastConfirmed.Position.Round {
+			return ErrInvalidRoundID
+		}
+	}
+	if !b.ParentHash.Equal(bc.lastConfirmed.Hash) {
+		return ErrIncorrectParentHash
+	}
+	if b.Timestamp.Before(bc.lastConfirmed.Timestamp.Add(
+		tipConfig.minBlockInterval)) {
+		return ErrInvalidTimestamp
+	}
+	if err := utils.VerifyBlockSignature(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+// addEmptyBlock is called when an empty block is confirmed by BA.
+func (bc *blockChain) addEmptyBlock(position types.Position) (
+	*types.Block, error) {
+	bc.lock.Lock()
+	defer bc.lock.Unlock()
+	add := func() *types.Block {
+		emptyB, err := bc.prepareBlock(position, time.Time{}, true)
+		if err != nil || emptyB == nil {
+			// This helper is expected to be called when an empty block is
+			// ready to be confirmed.
+			panic(err)
+		}
+		bc.confirmBlock(emptyB)
+		bc.checkIfBlocksConfirmed()
+		return emptyB
+	}
+	if bc.lastConfirmed != nil {
+		if !position.Newer(bc.lastConfirmed.Position) {
+			bc.logger.Warn("Dropping empty block: older than tip",
+				"position", &position,
+				"last-confirmed", bc.lastConfirmed)
+			return nil, ErrBlockFromOlderPosition
+		}
+		if bc.lastConfirmed.Position.Height+1 == position.Height {
+			return add(), nil
+		}
+	} else if position.Height == types.GenesisHeight && position.Round == 0 {
+		return add(), nil
+	} else {
+		return nil, ErrInvalidBlockHeight
+	}
+	return nil, bc.addPendingBlockRecord(pendingBlockRecord{position, nil})
+}
+
+// addBlock should be called once a block is confirmed by BA; no sanity check
+// is performed against it, and it is fine to add a block whose height skips
+// ahead of the current tip.
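+// Out-of-order additions are parked in pendingBlocks and promoted by
+// checkIfBlocksConfirmed once the height gap closes; e.g. adding heights
+// 3, 2, 1 in that order still delivers 1, 2, 3.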
+func (bc *blockChain) addBlock(b *types.Block) error { + if b.Position.Round >= DKGDelayRound && + len(b.Randomness) == 0 && + !bc.setRandomnessFromPending(b) { + return ErrMissingRandomness + } + bc.lock.Lock() + defer bc.lock.Unlock() + confirmed := false + if bc.lastConfirmed != nil { + if !b.Position.Newer(bc.lastConfirmed.Position) { + bc.logger.Warn("Dropping block: older than tip", + "block", b, "last-confirmed", bc.lastConfirmed) + return nil + } + if bc.lastConfirmed.Position.Height+1 == b.Position.Height { + confirmed = true + } + } else if b.IsGenesis() { + confirmed = true + } + delete(bc.pendingRandomnesses, b.Position) + if !confirmed { + return bc.addPendingBlockRecord(pendingBlockRecord{b.Position, b}) + } + bc.confirmBlock(b) + bc.checkIfBlocksConfirmed() + return nil +} + +func (bc *blockChain) tipRound() uint64 { + bc.lock.RLock() + defer bc.lock.RUnlock() + if bc.lastConfirmed == nil { + return 0 + } + offset, tipConfig := uint64(0), bc.tipConfig() + if tipConfig.IsLastBlock(bc.lastConfirmed) { + offset++ + } + return bc.lastConfirmed.Position.Round + offset +} + +func (bc *blockChain) confirmed(h uint64) bool { + bc.lock.RLock() + defer bc.lock.RUnlock() + if bc.lastConfirmed != nil && bc.lastConfirmed.Position.Height >= h { + return true + } + r, found := bc.pendingBlocks.searchByHeight(h) + if !found { + return false + } + return r.block != nil +} + +func (bc *blockChain) nextBlock() (uint64, time.Time) { + bc.lock.RLock() + defer bc.lock.RUnlock() + // It's ok to access tip config directly without checking the existence of + // lastConfirmed block in the scenario of "nextBlock" method. + tip, config := bc.lastConfirmed, bc.configs[0] + if tip == nil { + return types.GenesisHeight, bc.dMoment + } + if tip != bc.lastDelivered { + // If tip is not delivered, we should not proceed to next block. + return notReadyHeight, time.Time{} + } + return tip.Position.Height + 1, tip.Timestamp.Add(config.minBlockInterval) +} + +func (bc *blockChain) pendingBlocksWithoutRandomness() []*types.Block { + bc.lock.RLock() + defer bc.lock.RUnlock() + blocks := make([]*types.Block, 0) + for _, b := range bc.confirmedBlocks { + if b.Position.Round < DKGDelayRound || + len(b.Randomness) > 0 || + bc.setRandomnessFromPending(b) { + continue + } + blocks = append(blocks, b) + } + for _, r := range bc.pendingBlocks { + if r.position.Round < DKGDelayRound { + continue + } + if r.block != nil && + len(r.block.Randomness) == 0 && + !bc.setRandomnessFromPending(r.block) { + blocks = append(blocks, r.block) + } + } + return blocks +} + +func (bc *blockChain) lastDeliveredBlock() *types.Block { + bc.lock.RLock() + defer bc.lock.RUnlock() + return bc.lastDelivered +} + +func (bc *blockChain) lastPendingBlock() *types.Block { + bc.lock.RLock() + defer bc.lock.RUnlock() + if len(bc.confirmedBlocks) == 0 { + return nil + } + return bc.confirmedBlocks[0] +} + +///////////////////////////////////////////// +// +// internal helpers +// +///////////////////////////////////////////// + +// findPendingBlock is a helper to find a block in either pending or confirmed +// state by position. 
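+// Both containers are kept sorted by position, so each probe is a binary
+// search (sort.Search) rather than a linear scan.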
+func (bc *blockChain) findPendingBlock(p types.Position) *types.Block { + if idx := sort.Search(len(bc.confirmedBlocks), func(i int) bool { + return !bc.confirmedBlocks[i].Position.Older(p) + }); idx != len(bc.confirmedBlocks) && + bc.confirmedBlocks[idx].Position.Equal(p) { + return bc.confirmedBlocks[idx] + } + pendingRec, _ := bc.pendingBlocks.searchByPosition(p) + return pendingRec.block +} + +func (bc *blockChain) addPendingBlockRecord(p pendingBlockRecord) error { + if err := bc.pendingBlocks.insert(p); err != nil { + if err == ErrDuplicatedPendingBlock { + // We need to ignore this error because BA might confirm duplicated + // blocks in position. + err = nil + } + return err + } + return nil +} + +func (bc *blockChain) checkIfBlocksConfirmed() { + var err error + for len(bc.pendingBlocks) > 0 { + if bc.pendingBlocks[0].position.Height < + bc.lastConfirmed.Position.Height+1 { + panic(fmt.Errorf("unexpected case %s %s", bc.lastConfirmed, + bc.pendingBlocks[0].position)) + } + if bc.pendingBlocks[0].position.Height > + bc.lastConfirmed.Position.Height+1 { + break + } + var pending pendingBlockRecord + pending, bc.pendingBlocks = bc.pendingBlocks[0], bc.pendingBlocks[1:] + nextTip := pending.block + if nextTip == nil { + if nextTip, err = bc.prepareBlock( + pending.position, time.Time{}, true); err != nil { + // It should not be error when prepare empty block for correct + // position. + panic(err) + } + } + bc.confirmBlock(nextTip) + } +} + +func (bc *blockChain) purgeConfig() { + for bc.configs[0].RoundID() < bc.lastConfirmed.Position.Round { + bc.configs = bc.configs[1:] + } + if bc.configs[0].RoundID() != bc.lastConfirmed.Position.Round { + panic(fmt.Errorf("mismatched tip config: %d %d", + bc.configs[0].RoundID(), bc.lastConfirmed.Position.Round)) + } +} + +func (bc *blockChain) verifyRandomness( + blockHash common.Hash, round uint64, randomness []byte) (bool, error) { + if round < DKGDelayRound { + return bytes.Compare(randomness, NoRand) == 0, nil + } + v, ok, err := bc.vGetter.UpdateAndGet(round) + if err != nil { + return false, err + } + if !ok { + return false, ErrTSigNotReady + } + return v.VerifySignature(blockHash, crypto.Signature{ + Type: "bls", + Signature: randomness}), nil +} + +func (bc *blockChain) prepareBlock(position types.Position, + proposeTime time.Time, empty bool) (b *types.Block, err error) { + b = &types.Block{Position: position, Timestamp: proposeTime} + tip := bc.lastConfirmed + // Make sure we can propose a block at expected position for callers. + if tip == nil { + if bc.configs[0].RoundID() != uint64(0) { + panic(fmt.Errorf( + "Genesis config should be ready when preparing genesis: %d", + bc.configs[0].RoundID())) + } + // It should be the case for genesis block. 
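+		// (Without a confirmed tip, the only position we can legitimately
+		// build on is the genesis position; anything else is rejected with
+		// ErrNotGenesisBlock below.)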
+	if !position.Equal(types.Position{Height: types.GenesisHeight}) {
+		b, err = nil, ErrNotGenesisBlock
+		return
+	}
+	minExpectedTime := bc.dMoment.Add(bc.configs[0].minBlockInterval)
+	if empty {
+		b.Timestamp = minExpectedTime
+	} else {
+		bc.logger.Debug("Calling genesis Application.PreparePayload")
+		if b.Payload, err = bc.app.PreparePayload(b.Position); err != nil {
+			b = nil
+			return
+		}
+		bc.logger.Debug("Calling genesis Application.PrepareWitness")
+		if b.Witness, err = bc.app.PrepareWitness(0); err != nil {
+			b = nil
+			return
+		}
+		if proposeTime.Before(minExpectedTime) {
+			b.Timestamp = minExpectedTime
+		}
+	}
+	} else {
+		tipConfig := bc.tipConfig()
+		if tip.Position.Height+1 != position.Height {
+			b, err = nil, ErrNotFollowTipPosition
+			return
+		}
+		if tipConfig.IsLastBlock(tip) {
+			if tip.Position.Round+1 != position.Round {
+				b, err = nil, ErrRoundNotSwitch
+				return
+			}
+		} else {
+			if tip.Position.Round != position.Round {
+				b, err = nil, ErrInvalidRoundID
+				return
+			}
+		}
+		minExpectedTime := tip.Timestamp.Add(bc.configs[0].minBlockInterval)
+		b.ParentHash = tip.Hash
+		if !empty {
+			bc.logger.Debug("Calling Application.PreparePayload",
+				"position", b.Position)
+			if b.Payload, err = bc.app.PreparePayload(b.Position); err != nil {
+				b = nil
+				return
+			}
+			bc.logger.Debug("Calling Application.PrepareWitness",
+				"height", tip.Witness.Height)
+			if b.Witness, err = bc.app.PrepareWitness(
+				tip.Witness.Height); err != nil {
+				b = nil
+				return
+			}
+			if b.Timestamp.Before(minExpectedTime) {
+				b.Timestamp = minExpectedTime
+			}
+		} else {
+			b.Witness.Height = tip.Witness.Height
+			b.Witness.Data = make([]byte, len(tip.Witness.Data))
+			copy(b.Witness.Data, tip.Witness.Data)
+			b.Timestamp = minExpectedTime
+		}
+	}
+	if empty {
+		if b.Hash, err = utils.HashBlock(b); err != nil {
+			b = nil
+			return
+		}
+	} else {
+		if err = bc.signer.SignBlock(b); err != nil {
+			b = nil
+			return
+		}
+	}
+	return
+}
+
+func (bc *blockChain) tipConfig() blockChainConfig {
+	if bc.lastConfirmed == nil {
+		panic(fmt.Errorf("attempting to access config without tip"))
+	}
+	if bc.lastConfirmed.Position.Round != bc.configs[0].RoundID() {
+		panic(fmt.Errorf("inconsistent config and tip: %d %d",
+			bc.lastConfirmed.Position.Round, bc.configs[0].RoundID()))
+	}
+	return bc.configs[0]
+}
+
+func (bc *blockChain) confirmBlock(b *types.Block) {
+	if bc.lastConfirmed != nil &&
+		bc.lastConfirmed.Position.Height+1 != b.Position.Height {
+		panic(fmt.Errorf("confirmed blocks not continuous in height: %s %s",
+			bc.lastConfirmed, b))
+	}
+	bc.logger.Debug("Calling Application.BlockConfirmed", "block", b)
+	bc.app.BlockConfirmed(*b)
+	bc.lastConfirmed = b
+	bc.confirmedBlocks = append(bc.confirmedBlocks, b)
+	bc.purgeConfig()
+}
+
+func (bc *blockChain) setRandomnessFromPending(b *types.Block) bool {
+	if r, exist := bc.pendingRandomnesses[b.Position]; exist {
+		b.Randomness = r
+		delete(bc.pendingRandomnesses, b.Position)
+		return true
+	}
+	return false
+}
+
+func (bc *blockChain) processAgreementResult(result *types.AgreementResult) error {
+	if result.Position.Round < DKGDelayRound {
+		return nil
+	}
+	if !result.Position.Newer(bc.lastPosition) {
+		return ErrSkipButNoError
+	}
+	ok, err := bc.verifyRandomness(
+		result.BlockHash, result.Position.Round, result.Randomness)
+	if err != nil {
+		return err
+	}
+	if !ok {
+		return ErrIncorrectAgreementResult
+	}
+	bc.lock.Lock()
+	defer bc.lock.Unlock()
+	if !result.Position.Newer(bc.lastDelivered.Position) {
+		return nil
+	}
+	bc.pendingRandomnesses[result.Position] = 
result.Randomness + bc.lastPosition = bc.lastDelivered.Position + return nil +} + +func (bc *blockChain) addBlockRandomness(pos types.Position, rand []byte) { + if pos.Round < DKGDelayRound { + return + } + bc.lock.Lock() + defer bc.lock.Unlock() + if !pos.Newer(bc.lastDelivered.Position) { + return + } + bc.pendingRandomnesses[pos] = rand +} diff --git a/dex/consensus/core/blockchain_test.go b/dex/consensus/core/blockchain_test.go new file mode 100644 index 000000000..c3d2cb725 --- /dev/null +++ b/dex/consensus/core/blockchain_test.go @@ -0,0 +1,630 @@ +// Copyright 2019 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + "math/rand" + "testing" + "time" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/test" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/dexon-foundation/dexon-consensus/core/utils" + "github.com/stretchr/testify/suite" +) + +type testTSigVerifier struct{} + +func (v *testTSigVerifier) VerifySignature(hash common.Hash, + sig crypto.Signature) bool { + return true +} + +type testTSigVerifierGetter struct{} + +func (t *testTSigVerifierGetter) UpdateAndGet(round uint64) ( + TSigVerifier, bool, error) { + return &testTSigVerifier{}, true, nil +} + +func (t *testTSigVerifierGetter) Purge(_ uint64) {} + +type BlockChainTestSuite struct { + suite.Suite + + nID types.NodeID + signer *utils.Signer + dMoment time.Time + blockInterval time.Duration +} + +func (s *BlockChainTestSuite) SetupSuite() { + prvKeys, pubKeys, err := test.NewKeys(1) + s.Require().NoError(err) + s.nID = types.NewNodeID(pubKeys[0]) + s.signer = utils.NewSigner(prvKeys[0]) + s.dMoment = time.Now().UTC() + s.blockInterval = 1 * time.Millisecond +} + +func (s *BlockChainTestSuite) newBlocks(c uint64, initBlock *types.Block) ( + blocks []*types.Block) { + parentHash := common.Hash{} + baseHeight := types.GenesisHeight + t := s.dMoment.Add(s.blockInterval) + initRound := uint64(0) + if initBlock != nil { + parentHash = initBlock.Hash + t = initBlock.Timestamp.Add(s.blockInterval) + initRound = initBlock.Position.Round + baseHeight = initBlock.Position.Height + 1 + } + for i := uint64(0); i < uint64(c); i++ { + b := &types.Block{ + ParentHash: parentHash, + Position: types.Position{Round: initRound, Height: baseHeight + i}, + Timestamp: t, + } + if b.Position.Round >= DKGDelayRound { + b.Randomness = common.GenerateRandomBytes() + } else { + b.Randomness = NoRand + } + s.Require().NoError(s.signer.SignBlock(b)) + blocks = append(blocks, b) + parentHash = b.Hash + t = t.Add(s.blockInterval) + } + return +} + +func (s *BlockChainTestSuite) newEmptyBlock(parent *types.Block, + blockInterval 
time.Duration) *types.Block {
+	emptyB := &types.Block{
+		ParentHash: parent.Hash,
+		Position: types.Position{
+			Round:  parent.Position.Round,
+			Height: parent.Position.Height + 1,
+		},
+		Timestamp: parent.Timestamp.Add(blockInterval),
+	}
+	var err error
+	emptyB.Hash, err = utils.HashBlock(emptyB)
+	s.Require().NoError(err)
+	return emptyB
+}
+
+func (s *BlockChainTestSuite) newBlock(parent *types.Block, round uint64,
+	blockInterval time.Duration) *types.Block {
+	b := &types.Block{
+		ParentHash: parent.Hash,
+		Position: types.Position{
+			Round:  round,
+			Height: parent.Position.Height + 1,
+		},
+		Timestamp: parent.Timestamp.Add(blockInterval),
+	}
+	if b.Position.Round >= DKGDelayRound {
+		b.Randomness = common.GenerateRandomBytes()
+	} else {
+		b.Randomness = NoRand
+	}
+	s.Require().NoError(s.signer.SignBlock(b))
+	return b
+}
+
+func (s *BlockChainTestSuite) newRandomnessFromBlock(
+	b *types.Block) *types.AgreementResult {
+	return &types.AgreementResult{
+		BlockHash:  b.Hash,
+		Position:   b.Position,
+		Randomness: common.GenerateRandomBytes(),
+	}
+}
+
+func (s *BlockChainTestSuite) newBlockChain(initB *types.Block,
+	roundLength uint64) (bc *blockChain) {
+	initRound := uint64(0)
+	if initB != nil {
+		initRound = initB.Position.Round
+	}
+	initHeight := types.GenesisHeight
+	if initB != nil {
+		initHeight = initB.Position.Height
+	}
+	bc = newBlockChain(s.nID, s.dMoment, initB, test.NewApp(0, nil, nil),
+		&testTSigVerifierGetter{}, s.signer, &common.NullLogger{})
+	// Provide the genesis round event.
+	s.Require().NoError(bc.notifyRoundEvents([]utils.RoundEventParam{
+		utils.RoundEventParam{
+			Round:       initRound,
+			Reset:       0,
+			BeginHeight: initHeight,
+			Config: &types.Config{
+				MinBlockInterval: s.blockInterval,
+				RoundLength:      roundLength,
+			}}}))
+	return
+}
+
+func (s *BlockChainTestSuite) newRoundOneInitBlock() *types.Block {
+	initBlock := &types.Block{
+		ParentHash: common.NewRandomHash(),
+		Position:   types.Position{Round: 1},
+		Timestamp:  s.dMoment,
+	}
+	s.Require().NoError(s.signer.SignBlock(initBlock))
+	return initBlock
+}
+
+func (s *BlockChainTestSuite) baseConcurrentAccessTest(initBlock *types.Block,
+	blocks []*types.Block, results []*types.AgreementResult) {
+	var (
+		bc        = s.newBlockChain(initBlock, uint64(len(blocks)+1))
+		start     = make(chan struct{})
+		newNotif  = make(chan struct{}, 1)
+		delivered []*types.Block
+	)
+	resultsCopy := make([]*types.AgreementResult, len(results))
+	copy(resultsCopy, results)
+	type randomnessResult types.AgreementResult
+	add := func(v interface{}) {
+		<-start
+		switch val := v.(type) {
+		case *types.Block:
+			if err := bc.addBlock(val); err != nil {
+				// Never assert in a subroutine when testing.
+				panic(err)
+			}
+		case *types.AgreementResult:
+			if err := bc.processAgreementResult(val); err != nil {
+				if err != ErrSkipButNoError {
+					// Never assert in a subroutine when testing.
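+					// (testify's FailNow only works when called from the
+					// goroutine running the test, so failures in these
+					// workers must panic instead of using s.Require.)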
+					panic(err)
+				}
+			}
+		case *randomnessResult:
+			bc.addBlockRandomness(val.Position, val.Randomness)
+		default:
+			panic(fmt.Errorf("unknown type: %v", v))
+		}
+		select {
+		case newNotif <- struct{}{}:
+		default:
+		}
+	}
+	rand.Shuffle(len(resultsCopy), func(i, j int) {
+		resultsCopy[i], resultsCopy[j] = resultsCopy[j], resultsCopy[i]
+	})
+	for _, b := range blocks {
+		go add(b)
+	}
+	for i, r := range resultsCopy {
+		if i >= len(resultsCopy)/2 {
+			break
+		}
+		go add((*randomnessResult)(r))
+	}
+	go func() {
+		for i, a := range resultsCopy {
+			if i < len(resultsCopy)/2 {
+				continue
+			}
+			add(a)
+		}
+	}()
+	close(start)
+	for {
+		select {
+		case <-newNotif:
+			delivered = append(delivered, bc.extractBlocks()...)
+		case <-time.After(100 * time.Millisecond):
+			delivered = append(delivered, bc.extractBlocks()...)
+		}
+		if len(delivered) == len(blocks) {
+			break
+		}
+	}
+	// Check result.
+	b := delivered[0]
+	s.Require().Equal(b.Position.Height, uint64(1))
+	s.Require().NotEmpty(b.Randomness)
+	for _, bb := range delivered[1:] {
+		s.Require().Equal(b.Position.Height+1, bb.Position.Height)
+		s.Require().NotEmpty(bb.Randomness)
+		b = bb
+	}
+}
+
+func (s *BlockChainTestSuite) TestBasicUsage() {
+	initBlock := s.newRoundOneInitBlock()
+	bc := s.newBlockChain(initBlock, 10)
+	// Test scenario: block, empty block and randomness can be added in any
+	// order of position.
+	blocks := s.newBlocks(4, initBlock)
+	b0, b1, b2, b3 := blocks[0], blocks[1], blocks[2], blocks[3]
+	// Generate block-5 after block-4, which is an empty block.
+	b4 := s.newEmptyBlock(b3, time.Millisecond)
+	b5 := &types.Block{
+		ParentHash: b4.Hash,
+		Position:   types.Position{Round: 1, Height: b4.Position.Height + 1},
+		Randomness: common.GenerateRandomBytes(),
+	}
+	s.Require().NoError(s.signer.SignBlock(b5))
+	s.Require().NoError(bc.addBlock(b5))
+	emptyB, err := bc.addEmptyBlock(b4.Position)
+	s.Require().Nil(emptyB)
+	s.Require().NoError(err)
+	s.Require().NoError(bc.addBlock(b3))
+	s.Require().NoError(bc.addBlock(b2))
+	s.Require().NoError(bc.addBlock(b1))
+	s.Require().NoError(bc.addBlock(b0))
+	extracted := bc.extractBlocks()
+	s.Require().Len(extracted, 4)
+	bc.pendingRandomnesses[b4.Position] = common.GenerateRandomBytes()
+	extracted = bc.extractBlocks()
+	s.Require().Len(extracted, 2)
+	s.Require().Equal(extracted[0].Hash, b4.Hash)
+	extracted = bc.extractBlocks()
+	s.Require().Len(extracted, 0)
+}
+
+func (s *BlockChainTestSuite) TestConcurrentAccess() {
+	// Raise one goroutine for each block and randomness, and let them try to
+	// add to blockChain at the same time. Make sure we can deliver them all.
+	var (
+		retry     = 10
+		initBlock = s.newRoundOneInitBlock()
+		blocks    = s.newBlocks(500, initBlock)
+		rands     = []*types.AgreementResult{}
+	)
+	for _, b := range blocks {
+		rands = append(rands, s.newRandomnessFromBlock(b))
+	}
+	for i := 0; i < retry; i++ {
+		s.baseConcurrentAccessTest(initBlock, blocks, rands)
+	}
+}
+
+func (s *BlockChainTestSuite) TestSanityCheck() {
+	bc := s.newBlockChain(nil, 4)
+	blocks := s.newBlocks(3, nil)
+	b0, b1, b2 := blocks[0], blocks[1], blocks[2]
+	// ErrNotGenesisBlock
+	s.Require().Equal(ErrNotGenesisBlock.Error(), bc.sanityCheck(b1).Error())
+	// Genesis block should pass sanity check.
+ s.Require().NoError(bc.sanityCheck(b0)) + s.Require().NoError(bc.addBlock(b0)) + // ErrIsGenesisBlock + s.Require().Equal(ErrIsGenesisBlock.Error(), bc.sanityCheck(b0).Error()) + // ErrRetrySanityCheckLater + s.Require().Equal( + ErrRetrySanityCheckLater.Error(), bc.sanityCheck(b2).Error()) + // ErrInvalidBlockHeight + s.Require().NoError(bc.addBlock(b1)) + s.Require().NoError(bc.addBlock(b2)) + s.Require().Equal( + ErrInvalidBlockHeight.Error(), bc.sanityCheck(b1).Error()) + // ErrInvalidRoundID + // Should not switch round when tip is not the last block. + s.Require().Equal( + ErrInvalidRoundID.Error(), + bc.sanityCheck(s.newBlock(b2, 1, 1*time.Second)).Error()) + b3 := s.newBlock(b2, 0, 100*time.Second) + s.Require().NoError(bc.addBlock(b3)) + // Should switch round when tip is the last block. + s.Require().Equal( + ErrRoundNotSwitch.Error(), + bc.sanityCheck(s.newBlock(b3, 0, 1*time.Second)).Error()) + b4 := &types.Block{ + ParentHash: b2.Hash, + Position: types.Position{ + Round: 1, + Height: 5, + }, + Timestamp: b3.Timestamp, + } + s.Require().NoError(s.signer.SignBlock(b4)) + // ErrIncorrectParentHash + s.Require().EqualError(ErrIncorrectParentHash, bc.sanityCheck(b4).Error()) + b4.ParentHash = b3.Hash + // ErrInvalidTimestamp + s.Require().EqualError(ErrInvalidTimestamp, bc.sanityCheck(b4).Error()) + b4.Timestamp = b3.Timestamp.Add(1 * time.Second) + // There is no valid signature attached. + s.Require().Error(bc.sanityCheck(b4)) + // OK case. + s.Require().NoError(s.signer.SignBlock(b4)) + s.Require().NoError(bc.sanityCheck(b4)) +} + +func (s *BlockChainTestSuite) TestNotifyRoundEvents() { + roundLength := uint64(10) + bc := s.newBlockChain(nil, roundLength) + newEvent := func(round, reset, height uint64) []utils.RoundEventParam { + return []utils.RoundEventParam{ + utils.RoundEventParam{ + Round: round, + Reset: reset, + BeginHeight: types.GenesisHeight + height, + CRS: common.Hash{}, + Config: &types.Config{RoundLength: roundLength}, + }} + } + s.Require().Equal(ErrInvalidRoundID.Error(), + bc.notifyRoundEvents(newEvent(2, 0, roundLength)).Error()) + s.Require().NoError(bc.notifyRoundEvents(newEvent(1, 0, roundLength))) + // Make sure new config is appended when new round is ready. + s.Require().Len(bc.configs, 2) + s.Require().Equal(ErrInvalidRoundID.Error(), + bc.notifyRoundEvents(newEvent(3, 1, roundLength*2)).Error()) + s.Require().Equal(ErrInvalidBlockHeight.Error(), + bc.notifyRoundEvents(newEvent(1, 1, roundLength)).Error()) + s.Require().NoError(bc.notifyRoundEvents(newEvent(1, 1, roundLength*2))) + // Make sure roundEndHeight is extended when DKG reset. + s.Require().Equal(bc.configs[len(bc.configs)-1].RoundEndHeight(), + types.GenesisHeight+roundLength*3) +} + +func (s *BlockChainTestSuite) TestConfirmed() { + bc := s.newBlockChain(nil, 10) + blocks := s.newBlocks(3, nil) + // Add a confirmed block. + s.Require().NoError(bc.addBlock(blocks[0])) + // Add a pending block. 
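+	// (blocks[2] sits at height 3 while the tip is at height 1, so it is
+	// parked as a pending record: height 2 is unconfirmed, while heights 1
+	// and 3 report confirmed below.)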
+ s.Require().NoError(bc.addBlock(blocks[2])) + s.Require().True(bc.confirmed(1)) + s.Require().False(bc.confirmed(2)) + s.Require().True(bc.confirmed(3)) +} + +func (s *BlockChainTestSuite) TestNextBlockAndTipRound() { + var roundLength uint64 = 3 + bc := s.newBlockChain(nil, roundLength) + s.Require().NoError(bc.notifyRoundEvents([]utils.RoundEventParam{ + utils.RoundEventParam{ + Round: 1, + Reset: 0, + BeginHeight: types.GenesisHeight + roundLength, + CRS: common.Hash{}, + Config: &types.Config{ + MinBlockInterval: s.blockInterval, + RoundLength: roundLength, + }}})) + blocks := s.newBlocks(3, nil) + nextH, nextT := bc.nextBlock() + s.Require().Equal(nextH, types.GenesisHeight) + s.Require().Equal(nextT, s.dMoment) + // Add one block. + s.Require().NoError(bc.addBlock(blocks[0])) + s.Require().Len(bc.extractBlocks(), 1) + nextH, nextT = bc.nextBlock() + s.Require().Equal(nextH, uint64(2)) + s.Require().Equal( + nextT, blocks[0].Timestamp.Add(bc.configs[0].minBlockInterval)) + // Add one block, expected to be pending. + s.Require().NoError(bc.addBlock(blocks[2])) + nextH2, nextT2 := bc.nextBlock() + s.Require().Equal(nextH, nextH2) + s.Require().Equal(nextT, nextT2) + // Add a block, which is the last block of this round. + b3 := s.newBlock(blocks[2], 1, 1*time.Second) + s.Require().NoError(bc.addBlock(blocks[1])) + s.Require().NoError(bc.sanityCheck(b3)) + s.Require().NoError(bc.addBlock(b3)) + s.Require().Equal(bc.tipRound(), uint64(1)) +} + +func (s *BlockChainTestSuite) TestPendingBlocksWithoutRandomness() { + initBlock := s.newRoundOneInitBlock() + bc := s.newBlockChain(initBlock, 10) + b0, err := bc.addEmptyBlock(types.Position{Round: 1, Height: 1}) + s.Require().NoError(err) + b1, err := bc.addEmptyBlock(types.Position{Round: 1, Height: 2}) + s.Require().NoError(err) + b2, err := bc.addEmptyBlock(types.Position{Round: 1, Height: 3}) + s.Require().NoError(err) + s.Require().Equal(bc.pendingBlocksWithoutRandomness(), []*types.Block{ + b0, b1, b2}) + s.Require().NoError(bc.processAgreementResult(s.newRandomnessFromBlock(b0))) + s.Require().Equal(bc.pendingBlocksWithoutRandomness(), []*types.Block{ + b1, b2}) +} + +func (s *BlockChainTestSuite) TestLastXBlock() { + initBlock := s.newRoundOneInitBlock() + bc := s.newBlockChain(initBlock, 10) + s.Require().Nil(bc.lastPendingBlock()) + s.Require().True(bc.lastDeliveredBlock() == initBlock) + blocks := s.newBlocks(2, initBlock) + s.Require().NoError(bc.addBlock(blocks[0])) + s.Require().True(bc.lastPendingBlock() == blocks[0]) + s.Require().True(bc.lastDeliveredBlock() == initBlock) + s.Require().Len(bc.extractBlocks(), 1) + s.Require().Nil(bc.lastPendingBlock()) + s.Require().True(bc.lastDeliveredBlock() == blocks[0]) + s.Require().NoError(bc.addBlock(blocks[1])) + s.Require().True(bc.lastPendingBlock() == blocks[1]) + s.Require().True(bc.lastDeliveredBlock() == blocks[0]) +} + +func (s *BlockChainTestSuite) TestPendingBlockRecords() { + bs := s.newBlocks(5, nil) + ps := pendingBlockRecords{} + s.Require().NoError(ps.insert(pendingBlockRecord{bs[2].Position, bs[2]})) + s.Require().NoError(ps.insert(pendingBlockRecord{bs[1].Position, bs[1]})) + s.Require().NoError(ps.insert(pendingBlockRecord{bs[0].Position, bs[0]})) + s.Require().Equal(ErrDuplicatedPendingBlock.Error(), + ps.insert(pendingBlockRecord{bs[0].Position, nil}).Error()) + s.Require().True(ps[0].position.Equal(bs[0].Position)) + s.Require().True(ps[1].position.Equal(bs[1].Position)) + s.Require().True(ps[2].position.Equal(bs[2].Position)) + 
s.Require().NoError(ps.insert(pendingBlockRecord{bs[4].Position, bs[4]})) + // Here assume block3 is empty, since we didn't verify parent hash in + // pendingBlockRecords, it should be fine. + s.Require().NoError(ps.insert(pendingBlockRecord{bs[3].Position, nil})) + s.Require().True(ps[3].position.Equal(bs[3].Position)) + s.Require().True(ps[4].position.Equal(bs[4].Position)) +} + +func (s *BlockChainTestSuite) TestFindPendingBlock() { + bc := s.newBlockChain(nil, 10) + blocks := s.newBlocks(7, nil) + s.Require().NoError(bc.addBlock(blocks[6])) + s.Require().NoError(bc.addBlock(blocks[5])) + s.Require().NoError(bc.addBlock(blocks[3])) + s.Require().NoError(bc.addBlock(blocks[2])) + s.Require().NoError(bc.addBlock(blocks[1])) + s.Require().NoError(bc.addBlock(blocks[0])) + s.Require().True(bc.findPendingBlock(blocks[0].Position) == blocks[0]) + s.Require().True(bc.findPendingBlock(blocks[1].Position) == blocks[1]) + s.Require().True(bc.findPendingBlock(blocks[2].Position) == blocks[2]) + s.Require().True(bc.findPendingBlock(blocks[3].Position) == blocks[3]) + s.Require().Nil(bc.findPendingBlock(blocks[4].Position)) + s.Require().True(bc.findPendingBlock(blocks[5].Position) == blocks[5]) + s.Require().True(bc.findPendingBlock(blocks[6].Position) == blocks[6]) +} + +func (s *BlockChainTestSuite) TestAddEmptyBlockDirectly() { + bc := s.newBlockChain(nil, 10) + blocks := s.newBlocks(1, nil) + s.Require().NoError(bc.addBlock(blocks[0])) + // Add an empty block after a normal block. + pos := types.Position{Height: 2} + emptyB1, err := bc.addEmptyBlock(pos) + s.Require().NotNil(emptyB1) + s.Require().True(emptyB1.Position.Equal(pos)) + s.Require().NoError(err) + // Add an empty block after an empty block. + pos = types.Position{Height: 3} + emptyB2, err := bc.addEmptyBlock(pos) + s.Require().NotNil(emptyB2) + s.Require().True(emptyB2.Position.Equal(pos)) + s.Require().NoError(err) + // prepare a normal block. + pos = types.Position{Height: 4} + expectedTimestamp := emptyB2.Timestamp.Add(s.blockInterval) + b3, err := bc.proposeBlock(pos, expectedTimestamp.Add(-100*time.Second), false) + s.Require().NotNil(b3) + s.Require().NoError(err) + // The timestamp should be refined. + s.Require().True(b3.Timestamp.Equal(expectedTimestamp)) + // Add an empty block far away from current tip. + pos = types.Position{Height: 5} + emptyB4, err := bc.addEmptyBlock(pos) + s.Require().Nil(emptyB4) + s.Require().NoError(err) + // propose an empty block based on the block at height=3, which mimics the + // scenario that the empty block is pulled from others. + emptyB4 = &types.Block{ + ParentHash: b3.Hash, + Position: pos, + Timestamp: b3.Timestamp.Add(s.blockInterval), + Witness: types.Witness{ + Height: b3.Witness.Height, + Data: b3.Witness.Data, // Hacky, don't worry. + }, + } + emptyB4.Hash, err = utils.HashBlock(emptyB4) + s.Require().NoError(err) + s.Require().NoError(bc.addBlock(emptyB4)) + rec, found := bc.pendingBlocks.searchByHeight(5) + s.Require().True(found) + s.Require().NotNil(rec.block) +} + +func (s *BlockChainTestSuite) TestPrepareBlock() { + roundLength := uint64(2) + bc := s.newBlockChain(nil, roundLength) + // Try to propose blocks at height=0. 
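+ // (prepareBlock is expected to reject any position that does not extend
+ // the current tip; the assertions below walk through those error cases.)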
+ b0, err := bc.prepareBlock(types.Position{Height: types.GenesisHeight + 1}, + s.dMoment, false) + s.Require().Nil(b0) + s.Require().EqualError(ErrNotGenesisBlock, err.Error()) + b0, err = bc.prepareBlock(types.Position{Height: types.GenesisHeight}, + s.dMoment, false) + s.Require().NoError(err) + s.Require().Equal(b0.Position, types.Position{Height: types.GenesisHeight}) + s.Require().True(b0.Timestamp.Equal(s.dMoment.Add(s.blockInterval))) + empty0, err := bc.prepareBlock(types.Position{Height: types.GenesisHeight}, + s.dMoment, true) + s.Require().NoError(err) + s.Require().Equal(empty0.Position, types.Position{ + Height: types.GenesisHeight}) + s.Require().True(empty0.Timestamp.Equal(s.dMoment.Add(s.blockInterval))) + // Try to propose blocks at height=1. + s.Require().NoError(bc.addBlock(b0)) + prepare1 := func(empty bool) *types.Block { + b, err := bc.prepareBlock(types.Position{Height: types.GenesisHeight}, + s.dMoment, empty) + s.Require().Nil(b) + s.Require().EqualError(ErrNotFollowTipPosition, err.Error()) + b, err = bc.prepareBlock(types.Position{ + Height: types.GenesisHeight + 2}, s.dMoment, empty) + s.Require().Nil(b) + s.Require().EqualError(ErrNotFollowTipPosition, err.Error()) + b, err = bc.prepareBlock(types.Position{ + Round: 1, + Height: types.GenesisHeight + 1}, s.dMoment, empty) + s.Require().Nil(b) + s.Require().EqualError(ErrInvalidRoundID, err.Error()) + b, err = bc.prepareBlock(types.Position{ + Height: types.GenesisHeight + 1}, s.dMoment, empty) + s.Require().NoError(err) + s.Require().NotNil(b) + s.Require().Equal(b.ParentHash, b0.Hash) + s.Require().True(b.Timestamp.Equal(b0.Timestamp.Add(s.blockInterval))) + return b + } + b1 := prepare1(false) + prepare1(true) + // Try to propose blocks at height=2, which should trigger round switch. + s.Require().NoError(bc.notifyRoundEvents([]utils.RoundEventParam{ + utils.RoundEventParam{ + Round: 1, + Reset: 0, + BeginHeight: types.GenesisHeight + roundLength, + Config: &types.Config{ + MinBlockInterval: s.blockInterval, + RoundLength: roundLength, + }}})) + s.Require().NoError(bc.addBlock(b1)) + prepare2 := func(empty bool) *types.Block { + b, err := bc.prepareBlock(types.Position{ + Height: types.GenesisHeight + 2}, s.dMoment, empty) + s.Require().EqualError(ErrRoundNotSwitch, err.Error()) + s.Require().Nil(b) + b, err = bc.prepareBlock(types.Position{ + Round: 1, + Height: types.GenesisHeight + 2}, s.dMoment, empty) + s.Require().NoError(err) + s.Require().NotNil(b) + s.Require().Equal(b.ParentHash, b1.Hash) + s.Require().True(b.Timestamp.Equal(b1.Timestamp.Add(s.blockInterval))) + return b + } + prepare2(false) + prepare2(true) +} + +func TestBlockChain(t *testing.T) { + suite.Run(t, new(BlockChainTestSuite)) +} diff --git a/dex/consensus/core/configuration-chain.go b/dex/consensus/core/configuration-chain.go new file mode 100644 index 000000000..3b4cdbbc8 --- /dev/null +++ b/dex/consensus/core/configuration-chain.go @@ -0,0 +1,795 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ "github.com/dexon-foundation/dexon-consensus/core/db"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/utils"
+)
+
+// Errors for configuration chain.
+var (
+ ErrDKGNotRegistered = fmt.Errorf(
+ "not yet registered in DKG protocol")
+ ErrTSigAlreadyRunning = fmt.Errorf(
+ "tsig is already running")
+ ErrDKGNotReady = fmt.Errorf(
+ "DKG is not ready")
+ ErrSkipButNoError = fmt.Errorf(
+ "skip but no error")
+ ErrDKGAborted = fmt.Errorf(
+ "DKG is aborted")
+)
+
+// ErrMismatchDKG represents a failed attempt to run the DKG protocol because
+// the registered DKG protocol is mismatched, in terms of round and resetCount.
+type ErrMismatchDKG struct {
+ expectRound, expectReset uint64
+ actualRound, actualReset uint64
+}
+
+func (e ErrMismatchDKG) Error() string {
+ return fmt.Sprintf(
+ "mismatch DKG, abort running: expect(%d %d) actual(%d %d)",
+ e.expectRound, e.expectReset, e.actualRound, e.actualReset)
+}
+
+type dkgStepFn func(round uint64, reset uint64) error
+
+type configurationChain struct {
+ ID types.NodeID
+ recv dkgReceiver
+ gov Governance
+ dkg *dkgProtocol
+ dkgRunPhases []dkgStepFn
+ logger common.Logger
+ dkgLock sync.RWMutex
+ dkgSigner map[uint64]*dkgShareSecret
+ npks map[uint64]*typesDKG.NodePublicKeys
+ complaints []*typesDKG.Complaint
+ dkgResult sync.RWMutex
+ tsig map[common.Hash]*tsigProtocol
+ tsigTouched map[common.Hash]struct{}
+ tsigReady *sync.Cond
+ cache *utils.NodeSetCache
+ db db.Database
+ notarySet map[types.NodeID]struct{}
+ mpkReady bool
+ pendingPrvShare map[types.NodeID]*typesDKG.PrivateShare
+ // TODO(jimmy-dexon): add timeout to pending psig.
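+ // pendingPsig buffers partial signatures that arrive before runTSig has
+ // created the matching tsigProtocol; runTSig drains this buffer when the
+ // protocol starts.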
+ pendingPsig map[common.Hash][]*typesDKG.PartialSignature
+ prevHash common.Hash
+ dkgCtx context.Context
+ dkgCtxCancel context.CancelFunc
+ dkgRunning bool
+}
+
+func newConfigurationChain(
+ ID types.NodeID,
+ recv dkgReceiver,
+ gov Governance,
+ cache *utils.NodeSetCache,
+ dbInst db.Database,
+ logger common.Logger) *configurationChain {
+ configurationChain := &configurationChain{
+ ID: ID,
+ recv: recv,
+ gov: gov,
+ logger: logger,
+ dkgSigner: make(map[uint64]*dkgShareSecret),
+ npks: make(map[uint64]*typesDKG.NodePublicKeys),
+ tsig: make(map[common.Hash]*tsigProtocol),
+ tsigTouched: make(map[common.Hash]struct{}),
+ tsigReady: sync.NewCond(&sync.Mutex{}),
+ cache: cache,
+ db: dbInst,
+ pendingPsig: make(map[common.Hash][]*typesDKG.PartialSignature),
+ }
+ configurationChain.initDKGPhasesFunc()
+ return configurationChain
+}
+
+func (cc *configurationChain) abortDKG(
+ parentCtx context.Context,
+ round, reset uint64) bool {
+ cc.dkgLock.Lock()
+ defer cc.dkgLock.Unlock()
+ if cc.dkg != nil {
+ return cc.abortDKGNoLock(parentCtx, round, reset)
+ }
+ return false
+}
+
+func (cc *configurationChain) abortDKGNoLock(
+ ctx context.Context,
+ round, reset uint64) bool {
+ if cc.dkg.round > round ||
+ (cc.dkg.round == round && cc.dkg.reset > reset) {
+ cc.logger.Error("Newer DKG is already registered",
+ "round", round,
+ "reset", reset)
+ return false
+ }
+ cc.logger.Error("Previous DKG is not finished",
+ "round", round,
+ "reset", reset,
+ "previous-round", cc.dkg.round,
+ "previous-reset", cc.dkg.reset)
+ // Abort DKG routine in previous round.
+ cc.logger.Error("Aborting DKG in previous round",
+ "round", round,
+ "previous-round", cc.dkg.round)
+ // Notify the currently running DKG protocol to abort.
+ if cc.dkgCtxCancel != nil {
+ cc.dkgCtxCancel()
+ }
+ cc.dkgLock.Unlock()
+ // Wait for the currently running DKG protocol to abort.
+ for {
+ cc.dkgLock.Lock()
+ if !cc.dkgRunning {
+ cc.dkg = nil
+ break
+ }
+ select {
+ case <-ctx.Done():
+ return false
+ case <-time.After(100 * time.Millisecond):
+ }
+ cc.dkgLock.Unlock()
+ }
+ cc.logger.Error("Previous DKG aborted",
+ "round", round,
+ "reset", reset)
+ return cc.dkg == nil
+}
+
+func (cc *configurationChain) registerDKG(
+ parentCtx context.Context,
+ round, reset uint64,
+ threshold int) {
+ cc.dkgLock.Lock()
+ defer cc.dkgLock.Unlock()
+ if cc.dkg != nil {
+ // Make sure we only proceed when cc.dkg is nil.
+ if !cc.abortDKGNoLock(parentCtx, round, reset) {
+ return
+ }
+ select {
+ case <-parentCtx.Done():
+ return
+ default:
+ }
+ if cc.dkg != nil {
+ // This panic can only be raised by multiple simultaneous attempts
+ // to register a DKG protocol.
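+ // (abortDKGNoLock releases dkgLock while waiting, so a concurrent
+ // registerDKG may have installed a fresh protocol in the meantime;
+ // reaching this point means exactly that happened.)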
+ panic(ErrMismatchDKG{
+ expectRound: round,
+ expectReset: reset,
+ actualRound: cc.dkg.round,
+ actualReset: cc.dkg.reset,
+ })
+ }
+ }
+ notarySet, err := cc.cache.GetNotarySet(round)
+ if err != nil {
+ cc.logger.Error("Error getting notary set from cache", "error", err)
+ return
+ }
+ cc.notarySet = notarySet
+ cc.pendingPrvShare = make(map[types.NodeID]*typesDKG.PrivateShare)
+ cc.mpkReady = false
+ cc.dkg, err = recoverDKGProtocol(cc.ID, cc.recv, round, reset, cc.db)
+ cc.dkgCtx, cc.dkgCtxCancel = context.WithCancel(parentCtx)
+ if err != nil {
+ panic(err)
+ }
+ if cc.dkg == nil {
+ cc.dkg = newDKGProtocol(
+ cc.ID,
+ cc.recv,
+ round,
+ reset,
+ threshold)
+
+ err = cc.db.PutOrUpdateDKGProtocol(cc.dkg.toDKGProtocolInfo())
+ if err != nil {
+ cc.logger.Error("Error putting or updating DKG protocol", "error",
+ err)
+ return
+ }
+ }
+
+ go func() {
+ ticker := newTicker(cc.gov, round, TickerDKG)
+ defer ticker.Stop()
+ <-ticker.Tick()
+ cc.dkgLock.Lock()
+ defer cc.dkgLock.Unlock()
+ if cc.dkg != nil && cc.dkg.round == round && cc.dkg.reset == reset {
+ cc.dkg.proposeMPKReady()
+ }
+ }()
+}
+
+func (cc *configurationChain) runDKGPhaseOne(round uint64, reset uint64) error {
+ if cc.dkg.round < round ||
+ (cc.dkg.round == round && cc.dkg.reset < reset) {
+ return ErrDKGNotRegistered
+ }
+ if cc.dkg.round != round || cc.dkg.reset != reset {
+ cc.logger.Warn("DKG canceled", "round", round, "reset", reset)
+ return ErrSkipButNoError
+ }
+ cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round)
+ if cc.gov.IsDKGFinal(round) {
+ cc.logger.Warn("DKG already final", "round", round)
+ return ErrSkipButNoError
+ }
+ cc.logger.Debug("Calling Governance.IsDKGMPKReady", "round", round)
+ var err error
+ for err == nil && !cc.gov.IsDKGMPKReady(round) {
+ cc.dkgLock.Unlock()
+ cc.logger.Debug("DKG MPKs are not ready yet. Try again later...",
+ "nodeID", cc.ID,
+ "round", round)
+ select {
+ case <-cc.dkgCtx.Done():
+ err = ErrDKGAborted
+ case <-time.After(500 * time.Millisecond):
+ }
+ cc.dkgLock.Lock()
+ }
+ return err
+}
+
+func (cc *configurationChain) runDKGPhaseTwoAndThree(
+ round uint64, reset uint64) error {
+ // Check if this node successfully joined the protocol.
+ cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
+ mpks := cc.gov.DKGMasterPublicKeys(round)
+ inProtocol := false
+ for _, mpk := range mpks {
+ if mpk.ProposerID == cc.ID {
+ inProtocol = true
+ break
+ }
+ }
+ if !inProtocol {
+ cc.logger.Warn("Failed to join DKG protocol",
+ "round", round,
+ "reset", reset)
+ return ErrSkipButNoError
+ }
+ // Phase 2(T = 0): Exchange DKG secret key share.
+ if err := cc.dkg.processMasterPublicKeys(mpks); err != nil {
+ cc.logger.Error("Failed to process master public key",
+ "round", round,
+ "reset", reset,
+ "error", err)
+ }
+ cc.mpkReady = true
+ // Processing private shares might take a long time; check for abort
+ // before getting into that loop.
+ select {
+ case <-cc.dkgCtx.Done():
+ return ErrDKGAborted
+ default:
+ }
+ for _, prvShare := range cc.pendingPrvShare {
+ if err := cc.dkg.processPrivateShare(prvShare); err != nil {
+ cc.logger.Error("Failed to process private share",
+ "round", round,
+ "reset", reset,
+ "error", err)
+ }
+ }
+
+ // Phase 3(T = 0~λ): Propose complaints.
+ // Proposing complaints is done in `processMasterPublicKeys`.
+ return nil
+}
+
+func (cc *configurationChain) runDKGPhaseFour() {
+ // Phase 4(T = λ): Propose nack complaints.
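+ // (Presumably the targets are nodes whose private shares were never
+ // received; the precise rule lives in dkgProtocol.proposeNackComplaints.)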
+ cc.dkg.proposeNackComplaints()
+}
+
+func (cc *configurationChain) runDKGPhaseFiveAndSix(round uint64, reset uint64) {
+ // Phase 5(T = 2λ): Propose anti-nack complaints.
+ cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
+ cc.complaints = cc.gov.DKGComplaints(round)
+ if err := cc.dkg.processNackComplaints(cc.complaints); err != nil {
+ cc.logger.Error("Failed to process NackComplaint",
+ "round", round,
+ "reset", reset,
+ "error", err)
+ }
+
+ // Phase 6(T = 3λ): Rebroadcast anti-nack complaints.
+ // Rebroadcast is done in `processPrivateShare`.
+}
+
+func (cc *configurationChain) runDKGPhaseSeven() {
+ // Phase 7(T = 4λ): Enforce complaints and nack complaints.
+ cc.dkg.enforceNackComplaints(cc.complaints)
+ // Enforcing complaints is done in `processPrivateShare`.
+}
+
+func (cc *configurationChain) runDKGPhaseEight() {
+ // Phase 8(T = 5λ): DKG finalize.
+ cc.dkg.proposeFinalize()
+}
+
+func (cc *configurationChain) runDKGPhaseNine(round uint64, reset uint64) error {
+ // Phase 9(T = 6λ): DKG is ready.
+ // Normally, IsDKGFinal would return true here. This loop guards against
+ // unexpected network fluctuation and ensures the robustness of the DKG
+ // protocol.
+ cc.logger.Debug("Calling Governance.IsDKGFinal", "round", round)
+ var err error
+ for err == nil && !cc.gov.IsDKGFinal(round) {
+ cc.dkgLock.Unlock()
+ cc.logger.Debug("DKG is not ready yet. Try again later...",
+ "nodeID", cc.ID.String()[:6],
+ "round", round,
+ "reset", reset)
+ select {
+ case <-cc.dkgCtx.Done():
+ err = ErrDKGAborted
+ case <-time.After(500 * time.Millisecond):
+ }
+ cc.dkgLock.Lock()
+ }
+ if err != nil {
+ return err
+ }
+ cc.logger.Debug("Calling Governance.DKGMasterPublicKeys", "round", round)
+ cc.logger.Debug("Calling Governance.DKGComplaints", "round", round)
+ npks, err := typesDKG.NewNodePublicKeys(round,
+ cc.gov.DKGMasterPublicKeys(round),
+ cc.gov.DKGComplaints(round),
+ cc.dkg.threshold)
+ if err != nil {
+ return err
+ }
+ qualifies := ""
+ for nID := range npks.QualifyNodeIDs {
+ qualifies += fmt.Sprintf("%s ", nID.String()[:6])
+ }
+ cc.logger.Info("Qualify Nodes",
+ "nodeID", cc.ID,
+ "round", round,
+ "reset", reset,
+ "count", len(npks.QualifyIDs),
+ "qualifies", qualifies)
+ if _, exist := npks.QualifyNodeIDs[cc.ID]; !exist {
+ cc.logger.Warn("Self is not in Qualify Nodes",
+ "round", round,
+ "reset", reset)
+ return nil
+ }
+ signer, err := cc.dkg.recoverShareSecret(npks.QualifyIDs)
+ if err != nil {
+ return err
+ }
+ // Save the recovered private key to DB.
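+ // (The key is stored per (round, reset) pair so a DKG reset never reuses
+ // a stale key; recoverDKGInfo reads it back the same way.)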
+ if err = + cc.db.PutDKGPrivateKey(round, reset, *signer.privateKey); err != nil { + return err + } + cc.dkg.proposeSuccess() + cc.dkgResult.Lock() + defer cc.dkgResult.Unlock() + cc.dkgSigner[round] = signer + cc.npks[round] = npks + return nil +} + +func (cc *configurationChain) initDKGPhasesFunc() { + cc.dkgRunPhases = []dkgStepFn{ + func(round uint64, reset uint64) error { + return cc.runDKGPhaseOne(round, reset) + }, + func(round uint64, reset uint64) error { + return cc.runDKGPhaseTwoAndThree(round, reset) + }, + func(round uint64, reset uint64) error { + cc.runDKGPhaseFour() + return nil + }, + func(round uint64, reset uint64) error { + cc.runDKGPhaseFiveAndSix(round, reset) + return nil + }, + func(round uint64, reset uint64) error { + cc.runDKGPhaseSeven() + return nil + }, + func(round uint64, reset uint64) error { + cc.runDKGPhaseEight() + return nil + }, + func(round uint64, reset uint64) error { + return cc.runDKGPhaseNine(round, reset) + }, + } +} + +func (cc *configurationChain) runDKG( + round uint64, reset uint64, event *common.Event, + dkgBeginHeight, dkgHeight uint64) (err error) { + // Check if corresponding DKG signer is ready. + if _, _, err = cc.getDKGInfo(round, false); err == nil { + return ErrSkipButNoError + } + cfg := utils.GetConfigWithPanic(cc.gov, round, cc.logger) + phaseHeight := uint64( + cfg.LambdaDKG.Nanoseconds() / cfg.MinBlockInterval.Nanoseconds()) + skipPhase := int(dkgHeight / phaseHeight) + cc.logger.Info("Skipping DKG phase", "phase", skipPhase) + cc.dkgLock.Lock() + defer cc.dkgLock.Unlock() + if cc.dkg == nil { + return ErrDKGNotRegistered + } + // Make sure the existed dkgProtocol is expected one. + if cc.dkg.round != round || cc.dkg.reset != reset { + return ErrMismatchDKG{ + expectRound: round, + expectReset: reset, + actualRound: cc.dkg.round, + actualReset: cc.dkg.reset, + } + } + if cc.dkgRunning { + panic(fmt.Errorf("duplicated call to runDKG: %d %d", round, reset)) + } + cc.dkgRunning = true + defer func() { + // Here we should hold the cc.dkgLock, reset cc.dkg to nil when done. + if cc.dkg != nil { + cc.dkg = nil + } + cc.dkgRunning = false + }() + wg := sync.WaitGroup{} + var dkgError error + // Make a copy of cc.dkgCtx so each phase function can refer to the correct + // context. 
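+ // (registerDKG replaces cc.dkgCtx on every reset, so the phase closures
+ // below must capture this snapshot instead of re-reading the field.)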
+ ctx := cc.dkgCtx + cc.dkg.step = skipPhase + for i := skipPhase; i < len(cc.dkgRunPhases); i++ { + wg.Add(1) + event.RegisterHeight(dkgBeginHeight+phaseHeight*uint64(i), func(uint64) { + go func() { + defer wg.Done() + cc.dkgLock.Lock() + defer cc.dkgLock.Unlock() + if dkgError != nil { + return + } + select { + case <-ctx.Done(): + dkgError = ErrDKGAborted + return + default: + } + + err := cc.dkgRunPhases[cc.dkg.step](round, reset) + if err == nil || err == ErrSkipButNoError { + err = nil + cc.dkg.step++ + err = cc.db.PutOrUpdateDKGProtocol(cc.dkg.toDKGProtocolInfo()) + if err != nil { + cc.logger.Error("Failed to save DKG Protocol", + "step", cc.dkg.step, + "error", err) + } + } + if err != nil && dkgError == nil { + dkgError = err + } + }() + }) + } + cc.dkgLock.Unlock() + wgChan := make(chan struct{}, 1) + go func() { + wg.Wait() + wgChan <- struct{}{} + }() + select { + case <-cc.dkgCtx.Done(): + case <-wgChan: + } + cc.dkgLock.Lock() + select { + case <-cc.dkgCtx.Done(): + return ErrDKGAborted + default: + } + return dkgError +} + +func (cc *configurationChain) isDKGFinal(round uint64) bool { + if !cc.gov.IsDKGFinal(round) { + return false + } + _, _, err := cc.getDKGInfo(round, false) + return err == nil +} + +func (cc *configurationChain) getDKGInfo( + round uint64, ignoreSigner bool) ( + *typesDKG.NodePublicKeys, *dkgShareSecret, error) { + getFromCache := func() (*typesDKG.NodePublicKeys, *dkgShareSecret) { + cc.dkgResult.RLock() + defer cc.dkgResult.RUnlock() + npks := cc.npks[round] + signer := cc.dkgSigner[round] + return npks, signer + } + npks, signer := getFromCache() + if npks == nil || (!ignoreSigner && signer == nil) { + if err := cc.recoverDKGInfo(round, ignoreSigner); err != nil { + return nil, nil, err + } + npks, signer = getFromCache() + } + if npks == nil || (!ignoreSigner && signer == nil) { + return nil, nil, ErrDKGNotReady + } + return npks, signer, nil +} + +func (cc *configurationChain) recoverDKGInfo( + round uint64, ignoreSigner bool) error { + var npksExists, signerExists bool + func() { + cc.dkgResult.Lock() + defer cc.dkgResult.Unlock() + _, signerExists = cc.dkgSigner[round] + _, npksExists = cc.npks[round] + }() + if signerExists && npksExists { + return nil + } + if !cc.gov.IsDKGFinal(round) { + return ErrDKGNotReady + } + + threshold := utils.GetDKGThreshold( + utils.GetConfigWithPanic(cc.gov, round, cc.logger)) + cc.logger.Debug("Calling Governance.DKGMasterPublicKeys for recoverDKGInfo", + "round", round) + mpk := cc.gov.DKGMasterPublicKeys(round) + cc.logger.Debug("Calling Governance.DKGComplaints for recoverDKGInfo", + "round", round) + comps := cc.gov.DKGComplaints(round) + qualifies, _, err := typesDKG.CalcQualifyNodes(mpk, comps, threshold) + if err != nil { + return err + } + if len(qualifies) < + utils.GetDKGValidThreshold(utils.GetConfigWithPanic( + cc.gov, round, cc.logger)) { + return typesDKG.ErrNotReachThreshold + } + + if !npksExists { + npks, err := typesDKG.NewNodePublicKeys(round, + cc.gov.DKGMasterPublicKeys(round), + cc.gov.DKGComplaints(round), + threshold) + if err != nil { + cc.logger.Warn("Failed to create DKGNodePublicKeys", + "round", round, "error", err) + return err + } + func() { + cc.dkgResult.Lock() + defer cc.dkgResult.Unlock() + cc.npks[round] = npks + }() + } + if !signerExists && !ignoreSigner { + reset := cc.gov.DKGResetCount(round) + // Check if we have private shares in DB. 
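+ // A miss here is not fatal: the fallback below reloads the persisted
+ // DKGProtocolInfo and recovers the key from the saved shares.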
+ prvKey, err := cc.db.GetDKGPrivateKey(round, reset) + if err != nil { + cc.logger.Warn("Failed to create DKGPrivateKey", + "round", round, "error", err) + dkgProtocolInfo, err := cc.db.GetDKGProtocol() + if err != nil { + cc.logger.Warn("Unable to recover DKGProtocolInfo", + "round", round, "error", err) + return err + } + if dkgProtocolInfo.Round != round { + cc.logger.Warn("DKGProtocolInfo round mismatch", + "round", round, "infoRound", dkgProtocolInfo.Round) + return err + } + prvKeyRecover, err := + dkgProtocolInfo.PrvShares.RecoverPrivateKey(qualifies) + if err != nil { + cc.logger.Warn("Failed to recover DKGPrivateKey", + "round", round, "error", err) + return err + } + if err = cc.db.PutDKGPrivateKey( + round, reset, *prvKeyRecover); err != nil { + cc.logger.Warn("Failed to save DKGPrivateKey", + "round", round, "error", err) + } + prvKey = *prvKeyRecover + } + func() { + cc.dkgResult.Lock() + defer cc.dkgResult.Unlock() + cc.dkgSigner[round] = &dkgShareSecret{ + privateKey: &prvKey, + } + }() + } + return nil +} + +func (cc *configurationChain) preparePartialSignature( + round uint64, hash common.Hash) (*typesDKG.PartialSignature, error) { + _, signer, _ := cc.getDKGInfo(round, false) + if signer == nil { + return nil, ErrDKGNotReady + } + return &typesDKG.PartialSignature{ + ProposerID: cc.ID, + Round: round, + Hash: hash, + PartialSignature: signer.sign(hash), + }, nil +} + +func (cc *configurationChain) touchTSigHash(hash common.Hash) (first bool) { + cc.tsigReady.L.Lock() + defer cc.tsigReady.L.Unlock() + _, exist := cc.tsigTouched[hash] + cc.tsigTouched[hash] = struct{}{} + return !exist +} + +func (cc *configurationChain) untouchTSigHash(hash common.Hash) { + cc.tsigReady.L.Lock() + defer cc.tsigReady.L.Unlock() + delete(cc.tsigTouched, hash) +} + +func (cc *configurationChain) runTSig( + round uint64, hash common.Hash, wait time.Duration) ( + crypto.Signature, error) { + npks, _, _ := cc.getDKGInfo(round, false) + if npks == nil { + return crypto.Signature{}, ErrDKGNotReady + } + cc.tsigReady.L.Lock() + defer cc.tsigReady.L.Unlock() + if _, exist := cc.tsig[hash]; exist { + return crypto.Signature{}, ErrTSigAlreadyRunning + } + cc.tsig[hash] = newTSigProtocol(npks, hash) + pendingPsig := cc.pendingPsig[hash] + delete(cc.pendingPsig, hash) + go func() { + for _, psig := range pendingPsig { + if err := cc.processPartialSignature(psig); err != nil { + cc.logger.Error("Failed to process partial signature", + "nodeID", cc.ID, + "error", err) + } + } + }() + timeout := make(chan struct{}, 1) + go func() { + time.Sleep(wait) + timeout <- struct{}{} + cc.tsigReady.Broadcast() + }() + var signature crypto.Signature + var err error + for func() bool { + signature, err = cc.tsig[hash].signature() + select { + case <-timeout: + return false + default: + } + return err == ErrNotEnoughtPartialSignatures + }() { + cc.tsigReady.Wait() + } + delete(cc.tsig, hash) + if err != nil { + return crypto.Signature{}, err + } + return signature, nil +} + +func (cc *configurationChain) runCRSTSig( + round uint64, crs common.Hash) ([]byte, error) { + sig, err := cc.runTSig(round, crs, cc.gov.Configuration(round).LambdaDKG*5) + cc.logger.Info("CRS", + "nodeID", cc.ID, + "round", round+1, + "signature", sig) + return sig.Signature[:], err +} + +func (cc *configurationChain) processPrivateShare( + prvShare *typesDKG.PrivateShare) error { + cc.dkgLock.Lock() + defer cc.dkgLock.Unlock() + if cc.dkg == nil { + return nil + } + if _, exist := cc.notarySet[prvShare.ProposerID]; !exist { + return 
ErrNotDKGParticipant + } + if !cc.mpkReady { + // TODO(jimmy-dexon): remove duplicated signature check in dkg module. + ok, err := utils.VerifyDKGPrivateShareSignature(prvShare) + if err != nil { + return err + } + if !ok { + return ErrIncorrectPrivateShareSignature + } + cc.pendingPrvShare[prvShare.ProposerID] = prvShare + return nil + } + return cc.dkg.processPrivateShare(prvShare) +} + +func (cc *configurationChain) processPartialSignature( + psig *typesDKG.PartialSignature) error { + cc.tsigReady.L.Lock() + defer cc.tsigReady.L.Unlock() + if _, exist := cc.tsig[psig.Hash]; !exist { + ok, err := utils.VerifyDKGPartialSignatureSignature(psig) + if err != nil { + return err + } + if !ok { + return ErrIncorrectPartialSignatureSignature + } + cc.pendingPsig[psig.Hash] = append(cc.pendingPsig[psig.Hash], psig) + return nil + } + if err := cc.tsig[psig.Hash].processPartialSignature(psig); err != nil { + return err + } + cc.tsigReady.Broadcast() + return nil +} diff --git a/dex/consensus/core/configuration-chain_test.go b/dex/consensus/core/configuration-chain_test.go new file mode 100644 index 000000000..f8a923afd --- /dev/null +++ b/dex/consensus/core/configuration-chain_test.go @@ -0,0 +1,781 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. 
+ +package core + +import ( + "bytes" + "context" + "errors" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" + "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa" + "github.com/dexon-foundation/dexon-consensus/core/db" + "github.com/dexon-foundation/dexon-consensus/core/test" + "github.com/dexon-foundation/dexon-consensus/core/types" + typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" + "github.com/dexon-foundation/dexon-consensus/core/utils" +) + +type ConfigurationChainTestSuite struct { + suite.Suite + + nIDs types.NodeIDs + dkgIDs map[types.NodeID]dkg.ID + signers map[types.NodeID]*utils.Signer + pubKeys []crypto.PublicKey +} + +type testCCGlobalReceiver struct { + s *ConfigurationChainTestSuite + + nodes map[types.NodeID]*configurationChain + govs map[types.NodeID]Governance +} + +func newTestCCGlobalReceiver( + s *ConfigurationChainTestSuite) *testCCGlobalReceiver { + return &testCCGlobalReceiver{ + s: s, + nodes: make(map[types.NodeID]*configurationChain), + govs: make(map[types.NodeID]Governance), + } +} + +func (r *testCCGlobalReceiver) ProposeDKGComplaint( + complaint *typesDKG.Complaint) { + for _, gov := range r.govs { + gov.AddDKGComplaint(test.CloneDKGComplaint(complaint)) + } +} + +func (r *testCCGlobalReceiver) ProposeDKGMasterPublicKey( + mpk *typesDKG.MasterPublicKey) { + for _, gov := range r.govs { + gov.AddDKGMasterPublicKey(test.CloneDKGMasterPublicKey(mpk)) + } +} + +func (r *testCCGlobalReceiver) ProposeDKGPrivateShare( + prv *typesDKG.PrivateShare) { + go func() { + receiver, exist := r.nodes[prv.ReceiverID] + if !exist { + panic(errors.New("should exist")) + } + if err := receiver.processPrivateShare(prv); err != nil { + panic(err) + } + }() +} + +func (r *testCCGlobalReceiver) ProposeDKGAntiNackComplaint( + prv *typesDKG.PrivateShare) { + go func() { + for _, cc := range r.nodes { + if err := cc.processPrivateShare( + test.CloneDKGPrivateShare(prv)); err != nil { + panic(err) + } + } + }() +} + +func (r *testCCGlobalReceiver) ProposeDKGMPKReady(ready *typesDKG.MPKReady) { + for _, gov := range r.govs { + gov.AddDKGMPKReady(test.CloneDKGMPKReady(ready)) + } +} + +func (r *testCCGlobalReceiver) ProposeDKGFinalize(final *typesDKG.Finalize) { + for _, gov := range r.govs { + gov.AddDKGFinalize(test.CloneDKGFinalize(final)) + } +} + +func (r *testCCGlobalReceiver) ProposeDKGSuccess(success *typesDKG.Success) { + for _, gov := range r.govs { + gov.AddDKGSuccess(test.CloneDKGSuccess(success)) + } +} + +type testCCReceiver struct { + signer *utils.Signer + recv *testCCGlobalReceiver +} + +func newTestCCReceiver(nID types.NodeID, recv *testCCGlobalReceiver) *testCCReceiver { + return &testCCReceiver{ + signer: recv.s.signers[nID], + recv: recv, + } +} + +func (r *testCCReceiver) ProposeDKGComplaint( + complaint *typesDKG.Complaint) { + if err := r.signer.SignDKGComplaint(complaint); err != nil { + panic(err) + } + r.recv.ProposeDKGComplaint(complaint) +} + +func (r *testCCReceiver) ProposeDKGMasterPublicKey( + mpk *typesDKG.MasterPublicKey) { + if err := r.signer.SignDKGMasterPublicKey(mpk); err != nil { + panic(err) + } + r.recv.ProposeDKGMasterPublicKey(mpk) +} + +func (r *testCCReceiver) ProposeDKGPrivateShare( + prv *typesDKG.PrivateShare) { + if err := r.signer.SignDKGPrivateShare(prv); err != nil { + panic(err) + } + 
r.recv.ProposeDKGPrivateShare(prv) +} + +func (r *testCCReceiver) ProposeDKGAntiNackComplaint( + prv *typesDKG.PrivateShare) { + // We would need to propose anti nack complaint for private share from + // others. Only sign those private shares with zero length signature. + if len(prv.Signature.Signature) == 0 { + if err := r.signer.SignDKGPrivateShare(prv); err != nil { + panic(err) + } + } + r.recv.ProposeDKGAntiNackComplaint(prv) +} + +func (r *testCCReceiver) ProposeDKGMPKReady(ready *typesDKG.MPKReady) { + if err := r.signer.SignDKGMPKReady(ready); err != nil { + panic(err) + } + r.recv.ProposeDKGMPKReady(ready) +} + +func (r *testCCReceiver) ProposeDKGFinalize(final *typesDKG.Finalize) { + if err := r.signer.SignDKGFinalize(final); err != nil { + panic(err) + } + r.recv.ProposeDKGFinalize(final) +} + +func (r *testCCReceiver) ProposeDKGSuccess(success *typesDKG.Success) { + if err := r.signer.SignDKGSuccess(success); err != nil { + panic(err) + } + r.recv.ProposeDKGSuccess(success) +} + +func (s *ConfigurationChainTestSuite) setupNodes(n int) { + s.nIDs = make(types.NodeIDs, 0, n) + s.signers = make(map[types.NodeID]*utils.Signer, n) + s.dkgIDs = make(map[types.NodeID]dkg.ID) + s.pubKeys = nil + ids := make(dkg.IDs, 0, n) + for i := 0; i < n; i++ { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + nID := types.NewNodeID(prvKey.PublicKey()) + s.nIDs = append(s.nIDs, nID) + s.signers[nID] = utils.NewSigner(prvKey) + s.pubKeys = append(s.pubKeys, prvKey.PublicKey()) + id := dkg.NewID(nID.Hash[:]) + ids = append(ids, id) + s.dkgIDs[nID] = id + } +} + +type testEvent struct { + event *common.Event + ctx context.Context + cancel context.CancelFunc +} + +func newTestEvent() *testEvent { + e := &testEvent{ + event: common.NewEvent(), + } + return e +} + +func (evt *testEvent) run(interval time.Duration) { + evt.ctx, evt.cancel = context.WithCancel(context.Background()) + go func() { + height := uint64(0) + Loop: + for { + select { + case <-evt.ctx.Done(): + break Loop + case <-time.After(interval): + } + evt.event.NotifyHeight(height) + height++ + } + }() +} + +func (evt *testEvent) stop() { + evt.cancel() +} + +func (s *ConfigurationChainTestSuite) runDKG( + k, n int, round, reset uint64) map[types.NodeID]*configurationChain { + s.setupNodes(n) + + evts := make(map[types.NodeID]*testEvent) + cfgChains := make(map[types.NodeID]*configurationChain) + recv := newTestCCGlobalReceiver(s) + + for _, nID := range s.nIDs { + evts[nID] = newTestEvent() + gov, err := test.NewGovernance(test.NewState(DKGDelayRound, + s.pubKeys, 100*time.Millisecond, &common.NullLogger{}, true, + ), ConfigRoundShift) + s.Require().NoError(err) + cache := utils.NewNodeSetCache(gov) + dbInst, err := db.NewMemBackedDB() + s.Require().NoError(err) + cfgChains[nID] = newConfigurationChain(nID, + newTestCCReceiver(nID, recv), gov, cache, dbInst, + &common.NullLogger{}) + recv.nodes[nID] = cfgChains[nID] + recv.govs[nID] = gov + } + + for _, cc := range cfgChains { + cc.registerDKG(context.Background(), round, reset, k) + } + + for _, gov := range recv.govs { + s.Require().Len(gov.DKGMasterPublicKeys(round), n) + } + + errs := make(chan error, n) + wg := sync.WaitGroup{} + wg.Add(n) + for nID, cc := range cfgChains { + go func(cc *configurationChain, nID types.NodeID) { + defer wg.Done() + errs <- cc.runDKG(round, reset, evts[nID].event, 10, 0) + }(cc, nID) + evts[nID].run(100 * time.Millisecond) + defer evts[nID].stop() + } + wg.Wait() + for range cfgChains { + s.Require().NoError(<-errs) + } + return 
cfgChains
+}
+
+func (s *ConfigurationChainTestSuite) preparePartialSignature(
+ hash common.Hash,
+ round uint64,
+ cfgChains map[types.NodeID]*configurationChain) (
+ psigs []*typesDKG.PartialSignature) {
+ psigs = make([]*typesDKG.PartialSignature, 0, len(cfgChains))
+ for nID, cc := range cfgChains {
+ if _, exist := cc.npks[round]; !exist {
+ continue
+ }
+ if _, exist := cc.npks[round].QualifyNodeIDs[nID]; !exist {
+ continue
+ }
+ psig, err := cc.preparePartialSignature(round, hash)
+ s.Require().NoError(err)
+ signer, exist := s.signers[cc.ID]
+ s.Require().True(exist)
+ err = signer.SignDKGPartialSignature(psig)
+ s.Require().NoError(err)
+ psigs = append(psigs, psig)
+ }
+ return
+}
+
+// TestConfigurationChain tests the entire DKG+TSIG protocol, including
+// exchanging private shares, recovering the share secret, creating partial
+// signatures, and recovering the threshold signature.
+// All participants are honest in this test.
+func (s *ConfigurationChainTestSuite) TestConfigurationChain() {
+ k := 4
+ n := 7
+ round := DKGDelayRound
+ reset := uint64(0)
+ cfgChains := s.runDKG(k, n, round, reset)
+
+ hash := crypto.Keccak256Hash([]byte("🌚🌝"))
+ psigs := s.preparePartialSignature(hash, round, cfgChains)
+
+ // We only need k partial signatures.
+ psigs = psigs[:k]
+
+ tsigs := make([]crypto.Signature, 0, n)
+ errs := make(chan error, n)
+ tsigChan := make(chan crypto.Signature, n)
+ for nID, cc := range cfgChains {
+ if _, exist := cc.npks[round]; !exist {
+ continue
+ }
+ if _, exist := cc.npks[round].QualifyNodeIDs[nID]; !exist {
+ continue
+ }
+ go func(cc *configurationChain) {
+ tsig, err := cc.runTSig(round, hash, 5*time.Second)
+ // Prevent racing by collecting errors and checking them in the
+ // main thread.
+ errs <- err
+ tsigChan <- tsig
+ }(cc)
+ for _, psig := range psigs {
+ err := cc.processPartialSignature(psig)
+ s.Require().NoError(err)
+ }
+ }
+ for nID, cc := range cfgChains {
+ if _, exist := cc.npks[round]; !exist {
+ s.FailNow("Should be qualified")
+ }
+ if _, exist := cc.npks[round].QualifyNodeIDs[nID]; !exist {
+ s.FailNow("Should be qualified")
+ }
+ s.Require().NoError(<-errs)
+ tsig := <-tsigChan
+ for _, prevTsig := range tsigs {
+ s.Equal(prevTsig, tsig)
+ }
+ }
+}
+
+func (s *ConfigurationChainTestSuite) TestDKGMasterPublicKeyDelayAdd() {
+ k := 4
+ n := 7
+ round := DKGDelayRound
+ reset := uint64(0)
+ lambdaDKG := 1000 * time.Millisecond
+ minBlockInterval := 100 * time.Millisecond
+ s.setupNodes(n)
+
+ cfgChains := make(map[types.NodeID]*configurationChain)
+ recv := newTestCCGlobalReceiver(s)
+ delayNode := s.nIDs[0]
+
+ for _, nID := range s.nIDs {
+ state := test.NewState(DKGDelayRound,
+ s.pubKeys, 100*time.Millisecond, &common.NullLogger{}, true)
+ gov, err := test.NewGovernance(state, ConfigRoundShift)
+ s.Require().NoError(err)
+ s.Require().NoError(state.RequestChange(
+ test.StateChangeLambdaDKG, lambdaDKG))
+ s.Require().NoError(state.RequestChange(
+ test.StateChangeMinBlockInterval, minBlockInterval))
+ cache := utils.NewNodeSetCache(gov)
+ dbInst, err := db.NewMemBackedDB()
+ s.Require().NoError(err)
+ cfgChains[nID] = newConfigurationChain(
+ nID, newTestCCReceiver(nID, recv), gov, cache, dbInst,
+ &common.NullLogger{})
+ recv.nodes[nID] = cfgChains[nID]
+ recv.govs[nID] = gov
+ }
+
+ for nID, cc := range cfgChains {
+ if nID == delayNode {
+ continue
+ }
+ cc.registerDKG(context.Background(), round, reset, k)
+ }
+ time.Sleep(lambdaDKG)
+ cfgChains[delayNode].registerDKG(context.Background(), round, reset, k)
+
+ for _, gov := range recv.govs {
+ 
s.Require().Len(gov.DKGMasterPublicKeys(round), n-1) + } + + errs := make(chan error, n) + wg := sync.WaitGroup{} + wg.Add(n) + for _, cc := range cfgChains { + evt := newTestEvent() + go func(cc *configurationChain) { + defer wg.Done() + errs <- cc.runDKG(round, reset, evt.event, 0, 0) + }(cc) + evt.run(100 * time.Millisecond) + defer evt.stop() + } + wg.Wait() + for range cfgChains { + s.Require().NoError(<-errs) + } + for nID, cc := range cfgChains { + shouldExist := nID != delayNode + _, exist := cc.npks[round] + s.Equal(shouldExist, exist) + if !exist { + continue + } + _, exist = cc.npks[round].QualifyNodeIDs[nID] + s.Equal(shouldExist, exist) + } +} + +func (s *ConfigurationChainTestSuite) TestDKGComplaintDelayAdd() { + k := 4 + n := 7 + round := DKGDelayRound + reset := uint64(0) + lambdaDKG := 1000 * time.Millisecond + minBlockInterval := 100 * time.Millisecond + s.setupNodes(n) + + cfgChains := make(map[types.NodeID]*configurationChain) + recv := newTestCCGlobalReceiver(s) + recvs := make(map[types.NodeID]*testCCReceiver) + for _, nID := range s.nIDs { + state := test.NewState(DKGDelayRound, + s.pubKeys, 100*time.Millisecond, &common.NullLogger{}, true) + gov, err := test.NewGovernance(state, ConfigRoundShift) + s.Require().NoError(err) + s.Require().NoError(state.RequestChange( + test.StateChangeLambdaDKG, lambdaDKG)) + s.Require().NoError(state.RequestChange( + test.StateChangeMinBlockInterval, minBlockInterval)) + cache := utils.NewNodeSetCache(gov) + dbInst, err := db.NewMemBackedDB() + s.Require().NoError(err) + recvs[nID] = newTestCCReceiver(nID, recv) + cfgChains[nID] = newConfigurationChain(nID, recvs[nID], gov, cache, + dbInst, &common.NullLogger{}) + recv.nodes[nID] = cfgChains[nID] + recv.govs[nID] = gov + } + + for _, cc := range cfgChains { + cc.registerDKG(context.Background(), round, reset, k) + } + + for _, gov := range recv.govs { + s.Require().Len(gov.DKGMasterPublicKeys(round), n) + } + + errs := make(chan error, n) + wg := sync.WaitGroup{} + wg.Add(n) + for _, cc := range cfgChains { + evt := newTestEvent() + go func(cc *configurationChain) { + defer wg.Done() + errs <- cc.runDKG(round, reset, evt.event, 0, 0) + }(cc) + evt.run(minBlockInterval) + defer evt.stop() + } + complaints := -1 + go func() { + // Node 0 proposes NackComplaint to all others at 3λ but they should + // be ignored because NackComplaint should be proposed before 2λ. 
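+ // (Sleeping 4λ keeps the proposal comfortably past that 2λ deadline.)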
+ time.Sleep(lambdaDKG * 4) + for _, gov := range recv.govs { + if complaints == -1 { + complaints = len(gov.DKGComplaints(round)) + } + s.Require().Len(gov.DKGComplaints(round), complaints) + } + nID := s.nIDs[0] + for _, targetNode := range s.nIDs { + if targetNode == nID { + continue + } + recvs[nID].ProposeDKGComplaint(&typesDKG.Complaint{ + Round: round, + PrivateShare: typesDKG.PrivateShare{ + ProposerID: targetNode, + Round: round, + }, + }) + } + }() + wg.Wait() + complaints += len(s.nIDs) - 1 + for _, gov := range recv.govs { + s.Require().Len(gov.DKGComplaints(round), complaints) + } + for range cfgChains { + s.Require().NoError(<-errs) + } + for nID, cc := range cfgChains { + if _, exist := cc.npks[round]; !exist { + s.FailNow("Should be qualified") + } + if _, exist := cc.npks[round].QualifyNodeIDs[nID]; !exist { + s.FailNow("Should be qualified") + } + } +} + +func (s *ConfigurationChainTestSuite) TestMultipleTSig() { + k := 2 + n := 7 + round := DKGDelayRound + reset := uint64(0) + cfgChains := s.runDKG(k, n, round, reset) + + hash1 := crypto.Keccak256Hash([]byte("Hash1")) + hash2 := crypto.Keccak256Hash([]byte("Hash2")) + + psigs1 := s.preparePartialSignature(hash1, round, cfgChains) + psigs2 := s.preparePartialSignature(hash2, round, cfgChains) + + tsigs1 := make([]crypto.Signature, 0, n) + tsigs2 := make([]crypto.Signature, 0, n) + + errs := make(chan error, n*2) + tsigChan1 := make(chan crypto.Signature, n) + tsigChan2 := make(chan crypto.Signature, n) + for nID, cc := range cfgChains { + if _, exist := cc.npks[round].QualifyNodeIDs[nID]; !exist { + continue + } + go func(cc *configurationChain) { + tsig1, err := cc.runTSig(round, hash1, 5*time.Second) + // Prevent racing by collecting errors and check in main thread. + errs <- err + tsigChan1 <- tsig1 + }(cc) + go func(cc *configurationChain) { + tsig2, err := cc.runTSig(round, hash2, 5*time.Second) + // Prevent racing by collecting errors and check in main thread. + errs <- err + tsigChan2 <- tsig2 + }(cc) + for _, psig := range psigs1 { + err := cc.processPartialSignature(psig) + s.Require().NoError(err) + } + for _, psig := range psigs2 { + err := cc.processPartialSignature(psig) + s.Require().NoError(err) + } + } + for nID, cc := range cfgChains { + if _, exist := cc.npks[round].QualifyNodeIDs[nID]; !exist { + continue + } + s.Require().NoError(<-errs) + tsig1 := <-tsigChan1 + for _, prevTsig := range tsigs1 { + s.Equal(prevTsig, tsig1) + } + s.Require().NoError(<-errs) + tsig2 := <-tsigChan2 + for _, prevTsig := range tsigs2 { + s.Equal(prevTsig, tsig2) + } + } +} + +func (s *ConfigurationChainTestSuite) TestTSigTimeout() { + k := 2 + n := 7 + round := DKGDelayRound + reset := uint64(0) + cfgChains := s.runDKG(k, n, round, reset) + timeout := 6 * time.Second + + hash := crypto.Keccak256Hash([]byte("🍯🍋")) + + psigs := s.preparePartialSignature(hash, round, cfgChains) + + errs := make(chan error, n) + qualify := 0 + for nID, cc := range cfgChains { + if _, exist := cc.npks[round].QualifyNodeIDs[nID]; !exist { + continue + } + qualify++ + go func(cc *configurationChain) { + _, err := cc.runTSig(round, hash, 5*time.Second) + // Prevent racing by collecting errors and check in main thread. + errs <- err + }(cc) + // Only 1 partial signature is provided. 
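+ // With k = 2 shares required, a single psig can never complete the
+ // threshold signature, so every runTSig above should time out with
+ // ErrNotEnoughtPartialSignatures.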
+ err := cc.processPartialSignature(psigs[0])
+ s.Require().NoError(err)
+ }
+ time.Sleep(timeout)
+ s.Require().Len(errs, qualify)
+ for nID, cc := range cfgChains {
+ if _, exist := cc.npks[round].QualifyNodeIDs[nID]; !exist {
+ continue
+ }
+ s.Equal(<-errs, ErrNotEnoughtPartialSignatures)
+ }
+}
+
+func (s *ConfigurationChainTestSuite) TestDKGSignerRecoverFromDB() {
+ k := 2
+ n := 7
+ round := DKGDelayRound
+ reset := uint64(0)
+ cfgChains := s.runDKG(k, n, round, reset)
+ hash := crypto.Keccak256Hash([]byte("Hash1"))
+ // Make sure we have at least one configurationChain instance.
+ s.Require().True(len(cfgChains) > 0)
+ for _, cc := range cfgChains {
+ psig1, err := cc.preparePartialSignature(round, hash)
+ s.Require().NoError(err)
+ // Create a cloned configurationChain; we should be able to recover
+ // the DKG signer.
+ clonedCC := newConfigurationChain(
+ cc.ID, cc.recv, cc.gov, cc.cache, cc.db, cc.logger,
+ )
+ psig2, err := clonedCC.preparePartialSignature(round, hash)
+ s.Require().NoError(err)
+ // Make sure the signed signatures are equal.
+ s.Require().Equal(bytes.Compare(
+ psig1.PartialSignature.Signature,
+ psig2.PartialSignature.Signature), 0)
+ }
+}
+
+func (s *ConfigurationChainTestSuite) TestDKGPhasesSnapShot() {
+ k := 2
+ n := 7
+ round := DKGDelayRound
+ cfgChains := s.runDKG(k, n, round, 0)
+
+ for _, cfgChain := range cfgChains {
+ info, err := cfgChain.db.GetDKGProtocol()
+ s.Require().NoError(err)
+ s.Require().Equal(uint64(7), info.Step)
+ }
+}
+
+func (s *ConfigurationChainTestSuite) TestDKGAbort() {
+ n := 4
+ k := 1
+ round := DKGDelayRound
+ reset := uint64(0)
+ s.setupNodes(n)
+ gov, err := test.NewGovernance(test.NewState(DKGDelayRound,
+ s.pubKeys, 100*time.Millisecond, &common.NullLogger{}, true,
+ ), ConfigRoundShift)
+ s.Require().NoError(err)
+ gov.CatchUpWithRound(round + 1)
+ cache := utils.NewNodeSetCache(gov)
+ dbInst, err := db.NewMemBackedDB()
+ s.Require().NoError(err)
+ recv := newTestCCGlobalReceiver(s)
+ nID := s.nIDs[0]
+ cc := newConfigurationChain(nID,
+ newTestCCReceiver(nID, recv), gov, cache, dbInst,
+ &common.NullLogger{})
+ recv.nodes[nID] = cc
+ recv.govs[nID] = gov
+ // The first register should not be blocked.
+ cc.registerDKG(context.Background(), round, reset, k)
+ // We should be blocked because there are not enough DKGReady messages.
+ errs := make(chan error, 1)
+ evt := newTestEvent()
+ go func() {
+ errs <- cc.runDKG(round, reset, evt.event, 0, 0)
+ }()
+ evt.run(100 * time.Millisecond)
+ defer evt.stop()
+
+ // The second register shouldn't be blocked either.
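+ // (ResetDKG bumps the reset counter; the runDKG started above is expected
+ // to abort with ErrDKGAborted once the new registration comes in.)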
+ randHash := common.NewRandomHash()
+ gov.ResetDKG(randHash[:])
+ for func() bool {
+ cc.dkgLock.RLock()
+ defer cc.dkgLock.RUnlock()
+ return !cc.dkgRunning
+ }() {
+ time.Sleep(100 * time.Millisecond)
+ }
+ cc.registerDKG(context.Background(), round, reset+1, k)
+ err = <-errs
+ s.Require().EqualError(ErrDKGAborted, err.Error())
+ go func() {
+ errs <- cc.runDKG(round, reset+1, evt.event, 0, 0)
+ }()
+ // The third register shouldn't be blocked either.
+ randHash = common.NewRandomHash()
+ gov.ProposeCRS(round+1, randHash[:])
+ randHash = common.NewRandomHash()
+ gov.ResetDKG(randHash[:])
+ for func() bool {
+ cc.dkgLock.RLock()
+ defer cc.dkgLock.RUnlock()
+ return !cc.dkgRunning
+ }() {
+ time.Sleep(100 * time.Millisecond)
+ }
+ cc.registerDKG(context.Background(), round+1, reset+1, k)
+ err = <-errs
+ s.Require().EqualError(ErrDKGAborted, err.Error())
+ go func() {
+ errs <- cc.runDKG(round+1, reset+1, evt.event, 0, 0)
+ }()
+ for func() bool {
+ cc.dkgLock.RLock()
+ defer cc.dkgLock.RUnlock()
+ return !cc.dkgRunning
+ }() {
+ time.Sleep(100 * time.Millisecond)
+ }
+ // Abort with older round, shouldn't be aborted.
+ aborted := cc.abortDKG(context.Background(), round, reset+1)
+ s.Require().False(aborted)
+ select {
+ case err = <-errs:
+ // Should not be aborted yet.
+ s.Require().False(true)
+ default:
+ }
+ // Abort with older reset, shouldn't be aborted.
+ aborted = cc.abortDKG(context.Background(), round+1, reset)
+ s.Require().False(aborted)
+ select {
+ case err = <-errs:
+ // Should not be aborted yet.
+ s.Require().False(true)
+ default:
+ }
+ // Abort with same round/reset, should be aborted.
+ aborted = cc.abortDKG(context.Background(), round+1, reset+1)
+ s.Require().True(aborted)
+ err = <-errs
+ s.Require().EqualError(ErrDKGAborted, err.Error())
+ // Abort while not running yet, should return "aborted".
+ cc.registerDKG(context.Background(), round+1, reset+1, k)
+ aborted = cc.abortDKG(context.Background(), round+1, reset+1)
+ s.Require().True(aborted)
+}
+
+func TestConfigurationChain(t *testing.T) {
+ suite.Run(t, new(ConfigurationChainTestSuite))
+}
diff --git a/dex/consensus/core/consensus.go b/dex/consensus/core/consensus.go
new file mode 100644
index 000000000..fd8456487
--- /dev/null
+++ b/dex/consensus/core/consensus.go
@@ -0,0 +1,1567 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ cryptoDKG "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/db"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/utils"
+)
+
+// Errors for consensus core.
+var (
+ ErrProposerNotInNodeSet = fmt.Errorf(
+ "proposer is not in node set")
+ ErrIncorrectHash = fmt.Errorf(
+ "hash of block is incorrect")
+ ErrIncorrectSignature = fmt.Errorf(
+ "signature of block is incorrect")
+ ErrUnknownBlockProposed = fmt.Errorf(
+ "unknown block is proposed")
+ ErrIncorrectAgreementResultPosition = fmt.Errorf(
+ "incorrect agreement result position")
+ ErrNotEnoughVotes = fmt.Errorf(
+ "not enough votes")
+ ErrCRSNotReady = fmt.Errorf(
+ "CRS not ready")
+ ErrConfigurationNotReady = fmt.Errorf(
+ "Configuration not ready")
+ ErrIncorrectBlockRandomness = fmt.Errorf(
+ "randomness of block is incorrect")
+ ErrCannotVerifyBlockRandomness = fmt.Errorf(
+ "cannot verify block randomness")
+)
+
+type selfAgreementResult types.AgreementResult
+
+// consensusBAReceiver implements agreementReceiver.
+type consensusBAReceiver struct {
+ consensus *Consensus
+ agreementModule *agreement
+ emptyBlockHashMap *sync.Map
+ isNotary bool
+ restartNotary chan types.Position
+ npks *typesDKG.NodePublicKeys
+ psigSigner *dkgShareSecret
+}
+
+func (recv *consensusBAReceiver) emptyBlockHash(pos types.Position) (
+ common.Hash, error) {
+ hashVal, ok := recv.emptyBlockHashMap.Load(pos)
+ if ok {
+ return hashVal.(common.Hash), nil
+ }
+ emptyBlock, err := recv.consensus.bcModule.prepareBlock(
+ pos, time.Time{}, true)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ hash, err := utils.HashBlock(emptyBlock)
+ if err != nil {
+ return common.Hash{}, err
+ }
+ recv.emptyBlockHashMap.Store(pos, hash)
+ return hash, nil
+}
+
+func (recv *consensusBAReceiver) VerifyPartialSignature(vote *types.Vote) (
+ bool, bool) {
+ if vote.Position.Round >= DKGDelayRound && vote.BlockHash != types.SkipBlockHash {
+ if vote.Type == types.VoteCom || vote.Type == types.VoteFastCom {
+ if recv.npks == nil {
+ recv.consensus.logger.Debug(
+ "Unable to verify psig, npks is nil",
+ "vote", vote)
+ return false, false
+ }
+ if vote.Position.Round != recv.npks.Round {
+ recv.consensus.logger.Debug(
+ "Unable to verify psig, round of npks mismatch",
+ "vote", vote,
+ "npksRound", recv.npks.Round)
+ return false, false
+ }
+ pubKey, exist := recv.npks.PublicKeys[vote.ProposerID]
+ if !exist {
+ recv.consensus.logger.Debug(
+ "Unable to verify psig, proposer is not qualified",
+ "vote", vote)
+ return false, true
+ }
+ blockHash := vote.BlockHash
+ if blockHash == types.NullBlockHash {
+ var err error
+ blockHash, err = recv.emptyBlockHash(vote.Position)
+ if err != nil {
+ recv.consensus.logger.Error(
+ "Failed to verify vote for empty block",
+ "position", vote.Position,
+ "error", err)
+ return false, true
+ }
+ }
+ return pubKey.VerifySignature(
+ blockHash, crypto.Signature(vote.PartialSignature)), true
+ }
+ }
+ return len(vote.PartialSignature.Signature) == 0, true
+}
+
+func (recv *consensusBAReceiver) ProposeVote(vote *types.Vote) {
+ if !recv.isNotary {
+ return
+ }
+ if recv.psigSigner != nil &&
+ vote.BlockHash != types.SkipBlockHash {
+ if vote.Type 
== types.VoteCom || vote.Type == types.VoteFastCom { + if vote.BlockHash == types.NullBlockHash { + hash, err := recv.emptyBlockHash(vote.Position) + if err != nil { + recv.consensus.logger.Error( + "Failed to propose vote for empty block", + "position", vote.Position, + "error", err) + return + } + vote.PartialSignature = recv.psigSigner.sign(hash) + } else { + vote.PartialSignature = recv.psigSigner.sign(vote.BlockHash) + } + } + } + if err := recv.agreementModule.prepareVote(vote); err != nil { + recv.consensus.logger.Error("Failed to prepare vote", "error", err) + return + } + go func() { + if err := recv.agreementModule.processVote(vote); err != nil { + recv.consensus.logger.Error("Failed to process self vote", + "error", err, + "vote", vote) + return + } + recv.consensus.logger.Debug("Calling Network.BroadcastVote", + "vote", vote) + recv.consensus.network.BroadcastVote(vote) + }() +} + +func (recv *consensusBAReceiver) ProposeBlock() common.Hash { + if !recv.isNotary { + return common.Hash{} + } + block, err := recv.consensus.proposeBlock(recv.agreementModule.agreementID()) + if err != nil || block == nil { + recv.consensus.logger.Error("Unable to propose block", "error", err) + return types.NullBlockHash + } + go func() { + if err := recv.consensus.preProcessBlock(block); err != nil { + recv.consensus.logger.Error("Failed to pre-process block", "error", err) + return + } + recv.consensus.logger.Debug("Calling Network.BroadcastBlock", + "block", block) + recv.consensus.network.BroadcastBlock(block) + }() + return block.Hash +} + +func (recv *consensusBAReceiver) ConfirmBlock( + hash common.Hash, votes map[types.NodeID]*types.Vote) { + var ( + block *types.Block + aID = recv.agreementModule.agreementID() + ) + + isEmptyBlockConfirmed := hash == common.Hash{} + if isEmptyBlockConfirmed { + recv.consensus.logger.Info("Empty block is confirmed", "position", aID) + var err error + block, err = recv.consensus.bcModule.addEmptyBlock(aID) + if err != nil { + recv.consensus.logger.Error("Add position for empty failed", + "error", err) + return + } + if block == nil { + // The empty block's parent is not found locally, thus we can't + // propose it at this moment. + // + // We can only rely on block pulling upon receiving + // types.AgreementResult from the next position. 
+ recv.consensus.logger.Warn( + "An empty block is confirmed without its parent", + "position", aID) + return + } + } else { + var exist bool + block, exist = recv.agreementModule.findBlockNoLock(hash) + if !exist { + recv.consensus.logger.Debug("Unknown block confirmed", + "hash", hash.String()[:6]) + ch := make(chan *types.Block) + func() { + recv.consensus.lock.Lock() + defer recv.consensus.lock.Unlock() + recv.consensus.baConfirmedBlock[hash] = ch + }() + go func() { + hashes := common.Hashes{hash} + PullBlockLoop: + for { + recv.consensus.logger.Debug("Calling Network.PullBlock for BA block", + "hash", hash) + recv.consensus.network.PullBlocks(hashes) + select { + case block = <-ch: + break PullBlockLoop + case <-time.After(1 * time.Second): + } + } + recv.consensus.logger.Debug("Receive unknown block", + "hash", hash.String()[:6], + "position", block.Position) + recv.agreementModule.addCandidateBlock(block) + recv.agreementModule.lock.Lock() + defer recv.agreementModule.lock.Unlock() + recv.ConfirmBlock(block.Hash, votes) + }() + return + } + } + + if len(votes) == 0 && len(block.Randomness) == 0 { + recv.consensus.logger.Error("No votes to recover randomness", + "block", block) + } else if votes != nil { + voteList := make([]types.Vote, 0, len(votes)) + IDs := make(cryptoDKG.IDs, 0, len(votes)) + psigs := make([]cryptoDKG.PartialSignature, 0, len(votes)) + for _, vote := range votes { + if vote.BlockHash != hash { + continue + } + if block.Position.Round >= DKGDelayRound { + ID, exist := recv.npks.IDMap[vote.ProposerID] + if !exist { + continue + } + IDs = append(IDs, ID) + psigs = append(psigs, vote.PartialSignature) + } else { + voteList = append(voteList, *vote) + } + } + if block.Position.Round >= DKGDelayRound { + rand, err := cryptoDKG.RecoverSignature(psigs, IDs) + if err != nil { + recv.consensus.logger.Warn("Unable to recover randomness", + "block", block, + "error", err) + } else { + block.Randomness = rand.Signature[:] + } + } else { + block.Randomness = NoRand + } + + if recv.isNotary { + result := &types.AgreementResult{ + BlockHash: block.Hash, + Position: block.Position, + Votes: voteList, + IsEmptyBlock: isEmptyBlockConfirmed, + Randomness: block.Randomness, + } + // touchAgreementResult does not support concurrent access. 
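+ // The result is handed to processMsg via priorityMsgChan below, so
+ // touchAgreementResult always runs on that single goroutine.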
+ go func() {
+ recv.consensus.priorityMsgChan <- (*selfAgreementResult)(result)
+ }()
+ recv.consensus.logger.Debug("Broadcast AgreementResult",
+ "result", result)
+ recv.consensus.network.BroadcastAgreementResult(result)
+ if block.IsEmpty() {
+ recv.consensus.bcModule.addBlockRandomness(
+ block.Position, block.Randomness)
+ }
+ if block.Position.Round >= DKGDelayRound {
+ recv.consensus.logger.Debug(
+ "Broadcast finalized block",
+ "block", block)
+ recv.consensus.network.BroadcastBlock(block)
+ }
+ }
+ }
+
+ if !block.IsGenesis() &&
+ !recv.consensus.bcModule.confirmed(block.Position.Height-1) {
+ go func(hash common.Hash) {
+ parentHash := hash
+ for {
+ recv.consensus.logger.Warn("Parent block not confirmed",
+ "parent-hash", parentHash.String()[:6],
+ "cur-position", block.Position)
+ ch := make(chan *types.Block)
+ if !func() bool {
+ recv.consensus.lock.Lock()
+ defer recv.consensus.lock.Unlock()
+ if _, exist := recv.consensus.baConfirmedBlock[parentHash]; exist {
+ return false
+ }
+ recv.consensus.baConfirmedBlock[parentHash] = ch
+ return true
+ }() {
+ return
+ }
+ var block *types.Block
+ PullBlockLoop:
+ for {
+ recv.consensus.logger.Debug("Calling Network.PullBlock for parent",
+ "hash", parentHash)
+ recv.consensus.network.PullBlocks(common.Hashes{parentHash})
+ select {
+ case block = <-ch:
+ break PullBlockLoop
+ case <-time.After(1 * time.Second):
+ }
+ }
+ recv.consensus.logger.Info("Receive parent block",
+ "parent-hash", block.ParentHash.String()[:6],
+ "cur-position", block.Position)
+ if !block.IsFinalized() {
+ // TODO(jimmy): use a separate message to pull finalized
+ // block. Here, we pull it again as a workaround.
+ continue
+ }
+ recv.consensus.processBlockChan <- block
+ parentHash = block.ParentHash
+ if block.IsGenesis() || recv.consensus.bcModule.confirmed(
+ block.Position.Height-1) {
+ return
+ }
+ }
+ }(block.ParentHash)
+ }
+ if !block.IsEmpty() {
+ recv.consensus.processBlockChan <- block
+ }
+ // Drain the restartNotary channel so BA will not get stuck in a deadlock.
+CleanChannelLoop:
+ for {
+ select {
+ case <-recv.restartNotary:
+ default:
+ break CleanChannelLoop
+ }
+ }
+ recv.restartNotary <- block.Position
+}
+
+func (recv *consensusBAReceiver) PullBlocks(hashes common.Hashes) {
+ if !recv.isNotary {
+ return
+ }
+ recv.consensus.logger.Debug("Calling Network.PullBlocks", "hashes", hashes)
+ recv.consensus.network.PullBlocks(hashes)
+}
+
+func (recv *consensusBAReceiver) ReportForkVote(v1, v2 *types.Vote) {
+ recv.consensus.gov.ReportForkVote(v1, v2)
+}
+
+func (recv *consensusBAReceiver) ReportForkBlock(b1, b2 *types.Block) {
+ b1Clone := b1.Clone()
+ b2Clone := b2.Clone()
+ b1Clone.Payload = []byte{}
+ b2Clone.Payload = []byte{}
+ recv.consensus.gov.ReportForkBlock(b1Clone, b2Clone)
+}
+
+// consensusDKGReceiver implements dkgReceiver.
+type consensusDKGReceiver struct {
+ ID types.NodeID
+ gov Governance
+ signer *utils.Signer
+ nodeSetCache *utils.NodeSetCache
+ cfgModule *configurationChain
+ network Network
+ logger common.Logger
+}
+
+// ProposeDKGComplaint proposes a DKGComplaint.
+func (recv *consensusDKGReceiver) ProposeDKGComplaint(
+ complaint *typesDKG.Complaint) {
+ if err := recv.signer.SignDKGComplaint(complaint); err != nil {
+ recv.logger.Error("Failed to sign DKG complaint", "error", err)
+ return
+ }
+ recv.logger.Debug("Calling Governance.AddDKGComplaint",
+ "complaint", complaint)
+ recv.gov.AddDKGComplaint(complaint)
+}
+
+// ProposeDKGMasterPublicKey proposes a DKGMasterPublicKey.
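+// Like the other Propose* methods on this receiver, it follows the same
+// pattern: sign the payload with the node's signer, then submit it to the
+// governance interface.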
+func (recv *consensusDKGReceiver) ProposeDKGMasterPublicKey(
+ mpk *typesDKG.MasterPublicKey) {
+ if err := recv.signer.SignDKGMasterPublicKey(mpk); err != nil {
+ recv.logger.Error("Failed to sign DKG master public key", "error", err)
+ return
+ }
+ recv.logger.Debug("Calling Governance.AddDKGMasterPublicKey", "key", mpk)
+ recv.gov.AddDKGMasterPublicKey(mpk)
+}
+
+// ProposeDKGPrivateShare proposes a DKGPrivateShare.
+func (recv *consensusDKGReceiver) ProposeDKGPrivateShare(
+ prv *typesDKG.PrivateShare) {
+ if err := recv.signer.SignDKGPrivateShare(prv); err != nil {
+ recv.logger.Error("Failed to sign DKG private share", "error", err)
+ return
+ }
+ receiverPubKey, exists := recv.nodeSetCache.GetPublicKey(prv.ReceiverID)
+ if !exists {
+ recv.logger.Error("Public key for receiver not found",
+ "receiver", prv.ReceiverID.String()[:6])
+ return
+ }
+ if prv.ReceiverID == recv.ID {
+ go func() {
+ if err := recv.cfgModule.processPrivateShare(prv); err != nil {
+ recv.logger.Error("Failed to process self private share", "prvShare", prv)
+ }
+ }()
+ } else {
+ recv.logger.Debug("Calling Network.SendDKGPrivateShare",
+ "receiver", hex.EncodeToString(receiverPubKey.Bytes()))
+ recv.network.SendDKGPrivateShare(receiverPubKey, prv)
+ }
+}
+
+// ProposeDKGAntiNackComplaint proposes a DKGPrivateShare as an anti-complaint.
+func (recv *consensusDKGReceiver) ProposeDKGAntiNackComplaint(
+ prv *typesDKG.PrivateShare) {
+ if prv.ProposerID == recv.ID {
+ if err := recv.signer.SignDKGPrivateShare(prv); err != nil {
+ recv.logger.Error("Failed to sign DKG private share", "error", err)
+ return
+ }
+ }
+ recv.logger.Debug("Calling Network.BroadcastDKGPrivateShare", "share", prv)
+ recv.network.BroadcastDKGPrivateShare(prv)
+}
+
+// ProposeDKGMPKReady proposes a DKGMPKReady message.
+func (recv *consensusDKGReceiver) ProposeDKGMPKReady(ready *typesDKG.MPKReady) {
+ if err := recv.signer.SignDKGMPKReady(ready); err != nil {
+ recv.logger.Error("Failed to sign DKG ready", "error", err)
+ return
+ }
+ recv.logger.Debug("Calling Governance.AddDKGMPKReady", "ready", ready)
+ recv.gov.AddDKGMPKReady(ready)
+}
+
+// ProposeDKGFinalize proposes a DKGFinalize message.
+func (recv *consensusDKGReceiver) ProposeDKGFinalize(final *typesDKG.Finalize) {
+ if err := recv.signer.SignDKGFinalize(final); err != nil {
+ recv.logger.Error("Failed to sign DKG finalize", "error", err)
+ return
+ }
+ recv.logger.Debug("Calling Governance.AddDKGFinalize", "final", final)
+ recv.gov.AddDKGFinalize(final)
+}
+
+// ProposeDKGSuccess proposes a DKGSuccess message.
+func (recv *consensusDKGReceiver) ProposeDKGSuccess(success *typesDKG.Success) {
+ if err := recv.signer.SignDKGSuccess(success); err != nil {
+ recv.logger.Error("Failed to sign DKG success", "error", err)
+ return
+ }
+ recv.logger.Debug("Calling Governance.AddDKGSuccess", "success", success)
+ recv.gov.AddDKGSuccess(success)
+}
+
+// Consensus implements the DEXON consensus algorithm.
+type Consensus struct {
+ // Node Info.
+ ID types.NodeID
+ signer *utils.Signer
+
+ // BA.
+ baMgr *agreementMgr
+ baConfirmedBlock map[common.Hash]chan<- *types.Block
+
+ // DKG.
+ dkgRunning int32
+ dkgReady *sync.Cond
+ cfgModule *configurationChain
+
+ // Interfaces.
+ db db.Database
+ app Application
+ debugApp Debug
+ gov Governance
+ network Network
+
+ // Misc.
+ bcModule *blockChain
+ dMoment time.Time
+ nodeSetCache *utils.NodeSetCache
+ tsigVerifierCache *TSigVerifierCache
+ lock sync.RWMutex
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ event *common.Event
+ roundEvent *utils.RoundEvent
+ logger common.Logger
+ resetDeliveryGuardTicker chan struct{}
+ msgChan chan types.Msg
+ priorityMsgChan chan interface{}
+ waitGroup sync.WaitGroup
+ processBlockChan chan *types.Block
+
+ // Context of the dummy receiver used while switching from the syncer.
+ dummyCancel context.CancelFunc
+ dummyFinished <-chan struct{}
+ dummyMsgBuffer []types.Msg
+}
+
+// NewConsensus constructs a Consensus instance.
+func NewConsensus(
+ dMoment time.Time,
+ app Application,
+ gov Governance,
+ db db.Database,
+ network Network,
+ prv crypto.PrivateKey,
+ logger common.Logger) *Consensus {
+ return newConsensusForRound(
+ nil, dMoment, app, gov, db, network, prv, logger, true)
+}
+
+// NewConsensusForSimulation creates an instance of Consensus for simulation;
+// the only difference from NewConsensus is that the application is called
+// synchronously, without the non-blocking wrapper.
+func NewConsensusForSimulation(
+ dMoment time.Time,
+ app Application,
+ gov Governance,
+ db db.Database,
+ network Network,
+ prv crypto.PrivateKey,
+ logger common.Logger) *Consensus {
+ return newConsensusForRound(
+ nil, dMoment, app, gov, db, network, prv, logger, false)
+}
+
+// NewConsensusFromSyncer constructs a Consensus instance from information
+// provided by the syncer.
+//
+// You need to provide the initial block for this newly created Consensus
+// instance to bootstrap with. A proper choice is the last finalized block you
+// delivered to the syncer.
+//
+// NOTE: those confirmed blocks should be organized by chainID and sorted by
+// their positions, in ascending order.
+func NewConsensusFromSyncer(
+ initBlock *types.Block,
+ startWithEmpty bool,
+ dMoment time.Time,
+ app Application,
+ gov Governance,
+ db db.Database,
+ networkModule Network,
+ prv crypto.PrivateKey,
+ confirmedBlocks []*types.Block,
+ cachedMessages []types.Msg,
+ logger common.Logger) (*Consensus, error) {
+ // Setup Consensus instance.
+ con := newConsensusForRound(initBlock, dMoment, app, gov, db,
+ networkModule, prv, logger, true)
+ // Launch a dummy receiver before we start receiving from network module.
+ con.dummyMsgBuffer = cachedMessages
+ con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver(
+ con.ctx, networkModule.ReceiveChan(), func(msg types.Msg) {
+ con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg)
+ })
+ // Dump all BA-confirmed blocks to the consensus instance, making sure the
+ // added blocks form a DAG.
+ refBlock := initBlock
+ for _, b := range confirmedBlocks {
+ // A block can be added only when its parent block is already in the
+ // lattice; otherwise our pulling mechanism would stop at the added
+ // block and lose its parent block forever.
+ if b.Position.Height != refBlock.Position.Height+1 {
+ break
+ }
+ if err := con.processBlock(b); err != nil {
+ return nil, err
+ }
+ refBlock = b
+ }
+ if startWithEmpty {
+ emptyPos := types.Position{
+ Round: con.bcModule.tipRound(),
+ Height: initBlock.Position.Height + 1,
+ }
+ _, err := con.bcModule.addEmptyBlock(emptyPos)
+ if err != nil {
+ panic(err)
+ }
+ }
+ return con, nil
+}
+
+// newConsensusForRound creates a Consensus instance.
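+//
+// A note on wiring order: the signer must exist before the configuration
+// chain, whose DKG info backs the BLS signing closure handed back to the
+// signer; the block chain module then consumes that signer, and finally the
+// round-event handlers and the BA manager are attached in prepare().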
+func newConsensusForRound(
+ initBlock *types.Block,
+ dMoment time.Time,
+ app Application,
+ gov Governance,
+ db db.Database,
+ network Network,
+ prv crypto.PrivateKey,
+ logger common.Logger,
+ usingNonBlocking bool) *Consensus {
+ // TODO(w): load latest blockHeight from DB, and use config at that height.
+ nodeSetCache := utils.NewNodeSetCache(gov)
+ // Setup signer module.
+ signer := utils.NewSigner(prv)
+ // Check if the application implements the Debug interface.
+ var debugApp Debug
+ if a, ok := app.(Debug); ok {
+ debugApp = a
+ }
+ // Get configuration for bootstrap round.
+ initPos := types.Position{
+ Round: 0,
+ Height: types.GenesisHeight,
+ }
+ if initBlock != nil {
+ initPos = initBlock.Position
+ }
+ // Init configuration chain.
+ ID := types.NewNodeID(prv.PublicKey())
+ recv := &consensusDKGReceiver{
+ ID: ID,
+ gov: gov,
+ signer: signer,
+ nodeSetCache: nodeSetCache,
+ network: network,
+ logger: logger,
+ }
+ cfgModule := newConfigurationChain(ID, recv, gov, nodeSetCache, db, logger)
+ recv.cfgModule = cfgModule
+ signer.SetBLSSigner(
+ func(round uint64, hash common.Hash) (crypto.Signature, error) {
+ _, signer, err := cfgModule.getDKGInfo(round, false)
+ if err != nil {
+ return crypto.Signature{}, err
+ }
+ return crypto.Signature(signer.sign(hash)), nil
+ })
+ appModule := app
+ if usingNonBlocking {
+ appModule = newNonBlocking(app, debugApp)
+ }
+ tsigVerifierCache := NewTSigVerifierCache(gov, 7)
+ bcModule := newBlockChain(ID, dMoment, initBlock, appModule,
+ tsigVerifierCache, signer, logger)
+ // Construct Consensus instance.
+ con := &Consensus{
+ ID: ID,
+ app: appModule,
+ debugApp: debugApp,
+ gov: gov,
+ db: db,
+ network: network,
+ baConfirmedBlock: make(map[common.Hash]chan<- *types.Block),
+ dkgReady: sync.NewCond(&sync.Mutex{}),
+ cfgModule: cfgModule,
+ bcModule: bcModule,
+ dMoment: dMoment,
+ nodeSetCache: nodeSetCache,
+ tsigVerifierCache: tsigVerifierCache,
+ signer: signer,
+ event: common.NewEvent(),
+ logger: logger,
+ resetDeliveryGuardTicker: make(chan struct{}),
+ msgChan: make(chan types.Msg, 1024),
+ priorityMsgChan: make(chan interface{}, 1024),
+ processBlockChan: make(chan *types.Block, 1024),
+ }
+ con.ctx, con.ctxCancel = context.WithCancel(context.Background())
+ var err error
+ con.roundEvent, err = utils.NewRoundEvent(con.ctx, gov, logger, initPos,
+ ConfigRoundShift)
+ if err != nil {
+ panic(err)
+ }
+ if con.baMgr, err = newAgreementMgr(con); err != nil {
+ panic(err)
+ }
+ if err = con.prepare(initBlock); err != nil {
+ panic(err)
+ }
+ return con
+}
+
+// prepare the Consensus instance to be ready for blocks after 'initBlock'.
+// 'initBlock' could be either:
+// - nil
+// - the last finalized block
+func (con *Consensus) prepare(initBlock *types.Block) (err error) {
+ // Trigger the round validation method for the next round of the first
+ // round.
+ // The block passed from the full node should already be delivered or
+ // known by the full node, so we don't have to notify it.
+ initRound := uint64(0)
+ if initBlock != nil {
+ initRound = initBlock.Position.Round
+ }
+ if initRound == 0 {
+ if DKGDelayRound == 0 {
+ panic("not implemented yet")
+ }
+ }
+ // Measure the time elapsed in each handler of round events.
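+ // A note on the idiom used by the handlers below: `defer elapse(...)()`
+ // invokes elapse immediately, logging the start and capturing time.Now(),
+ // while the returned closure runs at defer time and logs the elapsed
+ // duration.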
+ elapse := func(what string, lastE utils.RoundEventParam) func() { + start := time.Now() + con.logger.Info("Handle round event", + "what", what, + "event", lastE) + return func() { + con.logger.Info("Finish round event", + "what", what, + "event", lastE, + "elapse", time.Since(start)) + } + } + // Register round event handler to purge cached node set. To make sure each + // modules see the up-to-date node set, we need to make sure this action + // should be taken as the first one. + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + defer elapse("purge-cache", evts[len(evts)-1])() + for _, e := range evts { + if e.Reset == 0 { + continue + } + con.nodeSetCache.Purge(e.Round + 1) + con.tsigVerifierCache.Purge(e.Round + 1) + } + }) + // Register round event handler to abort previous running DKG if any. + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + e := evts[len(evts)-1] + go func() { + defer elapse("abort-DKG", e)() + if e.Reset > 0 { + aborted := con.cfgModule.abortDKG(con.ctx, e.Round+1, e.Reset-1) + con.logger.Info("DKG aborting result", + "round", e.Round+1, + "reset", e.Reset-1, + "aborted", aborted) + } + }() + }) + // Register round event handler to update BA and BC modules. + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + defer elapse("append-config", evts[len(evts)-1])() + // Always updates newer configs to the later modules first in the data + // flow. + if err := con.bcModule.notifyRoundEvents(evts); err != nil { + panic(err) + } + if err := con.baMgr.notifyRoundEvents(evts); err != nil { + panic(err) + } + }) + // Register round event handler to reset DKG if the DKG set for next round + // failed to setup. + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + e := evts[len(evts)-1] + defer elapse("reset-DKG", e)() + nextRound := e.Round + 1 + if nextRound < DKGDelayRound { + return + } + curNotarySet, err := con.nodeSetCache.GetNotarySet(e.Round) + if err != nil { + con.logger.Error("Error getting notary set when proposing CRS", + "round", e.Round, + "error", err) + return + } + if _, exist := curNotarySet[con.ID]; !exist { + return + } + con.event.RegisterHeight(e.NextDKGResetHeight(), func(uint64) { + if ok, _ := utils.IsDKGValid( + con.gov, con.logger, nextRound, e.Reset); ok { + return + } + // Aborting all previous running DKG protocol instance if any. + go con.runCRS(e.Round, utils.Rehash(e.CRS, uint(e.Reset+1)), true) + }) + }) + // Register round event handler to propose new CRS. + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + // We don't have to propose new CRS during DKG reset, the reset of DKG + // would be done by the notary set in previous round. + e := evts[len(evts)-1] + defer elapse("propose-CRS", e)() + if e.Reset != 0 || e.Round < DKGDelayRound { + return + } + if curNotarySet, err := con.nodeSetCache.GetNotarySet(e.Round); err != nil { + con.logger.Error("Error getting notary set when proposing CRS", + "round", e.Round, + "error", err) + } else { + if _, exist := curNotarySet[con.ID]; !exist { + return + } + con.event.RegisterHeight(e.NextCRSProposingHeight(), func(uint64) { + con.logger.Debug( + "Calling Governance.CRS to check if already proposed", + "round", e.Round+1) + if (con.gov.CRS(e.Round+1) != common.Hash{}) { + con.logger.Debug("CRS already proposed", "round", e.Round+1) + return + } + go con.runCRS(e.Round, e.CRS, false) + }) + } + }) + // Touch nodeSetCache for next round. 
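+ // This re-warms cache entries for the next round after the purge handler
+ // registered above has dropped them on a DKG reset.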
+ con.roundEvent.Register(func(evts []utils.RoundEventParam) { + e := evts[len(evts)-1] + defer elapse("touch-NodeSetCache", e)() + con.event.RegisterHeight(e.NextTouchNodeSetCacheHeight(), func(uint64) { + if e.Reset == 0 { + return + } + go func() { + nextRound := e.Round + 1 + if err := con.nodeSetCache.Touch(nextRound); err != nil { + con.logger.Warn("Failed to update nodeSetCache", + "round", nextRound, + "error", err) + } + }() + }) + }) + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + e := evts[len(evts)-1] + if e.Reset != 0 { + return + } + defer elapse("touch-DKGCache", e)() + go func() { + if _, err := + con.tsigVerifierCache.Update(e.Round); err != nil { + con.logger.Warn("Failed to update tsig cache", + "round", e.Round, + "error", err) + } + }() + go func() { + threshold := utils.GetDKGThreshold( + utils.GetConfigWithPanic(con.gov, e.Round, con.logger)) + // Restore group public key. + con.logger.Debug( + "Calling Governance.DKGMasterPublicKeys for recoverDKGInfo", + "round", e.Round) + con.logger.Debug( + "Calling Governance.DKGComplaints for recoverDKGInfo", + "round", e.Round) + _, qualifies, err := typesDKG.CalcQualifyNodes( + con.gov.DKGMasterPublicKeys(e.Round), + con.gov.DKGComplaints(e.Round), + threshold) + if err != nil { + con.logger.Warn("Failed to calculate dkg set", + "round", e.Round, + "error", err) + return + } + if _, exist := qualifies[con.ID]; !exist { + return + } + if _, _, err := + con.cfgModule.getDKGInfo(e.Round, true); err != nil { + con.logger.Warn("Failed to recover DKG info", + "round", e.Round, + "error", err) + } + }() + }) + // checkCRS is a generator of checker to check if CRS for that round is + // ready or not. + checkCRS := func(round uint64) func() bool { + return func() bool { + nextCRS := con.gov.CRS(round) + if (nextCRS != common.Hash{}) { + return true + } + con.logger.Debug("CRS is not ready yet. Try again later...", + "nodeID", con.ID, + "round", round) + return false + } + } + // Trigger round validation method for next period. + con.roundEvent.Register(func(evts []utils.RoundEventParam) { + e := evts[len(evts)-1] + defer elapse("next-round", e)() + // Register a routine to trigger round events. + con.event.RegisterHeight(e.NextRoundValidationHeight(), + utils.RoundEventRetryHandlerGenerator(con.roundEvent, con.event)) + // Register a routine to register next DKG. + con.event.RegisterHeight(e.NextDKGRegisterHeight(), func(uint64) { + nextRound := e.Round + 1 + if nextRound < DKGDelayRound { + con.logger.Info("Skip runDKG for round", + "round", nextRound, + "reset", e.Reset) + return + } + go func() { + // Normally, gov.CRS would return non-nil. Use this for in case + // of unexpected network fluctuation and ensure the robustness. 
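+ // checkWithCancel below polls checkCRS(nextRound) every 500ms until
+ // the CRS shows up in governance or con.ctx is cancelled.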
+ if !checkWithCancel(
+ con.ctx, 500*time.Millisecond, checkCRS(nextRound)) {
+ con.logger.Debug("unable to prepare CRS for notary set",
+ "round", nextRound,
+ "reset", e.Reset)
+ return
+ }
+ nextNotarySet, err := con.nodeSetCache.GetNotarySet(nextRound)
+ if err != nil {
+ con.logger.Error("Error getting notary set for next round",
+ "round", nextRound,
+ "reset", e.Reset,
+ "error", err)
+ return
+ }
+ if _, exist := nextNotarySet[con.ID]; !exist {
+ con.logger.Info("Not selected as notary set",
+ "round", nextRound,
+ "reset", e.Reset)
+ return
+ }
+ con.logger.Info("Selected as notary set",
+ "round", nextRound,
+ "reset", e.Reset)
+ nextConfig := utils.GetConfigWithPanic(con.gov, nextRound,
+ con.logger)
+ con.cfgModule.registerDKG(con.ctx, nextRound, e.Reset,
+ utils.GetDKGThreshold(nextConfig))
+ con.event.RegisterHeight(e.NextDKGPreparationHeight(),
+ func(h uint64) {
+ func() {
+ con.dkgReady.L.Lock()
+ defer con.dkgReady.L.Unlock()
+ con.dkgRunning = 0
+ }()
+ // We want to skip some of the DKG phases when started.
+ dkgCurrentHeight := h - e.NextDKGPreparationHeight()
+ con.runDKG(
+ nextRound, e.Reset,
+ e.NextDKGPreparationHeight(), dkgCurrentHeight)
+ })
+ }()
+ })
+ })
+ con.roundEvent.TriggerInitEvent()
+ if initBlock != nil {
+ con.event.NotifyHeight(initBlock.Position.Height)
+ }
+ con.baMgr.prepare()
+ return
+}
+
+// Run starts running DEXON Consensus.
+func (con *Consensus) Run() {
+ // There may be empty blocks in the blockchain added by force sync.
+ blocksWithoutRandomness := con.bcModule.pendingBlocksWithoutRandomness()
+ // Launch BA routines.
+ con.baMgr.run()
+ // Launch network handler.
+ con.logger.Debug("Calling Network.ReceiveChan")
+ con.waitGroup.Add(1)
+ go con.deliverNetworkMsg()
+ con.waitGroup.Add(1)
+ go con.processMsg()
+ go con.processBlockLoop()
+ // Stop dummy receiver if launched.
+ if con.dummyCancel != nil {
+ con.logger.Trace("Stop dummy receiver")
+ con.dummyCancel()
+ <-con.dummyFinished
+ // Replay those cached messages.
+ con.logger.Trace("Dummy receiver stopped, start dumping cached messages",
+ "count", len(con.dummyMsgBuffer))
+ for _, msg := range con.dummyMsgBuffer {
+ loop:
+ for {
+ select {
+ case con.msgChan <- msg:
+ break loop
+ case <-time.After(50 * time.Millisecond):
+ con.logger.Debug(
+ "internal message channel is full when syncing")
+ }
+ }
+ }
+ con.logger.Trace("Finish dumping cached messages")
+ }
+ con.generateBlockRandomness(blocksWithoutRandomness)
+ // Sleep until dMoment comes.
+ time.Sleep(con.dMoment.Sub(time.Now().UTC()))
+ // Take some time to bootstrap.
+ time.Sleep(3 * time.Second)
+ con.waitGroup.Add(1)
+ go con.deliveryGuard()
+ // Block until done.
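+ // A typical lifecycle, as a minimal sketch (constructor arguments are
+ // application-specific):
+ //
+ // con := NewConsensus(dMoment, app, gov, dbInst, network, prv, logger)
+ // go con.Run() // blocks here until the context is cancelled
+ // ...
+ // con.Stop() // cancels the context and unblocks Run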
+ select { + case <-con.ctx.Done(): + } +} + +func (con *Consensus) generateBlockRandomness(blocks []*types.Block) { + con.logger.Debug("Start generating block randomness", "blocks", blocks) + isNotarySet := make(map[uint64]bool) + for _, block := range blocks { + if block.Position.Round < DKGDelayRound { + continue + } + doRun, exist := isNotarySet[block.Position.Round] + if !exist { + curNotarySet, err := con.nodeSetCache.GetNotarySet(block.Position.Round) + if err != nil { + con.logger.Error("Error getting notary set when generate block tsig", + "round", block.Position.Round, + "error", err) + continue + } + _, exist := curNotarySet[con.ID] + isNotarySet[block.Position.Round] = exist + doRun = exist + } + if !doRun { + continue + } + go func(block *types.Block) { + psig, err := con.cfgModule.preparePartialSignature( + block.Position.Round, block.Hash) + if err != nil { + con.logger.Error("Failed to prepare partial signature", + "block", block, + "error", err) + } else if err = con.signer.SignDKGPartialSignature(psig); err != nil { + con.logger.Error("Failed to sign DKG partial signature", + "block", block, + "error", err) + } else if err = con.cfgModule.processPartialSignature(psig); err != nil { + con.logger.Error("Failed to process partial signature", + "block", block, + "error", err) + } else { + con.logger.Debug("Calling Network.BroadcastDKGPartialSignature", + "proposer", psig.ProposerID, + "block", block) + con.network.BroadcastDKGPartialSignature(psig) + sig, err := con.cfgModule.runTSig( + block.Position.Round, + block.Hash, + 60*time.Minute, + ) + if err != nil { + con.logger.Error("Failed to run Block Tsig", + "block", block, + "error", err) + return + } + result := &types.AgreementResult{ + BlockHash: block.Hash, + Position: block.Position, + Randomness: sig.Signature[:], + } + con.bcModule.addBlockRandomness(block.Position, sig.Signature[:]) + con.logger.Debug("Broadcast BlockRandomness", + "block", block, + "result", result) + con.network.BroadcastAgreementResult(result) + if err := con.deliverFinalizedBlocks(); err != nil { + con.logger.Error("Failed to deliver finalized block", + "error", err) + } + } + }(block) + } +} + +// runDKG starts running DKG protocol. +func (con *Consensus) runDKG( + round, reset, dkgBeginHeight, dkgHeight uint64) { + con.dkgReady.L.Lock() + defer con.dkgReady.L.Unlock() + if con.dkgRunning != 0 { + return + } + con.dkgRunning = 1 + go func() { + defer func() { + con.dkgReady.L.Lock() + defer con.dkgReady.L.Unlock() + con.dkgReady.Broadcast() + con.dkgRunning = 2 + }() + if err := + con.cfgModule.runDKG( + round, reset, + con.event, dkgBeginHeight, dkgHeight); err != nil { + con.logger.Error("Failed to runDKG", "error", err) + } + }() +} + +func (con *Consensus) runCRS(round uint64, hash common.Hash, reset bool) { + // Start running next round CRS. 
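+ // The flow below: prepare a partial signature over `hash`, sign and
+ // process it locally, broadcast it, then wait for enough shares to
+ // recover the full threshold signature, which becomes the CRS (or the
+ // reset CRS) for the next round.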
+ psig, err := con.cfgModule.preparePartialSignature(round, hash)
+ if err != nil {
+ con.logger.Error("Failed to prepare partial signature", "error", err)
+ } else if err = con.signer.SignDKGPartialSignature(psig); err != nil {
+ con.logger.Error("Failed to sign DKG partial signature", "error", err)
+ } else if err = con.cfgModule.processPartialSignature(psig); err != nil {
+ con.logger.Error("Failed to process partial signature", "error", err)
+ } else {
+ con.logger.Debug("Calling Network.BroadcastDKGPartialSignature",
+ "proposer", psig.ProposerID,
+ "round", psig.Round,
+ "hash", psig.Hash)
+ con.network.BroadcastDKGPartialSignature(psig)
+ con.logger.Debug("Calling Governance.CRS", "round", round)
+ crs, err := con.cfgModule.runCRSTSig(round, hash)
+ if err != nil {
+ con.logger.Error("Failed to run CRS Tsig", "error", err)
+ } else {
+ if reset {
+ con.logger.Debug("Calling Governance.ResetDKG",
+ "round", round+1,
+ "crs", hex.EncodeToString(crs))
+ con.gov.ResetDKG(crs)
+ } else {
+ con.logger.Debug("Calling Governance.ProposeCRS",
+ "round", round+1,
+ "crs", hex.EncodeToString(crs))
+ con.gov.ProposeCRS(round+1, crs)
+ }
+ }
+ }
+}
+
+// Stop the Consensus core.
+func (con *Consensus) Stop() {
+ con.ctxCancel()
+ con.baMgr.stop()
+ con.event.Reset()
+ con.waitGroup.Wait()
+ if nbApp, ok := con.app.(*nonBlocking); ok {
+ nbApp.wait()
+ }
+}
+
+func (con *Consensus) deliverNetworkMsg() {
+ defer con.waitGroup.Done()
+ recv := con.network.ReceiveChan()
+ for {
+ select {
+ case <-con.ctx.Done():
+ return
+ default:
+ }
+ select {
+ case msg := <-recv:
+ innerLoop:
+ for {
+ select {
+ case con.msgChan <- msg:
+ break innerLoop
+ case <-time.After(500 * time.Millisecond):
+ con.logger.Debug("internal message channel is full",
+ "pending", msg)
+ }
+ }
+ case <-con.ctx.Done():
+ return
+ }
+ }
+}
+
+func (con *Consensus) processMsg() {
+ defer con.waitGroup.Done()
+MessageLoop:
+ for {
+ select {
+ case <-con.ctx.Done():
+ return
+ default:
+ }
+ var msg, peer interface{}
+ select {
+ case msg = <-con.priorityMsgChan:
+ default:
+ }
+ if msg == nil {
+ select {
+ case message := <-con.msgChan:
+ msg, peer = message.Payload, message.PeerID
+ case msg = <-con.priorityMsgChan:
+ case <-con.ctx.Done():
+ return
+ }
+ }
+ switch val := msg.(type) {
+ case *selfAgreementResult:
+ con.baMgr.touchAgreementResult((*types.AgreementResult)(val))
+ case *types.Block:
+ if ch, exist := func() (chan<- *types.Block, bool) {
+ con.lock.RLock()
+ defer con.lock.RUnlock()
+ ch, e := con.baConfirmedBlock[val.Hash]
+ return ch, e
+ }(); exist {
+ if val.IsEmpty() {
+ hash, err := utils.HashBlock(val)
+ if err != nil {
+ con.logger.Error("Error verifying empty block hash",
+ "block", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ continue MessageLoop
+ }
+ if hash != val.Hash {
+ con.logger.Error("Incorrect confirmed empty block hash",
+ "block", val,
+ "hash", hash)
+ con.network.ReportBadPeerChan() <- peer
+ continue MessageLoop
+ }
+ if _, err := con.bcModule.proposeBlock(
+ val.Position, time.Time{}, true); err != nil {
+ con.logger.Error("Error adding empty block",
+ "block", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ continue MessageLoop
+ }
+ } else {
+ if !val.IsFinalized() {
+ con.logger.Warn("Ignore non-finalized block",
+ "block", val)
+ continue MessageLoop
+ }
+ ok, err := con.bcModule.verifyRandomness(
+ val.Hash, val.Position.Round, val.Randomness)
+ if err != nil {
+ con.logger.Error("Error verifying confirmed block randomness",
+ "block", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ continue MessageLoop
+ }
+ if !ok {
+ con.logger.Error("Incorrect confirmed block randomness",
+ "block", val)
+ con.network.ReportBadPeerChan() <- peer
+ continue MessageLoop
+ }
+ if err := utils.VerifyBlockSignature(val); err != nil {
+ con.logger.Error("VerifyBlockSignature failed",
+ "block", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ continue MessageLoop
+ }
+ }
+ func() {
+ con.lock.Lock()
+ defer con.lock.Unlock()
+ // In case the block was delivered multiple times.
+ if _, exist := con.baConfirmedBlock[val.Hash]; !exist {
+ return
+ }
+ delete(con.baConfirmedBlock, val.Hash)
+ ch <- val
+ }()
+ } else if val.IsFinalized() {
+ if err := con.processFinalizedBlock(val); err != nil {
+ con.logger.Error("Failed to process finalized block",
+ "block", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ }
+ } else {
+ if err := con.preProcessBlock(val); err != nil {
+ con.logger.Error("Failed to pre-process block",
+ "block", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ }
+ }
+ case *types.Vote:
+ if err := con.ProcessVote(val); err != nil {
+ con.logger.Error("Failed to process vote",
+ "vote", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ }
+ case *types.AgreementResult:
+ if err := con.ProcessAgreementResult(val); err != nil {
+ con.logger.Error("Failed to process agreement result",
+ "result", val,
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ }
+ case *typesDKG.PrivateShare:
+ if err := con.cfgModule.processPrivateShare(val); err != nil {
+ con.logger.Error("Failed to process private share",
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ }
+
+ case *typesDKG.PartialSignature:
+ if err := con.cfgModule.processPartialSignature(val); err != nil {
+ con.logger.Error("Failed to process partial signature",
+ "error", err)
+ con.network.ReportBadPeerChan() <- peer
+ }
+ }
+ }
+}
+
+// ProcessVote is the entry point to submit one vote to a Consensus instance.
+func (con *Consensus) ProcessVote(vote *types.Vote) (err error) {
+ err = con.baMgr.processVote(vote)
+ return
+}
+
+// ProcessAgreementResult processes the randomness request.
+func (con *Consensus) ProcessAgreementResult(
+ rand *types.AgreementResult) error {
+ if !con.baMgr.touchAgreementResult(rand) {
+ return nil
+ }
+ // Sanity Check.
+ if err := VerifyAgreementResult(rand, con.nodeSetCache); err != nil {
+ con.baMgr.untouchAgreementResult(rand)
+ return err
+ }
+ if err := con.bcModule.processAgreementResult(rand); err != nil {
+ con.baMgr.untouchAgreementResult(rand)
+ if err == ErrSkipButNoError {
+ return nil
+ }
+ return err
+ }
+ // Syncing BA Module.
+ if err := con.baMgr.processAgreementResult(rand); err != nil {
+ con.baMgr.untouchAgreementResult(rand)
+ return err
+ }
+
+ con.logger.Debug("Rebroadcast AgreementResult",
+ "result", rand)
+ con.network.BroadcastAgreementResult(rand)
+
+ return con.deliverFinalizedBlocks()
+}
+
+// preProcessBlock performs Byzantine Agreement on the block.
+func (con *Consensus) preProcessBlock(b *types.Block) (err error) {
+ err = con.baMgr.processBlock(b)
+ if err == nil && con.debugApp != nil {
+ con.debugApp.BlockReceived(b.Hash)
+ }
+ return
+}
+
+func (con *Consensus) processFinalizedBlock(b *types.Block) (err error) {
+ if b.Position.Round < DKGDelayRound {
+ return
+ }
+ if err = utils.VerifyBlockSignature(b); err != nil {
+ return
+ }
+ verifier, ok, err := con.tsigVerifierCache.UpdateAndGet(b.Position.Round)
+ if err != nil {
+ return
+ }
+ if !ok {
+ err = ErrCannotVerifyBlockRandomness
+ return
+ }
+ if !verifier.VerifySignature(b.Hash, crypto.Signature{
+ Type: "bls",
+ Signature: b.Randomness,
+ }) {
+ err = ErrIncorrectBlockRandomness
+ return
+ }
+ err = con.baMgr.processFinalizedBlock(b)
+ if err == nil && con.debugApp != nil {
+ con.debugApp.BlockReceived(b.Hash)
+ }
+ return
+}
+
+func (con *Consensus) deliveryGuard() {
+ defer con.waitGroup.Done()
+ select {
+ case <-con.ctx.Done():
+ case <-time.After(con.dMoment.Sub(time.Now())):
+ }
+ // Node takes time to start.
+ select {
+ case <-con.ctx.Done():
+ case <-time.After(60 * time.Second):
+ }
+ for {
+ select {
+ case <-con.ctx.Done():
+ return
+ default:
+ }
+ select {
+ case <-con.ctx.Done():
+ return
+ case <-con.resetDeliveryGuardTicker:
+ case <-time.After(60 * time.Second):
+ con.logger.Error("No blocks delivered for too long", "ID", con.ID)
+ panic(fmt.Errorf("no blocks delivered for too long"))
+ }
+ }
+}
+
+// deliverBlock delivers a block to the application layer.
+func (con *Consensus) deliverBlock(b *types.Block) {
+ select {
+ case con.resetDeliveryGuardTicker <- struct{}{}:
+ default:
+ }
+ if err := con.db.PutBlock(*b); err != nil {
+ panic(err)
+ }
+ if err := con.db.PutCompactionChainTipInfo(b.Hash,
+ b.Position.Height); err != nil {
+ panic(err)
+ }
+ con.logger.Debug("Calling Application.BlockDelivered", "block", b)
+ con.app.BlockDelivered(b.Hash, b.Position, common.CopyBytes(b.Randomness))
+ if con.debugApp != nil {
+ con.debugApp.BlockReady(b.Hash)
+ }
+}
+
+// deliverFinalizedBlocks extracts and delivers finalized blocks to the
+// application layer.
+func (con *Consensus) deliverFinalizedBlocks() error {
+ con.lock.Lock()
+ defer con.lock.Unlock()
+ return con.deliverFinalizedBlocksWithoutLock()
+}
+
+func (con *Consensus) deliverFinalizedBlocksWithoutLock() (err error) {
+ deliveredBlocks := con.bcModule.extractBlocks()
+ con.logger.Debug("Last blocks in compaction chain",
+ "delivered", con.bcModule.lastDeliveredBlock(),
+ "pending", con.bcModule.lastPendingBlock())
+ for _, b := range deliveredBlocks {
+ con.deliverBlock(b)
+ con.event.NotifyHeight(b.Position.Height)
+ }
+ return
+}
+
+func (con *Consensus) processBlockLoop() {
+ for {
+ select {
+ case <-con.ctx.Done():
+ return
+ default:
+ }
+ select {
+ case <-con.ctx.Done():
+ return
+ case block := <-con.processBlockChan:
+ if err := con.processBlock(block); err != nil {
+ con.logger.Error("Error processing block",
+ "block", block,
+ "error", err)
+ }
+ }
+ }
+}
+
+// processBlock is the entry point to submit one block to a Consensus instance.
+func (con *Consensus) processBlock(block *types.Block) (err error) {
+ // Blocks processed by blockChain can arrive out of order, but its output
+ // (deliveredBlocks) cannot; thus we need to protect the part below with
+ // the writer lock.
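+ // For example, heights 5, 3, 4 may be submitted in that order; addBlock
+ // buffers them, and extractBlocks later yields 3, 4, 5 for delivery.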
+ con.lock.Lock()
+ defer con.lock.Unlock()
+ if err = con.bcModule.addBlock(block); err != nil {
+ return
+ }
+ if err = con.deliverFinalizedBlocksWithoutLock(); err != nil {
+ return
+ }
+ return
+}
+
+// proposeBlock sets up header fields of a block based on its position and
+// proposer, and signs it with the CRS of its round.
+func (con *Consensus) proposeBlock(position types.Position) (
+ *types.Block, error) {
+ b, err := con.bcModule.proposeBlock(position, time.Now().UTC(), false)
+ if err != nil {
+ return nil, err
+ }
+ con.logger.Debug("Calling Governance.CRS", "round", b.Position.Round)
+ crs := con.gov.CRS(b.Position.Round)
+ if crs.Equal(common.Hash{}) {
+ con.logger.Error("CRS for round is not ready, unable to prepare block",
+ "position", &b.Position)
+ return nil, ErrCRSNotReady
+ }
+ if err = con.signer.SignCRS(b, crs); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
diff --git a/dex/consensus/core/consensus_test.go b/dex/consensus/core/consensus_test.go
new file mode 100644
index 000000000..fb164ad48
--- /dev/null
+++ b/dex/consensus/core/consensus_test.go
@@ -0,0 +1,438 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "context"
+ "encoding/json"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/suite"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ "github.com/dexon-foundation/dexon-consensus/core/db"
+ "github.com/dexon-foundation/dexon-consensus/core/test"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/utils"
+)
+
+// network implements core.Network.
+type network struct {
+ nID types.NodeID
+ conn *networkConnection
+}
+
+// PullBlocks tries to pull blocks from the DEXON network.
+func (n *network) PullBlocks(common.Hashes) {
+}
+
+// PullVotes tries to pull votes from the DEXON network.
+func (n *network) PullVotes(types.Position) {
+}
+
+// PullRandomness tries to pull randomness from the DEXON network.
+func (n *network) PullRandomness(common.Hashes) {
+}
+
+// BroadcastVote broadcasts vote to all nodes in DEXON network.
+func (n *network) BroadcastVote(vote *types.Vote) {
+ n.conn.broadcast(n.nID, vote)
+}
+
+// BroadcastBlock broadcasts block to all nodes in DEXON network.
+func (n *network) BroadcastBlock(block *types.Block) {
+ n.conn.broadcast(n.nID, block)
+}
+
+// BroadcastAgreementResult broadcasts agreement result to DKG set.
+func (n *network) BroadcastAgreementResult(
+ randRequest *types.AgreementResult) {
+ n.conn.broadcast(n.nID, randRequest)
+}
+
+// SendDKGPrivateShare sends PrivateShare to a DKG participant.
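+// Unlike the Broadcast* helpers above, which fan out to every connection in
+// the test networkConnection, a private share is routed point-to-point.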
+func (n *network) SendDKGPrivateShare(
+ recv crypto.PublicKey, prvShare *typesDKG.PrivateShare) {
+ n.conn.send(n.nID, types.NewNodeID(recv), prvShare)
+}
+
+// BroadcastDKGPrivateShare broadcasts PrivateShare to all DKG participants.
+func (n *network) BroadcastDKGPrivateShare(
+ prvShare *typesDKG.PrivateShare) {
+ n.conn.broadcast(n.nID, prvShare)
+}
+
+// BroadcastDKGPartialSignature broadcasts partialSignature to all
+// DKG participants.
+func (n *network) BroadcastDKGPartialSignature(
+ psig *typesDKG.PartialSignature) {
+ n.conn.broadcast(n.nID, psig)
+}
+
+// ReceiveChan returns a channel to receive messages from DEXON network.
+func (n *network) ReceiveChan() <-chan types.Msg {
+ return make(chan types.Msg)
+}
+
+// ReportBadPeerChan returns a channel for reporting a peer that sends bad
+// messages.
+func (n *network) ReportBadPeerChan() chan<- interface{} {
+ return n.conn.s.sink
+}
+
+func (nc *networkConnection) broadcast(from types.NodeID, msg interface{}) {
+ for nID := range nc.cons {
+ if nID == from {
+ continue
+ }
+ nc.send(from, nID, msg)
+ }
+}
+
+func (nc *networkConnection) send(from, to types.NodeID, msg interface{}) {
+ ch, exist := nc.cons[to]
+ if !exist {
+ return
+ }
+ msgCopy := msg
+ // Clone msg if necessary.
+ switch val := msg.(type) {
+ case *types.Block:
+ msgCopy = val.Clone()
+ case *typesDKG.PrivateShare:
+ // Use Marshal/Unmarshal to do deep copy.
+ data, err := json.Marshal(val)
+ if err != nil {
+ panic(err)
+ }
+ valCopy := &typesDKG.PrivateShare{}
+ if err := json.Unmarshal(data, valCopy); err != nil {
+ panic(err)
+ }
+ msgCopy = valCopy
+ }
+ ch <- types.Msg{
+ PeerID: from,
+ Payload: msgCopy,
+ }
+}
+
+type networkConnection struct {
+ s *ConsensusTestSuite
+ cons map[types.NodeID]chan types.Msg
+}
+
+func (nc *networkConnection) newNetwork(nID types.NodeID) *network {
+ return &network{
+ nID: nID,
+ conn: nc,
+ }
+}
+
+func (nc *networkConnection) setCon(nID types.NodeID, con *Consensus) {
+ ch := make(chan types.Msg, 1000)
+ nc.s.wg.Add(1)
+ go func() {
+ defer nc.s.wg.Done()
+ for {
+ var msg types.Msg
+ select {
+ case msg = <-ch:
+ case <-nc.s.ctx.Done():
+ return
+ }
+ var err error
+ // Testify package does not support concurrent calls.
+ // Use panic() to detect errors.
+ switch val := msg.Payload.(type) { + case *types.Block: + err = con.preProcessBlock(val) + case *types.Vote: + err = con.ProcessVote(val) + case *types.AgreementResult: + err = con.ProcessAgreementResult(val) + case *typesDKG.PrivateShare: + err = con.cfgModule.processPrivateShare(val) + case *typesDKG.PartialSignature: + err = con.cfgModule.processPartialSignature(val) + } + if err != nil { + panic(err) + } + } + }() + nc.cons[nID] = ch +} + +type ConsensusTestSuite struct { + suite.Suite + ctx context.Context + ctxCancel context.CancelFunc + conn *networkConnection + sink chan interface{} + wg sync.WaitGroup +} + +func (s *ConsensusTestSuite) SetupTest() { + s.ctx, s.ctxCancel = context.WithCancel(context.Background()) + s.sink = make(chan interface{}, 1000) + s.wg.Add(1) + go func() { + defer s.wg.Done() + select { + case <-s.sink: + case <-s.ctx.Done(): + return + } + }() +} +func (s *ConsensusTestSuite) TearDownTest() { + s.ctxCancel() + s.wg.Wait() +} + +func (s *ConsensusTestSuite) newNetworkConnection() *networkConnection { + return &networkConnection{ + s: s, + cons: make(map[types.NodeID]chan types.Msg), + } +} + +func (s *ConsensusTestSuite) prepareConsensus( + dMoment time.Time, + gov *test.Governance, + prvKey crypto.PrivateKey, + conn *networkConnection) ( + *test.App, *Consensus) { + + app := test.NewApp(0, nil, nil) + dbInst, err := db.NewMemBackedDB() + s.Require().NoError(err) + nID := types.NewNodeID(prvKey.PublicKey()) + network := conn.newNetwork(nID) + con := NewConsensus( + dMoment, app, gov, dbInst, network, prvKey, &common.NullLogger{}) + conn.setCon(nID, con) + return app, con +} + +func (s *ConsensusTestSuite) prepareConsensusWithDB( + dMoment time.Time, + gov *test.Governance, + prvKey crypto.PrivateKey, + conn *networkConnection, + dbInst db.Database) ( + *test.App, *Consensus) { + + app := test.NewApp(0, nil, nil) + nID := types.NewNodeID(prvKey.PublicKey()) + network := conn.newNetwork(nID) + con := NewConsensus( + dMoment, app, gov, dbInst, network, prvKey, &common.NullLogger{}) + conn.setCon(nID, con) + return app, con +} + +func (s *ConsensusTestSuite) TestRegisteredDKGRecover() { + conn := s.newNetworkConnection() + prvKeys, pubKeys, err := test.NewKeys(1) + s.Require().NoError(err) + gov, err := test.NewGovernance(test.NewState(DKGDelayRound, + pubKeys, time.Second, &common.NullLogger{}, true), ConfigRoundShift) + s.Require().NoError(err) + gov.State().RequestChange(test.StateChangeRoundLength, uint64(200)) + dMoment := time.Now().UTC() + dbInst, err := db.NewMemBackedDB() + s.Require().NoError(err) + _, con := s.prepareConsensusWithDB(dMoment, gov, prvKeys[0], conn, dbInst) + + s.Require().Nil(con.cfgModule.dkg) + + con.cfgModule.registerDKG(con.ctx, 0, 0, 10) + con.cfgModule.dkgLock.Lock() + defer con.cfgModule.dkgLock.Unlock() + + _, newCon := s.prepareConsensusWithDB(dMoment, gov, prvKeys[0], conn, dbInst) + + newCon.cfgModule.registerDKG(newCon.ctx, 0, 0, 10) + newCon.cfgModule.dkgLock.Lock() + defer newCon.cfgModule.dkgLock.Unlock() + + s.Require().NotNil(newCon.cfgModule.dkg) + s.Require().True(newCon.cfgModule.dkg.prvShares.Equal(con.cfgModule.dkg.prvShares)) +} + +func (s *ConsensusTestSuite) TestDKGCRS() { + n := 21 + lambda := 200 * time.Millisecond + if testing.Short() { + n = 7 + lambda = 100 * time.Millisecond + } + if isTravisCI() { + lambda *= 5 + } + conn := s.newNetworkConnection() + prvKeys, pubKeys, err := test.NewKeys(n) + s.Require().NoError(err) + gov, err := test.NewGovernance(test.NewState(DKGDelayRound, + pubKeys, lambda, 
&common.NullLogger{}, true), ConfigRoundShift) + s.Require().NoError(err) + gov.State().RequestChange(test.StateChangeRoundLength, uint64(200)) + cons := map[types.NodeID]*Consensus{} + dMoment := time.Now().UTC() + for _, key := range prvKeys { + _, con := s.prepareConsensus(dMoment, gov, key, conn) + nID := types.NewNodeID(key.PublicKey()) + cons[nID] = con + } + time.Sleep(gov.Configuration(0).MinBlockInterval * 4) + for _, con := range cons { + go con.runDKG(0, 0, 0, 0) + } + crsFinish := make(chan struct{}, len(cons)) + for _, con := range cons { + go func(con *Consensus) { + height := uint64(0) + Loop: + for { + select { + case <-crsFinish: + break Loop + case <-time.After(lambda): + } + con.event.NotifyHeight(height) + height++ + } + }(con) + } + for _, con := range cons { + func() { + con.dkgReady.L.Lock() + defer con.dkgReady.L.Unlock() + for con.dkgRunning != 2 { + con.dkgReady.Wait() + } + }() + } + for _, con := range cons { + go func(con *Consensus) { + con.runCRS(0, gov.CRS(0), false) + crsFinish <- struct{}{} + }(con) + } + s.NotNil(gov.CRS(1)) +} + +func (s *ConsensusTestSuite) TestSyncBA() { + lambdaBA := time.Second + conn := s.newNetworkConnection() + prvKeys, pubKeys, err := test.NewKeys(4) + s.Require().NoError(err) + gov, err := test.NewGovernance(test.NewState(DKGDelayRound, + pubKeys, lambdaBA, &common.NullLogger{}, true), ConfigRoundShift) + s.Require().NoError(err) + prvKey := prvKeys[0] + _, con := s.prepareConsensus(time.Now().UTC(), gov, prvKey, conn) + go con.Run() + defer con.Stop() + hash := common.NewRandomHash() + signers := make([]*utils.Signer, 0, len(prvKeys)) + for _, prvKey := range prvKeys { + signers = append(signers, utils.NewSigner(prvKey)) + } + pos := types.Position{ + Round: 0, + Height: 20, + } + baResult := &types.AgreementResult{ + BlockHash: hash, + Position: pos, + } + for _, signer := range signers { + vote := types.NewVote(types.VoteCom, hash, 0) + vote.Position = pos + s.Require().NoError(signer.SignVote(vote)) + baResult.Votes = append(baResult.Votes, *vote) + } + // Make sure each agreement module is running. ProcessAgreementResult only + // works properly when agreement module is running: + // - the bias for round begin time would be 4 * lambda. + // - the ticker is 1 lambdaa. + time.Sleep(5 * lambdaBA) + s.Require().NoError(con.ProcessAgreementResult(baResult)) + aID := con.baMgr.baModule.agreementID() + s.Equal(pos, aID) + + // Negative cases are moved to TestVerifyAgreementResult in utils_test.go. +} + +func (s *ConsensusTestSuite) TestInitialHeightEventTriggered() { + // Initial block is the last block of corresponding round, in this case, + // we should make sure all height event handlers could be triggered after + // returned from Consensus.prepare(). + prvKeys, pubKeys, err := test.NewKeys(4) + s.Require().NoError(err) + // Prepare a governance instance, whose DKG-reset-count for round 2 is 1. 
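+ // One ProposeCRS for round 2 followed by one ResetDKG bumps the
+ // DKG-reset-count of round 2 from 0 to 1, which is asserted below.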
+ gov, err := test.NewGovernance(test.NewState(DKGDelayRound, + pubKeys, time.Second, &common.NullLogger{}, true), ConfigRoundShift) + gov.State().RequestChange(test.StateChangeRoundLength, uint64(100)) + s.Require().NoError(err) + gov.NotifyRound(2, 201) + gov.NotifyRound(3, 301) + hash := common.NewRandomHash() + gov.ProposeCRS(2, hash[:]) + hash = common.NewRandomHash() + gov.ResetDKG(hash[:]) + s.Require().Equal(gov.DKGResetCount(2), uint64(1)) + prvKey := prvKeys[0] + initBlock := &types.Block{ + Hash: common.NewRandomHash(), + Position: types.Position{Round: 1, Height: 200}, + } + dbInst, err := db.NewMemBackedDB() + s.Require().NoError(err) + nID := types.NewNodeID(prvKey.PublicKey()) + conn := s.newNetworkConnection() + network := conn.newNetwork(nID) + con, err := NewConsensusFromSyncer( + initBlock, + false, + time.Now().UTC(), + test.NewApp(0, nil, nil), + gov, + dbInst, + network, + prvKey, + []*types.Block(nil), + []types.Msg{}, + &common.NullLogger{}, + ) + s.Require().NoError(err) + // Here is the tricky part, check if block chain module can handle the + // block with height == 200. + s.Require().Equal(con.bcModule.configs[0].RoundID(), uint64(1)) + s.Require().Equal(con.bcModule.configs[0].RoundEndHeight(), uint64(301)) +} + +func TestConsensus(t *testing.T) { + suite.Run(t, new(ConsensusTestSuite)) +} diff --git a/dex/consensus/core/constant.go b/dex/consensus/core/constant.go new file mode 100644 index 000000000..29dae8b73 --- /dev/null +++ b/dex/consensus/core/constant.go @@ -0,0 +1,41 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import "github.com/dexon-foundation/dexon-consensus/core/utils" + +// ConfigRoundShift refers to the difference between block's round and config +// round derived from its state. +// +// For example, when round shift is 2, a block in round 0 should derive config +// for round 2. +const ConfigRoundShift uint64 = 2 + +// DKGDelayRound refers to the round that first DKG is run. +// +// For example, when delay round is 1, new DKG will run at round 1. Round 0 will +// have neither DKG nor CRS. +const DKGDelayRound uint64 = 1 + +// NoRand is the magic placeholder for randomness field in blocks for blocks +// proposed before DKGDelayRound. +var NoRand = []byte("norand") + +func init() { + utils.SetDKGDelayRound(DKGDelayRound) +} diff --git a/dex/consensus/core/crypto/dkg/constant.go b/dex/consensus/core/crypto/dkg/constant.go new file mode 100644 index 000000000..37d873d6f --- /dev/null +++ b/dex/consensus/core/crypto/dkg/constant.go @@ -0,0 +1,26 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. 
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package dkg
+
+import (
+ "github.com/dexon-foundation/bls/ffi/go/bls"
+)
+
+const (
+ curve = bls.BLS12_381
+)
diff --git a/dex/consensus/core/crypto/dkg/dkg.go b/dex/consensus/core/crypto/dkg/dkg.go
new file mode 100644
index 000000000..ab43f5130
--- /dev/null
+++ b/dex/consensus/core/crypto/dkg/dkg.go
@@ -0,0 +1,637 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package dkg
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "sync"
+ "sync/atomic"
+
+ "github.com/dexon-foundation/bls/ffi/go/bls"
+ "github.com/dexon-foundation/dexon/rlp"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+)
+
+var (
+ // ErrDuplicatedShare is reported when adding a private key share with the
+ // same id.
+ ErrDuplicatedShare = fmt.Errorf("invalid share")
+ // ErrNoIDToRecover is reported when no id is provided for recovering the
+ // private key.
+ ErrNoIDToRecover = fmt.Errorf("no id to recover private key")
+ // ErrShareNotFound is reported when the private key share of an id is not
+ // found when recovering the private key.
+ ErrShareNotFound = fmt.Errorf("share not found")
+)
+
+const cryptoType = "bls"
+
+var publicKeyLength int
+
+func init() {
+ if err := bls.Init(curve); err != nil {
+ panic(err)
+ }
+
+ pubKey := &bls.PublicKey{}
+ publicKeyLength = len(pubKey.Serialize())
+}
+
+// PrivateKey represents a private key structure that implements the
+// Crypto.PrivateKey interface.
+type PrivateKey struct {
+ privateKey bls.SecretKey
+ publicKey PublicKey
+}
+
+// EncodeRLP implements rlp.Encoder
+func (prv *PrivateKey) EncodeRLP(w io.Writer) error {
+ return rlp.Encode(w, prv.Bytes())
+}
+
+// DecodeRLP implements rlp.Decoder
+func (prv *PrivateKey) DecodeRLP(s *rlp.Stream) error {
+ var b []byte
+ if err := s.Decode(&b); err != nil {
+ return err
+ }
+ return prv.SetBytes(b)
+}
+
+// MarshalJSON implements json.Marshaller.
+func (prv *PrivateKey) MarshalJSON() ([]byte, error) {
+ return json.Marshal(&prv.privateKey)
+}
+
+// UnmarshalJSON implements json.Unmarshaller.
+func (prv *PrivateKey) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &prv.privateKey) +} + +// ID is the id for DKG protocol. +type ID = bls.ID + +// IDs is an array of ID. +type IDs []ID + +// PublicKey represents a public key structure implements +// Crypto.PublicKey interface. +type PublicKey struct { + publicKey bls.PublicKey +} + +// PrivateKeyShares represents a private key shares for DKG protocol. +type PrivateKeyShares struct { + shares []PrivateKey + shareIndex map[ID]int + masterPrivateKey []bls.SecretKey +} + +// Equal check equality between two PrivateKeyShares instances. +func (prvs *PrivateKeyShares) Equal(other *PrivateKeyShares) bool { + // Check shares. + if len(prvs.shareIndex) != len(other.shareIndex) { + return false + } + for dID, idx := range prvs.shareIndex { + otherIdx, exists := other.shareIndex[dID] + if !exists { + return false + } + if !prvs.shares[idx].privateKey.IsEqual( + &other.shares[otherIdx].privateKey) { + return false + } + } + // Check master private keys. + if len(prvs.masterPrivateKey) != len(other.masterPrivateKey) { + return false + } + for idx, m := range prvs.masterPrivateKey { + if m.GetHexString() != other.masterPrivateKey[idx].GetHexString() { + return false + } + } + return true +} + +// EncodeRLP implements rlp.Encoder +func (prvs *PrivateKeyShares) EncodeRLP(w io.Writer) error { + data := make([][][]byte, 3) + shares := make([][]byte, len(prvs.shares)) + for i, s := range prvs.shares { + shares[i] = s.Bytes() + } + data[0] = shares + + shareIndex := make([][]byte, 0) + for k, v := range prvs.shareIndex { + shareIndex = append(shareIndex, k.GetLittleEndian()) + + vBytes, err := rlp.EncodeToBytes(uint64(v)) + if err != nil { + return err + } + shareIndex = append(shareIndex, vBytes) + } + data[1] = shareIndex + + mpks := make([][]byte, len(prvs.masterPrivateKey)) + for i, m := range prvs.masterPrivateKey { + mpks[i] = m.GetLittleEndian() + } + data[2] = mpks + return rlp.Encode(w, data) +} + +// DecodeRLP implements rlp.Decoder +func (prvs *PrivateKeyShares) DecodeRLP(s *rlp.Stream) error { + *prvs = PrivateKeyShares{} + var dec [][][]byte + if err := s.Decode(&dec); err != nil { + return err + } + + var shares []PrivateKey + for _, bs := range dec[0] { + var key PrivateKey + err := key.SetBytes(bs) + if err != nil { + return err + } + shares = append(shares, key) + } + (*prvs).shares = shares + + sharesIndex := map[ID]int{} + for i := 0; i < len(dec[1]); i += 2 { + var key ID + err := key.SetLittleEndian(dec[1][i]) + if err != nil { + return err + } + + var value uint64 + err = rlp.DecodeBytes(dec[1][i+1], &value) + if err != nil { + return err + } + + sharesIndex[key] = int(value) + } + (*prvs).shareIndex = sharesIndex + + var mpks []bls.SecretKey + for _, bs := range dec[2] { + var key bls.SecretKey + if err := key.SetLittleEndian(bs); err != nil { + return err + } + mpks = append(mpks, key) + } + (*prvs).masterPrivateKey = mpks + + return nil +} + +type publicKeySharesCache struct { + share []PublicKey + index map[ID]int +} + +// PublicKeyShares represents a public key shares for DKG protocol. +type PublicKeyShares struct { + cache atomic.Value + lock sync.Mutex + masterPublicKey []bls.PublicKey +} + +// Equal checks equality of two PublicKeyShares instance. +func (pubs *PublicKeyShares) Equal(other *PublicKeyShares) bool { + cache := pubs.cache.Load().(*publicKeySharesCache) + cacheOther := other.cache.Load().(*publicKeySharesCache) + // Check shares. 
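+ // Shares are only compared for IDs present in both caches; an ID known
+ // to just one side is skipped, so overall equality effectively rests on
+ // the master public key comparison below.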
+ for dID, idx := range cache.index { + otherIdx, exists := cacheOther.index[dID] + if !exists { + continue + } + if !cache.share[idx].publicKey.IsEqual( + &cacheOther.share[otherIdx].publicKey) { + return false + } + } + // Check master public keys. + if len(pubs.masterPublicKey) != len(other.masterPublicKey) { + return false + } + for idx, m := range pubs.masterPublicKey { + if m.GetHexString() != other.masterPublicKey[idx].GetHexString() { + return false + } + } + return true +} + +// EncodeRLP implements rlp.Encoder +func (pubs *PublicKeyShares) EncodeRLP(w io.Writer) error { + mpks := make([][]byte, len(pubs.masterPublicKey)) + for i, m := range pubs.masterPublicKey { + mpks[i] = m.Serialize() + } + return rlp.Encode(w, mpks) +} + +// DecodeRLP implements rlp.Decoder +func (pubs *PublicKeyShares) DecodeRLP(s *rlp.Stream) error { + var dec [][]byte + if err := s.Decode(&dec); err != nil { + return err + } + + ps := NewEmptyPublicKeyShares() + for _, k := range dec { + var key bls.PublicKey + if err := key.Deserialize(k); err != nil { + return err + } + ps.masterPublicKey = append(ps.masterPublicKey, key) + } + + *pubs = *ps.Move() + return nil +} + +// MarshalJSON implements json.Marshaller. +func (pubs *PublicKeyShares) MarshalJSON() ([]byte, error) { + type Alias PublicKeyShares + data := &struct { + MasterPublicKeys []*bls.PublicKey `json:"master_public_keys"` + }{ + make([]*bls.PublicKey, len(pubs.masterPublicKey)), + } + for i := range pubs.masterPublicKey { + data.MasterPublicKeys[i] = &pubs.masterPublicKey[i] + } + return json.Marshal(data) +} + +// UnmarshalJSON implements json.Unmarshaller. +func (pubs *PublicKeyShares) UnmarshalJSON(data []byte) error { + type Alias PublicKeyShares + aux := &struct { + MasterPublicKeys []*bls.PublicKey `json:"master_public_keys"` + }{} + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + mpk := make([]bls.PublicKey, len(aux.MasterPublicKeys)) + for i, pk := range aux.MasterPublicKeys { + mpk[i] = *pk + } + pubs.masterPublicKey = mpk + return nil +} + +// Clone clones every fields of PublicKeyShares. This method is mainly +// for testing purpose thus would panic when error. +func (pubs *PublicKeyShares) Clone() *PublicKeyShares { + b, err := rlp.EncodeToBytes(pubs) + if err != nil { + panic(err) + } + pubsCopy := NewEmptyPublicKeyShares() + if err := rlp.DecodeBytes(b, pubsCopy); err != nil { + panic(err) + } + return pubsCopy +} + +// NewID creates a ew ID structure. +func NewID(id []byte) ID { + var blsID bls.ID + // #nosec G104 + blsID.SetLittleEndian(id) + return blsID +} + +// BytesID creates a new ID structure, +// It returns err if the byte slice is not valid. +func BytesID(id []byte) (ID, error) { + var blsID bls.ID + // #nosec G104 + err := blsID.SetLittleEndian(id) + return blsID, err +} + +// NewPrivateKey creates a new PrivateKey structure. +func NewPrivateKey() *PrivateKey { + var key bls.SecretKey + key.SetByCSPRNG() + return &PrivateKey{ + privateKey: key, + publicKey: *newPublicKey(&key), + } +} + +// NewPrivateKeyShares creates a DKG private key shares of threshold t. +func NewPrivateKeyShares(t int) (*PrivateKeyShares, *PublicKeyShares) { + var prv bls.SecretKey + prv.SetByCSPRNG() + msk := prv.GetMasterSecretKey(t) + mpk := bls.GetMasterPublicKey(msk) + pubShare := NewEmptyPublicKeyShares() + pubShare.masterPublicKey = mpk + return &PrivateKeyShares{ + masterPrivateKey: msk, + shareIndex: make(map[ID]int), + }, pubShare +} + +// NewEmptyPrivateKeyShares creates an empty private key shares. 
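+//
+// A typical dealing flow for one participant, as a hedged sketch (ids would
+// come from the round's node set; t is the recovery threshold):
+//
+//   prvShares, pubShares := NewPrivateKeyShares(t)
+//   prvShares.SetParticipants(ids)      // derive one private share per ID
+//   share, _ := prvShares.Share(ids[0]) // the share to deliver to ids[0]
+//   // pubShares is broadcast so receivers can verify the shares they get.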
+func NewEmptyPrivateKeyShares() *PrivateKeyShares { + return &PrivateKeyShares{ + shareIndex: make(map[ID]int), + } +} + +// SetParticipants sets the DKG participants. +func (prvs *PrivateKeyShares) SetParticipants(IDs IDs) { + prvs.shares = make([]PrivateKey, len(IDs)) + prvs.shareIndex = make(map[ID]int, len(IDs)) + for idx, ID := range IDs { + // #nosec G104 + prvs.shares[idx].privateKey.Set(prvs.masterPrivateKey, &ID) + prvs.shareIndex[ID] = idx + } +} + +// AddShare adds a share. +func (prvs *PrivateKeyShares) AddShare(ID ID, share *PrivateKey) error { + if idx, exist := prvs.shareIndex[ID]; exist { + if !share.privateKey.IsEqual(&prvs.shares[idx].privateKey) { + return ErrDuplicatedShare + } + return nil + } + prvs.shareIndex[ID] = len(prvs.shares) + prvs.shares = append(prvs.shares, *share) + return nil +} + +// RecoverPrivateKey recovers private key from the shares. +func (prvs *PrivateKeyShares) RecoverPrivateKey(qualifyIDs IDs) ( + *PrivateKey, error) { + var prv PrivateKey + if len(qualifyIDs) == 0 { + return nil, ErrNoIDToRecover + } + for i, ID := range qualifyIDs { + idx, exist := prvs.shareIndex[ID] + if !exist { + return nil, ErrShareNotFound + } + if i == 0 { + prv.privateKey = prvs.shares[idx].privateKey + continue + } + prv.privateKey.Add(&prvs.shares[idx].privateKey) + } + return &prv, nil +} + +// RecoverPublicKey recovers public key from the shares. +func (prvs *PrivateKeyShares) RecoverPublicKey(qualifyIDs IDs) ( + *PublicKey, error) { + var pub PublicKey + if len(qualifyIDs) == 0 { + return nil, ErrNoIDToRecover + } + for i, ID := range qualifyIDs { + idx, exist := prvs.shareIndex[ID] + if !exist { + return nil, ErrShareNotFound + } + if i == 0 { + pub.publicKey = *prvs.shares[idx].privateKey.GetPublicKey() + continue + } + pub.publicKey.Add(prvs.shares[idx].privateKey.GetPublicKey()) + } + return &pub, nil +} + +// Share returns the share for the ID. +func (prvs *PrivateKeyShares) Share(ID ID) (*PrivateKey, bool) { + idx, exist := prvs.shareIndex[ID] + if !exist { + return nil, false + } + return &prvs.shares[idx], true +} + +// NewEmptyPublicKeyShares creates an empty public key shares. +func NewEmptyPublicKeyShares() *PublicKeyShares { + pubShares := &PublicKeyShares{} + pubShares.cache.Store(&publicKeySharesCache{ + index: make(map[ID]int), + }) + return pubShares +} + +// Move will invalidate itself. Do not access to original reference. +func (pubs *PublicKeyShares) Move() *PublicKeyShares { + return pubs +} + +// Share returns the share for the ID. +func (pubs *PublicKeyShares) Share(ID ID) (*PublicKey, error) { + cache := pubs.cache.Load().(*publicKeySharesCache) + idx, exist := cache.index[ID] + if exist { + return &cache.share[idx], nil + } + var pk PublicKey + if err := pk.publicKey.Set(pubs.masterPublicKey, &ID); err != nil { + return nil, err + } + if err := pubs.AddShare(ID, &pk); err != nil { + return nil, err + } + return &pk, nil +} + +// AddShare adds a share. 
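+// The share cache behind it is copy-on-write: readers load the current cache
+// through atomic.Value without locking, while writers take pubs.lock, build a
+// new cache containing the extra share, and atomically swap it in.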
+func (pubs *PublicKeyShares) AddShare(shareID ID, share *PublicKey) error {
+ cache := pubs.cache.Load().(*publicKeySharesCache)
+ if idx, exist := cache.index[shareID]; exist {
+ if !share.publicKey.IsEqual(&cache.share[idx].publicKey) {
+ return ErrDuplicatedShare
+ }
+ return nil
+ }
+ pubs.lock.Lock()
+ defer pubs.lock.Unlock()
+ cache = pubs.cache.Load().(*publicKeySharesCache)
+ newCache := &publicKeySharesCache{
+ index: make(map[ID]int, len(cache.index)+1),
+ share: make([]PublicKey, len(cache.share), len(cache.share)+1),
+ }
+ for k, v := range cache.index {
+ newCache.index[k] = v
+ }
+ copy(newCache.share, cache.share)
+ newCache.index[shareID] = len(newCache.share)
+ newCache.share = append(newCache.share, *share)
+ pubs.cache.Store(newCache)
+ return nil
+}
+
+// VerifyPrvShare verifies if the private key share is valid.
+func (pubs *PublicKeyShares) VerifyPrvShare(ID ID, share *PrivateKey) (
+ bool, error) {
+ var pk bls.PublicKey
+ if err := pk.Set(pubs.masterPublicKey, &ID); err != nil {
+ return false, err
+ }
+ return pk.IsEqual(share.privateKey.GetPublicKey()), nil
+}
+
+// VerifyPubShare verifies if the public key share is valid.
+func (pubs *PublicKeyShares) VerifyPubShare(ID ID, share *PublicKey) (
+ bool, error) {
+ var pk bls.PublicKey
+ if err := pk.Set(pubs.masterPublicKey, &ID); err != nil {
+ return false, err
+ }
+ return pk.IsEqual(&share.publicKey), nil
+}
+
+// RecoverPublicKey recovers the public key from the shares.
+func (pubs *PublicKeyShares) RecoverPublicKey(qualifyIDs IDs) (
+ *PublicKey, error) {
+ var pub PublicKey
+ if len(qualifyIDs) == 0 {
+ return nil, ErrNoIDToRecover
+ }
+ for i, ID := range qualifyIDs {
+ pk, err := pubs.Share(ID)
+ if err != nil {
+ return nil, err
+ }
+ if i == 0 {
+ pub.publicKey = pk.publicKey
+ continue
+ }
+ pub.publicKey.Add(&pk.publicKey)
+ }
+ return &pub, nil
+}
+
+// MasterKeyBytes returns []byte representation of the master public key.
+func (pubs *PublicKeyShares) MasterKeyBytes() []byte {
+ bytes := make([]byte, 0, len(pubs.masterPublicKey)*publicKeyLength)
+ for _, pk := range pubs.masterPublicKey {
+ bytes = append(bytes, pk.Serialize()...)
+ }
+ return bytes
+}
+
+// newPublicKey creates a new PublicKey structure.
+func newPublicKey(prvKey *bls.SecretKey) *PublicKey {
+ return &PublicKey{
+ publicKey: *prvKey.GetPublicKey(),
+ }
+}
+
+// newPublicKeyFromBytes creates a new PublicKey structure
+// from the bytes representation of bls.PublicKey
+func newPublicKeyFromBytes(b []byte) (*PublicKey, error) {
+ var pub PublicKey
+ err := pub.publicKey.Deserialize(b)
+ return &pub, err
+}
+
+// PublicKey returns the public key associated with this private key.
+func (prv *PrivateKey) PublicKey() crypto.PublicKey {
+ return prv.publicKey
+}
+
+// Sign calculates a signature.
+func (prv *PrivateKey) Sign(hash common.Hash) (crypto.Signature, error) {
+ msg := string(hash[:])
+ sign := prv.privateKey.Sign(msg)
+ return crypto.Signature{
+ Type: cryptoType,
+ Signature: sign.Serialize(),
+ }, nil
+}
+
+// Bytes returns []byte representation of the private key.
+func (prv *PrivateKey) Bytes() []byte {
+ return prv.privateKey.GetLittleEndian()
+}
+
+// SetBytes sets the private key data from []byte.
+func (prv *PrivateKey) SetBytes(bytes []byte) error {
+ var key bls.SecretKey
+ if err := key.SetLittleEndian(bytes); err != nil {
+ return err
+ }
+ prv.privateKey = key
+ prv.publicKey = *newPublicKey(&prv.privateKey)
+ return nil
+}
+
+// String returns the string representation of the private key.
+func (prv *PrivateKey) String() string { + return prv.privateKey.GetHexString() +} + +// VerifySignature checks that the given public key created signature over hash. +func (pub PublicKey) VerifySignature( + hash common.Hash, signature crypto.Signature) bool { + if len(signature.Signature) == 0 { + return false + } + var sig bls.Sign + if err := sig.Deserialize(signature.Signature[:]); err != nil { + fmt.Println(err) + return false + } + msg := string(hash[:]) + return sig.Verify(&pub.publicKey, msg) +} + +// Bytes returns []byte representation of public key. +func (pub PublicKey) Bytes() []byte { + return pub.publicKey.Serialize() +} + +// Serialize return bytes representation of public key. +func (pub *PublicKey) Serialize() []byte { + return pub.publicKey.Serialize() +} + +// Deserialize parses bytes representation of public key. +func (pub *PublicKey) Deserialize(b []byte) error { + return pub.publicKey.Deserialize(b) +} diff --git a/dex/consensus/core/crypto/dkg/dkg_test.go b/dex/consensus/core/crypto/dkg/dkg_test.go new file mode 100644 index 000000000..84dc2d4ac --- /dev/null +++ b/dex/consensus/core/crypto/dkg/dkg_test.go @@ -0,0 +1,661 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package dkg + +import ( + "encoding/binary" + "math/rand" + "reflect" + "sort" + "sync" + "testing" + + "github.com/dexon-foundation/bls/ffi/go/bls" + "github.com/dexon-foundation/dexon/rlp" + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" +) + +type DKGTestSuite struct { + suite.Suite +} + +type member struct { + id ID + prvShares *PrivateKeyShares + pubShares *PublicKeyShares + receivedPrvShares *PrivateKeyShares + receivedPubShares map[ID]*PublicKeyShares +} + +func (s *DKGTestSuite) genID(k int) IDs { + IDs := make(IDs, 0, k) + for i := 0; i < k; i++ { + id := make([]byte, 8) + binary.LittleEndian.PutUint64(id, rand.Uint64()) + IDs = append(IDs, NewID(id)) + } + return IDs +} + +func (s *DKGTestSuite) sendKey(senders []member, receivers []member) { + receiveFrom := make(map[ID][]member) + for _, sender := range senders { + for _, receiver := range receivers { + // Here's the demonstration of DKG protocol. `pubShares` is broadcasted + // and all the receiver would save it to the `receivedPubShares`. + // Do not optimize the memory usage of this part. + receiver.receivedPubShares[sender.id] = sender.pubShares + prvShare, ok := sender.prvShares.Share(receiver.id) + s.Require().True(ok) + pubShare, err := sender.pubShares.Share(receiver.id) + s.Require().NoError(err) + valid, err := receiver.receivedPubShares[sender.id]. 
+ VerifyPrvShare(receiver.id, prvShare) + s.Require().NoError(err) + s.Require().True(valid) + valid, err = receiver.receivedPubShares[sender.id]. + VerifyPubShare(receiver.id, pubShare) + s.Require().NoError(err) + s.Require().True(valid) + receiveFrom[receiver.id] = append(receiveFrom[receiver.id], sender) + } + } + // The received order do not need to be the same. + for _, receiver := range receivers { + rand.Shuffle(len(senders), func(i, j int) { + receiveFrom[receiver.id][i], receiveFrom[receiver.id][j] = + receiveFrom[receiver.id][j], receiveFrom[receiver.id][i] + }) + for _, sender := range receiveFrom[receiver.id] { + prvShare, ok := sender.prvShares.Share(receiver.id) + s.Require().True(ok) + err := receiver.receivedPrvShares.AddShare(sender.id, prvShare) + s.Require().NoError(err) + } + } +} + +func (s *DKGTestSuite) signWithQualifyIDs( + member member, qualifyIDs IDs, hash common.Hash) PartialSignature { + prvKey, err := member.receivedPrvShares.RecoverPrivateKey(qualifyIDs) + s.Require().NoError(err) + sig, err := prvKey.Sign(hash) + s.Require().NoError(err) + return PartialSignature(sig) +} + +func (s *DKGTestSuite) verifySigWithQualifyIDs( + members []member, qualifyIDs IDs, + signer ID, hash common.Hash, sig PartialSignature) bool { + membersIdx := make(map[ID]int) + for idx, member := range members { + membersIdx[member.id] = idx + } + pubShares := NewEmptyPublicKeyShares() + for _, id := range qualifyIDs { + idx, exist := membersIdx[id] + s.Require().True(exist) + member := members[idx] + pubShare, err := member.pubShares.Share(signer) + s.Require().NoError(err) + err = pubShares.AddShare(id, pubShare) + s.Require().NoError(err) + } + pubKey, err := pubShares.RecoverPublicKey(qualifyIDs) + s.Require().NoError(err) + return pubKey.VerifySignature(hash, crypto.Signature(sig)) +} + +func (s *DKGTestSuite) TestVerifyKeyShares() { + invalidID := NewID([]byte{0}) + ids := []ID{NewID([]byte{1}), NewID([]byte{2}), NewID([]byte{3})} + members := []member{} + for _, id := range ids { + members = append(members, member{ + id: id, + receivedPubShares: make(map[ID]*PublicKeyShares), + }) + } + + prvShares, pubShares := NewPrivateKeyShares(2) + prvShares.SetParticipants(ids) + + _, ok := prvShares.Share(invalidID) + s.False(ok) + for _, id := range ids { + prvShare, ok := prvShares.Share(id) + s.Require().True(ok) + valid, err := pubShares.VerifyPrvShare(id, prvShare) + s.Require().NoError(err) + s.True(valid) + pubShare, err := pubShares.Share(id) + s.Require().NoError(err) + valid, err = pubShares.VerifyPubShare(id, pubShare) + s.Require().NoError(err) + s.True(valid) + } + + // Test of faulty private/public key. + invalidPrvShare := NewPrivateKey() + valid, err := pubShares.VerifyPrvShare(ids[0], invalidPrvShare) + s.Require().NoError(err) + s.False(valid) + + invalidPubShare, ok := invalidPrvShare.PublicKey().(PublicKey) + s.Require().True(ok) + valid, err = pubShares.VerifyPubShare(ids[0], &invalidPubShare) + s.Require().NoError(err) + s.False(valid) + + // Test of faulty signature. 
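+ // A signature made by a key that never took part in the DKG must fail
+ // verification against every member's recovered public share.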
+ for idx := range members { + members[idx].prvShares, members[idx].pubShares = NewPrivateKeyShares(2) + members[idx].prvShares.SetParticipants(ids) + members[idx].receivedPrvShares = NewEmptyPrivateKeyShares() + } + s.sendKey(members, members) + hash := crypto.Keccak256Hash([]byte("👾👾👾👾👾👾")) + sig, err := invalidPrvShare.Sign(hash) + s.Require().NoError(err) + psig := PartialSignature(sig) + for _, member := range members { + valid = s.verifySigWithQualifyIDs(members, ids, member.id, hash, psig) + s.False(valid) + } + + // Test of faulty group signature. + groupPubShares := make([]*PublicKeyShares, 0, len(members)) + sigs := make([]PartialSignature, 0, len(members)) + for _, member := range members { + sigs = append(sigs, s.signWithQualifyIDs(member, ids, hash)) + groupPubShares = append(groupPubShares, member.pubShares) + } + sigs[0] = psig + recoverSig, err := RecoverSignature(sigs, ids) + s.Require().NoError(err) + + pubKey := RecoverGroupPublicKey(groupPubShares) + s.False(pubKey.VerifySignature(hash, recoverSig)) +} + +func (s *DKGTestSuite) TestDKGProtocol() { + k := 5 + members := []member{} + ids := s.genID((k + 1) * 2) + for _, id := range ids { + members = append(members, member{ + id: id, + receivedPubShares: make(map[ID]*PublicKeyShares), + }) + } + + for idx := range members { + members[idx].prvShares, members[idx].pubShares = NewPrivateKeyShares(k) + members[idx].prvShares.SetParticipants(ids) + members[idx].receivedPrvShares = NewEmptyPrivateKeyShares() + } + // Randomly select non-disqualified members. + nums := make([]int, len(members)) + for i := range nums { + nums[i] = i + } + rand.Shuffle(len(nums), func(i, j int) { + nums[i], nums[j] = nums[j], nums[i] + }) + nums = nums[:rand.Intn(len(members))] + sort.Ints(nums) + qualify := make([]member, 0, len(nums)) + for _, idx := range nums { + qualify = append(qualify, members[idx]) + } + // TODO(jimmy-dexon): Remove below line after finishing test of random select. + qualify = members + // Members are partitioned into two groups. 
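+ // Each group recovers its own signature from its members' partial
+ // signatures; both recovered signatures are expected to be identical.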
+ grp1, grp2 := members[:k+1], members[k+1:] + collectIDs := func(members []member) IDs { + IDs := make(IDs, 0, len(members)) + for _, member := range members { + IDs = append(IDs, member.id) + } + return IDs + } + signMsg := func( + members []member, qualify []member, hash common.Hash) []PartialSignature { + ids := collectIDs(qualify) + sigs := make([]PartialSignature, 0, len(members)) + for _, member := range members { + sig := s.signWithQualifyIDs(member, ids, hash) + sigs = append(sigs, sig) + } + return sigs + } + verifySig := func( + members []member, + signer []ID, sig []PartialSignature, qualify []member, hash common.Hash) bool { + ids := collectIDs(qualify) + for i := range sig { + if !s.verifySigWithQualifyIDs(members, ids, signer[i], hash, sig[i]) { + return false + } + } + return true + } + s.sendKey(qualify, grp1) + s.sendKey(qualify, grp2) + hash := crypto.Keccak256Hash([]byte("🛫")) + sig1 := signMsg(grp1, qualify, hash) + sig2 := signMsg(grp2, qualify, hash) + s.True(verifySig(members, collectIDs(grp1), sig1, qualify, hash)) + s.True(verifySig(members, collectIDs(grp2), sig2, qualify, hash)) + recoverSig1, err := RecoverSignature(sig1, collectIDs(grp1)) + s.Require().NoError(err) + recoverSig2, err := RecoverSignature(sig2, collectIDs(grp2)) + s.Require().NoError(err) + s.Equal(recoverSig1, recoverSig2) + + pubShares := make([]*PublicKeyShares, 0, len(members)) + for _, member := range members { + pubShares = append(pubShares, member.pubShares) + } + groupPK := RecoverGroupPublicKey(pubShares) + s.True(groupPK.VerifySignature(hash, recoverSig1)) + s.True(groupPK.VerifySignature(hash, recoverSig2)) +} + +func (s *DKGTestSuite) TestSignature() { + prvKey := NewPrivateKey() + pubKey := prvKey.PublicKey() + hash := crypto.Keccak256Hash([]byte("🛫")) + sig, err := prvKey.Sign(hash) + s.Require().NoError(err) + s.True(pubKey.VerifySignature(hash, sig)) + sig.Signature[0]++ + s.False(pubKey.VerifySignature(hash, sig)) + sig = crypto.Signature{} + s.False(pubKey.VerifySignature(hash, sig)) +} + +func (s *DKGTestSuite) TestPrivateKeyRLPEncodeDecode() { + k := NewPrivateKey() + b, err := rlp.EncodeToBytes(k) + s.Require().NoError(err) + + var kk PrivateKey + err = rlp.DecodeBytes(b, &kk) + s.Require().NoError(err) + + s.Require().True(reflect.DeepEqual(*k, kk)) +} + +func (s *DKGTestSuite) TestPublicKeySharesRLPEncodeDecode() { + p := NewEmptyPublicKeyShares() + for _, id := range s.genID(1) { + privkey := NewPrivateKey() + pubkey := privkey.PublicKey().(PublicKey) + p.AddShare(id, &pubkey) + p.masterPublicKey = append(p.masterPublicKey, pubkey.publicKey) + } + + b, err := rlp.EncodeToBytes(p) + s.Require().NoError(err) + + var pp PublicKeyShares + err = rlp.DecodeBytes(b, &pp) + s.Require().NoError(err) + + bb, err := rlp.EncodeToBytes(&pp) + s.Require().NoError(err) + + s.Require().True(reflect.DeepEqual(b, bb)) +} + +func (s *DKGTestSuite) TestPrivateKeySharesRLPEncodeDecode() { + privShares, _ := NewPrivateKeyShares(10) + privShares.shares = append(privShares.shares, PrivateKey{}) + privShares.shareIndex = map[ID]int{ + ID{}: 0, + } + + b, err := rlp.EncodeToBytes(privShares) + s.Require().NoError(err) + + var newPrivShares PrivateKeyShares + err = rlp.DecodeBytes(b, &newPrivShares) + s.Require().NoError(err) + + bb, err := rlp.EncodeToBytes(&newPrivShares) + s.Require().NoError(err) + + s.Require().True(reflect.DeepEqual(b, bb)) + s.Require().True(privShares.Equal(&newPrivShares)) +} + +func (s *DKGTestSuite) TestPublicKeySharesEquality() { + var req = s.Require() + IDs := 
s.genID(2) + _, pubShares1 := NewPrivateKeyShares(4) + // Make a copy from an empty share. + pubShares2 := pubShares1.Clone() + req.True(pubShares1.Equal(pubShares2)) + // Add two shares. + prvKey1 := NewPrivateKey() + pubKey1 := prvKey1.PublicKey().(PublicKey) + req.NoError(pubShares1.AddShare(IDs[0], &pubKey1)) + prvKey2 := NewPrivateKey() + pubKey2 := prvKey2.PublicKey().(PublicKey) + req.True(pubShares1.Equal(pubShares2)) + // Clone the shares. + req.NoError(pubShares2.AddShare(IDs[0], &pubKey1)) + req.NoError(pubShares2.AddShare(IDs[1], &pubKey2)) + // They should be equal now. + req.True(pubShares1.Equal(pubShares2)) + req.True(pubShares2.Equal(pubShares1)) +} + +func (s *DKGTestSuite) TestPublicKeySharesMove() { + var req = s.Require() + IDs := s.genID(2) + _, pubShares1 := NewPrivateKeyShares(4) + // Make a copy from an empty share. + pubShares2 := pubShares1.Clone() + req.True(pubShares1.Equal(pubShares2)) + // Move from pubShare1. + pubShares3 := pubShares1.Move() + // Add two shares. + prvKey1 := NewPrivateKey() + pubKey1 := prvKey1.PublicKey().(PublicKey) + req.NoError(pubShares3.AddShare(IDs[0], &pubKey1)) + prvKey2 := NewPrivateKey() + pubKey2 := prvKey2.PublicKey().(PublicKey) + req.True(pubShares3.Equal(pubShares2)) + // Clone the shares. + req.NoError(pubShares2.AddShare(IDs[0], &pubKey1)) + req.NoError(pubShares2.AddShare(IDs[1], &pubKey2)) + // They should be equal now. + req.True(pubShares3.Equal(pubShares2)) + req.True(pubShares2.Equal(pubShares3)) +} + +func (s *DKGTestSuite) TestPublicKeySharesConcurrent() { + t := 5 + n := 10 + IDs := make(IDs, n) + for i := range IDs { + id := common.NewRandomHash() + IDs[i] = NewID(id[:]) + } + _, pubShare := NewPrivateKeyShares(t) + for _, id := range IDs { + go pubShare.Share(id) + } +} + +func (s *DKGTestSuite) TestPrivateKeySharesEquality() { + var req = s.Require() + IDs := s.genID(2) + prvShares1, _ := NewPrivateKeyShares(4) + // Make a copy of empty share. + prvShares2 := NewEmptyPrivateKeyShares() + req.False(prvShares1.Equal(prvShares2)) + // Clone the master private key. + for _, m := range prvShares1.masterPrivateKey { + var key bls.SecretKey + req.NoError(key.SetLittleEndian(m.GetLittleEndian())) + prvShares2.masterPrivateKey = append(prvShares2.masterPrivateKey, key) + } + // Add two shares. + prvKey1 := NewPrivateKey() + req.NoError(prvShares1.AddShare(IDs[0], prvKey1)) + prvKey2 := NewPrivateKey() + req.NoError(prvShares1.AddShare(IDs[1], prvKey2)) + // They are not equal now. + req.False(prvShares1.Equal(prvShares2)) + // Clone the shares. + req.NoError(prvShares2.AddShare(IDs[0], prvKey1)) + req.NoError(prvShares2.AddShare(IDs[1], prvKey2)) + // They should be equal now. 
+ req.True(prvShares1.Equal(prvShares2)) + req.True(prvShares2.Equal(prvShares1)) +} + +func (s *DKGTestSuite) TestPublicKeySharesClone() { + _, pubShares1 := NewPrivateKeyShares(4) + IDs := s.genID(2) + prvKey1 := NewPrivateKey() + pubKey1 := prvKey1.PublicKey().(PublicKey) + s.Require().NoError(pubShares1.AddShare(IDs[0], &pubKey1)) + pubShares2 := pubShares1.Clone() + s.Require().True(pubShares1.Equal(pubShares2)) +} + +func TestDKG(t *testing.T) { + suite.Run(t, new(DKGTestSuite)) +} + +func BenchmarkDKGProtocol(b *testing.B) { + t := 33 + n := 100 + s := new(DKGTestSuite) + + self := member{} + members := make([]*member, n-1) + ids := make(IDs, n) + + b.Run("DKG", func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + self.id = s.genID(1)[0] + self.receivedPubShares = make(map[ID]*PublicKeyShares, n) + for idx, id := range s.genID(n - 1) { + ids[idx] = id + } + for idx := range members { + members[idx] = &member{ + id: ids[idx], + receivedPubShares: make(map[ID]*PublicKeyShares), + receivedPrvShares: NewEmptyPrivateKeyShares(), + } + } + ids[n-1] = self.id + prvShares := make(map[ID]*PrivateKey, n) + for idx := range members { + members[idx].prvShares, members[idx].pubShares = NewPrivateKeyShares(t) + members[idx].prvShares.SetParticipants(ids) + prvShare, ok := members[idx].prvShares.Share(self.id) + if !ok { + b.FailNow() + } + prvShares[members[idx].id] = prvShare + } + + b.StartTimer() + self.prvShares, self.pubShares = NewPrivateKeyShares(t) + self.prvShares.SetParticipants(ids) + self.receivedPrvShares = NewEmptyPrivateKeyShares() + for _, member := range members { + self.receivedPubShares[member.id] = member.pubShares + } + self.receivedPubShares[self.id] = self.pubShares + prvShare, ok := self.prvShares.Share(self.id) + if !ok { + b.FailNow() + } + prvShares[self.id] = prvShare + for id, prvShare := range prvShares { + ok, err := self.receivedPubShares[id].VerifyPrvShare(self.id, prvShare) + if err != nil { + b.Fatalf("%v", err) + } + if !ok { + b.FailNow() + } + if err := self.receivedPrvShares.AddShare(id, prvShare); err != nil { + b.Fatalf("%v", err) + } + } + if _, err := self.receivedPrvShares.RecoverPrivateKey(ids); err != nil { + b.Fatalf("%v", err) + } + } + }) + + hash := crypto.Keccak256Hash([]byte("🏖")) + b.Run("Share-Sign", func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + prvKey, err := self.receivedPrvShares.RecoverPrivateKey(ids) + if err != nil { + b.Fatalf("%v", err) + } + b.StartTimer() + if _, err := prvKey.Sign(hash); err != nil { + b.Fatalf("%v", err) + } + } + }) + + sendKey := func(sender *member, receiver *member, b *testing.B) { + receiver.receivedPubShares[sender.id] = sender.pubShares + prvShare, ok := sender.prvShares.Share(receiver.id) + if !ok { + b.FailNow() + } + ok, err := receiver.receivedPubShares[sender.id].VerifyPrvShare( + receiver.id, prvShare) + if err != nil { + b.Fatalf("%v", err) + } + if !ok { + b.FailNow() + } + if err := receiver.receivedPrvShares.AddShare(sender.id, prvShare); err != nil { + b.Fatalf("%v", err) + } + } + + members = append(members, &self) + + for _, sender := range members { + wg := sync.WaitGroup{} + for _, receiver := range members { + if sender == receiver { + continue + } + wg.Add(1) + go func(receiver *member) { + sendKey(sender, receiver, b) + wg.Done() + }(receiver) + } + wg.Wait() + } + wg := sync.WaitGroup{} + for _, m := range members { + wg.Add(1) + go func(member *member) { + sendKey(member, member, b) + wg.Done() + }(m) + } + wg.Wait() + + sign := func(member *member) 
PartialSignature { + prvKey, err := member.receivedPrvShares.RecoverPrivateKey(ids) + if err != nil { + b.Fatalf("%v", err) + } + sig, err := prvKey.Sign(hash) + if err != nil { + b.Fatalf("%v", err) + } + return PartialSignature(sig) + } + + b.Run("Combine-Sign", func(b *testing.B) { + b.StopTimer() + sigs := make([]PartialSignature, n) + for idx, member := range members { + sigs[idx] = sign(member) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + if _, err := RecoverSignature(sigs, ids); err != nil { + b.Fatalf("%v", err) + } + } + }) + + b.Run("Recover-GroupPK", func(b *testing.B) { + b.StopTimer() + pubShares := make([]*PublicKeyShares, 0, len(members)) + for _, member := range members { + pubShares = append(pubShares, member.pubShares) + } + b.StartTimer() + for i := 0; i < b.N; i++ { + RecoverGroupPublicKey(pubShares) + } + }) +} + +func BenchmarkGPKShare81_121(b *testing.B) { benchmarkGPKShare(b, 81, 121) } + +func benchmarkGPKShare(b *testing.B, t, n int) { + _, pubShare := NewPrivateKeyShares(t) + IDs := make(IDs, n) + for i := range IDs { + id := common.NewRandomHash() + IDs[i] = NewID(id[:]) + } + + for _, id := range IDs { + _, err := pubShare.Share(id) + if err != nil { + panic(err) + } + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, id := range IDs { + pubShare.Share(id) + } + } +} + +func BenchmarkGPKAddShare81_121(b *testing.B) { benchmarkGPKAddShare(b, 81, 121) } + +func benchmarkGPKAddShare(b *testing.B, t, n int) { + IDs := make(IDs, n) + for i := range IDs { + id := common.NewRandomHash() + IDs[i] = NewID(id[:]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + _, pubShare := NewPrivateKeyShares(t) + b.StartTimer() + for _, id := range IDs { + pubShare.Share(id) + } + } +} diff --git a/dex/consensus/core/crypto/dkg/utils.go b/dex/consensus/core/crypto/dkg/utils.go new file mode 100644 index 000000000..9e470f0cf --- /dev/null +++ b/dex/consensus/core/crypto/dkg/utils.go @@ -0,0 +1,92 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package dkg + +import ( + "encoding/binary" + "fmt" + "math/rand" + + "github.com/dexon-foundation/bls/ffi/go/bls" + + "github.com/dexon-foundation/dexon-consensus/core/crypto" +) + +// PartialSignature is a partial signature in DKG+TSIG protocol. +type PartialSignature crypto.Signature + +var ( + // ErrEmptySignature is reported if the signature is empty. + ErrEmptySignature = fmt.Errorf("invalid empty signature") +) + +// RecoverSignature recovers TSIG signature. 
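+//
+// Illustrative sketch (not in the original sources): sigs[i] must be the
+// partial signature produced by the signer whose DKG ID is signerIDs[i];
+// any subset of at least threshold size recovers the same group signature:
+//
+//   sig, err := RecoverSignature(partialSigs, partialSignerIDs)
+//   // groupPK := RecoverGroupPublicKey(allPubShares)
+//   // valid := groupPK.VerifySignature(hash, sig)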
+func RecoverSignature(sigs []PartialSignature, signerIDs IDs) (
+ crypto.Signature, error) {
+ blsSigs := make([]bls.Sign, len(sigs))
+ for i, sig := range sigs {
+ if len(sig.Signature) == 0 {
+ return crypto.Signature{}, ErrEmptySignature
+ }
+ if err := blsSigs[i].Deserialize([]byte(sig.Signature)); err != nil {
+ return crypto.Signature{}, err
+ }
+ }
+ var recoverSig bls.Sign
+ if err := recoverSig.Recover(blsSigs, []bls.ID(signerIDs)); err != nil {
+ return crypto.Signature{}, err
+ }
+ return crypto.Signature{
+ Type: cryptoType,
+ Signature: recoverSig.Serialize()}, nil
+}
+
+// RecoverGroupPublicKey recovers the group public key.
+func RecoverGroupPublicKey(pubShares []*PublicKeyShares) *PublicKey {
+ var pub *PublicKey
+ for _, pubShare := range pubShares {
+ pk0 := pubShare.masterPublicKey[0]
+ if pub == nil {
+ pub = &PublicKey{
+ publicKey: pk0,
+ }
+ } else {
+ pub.publicKey.Add(&pk0)
+ }
+ }
+ return pub
+}
+
+// NewRandomPrivateKeyShares constructs private key shares randomly.
+func NewRandomPrivateKeyShares() *PrivateKeyShares {
+ // Generate random IDs.
+ rndIDs := make(IDs, 0, 10)
+ for i := 0; i < cap(rndIDs); i++ {
+ id := make([]byte, 8)
+ binary.LittleEndian.PutUint64(id, rand.Uint64())
+ rndIDs = append(rndIDs, NewID(id))
+ }
+ // Register a fresh random share for each ID; there is no master key
+ // here, so the shares are independent random keys.
+ prvShares := NewEmptyPrivateKeyShares()
+ for _, id := range rndIDs {
+ if err := prvShares.AddShare(id, NewPrivateKey()); err != nil {
+ panic(err)
+ }
+ }
+ return prvShares
+}
diff --git a/dex/consensus/core/crypto/ecdsa/ecdsa.go b/dex/consensus/core/crypto/ecdsa/ecdsa.go
new file mode 100644
index 000000000..9ef3260a1
--- /dev/null
+++ b/dex/consensus/core/crypto/ecdsa/ecdsa.go
@@ -0,0 +1,139 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package ecdsa
+
+import (
+ "crypto/ecdsa"
+
+ dexCrypto "github.com/dexon-foundation/dexon/crypto"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+)
+
+const cryptoType = "ecdsa"
+
+func init() {
+ if err := crypto.RegisterSigToPub(cryptoType, SigToPub); err != nil {
+ panic(err)
+ }
+}
+
+// PrivateKey represents a private key structure used in geth and implements
+// the Crypto.PrivateKey interface.
+type PrivateKey struct {
+ privateKey *ecdsa.PrivateKey
+}
+
+// PublicKey represents a public key structure used in geth and implements
+// the Crypto.PublicKey interface.
+type PublicKey struct {
+ publicKey *ecdsa.PublicKey
+}
+
+// Oops returns the underlying ecdsa.PublicKey.
+func (p *PublicKey) Oops() *ecdsa.PublicKey {
+ return p.publicKey
+}
+
+// NewPrivateKey creates a new PrivateKey structure.
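+//
+// Illustrative sign/verify sketch (not part of the original sources):
+//
+//   prv, _ := NewPrivateKey()
+//   hash := crypto.Keccak256Hash([]byte("payload")) // hash before signing
+//   sig, _ := prv.Sign(hash)
+//   valid := prv.PublicKey().VerifySignature(hash, sig) // true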
+func NewPrivateKey() (*PrivateKey, error) { + key, err := dexCrypto.GenerateKey() + if err != nil { + return nil, err + } + return &PrivateKey{privateKey: key}, nil +} + +// NewPrivateKeyFromECDSA creates a new PrivateKey structure from +// ecdsa.PrivateKey. +func NewPrivateKeyFromECDSA(key *ecdsa.PrivateKey) *PrivateKey { + return &PrivateKey{privateKey: key} +} + +// NewPublicKeyFromECDSA creates a new PublicKey structure from +// ecdsa.PublicKey. +func NewPublicKeyFromECDSA(key *ecdsa.PublicKey) *PublicKey { + return &PublicKey{publicKey: key} +} + +// NewPublicKeyFromByteSlice constructs an eth.publicKey instance from +// a byte slice. +func NewPublicKeyFromByteSlice(b []byte) (crypto.PublicKey, error) { + pub, err := dexCrypto.UnmarshalPubkey(b) + if err != nil { + return &PublicKey{}, err + } + return &PublicKey{publicKey: pub}, nil +} + +// PublicKey returns the public key associate this private key. +func (prv *PrivateKey) PublicKey() crypto.PublicKey { + return NewPublicKeyFromECDSA(&(prv.privateKey.PublicKey)) +} + +// Sign calculates an ECDSA signature. +// +// This function is susceptible to chosen plaintext attacks that can leak +// information about the private key that is used for signing. Callers must +// be aware that the given hash cannot be chosen by an adversery. Common +// solution is to hash any input before calculating the signature. +// +// The produced signature is in the [R || S || V] format where V is 0 or 1. +func (prv *PrivateKey) Sign(hash common.Hash) ( + sig crypto.Signature, err error) { + s, err := dexCrypto.Sign(hash[:], prv.privateKey) + sig = crypto.Signature{ + Type: cryptoType, + Signature: s, + } + return +} + +// VerifySignature checks that the given public key created signature over hash. +// The public key should be in compressed (33 bytes) or uncompressed (65 bytes) +// format. +// The signature should have the 64 byte [R || S] format. +func (pub *PublicKey) VerifySignature( + hash common.Hash, signature crypto.Signature) bool { + sig := signature.Signature + if len(sig) == 65 { + // The last byte is for ecrecover. + sig = sig[:64] + } + return dexCrypto.VerifySignature(pub.Bytes(), hash[:], sig) +} + +// Compress encodes a public key to the 33-byte compressed format. +func (pub *PublicKey) Compress() []byte { + return dexCrypto.CompressPubkey(pub.publicKey) +} + +// Bytes returns the []byte representation of uncompressed public key. (65 bytes) +func (pub *PublicKey) Bytes() []byte { + return dexCrypto.FromECDSAPub(pub.publicKey) +} + +// SigToPub returns the PublicKey that created the given signature. +func SigToPub( + hash common.Hash, signature crypto.Signature) (crypto.PublicKey, error) { + key, err := dexCrypto.SigToPub(hash[:], signature.Signature[:]) + if err != nil { + return &PublicKey{}, err + } + return &PublicKey{publicKey: key}, nil +} diff --git a/dex/consensus/core/crypto/ecdsa/ecdsa_test.go b/dex/consensus/core/crypto/ecdsa/ecdsa_test.go new file mode 100644 index 000000000..ada27b24b --- /dev/null +++ b/dex/consensus/core/crypto/ecdsa/ecdsa_test.go @@ -0,0 +1,86 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. 
+// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package ecdsa + +import ( + "testing" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/stretchr/testify/suite" +) + +type ETHCryptoTestSuite struct { + suite.Suite +} + +func (s *ETHCryptoTestSuite) TestSignature() { + prv1, err := NewPrivateKey() + s.Require().Nil(err) + hash1 := common.NewRandomHash() + hash2 := common.NewRandomHash() + + // Test that same private key should produce same signature. + sig11, err := prv1.Sign(hash1) + s.Require().Nil(err) + sig112, err := prv1.Sign(hash1) + s.Require().Nil(err) + s.Equal(sig11, sig112) + + // Test that different private key should produce different signature. + prv2, err := NewPrivateKey() + s.Require().Nil(err) + sig21, err := prv2.Sign(hash1) + s.Require().Nil(err) + s.NotEqual(sig11, sig21) + + // Test that different hash should produce different signature. + sig12, err := prv1.Sign(hash2) + s.Require().Nil(err) + s.NotEqual(sig11, sig12) + + // Test VerifySignature with correct public key. + pub1, ok := prv1.PublicKey().(*PublicKey) + s.Require().True(ok) + s.True(pub1.VerifySignature(hash1, sig11)) + + // Test VerifySignature with wrong hash. + s.False(pub1.VerifySignature(hash2, sig11)) + // Test VerifySignature with wrong signature. + s.False(pub1.VerifySignature(hash1, sig21)) + // Test VerifySignature with wrong public key. + pub2 := prv2.PublicKey() + s.False(pub2.VerifySignature(hash1, sig11)) +} + +func (s *ETHCryptoTestSuite) TestSigToPub() { + prv, err := NewPrivateKey() + s.Require().Nil(err) + data := "DEXON is infinitely scalable and low-latency." + hash := crypto.Keccak256Hash([]byte(data)) + sigmsg, err := prv.Sign(hash) + s.Require().Nil(err) + + pubkey, err := SigToPub(hash, sigmsg) + s.Require().Nil(err) + s.Equal(pubkey, prv.PublicKey()) +} + +func TestCrypto(t *testing.T) { + suite.Run(t, new(ETHCryptoTestSuite)) +} diff --git a/dex/consensus/core/crypto/interfaces.go b/dex/consensus/core/crypto/interfaces.go new file mode 100644 index 000000000..f3e01e42c --- /dev/null +++ b/dex/consensus/core/crypto/interfaces.go @@ -0,0 +1,48 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package crypto + +import ( + "github.com/dexon-foundation/dexon-consensus/common" +) + +// Signature is the basic signature type in DEXON. 
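+//
+// Type names the signing scheme ("bls" or "ecdsa" in this commit) and is
+// used to select the SigToPub implementation registered for that scheme.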
+type Signature struct { + Type string + Signature []byte +} + +// PrivateKey describes the asymmetric cryptography interface that interacts +// with the private key. +type PrivateKey interface { + // PublicKey returns the public key associate this private key. + PublicKey() PublicKey + + // Sign calculates a signature. + Sign(hash common.Hash) (Signature, error) +} + +// PublicKey describes the asymmetric cryptography interface that interacts +// with the public key. +type PublicKey interface { + // VerifySignature checks that the given public key created signature over hash. + VerifySignature(hash common.Hash, signature Signature) bool + + // Bytes returns the []byte representation of public key. + Bytes() []byte +} diff --git a/dex/consensus/core/crypto/utils.go b/dex/consensus/core/crypto/utils.go new file mode 100644 index 000000000..59e91f5a5 --- /dev/null +++ b/dex/consensus/core/crypto/utils.go @@ -0,0 +1,80 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package crypto + +import ( + "encoding/hex" + "fmt" + + "github.com/dexon-foundation/dexon/crypto" + + "github.com/dexon-foundation/dexon-consensus/common" +) + +var ( + // ErrSigToPubTypeNotFound is reported if the type is already used. + ErrSigToPubTypeNotFound = fmt.Errorf("type of sigToPub is not found") + + // ErrSigToPubTypeAlreadyExist is reported if the type is already used. + ErrSigToPubTypeAlreadyExist = fmt.Errorf("type of sigToPub is already exist") +) + +// SigToPubFn is a function to recover public key from signature. +type SigToPubFn func(hash common.Hash, signature Signature) (PublicKey, error) + +var sigToPubCB map[string]SigToPubFn + +func init() { + sigToPubCB = make(map[string]SigToPubFn) +} + +// Keccak256Hash calculates and returns the Keccak256 hash of the input data, +// converting it to an internal Hash data structure. +func Keccak256Hash(data ...[]byte) (h common.Hash) { + return common.Hash(crypto.Keccak256Hash(data...)) +} + +// Clone returns a deep copy of a signature. +func (sig Signature) Clone() Signature { + return Signature{ + Type: sig.Type, + Signature: sig.Signature[:], + } +} + +func (sig Signature) String() string { + return hex.EncodeToString([]byte(sig.Signature[:])) +} + +// RegisterSigToPub registers a sigToPub function of type. +func RegisterSigToPub(sigType string, sigToPub SigToPubFn) error { + if _, exist := sigToPubCB[sigType]; exist { + return ErrSigToPubTypeAlreadyExist + } + sigToPubCB[sigType] = sigToPub + return nil +} + +// SigToPub recovers public key from signature. 
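+//
+// Illustrative sketch (not in the original sources): recover the signer's
+// public key, then confirm it actually verifies the signature:
+//
+//   pub, err := SigToPub(hash, sig)
+//   if err == nil && pub.VerifySignature(hash, sig) {
+//       // sig was produced by the owner of pub.
+//   }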
+func SigToPub(hash common.Hash, signature Signature) (PublicKey, error) {
+ sigToPub, exist := sigToPubCB[signature.Type]
+ if !exist {
+ return nil, ErrSigToPubTypeNotFound
+ }
+ return sigToPub(hash, signature)
+}
diff --git a/dex/consensus/core/crypto/utils_test.go b/dex/consensus/core/crypto/utils_test.go
new file mode 100644
index 000000000..cb7455173
--- /dev/null
+++ b/dex/consensus/core/crypto/utils_test.go
@@ -0,0 +1,52 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package crypto
+
+import (
+ "encoding/hex"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+)
+
+type CryptoTestSuite struct {
+ suite.Suite
+}
+
+func (s *CryptoTestSuite) TestHash() {
+ cases := []struct {
+ input string
+ output string
+ }{
+ {"DEXON ROCKS!",
+ "1a3f3a424aaa464e51b693585bba3a0c439d5f1ad3b5868e46d9f830225983bd"},
+ {"Dexon Foundation",
+ "25ed4237aa978bfe706cc11c7a46a95de1a46302faea7ff6e900b03fa2b7b480"},
+ {"INFINITELY SCALABLE AND LOW-LATENCY",
+ "ed3384c58a434fbc0bc887a85659eddf997e7da978ab66565ac865f995b77cf1"},
+ }
+ for _, testcase := range cases {
+ hash := Keccak256Hash([]byte(testcase.input))
+ output := hex.EncodeToString(hash[:])
+ s.Equal(testcase.output, output)
+ }
+}
+
+func TestCrypto(t *testing.T) {
+ suite.Run(t, new(CryptoTestSuite))
+}
diff --git a/dex/consensus/core/db/interfaces.go b/dex/consensus/core/db/interfaces.go
new file mode 100644
index 000000000..a571a8021
--- /dev/null
+++ b/dex/consensus/core/db/interfaces.go
@@ -0,0 +1,100 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package db
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+)
+
+var (
+ // ErrBlockExists is the error when the block exists.
+ ErrBlockExists = errors.New("block exists")
+ // ErrBlockDoesNotExist is the error when the block does not exist.
+ ErrBlockDoesNotExist = errors.New("block does not exist")
+ // ErrIterationFinished is the error returned when the iteration is finished.
+ ErrIterationFinished = errors.New("iteration finished")
+ // ErrEmptyPath is the error when the required path is empty.
+ ErrEmptyPath = fmt.Errorf("empty path")
+ // ErrClosed is the error when using the DB after it's closed.
+ ErrClosed = fmt.Errorf("db closed")
+ // ErrNotImplemented is the error that some interface is not implemented.
+ ErrNotImplemented = fmt.Errorf("not implemented")
+ // ErrInvalidCompactionChainTipHeight means the newly updated height of
+ // the tip of the compaction chain is invalid; usually it's smaller than
+ // the currently cached one.
+ ErrInvalidCompactionChainTipHeight = fmt.Errorf(
+ "invalid compaction chain tip height")
+ // ErrDKGPrivateKeyExists is raised when attempting to save a DKG private
+ // key that was already saved.
+ ErrDKGPrivateKeyExists = errors.New("dkg private key exists")
+ // ErrDKGPrivateKeyDoesNotExist is raised when the DKG private key of the
+ // requested round does not exist.
+ ErrDKGPrivateKeyDoesNotExist = errors.New("dkg private key does not exist")
+ // ErrDKGProtocolExists is raised when attempting to save a DKG protocol
+ // that was already saved.
+ ErrDKGProtocolExists = errors.New("dkg protocol exists")
+ // ErrDKGProtocolDoesNotExist is raised when the DKG protocol of the
+ // requested round does not exist.
+ ErrDKGProtocolDoesNotExist = errors.New("dkg protocol does not exist")
+)
+
+// Database is the interface for a Database.
+type Database interface {
+ Reader
+ Writer
+
+ // Close allows the database implementation to
+ // release resources when finishing.
+ Close() error
+}
+
+// Reader defines the interface for reading blocks from the DB.
+type Reader interface {
+ HasBlock(hash common.Hash) bool
+ GetBlock(hash common.Hash) (types.Block, error)
+ GetAllBlocks() (BlockIterator, error)
+
+ // GetCompactionChainTipInfo returns the block hash and finalization height
+ // of the tip block of the compaction chain. An empty hash and zero height
+ // mean the compaction chain is empty.
+ GetCompactionChainTipInfo() (common.Hash, uint64)
+
+ // DKG Private Key related methods.
+ GetDKGPrivateKey(round, reset uint64) (dkg.PrivateKey, error)
+ GetDKGProtocol() (dkgProtocol DKGProtocolInfo, err error)
+}
+
+// Writer defines the interface for writing blocks into the DB.
+type Writer interface {
+ UpdateBlock(block types.Block) error
+ PutBlock(block types.Block) error
+ PutCompactionChainTipInfo(common.Hash, uint64) error
+ PutDKGPrivateKey(round, reset uint64, pk dkg.PrivateKey) error
+ PutOrUpdateDKGProtocol(dkgProtocol DKGProtocolInfo) error
+}
+
+// BlockIterator defines an iterator on blocks held
+// in a DB.
+type BlockIterator interface {
+ NextBlock() (types.Block, error)
+}
diff --git a/dex/consensus/core/db/level-db.go b/dex/consensus/core/db/level-db.go
new file mode 100644
index 000000000..da8bc0bc1
--- /dev/null
+++ b/dex/consensus/core/db/level-db.go
@@ -0,0 +1,573 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package db + +import ( + "encoding/binary" + "io" + + "github.com/syndtr/goleveldb/leveldb" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/dexon-foundation/dexon/rlp" +) + +var ( + blockKeyPrefix = []byte("b-") + compactionChainTipInfoKey = []byte("cc-tip") + dkgPrivateKeyKeyPrefix = []byte("dkg-prvs") + dkgProtocolInfoKeyPrefix = []byte("dkg-protocol-info") +) + +type compactionChainTipInfo struct { + Height uint64 `json:"height"` + Hash common.Hash `json:"hash"` +} + +// DKGProtocolInfo DKG protocol info. +type DKGProtocolInfo struct { + ID types.NodeID + Round uint64 + Threshold uint64 + IDMap NodeIDToDKGID + MpkMap NodeIDToPubShares + MasterPrivateShare dkg.PrivateKeyShares + IsMasterPrivateShareEmpty bool + PrvShares dkg.PrivateKeyShares + IsPrvSharesEmpty bool + PrvSharesReceived NodeID + NodeComplained NodeID + AntiComplaintReceived NodeIDToNodeIDs + Step uint64 + Reset uint64 +} + +type dkgPrivateKey struct { + PK dkg.PrivateKey + Reset uint64 +} + +// Equal compare with target DKGProtocolInfo. +func (info *DKGProtocolInfo) Equal(target *DKGProtocolInfo) bool { + if !info.ID.Equal(target.ID) || + info.Round != target.Round || + info.Threshold != target.Threshold || + info.IsMasterPrivateShareEmpty != target.IsMasterPrivateShareEmpty || + info.IsPrvSharesEmpty != target.IsPrvSharesEmpty || + info.Step != target.Step || + info.Reset != target.Reset || + !info.MasterPrivateShare.Equal(&target.MasterPrivateShare) || + !info.PrvShares.Equal(&target.PrvShares) { + return false + } + + if len(info.IDMap) != len(target.IDMap) { + return false + } + for k, v := range info.IDMap { + tV, exist := target.IDMap[k] + if !exist { + return false + } + + if !v.IsEqual(&tV) { + return false + } + } + + if len(info.MpkMap) != len(target.MpkMap) { + return false + } + for k, v := range info.MpkMap { + tV, exist := target.MpkMap[k] + if !exist { + return false + } + + if !v.Equal(tV) { + return false + } + } + + if len(info.PrvSharesReceived) != len(target.PrvSharesReceived) { + return false + } + for k := range info.PrvSharesReceived { + _, exist := target.PrvSharesReceived[k] + if !exist { + return false + } + } + + if len(info.NodeComplained) != len(target.NodeComplained) { + return false + } + for k := range info.NodeComplained { + _, exist := target.NodeComplained[k] + if !exist { + return false + } + } + + if len(info.AntiComplaintReceived) != len(target.AntiComplaintReceived) { + return false + } + for k, v := range info.AntiComplaintReceived { + tV, exist := target.AntiComplaintReceived[k] + if !exist { + return false + } + + if len(v) != len(tV) { + return false + } + for kk := range v { + _, exist := tV[kk] + if !exist { + return false + } + } + } + + return true +} + +// NodeIDToNodeIDs the map with NodeID to NodeIDs. 
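+//
+// RLP has no native map encoding, so EncodeRLP below flattens each entry
+// into alternating key/value byte slices and DecodeRLP rebuilds the map by
+// walking them in pairs; the other map types in this file use the same scheme.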
+type NodeIDToNodeIDs map[types.NodeID]map[types.NodeID]struct{} + +// EncodeRLP implements rlp.Encoder +func (m NodeIDToNodeIDs) EncodeRLP(w io.Writer) error { + var allBytes [][][]byte + for k, v := range m { + kBytes, err := k.MarshalText() + if err != nil { + return err + } + allBytes = append(allBytes, [][]byte{kBytes}) + + var vBytes [][]byte + for subK := range v { + bytes, err := subK.MarshalText() + if err != nil { + return err + } + vBytes = append(vBytes, bytes) + } + allBytes = append(allBytes, vBytes) + } + + return rlp.Encode(w, allBytes) +} + +// DecodeRLP implements rlp.Encoder +func (m *NodeIDToNodeIDs) DecodeRLP(s *rlp.Stream) error { + *m = make(NodeIDToNodeIDs) + var dec [][][]byte + if err := s.Decode(&dec); err != nil { + return err + } + + for i := 0; i < len(dec); i += 2 { + key := types.NodeID{} + err := key.UnmarshalText(dec[i][0]) + if err != nil { + return err + } + + valueMap := map[types.NodeID]struct{}{} + for _, v := range dec[i+1] { + value := types.NodeID{} + err := value.UnmarshalText(v) + if err != nil { + return err + } + + valueMap[value] = struct{}{} + } + + (*m)[key] = valueMap + } + + return nil +} + +// NodeID the map with NodeID. +type NodeID map[types.NodeID]struct{} + +// EncodeRLP implements rlp.Encoder +func (m NodeID) EncodeRLP(w io.Writer) error { + var allBytes [][]byte + for k := range m { + kBytes, err := k.MarshalText() + if err != nil { + return err + } + allBytes = append(allBytes, kBytes) + } + + return rlp.Encode(w, allBytes) +} + +// DecodeRLP implements rlp.Encoder +func (m *NodeID) DecodeRLP(s *rlp.Stream) error { + *m = make(NodeID) + var dec [][]byte + if err := s.Decode(&dec); err != nil { + return err + } + + for i := 0; i < len(dec); i++ { + key := types.NodeID{} + err := key.UnmarshalText(dec[i]) + if err != nil { + return err + } + + (*m)[key] = struct{}{} + } + + return nil +} + +// NodeIDToPubShares the map with NodeID to PublicKeyShares. +type NodeIDToPubShares map[types.NodeID]*dkg.PublicKeyShares + +// EncodeRLP implements rlp.Encoder +func (m NodeIDToPubShares) EncodeRLP(w io.Writer) error { + var allBytes [][]byte + for k, v := range m { + kBytes, err := k.MarshalText() + if err != nil { + return err + } + allBytes = append(allBytes, kBytes) + + bytes, err := rlp.EncodeToBytes(v) + if err != nil { + return err + } + allBytes = append(allBytes, bytes) + } + + return rlp.Encode(w, allBytes) +} + +// DecodeRLP implements rlp.Encoder +func (m *NodeIDToPubShares) DecodeRLP(s *rlp.Stream) error { + *m = make(NodeIDToPubShares) + var dec [][]byte + if err := s.Decode(&dec); err != nil { + return err + } + + for i := 0; i < len(dec); i += 2 { + key := types.NodeID{} + err := key.UnmarshalText(dec[i]) + if err != nil { + return err + } + + value := dkg.PublicKeyShares{} + err = rlp.DecodeBytes(dec[i+1], &value) + if err != nil { + return err + } + + (*m)[key] = &value + } + + return nil +} + +// NodeIDToDKGID the map with NodeID to DKGID. 
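+//
+// It encodes the same alternating key/value row scheme as the maps above,
+// except that values are raw dkg.ID bytes taken from GetLittleEndian and are
+// restored with SetLittleEndian. A minimal round-trip sketch (m is any
+// NodeIDToDKGID value):
+//
+//     b, _ := rlp.EncodeToBytes(&m)
+//     restored := NodeIDToDKGID{}
+//     _ = rlp.DecodeBytes(b, &restored)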
+type NodeIDToDKGID map[types.NodeID]dkg.ID + +// EncodeRLP implements rlp.Encoder +func (m NodeIDToDKGID) EncodeRLP(w io.Writer) error { + var allBytes [][]byte + for k, v := range m { + kBytes, err := k.MarshalText() + if err != nil { + return err + } + allBytes = append(allBytes, kBytes) + allBytes = append(allBytes, v.GetLittleEndian()) + } + + return rlp.Encode(w, allBytes) +} + +// DecodeRLP implements rlp.Encoder +func (m *NodeIDToDKGID) DecodeRLP(s *rlp.Stream) error { + *m = make(NodeIDToDKGID) + var dec [][]byte + if err := s.Decode(&dec); err != nil { + return err + } + + for i := 0; i < len(dec); i += 2 { + key := types.NodeID{} + err := key.UnmarshalText(dec[i]) + if err != nil { + return err + } + + value := dkg.ID{} + err = value.SetLittleEndian(dec[i+1]) + if err != nil { + return err + } + + (*m)[key] = value + } + + return nil +} + +// LevelDBBackedDB is a leveldb backed DB implementation. +type LevelDBBackedDB struct { + db *leveldb.DB +} + +// NewLevelDBBackedDB initialize a leveldb-backed database. +func NewLevelDBBackedDB( + path string) (lvl *LevelDBBackedDB, err error) { + + dbInst, err := leveldb.OpenFile(path, nil) + if err != nil { + return + } + lvl = &LevelDBBackedDB{db: dbInst} + return +} + +// Close implement Closer interface, which would release allocated resource. +func (lvl *LevelDBBackedDB) Close() error { + return lvl.db.Close() +} + +// HasBlock implements the Reader.Has method. +func (lvl *LevelDBBackedDB) HasBlock(hash common.Hash) bool { + exists, err := lvl.internalHasBlock(lvl.getBlockKey(hash)) + if err != nil { + panic(err) + } + return exists +} + +func (lvl *LevelDBBackedDB) internalHasBlock(key []byte) (bool, error) { + return lvl.db.Has(key, nil) +} + +// GetBlock implements the Reader.GetBlock method. +func (lvl *LevelDBBackedDB) GetBlock( + hash common.Hash) (block types.Block, err error) { + queried, err := lvl.db.Get(lvl.getBlockKey(hash), nil) + if err != nil { + if err == leveldb.ErrNotFound { + err = ErrBlockDoesNotExist + } + return + } + err = rlp.DecodeBytes(queried, &block) + return +} + +// UpdateBlock implements the Writer.UpdateBlock method. +func (lvl *LevelDBBackedDB) UpdateBlock(block types.Block) (err error) { + // NOTE: we didn't handle changes of block hash (and it + // should not happen). + marshaled, err := rlp.EncodeToBytes(&block) + if err != nil { + return + } + blockKey := lvl.getBlockKey(block.Hash) + exists, err := lvl.internalHasBlock(blockKey) + if err != nil { + return + } + if !exists { + err = ErrBlockDoesNotExist + return + } + err = lvl.db.Put(blockKey, marshaled, nil) + return +} + +// PutBlock implements the Writer.PutBlock method. +func (lvl *LevelDBBackedDB) PutBlock(block types.Block) (err error) { + marshaled, err := rlp.EncodeToBytes(&block) + if err != nil { + return + } + blockKey := lvl.getBlockKey(block.Hash) + exists, err := lvl.internalHasBlock(blockKey) + if err != nil { + return + } + if exists { + err = ErrBlockExists + return + } + err = lvl.db.Put(blockKey, marshaled, nil) + return +} + +// GetAllBlocks implements Reader.GetAllBlocks method, which allows callers +// to retrieve all blocks in DB. +func (lvl *LevelDBBackedDB) GetAllBlocks() (BlockIterator, error) { + return nil, ErrNotImplemented +} + +// PutCompactionChainTipInfo saves tip of compaction chain into the database. 
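+//
+// The new height must be exactly one greater than the stored tip height, so
+// the tip only ever advances block by block. A sketch with hypothetical
+// hashes, assuming the stored tip is at height 5:
+//
+//     err := lvl.PutCompactionChainTipInfo(hash6, 6) // accepted
+//     err = lvl.PutCompactionChainTipInfo(hash8, 8)  // ErrInvalidCompactionChainTipHeight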
+func (lvl *LevelDBBackedDB) PutCompactionChainTipInfo(
+ blockHash common.Hash, height uint64) error {
+ marshaled, err := rlp.EncodeToBytes(&compactionChainTipInfo{
+ Hash: blockHash,
+ Height: height,
+ })
+ if err != nil {
+ return err
+ }
+ // Check current cached tip info to make sure the one to be updated is
+ // valid.
+ info, err := lvl.internalGetCompactionChainTipInfo()
+ if err != nil {
+ return err
+ }
+ if info.Height+1 != height {
+ return ErrInvalidCompactionChainTipHeight
+ }
+ return lvl.db.Put(compactionChainTipInfoKey, marshaled, nil)
+}
+
+func (lvl *LevelDBBackedDB) internalGetCompactionChainTipInfo() (
+ info compactionChainTipInfo, err error) {
+ queried, err := lvl.db.Get(compactionChainTipInfoKey, nil)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ err = nil
+ }
+ return
+ }
+ err = rlp.DecodeBytes(queried, &info)
+ return
+}
+
+// GetCompactionChainTipInfo gets the tip info of the compaction chain from
+// the database.
+func (lvl *LevelDBBackedDB) GetCompactionChainTipInfo() (
+ hash common.Hash, height uint64) {
+ info, err := lvl.internalGetCompactionChainTipInfo()
+ if err != nil {
+ panic(err)
+ }
+ hash, height = info.Hash, info.Height
+ return
+}
+
+// GetDKGPrivateKey gets the DKG private key of one round, verifying that the
+// stored reset counter matches.
+func (lvl *LevelDBBackedDB) GetDKGPrivateKey(round, reset uint64) (
+ prv dkg.PrivateKey, err error) {
+ queried, err := lvl.db.Get(lvl.getDKGPrivateKeyKey(round), nil)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ err = ErrDKGPrivateKeyDoesNotExist
+ }
+ return
+ }
+ pk := dkgPrivateKey{}
+ err = rlp.DecodeBytes(queried, &pk)
+ if err != nil {
+ return
+ }
+ if pk.Reset != reset {
+ err = ErrDKGPrivateKeyDoesNotExist
+ return
+ }
+ prv = pk.PK
+ return
+}
+
+// PutDKGPrivateKey saves the DKG private key of one round.
+func (lvl *LevelDBBackedDB) PutDKGPrivateKey(
+ round, reset uint64, prv dkg.PrivateKey) error {
+ // Check existence.
+ _, err := lvl.GetDKGPrivateKey(round, reset)
+ if err == nil {
+ return ErrDKGPrivateKeyExists
+ }
+ if err != ErrDKGPrivateKeyDoesNotExist {
+ return err
+ }
+ pk := &dkgPrivateKey{
+ PK: prv,
+ Reset: reset,
+ }
+ marshaled, err := rlp.EncodeToBytes(&pk)
+ if err != nil {
+ return err
+ }
+ return lvl.db.Put(
+ lvl.getDKGPrivateKeyKey(round), marshaled, nil)
+}
+
+// GetDKGProtocol gets the DKG protocol state.
+func (lvl *LevelDBBackedDB) GetDKGProtocol() (
+ info DKGProtocolInfo, err error) {
+ queried, err := lvl.db.Get(lvl.getDKGProtocolInfoKey(), nil)
+ if err != nil {
+ if err == leveldb.ErrNotFound {
+ err = ErrDKGProtocolDoesNotExist
+ }
+ return
+ }
+
+ err = rlp.DecodeBytes(queried, &info)
+ return
+}
+
+// PutOrUpdateDKGProtocol saves the DKG protocol state.
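+//
+// Unlike PutDKGPrivateKey there is no existence check here: the record lives
+// under a single fixed key, so each call overwrites the previous snapshot
+// and only the latest protocol state survives.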
+func (lvl *LevelDBBackedDB) PutOrUpdateDKGProtocol(info DKGProtocolInfo) error {
+ marshaled, err := rlp.EncodeToBytes(&info)
+ if err != nil {
+ return err
+ }
+ return lvl.db.Put(lvl.getDKGProtocolInfoKey(), marshaled, nil)
+}
+
+func (lvl *LevelDBBackedDB) getBlockKey(hash common.Hash) (ret []byte) {
+ ret = make([]byte, len(blockKeyPrefix)+len(hash[:]))
+ copy(ret, blockKeyPrefix)
+ copy(ret[len(blockKeyPrefix):], hash[:])
+ return
+}
+
+func (lvl *LevelDBBackedDB) getDKGPrivateKeyKey(
+ round uint64) (ret []byte) {
+ ret = make([]byte, len(dkgPrivateKeyKeyPrefix)+8)
+ copy(ret, dkgPrivateKeyKeyPrefix)
+ binary.LittleEndian.PutUint64(
+ ret[len(dkgPrivateKeyKeyPrefix):], round)
+ return
+}
+
+func (lvl *LevelDBBackedDB) getDKGProtocolInfoKey() (ret []byte) {
+ // The trailing 8 bytes are allocated but never written, so the key is the
+ // prefix padded with zeros; it is kept that way for compatibility with
+ // databases already written in this format.
+ ret = make([]byte, len(dkgProtocolInfoKeyPrefix)+8)
+ copy(ret, dkgProtocolInfoKeyPrefix)
+ return
+}
diff --git a/dex/consensus/core/db/level-db_test.go b/dex/consensus/core/db/level-db_test.go
new file mode 100644
index 000000000..8b3c448ca
--- /dev/null
+++ b/dex/consensus/core/db/level-db_test.go
@@ -0,0 +1,326 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package db
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/suite"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ "github.com/dexon-foundation/dexon/rlp"
+)
+
+type LevelDBTestSuite struct {
+ suite.Suite
+}
+
+func (s *LevelDBTestSuite) TestBasicUsage() {
+ dbName := fmt.Sprintf("test-db-%v.db", time.Now().UTC())
+ dbInst, err := NewLevelDBBackedDB(dbName)
+ s.Require().NoError(err)
+ defer func(dbName string) {
+ err = dbInst.Close()
+ s.NoError(err)
+ err = os.RemoveAll(dbName)
+ s.NoError(err)
+ }(dbName)
+
+ // Query something from an empty database.
+ hash1 := common.NewRandomHash()
+ _, err = dbInst.GetBlock(hash1)
+ s.Equal(ErrBlockDoesNotExist, err)
+
+ // Update on an empty database should not succeed.
+ node1 := types.NodeID{Hash: common.NewRandomHash()}
+ block1 := types.Block{
+ ProposerID: node1,
+ Hash: hash1,
+ Position: types.Position{
+ Height: 1,
+ },
+ }
+ err = dbInst.UpdateBlock(block1)
+ s.Equal(ErrBlockDoesNotExist, err)
+
+ // Put to create a new record should just work fine.
+ err = dbInst.PutBlock(block1)
+ s.NoError(err)
+
+ // Getting it back should work fine.
+ queried, err := dbInst.GetBlock(block1.Hash)
+ s.NoError(err)
+ s.Equal(queried.ProposerID, block1.ProposerID)
+
+ // Test Update.
+ now := time.Now().UTC()
+ queried.Timestamp = now
+
+ err = dbInst.UpdateBlock(queried)
+ s.NoError(err)
+
+ // Get it back via hash and check the updated timestamp.
+ queried, err = dbInst.GetBlock(block1.Hash) + + s.NoError(err) + s.Equal(now, queried.Timestamp) +} + +func (s *LevelDBTestSuite) TestSyncIndex() { + dbName := fmt.Sprintf("test-db-%v-si.db", time.Now().UTC()) + dbInst, err := NewLevelDBBackedDB(dbName) + s.Require().NoError(err) + defer func(dbName string) { + err = dbInst.Close() + s.NoError(err) + err = os.RemoveAll(dbName) + s.NoError(err) + }(dbName) + + // Create some blocks. + blocks := [10]types.Block{} + for i := range blocks { + block := types.Block{ + ProposerID: types.NodeID{Hash: common.NewRandomHash()}, + Hash: common.NewRandomHash(), + Position: types.Position{ + Height: uint64(i), + }, + } + dbInst.PutBlock(block) + blocks[i] = block + } + + // Save blocks to db. + err = dbInst.Close() + s.NoError(err) + + // Load back blocks(syncIndex is called). + dbInst, err = NewLevelDBBackedDB(dbName) + s.Require().NoError(err) + + // Verify result. + for _, block := range blocks { + queried, err := dbInst.GetBlock(block.Hash) + s.NoError(err) + s.Equal(block.ProposerID, queried.ProposerID) + s.Equal(block.Position.Height, queried.Position.Height) + } +} + +func (s *LevelDBTestSuite) TestCompactionChainTipInfo() { + dbName := fmt.Sprintf("test-db-%v-cc-tip.db", time.Now().UTC()) + dbInst, err := NewLevelDBBackedDB(dbName) + s.Require().NoError(err) + defer func(dbName string) { + err = dbInst.Close() + s.NoError(err) + err = os.RemoveAll(dbName) + s.NoError(err) + }(dbName) + // Save some tip info. + hash := common.NewRandomHash() + s.Require().NoError(dbInst.PutCompactionChainTipInfo(hash, 1)) + // Get it back to check. + hashBack, height := dbInst.GetCompactionChainTipInfo() + s.Require().Equal(hash, hashBack) + s.Require().Equal(height, uint64(1)) + // Unable to put compaction chain tip info with lower height. + err = dbInst.PutCompactionChainTipInfo(hash, 0) + s.Require().Equal(err.Error(), ErrInvalidCompactionChainTipHeight.Error()) + // Unable to put compaction chain tip info with height not incremental by 1. + err = dbInst.PutCompactionChainTipInfo(hash, 3) + s.Require().Equal(err.Error(), ErrInvalidCompactionChainTipHeight.Error()) + // It's OK to put compaction chain tip info with height incremental by 1. + s.Require().NoError(dbInst.PutCompactionChainTipInfo(hash, 2)) +} + +func (s *LevelDBTestSuite) TestDKGPrivateKey() { + dbName := fmt.Sprintf("test-db-%v-dkg-prv.db", time.Now().UTC()) + dbInst, err := NewLevelDBBackedDB(dbName) + s.Require().NoError(err) + defer func(dbName string) { + err = dbInst.Close() + s.NoError(err) + err = os.RemoveAll(dbName) + s.NoError(err) + }(dbName) + p := dkg.NewPrivateKey() + // We should be unable to get it. + _, err = dbInst.GetDKGPrivateKey(1, 0) + s.Require().Equal(err.Error(), ErrDKGPrivateKeyDoesNotExist.Error()) + // Put it. + s.Require().NoError(dbInst.PutDKGPrivateKey(1, 0, *p)) + // We should be unable to get it because reset is different. + _, err = dbInst.GetDKGPrivateKey(1, 1) + s.Require().Equal(err.Error(), ErrDKGPrivateKeyDoesNotExist.Error()) + // Put it again, should not success. + err = dbInst.PutDKGPrivateKey(1, 0, *p) + s.Require().Equal(err.Error(), ErrDKGPrivateKeyExists.Error()) + // Get it back. + tmpPrv, err := dbInst.GetDKGPrivateKey(1, 0) + s.Require().NoError(err) + s.Require().Equal(bytes.Compare(p.Bytes(), tmpPrv.Bytes()), 0) + // Put it at different reset. + p2 := dkg.NewPrivateKey() + s.Require().NoError(dbInst.PutDKGPrivateKey(1, 1, *p2)) + // We should be unable to get it because reset is different. 
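+ // (The level-db backend keys DKG private keys by round only and stores
+ // the reset counter inside the value, so the put at reset 1 above
+ // replaced the reset-0 entry for round 1.)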
+ _, err = dbInst.GetDKGPrivateKey(1, 0) + // Get it back. + tmpPrv, err = dbInst.GetDKGPrivateKey(1, 1) + s.Require().NoError(err) + s.Require().Equal(bytes.Compare(p2.Bytes(), tmpPrv.Bytes()), 0) + s.Require().NotEqual(bytes.Compare(p2.Bytes(), p.Bytes()), 0) +} + +func (s *LevelDBTestSuite) TestDKGProtocol() { + dbName := fmt.Sprintf("test-db-%v-dkg-master-prv-shares.db", time.Now().UTC()) + dbInst, err := NewLevelDBBackedDB(dbName) + s.Require().NoError(err) + defer func(dbName string) { + err = dbInst.Close() + s.NoError(err) + err = os.RemoveAll(dbName) + s.NoError(err) + }(dbName) + + _, err = dbInst.GetDKGProtocol() + s.Require().Equal(err.Error(), ErrDKGProtocolDoesNotExist.Error()) + + s.Require().NoError(dbInst.PutOrUpdateDKGProtocol(DKGProtocolInfo{})) +} + +func (s *LevelDBTestSuite) TestDKGProtocolInfoRLPEncodeDecode() { + protocol := DKGProtocolInfo{ + ID: types.NodeID{Hash: common.Hash{0x11}}, + Round: 5, + Threshold: 10, + IDMap: NodeIDToDKGID{ + types.NodeID{Hash: common.Hash{0x01}}: dkg.ID{}, + types.NodeID{Hash: common.Hash{0x02}}: dkg.ID{}, + }, + MpkMap: NodeIDToPubShares{ + types.NodeID{Hash: common.Hash{0x01}}: dkg.NewEmptyPublicKeyShares(), + types.NodeID{Hash: common.Hash{0x02}}: dkg.NewEmptyPublicKeyShares(), + }, + AntiComplaintReceived: NodeIDToNodeIDs{ + types.NodeID{Hash: common.Hash{0x01}}: map[types.NodeID]struct{}{ + types.NodeID{Hash: common.Hash{0x02}}: {}, + }, + types.NodeID{Hash: common.Hash{0x03}}: map[types.NodeID]struct{}{ + types.NodeID{Hash: common.Hash{0x04}}: {}, + }, + }, + PrvSharesReceived: NodeID{ + types.NodeID{Hash: common.Hash{0x01}}: struct{}{}, + }, + } + + b, err := rlp.EncodeToBytes(&protocol) + s.Require().NoError(err) + + newProtocol := DKGProtocolInfo{} + err = rlp.DecodeBytes(b, &newProtocol) + s.Require().NoError(err) + + s.Require().True(protocol.Equal(&newProtocol)) +} + +func (s *LevelDBTestSuite) TestNodeIDToNodeIDsRLPEncodeDecode() { + m := NodeIDToNodeIDs{ + types.NodeID{Hash: common.Hash{0x01}}: map[types.NodeID]struct{}{ + types.NodeID{Hash: common.Hash{0x02}}: {}, + }, + types.NodeID{Hash: common.Hash{0x03}}: map[types.NodeID]struct{}{ + types.NodeID{Hash: common.Hash{0x04}}: {}, + }, + } + + b, err := rlp.EncodeToBytes(&m) + s.Require().NoError(err) + + newM := NodeIDToNodeIDs{} + err = rlp.DecodeBytes(b, &newM) + s.Require().NoError(err) + + s.Require().True(reflect.DeepEqual(m, newM)) +} + +func (s *LevelDBTestSuite) TestNodeIDRLPEncodeDecode() { + m := NodeID{ + types.NodeID{Hash: common.Hash{0x01}}: struct{}{}, + types.NodeID{Hash: common.Hash{0x02}}: struct{}{}, + } + + b, err := rlp.EncodeToBytes(&m) + s.Require().NoError(err) + + newM := NodeID{} + err = rlp.DecodeBytes(b, &newM) + s.Require().NoError(err) + + s.Require().True(reflect.DeepEqual(m, newM)) +} + +func (s *LevelDBTestSuite) TestNodeIDToPubSharesRLPEncodeDecode() { + m := NodeIDToPubShares{ + types.NodeID{Hash: common.Hash{0x01}}: dkg.NewEmptyPublicKeyShares(), + types.NodeID{Hash: common.Hash{0x02}}: dkg.NewEmptyPublicKeyShares(), + } + + b, err := rlp.EncodeToBytes(&m) + s.Require().NoError(err) + + newM := NodeIDToPubShares{} + err = rlp.DecodeBytes(b, &newM) + s.Require().NoError(err) + + for k, v := range m { + newV, exist := newM[k] + s.Require().True(exist) + s.Require().True(newV.Equal(v)) + } +} + +func (s *LevelDBTestSuite) TestNodeIDToDKGIDRLPEncodeDecode() { + m := NodeIDToDKGID{ + types.NodeID{Hash: common.Hash{0x01}}: dkg.ID{}, + types.NodeID{Hash: common.Hash{0x02}}: dkg.ID{}, + } + + b, err := rlp.EncodeToBytes(&m) + 
s.Require().NoError(err)
+
+ newM := NodeIDToDKGID{}
+ err = rlp.DecodeBytes(b, &newM)
+ s.Require().NoError(err)
+
+ s.Require().True(reflect.DeepEqual(m, newM))
+}
+
+func TestLevelDB(t *testing.T) {
+ suite.Run(t, new(LevelDBTestSuite))
+}
diff --git a/dex/consensus/core/db/memory.go b/dex/consensus/core/db/memory.go
new file mode 100644
index 000000000..6555de855
--- /dev/null
+++ b/dex/consensus/core/db/memory.go
@@ -0,0 +1,262 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package db
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "os"
+ "sync"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+)
+
+type blockSeqIterator struct {
+ idx int
+ db *MemBackedDB
+}
+
+// NextBlock implements the BlockIterator.NextBlock method.
+func (seq *blockSeqIterator) NextBlock() (types.Block, error) {
+ curIdx := seq.idx
+ seq.idx++
+ return seq.db.getBlockByIndex(curIdx)
+}
+
+// MemBackedDB is a memory-backed DB implementation.
+type MemBackedDB struct {
+ blocksLock sync.RWMutex
+ blockHashSequence common.Hashes
+ blocksByHash map[common.Hash]*types.Block
+ compactionChainTipLock sync.RWMutex
+ compactionChainTipHash common.Hash
+ compactionChainTipHeight uint64
+ dkgPrivateKeysLock sync.RWMutex
+ dkgPrivateKeys map[uint64]*dkgPrivateKey
+ dkgProtocolLock sync.RWMutex
+ dkgProtocolInfo *DKGProtocolInfo
+ persistantFilePath string
+}
+
+// NewMemBackedDB initializes a memory-backed database.
+func NewMemBackedDB(persistantFilePath ...string) (
+ dbInst *MemBackedDB, err error) {
+ dbInst = &MemBackedDB{
+ blockHashSequence: common.Hashes{},
+ blocksByHash: make(map[common.Hash]*types.Block),
+ dkgPrivateKeys: make(map[uint64]*dkgPrivateKey),
+ }
+ if len(persistantFilePath) == 0 || len(persistantFilePath[0]) == 0 {
+ return
+ }
+ dbInst.persistantFilePath = persistantFilePath[0]
+ buf, err := ioutil.ReadFile(dbInst.persistantFilePath)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ // Something unexpected happened.
+ return
+ }
+ // It's expected that the file may not exist; that is not reported
+ // as an error.
+ err = nil
+ return
+ }
+
+ // Initialize this instance from the file content; the anonymous struct
+ // is a temporary way to expose the private fields for JSON decoding.
+ toLoad := struct {
+ Sequence common.Hashes
+ ByHash map[common.Hash]*types.Block
+ }{}
+ err = json.Unmarshal(buf, &toLoad)
+ if err != nil {
+ return
+ }
+ dbInst.blockHashSequence = toLoad.Sequence
+ dbInst.blocksByHash = toLoad.ByHash
+ return
+}
+
+// HasBlock returns whether or not the DB has a block identified with the hash.
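+//
+// The check runs under the read lock only. Writers such as PutBlock call it
+// before taking the write lock, so two concurrent PutBlock calls with the
+// same hash may both pass this existence check.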
+func (m *MemBackedDB) HasBlock(hash common.Hash) bool {
+ m.blocksLock.RLock()
+ defer m.blocksLock.RUnlock()
+
+ _, ok := m.blocksByHash[hash]
+ return ok
+}
+
+// GetBlock returns a block given a hash.
+func (m *MemBackedDB) GetBlock(hash common.Hash) (types.Block, error) {
+ m.blocksLock.RLock()
+ defer m.blocksLock.RUnlock()
+
+ return m.internalGetBlock(hash)
+}
+
+func (m *MemBackedDB) internalGetBlock(hash common.Hash) (types.Block, error) {
+ b, ok := m.blocksByHash[hash]
+ if !ok {
+ return types.Block{}, ErrBlockDoesNotExist
+ }
+ return *b, nil
+}
+
+// PutBlock inserts a new block into the database.
+func (m *MemBackedDB) PutBlock(block types.Block) error {
+ if m.HasBlock(block.Hash) {
+ return ErrBlockExists
+ }
+
+ m.blocksLock.Lock()
+ defer m.blocksLock.Unlock()
+
+ m.blockHashSequence = append(m.blockHashSequence, block.Hash)
+ m.blocksByHash[block.Hash] = &block
+ return nil
+}
+
+// UpdateBlock updates a block in the database.
+func (m *MemBackedDB) UpdateBlock(block types.Block) error {
+ if !m.HasBlock(block.Hash) {
+ return ErrBlockDoesNotExist
+ }
+
+ m.blocksLock.Lock()
+ defer m.blocksLock.Unlock()
+
+ m.blocksByHash[block.Hash] = &block
+ return nil
+}
+
+// PutCompactionChainTipInfo saves tip of compaction chain into the database.
+func (m *MemBackedDB) PutCompactionChainTipInfo(
+ blockHash common.Hash, height uint64) error {
+ m.compactionChainTipLock.Lock()
+ defer m.compactionChainTipLock.Unlock()
+ if m.compactionChainTipHeight+1 != height {
+ return ErrInvalidCompactionChainTipHeight
+ }
+ m.compactionChainTipHeight = height
+ m.compactionChainTipHash = blockHash
+ return nil
+}
+
+// GetCompactionChainTipInfo gets the tip info of the compaction chain from
+// the database.
+func (m *MemBackedDB) GetCompactionChainTipInfo() (
+ hash common.Hash, height uint64) {
+ m.compactionChainTipLock.RLock()
+ defer m.compactionChainTipLock.RUnlock()
+ return m.compactionChainTipHash, m.compactionChainTipHeight
+}
+
+// GetDKGPrivateKey gets the DKG private key of one round.
+func (m *MemBackedDB) GetDKGPrivateKey(round, reset uint64) (
+ dkg.PrivateKey, error) {
+ m.dkgPrivateKeysLock.RLock()
+ defer m.dkgPrivateKeysLock.RUnlock()
+ if prv, exists := m.dkgPrivateKeys[round]; exists && prv.Reset == reset {
+ return prv.PK, nil
+ }
+ return dkg.PrivateKey{}, ErrDKGPrivateKeyDoesNotExist
+}
+
+// PutDKGPrivateKey saves the DKG private key of one round.
+func (m *MemBackedDB) PutDKGPrivateKey(
+ round, reset uint64, prv dkg.PrivateKey) error {
+ m.dkgPrivateKeysLock.Lock()
+ defer m.dkgPrivateKeysLock.Unlock()
+ if prv, exists := m.dkgPrivateKeys[round]; exists && prv.Reset == reset {
+ return ErrDKGPrivateKeyExists
+ }
+ m.dkgPrivateKeys[round] = &dkgPrivateKey{
+ PK: prv,
+ Reset: reset,
+ }
+ return nil
+}
+
+// GetDKGProtocol gets the DKG protocol state.
+func (m *MemBackedDB) GetDKGProtocol() (
+ DKGProtocolInfo, error) {
+ m.dkgProtocolLock.RLock()
+ defer m.dkgProtocolLock.RUnlock()
+ if m.dkgProtocolInfo == nil {
+ return DKGProtocolInfo{}, ErrDKGProtocolDoesNotExist
+ }
+
+ return *m.dkgProtocolInfo, nil
+}
+
+// PutOrUpdateDKGProtocol saves the DKG protocol state.
+func (m *MemBackedDB) PutOrUpdateDKGProtocol(dkgProtocol DKGProtocolInfo) error {
+ m.dkgProtocolLock.Lock()
+ defer m.dkgProtocolLock.Unlock()
+ m.dkgProtocolInfo = &dkgProtocol
+ return nil
+}
+
+// Close implements the Closer interface, releasing allocated resources.
+func (m *MemBackedDB) Close() (err error) {
+ // Save internal state to a JSON file. The anonymous struct below is a
+ // temporary way to dump the private fields via JSON encoding.
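+ // The layout written here mirrors what NewMemBackedDB reads back:
+ //
+ //     {"Sequence": [<block hashes in insertion order>], "ByHash": {...}}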
+ if len(m.persistantFilePath) == 0 { + return + } + + m.blocksLock.RLock() + defer m.blocksLock.RUnlock() + + toDump := struct { + Sequence common.Hashes + ByHash map[common.Hash]*types.Block + }{ + Sequence: m.blockHashSequence, + ByHash: m.blocksByHash, + } + + // Dump to JSON with 2-space indent. + buf, err := json.Marshal(&toDump) + if err != nil { + return + } + + err = ioutil.WriteFile(m.persistantFilePath, buf, 0644) + return +} + +func (m *MemBackedDB) getBlockByIndex(idx int) (types.Block, error) { + m.blocksLock.RLock() + defer m.blocksLock.RUnlock() + + if idx >= len(m.blockHashSequence) { + return types.Block{}, ErrIterationFinished + } + + hash := m.blockHashSequence[idx] + return m.internalGetBlock(hash) +} + +// GetAllBlocks implement Reader.GetAllBlocks method, which allows caller +// to retrieve all blocks in DB. +func (m *MemBackedDB) GetAllBlocks() (BlockIterator, error) { + return &blockSeqIterator{db: m}, nil +} diff --git a/dex/consensus/core/db/memory_test.go b/dex/consensus/core/db/memory_test.go new file mode 100644 index 000000000..33e011121 --- /dev/null +++ b/dex/consensus/core/db/memory_test.go @@ -0,0 +1,186 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package db + +import ( + "bytes" + "os" + "testing" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/stretchr/testify/suite" +) + +type MemBackedDBTestSuite struct { + suite.Suite + + v0 types.NodeID + b00, b01, b02 *types.Block +} + +func (s *MemBackedDBTestSuite) SetupSuite() { + s.v0 = types.NodeID{Hash: common.NewRandomHash()} + + genesisHash := common.NewRandomHash() + s.b00 = &types.Block{ + ProposerID: s.v0, + ParentHash: genesisHash, + Hash: genesisHash, + Position: types.Position{ + Height: 0, + }, + } + s.b01 = &types.Block{ + ProposerID: s.v0, + ParentHash: s.b00.Hash, + Hash: common.NewRandomHash(), + Position: types.Position{ + Height: 1, + }, + } + s.b02 = &types.Block{ + ProposerID: s.v0, + ParentHash: s.b01.Hash, + Hash: common.NewRandomHash(), + Position: types.Position{ + Height: 2, + }, + } +} + +func (s *MemBackedDBTestSuite) TestSaveAndLoad() { + // Make sure we are able to save/load from file. + dbPath := "test-save-and-load.db" + + // Make sure the file pointed by 'dbPath' doesn't exist. 
+ _, err := os.Stat(dbPath) + s.Require().Error(err) + + dbInst, err := NewMemBackedDB(dbPath) + s.Require().NoError(err) + s.Require().NotNil(dbInst) + defer func() { + if dbInst != nil { + s.NoError(os.Remove(dbPath)) + dbInst = nil + } + }() + + s.NoError(dbInst.PutBlock(*s.b00)) + s.NoError(dbInst.PutBlock(*s.b01)) + s.NoError(dbInst.PutBlock(*s.b02)) + s.NoError(dbInst.Close()) + + // Load the json file back to check if all inserted blocks + // exists. + dbInst, err = NewMemBackedDB(dbPath) + s.Require().NoError(err) + s.Require().NotNil(dbInst) + s.True(dbInst.HasBlock(s.b00.Hash)) + s.True(dbInst.HasBlock(s.b01.Hash)) + s.True(dbInst.HasBlock(s.b02.Hash)) + s.NoError(dbInst.Close()) +} + +func (s *MemBackedDBTestSuite) TestIteration() { + // Make sure the file pointed by 'dbPath' doesn't exist. + dbInst, err := NewMemBackedDB() + s.Require().NoError(err) + s.Require().NotNil(dbInst) + + // Setup database. + s.NoError(dbInst.PutBlock(*s.b00)) + s.NoError(dbInst.PutBlock(*s.b01)) + s.NoError(dbInst.PutBlock(*s.b02)) + + // Check if we can iterate all 3 blocks. + iter, err := dbInst.GetAllBlocks() + s.Require().NoError(err) + touched := common.Hashes{} + for { + b, err := iter.NextBlock() + if err == ErrIterationFinished { + break + } + s.Require().NoError(err) + touched = append(touched, b.Hash) + } + s.Len(touched, 3) + s.Contains(touched, s.b00.Hash) + s.Contains(touched, s.b01.Hash) + s.Contains(touched, s.b02.Hash) +} + +func (s *MemBackedDBTestSuite) TestCompactionChainTipInfo() { + dbInst, err := NewMemBackedDB() + s.Require().NoError(err) + s.Require().NotNil(dbInst) + // Save some tip info. + hash := common.NewRandomHash() + s.Require().NoError(dbInst.PutCompactionChainTipInfo(hash, 1)) + // Get it back to check. + hashBack, height := dbInst.GetCompactionChainTipInfo() + s.Require().Equal(hash, hashBack) + s.Require().Equal(height, uint64(1)) + // Unable to put compaction chain tip info with lower height. + err = dbInst.PutCompactionChainTipInfo(hash, 0) + s.Require().Equal(err.Error(), ErrInvalidCompactionChainTipHeight.Error()) + // Unable to put compaction chain tip info with height not incremental by 1. + err = dbInst.PutCompactionChainTipInfo(hash, 3) + s.Require().Equal(err.Error(), ErrInvalidCompactionChainTipHeight.Error()) + // It's OK to put compaction chain tip info with height incremental by 1. + s.Require().NoError(dbInst.PutCompactionChainTipInfo(hash, 2)) +} + +func (s *MemBackedDBTestSuite) TestDKGPrivateKey() { + dbInst, err := NewMemBackedDB() + s.Require().NoError(err) + s.Require().NotNil(dbInst) + p := dkg.NewPrivateKey() + // We should be unable to get it. + _, err = dbInst.GetDKGPrivateKey(1, 0) + s.Require().Equal(err.Error(), ErrDKGPrivateKeyDoesNotExist.Error()) + // Put it. + s.Require().NoError(dbInst.PutDKGPrivateKey(1, 0, *p)) + // We should be unable to get it because reset is different. + _, err = dbInst.GetDKGPrivateKey(1, 1) + s.Require().Equal(err.Error(), ErrDKGPrivateKeyDoesNotExist.Error()) + // Put it again, should not success. + err = dbInst.PutDKGPrivateKey(1, 0, *p) + s.Require().Equal(err.Error(), ErrDKGPrivateKeyExists.Error()) + // Get it back. + tmpPrv, err := dbInst.GetDKGPrivateKey(1, 0) + s.Require().NoError(err) + s.Require().Equal(bytes.Compare(p.Bytes(), tmpPrv.Bytes()), 0) + // Put it at different reset. + p2 := dkg.NewPrivateKey() + s.Require().NoError(dbInst.PutDKGPrivateKey(1, 1, *p2)) + // We should be unable to get it because reset is different. + _, err = dbInst.GetDKGPrivateKey(1, 0) + // Get it back. 
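+ // (The memory backend keeps a single dkgPrivateKey per round, so the put
+ // at reset 1 above replaced the reset-0 entry; the fetch below succeeds
+ // against reset 1.)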
+ tmpPrv, err = dbInst.GetDKGPrivateKey(1, 1) + s.Require().NoError(err) + s.Require().Equal(bytes.Compare(p2.Bytes(), tmpPrv.Bytes()), 0) + s.Require().NotEqual(bytes.Compare(p2.Bytes(), p.Bytes()), 0) +} + +func TestMemBackedDB(t *testing.T) { + suite.Run(t, new(MemBackedDBTestSuite)) +} diff --git a/dex/consensus/core/dkg-tsig-protocol.go b/dex/consensus/core/dkg-tsig-protocol.go new file mode 100644 index 000000000..ce5c89c47 --- /dev/null +++ b/dex/consensus/core/dkg-tsig-protocol.go @@ -0,0 +1,709 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + "sync" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" + "github.com/dexon-foundation/dexon-consensus/core/db" + "github.com/dexon-foundation/dexon-consensus/core/types" + typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" + "github.com/dexon-foundation/dexon-consensus/core/utils" +) + +// Errors for dkg module. +var ( + ErrNotDKGParticipant = fmt.Errorf( + "not a DKG participant") + ErrNotQualifyDKGParticipant = fmt.Errorf( + "not a qualified DKG participant") + ErrIDShareNotFound = fmt.Errorf( + "private share not found for specific ID") + ErrIncorrectPrivateShareSignature = fmt.Errorf( + "incorrect private share signature") + ErrMismatchPartialSignatureHash = fmt.Errorf( + "mismatch partialSignature hash") + ErrIncorrectPartialSignatureSignature = fmt.Errorf( + "incorrect partialSignature signature") + ErrIncorrectPartialSignature = fmt.Errorf( + "incorrect partialSignature") + ErrNotEnoughtPartialSignatures = fmt.Errorf( + "not enough of partial signatures") + ErrRoundAlreadyPurged = fmt.Errorf( + "cache of round already been purged") + ErrTSigNotReady = fmt.Errorf( + "tsig not ready") + ErrSelfMPKNotRegister = fmt.Errorf( + "self mpk not registered") + ErrUnableGetSelfPrvShare = fmt.Errorf( + "unable to get self DKG PrivateShare") + ErrSelfPrvShareMismatch = fmt.Errorf( + "self privateShare does not match mpk registered") +) + +// ErrUnexpectedDKGResetCount represents receiving a DKG message with unexpected +// DKG reset count. +type ErrUnexpectedDKGResetCount struct { + expect, actual uint64 + proposerID types.NodeID +} + +func (e ErrUnexpectedDKGResetCount) Error() string { + return fmt.Sprintf( + "unexpected DKG reset count, from:%s expect:%d actual:%d", + e.proposerID.String()[:6], e.expect, e.actual) +} + +// ErrUnexpectedRound represents receiving a DKG message with unexpected round. 
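+//
+// The rendered message shortens the proposer ID to its first six characters,
+// e.g. (illustrative):
+//
+//     unexpected round, from:1a2b3c expect:4 actual:5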
+type ErrUnexpectedRound struct { + expect, actual uint64 + proposerID types.NodeID +} + +func (e ErrUnexpectedRound) Error() string { + return fmt.Sprintf("unexpected round, from:%s expect:%d actual:%d", + e.proposerID.String()[:6], e.expect, e.actual) +} + +type dkgReceiver interface { + // ProposeDKGComplaint proposes a DKGComplaint. + ProposeDKGComplaint(complaint *typesDKG.Complaint) + + // ProposeDKGMasterPublicKey propose a DKGMasterPublicKey. + ProposeDKGMasterPublicKey(mpk *typesDKG.MasterPublicKey) + + // ProposeDKGPrivateShare propose a DKGPrivateShare. + ProposeDKGPrivateShare(prv *typesDKG.PrivateShare) + + // ProposeDKGAntiNackComplaint propose a DKGPrivateShare as an anti complaint. + ProposeDKGAntiNackComplaint(prv *typesDKG.PrivateShare) + + // ProposeDKGMPKReady propose a DKGMPKReady message. + ProposeDKGMPKReady(ready *typesDKG.MPKReady) + + // ProposeDKGFinalize propose a DKGFinalize message. + ProposeDKGFinalize(final *typesDKG.Finalize) + + // ProposeDKGSuccess propose a DKGSuccess message. + ProposeDKGSuccess(final *typesDKG.Success) +} + +type dkgProtocol struct { + ID types.NodeID + recv dkgReceiver + round uint64 + reset uint64 + threshold int + idMap map[types.NodeID]dkg.ID + mpkMap map[types.NodeID]*dkg.PublicKeyShares + masterPrivateShare *dkg.PrivateKeyShares + prvShares *dkg.PrivateKeyShares + prvSharesReceived map[types.NodeID]struct{} + nodeComplained map[types.NodeID]struct{} + // Complaint[from][to]'s anti is saved to antiComplaint[from][to]. + antiComplaintReceived map[types.NodeID]map[types.NodeID]struct{} + // The completed step in `runDKG`. + step int +} + +func (d *dkgProtocol) convertFromInfo(info db.DKGProtocolInfo) { + d.ID = info.ID + d.idMap = info.IDMap + d.round = info.Round + d.threshold = int(info.Threshold) + d.idMap = info.IDMap + d.mpkMap = info.MpkMap + d.prvSharesReceived = info.PrvSharesReceived + d.nodeComplained = info.NodeComplained + d.antiComplaintReceived = info.AntiComplaintReceived + d.step = int(info.Step) + d.reset = info.Reset + if info.IsMasterPrivateShareEmpty { + d.masterPrivateShare = nil + } else { + d.masterPrivateShare = &info.MasterPrivateShare + } + + if info.IsPrvSharesEmpty { + d.prvShares = nil + } else { + d.prvShares = &info.PrvShares + } +} + +func (d *dkgProtocol) toDKGProtocolInfo() db.DKGProtocolInfo { + info := db.DKGProtocolInfo{ + ID: d.ID, + Round: d.round, + Threshold: uint64(d.threshold), + IDMap: d.idMap, + MpkMap: d.mpkMap, + PrvSharesReceived: d.prvSharesReceived, + NodeComplained: d.nodeComplained, + AntiComplaintReceived: d.antiComplaintReceived, + Step: uint64(d.step), + Reset: d.reset, + } + + if d.masterPrivateShare != nil { + info.MasterPrivateShare = *d.masterPrivateShare + } else { + info.IsMasterPrivateShareEmpty = true + } + + if d.prvShares != nil { + info.PrvShares = *d.prvShares + } else { + info.IsPrvSharesEmpty = true + } + + return info +} + +type dkgShareSecret struct { + privateKey *dkg.PrivateKey +} + +// TSigVerifier is the interface verifying threshold signature. +type TSigVerifier interface { + VerifySignature(hash common.Hash, sig crypto.Signature) bool +} + +// TSigVerifierCacheInterface specifies interface used by TSigVerifierCache. +type TSigVerifierCacheInterface interface { + // Configuration returns the configuration at a given round. + // Return the genesis configuration if round == 0. + Configuration(round uint64) *types.Config + + // DKGComplaints gets all the DKGComplaints of round. 
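+ // (Together with DKGMasterPublicKeys, the complaint set determines which
+ // nodes remain qualified when TSigVerifierCache.Update builds the group
+ // public key for a round.)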
+ DKGComplaints(round uint64) []*typesDKG.Complaint + + // DKGMasterPublicKeys gets all the DKGMasterPublicKey of round. + DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey + + // IsDKGFinal checks if DKG is final. + IsDKGFinal(round uint64) bool +} + +// TSigVerifierCache is the cache for TSigVerifier. +type TSigVerifierCache struct { + intf TSigVerifierCacheInterface + verifier map[uint64]TSigVerifier + minRound uint64 + cacheSize int + lock sync.RWMutex +} + +type tsigProtocol struct { + nodePublicKeys *typesDKG.NodePublicKeys + hash common.Hash + sigs map[dkg.ID]dkg.PartialSignature + threshold int +} + +func newDKGProtocol( + ID types.NodeID, + recv dkgReceiver, + round uint64, + reset uint64, + threshold int) *dkgProtocol { + + prvShare, pubShare := dkg.NewPrivateKeyShares(threshold) + + recv.ProposeDKGMasterPublicKey(&typesDKG.MasterPublicKey{ + Round: round, + Reset: reset, + DKGID: typesDKG.NewID(ID), + PublicKeyShares: *pubShare.Move(), + }) + + return &dkgProtocol{ + ID: ID, + recv: recv, + round: round, + reset: reset, + threshold: threshold, + idMap: make(map[types.NodeID]dkg.ID), + mpkMap: make(map[types.NodeID]*dkg.PublicKeyShares), + masterPrivateShare: prvShare, + prvShares: dkg.NewEmptyPrivateKeyShares(), + prvSharesReceived: make(map[types.NodeID]struct{}), + nodeComplained: make(map[types.NodeID]struct{}), + antiComplaintReceived: make(map[types.NodeID]map[types.NodeID]struct{}), + } +} + +func recoverDKGProtocol( + ID types.NodeID, + recv dkgReceiver, + round uint64, + reset uint64, + coreDB db.Database) (*dkgProtocol, error) { + dkgProtocolInfo, err := coreDB.GetDKGProtocol() + if err != nil { + if err == db.ErrDKGProtocolDoesNotExist { + return nil, nil + } + return nil, err + } + + dkgProtocol := dkgProtocol{ + recv: recv, + } + dkgProtocol.convertFromInfo(dkgProtocolInfo) + + if dkgProtocol.ID != ID || dkgProtocol.round != round || dkgProtocol.reset != reset { + return nil, nil + } + + return &dkgProtocol, nil +} + +func (d *dkgProtocol) processMasterPublicKeys( + mpks []*typesDKG.MasterPublicKey) (err error) { + d.idMap = make(map[types.NodeID]dkg.ID, len(mpks)) + d.mpkMap = make(map[types.NodeID]*dkg.PublicKeyShares, len(mpks)) + d.prvSharesReceived = make(map[types.NodeID]struct{}, len(mpks)) + ids := make(dkg.IDs, len(mpks)) + for i := range mpks { + if mpks[i].Reset != d.reset { + return ErrUnexpectedDKGResetCount{ + expect: d.reset, + actual: mpks[i].Reset, + proposerID: mpks[i].ProposerID, + } + } + nID := mpks[i].ProposerID + d.idMap[nID] = mpks[i].DKGID + d.mpkMap[nID] = &mpks[i].PublicKeyShares + ids[i] = mpks[i].DKGID + } + d.masterPrivateShare.SetParticipants(ids) + if err = d.verifySelfPrvShare(); err != nil { + return + } + for _, mpk := range mpks { + share, ok := d.masterPrivateShare.Share(mpk.DKGID) + if !ok { + err = ErrIDShareNotFound + continue + } + d.recv.ProposeDKGPrivateShare(&typesDKG.PrivateShare{ + ReceiverID: mpk.ProposerID, + Round: d.round, + Reset: d.reset, + PrivateShare: *share, + }) + } + return +} + +func (d *dkgProtocol) verifySelfPrvShare() error { + selfMPK, exist := d.mpkMap[d.ID] + if !exist { + return ErrSelfMPKNotRegister + } + share, ok := d.masterPrivateShare.Share(d.idMap[d.ID]) + if !ok { + return ErrUnableGetSelfPrvShare + } + ok, err := selfMPK.VerifyPrvShare( + d.idMap[d.ID], share) + if err != nil { + return err + } + if !ok { + return ErrSelfPrvShareMismatch + } + return nil +} + +func (d *dkgProtocol) proposeNackComplaints() { + for nID := range d.mpkMap { + if _, exist := d.prvSharesReceived[nID]; 
exist { + continue + } + d.recv.ProposeDKGComplaint(&typesDKG.Complaint{ + Round: d.round, + Reset: d.reset, + PrivateShare: typesDKG.PrivateShare{ + ProposerID: nID, + Round: d.round, + Reset: d.reset, + }, + }) + } +} + +func (d *dkgProtocol) processNackComplaints(complaints []*typesDKG.Complaint) ( + err error) { + if err = d.verifySelfPrvShare(); err != nil { + return + } + for _, complaint := range complaints { + if !complaint.IsNack() { + continue + } + if complaint.Reset != d.reset { + continue + } + if complaint.PrivateShare.ProposerID != d.ID { + continue + } + id, exist := d.idMap[complaint.ProposerID] + if !exist { + err = ErrNotDKGParticipant + continue + } + share, ok := d.masterPrivateShare.Share(id) + if !ok { + err = ErrIDShareNotFound + continue + } + d.recv.ProposeDKGAntiNackComplaint(&typesDKG.PrivateShare{ + ProposerID: d.ID, + ReceiverID: complaint.ProposerID, + Round: d.round, + Reset: d.reset, + PrivateShare: *share, + }) + } + return +} + +func (d *dkgProtocol) enforceNackComplaints(complaints []*typesDKG.Complaint) { + complained := make(map[types.NodeID]struct{}) + // Do not propose nack complaint to itself. + complained[d.ID] = struct{}{} + for _, complaint := range complaints { + if d.round != complaint.Round || d.reset != complaint.Reset { + continue + } + if !complaint.IsNack() { + continue + } + if complaint.Reset != d.reset { + continue + } + to := complaint.PrivateShare.ProposerID + if _, exist := complained[to]; exist { + continue + } + from := complaint.ProposerID + // Nack complaint is already proposed. + if from == d.ID { + continue + } + if _, exist := + d.antiComplaintReceived[from][to]; !exist { + complained[to] = struct{}{} + d.recv.ProposeDKGComplaint(&typesDKG.Complaint{ + Round: d.round, + Reset: d.reset, + PrivateShare: typesDKG.PrivateShare{ + ProposerID: to, + Round: d.round, + Reset: d.reset, + }, + }) + } + } +} + +func (d *dkgProtocol) sanityCheck(prvShare *typesDKG.PrivateShare) error { + if d.round != prvShare.Round { + return ErrUnexpectedRound{ + expect: d.round, + actual: prvShare.Round, + proposerID: prvShare.ProposerID, + } + } + if d.reset != prvShare.Reset { + return ErrUnexpectedDKGResetCount{ + expect: d.reset, + actual: prvShare.Reset, + proposerID: prvShare.ProposerID, + } + } + if _, exist := d.idMap[prvShare.ProposerID]; !exist { + return ErrNotDKGParticipant + } + ok, err := utils.VerifyDKGPrivateShareSignature(prvShare) + if err != nil { + return err + } + if !ok { + return ErrIncorrectPrivateShareSignature + } + return nil +} + +func (d *dkgProtocol) processPrivateShare( + prvShare *typesDKG.PrivateShare) error { + receiverID, exist := d.idMap[prvShare.ReceiverID] + // This node is not a DKG participant, ignore the private share. 
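+ // (Specifically: the share's ReceiverID has no entry in idMap, so there
+ // is no DKG ID to verify the share against and it is silently dropped.)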
+ if !exist { + return nil + } + if prvShare.ReceiverID == d.ID { + if _, exist := d.prvSharesReceived[prvShare.ProposerID]; exist { + return nil + } + } else { + if _, exist := d.antiComplaintReceived[prvShare.ReceiverID]; exist { + if _, exist := + d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID]; exist { + return nil + } + } + } + if err := d.sanityCheck(prvShare); err != nil { + return err + } + mpk := d.mpkMap[prvShare.ProposerID] + ok, err := mpk.VerifyPrvShare(receiverID, &prvShare.PrivateShare) + if err != nil { + return err + } + if prvShare.ReceiverID == d.ID { + d.prvSharesReceived[prvShare.ProposerID] = struct{}{} + } + if !ok { + if _, exist := d.nodeComplained[prvShare.ProposerID]; exist { + return nil + } + complaint := &typesDKG.Complaint{ + Round: d.round, + Reset: d.reset, + PrivateShare: *prvShare, + } + d.nodeComplained[prvShare.ProposerID] = struct{}{} + d.recv.ProposeDKGComplaint(complaint) + } else if prvShare.ReceiverID == d.ID { + sender := d.idMap[prvShare.ProposerID] + if err := d.prvShares.AddShare(sender, &prvShare.PrivateShare); err != nil { + return err + } + } else { + // The prvShare is an anti complaint. + if _, exist := d.antiComplaintReceived[prvShare.ReceiverID]; !exist { + d.antiComplaintReceived[prvShare.ReceiverID] = + make(map[types.NodeID]struct{}) + } + if _, exist := + d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID]; !exist { + d.recv.ProposeDKGAntiNackComplaint(prvShare) + d.antiComplaintReceived[prvShare.ReceiverID][prvShare.ProposerID] = + struct{}{} + } + } + return nil +} + +func (d *dkgProtocol) proposeMPKReady() { + d.recv.ProposeDKGMPKReady(&typesDKG.MPKReady{ + ProposerID: d.ID, + Round: d.round, + Reset: d.reset, + }) +} + +func (d *dkgProtocol) proposeFinalize() { + d.recv.ProposeDKGFinalize(&typesDKG.Finalize{ + ProposerID: d.ID, + Round: d.round, + Reset: d.reset, + }) +} + +func (d *dkgProtocol) proposeSuccess() { + d.recv.ProposeDKGSuccess(&typesDKG.Success{ + ProposerID: d.ID, + Round: d.round, + Reset: d.reset, + }) +} + +func (d *dkgProtocol) recoverShareSecret(qualifyIDs dkg.IDs) ( + *dkgShareSecret, error) { + if len(qualifyIDs) < d.threshold { + return nil, typesDKG.ErrNotReachThreshold + } + prvKey, err := d.prvShares.RecoverPrivateKey(qualifyIDs) + if err != nil { + return nil, err + } + return &dkgShareSecret{ + privateKey: prvKey, + }, nil +} + +func (ss *dkgShareSecret) sign(hash common.Hash) dkg.PartialSignature { + // DKG sign will always success. + sig, _ := ss.privateKey.Sign(hash) + return dkg.PartialSignature(sig) +} + +// NewTSigVerifierCache creats a TSigVerifierCache instance. +func NewTSigVerifierCache( + intf TSigVerifierCacheInterface, cacheSize int) *TSigVerifierCache { + return &TSigVerifierCache{ + intf: intf, + verifier: make(map[uint64]TSigVerifier), + cacheSize: cacheSize, + } +} + +// UpdateAndGet calls Update and then Get. +func (tc *TSigVerifierCache) UpdateAndGet(round uint64) ( + TSigVerifier, bool, error) { + ok, err := tc.Update(round) + if err != nil { + return nil, false, err + } + if !ok { + return nil, false, nil + } + v, ok := tc.Get(round) + return v, ok, nil +} + +// Purge the cache. +func (tc *TSigVerifierCache) Purge(round uint64) { + tc.lock.Lock() + defer tc.lock.Unlock() + delete(tc.verifier, round) +} + +// Update the cache and returns if success. 
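+//
+// Update returns (false, nil) while the round's DKG is not yet final, and
+// evicts the oldest cached round once the cache exceeds its size. A typical
+// caller polls through UpdateAndGet (a sketch; hash and sig are assumed to
+// be in scope):
+//
+//     v, ok, err := tc.UpdateAndGet(round)
+//     if err != nil {
+//         // round purged, or the group public key could not be built
+//     } else if !ok {
+//         // DKG for this round is not final yet; retry later
+//     } else {
+//         _ = v.VerifySignature(hash, sig)
+//     }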
+func (tc *TSigVerifierCache) Update(round uint64) (bool, error) { + tc.lock.Lock() + defer tc.lock.Unlock() + if round < tc.minRound { + return false, ErrRoundAlreadyPurged + } + if _, exist := tc.verifier[round]; exist { + return true, nil + } + if !tc.intf.IsDKGFinal(round) { + return false, nil + } + gpk, err := typesDKG.NewGroupPublicKey(round, + tc.intf.DKGMasterPublicKeys(round), + tc.intf.DKGComplaints(round), + utils.GetDKGThreshold(utils.GetConfigWithPanic(tc.intf, round, nil))) + if err != nil { + return false, err + } + if len(tc.verifier) == 0 { + tc.minRound = round + } + tc.verifier[round] = gpk + if len(tc.verifier) > tc.cacheSize { + delete(tc.verifier, tc.minRound) + } + for { + if _, exist := tc.verifier[tc.minRound]; !exist { + tc.minRound++ + } else { + break + } + } + return true, nil +} + +// Delete the cache of given round. +func (tc *TSigVerifierCache) Delete(round uint64) { + tc.lock.Lock() + defer tc.lock.Unlock() + delete(tc.verifier, round) +} + +// Get the TSigVerifier of round and returns if it exists. +func (tc *TSigVerifierCache) Get(round uint64) (TSigVerifier, bool) { + tc.lock.RLock() + defer tc.lock.RUnlock() + verifier, exist := tc.verifier[round] + return verifier, exist +} + +func newTSigProtocol( + npks *typesDKG.NodePublicKeys, + hash common.Hash) *tsigProtocol { + return &tsigProtocol{ + nodePublicKeys: npks, + hash: hash, + sigs: make(map[dkg.ID]dkg.PartialSignature, npks.Threshold+1), + } +} + +func (tsig *tsigProtocol) sanityCheck(psig *typesDKG.PartialSignature) error { + _, exist := tsig.nodePublicKeys.PublicKeys[psig.ProposerID] + if !exist { + return ErrNotQualifyDKGParticipant + } + ok, err := utils.VerifyDKGPartialSignatureSignature(psig) + if err != nil { + return err + } + if !ok { + return ErrIncorrectPartialSignatureSignature + } + if psig.Hash != tsig.hash { + return ErrMismatchPartialSignatureHash + } + return nil +} + +func (tsig *tsigProtocol) processPartialSignature( + psig *typesDKG.PartialSignature) error { + if psig.Round != tsig.nodePublicKeys.Round { + return nil + } + id, exist := tsig.nodePublicKeys.IDMap[psig.ProposerID] + if !exist { + return ErrNotQualifyDKGParticipant + } + if err := tsig.sanityCheck(psig); err != nil { + return err + } + pubKey := tsig.nodePublicKeys.PublicKeys[psig.ProposerID] + if !pubKey.VerifySignature( + tsig.hash, crypto.Signature(psig.PartialSignature)) { + return ErrIncorrectPartialSignature + } + tsig.sigs[id] = psig.PartialSignature + return nil +} + +func (tsig *tsigProtocol) signature() (crypto.Signature, error) { + if len(tsig.sigs) < tsig.nodePublicKeys.Threshold { + return crypto.Signature{}, ErrNotEnoughtPartialSignatures + } + ids := make(dkg.IDs, 0, len(tsig.sigs)) + psigs := make([]dkg.PartialSignature, 0, len(tsig.sigs)) + for id, psig := range tsig.sigs { + ids = append(ids, id) + psigs = append(psigs, psig) + } + return dkg.RecoverSignature(psigs, ids) +} diff --git a/dex/consensus/core/dkg-tsig-protocol_test.go b/dex/consensus/core/dkg-tsig-protocol_test.go new file mode 100644 index 000000000..184460b1b --- /dev/null +++ b/dex/consensus/core/dkg-tsig-protocol_test.go @@ -0,0 +1,1160 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. 
+// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" + "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa" + "github.com/dexon-foundation/dexon-consensus/core/test" + "github.com/dexon-foundation/dexon-consensus/core/types" + typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" + "github.com/dexon-foundation/dexon-consensus/core/utils" +) + +type DKGTSIGProtocolTestSuite struct { + suite.Suite + + nIDs types.NodeIDs + dkgIDs map[types.NodeID]dkg.ID + signers map[types.NodeID]*utils.Signer +} + +type testDKGReceiver struct { + s *DKGTSIGProtocolTestSuite + + signer *utils.Signer + complaints map[types.NodeID]*typesDKG.Complaint + mpk *typesDKG.MasterPublicKey + prvShare map[types.NodeID]*typesDKG.PrivateShare + antiComplaints map[types.NodeID]*typesDKG.PrivateShare + ready []*typesDKG.MPKReady + final []*typesDKG.Finalize + success []*typesDKG.Success +} + +func newTestDKGReceiver(s *DKGTSIGProtocolTestSuite, + signer *utils.Signer) *testDKGReceiver { + return &testDKGReceiver{ + s: s, + signer: signer, + complaints: make(map[types.NodeID]*typesDKG.Complaint), + prvShare: make(map[types.NodeID]*typesDKG.PrivateShare), + antiComplaints: make(map[types.NodeID]*typesDKG.PrivateShare), + } +} + +func (r *testDKGReceiver) ProposeDKGComplaint(complaint *typesDKG.Complaint) { + complaint = test.CloneDKGComplaint(complaint) + err := r.signer.SignDKGComplaint(complaint) + r.s.Require().NoError(err) + r.complaints[complaint.PrivateShare.ProposerID] = complaint +} + +func (r *testDKGReceiver) ProposeDKGMasterPublicKey( + mpk *typesDKG.MasterPublicKey) { + mpk = test.CloneDKGMasterPublicKey(mpk) + err := r.signer.SignDKGMasterPublicKey(mpk) + r.s.Require().NoError(err) + r.mpk = mpk +} + +func (r *testDKGReceiver) ProposeDKGPrivateShare( + prv *typesDKG.PrivateShare) { + prv = test.CloneDKGPrivateShare(prv) + err := r.signer.SignDKGPrivateShare(prv) + r.s.Require().NoError(err) + r.prvShare[prv.ReceiverID] = prv +} + +func (r *testDKGReceiver) ProposeDKGAntiNackComplaint( + prv *typesDKG.PrivateShare) { + prv = test.CloneDKGPrivateShare(prv) + err := r.signer.SignDKGPrivateShare(prv) + r.s.Require().NoError(err) + r.antiComplaints[prv.ReceiverID] = prv +} + +func (r *testDKGReceiver) ProposeDKGMPKReady(ready *typesDKG.MPKReady) { + r.ready = append(r.ready, ready) +} + +func (r *testDKGReceiver) ProposeDKGFinalize(final *typesDKG.Finalize) { + r.final = append(r.final, final) +} + +func (r *testDKGReceiver) ProposeDKGSuccess(success *typesDKG.Success) { + r.success = append(r.success, success) +} + +func (s *DKGTSIGProtocolTestSuite) setupDKGParticipants(n int) { + s.nIDs = make(types.NodeIDs, 0, n) + s.signers = make(map[types.NodeID]*utils.Signer, n) + s.dkgIDs = make(map[types.NodeID]dkg.ID) + ids := make(dkg.IDs, 0, n) + for i := 0; i < n; i++ { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + nID := 
types.NewNodeID(prvKey.PublicKey()) + s.nIDs = append(s.nIDs, nID) + s.signers[nID] = utils.NewSigner(prvKey) + id := dkg.NewID(nID.Hash[:]) + ids = append(ids, id) + s.dkgIDs[nID] = id + } +} + +func (s *DKGTSIGProtocolTestSuite) newGov( + pubKeys []crypto.PublicKey, + round, reset uint64) *test.Governance { + // NOTE: this method doesn't make the tip round in governance to the input + // one. + gov, err := test.NewGovernance(test.NewState(DKGDelayRound, + pubKeys, 100, &common.NullLogger{}, true), ConfigRoundShift) + s.Require().NoError(err) + for i := uint64(0); i < reset; i++ { + s.Require().NoError(gov.State().RequestChange(test.StateResetDKG, + common.NewRandomHash())) + } + s.Require().Equal(gov.DKGResetCount(round), reset) + return gov +} + +func (s *DKGTSIGProtocolTestSuite) newProtocols(k, n int, round, reset uint64) ( + map[types.NodeID]*testDKGReceiver, map[types.NodeID]*dkgProtocol) { + s.setupDKGParticipants(n) + + receivers := make(map[types.NodeID]*testDKGReceiver, n) + protocols := make(map[types.NodeID]*dkgProtocol, n) + for _, nID := range s.nIDs { + receivers[nID] = newTestDKGReceiver(s, s.signers[nID]) + protocols[nID] = newDKGProtocol( + nID, + receivers[nID], + round, + reset, + k, + ) + s.Require().NotNil(receivers[nID].mpk) + } + return receivers, protocols +} + +// TestDKGTSIGProtocol will test the entire DKG+TISG protocol including +// exchanging private shares, recovering share secret, creating partial sign and +// recovering threshold signature. +// All participants are good people in this test. +func (s *DKGTSIGProtocolTestSuite) TestDKGTSIGProtocol() { + k := 2 + n := 10 + round := uint64(1) + reset := uint64(3) + _, pubKeys, err := test.NewKeys(5) + s.Require().NoError(err) + gov := s.newGov(pubKeys, round, reset) + + receivers, protocols := s.newProtocols(k, n, round, reset) + + for _, receiver := range receivers { + gov.AddDKGMasterPublicKey(receiver.mpk) + } + + for _, protocol := range protocols { + s.Require().NoError( + protocol.processMasterPublicKeys(gov.DKGMasterPublicKeys(round))) + } + + for _, receiver := range receivers { + s.Require().Len(receiver.prvShare, n) + for nID, prvShare := range receiver.prvShare { + s.Require().NoError(protocols[nID].processPrivateShare(prvShare)) + } + } + + for _, protocol := range protocols { + protocol.proposeNackComplaints() + } + + for _, recv := range receivers { + s.Require().Len(recv.complaints, 0) + } + + for _, receiver := range receivers { + for _, complaint := range receiver.complaints { + gov.AddDKGComplaint(complaint) + } + } + + for _, protocol := range protocols { + s.Require().NoError(protocol.processNackComplaints( + gov.DKGComplaints(round))) + } + + for _, recv := range receivers { + s.Require().Len(recv.antiComplaints, 0) + } + + for _, protocol := range protocols { + protocol.enforceNackComplaints(gov.DKGComplaints(round)) + } + + for _, recv := range receivers { + s.Require().Len(recv.complaints, 0) + } + + // DKG is fininished. 
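+ // From here the test recovers the group public key and the qualified ID
+ // set, derives each node's share secret, and finally combines k partial
+ // signatures into one threshold signature.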
+ gpk, err := typesDKG.NewGroupPublicKey(round, + gov.DKGMasterPublicKeys(round), gov.DKGComplaints(round), + k) + s.Require().NoError(err) + s.Require().Len(gpk.QualifyIDs, n) + qualifyIDs := make(map[dkg.ID]struct{}, len(gpk.QualifyIDs)) + for _, id := range gpk.QualifyIDs { + qualifyIDs[id] = struct{}{} + } + + for nID := range gpk.QualifyNodeIDs { + id, exist := gpk.IDMap[nID] + s.Require().True(exist) + _, exist = qualifyIDs[id] + s.Require().True(exist) + } + + shareSecrets := make( + map[types.NodeID]*dkgShareSecret, len(qualifyIDs)) + + for nID, protocol := range protocols { + _, exist := qualifyIDs[s.dkgIDs[nID]] + s.Require().True(exist) + var err error + shareSecrets[nID], err = protocol.recoverShareSecret(gpk.QualifyIDs) + s.Require().NoError(err) + } + + npks, err := typesDKG.NewNodePublicKeys(round, + gov.DKGMasterPublicKeys(round), gov.DKGComplaints(round), k) + s.Require().NoError(err) + msgHash := crypto.Keccak256Hash([]byte("🏖🍹")) + tsig := newTSigProtocol(npks, msgHash) + for nID, shareSecret := range shareSecrets { + psig := &typesDKG.PartialSignature{ + ProposerID: nID, + Round: round, + Hash: msgHash, + PartialSignature: shareSecret.sign(msgHash), + } + err := s.signers[nID].SignDKGPartialSignature(psig) + s.Require().NoError(err) + s.Require().NoError(tsig.processPartialSignature(psig)) + if len(tsig.sigs) >= k { + break + } + } + + sig, err := tsig.signature() + s.Require().NoError(err) + s.True(gpk.VerifySignature(msgHash, sig)) +} + +func (s *DKGTSIGProtocolTestSuite) TestErrMPKRegistered() { + k := 2 + n := 10 + round := uint64(1) + reset := uint64(2) + _, pubKeys, err := test.NewKeys(5) + s.Require().NoError(err) + gov := s.newGov(pubKeys, round, reset) + + receivers, protocols := s.newProtocols(k, n, round, reset) + notRegisterID := s.nIDs[0] + errRegisterID := s.nIDs[1] + + for ID, receiver := range receivers { + if ID == notRegisterID { + continue + } + if ID == errRegisterID { + _, mpk := dkg.NewPrivateKeyShares(k) + receiver.ProposeDKGMasterPublicKey(&typesDKG.MasterPublicKey{ + Round: round, + Reset: reset, + DKGID: typesDKG.NewID(ID), + PublicKeyShares: *mpk.Move(), + }) + } + gov.AddDKGMasterPublicKey(receiver.mpk) + } + + for ID, protocol := range protocols { + err := protocol.processMasterPublicKeys(gov.DKGMasterPublicKeys(round)) + if ID == notRegisterID { + s.Require().Equal(ErrSelfMPKNotRegister, err) + } else if ID == errRegisterID { + s.Require().Equal(ErrSelfPrvShareMismatch, err) + } else { + s.Require().NoError(err) + } + } + + for ID, receiver := range receivers { + if ID == notRegisterID || ID == errRegisterID { + continue + } + s.Require().Len(receiver.prvShare, n-1) + for nID, prvShare := range receiver.prvShare { + s.Require().NoError(protocols[nID].processPrivateShare(prvShare)) + } + } + + for ID, protocol := range protocols { + if ID == notRegisterID { + continue + } + protocol.proposeNackComplaints() + } + + for ID, recv := range receivers { + if ID == notRegisterID || ID == errRegisterID { + continue + } + s.Require().Len(recv.complaints, 1) + for _, complaint := range recv.complaints { + s.Require().True(complaint.IsNack()) + s.Require().Equal(errRegisterID, complaint.PrivateShare.ProposerID) + } + } + + for _, receiver := range receivers { + for _, complaint := range receiver.complaints { + gov.AddDKGComplaint(complaint) + } + } + + s.Require().Len(gov.DKGComplaints(round), n-1) + + for ID, protocol := range protocols { + err := protocol.processNackComplaints(gov.DKGComplaints(round)) + if ID == notRegisterID { + 
s.Require().Equal(ErrSelfMPKNotRegister, err) + } else if ID == errRegisterID { + s.Require().Equal(ErrSelfPrvShareMismatch, err) + } else { + s.Require().NoError(err) + } + } + + for _, recv := range receivers { + s.Require().Len(recv.antiComplaints, 0) + } + + for _, protocol := range protocols { + protocol.enforceNackComplaints(gov.DKGComplaints(round)) + } + + for ID, recv := range receivers { + if ID == notRegisterID || ID == errRegisterID { + continue + } + s.Require().Len(recv.complaints, 1) + for _, complaint := range recv.complaints { + s.Require().True(complaint.IsNack()) + s.Require().Equal(errRegisterID, complaint.PrivateShare.ProposerID) + } + } + + // DKG is fininished. + gpk, err := typesDKG.NewGroupPublicKey(round, + gov.DKGMasterPublicKeys(round), gov.DKGComplaints(round), + k, + ) + s.Require().NoError(err) + s.Require().Len(gpk.QualifyIDs, n-2) + qualifyIDs := make(map[dkg.ID]struct{}, len(gpk.QualifyIDs)) + for _, id := range gpk.QualifyIDs { + qualifyIDs[id] = struct{}{} + } + + for nID := range gpk.QualifyNodeIDs { + if nID == notRegisterID || nID == errRegisterID { + continue + } + id, exist := gpk.IDMap[nID] + s.Require().True(exist) + _, exist = qualifyIDs[id] + s.Require().True(exist) + } + +} + +func (s *DKGTSIGProtocolTestSuite) TestNackComplaint() { + k := 3 + n := 10 + round := uint64(1) + reset := uint64(3) + _, pubKeys, err := test.NewKeys(5) + s.Require().NoError(err) + gov := s.newGov(pubKeys, round, reset) + + receivers, protocols := s.newProtocols(k, n, round, reset) + + byzantineID := s.nIDs[0] + + for _, receiver := range receivers { + gov.AddDKGMasterPublicKey(receiver.mpk) + } + + for _, protocol := range protocols { + s.Require().NoError( + protocol.processMasterPublicKeys(gov.DKGMasterPublicKeys(round))) + } + + for senderID, receiver := range receivers { + s.Require().Len(receiver.prvShare, n) + if senderID == byzantineID { + continue + } + for nID, prvShare := range receiver.prvShare { + s.Require().NoError(protocols[nID].processPrivateShare(prvShare)) + } + } + + for _, protocol := range protocols { + protocol.proposeNackComplaints() + } + + for _, recv := range receivers { + complaint, exist := recv.complaints[byzantineID] + s.True(complaint.IsNack()) + s.Require().True(exist) + s.True(utils.VerifyDKGComplaintSignature(complaint)) + } +} + +// TestComplaint tests if the received private share is not valid, a complaint +// should be proposed. +func (s *DKGTSIGProtocolTestSuite) TestComplaint() { + k := 3 + n := 10 + round := uint64(1) + reset := uint64(3) + _, pubKeys, err := test.NewKeys(5) + s.Require().NoError(err) + gov := s.newGov(pubKeys, round, reset) + + receivers, protocols := s.newProtocols(k, n, round, reset) + + byzantineID := s.nIDs[0] + targetID := s.nIDs[1] + receiver := receivers[targetID] + protocol := protocols[targetID] + + for _, receiver := range receivers { + gov.AddDKGMasterPublicKey(receiver.mpk) + } + + for _, protocol := range protocols { + s.Require().NoError( + protocol.processMasterPublicKeys(gov.DKGMasterPublicKeys(round))) + } + + // These messages are not valid. 
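+	// The share below names a random, unregistered proposer, so it is
+	// rejected with ErrNotDKGParticipant; the next one has its ReceiverID
+	// re-targeted after signing, so it fails signature verification with
+	// ErrIncorrectPrivateShareSignature.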
+	err = protocol.processPrivateShare(&typesDKG.PrivateShare{
+		ProposerID: types.NodeID{Hash: common.NewRandomHash()},
+		ReceiverID: targetID,
+		Round:      round,
+		Reset:      reset,
+	})
+	s.Equal(ErrNotDKGParticipant, err)
+	receivers[byzantineID].ProposeDKGPrivateShare(&typesDKG.PrivateShare{
+		ProposerID: byzantineID,
+		ReceiverID: targetID,
+		Round:      round,
+		Reset:      reset,
+	})
+	invalidShare := receivers[byzantineID].prvShare[targetID]
+	invalidShare.ReceiverID = s.nIDs[2]
+	err = protocol.processPrivateShare(invalidShare)
+	s.Equal(ErrIncorrectPrivateShareSignature, err)
+	delete(receivers[byzantineID].prvShare, targetID)
+
+	// Byzantine node is sending an incorrect private share.
+	receivers[byzantineID].ProposeDKGPrivateShare(&typesDKG.PrivateShare{
+		ProposerID:   byzantineID,
+		ReceiverID:   targetID,
+		Round:        round,
+		Reset:        reset,
+		PrivateShare: *dkg.NewPrivateKey(),
+	})
+	invalidShare = receivers[byzantineID].prvShare[targetID]
+	s.Require().NoError(protocol.processPrivateShare(invalidShare))
+	s.Require().Len(receiver.complaints, 1)
+	complaint, exist := receiver.complaints[byzantineID]
+	s.True(exist)
+	s.Equal(byzantineID, complaint.PrivateShare.ProposerID)
+
+	// Sending the incorrect private share again should not complain.
+	delete(receiver.complaints, byzantineID)
+	s.Require().NoError(protocol.processPrivateShare(invalidShare))
+	s.Len(receiver.complaints, 0)
+}
+
+// TestDuplicateComplaint tests that duplicated complaints are processed
+// properly.
+func (s *DKGTSIGProtocolTestSuite) TestDuplicateComplaint() {
+	k := 3
+	n := 10
+	round := uint64(1)
+	reset := uint64(3)
+	_, pubKeys, err := test.NewKeys(5)
+	s.Require().NoError(err)
+	gov := s.newGov(pubKeys, round, reset)
+
+	receivers, _ := s.newProtocols(k, n, round, reset)
+
+	byzantineID := s.nIDs[0]
+	victimID := s.nIDs[1]
+
+	for _, receiver := range receivers {
+		gov.AddDKGMasterPublicKey(receiver.mpk)
+	}
+
+	// Test for nack complaints.
+	complaints := make([]*typesDKG.Complaint, k+1)
+	for i := range complaints {
+		complaints[i] = &typesDKG.Complaint{
+			ProposerID: byzantineID,
+			Round:      round,
+			Reset:      reset,
+			PrivateShare: typesDKG.PrivateShare{
+				ProposerID: victimID,
+				Round:      round,
+				Reset:      reset,
+			},
+		}
+		s.Require().True(complaints[i].IsNack())
+	}
+
+	gpk, err := typesDKG.NewGroupPublicKey(round,
+		gov.DKGMasterPublicKeys(round), complaints,
+		k,
+	)
+	s.Require().NoError(err)
+	s.Require().Len(gpk.QualifyIDs, n)
+}
+
+// TestAntiComplaint tests that an anti-complaint is created when a nack
+// complaint is received.
+func (s *DKGTSIGProtocolTestSuite) TestAntiComplaint() {
+	k := 3
+	n := 10
+	round := uint64(1)
+	reset := uint64(3)
+	_, pubKeys, err := test.NewKeys(5)
+	s.Require().NoError(err)
+	gov := s.newGov(pubKeys, round, reset)
+
+	receivers, protocols := s.newProtocols(k, n, round, reset)
+
+	byzantineID := s.nIDs[0]
+	targetID := s.nIDs[1]
+	thirdPerson := s.nIDs[2]
+
+	for _, receiver := range receivers {
+		gov.AddDKGMasterPublicKey(receiver.mpk)
+	}
+
+	for _, protocol := range protocols {
+		s.Require().NoError(
+			protocol.processMasterPublicKeys(gov.DKGMasterPublicKeys(round)))
+	}
+
+	// Creating nack complaint.
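+	// No private share has been processed at this point, so the nack
+	// complaints proposed below include one against byzantineID, which is
+	// then answered with an anti-complaint.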
+	protocols[targetID].proposeNackComplaints()
+	protocols[thirdPerson].proposeNackComplaints()
+	complaint, exist := receivers[targetID].complaints[byzantineID]
+	s.Require().True(exist)
+	s.Require().True(complaint.IsNack())
+	s.Require().Equal(byzantineID, complaint.PrivateShare.ProposerID)
+
+	complaint2, exist := receivers[thirdPerson].complaints[byzantineID]
+	s.Require().True(exist)
+	s.Require().True(complaint2.IsNack())
+	s.Require().Equal(byzantineID, complaint2.PrivateShare.ProposerID)
+
+	// Creating an anti-nack complaint.
+	err = protocols[byzantineID].processNackComplaints(
+		[]*typesDKG.Complaint{complaint})
+	s.Require().NoError(err)
+	s.Require().Len(receivers[byzantineID].antiComplaints, 1)
+	antiComplaint, exist := receivers[byzantineID].antiComplaints[targetID]
+	s.Require().True(exist)
+	s.Require().Equal(targetID, antiComplaint.ReceiverID)
+
+	// The anti-complaint should be successfully verified by all others.
+	receivers[targetID].complaints = make(map[types.NodeID]*typesDKG.Complaint)
+	s.Require().NoError(protocols[targetID].processPrivateShare(antiComplaint))
+	s.Len(receivers[targetID].complaints, 0)
+
+	receivers[thirdPerson].complaints = make(map[types.NodeID]*typesDKG.Complaint)
+	s.Require().NoError(protocols[thirdPerson].processPrivateShare(antiComplaint))
+	s.Len(receivers[thirdPerson].complaints, 0)
+}
+
+// TestEnforceNackComplaint tests if the nack complaint is enforced properly.
+func (s *DKGTSIGProtocolTestSuite) TestEnforceNackComplaint() {
+	k := 3
+	n := 10
+	round := uint64(1)
+	reset := uint64(3)
+	_, pubKeys, err := test.NewKeys(5)
+	s.Require().NoError(err)
+	gov := s.newGov(pubKeys, round, reset)
+
+	receivers, protocols := s.newProtocols(k, n, round, reset)
+
+	byzantineID := s.nIDs[0]
+	targetID := s.nIDs[1]
+	thirdPerson := s.nIDs[2]
+
+	for _, receiver := range receivers {
+		gov.AddDKGMasterPublicKey(receiver.mpk)
+	}
+
+	for _, protocol := range protocols {
+		s.Require().NoError(
+			protocol.processMasterPublicKeys(gov.DKGMasterPublicKeys(round)))
+	}
+
+	// Creating nack complaint.
+	protocols[targetID].proposeNackComplaints()
+	complaint, exist := receivers[targetID].complaints[byzantineID]
+	s.Require().True(exist)
+	s.Require().True(complaint.IsNack())
+	s.Require().Equal(byzantineID, complaint.PrivateShare.ProposerID)
+
+	// Enforce nack complaint.
+	protocols[thirdPerson].enforceNackComplaints([]*typesDKG.Complaint{complaint})
+	complaint2, exist := receivers[thirdPerson].complaints[byzantineID]
+	s.Require().True(exist)
+	s.Require().True(complaint2.IsNack())
+	s.Require().Equal(byzantineID, complaint2.PrivateShare.ProposerID)
+
+	// Received valid private share, do not enforce nack complaint.
+	delete(receivers[thirdPerson].complaints, byzantineID)
+	err = protocols[byzantineID].processNackComplaints(
+		[]*typesDKG.Complaint{complaint})
+	s.Require().NoError(err)
+	antiComplaint, exist := receivers[byzantineID].antiComplaints[targetID]
+	s.Require().True(exist)
+	s.Require().Equal(targetID, antiComplaint.ReceiverID)
+	s.Require().NoError(protocols[thirdPerson].processPrivateShare(antiComplaint))
+	protocols[thirdPerson].enforceNackComplaints([]*typesDKG.Complaint{complaint})
+	_, exist = receivers[thirdPerson].complaints[byzantineID]
+	s.Require().False(exist)
+}
+
+// TestQualifyIDs tests that an ID with t+1 nack complaints, or with a valid
+// complaint, is excluded from the qualified IDs.
+func (s *DKGTSIGProtocolTestSuite) TestQualifyIDs() { + k := 3 + n := 10 + round := uint64(1) + reset := uint64(3) + _, pubKeys, err := test.NewKeys(5) + s.Require().NoError(err) + gov := s.newGov(pubKeys, round, reset) + + receivers, _ := s.newProtocols(k, n, round, reset) + + byzantineID := s.nIDs[0] + + for _, receiver := range receivers { + gov.AddDKGMasterPublicKey(receiver.mpk) + } + + // Test for nack complaints. + complaints := make([]*typesDKG.Complaint, k+1) + for i := range complaints { + nID := s.nIDs[i] + complaints[i] = &typesDKG.Complaint{ + ProposerID: nID, + Round: round, + Reset: reset, + PrivateShare: typesDKG.PrivateShare{ + ProposerID: byzantineID, + Round: round, + Reset: reset, + }, + } + s.Require().True(complaints[i].IsNack()) + } + + gpk, err := typesDKG.NewGroupPublicKey(round, + gov.DKGMasterPublicKeys(round), complaints, + k, + ) + s.Require().NoError(err) + s.Require().Len(gpk.QualifyIDs, n-1) + for _, id := range gpk.QualifyIDs { + s.NotEqual(id, byzantineID) + } + + gpk2, err := typesDKG.NewGroupPublicKey(round, + gov.DKGMasterPublicKeys(round), complaints[:k-1], + k, + ) + s.Require().NoError(err) + s.Require().Len(gpk2.QualifyIDs, n) + + // Test for complaint. + complaints[0].PrivateShare.Signature = crypto.Signature{Signature: []byte{0}} + s.Require().False(complaints[0].IsNack()) + gpk3, err := typesDKG.NewGroupPublicKey(round, + gov.DKGMasterPublicKeys(round), complaints[:1], + k, + ) + s.Require().NoError(err) + s.Require().Len(gpk3.QualifyIDs, n-1) + for _, id := range gpk3.QualifyIDs { + s.NotEqual(id, byzantineID) + } +} + +// TestPartialSignature tests if tsigProtocol can handle incorrect partial +// signature and report error. +func (s *DKGTSIGProtocolTestSuite) TestPartialSignature() { + k := 3 + n := 10 + round := uint64(1) + reset := uint64(3) + _, pubKeys, err := test.NewKeys(5) + s.Require().NoError(err) + gov := s.newGov(pubKeys, round, reset) + + receivers, protocols := s.newProtocols(k, n, round, reset) + + byzantineID := s.nIDs[0] + + for _, receiver := range receivers { + gov.AddDKGMasterPublicKey(receiver.mpk) + } + + for _, protocol := range protocols { + s.Require().NoError( + protocol.processMasterPublicKeys(gov.DKGMasterPublicKeys(round))) + } + + for senderID, receiver := range receivers { + s.Require().Len(receiver.prvShare, n) + if senderID == byzantineID { + continue + } + for nID, prvShare := range receiver.prvShare { + s.Require().NoError(protocols[nID].processPrivateShare(prvShare)) + } + } + + for _, protocol := range protocols { + protocol.proposeNackComplaints() + } + + for _, recv := range receivers { + s.Require().Len(recv.complaints, 1) + complaint, exist := recv.complaints[byzantineID] + s.Require().True(exist) + gov.AddDKGComplaint(complaint) + } + + // DKG is fininished. 
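+	// byzantineID never delivered its shares, so every other participant
+	// filed a nack complaint against it; the group public key recovered
+	// below therefore qualifies only the remaining n-1 nodes.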
+ gpk, err := typesDKG.NewGroupPublicKey(round, + gov.DKGMasterPublicKeys(round), gov.DKGComplaints(round), + k, + ) + s.Require().NoError(err) + s.Require().Len(gpk.QualifyIDs, n-1) + qualifyIDs := make(map[dkg.ID]struct{}, len(gpk.QualifyIDs)) + for _, id := range gpk.QualifyIDs { + qualifyIDs[id] = struct{}{} + } + + shareSecrets := make( + map[types.NodeID]*dkgShareSecret, len(qualifyIDs)) + + for nID, protocol := range protocols { + _, exist := qualifyIDs[s.dkgIDs[nID]] + if nID == byzantineID { + exist = !exist + } + s.Require().True(exist) + var err error + shareSecrets[nID], err = protocol.recoverShareSecret(gpk.QualifyIDs) + s.Require().NoError(err) + } + + msgHash := crypto.Keccak256Hash([]byte("🏖🍹")) + npks, err := typesDKG.NewNodePublicKeys(round, + gov.DKGMasterPublicKeys(round), gov.DKGComplaints(round), k) + s.Require().NoError(err) + tsig := newTSigProtocol(npks, msgHash) + byzantineID2 := s.nIDs[1] + byzantineID3 := s.nIDs[2] + for nID, shareSecret := range shareSecrets { + psig := &typesDKG.PartialSignature{ + ProposerID: nID, + Round: round, + Hash: msgHash, + PartialSignature: shareSecret.sign(msgHash), + } + switch nID { + case byzantineID2: + psig.PartialSignature = shareSecret.sign( + crypto.Keccak256Hash([]byte("💣"))) + case byzantineID3: + psig.Hash = common.NewRandomHash() + } + err := s.signers[nID].SignDKGPartialSignature(psig) + s.Require().NoError(err) + err = tsig.processPartialSignature(psig) + switch nID { + case byzantineID: + s.Require().Equal(ErrNotQualifyDKGParticipant, err) + case byzantineID2: + s.Require().Equal(ErrIncorrectPartialSignature, err) + case byzantineID3: + s.Require().Equal(ErrMismatchPartialSignatureHash, err) + default: + s.Require().NoError(err) + } + } + + sig, err := tsig.signature() + s.Require().NoError(err) + s.True(gpk.VerifySignature(msgHash, sig)) +} + +func (s *DKGTSIGProtocolTestSuite) TestProposeReady() { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + recv := newTestDKGReceiver(s, utils.NewSigner(prvKey)) + nID := types.NewNodeID(prvKey.PublicKey()) + protocol := newDKGProtocol(nID, recv, 1, 3, 2) + protocol.proposeMPKReady() + s.Require().Len(recv.ready, 1) + ready := recv.ready[0] + s.Equal(&typesDKG.MPKReady{ + ProposerID: nID, + Round: 1, + Reset: 3, + }, ready) +} + +func (s *DKGTSIGProtocolTestSuite) TestProposeFinalize() { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + recv := newTestDKGReceiver(s, utils.NewSigner(prvKey)) + nID := types.NewNodeID(prvKey.PublicKey()) + protocol := newDKGProtocol(nID, recv, 1, 3, 2) + protocol.proposeFinalize() + s.Require().Len(recv.final, 1) + final := recv.final[0] + s.Equal(&typesDKG.Finalize{ + ProposerID: nID, + Round: 1, + Reset: 3, + }, final) +} + +func (s *DKGTSIGProtocolTestSuite) TestProposeSuccess() { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + recv := newTestDKGReceiver(s, utils.NewSigner(prvKey)) + nID := types.NewNodeID(prvKey.PublicKey()) + protocol := newDKGProtocol(nID, recv, 1, 3, 2) + protocol.proposeSuccess() + s.Require().Len(recv.success, 1) + success := recv.success[0] + s.Equal(&typesDKG.Success{ + ProposerID: nID, + Round: 1, + Reset: 3, + }, success) +} + +func (s *DKGTSIGProtocolTestSuite) TestTSigVerifierCache() { + k := 3 + n := 10 + round := uint64(10) + reset := uint64(0) + _, pubKeys, err := test.NewKeys(n) + s.Require().NoError(err) + gov := s.newGov(pubKeys, round, reset) + gov.CatchUpWithRound(round) + for i := 0; i < 10; i++ { + round := uint64(i + 1) + receivers, protocols := 
s.newProtocols(k, n, round, reset) + + for _, receiver := range receivers { + gov.AddDKGMasterPublicKey(receiver.mpk) + } + + for _, protocol := range protocols { + protocol.proposeMPKReady() + } + for _, recv := range receivers { + s.Require().Len(recv.ready, 1) + gov.AddDKGMPKReady(recv.ready[0]) + } + s.Require().True(gov.IsDKGMPKReady(round)) + + for _, protocol := range protocols { + protocol.proposeFinalize() + } + + for _, recv := range receivers { + s.Require().Len(recv.final, 1) + gov.AddDKGFinalize(recv.final[0]) + } + s.Require().True(gov.IsDKGFinal(round)) + } + + cache := NewTSigVerifierCache(gov, 3) + for i := 0; i < 5; i++ { + round := uint64(i + 1) + ok, err := cache.Update(round) + s.Require().NoError(err) + s.True(ok) + } + s.Len(cache.verifier, 3) + + for i := 0; i < 2; i++ { + round := uint64(i + 1) + _, exist := cache.Get(round) + s.False(exist) + } + + for i := 3; i < 5; i++ { + round := uint64(i + 1) + _, exist := cache.Get(round) + s.True(exist) + } + + ok, err := cache.Update(uint64(1)) + s.Require().Equal(ErrRoundAlreadyPurged, err) + + cache.Delete(uint64(5)) + s.Len(cache.verifier, 2) + _, exist := cache.Get(uint64(5)) + s.False(exist) + + cache = NewTSigVerifierCache(gov, 1) + ok, err = cache.Update(uint64(3)) + s.Require().NoError(err) + s.Require().True(ok) + s.Equal(uint64(3), cache.minRound) + + ok, err = cache.Update(uint64(5)) + s.Require().NoError(err) + s.Require().True(ok) + s.Equal(uint64(5), cache.minRound) + + cache.Purge(5) + s.Require().Len(cache.verifier, 0) + s.Require().Equal(uint64(5), cache.minRound) +} + +func (s *DKGTSIGProtocolTestSuite) TestUnexpectedDKGResetCount() { + // MPKs and private shares from unexpected reset count should be ignored. + k := 2 + n := 10 + round := uint64(1) + reset := uint64(3) + receivers, protocols := s.newProtocols(k, n, round, reset) + var sourceID, targetID types.NodeID + for sourceID = range receivers { + break + } + for targetID = range receivers { + break + } + // Test private share + s.Require().NoError(protocols[targetID].processMasterPublicKeys( + []*typesDKG.MasterPublicKey{ + receivers[targetID].mpk, + receivers[sourceID].mpk})) + receivers[sourceID].ProposeDKGPrivateShare(&typesDKG.PrivateShare{ + ProposerID: sourceID, + ReceiverID: targetID, + Round: round, + Reset: reset + 1, + PrivateShare: *dkg.NewPrivateKey(), + }) + err := protocols[targetID].processPrivateShare( + receivers[sourceID].prvShare[targetID]) + s.Require().IsType(ErrUnexpectedDKGResetCount{}, err) + // Test MPK. 
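+	// As with the private share above, an MPK carrying reset+1 instead of
+	// the expected reset count must be rejected with
+	// ErrUnexpectedDKGResetCount.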
+	_, mpk := dkg.NewPrivateKeyShares(k)
+	receivers[sourceID].ProposeDKGMasterPublicKey(&typesDKG.MasterPublicKey{
+		Round:           round,
+		Reset:           reset + 1,
+		DKGID:           typesDKG.NewID(sourceID),
+		PublicKeyShares: *mpk.Move(),
+	})
+	err = protocols[sourceID].processMasterPublicKeys(
+		[]*typesDKG.MasterPublicKey{receivers[sourceID].mpk})
+	s.Require().IsType(ErrUnexpectedDKGResetCount{}, err)
+}
+
+func TestDKGTSIGProtocol(t *testing.T) {
+	suite.Run(t, new(DKGTSIGProtocolTestSuite))
+}
+
+func BenchmarkGPK4_7(b *testing.B)    { benchmarkDKGGroupPublicKey(4, 7, b) }
+func BenchmarkGPK9_13(b *testing.B)   { benchmarkDKGGroupPublicKey(9, 13, b) }
+func BenchmarkGPK17_24(b *testing.B)  { benchmarkDKGGroupPublicKey(17, 24, b) }
+func BenchmarkGPK81_121(b *testing.B) { benchmarkDKGGroupPublicKey(81, 121, b) }
+
+func benchmarkDKGGroupPublicKey(k, n int, b *testing.B) {
+	round := uint64(1)
+	reset := uint64(0)
+	_, pubKeys, err := test.NewKeys(n)
+	if err != nil {
+		panic(err)
+	}
+	gov, err := test.NewGovernance(test.NewState(DKGDelayRound,
+		pubKeys, 100, &common.NullLogger{}, true), ConfigRoundShift)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, pk := range pubKeys {
+		_, pubShare := dkg.NewPrivateKeyShares(k)
+		gov.AddDKGMasterPublicKey(&typesDKG.MasterPublicKey{
+			ProposerID:      types.NewNodeID(pk),
+			Round:           round,
+			Reset:           reset,
+			DKGID:           typesDKG.NewID(types.NewNodeID(pk)),
+			PublicKeyShares: *pubShare.Move(),
+		})
+	}
+
+	mpk := gov.DKGMasterPublicKeys(round)
+	comp := gov.DKGComplaints(round)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		// DKG is finished.
+		gpk, err := typesDKG.NewGroupPublicKey(round, mpk, comp, k)
+		if err != nil {
+			panic(err)
+		}
+		if len(gpk.QualifyIDs) != n {
+			panic("not enough qualified IDs")
+		}
+	}
+}
+
+func BenchmarkNPKs4_7(b *testing.B)    { benchmarkDKGNodePublicKeys(4, 7, b) }
+func BenchmarkNPKs9_13(b *testing.B)   { benchmarkDKGNodePublicKeys(9, 13, b) }
+func BenchmarkNPKs17_24(b *testing.B)  { benchmarkDKGNodePublicKeys(17, 24, b) }
+func BenchmarkNPKs81_121(b *testing.B) { benchmarkDKGNodePublicKeys(81, 121, b) }
+
+func benchmarkDKGNodePublicKeys(k, n int, b *testing.B) {
+	round := uint64(1)
+	reset := uint64(0)
+	_, pubKeys, err := test.NewKeys(n)
+	if err != nil {
+		panic(err)
+	}
+	gov, err := test.NewGovernance(test.NewState(DKGDelayRound,
+		pubKeys, 100, &common.NullLogger{}, true), ConfigRoundShift)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, pk := range pubKeys {
+		_, pubShare := dkg.NewPrivateKeyShares(k)
+		gov.AddDKGMasterPublicKey(&typesDKG.MasterPublicKey{
+			ProposerID:      types.NewNodeID(pk),
+			Round:           round,
+			Reset:           reset,
+			DKGID:           typesDKG.NewID(types.NewNodeID(pk)),
+			PublicKeyShares: *pubShare.Move(),
+		})
+	}
+
+	mpk := gov.DKGMasterPublicKeys(round)
+	comp := gov.DKGComplaints(round)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		// DKG is finished.
+		npks, err := typesDKG.NewNodePublicKeys(round, mpk, comp, k)
+		if err != nil {
+			panic(err)
+		}
+		if len(npks.QualifyIDs) != n {
+			panic("not enough qualified IDs")
+		}
+	}
+}
+
+func BenchmarkCalcQ4_7(b *testing.B)    { benchmarkCalcQualified(4, 7, b) }
+func BenchmarkCalcQ9_13(b *testing.B)   { benchmarkCalcQualified(9, 13, b) }
+func BenchmarkCalcQ17_24(b *testing.B)  { benchmarkCalcQualified(17, 24, b) }
+func BenchmarkCalcQ81_121(b *testing.B) { benchmarkCalcQualified(81, 121, b) }
+
+func benchmarkCalcQualified(k, n int, b *testing.B) {
+	round := uint64(1)
+	reset := uint64(0)
+	_, pubKeys, err := test.NewKeys(n)
+	if err != nil {
+		panic(err)
+	}
+	gov, err := test.NewGovernance(test.NewState(DKGDelayRound,
+		pubKeys, 100, &common.NullLogger{}, true), ConfigRoundShift)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, pk := range pubKeys {
+		_, pubShare := dkg.NewPrivateKeyShares(k)
+		gov.AddDKGMasterPublicKey(&typesDKG.MasterPublicKey{
+			ProposerID:      types.NewNodeID(pk),
+			Round:           round,
+			Reset:           reset,
+			DKGID:           typesDKG.NewID(types.NewNodeID(pk)),
+			PublicKeyShares: *pubShare.Move(),
+		})
+	}
+
+	mpk := gov.DKGMasterPublicKeys(round)
+	comp := gov.DKGComplaints(round)
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		// DKG is finished.
+		_, q, err := typesDKG.CalcQualifyNodes(mpk, comp, k)
+		if err != nil {
+			panic(err)
+		}
+		if len(q) != n {
+			panic("not enough qualified IDs")
+		}
+	}
+}
diff --git a/dex/consensus/core/interfaces.go b/dex/consensus/core/interfaces.go
new file mode 100644
index 000000000..c88b3dcb4
--- /dev/null
+++ b/dex/consensus/core/interfaces.go
@@ -0,0 +1,182 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+	"time"
+
+	"github.com/dexon-foundation/dexon-consensus/common"
+	"github.com/dexon-foundation/dexon-consensus/core/crypto"
+	"github.com/dexon-foundation/dexon-consensus/core/types"
+	typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+)
+
+// Application describes the application interface that interacts with DEXON
+// consensus core.
+type Application interface {
+	// PreparePayload is called when consensus core is preparing a block.
+	PreparePayload(position types.Position) ([]byte, error)
+
+	// PrepareWitness will return the witness data no lower than consensusHeight.
+	PrepareWitness(consensusHeight uint64) (types.Witness, error)
+
+	// VerifyBlock verifies if the block is valid.
+	VerifyBlock(block *types.Block) types.BlockVerifyStatus
+
+	// BlockConfirmed is called when a block is confirmed and added to lattice.
+	BlockConfirmed(block types.Block)
+
+	// BlockDelivered is called when a block is added to the compaction chain.
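+	// The rand argument carries the block's randomness, i.e. the
+	// threshold signature produced for the block during agreement.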
+ BlockDelivered(hash common.Hash, position types.Position, rand []byte) +} + +// Debug describes the application interface that requires +// more detailed consensus execution. +type Debug interface { + // BlockReceived is called when the block received in agreement. + BlockReceived(common.Hash) + // BlockReady is called when the block's randomness is ready. + BlockReady(common.Hash) +} + +// Network describs the network interface that interacts with DEXON consensus +// core. +type Network interface { + // PullBlocks tries to pull blocks from the DEXON network. + PullBlocks(hashes common.Hashes) + + // PullVotes tries to pull votes from the DEXON network. + PullVotes(position types.Position) + + // BroadcastVote broadcasts vote to all nodes in DEXON network. + BroadcastVote(vote *types.Vote) + + // BroadcastBlock broadcasts block to all nodes in DEXON network. + BroadcastBlock(block *types.Block) + + // BroadcastAgreementResult broadcasts agreement result to DKG set. + BroadcastAgreementResult(randRequest *types.AgreementResult) + + // SendDKGPrivateShare sends PrivateShare to a DKG participant. + SendDKGPrivateShare(pub crypto.PublicKey, prvShare *typesDKG.PrivateShare) + + // BroadcastDKGPrivateShare broadcasts PrivateShare to all DKG participants. + BroadcastDKGPrivateShare(prvShare *typesDKG.PrivateShare) + + // BroadcastDKGPartialSignature broadcasts partialSignature to all + // DKG participants. + BroadcastDKGPartialSignature(psig *typesDKG.PartialSignature) + + // ReceiveChan returns a channel to receive messages from DEXON network. + ReceiveChan() <-chan types.Msg + + // ReportBadPeerChan returns a channel to report bad peer. + ReportBadPeerChan() chan<- interface{} +} + +// Governance interface specifies interface to control the governance contract. +// Note that there are a lot more methods in the governance contract, that this +// interface only define those that are required to run the consensus algorithm. +type Governance interface { + // Configuration returns the configuration at a given round. + // Return the genesis configuration if round == 0. + Configuration(round uint64) *types.Config + + // CRS returns the CRS for a given round. Return the genesis CRS if + // round == 0. + // + // The CRS returned is the proposed or latest reseted one, it would be + // changed later if corresponding DKG set failed to generate group public + // key. + CRS(round uint64) common.Hash + + // Propose a CRS of round. + ProposeCRS(round uint64, signedCRS []byte) + + // NodeSet returns the node set at a given round. + // Return the genesis node set if round == 0. + NodeSet(round uint64) []crypto.PublicKey + + // Get the begin height of a round. + GetRoundHeight(round uint64) uint64 + + //// DKG-related methods. + + // AddDKGComplaint adds a DKGComplaint. + AddDKGComplaint(complaint *typesDKG.Complaint) + + // DKGComplaints gets all the DKGComplaints of round. + DKGComplaints(round uint64) []*typesDKG.Complaint + + // AddDKGMasterPublicKey adds a DKGMasterPublicKey. + AddDKGMasterPublicKey(masterPublicKey *typesDKG.MasterPublicKey) + + // DKGMasterPublicKeys gets all the DKGMasterPublicKey of round. + DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey + + // AddDKGMPKReady adds a DKG ready message. + AddDKGMPKReady(ready *typesDKG.MPKReady) + + // IsDKGMPKReady checks if DKG's master public key preparation is ready. + IsDKGMPKReady(round uint64) bool + + // AddDKGFinalize adds a DKG finalize message. + AddDKGFinalize(final *typesDKG.Finalize) + + // IsDKGFinal checks if DKG is final. 
+ IsDKGFinal(round uint64) bool + + // AddDKGSuccess adds a DKG success message. + AddDKGSuccess(success *typesDKG.Success) + + // IsDKGSuccess checks if DKG is success. + IsDKGSuccess(round uint64) bool + + // ReportForkVote reports a node for forking votes. + ReportForkVote(vote1, vote2 *types.Vote) + + // ReportForkBlock reports a node for forking blocks. + ReportForkBlock(block1, block2 *types.Block) + + // ResetDKG resets latest DKG data and propose new CRS. + ResetDKG(newSignedCRS []byte) + + // DKGResetCount returns the reset count for DKG of given round. + DKGResetCount(round uint64) uint64 +} + +// Ticker define the capability to tick by interval. +type Ticker interface { + // Tick would return a channel, which would be triggered until next tick. + Tick() <-chan time.Time + + // Stop the ticker. + Stop() + + // Retart the ticker and clear all internal data. + Restart() +} + +// Recovery interface for interacting with recovery information. +type Recovery interface { + // ProposeSkipBlock proposes a skip block. + ProposeSkipBlock(height uint64) error + + // Votes gets the number of votes of given height. + Votes(height uint64) (uint64, error) +} diff --git a/dex/consensus/core/leader-selector.go b/dex/consensus/core/leader-selector.go new file mode 100644 index 000000000..91b2e9979 --- /dev/null +++ b/dex/consensus/core/leader-selector.go @@ -0,0 +1,149 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "math/big" + "sync" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/types" +) + +type validLeaderFn func(block *types.Block, crs common.Hash) (bool, error) + +// Some constant value. 
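+//
+// maxHash is 2^256-1, the largest value a Keccak256 digest can take when read
+// as a big integer, and one is the rational constant 1; together they turn a
+// signature's distance d into a selection probability p = 1 - d/maxHash.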
+var ( + maxHash *big.Int + one *big.Rat +) + +func init() { + hash := make([]byte, common.HashLength) + for i := range hash { + hash[i] = 0xff + } + maxHash = big.NewInt(0).SetBytes(hash) + one = big.NewRat(1, 1) +} + +type leaderSelector struct { + hashCRS common.Hash + numCRS *big.Int + minCRSBlock *big.Int + minBlockHash common.Hash + pendingBlocks map[common.Hash]*types.Block + validLeader validLeaderFn + lock sync.Mutex + logger common.Logger +} + +func newLeaderSelector( + validLeader validLeaderFn, logger common.Logger) *leaderSelector { + return &leaderSelector{ + minCRSBlock: maxHash, + validLeader: validLeader, + logger: logger, + } +} + +func (l *leaderSelector) distance(sig crypto.Signature) *big.Int { + hash := crypto.Keccak256Hash(sig.Signature[:]) + num := big.NewInt(0) + num.SetBytes(hash[:]) + num.Abs(num.Sub(l.numCRS, num)) + return num +} + +func (l *leaderSelector) probability(sig crypto.Signature) float64 { + dis := l.distance(sig) + prob := big.NewRat(1, 1).SetFrac(dis, maxHash) + p, _ := prob.Sub(one, prob).Float64() + return p +} + +func (l *leaderSelector) restart(crs common.Hash) { + numCRS := big.NewInt(0) + numCRS.SetBytes(crs[:]) + l.lock.Lock() + defer l.lock.Unlock() + l.numCRS = numCRS + l.hashCRS = crs + l.minCRSBlock = maxHash + l.minBlockHash = types.NullBlockHash + l.pendingBlocks = make(map[common.Hash]*types.Block) +} + +func (l *leaderSelector) leaderBlockHash() common.Hash { + l.lock.Lock() + defer l.lock.Unlock() + for _, b := range l.pendingBlocks { + ok, dist := l.potentialLeader(b) + if !ok { + continue + } + ok, err := l.validLeader(b, l.hashCRS) + if err != nil { + l.logger.Error("Error checking validLeader", "error", err, "block", b) + delete(l.pendingBlocks, b.Hash) + continue + } + if ok { + l.updateLeader(b, dist) + delete(l.pendingBlocks, b.Hash) + } + } + return l.minBlockHash +} + +func (l *leaderSelector) processBlock(block *types.Block) error { + l.lock.Lock() + defer l.lock.Unlock() + ok, dist := l.potentialLeader(block) + if !ok { + return nil + } + ok, err := l.validLeader(block, l.hashCRS) + if err != nil { + return err + } + if !ok { + l.pendingBlocks[block.Hash] = block + return nil + } + l.updateLeader(block, dist) + return nil +} + +func (l *leaderSelector) potentialLeader(block *types.Block) (bool, *big.Int) { + dist := l.distance(block.CRSSignature) + cmp := l.minCRSBlock.Cmp(dist) + return (cmp > 0 || (cmp == 0 && block.Hash.Less(l.minBlockHash))), dist +} + +func (l *leaderSelector) updateLeader(block *types.Block, dist *big.Int) { + l.minCRSBlock = dist + l.minBlockHash = block.Hash +} + +func (l *leaderSelector) findPendingBlock( + hash common.Hash) (*types.Block, bool) { + b, e := l.pendingBlocks[hash] + return b, e +} diff --git a/dex/consensus/core/leader-selector_test.go b/dex/consensus/core/leader-selector_test.go new file mode 100644 index 000000000..473866308 --- /dev/null +++ b/dex/consensus/core/leader-selector_test.go @@ -0,0 +1,184 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. 
+// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/dexon-foundation/dexon-consensus/core/utils" +) + +type LeaderSelectorTestSuite struct { + suite.Suite + mockValidLeaderDefault bool + mockValidLeaderDB map[common.Hash]bool + mockValidLeader validLeaderFn +} + +func (s *LeaderSelectorTestSuite) SetupTest() { + s.mockValidLeaderDefault = true + s.mockValidLeaderDB = make(map[common.Hash]bool) + s.mockValidLeader = func(b *types.Block, _ common.Hash) (bool, error) { + if ret, exist := s.mockValidLeaderDB[b.Hash]; exist { + return ret, nil + } + return s.mockValidLeaderDefault, nil + } +} + +func (s *LeaderSelectorTestSuite) newLeader() *leaderSelector { + l := newLeaderSelector(s.mockValidLeader, &common.NullLogger{}) + l.restart(common.NewRandomHash()) + return l +} + +func (s *LeaderSelectorTestSuite) TestDistance() { + leader := s.newLeader() + hash := common.NewRandomHash() + prv, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + sig, err := prv.Sign(hash) + s.Require().NoError(err) + dis := leader.distance(sig) + s.Equal(-1, dis.Cmp(maxHash)) +} + +func (s *LeaderSelectorTestSuite) TestProbability() { + leader := s.newLeader() + prv1, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + prv2, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + for { + hash := common.NewRandomHash() + sig1, err := prv1.Sign(hash) + s.Require().NoError(err) + sig2, err := prv2.Sign(hash) + s.Require().NoError(err) + dis1 := leader.distance(sig1) + dis2 := leader.distance(sig2) + prob1 := leader.probability(sig1) + prob2 := leader.probability(sig2) + s.True(prob1 <= 1 && prob1 >= 0) + s.True(prob2 <= 1 && prob2 >= 0) + cmp := dis1.Cmp(dis2) + if cmp == 0 { + s.True(dis1.Cmp(dis2) == 0) + continue + } + if cmp == 1 { + s.True(prob2 > prob1) + } else if cmp == -1 { + s.True(prob2 < prob1) + } + break + } +} + +func (s *LeaderSelectorTestSuite) TestLeaderBlockHash() { + leader := s.newLeader() + blocks := make(map[common.Hash]*types.Block) + for i := 0; i < 10; i++ { + prv, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + block := &types.Block{ + ProposerID: types.NewNodeID(prv.PublicKey()), + Hash: common.NewRandomHash(), + } + s.Require().NoError( + utils.NewSigner(prv).SignCRS(block, leader.hashCRS)) + s.Require().NoError(leader.processBlock(block)) + blocks[block.Hash] = block + } + blockHash := leader.leaderBlockHash() + leaderBlock, exist := blocks[blockHash] + s.Require().True(exist) + leaderDist := leader.distance(leaderBlock.CRSSignature) + for _, block := range blocks { + if block == leaderBlock { + continue + } + dist := leader.distance(block.CRSSignature) + s.Equal(-1, leaderDist.Cmp(dist)) + } +} + +func (s *LeaderSelectorTestSuite) TestValidLeaderFn() { + leader := s.newLeader() + blocks := make(map[common.Hash]*types.Block) + for i := 0; i < 10; i++ { + prv, err := ecdsa.NewPrivateKey() + 
s.Require().NoError(err) + block := &types.Block{ + ProposerID: types.NewNodeID(prv.PublicKey()), + Hash: common.NewRandomHash(), + } + s.Require().NoError( + utils.NewSigner(prv).SignCRS(block, leader.hashCRS)) + s.Require().NoError(leader.processBlock(block)) + blocks[block.Hash] = block + } + blockHash := leader.leaderBlockHash() + + s.mockValidLeaderDB[blockHash] = false + leader.restart(leader.hashCRS) + for _, b := range blocks { + s.Require().NoError(leader.processBlock(b)) + } + s.NotEqual(blockHash, leader.leaderBlockHash()) + s.mockValidLeaderDB[blockHash] = true + s.Equal(blockHash, leader.leaderBlockHash()) + s.Len(leader.pendingBlocks, 0) +} + +func (s *LeaderSelectorTestSuite) TestPotentialLeader() { + leader := s.newLeader() + blocks := make(map[common.Hash]*types.Block) + for i := 0; i < 10; i++ { + if i > 0 { + s.mockValidLeaderDefault = false + } + prv, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + block := &types.Block{ + ProposerID: types.NewNodeID(prv.PublicKey()), + Hash: common.NewRandomHash(), + } + s.Require().NoError( + utils.NewSigner(prv).SignCRS(block, leader.hashCRS)) + ok, _ := leader.potentialLeader(block) + s.Require().NoError(leader.processBlock(block)) + if i > 0 { + if ok { + s.Contains(leader.pendingBlocks, block.Hash) + } else { + s.NotContains(leader.pendingBlocks, block.Hash) + } + blocks[block.Hash] = block + } + } +} + +func TestLeaderSelector(t *testing.T) { + suite.Run(t, new(LeaderSelectorTestSuite)) +} diff --git a/dex/consensus/core/nonblocking.go b/dex/consensus/core/nonblocking.go new file mode 100644 index 000000000..10b47b822 --- /dev/null +++ b/dex/consensus/core/nonblocking.go @@ -0,0 +1,137 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "fmt" + "sync" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/types" +) + +type blockConfirmedEvent struct { + block *types.Block +} + +type blockDeliveredEvent struct { + blockHash common.Hash + blockPosition types.Position + rand []byte +} + +// nonBlocking implements these interfaces and is a decorator for +// them that makes the methods to be non-blocking. +// - Application +// - Debug +// - It also provides nonblockig for db update. 
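+//
+// A minimal usage sketch (hypothetical values, assuming app implements both
+// Application and Debug):
+//
+//	nb := newNonBlocking(app, app)
+//	nb.BlockConfirmed(blk) // returns immediately; handled by the worker
+//	nb.wait()              // blocks until the event queue is drained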
+type nonBlocking struct { + app Application + debug Debug + eventChan chan interface{} + events []interface{} + eventsChange *sync.Cond + running sync.WaitGroup +} + +func newNonBlocking(app Application, debug Debug) *nonBlocking { + nonBlockingModule := &nonBlocking{ + app: app, + debug: debug, + eventChan: make(chan interface{}, 6), + events: make([]interface{}, 0, 100), + eventsChange: sync.NewCond(&sync.Mutex{}), + } + go nonBlockingModule.run() + return nonBlockingModule +} + +func (nb *nonBlocking) addEvent(event interface{}) { + nb.eventsChange.L.Lock() + defer nb.eventsChange.L.Unlock() + nb.events = append(nb.events, event) + nb.eventsChange.Broadcast() +} + +func (nb *nonBlocking) run() { + // This go routine consume the first event from events and call the + // corresponding methods of Application/Debug/db. + for { + var event interface{} + func() { + nb.eventsChange.L.Lock() + defer nb.eventsChange.L.Unlock() + for len(nb.events) == 0 { + nb.eventsChange.Wait() + } + event = nb.events[0] + nb.events = nb.events[1:] + nb.running.Add(1) + }() + switch e := event.(type) { + case blockConfirmedEvent: + nb.app.BlockConfirmed(*e.block) + case blockDeliveredEvent: + nb.app.BlockDelivered(e.blockHash, e.blockPosition, e.rand) + default: + fmt.Printf("Unknown event %v.", e) + } + nb.running.Done() + nb.eventsChange.Broadcast() + } +} + +// wait will wait for all event in events finishes. +func (nb *nonBlocking) wait() { + nb.eventsChange.L.Lock() + defer nb.eventsChange.L.Unlock() + for len(nb.events) > 0 { + nb.eventsChange.Wait() + } + nb.running.Wait() +} + +// PreparePayload cannot be non-blocking. +func (nb *nonBlocking) PreparePayload(position types.Position) ([]byte, error) { + return nb.app.PreparePayload(position) +} + +// PrepareWitness cannot be non-blocking. +func (nb *nonBlocking) PrepareWitness(height uint64) (types.Witness, error) { + return nb.app.PrepareWitness(height) +} + +// VerifyBlock cannot be non-blocking. +func (nb *nonBlocking) VerifyBlock(block *types.Block) types.BlockVerifyStatus { + return nb.app.VerifyBlock(block) +} + +// BlockConfirmed is called when a block is confirmed and added to lattice. +func (nb *nonBlocking) BlockConfirmed(block types.Block) { + nb.addEvent(blockConfirmedEvent{&block}) +} + +// BlockDelivered is called when a block is add to the compaction chain. +func (nb *nonBlocking) BlockDelivered(blockHash common.Hash, + blockPosition types.Position, rand []byte) { + nb.addEvent(blockDeliveredEvent{ + blockHash: blockHash, + blockPosition: blockPosition, + rand: rand, + }) +} diff --git a/dex/consensus/core/nonblocking_test.go b/dex/consensus/core/nonblocking_test.go new file mode 100644 index 000000000..4816186aa --- /dev/null +++ b/dex/consensus/core/nonblocking_test.go @@ -0,0 +1,160 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "testing" + "time" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/types" +) + +// slowApp is an Application instance slow things down in every method. +type slowApp struct { + sleep time.Duration + blockConfirmed map[common.Hash]struct{} + blockDelivered map[common.Hash]struct{} +} + +func newSlowApp(sleep time.Duration) *slowApp { + return &slowApp{ + sleep: sleep, + blockConfirmed: make(map[common.Hash]struct{}), + blockDelivered: make(map[common.Hash]struct{}), + } +} + +func (app *slowApp) PreparePayload(_ types.Position) ([]byte, error) { + return []byte{}, nil +} + +func (app *slowApp) PrepareWitness(_ uint64) (types.Witness, error) { + return types.Witness{}, nil +} + +func (app *slowApp) VerifyBlock(_ *types.Block) types.BlockVerifyStatus { + return types.VerifyOK +} + +func (app *slowApp) BlockConfirmed(block types.Block) { + time.Sleep(app.sleep) + app.blockConfirmed[block.Hash] = struct{}{} +} + +func (app *slowApp) BlockDelivered(blockHash common.Hash, + blockPosition types.Position, _ []byte) { + time.Sleep(app.sleep) + app.blockDelivered[blockHash] = struct{}{} +} + +func (app *slowApp) BlockReceived(hash common.Hash) {} + +func (app *slowApp) BlockReady(hash common.Hash) {} + +// noDebugApp is to make sure nonBlocking works when Debug interface +// is not implemented by the provided Application instance. +type noDebugApp struct { + blockConfirmed map[common.Hash]struct{} + blockDelivered map[common.Hash]struct{} +} + +func newNoDebugApp() *noDebugApp { + return &noDebugApp{ + blockConfirmed: make(map[common.Hash]struct{}), + blockDelivered: make(map[common.Hash]struct{}), + } +} + +func (app *noDebugApp) PreparePayload(_ types.Position) ([]byte, error) { + panic("test") +} + +func (app *noDebugApp) PrepareWitness(_ uint64) (types.Witness, error) { + panic("test") +} + +func (app *noDebugApp) VerifyBlock(_ *types.Block) types.BlockVerifyStatus { + panic("test") +} + +func (app *noDebugApp) BlockConfirmed(block types.Block) { + app.blockConfirmed[block.Hash] = struct{}{} +} + +func (app *noDebugApp) BlockDelivered(blockHash common.Hash, + blockPosition types.Position, _ []byte) { + app.blockDelivered[blockHash] = struct{}{} +} + +type NonBlockingTestSuite struct { + suite.Suite +} + +func (s *NonBlockingTestSuite) TestNonBlocking() { + sleep := 50 * time.Millisecond + app := newSlowApp(sleep) + nbModule := newNonBlocking(app, app) + hashes := make(common.Hashes, 10) + for idx := range hashes { + hashes[idx] = common.NewRandomHash() + } + now := time.Now().UTC() + shouldFinish := now.Add(100 * time.Millisecond) + + // Start doing some 'heavy' job. + for _, hash := range hashes { + nbModule.BlockConfirmed(types.Block{ + Hash: hash, + Witness: types.Witness{}, + }) + nbModule.BlockDelivered(hash, types.Position{}, []byte(nil)) + } + + // nonBlocking should be non-blocking. + s.True(shouldFinish.After(time.Now().UTC())) + + nbModule.wait() + for _, hash := range hashes { + s.Contains(app.blockConfirmed, hash) + s.Contains(app.blockDelivered, hash) + } +} + +func (s *NonBlockingTestSuite) TestNoDebug() { + app := newNoDebugApp() + nbModule := newNonBlocking(app, nil) + hash := common.NewRandomHash() + // Test BlockConfirmed. 
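+	// With debug == nil, the decorator must still queue and deliver both
+	// application callbacks without touching any Debug method.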
+ nbModule.BlockConfirmed(types.Block{Hash: hash}) + // Test BlockDelivered + nbModule.BlockDelivered(hash, types.Position{}, []byte(nil)) + nbModule.wait() + s.Contains(app.blockConfirmed, hash) + s.Contains(app.blockDelivered, hash) + // Test other synchronous methods. + s.Panics(func() { nbModule.PreparePayload(types.Position{}) }) + s.Panics(func() { nbModule.PrepareWitness(0) }) + s.Panics(func() { nbModule.VerifyBlock(nil) }) +} + +func TestNonBlocking(t *testing.T) { + suite.Run(t, new(NonBlockingTestSuite)) +} diff --git a/dex/consensus/core/syncer/agreement.go b/dex/consensus/core/syncer/agreement.go new file mode 100644 index 000000000..d39c24627 --- /dev/null +++ b/dex/consensus/core/syncer/agreement.go @@ -0,0 +1,301 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package syncer + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/dexon-foundation/dexon-consensus/core/utils" +) + +// Struct agreement implements struct of BA (Byzantine Agreement) protocol +// needed in syncer, which only receives agreement results. +type agreement struct { + chainTip uint64 + cache *utils.NodeSetCache + tsigVerifierCache *core.TSigVerifierCache + inputChan chan interface{} + outputChan chan<- *types.Block + pullChan chan<- common.Hash + blocks map[types.Position]map[common.Hash]*types.Block + agreementResults map[common.Hash][]byte + latestCRSRound uint64 + pendingAgrs map[uint64]map[common.Hash]*types.AgreementResult + pendingBlocks map[uint64]map[common.Hash]*types.Block + logger common.Logger + confirmedBlocks map[common.Hash]struct{} + ctx context.Context + ctxCancel context.CancelFunc +} + +// newAgreement creates a new agreement instance. 
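+// Confirmed blocks are pushed to ch, while pullChan is used to request
+// blocks (by hash) whose agreement results arrived ahead of them.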
+func newAgreement(chainTip uint64, + ch chan<- *types.Block, pullChan chan<- common.Hash, + cache *utils.NodeSetCache, verifier *core.TSigVerifierCache, + logger common.Logger) *agreement { + a := &agreement{ + chainTip: chainTip, + cache: cache, + tsigVerifierCache: verifier, + inputChan: make(chan interface{}, 1000), + outputChan: ch, + pullChan: pullChan, + blocks: make(map[types.Position]map[common.Hash]*types.Block), + agreementResults: make(map[common.Hash][]byte), + logger: logger, + pendingAgrs: make( + map[uint64]map[common.Hash]*types.AgreementResult), + pendingBlocks: make( + map[uint64]map[common.Hash]*types.Block), + confirmedBlocks: make(map[common.Hash]struct{}), + } + a.ctx, a.ctxCancel = context.WithCancel(context.Background()) + return a +} + +// run starts the agreement, this does not start a new routine, go a new +// routine explicitly in the caller. +func (a *agreement) run() { + defer a.ctxCancel() + for { + select { + case val, ok := <-a.inputChan: + if !ok { + // InputChan is closed by network when network ends. + return + } + switch v := val.(type) { + case *types.Block: + if v.Position.Round >= core.DKGDelayRound && v.IsFinalized() { + a.processFinalizedBlock(v) + } else { + a.processBlock(v) + } + case *types.AgreementResult: + a.processAgreementResult(v) + case uint64: + a.processNewCRS(v) + } + } + } +} + +func (a *agreement) processBlock(b *types.Block) { + if _, exist := a.confirmedBlocks[b.Hash]; exist { + return + } + if rand, exist := a.agreementResults[b.Hash]; exist { + if len(b.Randomness) == 0 { + b.Randomness = rand + } + a.confirm(b) + } else { + if _, exist := a.blocks[b.Position]; !exist { + a.blocks[b.Position] = make(map[common.Hash]*types.Block) + } + a.blocks[b.Position][b.Hash] = b + } +} + +func (a *agreement) processFinalizedBlock(block *types.Block) { + // Cache those results that CRS is not ready yet. + if _, exists := a.confirmedBlocks[block.Hash]; exists { + a.logger.Trace("finalized block already confirmed", "block", block) + return + } + if block.Position.Round > a.latestCRSRound { + pendingsForRound, exists := a.pendingBlocks[block.Position.Round] + if !exists { + pendingsForRound = make(map[common.Hash]*types.Block) + a.pendingBlocks[block.Position.Round] = pendingsForRound + } + pendingsForRound[block.Hash] = block + a.logger.Trace("finalized block cached", "block", block) + return + } + if err := utils.VerifyBlockSignature(block); err != nil { + return + } + verifier, ok, err := a.tsigVerifierCache.UpdateAndGet( + block.Position.Round) + if err != nil { + a.logger.Error("error verifying block randomness", + "block", block, + "error", err) + return + } + if !ok { + a.logger.Error("cannot verify block randomness", "block", block) + return + } + if !verifier.VerifySignature(block.Hash, crypto.Signature{ + Type: "bls", + Signature: block.Randomness, + }) { + a.logger.Error("incorrect block randomness", "block", block) + return + } + a.confirm(block) +} + +func (a *agreement) processAgreementResult(r *types.AgreementResult) { + // Cache those results that CRS is not ready yet. 
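+	// Results for rounds beyond the latest known CRS cannot be verified
+	// yet; they are parked in pendingAgrs and replayed once processNewCRS
+	// advances latestCRSRound.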
+ if _, exists := a.confirmedBlocks[r.BlockHash]; exists { + a.logger.Trace("Agreement result already confirmed", "result", r) + return + } + if r.Position.Round > a.latestCRSRound { + pendingsForRound, exists := a.pendingAgrs[r.Position.Round] + if !exists { + pendingsForRound = make(map[common.Hash]*types.AgreementResult) + a.pendingAgrs[r.Position.Round] = pendingsForRound + } + pendingsForRound[r.BlockHash] = r + a.logger.Trace("Agreement result cached", "result", r) + return + } + if err := core.VerifyAgreementResult(r, a.cache); err != nil { + a.logger.Error("Agreement result verification failed", + "result", r, + "error", err) + return + } + if r.Position.Round >= core.DKGDelayRound { + verifier, ok, err := a.tsigVerifierCache.UpdateAndGet(r.Position.Round) + if err != nil { + a.logger.Error("error verifying agreement result randomness", + "result", r, + "error", err) + return + } + if !ok { + a.logger.Error("cannot verify agreement result randomness", "result", r) + return + } + if !verifier.VerifySignature(r.BlockHash, crypto.Signature{ + Type: "bls", + Signature: r.Randomness, + }) { + a.logger.Error("incorrect agreement result randomness", "result", r) + return + } + } else { + // Special case for rounds before DKGDelayRound. + if bytes.Compare(r.Randomness, core.NoRand) != 0 { + a.logger.Error("incorrect agreement result randomness", "result", r) + return + } + } + if r.IsEmptyBlock { + b := &types.Block{ + Position: r.Position, + Randomness: r.Randomness, + } + // Empty blocks should be confirmed directly, they won't be sent over + // the wire. + a.confirm(b) + return + } + if bs, exist := a.blocks[r.Position]; exist { + if b, exist := bs[r.BlockHash]; exist { + b.Randomness = r.Randomness + a.confirm(b) + return + } + } + a.agreementResults[r.BlockHash] = r.Randomness +loop: + for { + select { + case a.pullChan <- r.BlockHash: + break loop + case <-a.ctx.Done(): + a.logger.Error("Pull request is not sent", + "position", &r.Position, + "hash", r.BlockHash.String()[:6]) + return + case <-time.After(500 * time.Millisecond): + a.logger.Debug("Pull request is unable to send", + "position", &r.Position, + "hash", r.BlockHash.String()[:6]) + } + } +} + +func (a *agreement) processNewCRS(round uint64) { + if round <= a.latestCRSRound { + return + } + prevRound := a.latestCRSRound + 1 + a.latestCRSRound = round + // Verify all pending results. + for r := prevRound; r <= a.latestCRSRound; r++ { + pendingsForRound := a.pendingAgrs[r] + if pendingsForRound == nil { + continue + } + delete(a.pendingAgrs, r) + for _, res := range pendingsForRound { + if err := core.VerifyAgreementResult(res, a.cache); err != nil { + a.logger.Error("Invalid agreement result", + "result", res, + "error", err) + continue + } + a.logger.Error("Flush agreement result", "result", res) + a.processAgreementResult(res) + break + } + } +} + +// confirm notifies consensus the confirmation of a block in BA. 
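The pull-request loop just shown, and the confirm method that follows, share one guarded-send idiom: block on the channel, wake periodically to log, and abort once the context is cancelled, so a slow consumer can never wedge the syncer. A runnable distillation of that shape; sendOrAbort and its types are illustrative, not part of the library:

package main

import (
	"context"
	"fmt"
	"time"
)

// sendOrAbort keeps retrying a channel send, wakes up every 500ms to log,
// and gives up when the context is cancelled.
func sendOrAbort(ctx context.Context, ch chan<- string, payload string) bool {
	for {
		select {
		case ch <- payload:
			return true // sent
		case <-ctx.Done():
			return false // caller is shutting down; drop the payload
		case <-time.After(500 * time.Millisecond):
			fmt.Println("channel full, retrying:", payload)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	ch := make(chan string) // unbuffered: the consumer may lag
	done := make(chan struct{})
	go func() {
		defer close(done)
		time.Sleep(time.Second) // simulate a slow consumer
		fmt.Println("got:", <-ch)
	}()
	if sendOrAbort(ctx, ch, "block-hash") {
		<-done // wait for the consumer before exiting
	}
}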
+func (a *agreement) confirm(b *types.Block) { + if !b.IsFinalized() { + panic(fmt.Errorf("confirm a block %s without randomness", b)) + } + if _, exist := a.confirmedBlocks[b.Hash]; !exist { + delete(a.blocks, b.Position) + delete(a.agreementResults, b.Hash) + loop: + for { + select { + case a.outputChan <- b: + break loop + case <-a.ctx.Done(): + a.logger.Error("Confirmed block is not sent", "block", b) + return + case <-time.After(500 * time.Millisecond): + a.logger.Debug("Agreement output channel is full", "block", b) + } + } + a.confirmedBlocks[b.Hash] = struct{}{} + } + if b.Position.Height > a.chainTip+1 { + if _, exist := a.confirmedBlocks[b.ParentHash]; !exist { + a.pullChan <- b.ParentHash + } + } +} diff --git a/dex/consensus/core/syncer/consensus.go b/dex/consensus/core/syncer/consensus.go new file mode 100644 index 000000000..496c0f9a8 --- /dev/null +++ b/dex/consensus/core/syncer/consensus.go @@ -0,0 +1,543 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package syncer + +import ( + "context" + "fmt" + "sort" + "sync" + "time" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/db" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/dexon-foundation/dexon-consensus/core/utils" +) + +var ( + // ErrAlreadySynced is reported when syncer is synced. + ErrAlreadySynced = fmt.Errorf("already synced") + // ErrNotSynced is reported when syncer is not synced yet. + ErrNotSynced = fmt.Errorf("not synced yet") + // ErrGenesisBlockReached is reported when genesis block reached. + ErrGenesisBlockReached = fmt.Errorf("genesis block reached") + // ErrInvalidBlockOrder is reported when SyncBlocks receives unordered + // blocks. + ErrInvalidBlockOrder = fmt.Errorf("invalid block order") + // ErrInvalidSyncingHeight raised when the blocks to sync is not following + // the compaction chain tip in database. + ErrInvalidSyncingHeight = fmt.Errorf("invalid syncing height") +) + +// Consensus is for syncing consensus module. +type Consensus struct { + db db.Database + gov core.Governance + dMoment time.Time + logger common.Logger + app core.Application + prv crypto.PrivateKey + network core.Network + nodeSetCache *utils.NodeSetCache + tsigVerifier *core.TSigVerifierCache + + blocks types.BlocksByPosition + agreementModule *agreement + agreementRoundCut uint64 + heightEvt *common.Event + roundEvt *utils.RoundEvent + + // lock for accessing all fields. 
+ lock sync.RWMutex + duringBuffering bool + latestCRSRound uint64 + waitGroup sync.WaitGroup + agreementWaitGroup sync.WaitGroup + pullChan chan common.Hash + receiveChan chan *types.Block + ctx context.Context + ctxCancel context.CancelFunc + syncedLastBlock *types.Block + syncedConsensus *core.Consensus + syncedSkipNext bool + dummyCancel context.CancelFunc + dummyFinished <-chan struct{} + dummyMsgBuffer []types.Msg + initChainTipHeight uint64 +} + +// NewConsensus creates an instance for Consensus (syncer consensus). +func NewConsensus( + initHeight uint64, + dMoment time.Time, + app core.Application, + gov core.Governance, + db db.Database, + network core.Network, + prv crypto.PrivateKey, + logger common.Logger) *Consensus { + + con := &Consensus{ + dMoment: dMoment, + app: app, + gov: gov, + db: db, + network: network, + nodeSetCache: utils.NewNodeSetCache(gov), + tsigVerifier: core.NewTSigVerifierCache(gov, 7), + prv: prv, + logger: logger, + receiveChan: make(chan *types.Block, 1000), + pullChan: make(chan common.Hash, 1000), + heightEvt: common.NewEvent(), + } + con.ctx, con.ctxCancel = context.WithCancel(context.Background()) + _, con.initChainTipHeight = db.GetCompactionChainTipInfo() + con.agreementModule = newAgreement( + con.initChainTipHeight, + con.receiveChan, + con.pullChan, + con.nodeSetCache, + con.tsigVerifier, + con.logger) + con.agreementWaitGroup.Add(1) + go func() { + defer con.agreementWaitGroup.Done() + con.agreementModule.run() + }() + if err := con.deliverPendingBlocks(initHeight); err != nil { + panic(err) + } + return con +} + +func (con *Consensus) deliverPendingBlocks(height uint64) error { + if height >= con.initChainTipHeight { + return nil + } + blocks := make([]*types.Block, 0, con.initChainTipHeight-height) + hash, _ := con.db.GetCompactionChainTipInfo() + for { + block, err := con.db.GetBlock(hash) + if err != nil { + return err + } + if block.Position.Height == height { + break + } + blocks = append(blocks, &block) + hash = block.ParentHash + } + sort.Sort(types.BlocksByPosition(blocks)) + for _, b := range blocks { + con.logger.Debug("Syncer BlockConfirmed", "block", b) + con.app.BlockConfirmed(*b) + con.logger.Debug("Syncer BlockDelivered", "block", b) + con.app.BlockDelivered(b.Hash, b.Position, b.Randomness) + } + return nil +} + +func (con *Consensus) assureBuffering() { + if func() bool { + con.lock.RLock() + defer con.lock.RUnlock() + return con.duringBuffering + }() { + return + } + con.lock.Lock() + defer con.lock.Unlock() + if con.duringBuffering { + return + } + con.duringBuffering = true + // Get latest block to prepare utils.RoundEvent. + var ( + err error + blockHash, height = con.db.GetCompactionChainTipInfo() + ) + if height == 0 { + con.roundEvt, err = utils.NewRoundEvent(con.ctx, con.gov, con.logger, + types.Position{}, core.ConfigRoundShift) + } else { + var b types.Block + if b, err = con.db.GetBlock(blockHash); err == nil { + con.roundEvt, err = utils.NewRoundEvent(con.ctx, con.gov, + con.logger, b.Position, core.ConfigRoundShift) + } + } + if err != nil { + panic(err) + } + // Make sure con.roundEvt stopped before stopping con.agreementModule. + con.waitGroup.Add(1) + // Register a round event handler to reset node set cache, this handler + // should be the highest priority. 
+ con.roundEvt.Register(func(evts []utils.RoundEventParam) { + for _, e := range evts { + if e.Reset == 0 { + continue + } + con.nodeSetCache.Purge(e.Round + 1) + con.tsigVerifier.Purge(e.Round + 1) + } + }) + // Register a round event handler to notify CRS to agreementModule. + con.roundEvt.Register(func(evts []utils.RoundEventParam) { + con.waitGroup.Add(1) + go func() { + defer con.waitGroup.Done() + for _, e := range evts { + select { + case <-con.ctx.Done(): + return + default: + } + for func() bool { + select { + case <-con.ctx.Done(): + return false + case con.agreementModule.inputChan <- e.Round: + return false + case <-time.After(500 * time.Millisecond): + con.logger.Warn( + "Agreement input channel is full when notifying new round", + "round", e.Round, + ) + return true + } + }() { + } + } + }() + }) + // Register a round event handler to validate next round. + con.roundEvt.Register(func(evts []utils.RoundEventParam) { + con.heightEvt.RegisterHeight( + evts[len(evts)-1].NextRoundValidationHeight(), + utils.RoundEventRetryHandlerGenerator(con.roundEvt, con.heightEvt), + ) + }) + con.roundEvt.TriggerInitEvent() + con.startAgreement() + con.startNetwork() +} + +func (con *Consensus) checkIfSynced(blocks []*types.Block) (synced bool) { + con.lock.RLock() + defer con.lock.RUnlock() + defer func() { + con.logger.Debug("Syncer synced status", + "last-block", blocks[len(blocks)-1], + "synced", synced, + ) + }() + if len(con.blocks) == 0 || len(blocks) == 0 { + return + } + synced = !blocks[len(blocks)-1].Position.Older(con.blocks[0].Position) + return +} + +func (con *Consensus) buildAllEmptyBlocks() { + con.lock.Lock() + defer con.lock.Unlock() + // Clean empty blocks on tips of chains. + for len(con.blocks) > 0 && con.isEmptyBlock(con.blocks[0]) { + con.blocks = con.blocks[1:] + } + // Build empty blocks. + for i, b := range con.blocks { + if con.isEmptyBlock(b) { + if con.blocks[i-1].Position.Height+1 == b.Position.Height { + con.buildEmptyBlock(b, con.blocks[i-1]) + } + } + } +} + +// ForceSync forces syncer to become synced. +func (con *Consensus) ForceSync(lastPos types.Position, skip bool) { + if con.syncedLastBlock != nil { + return + } + hash, height := con.db.GetCompactionChainTipInfo() + if height < lastPos.Height { + panic(fmt.Errorf("compaction chain not synced height %d, tip %d", + lastPos.Height, height)) + } else if height > lastPos.Height { + skip = false + } + block, err := con.db.GetBlock(hash) + if err != nil { + panic(err) + } + con.syncedLastBlock = &block + con.stopBuffering() + // We might call stopBuffering without calling assureBuffering. + if con.dummyCancel == nil { + con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver( + context.Background(), con.network.ReceiveChan(), + func(msg types.Msg) { + con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg) + }) + } + con.syncedSkipNext = skip + con.logger.Info("Force Sync", "block", &block, "skip", skip) +} + +// SyncBlocks syncs blocks from compaction chain, latest is true if the caller +// regards the blocks are the latest ones. Notice that latest can be true for +// many times. +// NOTICE: parameter "blocks" should be consecutive in compaction height. +// NOTICE: this method is not expected to be called concurrently. 
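The implementation below rejects a batch early under two rules: blocks must be consecutive in height (ErrInvalidBlockOrder), and the first block must sit exactly one height above the compaction-chain tip recorded in the database (ErrInvalidSyncingHeight). A pure-function sketch of just those two checks; checkConsecutive is illustrative, not the library API:

package main

import (
	"errors"
	"fmt"
)

var errInvalidBlockOrder = errors.New("invalid block order")

// checkConsecutive mirrors the preconditions SyncBlocks enforces below:
// heights must increase by exactly one, starting right after tipHeight.
func checkConsecutive(tipHeight uint64, heights []uint64) error {
	if len(heights) == 0 {
		return nil
	}
	if heights[0] != tipHeight+1 {
		return fmt.Errorf("invalid syncing height: got %d, expected %d",
			heights[0], tipHeight+1)
	}
	for i := 1; i < len(heights); i++ {
		if heights[i] != heights[i-1]+1 {
			return errInvalidBlockOrder
		}
	}
	return nil
}

func main() {
	fmt.Println(checkConsecutive(10, []uint64{11, 12, 13})) // <nil>
	fmt.Println(checkConsecutive(10, []uint64{12, 13}))     // invalid syncing height
	fmt.Println(checkConsecutive(10, []uint64{11, 13}))     // invalid block order
}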
+func (con *Consensus) SyncBlocks(
+	blocks []*types.Block, latest bool) (synced bool, err error) {
+	defer func() {
+		con.logger.Debug("SyncBlocks returned",
+			"synced", synced,
+			"error", err,
+			"last-block", con.syncedLastBlock,
+		)
+	}()
+	if con.syncedLastBlock != nil {
+		synced, err = true, ErrAlreadySynced
+		return
+	}
+	if len(blocks) == 0 {
+		return
+	}
+	// Check if blocks are consecutive.
+	for i := 1; i < len(blocks); i++ {
+		if blocks[i].Position.Height != blocks[i-1].Position.Height+1 {
+			err = ErrInvalidBlockOrder
+			return
+		}
+	}
+	// Make sure the first block is the next block of the current compaction
+	// chain tip in DB.
+	_, tipHeight := con.db.GetCompactionChainTipInfo()
+	if blocks[0].Position.Height != tipHeight+1 {
+		con.logger.Error("Mismatched block height",
+			"now", blocks[0].Position.Height,
+			"expected", tipHeight+1,
+		)
+		err = ErrInvalidSyncingHeight
+		return
+	}
+	con.logger.Trace("SyncBlocks",
+		"position", &blocks[0].Position,
+		"len", len(blocks),
+		"latest", latest,
+	)
+	for _, b := range blocks {
+		if err = con.db.PutBlock(*b); err != nil {
+			// A block might be put into db when confirmed by BA, but not
+			// finalized yet.
+			if err == db.ErrBlockExists {
+				err = con.db.UpdateBlock(*b)
+			}
+			if err != nil {
+				return
+			}
+		}
+		if err = con.db.PutCompactionChainTipInfo(
+			b.Hash, b.Position.Height); err != nil {
+			return
+		}
+		con.heightEvt.NotifyHeight(b.Position.Height)
+	}
+	if latest {
+		con.assureBuffering()
+		con.buildAllEmptyBlocks()
+		// Check if compaction and agreements' blocks are overlapped. The
+		// overlapping of compaction chain and BA's oldest blocks means the
+		// syncing is done.
+		if con.checkIfSynced(blocks) {
+			con.stopBuffering()
+			con.syncedLastBlock = blocks[len(blocks)-1]
+			synced = true
+		}
+	}
+	return
+}
+
+// GetSyncedConsensus returns the core.Consensus instance after synced.
+func (con *Consensus) GetSyncedConsensus() (*core.Consensus, error) {
+	con.lock.Lock()
+	defer con.lock.Unlock()
+	if con.syncedConsensus != nil {
+		return con.syncedConsensus, nil
+	}
+	if con.syncedLastBlock == nil {
+		return nil, ErrNotSynced
+	}
+	// Flush all blocks in con.blocks into core.Consensus, and build
+	// core.Consensus from the syncer.
+	con.dummyCancel()
+	<-con.dummyFinished
+	var err error
+	con.syncedConsensus, err = core.NewConsensusFromSyncer(
+		con.syncedLastBlock,
+		con.syncedSkipNext,
+		con.dMoment,
+		con.app,
+		con.gov,
+		con.db,
+		con.network,
+		con.prv,
+		con.blocks,
+		con.dummyMsgBuffer,
+		con.logger)
+	return con.syncedConsensus, err
+}
+
+// stopBuffering stops the syncer buffering routines.
+//
+// This method is mainly for the caller to stop the syncer before it is
+// synced; the syncer calls this method automatically after being synced.
+func (con *Consensus) stopBuffering() {
+	if func() (notBuffering bool) {
+		con.lock.RLock()
+		defer con.lock.RUnlock()
+		notBuffering = !con.duringBuffering
+		return
+	}() {
+		return
+	}
+	if func() (alreadyCanceled bool) {
+		con.lock.Lock()
+		defer con.lock.Unlock()
+		if !con.duringBuffering {
+			alreadyCanceled = true
+			return
+		}
+		con.duringBuffering = false
+		con.logger.Trace("Syncer is about to stop")
+		// Stop network and CRS routines, and wait until they are all stopped.
+		con.ctxCancel()
+		return
+	}() {
+		return
+	}
+	con.logger.Trace("Stop syncer modules")
+	con.roundEvt.Stop()
+	con.waitGroup.Done()
+	// Wait for all routines that depend on con.agreementModule to stop.
+ con.waitGroup.Wait() + // Since there is no one waiting for the receive channel of fullnode, we + // need to launch a dummy receiver right away. + con.dummyCancel, con.dummyFinished = utils.LaunchDummyReceiver( + context.Background(), con.network.ReceiveChan(), + func(msg types.Msg) { + con.dummyMsgBuffer = append(con.dummyMsgBuffer, msg) + }) + // Stop agreements. + con.logger.Trace("Stop syncer agreement modules") + con.stopAgreement() + con.logger.Trace("Syncer stopped") + return +} + +// isEmptyBlock checks if a block is an empty block by both its hash and parent +// hash are empty. +func (con *Consensus) isEmptyBlock(b *types.Block) bool { + return b.Hash == common.Hash{} && b.ParentHash == common.Hash{} +} + +// buildEmptyBlock builds an empty block in agreement. +func (con *Consensus) buildEmptyBlock(b *types.Block, parent *types.Block) { + cfg := utils.GetConfigWithPanic(con.gov, b.Position.Round, con.logger) + b.Timestamp = parent.Timestamp.Add(cfg.MinBlockInterval) + b.Witness.Height = parent.Witness.Height + b.Witness.Data = make([]byte, len(parent.Witness.Data)) + copy(b.Witness.Data, parent.Witness.Data) +} + +// startAgreement starts agreements for receiving votes and agreements. +func (con *Consensus) startAgreement() { + // Start a routine for listening receive channel and pull block channel. + go func() { + for { + select { + case b, ok := <-con.receiveChan: + if !ok { + return + } + func() { + con.lock.Lock() + defer con.lock.Unlock() + if len(con.blocks) > 0 && + !b.Position.Newer(con.blocks[0].Position) { + return + } + con.blocks = append(con.blocks, b) + sort.Sort(con.blocks) + }() + case h, ok := <-con.pullChan: + if !ok { + return + } + con.network.PullBlocks(common.Hashes{h}) + } + } + }() +} + +// startNetwork starts network for receiving blocks and agreement results. +func (con *Consensus) startNetwork() { + con.waitGroup.Add(1) + go func() { + defer con.waitGroup.Done() + loop: + for { + select { + case val := <-con.network.ReceiveChan(): + switch v := val.Payload.(type) { + case *types.Block: + case *types.AgreementResult: + // Avoid byzantine nodes attack by broadcasting older + // agreement results. Normal nodes might report 'synced' + // while still fall behind other nodes. + if v.Position.Height <= con.initChainTipHeight { + continue loop + } + default: + continue loop + } + con.agreementModule.inputChan <- val.Payload + case <-con.ctx.Done(): + break loop + } + } + }() +} + +func (con *Consensus) stopAgreement() { + if con.agreementModule.inputChan != nil { + close(con.agreementModule.inputChan) + } + con.agreementWaitGroup.Wait() + con.agreementModule.inputChan = nil + close(con.receiveChan) + close(con.pullChan) +} diff --git a/dex/consensus/core/syncer/watch-cat.go b/dex/consensus/core/syncer/watch-cat.go new file mode 100644 index 000000000..f2e197ebe --- /dev/null +++ b/dex/consensus/core/syncer/watch-cat.go @@ -0,0 +1,156 @@ +// Copyright 2019 The dexon-consensus Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus-core library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package syncer
+
+import (
+	"context"
+	"time"
+
+	"github.com/dexon-foundation/dexon-consensus/common"
+	"github.com/dexon-foundation/dexon-consensus/core"
+	"github.com/dexon-foundation/dexon-consensus/core/types"
+	"github.com/dexon-foundation/dexon-consensus/core/utils"
+)
+
+type configReader interface {
+	Configuration(round uint64) *types.Config
+}
+
+// WatchCat is responsible for signaling whether the syncer should be
+// terminated.
+type WatchCat struct {
+	recovery     core.Recovery
+	timeout      time.Duration
+	configReader configReader
+	feed         chan types.Position
+	lastPosition types.Position
+	polling      time.Duration
+	ctx          context.Context
+	cancel       context.CancelFunc
+	logger       common.Logger
+}
+
+// NewWatchCat creates a new WatchCat 🐱 object.
+func NewWatchCat(
+	recovery core.Recovery,
+	configReader configReader,
+	polling time.Duration,
+	timeout time.Duration,
+	logger common.Logger) *WatchCat {
+	wc := &WatchCat{
+		recovery:     recovery,
+		timeout:      timeout,
+		configReader: configReader,
+		feed:         make(chan types.Position),
+		polling:      polling,
+		logger:       logger,
+	}
+	return wc
+}
+
+// Feed the WatchCat so it won't produce the termination signal.
+func (wc *WatchCat) Feed(position types.Position) {
+	wc.feed <- position
+}
+
+// Start the WatchCat.
+func (wc *WatchCat) Start() {
+	wc.Stop()
+	wc.lastPosition = types.Position{}
+	wc.ctx, wc.cancel = context.WithCancel(context.Background())
+	go func() {
+		var lastPos types.Position
+	MonitorLoop:
+		for {
+			select {
+			case <-wc.ctx.Done():
+				return
+			default:
+			}
+			select {
+			case <-wc.ctx.Done():
+				return
+			case pos := <-wc.feed:
+				if !pos.Newer(lastPos) {
+					wc.logger.Warn("Feed with older height",
+						"pos", pos, "lastPos", lastPos)
+					continue
+				}
+				lastPos = pos
+			case <-time.After(wc.timeout):
+				break MonitorLoop
+			}
+		}
+		go func() {
+			for {
+				select {
+				case <-wc.ctx.Done():
+					return
+				case <-wc.feed:
+				}
+			}
+		}()
+		defer wc.cancel()
+		proposed := false
+		threshold := uint64(
+			utils.GetConfigWithPanic(wc.configReader, lastPos.Round, wc.logger).
+				NotarySetSize / 2)
+		wc.logger.Info("Threshold for recovery", "votes", threshold)
+	ResetLoop:
+		for {
+			if !proposed {
+				wc.logger.Info("Calling Recovery.ProposeSkipBlock",
+					"height", lastPos.Height)
+				if err := wc.recovery.ProposeSkipBlock(lastPos.Height); err != nil {
+					wc.logger.Warn("Failed to proposeSkipBlock", "height", lastPos.Height, "error", err)
+				} else {
+					proposed = true
+				}
+			}
+			votes, err := wc.recovery.Votes(lastPos.Height)
+			if err != nil {
+				wc.logger.Error("Failed to get recovery votes", "height", lastPos.Height, "error", err)
+			} else if votes > threshold {
+				wc.logger.Info("Threshold for recovery reached!")
+				wc.lastPosition = lastPos
+				break ResetLoop
+			}
+			select {
+			case <-wc.ctx.Done():
+				return
+			case <-time.After(wc.polling):
+			}
+		}
+	}()
+}
+
+// Stop the WatchCat.
+func (wc *WatchCat) Stop() {
+	if wc.cancel != nil {
+		wc.cancel()
+	}
+}
+
+// Meow returns a closed channel if the syncer should be terminated.
+func (wc *WatchCat) Meow() <-chan struct{} {
+	return wc.ctx.Done()
+}
+
+// LastPosition returns the last position for recovery.
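Start above combines a feed-or-timeout watchdog phase with a recovery-polling phase. The watchdog half reduces to a small reusable shape; a sketch assuming integer feeds in place of types.Position (watchdog and its parameters are illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

// watchdog consumes feeds until none arrive for `timeout`, then fires by
// closing the returned channel. Stale (non-increasing) feeds are ignored,
// like the pos.Newer check above.
func watchdog(ctx context.Context, feed <-chan int, timeout time.Duration) <-chan struct{} {
	fired := make(chan struct{})
	go func() {
		last := -1
		for {
			select {
			case <-ctx.Done():
				return
			case v := <-feed:
				if v <= last {
					continue // stale feed
				}
				last = v
			case <-time.After(timeout):
				close(fired)
				return
			}
		}
	}()
	return fired
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	feed := make(chan int)
	fired := watchdog(ctx, feed, 100*time.Millisecond)
	for i := 1; i <= 3; i++ {
		feed <- i
		time.Sleep(50 * time.Millisecond) // feeding fast enough: no fire
	}
	<-fired // stop feeding: the watchdog fires after the timeout
	fmt.Println("watchdog fired")
}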
+func (wc *WatchCat) LastPosition() types.Position { + return wc.lastPosition +} diff --git a/dex/consensus/core/syncer/watch-cat_test.go b/dex/consensus/core/syncer/watch-cat_test.go new file mode 100644 index 000000000..b1f1bf818 --- /dev/null +++ b/dex/consensus/core/syncer/watch-cat_test.go @@ -0,0 +1,124 @@ +// Copyright 2019 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. // +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package syncer + +import ( + "sync" + "testing" + "time" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/types" +) + +type WatchCatTestSuite struct { + suite.Suite +} + +type testConfigAccessor struct { + notarySetSize uint32 +} + +func (cfg *testConfigAccessor) Configuration(uint64) *types.Config { + return &types.Config{ + NotarySetSize: cfg.notarySetSize, + } +} + +type recovery struct { + lock sync.RWMutex + votes map[uint64]uint64 +} + +func (rec *recovery) ProposeSkipBlock(height uint64) error { + rec.lock.Lock() + defer rec.lock.Unlock() + rec.votes[height]++ + return nil +} + +func (rec *recovery) Votes(height uint64) (uint64, error) { + rec.lock.RLock() + defer rec.lock.RUnlock() + return rec.votes[height], nil +} + +func (s *WatchCatTestSuite) newWatchCat( + notarySetSize uint32, polling, timeout time.Duration) (*WatchCat, *recovery) { + cfg := &testConfigAccessor{ + notarySetSize: notarySetSize, + } + recovery := &recovery{ + votes: make(map[uint64]uint64), + } + wc := NewWatchCat(recovery, cfg, polling, timeout, &common.NullLogger{}) + return wc, recovery +} + +func (s *WatchCatTestSuite) TestBasicUsage() { + polling := 50 * time.Millisecond + timeout := 50 * time.Millisecond + notarySet := uint32(24) + watchCat, rec := s.newWatchCat(notarySet, polling, timeout) + watchCat.Start() + defer watchCat.Stop() + pos := types.Position{ + Height: 10, + } + + for i := 0; i < 10; i++ { + pos.Height++ + watchCat.Feed(pos) + time.Sleep(timeout / 2) + select { + case <-watchCat.Meow(): + s.FailNow("unexpected terminated") + default: + } + } + + time.Sleep(timeout) + rec.lock.RLock() + s.Require().Equal(1, len(rec.votes)) + s.Require().Equal(uint64(1), rec.votes[pos.Height]) + rec.lock.RUnlock() + + time.Sleep(polling * 2) + select { + case <-watchCat.Meow(): + s.FailNow("unexpected terminated") + default: + } + + rec.lock.Lock() + rec.votes[pos.Height] = uint64(notarySet/2 + 1) + rec.lock.Unlock() + + time.Sleep(polling * 2) + select { + case <-watchCat.Meow(): + default: + s.FailNow("expecting terminated") + } + s.Equal(pos, watchCat.LastPosition()) +} + +func TestWatchCat(t *testing.T) { + suite.Run(t, new(WatchCatTestSuite)) +} diff --git a/dex/consensus/core/test/app.go b/dex/consensus/core/test/app.go new file mode 100644 index 000000000..769683ecc --- /dev/null 
+++ b/dex/consensus/core/test/app.go
@@ -0,0 +1,384 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/dexon-foundation/dexon-consensus/common"
+	"github.com/dexon-foundation/dexon-consensus/core/types"
+	"github.com/dexon-foundation/dexon-consensus/core/utils"
+)
+
+var (
+	// ErrEmptyDeliverSequence means there is no delivery event in this App
+	// instance.
+	ErrEmptyDeliverSequence = fmt.Errorf("empty deliver sequence")
+	// ErrMismatchBlockHashSequence means the deliver sequences of two App
+	// instances are different.
+	ErrMismatchBlockHashSequence = fmt.Errorf("mismatch block hash sequence")
+	// ErrMismatchRandomness means the randomness of two blocks with the
+	// same hash from two App instances is different.
+	ErrMismatchRandomness = fmt.Errorf("mismatch randomness")
+	// ErrApplicationIntegrityFailed means the internal data of an App
+	// instance is inconsistent.
+	ErrApplicationIntegrityFailed = fmt.Errorf("application integrity failed")
+	// ErrTimestampOutOfOrder means a later delivered block has a timestamp
+	// older than the previous block's.
+	ErrTimestampOutOfOrder = fmt.Errorf("timestamp out of order")
+	// ErrHeightOutOfOrder means a later delivered block has a height not
+	// equal to the previous block's height plus one.
+	ErrHeightOutOfOrder = fmt.Errorf("height out of order")
+	// ErrDeliveredBlockNotConfirmed means a block is delivered without being
+	// confirmed first.
+	ErrDeliveredBlockNotConfirmed = fmt.Errorf("delivered block not confirmed")
+	// ErrAckingBlockNotDelivered means the delivered sequence does not form a
+	// DAG.
+	ErrAckingBlockNotDelivered = fmt.Errorf("acking block not delivered")
+	// ErrLowerPendingHeight raised when lastPendingHeight is lower than the
+	// height to be prepared.
+	ErrLowerPendingHeight = fmt.Errorf(
+		"last pending height < consensus height")
+	// ErrConfirmedHeightNotIncreasing raised when the height of a confirmed
+	// block doesn't follow the previous confirmed block on the same chain.
+	ErrConfirmedHeightNotIncreasing = fmt.Errorf(
+		"confirmed height not increasing")
+	// ErrParentBlockNotDelivered raised when the parent block is not seen by
+	// this app.
+	ErrParentBlockNotDelivered = fmt.Errorf("parent block not delivered")
+	// ErrMismatchDeliverPosition raised when the block hash and position are
+	// mismatched when calling BlockDelivered.
+	ErrMismatchDeliverPosition = fmt.Errorf("mismatch deliver position")
+	// ErrEmptyRandomness raised when a block contains empty randomness.
+	ErrEmptyRandomness = fmt.Errorf("empty randomness")
+	// ErrInvalidHeight refers to an invalid value for block height.
+ ErrInvalidHeight = fmt.Errorf("invalid height") +) + +// AppDeliveredRecord caches information when this application received +// a block delivered notification. +type AppDeliveredRecord struct { + Rand []byte + When time.Time + Pos types.Position +} + +// App implements Application interface for testing purpose. +type App struct { + Confirmed map[common.Hash]*types.Block + LastConfirmedHeight uint64 + confirmedLock sync.RWMutex + Delivered map[common.Hash]*AppDeliveredRecord + DeliverSequence common.Hashes + deliveredLock sync.RWMutex + state *State + gov *Governance + rEvt *utils.RoundEvent + hEvt *common.Event + roundToNotify uint64 +} + +// NewApp constructs a TestApp instance. +func NewApp(initRound uint64, gov *Governance, rEvt *utils.RoundEvent) ( + app *App) { + app = &App{ + Confirmed: make(map[common.Hash]*types.Block), + Delivered: make(map[common.Hash]*AppDeliveredRecord), + DeliverSequence: common.Hashes{}, + gov: gov, + rEvt: rEvt, + hEvt: common.NewEvent(), + roundToNotify: initRound, + } + if gov != nil { + app.state = gov.State() + } + if rEvt != nil { + rEvt.Register(func(evts []utils.RoundEventParam) { + app.hEvt.RegisterHeight( + evts[len(evts)-1].NextRoundValidationHeight(), + utils.RoundEventRetryHandlerGenerator(rEvt, app.hEvt), + ) + }) + rEvt.TriggerInitEvent() + } + return app +} + +// PreparePayload implements Application interface. +func (app *App) PreparePayload(position types.Position) ([]byte, error) { + if app.state == nil { + return []byte{}, nil + } + return app.state.PackRequests() +} + +// PrepareWitness implements Application interface. +func (app *App) PrepareWitness(height uint64) (types.Witness, error) { + // Although we only perform reading operations here, to make sure what we + // prepared unique under concurrent access to this method, writer lock is + // used. + app.deliveredLock.Lock() + defer app.deliveredLock.Unlock() + hash, lastRec := app.LastDeliveredRecordNoLock() + if lastRec == nil { + return types.Witness{}, nil + } + if lastRec.Pos.Height < height { + return types.Witness{}, ErrLowerPendingHeight + } + return types.Witness{ + Height: lastRec.Pos.Height, + Data: hash.Bytes(), + }, nil +} + +// VerifyBlock implements Application interface. +func (app *App) VerifyBlock(block *types.Block) types.BlockVerifyStatus { + // Make sure we can handle the witness carried by this block. + app.deliveredLock.RLock() + defer app.deliveredLock.RUnlock() + _, rec := app.LastDeliveredRecordNoLock() + if rec != nil && rec.Pos.Height < block.Witness.Height { + return types.VerifyRetryLater + } + // Confirm if the consensus height matches corresponding block hash. + var h common.Hash + copy(h[:], block.Witness.Data) + app.confirmedLock.RLock() + defer app.confirmedLock.RUnlock() + if block.Witness.Height >= types.GenesisHeight { + // Make sure the hash and height are matched. + confirmed, exist := app.Confirmed[h] + if !exist || block.Witness.Height != confirmed.Position.Height { + return types.VerifyInvalidBlock + } + } + if block.Position.Height != types.GenesisHeight { + // This check is copied from fullnode, below is quoted from coresponding + // comment: + // + // Check if target block is the next height to be verified, we can only + // verify the next block in a given chain. + if app.LastConfirmedHeight+1 != block.Position.Height { + return types.VerifyRetryLater + } + } + return types.VerifyOK +} + +// BlockConfirmed implements Application interface. 
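VerifyBlock above resolves to three outcomes: retry when the witness points beyond what this node has delivered, invalid when the witness data does not match the block confirmed at the witnessed height, and OK otherwise. The real method looks blocks up by hash; this stand-alone sketch keys by height purely for brevity, and all names are illustrative:

package main

import "fmt"

type verifyStatus int

const (
	verifyOK verifyStatus = iota
	verifyRetryLater
	verifyInvalidBlock
)

// verifyWitness distills the decision above into a pure function: retry if
// the witness is ahead of what we delivered, reject if the witness hash does
// not match the block confirmed at that height. confirmedAt: height -> hash.
func verifyWitness(lastDelivered uint64, confirmedAt map[uint64]string,
	witnessHeight uint64, witnessHash string) verifyStatus {
	if witnessHeight > lastDelivered {
		return verifyRetryLater
	}
	if h, ok := confirmedAt[witnessHeight]; !ok || h != witnessHash {
		return verifyInvalidBlock
	}
	return verifyOK
}

func main() {
	confirmed := map[uint64]string{1: "h1", 2: "h2"}
	fmt.Println(verifyWitness(2, confirmed, 3, "h3"))  // 1 (retry later)
	fmt.Println(verifyWitness(2, confirmed, 2, "bad")) // 2 (invalid)
	fmt.Println(verifyWitness(2, confirmed, 2, "h2"))  // 0 (ok)
}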
+func (app *App) BlockConfirmed(b types.Block) {
+	app.confirmedLock.Lock()
+	defer app.confirmedLock.Unlock()
+	app.Confirmed[b.Hash] = &b
+	if app.LastConfirmedHeight+1 != b.Position.Height {
+		panic(ErrConfirmedHeightNotIncreasing)
+	}
+	app.LastConfirmedHeight = b.Position.Height
+}
+
+// ClearUndeliveredBlocks --
+func (app *App) ClearUndeliveredBlocks() {
+	app.deliveredLock.RLock()
+	defer app.deliveredLock.RUnlock()
+	app.confirmedLock.Lock()
+	defer app.confirmedLock.Unlock()
+	app.LastConfirmedHeight = uint64(len(app.DeliverSequence))
+}
+
+// BlockDelivered implements Application interface.
+func (app *App) BlockDelivered(blockHash common.Hash, pos types.Position,
+	rand []byte) {
+	func() {
+		app.deliveredLock.Lock()
+		defer app.deliveredLock.Unlock()
+		app.Delivered[blockHash] = &AppDeliveredRecord{
+			Rand: common.CopyBytes(rand),
+			When: time.Now().UTC(),
+			Pos:  pos,
+		}
+		if len(app.DeliverSequence) > 0 {
+			// Make sure the parent block is also delivered.
+			lastHash := app.DeliverSequence[len(app.DeliverSequence)-1]
+			d, exists := app.Delivered[lastHash]
+			if !exists {
+				panic(ErrParentBlockNotDelivered)
+			}
+			if d.Pos.Height+1 != pos.Height {
+				panic(ErrHeightOutOfOrder)
+			}
+		}
+		app.DeliverSequence = append(app.DeliverSequence, blockHash)
+	}()
+	// Apply packed state change requests in payload.
+	func() {
+		if app.state == nil {
+			return
+		}
+		app.confirmedLock.RLock()
+		defer app.confirmedLock.RUnlock()
+		b, exists := app.Confirmed[blockHash]
+		if !exists {
+			panic(ErrDeliveredBlockNotConfirmed)
+		}
+		if !b.Position.Equal(pos) {
+			panic(ErrMismatchDeliverPosition)
+		}
+		if err := app.state.Apply(b.Payload); err != nil {
+			if err != ErrDuplicatedChange {
+				panic(err)
+			}
+		}
+		if app.roundToNotify == pos.Round {
+			if app.gov != nil {
+				app.gov.NotifyRound(app.roundToNotify, pos.Height)
+				app.roundToNotify++
+			}
+		}
+	}()
+	app.hEvt.NotifyHeight(pos.Height)
+}
+
+// GetLatestDeliveredPosition returns the latest position of a delivered
+// block seen by this application instance.
+func (app *App) GetLatestDeliveredPosition() types.Position {
+	app.deliveredLock.RLock()
+	defer app.deliveredLock.RUnlock()
+	app.confirmedLock.RLock()
+	defer app.confirmedLock.RUnlock()
+	if len(app.DeliverSequence) == 0 {
+		return types.Position{}
+	}
+	return app.Confirmed[app.DeliverSequence[len(app.DeliverSequence)-1]].Position
+}
+
+// Compare performs these checks against another App instance and returns an
+// error if any fails:
+//   - the deliver sequences match, compared by block hash.
+//   - the randomness of each delivered block is identical.
+func (app *App) Compare(other *App) (err error) {
+	app.WithLock(func(app *App) {
+		other.WithLock(func(other *App) {
+			minLength := len(app.DeliverSequence)
+			if minLength > len(other.DeliverSequence) {
+				minLength = len(other.DeliverSequence)
+			}
+			if minLength == 0 {
+				err = ErrEmptyDeliverSequence
+				return
+			}
+			// Here we assume both Apps begin from the same height.
+			for idx, h := range app.DeliverSequence[:minLength] {
+				hOther := other.DeliverSequence[idx]
+				if hOther != h {
+					err = ErrMismatchBlockHashSequence
+					return
+				}
+				if !bytes.Equal(app.Delivered[h].Rand,
+					other.Delivered[h].Rand) {
+					err = ErrMismatchRandomness
+					return
+				}
+			}
+		})
+	})
+	return
+}
+
+// Verify checks the integrity of data received by this App instance.
+func (app *App) Verify() error { + app.confirmedLock.RLock() + defer app.confirmedLock.RUnlock() + app.deliveredLock.RLock() + defer app.deliveredLock.RUnlock() + + if len(app.DeliverSequence) == 0 { + return ErrEmptyDeliverSequence + } + if len(app.DeliverSequence) != len(app.Delivered) { + return ErrApplicationIntegrityFailed + } + expectHeight := uint64(1) + prevTime := time.Time{} + for _, h := range app.DeliverSequence { + _, exist := app.Confirmed[h] + if !exist { + return ErrDeliveredBlockNotConfirmed + } + _, exist = app.Delivered[h] + if !exist { + return ErrApplicationIntegrityFailed + } + b, exist := app.Confirmed[h] + if !exist { + return ErrApplicationIntegrityFailed + } + // Make sure the consensus time is incremental. + if prevTime.After(b.Timestamp) { + return ErrTimestampOutOfOrder + } + prevTime = b.Timestamp + // Make sure the consensus height is incremental. + rec, exist := app.Delivered[h] + if !exist { + return ErrApplicationIntegrityFailed + } + if len(rec.Rand) == 0 { + return ErrEmptyRandomness + } + // Make sure height is valid. + if b.Position.Height < types.GenesisHeight { + return ErrInvalidHeight + } + if expectHeight != rec.Pos.Height { + return ErrHeightOutOfOrder + } + expectHeight++ + } + return nil +} + +// BlockReceived implements interface Debug. +func (app *App) BlockReceived(hash common.Hash) {} + +// BlockReady implements interface Debug. +func (app *App) BlockReady(hash common.Hash) {} + +// WithLock provides a backdoor to check status of App with reader lock. +func (app *App) WithLock(function func(*App)) { + app.confirmedLock.RLock() + defer app.confirmedLock.RUnlock() + app.deliveredLock.RLock() + defer app.deliveredLock.RUnlock() + + function(app) +} + +// LastDeliveredRecordNoLock returns the latest AppDeliveredRecord under lock. +func (app *App) LastDeliveredRecordNoLock() (common.Hash, *AppDeliveredRecord) { + var hash common.Hash + if len(app.DeliverSequence) == 0 { + return hash, nil + } + hash = app.DeliverSequence[len(app.DeliverSequence)-1] + return hash, app.Delivered[hash] +} diff --git a/dex/consensus/core/test/app_test.go b/dex/consensus/core/test/app_test.go new file mode 100644 index 000000000..828a3c35b --- /dev/null +++ b/dex/consensus/core/test/app_test.go @@ -0,0 +1,350 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. 
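App.Verify above walks the deliver sequence and enforces three chain invariants: heights start at the genesis height and increase by exactly one, consensus timestamps never decrease, and every delivered block carries non-empty randomness. A simplified stand-in, not the real API, that checks the same invariants (assuming a genesis height of 1, as in the code above):

package main

import (
	"errors"
	"fmt"
	"time"
)

type deliveredBlock struct {
	height     uint64
	timestamp  time.Time
	randomness []byte
}

// verifySequence mirrors the invariants App.Verify enforces: consecutive
// heights from 1, monotonic timestamps, and non-empty randomness.
func verifySequence(seq []deliveredBlock) error {
	if len(seq) == 0 {
		return errors.New("empty deliver sequence")
	}
	expect, prev := uint64(1), time.Time{}
	for _, b := range seq {
		if b.height != expect {
			return errors.New("height out of order")
		}
		if prev.After(b.timestamp) {
			return errors.New("timestamp out of order")
		}
		if len(b.randomness) == 0 {
			return errors.New("empty randomness")
		}
		expect++
		prev = b.timestamp
	}
	return nil
}

func main() {
	now := time.Now()
	fmt.Println(verifySequence([]deliveredBlock{
		{1, now, []byte{0x1}},
		{2, now.Add(time.Second), []byte{0x2}},
	})) // <nil>
}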
+ +package test + +import ( + "bytes" + "context" + "fmt" + "testing" + "time" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" + "github.com/dexon-foundation/dexon-consensus/core/types" + typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" + "github.com/dexon-foundation/dexon-consensus/core/utils" + "github.com/stretchr/testify/suite" +) + +func getCRS(round, reset uint64) []byte { + return []byte(fmt.Sprintf("r#%d,reset#%d", round, reset)) +} + +type evtParamToCheck struct { + round uint64 + reset uint64 + height uint64 + crs common.Hash +} + +type AppTestSuite struct { + suite.Suite + + pubKeys []crypto.PublicKey + signers []*utils.Signer + logger common.Logger +} + +func (s *AppTestSuite) SetupSuite() { + prvKeys, pubKeys, err := NewKeys(4) + s.Require().NoError(err) + s.pubKeys = pubKeys + for _, k := range prvKeys { + s.signers = append(s.signers, utils.NewSigner(k)) + } + s.logger = &common.NullLogger{} +} + +func (s *AppTestSuite) prepareGov() *Governance { + gov, err := NewGovernance( + NewState(1, s.pubKeys, 100*time.Millisecond, s.logger, true), + core.ConfigRoundShift) + s.Require().NoError(err) + return gov +} + +func (s *AppTestSuite) proposeMPK( + gov *Governance, + round, reset uint64, + count int) { + for idx, pubKey := range s.pubKeys[:count] { + _, pubShare := dkg.NewPrivateKeyShares(utils.GetDKGThreshold( + gov.Configuration(round))) + mpk := &typesDKG.MasterPublicKey{ + Round: round, + Reset: reset, + DKGID: typesDKG.NewID(types.NewNodeID(pubKey)), + PublicKeyShares: *pubShare.Move(), + } + s.Require().NoError(s.signers[idx].SignDKGMasterPublicKey(mpk)) + gov.AddDKGMasterPublicKey(mpk) + } +} + +func (s *AppTestSuite) proposeFinalize( + gov *Governance, + round, reset uint64, + count int) { + for idx, pubKey := range s.pubKeys[:count] { + final := &typesDKG.Finalize{ + ProposerID: types.NewNodeID(pubKey), + Round: round, + Reset: reset, + } + s.Require().NoError(s.signers[idx].SignDKGFinalize(final)) + gov.AddDKGFinalize(final) + } +} + +func (s *AppTestSuite) TestCompare() { + var ( + b0 = types.Block{ + Hash: common.Hash{}, + Position: types.Position{Height: types.GenesisHeight}, + Randomness: []byte("b0")} + b1 = types.Block{ + Hash: common.NewRandomHash(), + Position: types.Position{Height: types.GenesisHeight + 1}, + Randomness: []byte("b1"), + } + ) + // Prepare an OK App instance. 
+ app1 := NewApp(0, nil, nil) + app1.BlockConfirmed(b0) + app1.BlockConfirmed(b1) + app1.BlockDelivered(b0.Hash, b0.Position, b0.Randomness) + app1.BlockDelivered(b1.Hash, b1.Position, b1.Randomness) + app2 := NewApp(0, nil, nil) + s.Require().EqualError(ErrEmptyDeliverSequence, + app1.Compare(app2).Error()) + app2.BlockConfirmed(b0) + app2.BlockDelivered(b0.Hash, b0.Position, b0.Randomness) + b1Bad := types.Block{ + Hash: common.NewRandomHash(), + Position: types.Position{Height: types.GenesisHeight + 1}, + Randomness: []byte("b1Bad"), + } + app2.BlockConfirmed(b1Bad) + app2.BlockDelivered(b1Bad.Hash, b1Bad.Position, b1Bad.Randomness) + s.Require().EqualError(ErrMismatchBlockHashSequence, + app1.Compare(app2).Error()) + app2 = NewApp(0, nil, nil) + app2.BlockConfirmed(b0) + app2.BlockDelivered(b0.Hash, b0.Position, []byte("b0-another")) + s.Require().EqualError(ErrMismatchRandomness, app1.Compare(app2).Error()) +} + +func (s *AppTestSuite) TestVerify() { + var ( + now = time.Now().UTC() + b0 = types.Block{ + Hash: common.Hash{}, + Position: types.Position{Height: types.GenesisHeight}, + Randomness: []byte("b0"), + Timestamp: now, + } + b1 = types.Block{ + Hash: common.NewRandomHash(), + Position: types.Position{Height: types.GenesisHeight + 1}, + Randomness: []byte("b1"), + Timestamp: now.Add(1 * time.Second), + } + ) + // ErrDeliveredBlockNotConfirmed + app := NewApp(0, nil, nil) + s.Require().Equal(ErrEmptyDeliverSequence.Error(), app.Verify().Error()) + app.BlockDelivered(b0.Hash, b0.Position, b0.Randomness) + app.BlockDelivered(b1.Hash, b1.Position, b1.Randomness) + s.Require().EqualError(ErrDeliveredBlockNotConfirmed, app.Verify().Error()) + // ErrTimestampOutOfOrder. + app = NewApp(0, nil, nil) + now = time.Now().UTC() + b0Bad := *(b0.Clone()) + b0Bad.Timestamp = now + b1Bad := *(b1.Clone()) + b1Bad.Timestamp = now.Add(-1 * time.Second) + app.BlockConfirmed(b0Bad) + app.BlockDelivered(b0Bad.Hash, b0Bad.Position, b0Bad.Randomness) + app.BlockConfirmed(b1Bad) + app.BlockDelivered(b1Bad.Hash, b1Bad.Position, b1Bad.Randomness) + s.Require().EqualError(ErrTimestampOutOfOrder, app.Verify().Error()) + // ErrInvalidHeight. + app = NewApp(0, nil, nil) + b0Bad = *(b0.Clone()) + b0Bad.Position.Height = 0 + s.Require().Panics(func() { app.BlockConfirmed(b0Bad) }) + b0Bad.Position.Height = 2 + s.Require().Panics(func() { app.BlockConfirmed(b0Bad) }) + // ErrEmptyRandomness + app = NewApp(0, nil, nil) + app.BlockConfirmed(b0) + app.BlockDelivered(b0.Hash, b0.Position, []byte{}) + s.Require().EqualError(ErrEmptyRandomness, app.Verify().Error()) + // OK. + app = NewApp(0, nil, nil) + app.BlockConfirmed(b0) + app.BlockConfirmed(b1) + app.BlockDelivered(b0.Hash, b0.Position, b0.Randomness) + app.BlockDelivered(b1.Hash, b1.Position, b1.Randomness) + s.Require().NoError(app.Verify()) +} + +func (s *AppTestSuite) TestWitness() { + // Deliver several blocks, there is only one chain only. 
+ app := NewApp(0, nil, nil) + deliver := func(b *types.Block) { + app.BlockConfirmed(*b) + app.BlockDelivered(b.Hash, b.Position, b.Randomness) + } + b00 := &types.Block{ + Hash: common.NewRandomHash(), + Position: types.Position{Height: 1}, + Timestamp: time.Now().UTC(), + Randomness: common.GenerateRandomBytes(), + } + b01 := &types.Block{ + Hash: common.NewRandomHash(), + Position: types.Position{Height: 2}, + Timestamp: time.Now().UTC(), + Randomness: common.GenerateRandomBytes(), + Witness: types.Witness{ + Height: b00.Position.Height, + Data: b00.Hash.Bytes(), + }} + b02 := &types.Block{ + Hash: common.NewRandomHash(), + Position: types.Position{Height: 3}, + Timestamp: time.Now().UTC(), + Randomness: common.GenerateRandomBytes(), + Witness: types.Witness{ + Height: b00.Position.Height, + Data: b00.Hash.Bytes(), + }} + deliver(b00) + deliver(b01) + deliver(b02) + // A block with higher witness height, should retry later. + s.Require().Equal(types.VerifyRetryLater, app.VerifyBlock(&types.Block{ + Witness: types.Witness{Height: 4}})) + // Mismatched witness height and data, should return invalid. + s.Require().Equal(types.VerifyInvalidBlock, app.VerifyBlock(&types.Block{ + Witness: types.Witness{ + Height: 1, + Data: b01.Hash.Bytes(), + }})) + // We can only verify a block followed last confirmed block. + s.Require().Equal(types.VerifyRetryLater, app.VerifyBlock(&types.Block{ + Witness: types.Witness{ + Height: b01.Position.Height, + Data: b01.Hash.Bytes()}, + Position: types.Position{Height: 5}})) + // It's the OK case. + s.Require().Equal(types.VerifyOK, app.VerifyBlock(&types.Block{ + Witness: types.Witness{ + Height: b01.Position.Height, + Data: b01.Hash.Bytes()}, + Position: types.Position{Height: 4}})) + // Check current last pending height. + _, lastRec := app.LastDeliveredRecordNoLock() + s.Require().Equal(lastRec.Pos.Height, uint64(3)) + // We can only prepare witness for what've delivered. + _, err := app.PrepareWitness(4) + s.Require().Equal(err.Error(), ErrLowerPendingHeight.Error()) + // It should be ok to prepare for height that already delivered. + w, err := app.PrepareWitness(3) + s.Require().NoError(err) + s.Require().Equal(w.Height, b02.Position.Height) + s.Require().Equal(0, bytes.Compare(w.Data, b02.Hash[:])) +} + +func (s *AppTestSuite) TestAttachedWithRoundEvent() { + // This test case is copied/modified from + // integraion.RoundEventTestSuite.TestFromRoundN, the difference is the + // calls to utils.RoundEvent.ValidateNextRound is not explicitly called but + // triggered by App.BlockDelivered. + var ( + gov = s.prepareGov() + roundLength = uint64(100) + ) + s.Require().NoError(gov.State().RequestChange(StateChangeRoundLength, + roundLength)) + for r := uint64(2); r <= uint64(20); r++ { + gov.ProposeCRS(r, getCRS(r, 0)) + } + for r := uint64(1); r <= uint64(19); r++ { + gov.NotifyRound(r, utils.GetRoundHeight(gov, r-1)+roundLength) + } + gov.NotifyRound(20, 2201) + // Reset round#20 twice, then make it done DKG preparation. + gov.ResetDKG(getCRS(20, 1)) + gov.ResetDKG(getCRS(20, 2)) + s.proposeMPK(gov, 20, 2, 3) + s.proposeFinalize(gov, 20, 2, 3) + s.Require().Equal(gov.DKGResetCount(20), uint64(2)) + // Propose CRS for round#21, and it works without reset. + gov.ProposeCRS(21, getCRS(21, 0)) + s.proposeMPK(gov, 21, 0, 3) + s.proposeFinalize(gov, 21, 0, 3) + // Propose CRS for round#22, and it works without reset. 
+ gov.ProposeCRS(22, getCRS(22, 0)) + s.proposeMPK(gov, 22, 0, 3) + s.proposeFinalize(gov, 22, 0, 3) + // Prepare utils.RoundEvent, starts from round#19, reset(for round#20)#1. + rEvt, err := utils.NewRoundEvent(context.Background(), gov, s.logger, + types.Position{Round: 19, Height: 2019}, core.ConfigRoundShift) + s.Require().NoError(err) + // Register a handler to collects triggered events. + evts := make(chan evtParamToCheck, 3) + rEvt.Register(func(params []utils.RoundEventParam) { + for _, p := range params { + evts <- evtParamToCheck{ + round: p.Round, + reset: p.Reset, + height: p.BeginHeight, + crs: p.CRS, + } + } + }) + // Setup App instance. + app := NewApp(19, gov, rEvt) + deliver := func(round, start, end uint64) { + for i := start; i <= end; i++ { + b := &types.Block{ + Hash: common.NewRandomHash(), + Position: types.Position{Round: round, Height: i}, + Randomness: common.GenerateRandomBytes(), + } + app.BlockConfirmed(*b) + app.BlockDelivered(b.Hash, b.Position, b.Randomness) + } + } + // Deliver blocks from height=2020 to height=2092. + for r := uint64(0); r <= uint64(19); r++ { + begin := utils.GetRoundHeight(gov, r) + deliver(r, begin, begin+roundLength-1) + } + deliver(19, 2001, 2092) + s.Require().Equal(<-evts, evtParamToCheck{19, 1, 2001, gov.CRS(19)}) + s.Require().Equal(<-evts, evtParamToCheck{19, 2, 2101, gov.CRS(19)}) + s.Require().Equal(<-evts, evtParamToCheck{20, 0, 2201, gov.CRS(20)}) + // Deliver blocks from height=2082 to height=2281. + deliver(19, 2093, 2200) + deliver(20, 2201, 2292) + s.Require().Equal(<-evts, evtParamToCheck{21, 0, 2301, gov.CRS(21)}) + // Deliver blocks from height=2282 to height=2381. + deliver(20, 2293, 2300) + deliver(21, 2301, 2392) + s.Require().Equal(<-evts, evtParamToCheck{22, 0, 2401, gov.CRS(22)}) +} + +func TestApp(t *testing.T) { + suite.Run(t, new(AppTestSuite)) +} diff --git a/dex/consensus/core/test/block-revealer.go b/dex/consensus/core/test/block-revealer.go new file mode 100644 index 000000000..e104f04e6 --- /dev/null +++ b/dex/consensus/core/test/block-revealer.go @@ -0,0 +1,106 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package test + +import ( + "errors" + "sort" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/db" + "github.com/dexon-foundation/dexon-consensus/core/types" +) + +// Errors returns from block-revealer. +var ( + ErrNotValidCompactionChain = errors.New("not valid compaction chain") +) + +// loadAllBlocks is a helper to load all blocks from db.BlockIterator. 
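loadAllBlocks, defined next, drains a db.BlockIterator until it reports db.ErrIterationFinished, which is an expected sentinel rather than a failure. The same drain-until-sentinel shape in isolation; intIterator and the error value are illustrative:

package main

import (
	"errors"
	"fmt"
)

var errIterationFinished = errors.New("iteration finished")

type intIterator struct {
	data []int
	idx  int
}

func (it *intIterator) next() (int, error) {
	if it.idx == len(it.data) {
		return 0, errIterationFinished
	}
	v := it.data[it.idx]
	it.idx++
	return v, nil
}

// drain keeps pulling until the iterator reports the sentinel "finished"
// error, which is swallowed; any other error would be propagated.
func drain(it *intIterator) ([]int, error) {
	var out []int
	for {
		v, err := it.next()
		if err != nil {
			if errors.Is(err, errIterationFinished) {
				return out, nil // expected end of iteration
			}
			return nil, err
		}
		out = append(out, v)
	}
}

func main() {
	out, err := drain(&intIterator{data: []int{1, 2, 3}})
	fmt.Println(out, err) // [1 2 3] <nil>
}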
+func loadAllBlocks(iter db.BlockIterator) ( + blocks map[common.Hash]*types.Block, err error) { + + blocks = make(map[common.Hash]*types.Block) + for { + block, err := iter.NextBlock() + if err != nil { + if err == db.ErrIterationFinished { + // It's safe to ignore iteraion-finished error. + err = nil + } + break + } + blocks[block.Hash] = &block + } + return +} + +// BlockRevealerByPosition implements BlockRevealer interface, which would +// load all blocks from db, reveal them in the order of compaction chain, +// from the genesis block to the latest one. +type BlockRevealerByPosition struct { + blocks types.BlocksByPosition + nextRevealIndex int +} + +// NewBlockRevealerByPosition constructs a block revealer in the order of +// compaction chain. +func NewBlockRevealerByPosition(iter db.BlockIterator, startHeight uint64) ( + r *BlockRevealerByPosition, err error) { + blocksByHash, err := loadAllBlocks(iter) + if err != nil { + return + } + blocks := types.BlocksByPosition{} + for _, b := range blocksByHash { + if b.Position.Height < startHeight { + continue + } + blocks = append(blocks, b) + } + sort.Sort(types.BlocksByPosition(blocks)) + // Make sure the height of blocks are incremental with step 1. + for idx, b := range blocks { + if idx == 0 { + continue + } + if b.Position.Height != blocks[idx-1].Position.Height+1 { + err = ErrNotValidCompactionChain + return + } + } + r = &BlockRevealerByPosition{blocks: blocks} + r.Reset() + return +} + +// NextBlock implements Revealer.Next method, which would reveal blocks in the +// order of compaction chain. +func (r *BlockRevealerByPosition) NextBlock() (types.Block, error) { + if r.nextRevealIndex == len(r.blocks) { + return types.Block{}, db.ErrIterationFinished + } + b := r.blocks[r.nextRevealIndex] + r.nextRevealIndex++ + return *b, nil +} + +// Reset implement Revealer.Reset method, which would reset revealing. +func (r *BlockRevealerByPosition) Reset() { + r.nextRevealIndex = 0 +} diff --git a/dex/consensus/core/test/block-revealer_test.go b/dex/consensus/core/test/block-revealer_test.go new file mode 100644 index 000000000..dd2aeb862 --- /dev/null +++ b/dex/consensus/core/test/block-revealer_test.go @@ -0,0 +1,90 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package test + +import ( + "testing" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/db" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/stretchr/testify/suite" +) + +type BlockRevealerTestSuite struct { + suite.Suite +} + +func (s *BlockRevealerTestSuite) TestBlockRevealByPosition() { + dbInst, err := db.NewMemBackedDB() + s.Require().NoError(err) + // Put several blocks with position field ready. 
+ b1 := &types.Block{ + Hash: common.NewRandomHash(), + Position: types.Position{Height: 1}, + } + b2 := &types.Block{ + Hash: common.NewRandomHash(), + Position: types.Position{Height: 2}, + } + b3 := &types.Block{ + Hash: common.NewRandomHash(), + Position: types.Position{Height: 3}, + } + s.Require().NoError(dbInst.PutBlock(*b1)) + s.Require().NoError(dbInst.PutBlock(*b3)) + iter, err := dbInst.GetAllBlocks() + s.Require().NoError(err) + // The compaction chain is not complete, we can't construct a revealer + // instance successfully. + r, err := NewBlockRevealerByPosition(iter, 0) + s.Require().Nil(r) + s.Require().Equal(ErrNotValidCompactionChain.Error(), err.Error()) + // Put a block to make the compaction chain complete. + s.Require().NoError(dbInst.PutBlock(*b2)) + // We can construct that revealer now. + iter, err = dbInst.GetAllBlocks() + s.Require().NoError(err) + r, err = NewBlockRevealerByPosition(iter, 0) + s.Require().NotNil(r) + s.Require().NoError(err) + // The revealing order should be ok. + chk := func(h uint64) { + b, err := r.NextBlock() + s.Require().NoError(err) + s.Require().Equal(b.Position.Height, h) + } + chk(1) + chk(2) + chk(3) + // Iteration should be finished + _, err = r.NextBlock() + s.Require().Equal(db.ErrIterationFinished.Error(), err.Error()) + // Test 'startHeight' parameter. + iter, err = dbInst.GetAllBlocks() + s.Require().NoError(err) + r, err = NewBlockRevealerByPosition(iter, 2) + s.Require().NotNil(r) + s.Require().NoError(err) + chk(2) + chk(3) +} + +func TestBlockRevealer(t *testing.T) { + suite.Run(t, new(BlockRevealerTestSuite)) +} diff --git a/dex/consensus/core/test/fake-transport.go b/dex/consensus/core/test/fake-transport.go new file mode 100644 index 000000000..cecac5430 --- /dev/null +++ b/dex/consensus/core/test/fake-transport.go @@ -0,0 +1,213 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package test + +import ( + "fmt" + "time" + + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/types" +) + +type fakePeerRecord struct { + sendChannel chan<- *TransportEnvelope + pubKey crypto.PublicKey +} + +type fakeHandshake struct { + dMoment time.Time + peers map[types.NodeID]fakePeerRecord +} + +// FakeTransport implement TransportServer and TransportClient interface +// by using golang channel. +type FakeTransport struct { + peerType TransportPeerType + nID types.NodeID + pubKey crypto.PublicKey + recvChannel chan *TransportEnvelope + serverChannel chan<- *TransportEnvelope + peers map[types.NodeID]fakePeerRecord + dMoment time.Time +} + +// NewFakeTransportServer constructs FakeTransport instance for peer server. 
+func NewFakeTransportServer() TransportServer {
+ return &FakeTransport{
+ peerType: TransportPeerServer,
+ recvChannel: make(chan *TransportEnvelope, 1000),
+ }
+}
+
+// NewFakeTransportClient constructs FakeTransport instance for peer.
+func NewFakeTransportClient(pubKey crypto.PublicKey) TransportClient {
+ return &FakeTransport{
+ peerType: TransportPeer,
+ recvChannel: make(chan *TransportEnvelope, 1000),
+ nID: types.NewNodeID(pubKey),
+ pubKey: pubKey,
+ }
+}
+
+// Disconnect implements Transport.Disconnect method.
+func (t *FakeTransport) Disconnect(endpoint types.NodeID) {
+ delete(t.peers, endpoint)
+}
+
+// Send implements Transport.Send method.
+func (t *FakeTransport) Send(
+ endpoint types.NodeID, msg interface{}) (err error) {
+ rec, exists := t.peers[endpoint]
+ if !exists {
+ err = fmt.Errorf("the endpoint does not exist: %v", endpoint)
+ return
+ }
+ go func(ch chan<- *TransportEnvelope) {
+ ch <- &TransportEnvelope{
+ PeerType: t.peerType,
+ From: t.nID,
+ Msg: msg,
+ }
+ }(rec.sendChannel)
+ return
+}
+
+// Report implements Transport.Report method.
+func (t *FakeTransport) Report(msg interface{}) (err error) {
+ go func() {
+ t.serverChannel <- &TransportEnvelope{
+ PeerType: TransportPeer,
+ From: t.nID,
+ Msg: msg,
+ }
+ }()
+ return
+}
+
+// Broadcast implements Transport.Broadcast method.
+func (t *FakeTransport) Broadcast(endpoints map[types.NodeID]struct{},
+ latency LatencyModel, msg interface{}) (err error) {
+ for ID := range endpoints {
+ if ID == t.nID {
+ continue
+ }
+ go func(nID types.NodeID) {
+ time.Sleep(latency.Delay())
+ // #nosec G104
+ t.Send(nID, msg)
+ }(ID)
+ }
+ return
+}
+
+// Close implements Transport.Close method.
+func (t *FakeTransport) Close() (err error) {
+ close(t.recvChannel)
+ return
+}
+
+// Peers implements Transport.Peers method.
+func (t *FakeTransport) Peers() (peers []crypto.PublicKey) {
+ for _, rec := range t.peers {
+ peers = append(peers, rec.pubKey)
+ }
+ return
+}
+
+// Join implements TransportClient.Join method.
+func (t *FakeTransport) Join(
+ serverEndpoint interface{}) (<-chan *TransportEnvelope, error) {
+
+ var (
+ envelopes = []*TransportEnvelope{}
+ ok bool
+ )
+ if t.serverChannel, ok = serverEndpoint.(chan *TransportEnvelope); !ok {
+ return nil, fmt.Errorf("expect a channel of *TransportEnvelope when joining")
+ }
+ if err := t.Report(t); err != nil {
+ panic(err)
+ }
+ // Wait for peer info.
+ for {
+ envelope := <-t.recvChannel
+ if envelope.PeerType != TransportPeerServer {
+ envelopes = append(envelopes, envelope)
+ continue
+ }
+ if handShake, ok := envelope.Msg.(fakeHandshake); ok {
+ t.dMoment = handShake.dMoment
+ t.peers = handShake.peers
+ } else {
+ envelopes = append(envelopes, envelope)
+ continue
+ }
+ for _, envelope := range envelopes {
+ t.recvChannel <- envelope
+ }
+ break
+ }
+ return t.recvChannel, nil
+}
+
+// DMoment implements TransportClient.DMoment method.
+func (t *FakeTransport) DMoment() time.Time {
+ return t.dMoment
+}
+
+// Host implements TransportServer.Host method.
+func (t *FakeTransport) Host() (chan *TransportEnvelope, error) {
+ return t.recvChannel, nil
+}
+
+// SetDMoment implements TransportServer.SetDMoment method.
+func (t *FakeTransport) SetDMoment(dMoment time.Time) {
+ t.dMoment = dMoment
+}
+
+// WaitForPeers implements TransportServer.WaitForPeers method.
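+//
+// WaitForPeers blocks until exactly numPeers clients have reported
+// themselves via Join; it then broadcasts the collected peer table and the
+// DMoment back to every client as a fakeHandshake message.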
+func (t *FakeTransport) WaitForPeers(numPeers uint32) (err error) {
+ t.peers = make(map[types.NodeID]fakePeerRecord)
+ for {
+ envelope := <-t.recvChannel
+ // Panic here if some peer sends other stuff before
+ // we receive the peer list.
+ newPeer := envelope.Msg.(*FakeTransport)
+ t.peers[envelope.From] = fakePeerRecord{
+ sendChannel: newPeer.recvChannel,
+ pubKey: newPeer.pubKey,
+ }
+ if uint32(len(t.peers)) == numPeers {
+ break
+ }
+ }
+ // The collected peer channels are shared with all peers.
+ peers := make(map[types.NodeID]struct{})
+ for ID := range t.peers {
+ peers[ID] = struct{}{}
+ }
+ handShake := fakeHandshake{
+ dMoment: t.dMoment,
+ peers: t.peers,
+ }
+ if err = t.Broadcast(peers, &FixedLatencyModel{}, handShake); err != nil {
+ return
+ }
+ return
+}
diff --git a/dex/consensus/core/test/governance.go b/dex/consensus/core/test/governance.go
new file mode 100644
index 000000000..204e68bf8
--- /dev/null
+++ b/dex/consensus/core/test/governance.go
@@ -0,0 +1,547 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "sync"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+)
+
+// TODO(mission): add a method to compare config/crs between governance
+// instances.
+
+// Governance is an implementation of the Governance interface for testing
+// purposes.
+type Governance struct {
+ roundShift uint64
+ configs []*types.Config
+ nodeSets [][]crypto.PublicKey
+ roundBeginHeights []uint64
+ stateModule *State
+ networkModule *Network
+ pendingConfigChanges map[uint64]map[StateChangeType]interface{}
+ prohibitedTypes map[StateChangeType]struct{}
+ lock sync.RWMutex
+}
+
+// NewGovernance constructs a Governance instance.
+func NewGovernance(state *State, roundShift uint64) (g *Governance, err error) {
+ // Wire up the given State instance.
+ g = &Governance{
+ roundShift: roundShift,
+ pendingConfigChanges: make(map[uint64]map[StateChangeType]interface{}),
+ stateModule: state,
+ prohibitedTypes: make(map[StateChangeType]struct{}),
+ roundBeginHeights: []uint64{types.GenesisHeight},
+ }
+ return
+}
+
+// NodeSet implements Governance interface to return the node set
+// of the given round.
+func (g *Governance) NodeSet(round uint64) []crypto.PublicKey {
+ if round == 0 || round == 1 {
+ // Rounds 0 and 1 are genesis rounds; their configs should be created
+ // by default.
+ g.CatchUpWithRound(round)
+ }
+ g.lock.RLock()
+ defer g.lock.RUnlock()
+ if round >= uint64(len(g.nodeSets)) {
+ return nil
+ }
+ return g.nodeSets[round]
+}
+
+// Configuration returns the configuration of a given round.
+func (g *Governance) Configuration(round uint64) *types.Config {
+ if round == 0 || round == 1 {
+ // Rounds 0 and 1 are genesis rounds; their configs should be created
+ // by default.
+ g.CatchUpWithRound(round)
+ }
+ g.lock.RLock()
+ defer g.lock.RUnlock()
+ if round >= uint64(len(g.configs)) {
+ return nil
+ }
+ return g.configs[round]
+}
+
+// GetRoundHeight returns the begin height of a round.
+func (g *Governance) GetRoundHeight(round uint64) uint64 {
+ // This is a workaround to fit the full node's behavior: its round 0 is
+ // reserved for a genesis block unseen to core.
+ if round == 0 {
+ return 0
+ }
+ g.lock.RLock()
+ defer g.lock.RUnlock()
+ if round >= uint64(len(g.roundBeginHeights)) {
+ panic(fmt.Errorf("round begin height is not ready: %d %d",
+ round, len(g.roundBeginHeights)))
+ }
+ return g.roundBeginHeights[round]
+}
+
+// CRS returns the CRS for a given round.
+func (g *Governance) CRS(round uint64) common.Hash {
+ return g.stateModule.CRS(round)
+}
+
+// NotifyRound notifies the governance contract to snapshot configs, and
+// broadcasts pending state change requests for the next round, if any.
+func (g *Governance) NotifyRound(round, beginHeight uint64) {
+ // Snapshot configuration for the shifted round; this behavior is synced
+ // with the full node's implementation.
+ shiftedRound := round + g.roundShift
+ g.CatchUpWithRound(shiftedRound)
+ // Apply change requests for the next round.
+ func() {
+ g.lock.Lock()
+ defer g.lock.Unlock()
+ // Check if there are any pending changes for previous rounds.
+ for r := range g.pendingConfigChanges {
+ if r < shiftedRound+1 {
+ panic(fmt.Errorf("pending change no longer applied: %v, now: %v",
+ r, shiftedRound+1))
+ }
+ }
+ for t, v := range g.pendingConfigChanges[shiftedRound+1] {
+ if err := g.stateModule.RequestChange(t, v); err != nil {
+ panic(err)
+ }
+ }
+ delete(g.pendingConfigChanges, shiftedRound+1)
+ g.broadcastPendingStateChanges()
+ if round == uint64(len(g.roundBeginHeights)) {
+ g.roundBeginHeights = append(g.roundBeginHeights, beginHeight)
+ } else if round < uint64(len(g.roundBeginHeights)) {
+ if beginHeight != g.roundBeginHeights[round] {
+ panic(fmt.Errorf("mismatched round begin height: %d %d %d",
+ round, beginHeight, g.roundBeginHeights[round]))
+ }
+ } else {
+ panic(fmt.Errorf("discontinuous round begin height: %d %d %d",
+ round, beginHeight, len(g.roundBeginHeights)))
+ }
+ }()
+}
+
+// ProposeCRS proposes a CRS.
+func (g *Governance) ProposeCRS(round uint64, signedCRS []byte) {
+ g.lock.Lock()
+ defer g.lock.Unlock()
+ crs := crypto.Keccak256Hash(signedCRS)
+ if err := g.stateModule.ProposeCRS(round, crs); err != nil {
+ // CRS can be proposed multiple times; other errors are not
+ // accepted.
+ if err != ErrDuplicatedChange {
+ panic(err)
+ }
+ return
+ }
+ g.broadcastPendingStateChanges()
+}
+
+// AddDKGComplaint adds a DKGComplaint.
+func (g *Governance) AddDKGComplaint(complaint *typesDKG.Complaint) {
+ if g.isProhibited(StateAddDKGComplaint) {
+ return
+ }
+ if g.IsDKGFinal(complaint.Round) {
+ return
+ }
+ if err := g.stateModule.RequestChange(
+ StateAddDKGComplaint, complaint); err != nil {
+ if err != ErrChangeWontApply {
+ panic(err)
+ }
+ }
+ g.broadcastPendingStateChanges()
+}
+
+// DKGComplaints returns the DKGComplaints of round.
+func (g *Governance) DKGComplaints(round uint64) []*typesDKG.Complaint {
+ return g.stateModule.DKGComplaints(round)
+}
+
+// AddDKGMasterPublicKey adds a DKGMasterPublicKey.
+func (g *Governance) AddDKGMasterPublicKey(masterPublicKey *typesDKG.MasterPublicKey) {
+ if g.isProhibited(StateAddDKGMasterPublicKey) {
+ return
+ }
+ if g.IsDKGMPKReady(masterPublicKey.Round) {
+ return
+ }
+ if err := g.stateModule.RequestChange(
+ StateAddDKGMasterPublicKey, masterPublicKey); err != nil {
+ if err != ErrChangeWontApply {
+ panic(err)
+ }
+ }
+ g.broadcastPendingStateChanges()
+}
+
+// DKGMasterPublicKeys returns the DKGMasterPublicKeys of round.
+func (g *Governance) DKGMasterPublicKeys(
+ round uint64) []*typesDKG.MasterPublicKey {
+ return g.stateModule.DKGMasterPublicKeys(round)
+}
+
+// AddDKGMPKReady adds a DKG ready message.
+func (g *Governance) AddDKGMPKReady(ready *typesDKG.MPKReady) {
+ if err := g.stateModule.RequestChange(
+ StateAddDKGMPKReady, ready); err != nil {
+ if err != ErrChangeWontApply {
+ panic(err)
+ }
+ }
+ g.broadcastPendingStateChanges()
+}
+
+// IsDKGMPKReady checks if the DKG MPKs are ready.
+func (g *Governance) IsDKGMPKReady(round uint64) bool {
+ if round == 0 || round == 1 {
+ // Rounds 0 and 1 are genesis rounds; their configs should be created
+ // by default.
+ g.CatchUpWithRound(round)
+ }
+ g.lock.RLock()
+ defer g.lock.RUnlock()
+ if round >= uint64(len(g.configs)) {
+ return false
+ }
+ return g.stateModule.IsDKGMPKReady(round, int(g.configs[round].NotarySetSize)*2/3+1)
+}
+
+// AddDKGFinalize adds a DKG finalize message.
+func (g *Governance) AddDKGFinalize(final *typesDKG.Finalize) {
+ if g.isProhibited(StateAddDKGFinal) {
+ return
+ }
+ if err := g.stateModule.RequestChange(StateAddDKGFinal, final); err != nil {
+ if err != ErrChangeWontApply {
+ panic(err)
+ }
+ }
+ g.broadcastPendingStateChanges()
+}
+
+// IsDKGFinal checks if DKG is final.
+func (g *Governance) IsDKGFinal(round uint64) bool {
+ if round == 0 || round == 1 {
+ // Rounds 0 and 1 are genesis rounds; their configs should be created
+ // by default.
+ g.CatchUpWithRound(round)
+ }
+ g.lock.RLock()
+ defer g.lock.RUnlock()
+ if round >= uint64(len(g.configs)) {
+ return false
+ }
+ return g.stateModule.IsDKGFinal(round, int(g.configs[round].NotarySetSize)*2/3+1)
+}
+
+// AddDKGSuccess adds a DKG success message.
+func (g *Governance) AddDKGSuccess(success *typesDKG.Success) {
+ if g.isProhibited(StateAddDKGSuccess) {
+ return
+ }
+ if err := g.stateModule.RequestChange(StateAddDKGSuccess, success); err != nil {
+ if err != ErrChangeWontApply {
+ panic(err)
+ }
+ }
+ g.broadcastPendingStateChanges()
+}
+
+// IsDKGSuccess checks if the DKG succeeded.
+func (g *Governance) IsDKGSuccess(round uint64) bool {
+ if round == 0 || round == 1 {
+ // Rounds 0 and 1 are genesis rounds; their configs should be created
+ // by default.
+ g.CatchUpWithRound(round)
+ }
+ g.lock.RLock()
+ defer g.lock.RUnlock()
+ if round >= uint64(len(g.configs)) {
+ return false
+ }
+ return g.stateModule.IsDKGFinal(round, int(g.configs[round].NotarySetSize)*5/6)
+}
+
+// ReportForkVote reports a node for forking votes.
+func (g *Governance) ReportForkVote(vote1, vote2 *types.Vote) {
+}
+
+// ReportForkBlock reports a node for forking blocks.
+func (g *Governance) ReportForkBlock(block1, block2 *types.Block) {
+}
+
+// ResetDKG resets the latest DKG data and proposes a new CRS.
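+//
+// A hedged sketch (newSignedCRS is an arbitrary byte slice; its Keccak256
+// hash becomes the proposed CRS, as in ProposeCRS above):
+//
+//   gov.ResetDKG(newSignedCRS)
+//   n := gov.DKGResetCount(round) // reflects the reset once applied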
+func (g *Governance) ResetDKG(newSignedCRS []byte) {
+ g.lock.Lock()
+ defer g.lock.Unlock()
+ crs := crypto.Keccak256Hash(newSignedCRS)
+ if err := g.stateModule.RequestChange(StateResetDKG, crs); err != nil {
+ // ResetDKG can be proposed multiple times; other errors are not
+ // accepted.
+ if err != ErrDuplicatedChange {
+ panic(err)
+ }
+ return
+ }
+ g.broadcastPendingStateChanges()
+}
+
+// DKGResetCount returns the reset count for DKG of a given round.
+func (g *Governance) DKGResetCount(round uint64) uint64 {
+ return g.stateModule.DKGResetCount(round)
+}
+
+//
+// Test Utilities
+//
+
+type packedStateChanges []byte
+
+// This method broadcasts pending state change requests in the underlying
+// State instance; this behavior is to simulate tx-gossiping in full nodes.
+func (g *Governance) broadcastPendingStateChanges() {
+ if g.networkModule == nil {
+ return
+ }
+ packed, err := g.stateModule.PackOwnRequests()
+ if err != nil {
+ panic(err)
+ }
+ if err := g.networkModule.Broadcast(packedStateChanges(packed)); err != nil {
+ panic(err)
+ }
+}
+
+// State returns the embedded State instance.
+func (g *Governance) State() *State {
+ return g.stateModule
+}
+
+// CatchUpWithRound attempts to perform state snapshots to
+// provide the configuration/nodeSet for round R.
+func (g *Governance) CatchUpWithRound(round uint64) {
+ if func() bool {
+ g.lock.RLock()
+ defer g.lock.RUnlock()
+ return uint64(len(g.configs)) > round
+ }() {
+ return
+ }
+ g.lock.Lock()
+ defer g.lock.Unlock()
+ for uint64(len(g.configs)) <= round {
+ config, nodeSet := g.stateModule.Snapshot()
+ g.configs = append(g.configs, config)
+ g.nodeSets = append(g.nodeSets, nodeSet)
+ }
+ if round >= 1 && len(g.roundBeginHeights) == 1 {
+ // Begin heights of round 0 and round 1 should be ready; they won't be
+ // affected by the DKG reset mechanism.
+ g.roundBeginHeights = append(g.roundBeginHeights,
+ g.configs[0].RoundLength+g.roundBeginHeights[0])
+ }
+}
+
+// Clone replicates a governance instance along with its internal state.
+func (g *Governance) Clone() *Governance {
+ g.lock.RLock()
+ defer g.lock.RUnlock()
+ // Clone state.
+ copiedState := g.stateModule.Clone()
+ // Clone configs.
+ copiedConfigs := []*types.Config{}
+ for _, c := range g.configs {
+ copiedConfigs = append(copiedConfigs, c.Clone())
+ }
+ // Clone pending changes.
+ copiedPendingChanges := make(map[uint64]map[StateChangeType]interface{})
+ for round, forRound := range g.pendingConfigChanges {
+ copiedForRound := make(map[StateChangeType]interface{})
+ for k, v := range forRound {
+ copiedForRound[k] = v
+ }
+ copiedPendingChanges[round] = copiedForRound
+ }
+ // Clone node sets.
+ // NOTE: here I assume the key is from ecdsa.
+ copiedNodeSets := [][]crypto.PublicKey{}
+ for _, nodeSetForRound := range g.nodeSets {
+ copiedNodeSet := []crypto.PublicKey{}
+ for _, node := range nodeSetForRound {
+ pubKey, err := ecdsa.NewPublicKeyFromByteSlice(node.Bytes())
+ if err != nil {
+ panic(err)
+ }
+ copiedNodeSet = append(copiedNodeSet, pubKey)
+ }
+ copiedNodeSets = append(copiedNodeSets, copiedNodeSet)
+ }
+ // Clone prohibited flags.
+ copiedProhibitedTypes := make(map[StateChangeType]struct{})
+ for t := range g.prohibitedTypes {
+ copiedProhibitedTypes[t] = struct{}{}
+ }
+ return &Governance{
+ roundShift: g.roundShift,
+ configs: copiedConfigs,
+ stateModule: copiedState,
+ nodeSets: copiedNodeSets,
+ pendingConfigChanges: copiedPendingChanges,
+ prohibitedTypes: copiedProhibitedTypes,
+ }
+}
+
+// Equal checks equality between two Governance instances.
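+//
+// Sketch: a fresh clone is expected to compare equal, including the
+// underlying state:
+//
+//   g2 := g.Clone()
+//   same := g.Equal(g2, true) // expected to be true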
+func (g *Governance) Equal(other *Governance, checkState bool) bool {
+ // Check roundShift.
+ if g.roundShift != other.roundShift {
+ return false
+ }
+ // Check configs.
+ if !reflect.DeepEqual(g.configs, other.configs) {
+ return false
+ }
+ // Check node sets.
+ if len(g.nodeSets) != len(other.nodeSets) {
+ return false
+ }
+ // Check pending changes.
+ if !reflect.DeepEqual(g.pendingConfigChanges, other.pendingConfigChanges) {
+ return false
+ }
+ // Check prohibited types.
+ if !reflect.DeepEqual(g.prohibitedTypes, other.prohibitedTypes) {
+ return false
+ }
+ getSortedKeys := func(keys []crypto.PublicKey) (encoded []string) {
+ for _, key := range keys {
+ encoded = append(encoded, hex.EncodeToString(key.Bytes()))
+ }
+ sort.Strings(encoded)
+ return
+ }
+ for round, nodeSetsForRound := range g.nodeSets {
+ otherNodeSetsForRound := other.nodeSets[round]
+ if len(nodeSetsForRound) != len(otherNodeSetsForRound) {
+ return false
+ }
+ if !reflect.DeepEqual(
+ getSortedKeys(nodeSetsForRound),
+ getSortedKeys(otherNodeSetsForRound)) {
+ return false
+ }
+ }
+ // Check state if needed.
+ //
+ // While testing, it's expected that two governance instances contain
+ // different states; only the snapshots (configs and node sets) are
+ // essentially equal.
+ if checkState {
+ return g.stateModule.Equal(other.stateModule) == nil
+ }
+ return true
+}
+
+// RegisterConfigChange tells this governance instance to request some
+// configuration change at some round.
+// NOTE: you can't request config changes for rounds 0 and 1; they are
+// genesis rounds.
+// NOTE: this function should be called before running.
+func (g *Governance) RegisterConfigChange(
+ round uint64, t StateChangeType, v interface{}) (err error) {
+ if t < StateAddCRS || t > StateChangeNotarySetSize {
+ return fmt.Errorf("state change to register is not supported: %v", t)
+ }
+ if round < 2 {
+ return errors.New(
+ "attempt to register state change for genesis rounds")
+ }
+ g.lock.Lock()
+ defer g.lock.Unlock()
+ if round < uint64(len(g.configs)) {
+ return errors.New(
+ "attempt to register state change for prepared rounds")
+ }
+ pendingChangesForRound, exists := g.pendingConfigChanges[round]
+ if !exists {
+ pendingChangesForRound = make(map[StateChangeType]interface{})
+ g.pendingConfigChanges[round] = pendingChangesForRound
+ }
+ pendingChangesForRound[t] = v
+ return nil
+}
+
+// SwitchToRemoteMode would switch this governance instance to remote mode,
+// which means it will broadcast all changes from its underlying state
+// instance.
+func (g *Governance) SwitchToRemoteMode(n *Network) {
+ if g.networkModule != nil {
+ panic(errors.New("not in local mode before switching"))
+ }
+ g.stateModule.SwitchToRemoteMode()
+ g.networkModule = n
+ n.addStateModule(g.stateModule)
+}
+
+// Prohibit would prohibit DKG-related state change requests.
+//
+// Note this method only prevents local modifications; state changes related
+// to DKG from others won't be prohibited.
+func (g *Governance) Prohibit(t StateChangeType) {
+ g.lock.Lock()
+ defer g.lock.Unlock()
+ switch t {
+ case StateAddDKGMasterPublicKey, StateAddDKGFinal, StateAddDKGComplaint:
+ g.prohibitedTypes[t] = struct{}{}
+ default:
+ panic(fmt.Errorf("unsupported state change type to prohibit: %s", t))
+ }
+}
+
+// Unprohibit would unprohibit DKG-related state change requests.
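+//
+// Sketch of a prohibit/unprohibit cycle (mirroring TestProhibit in
+// governance_test.go):
+//
+//   gov.Prohibit(StateAddDKGFinal)
+//   // ... local typesDKG.Finalize proposals are dropped here ...
+//   gov.Unprohibit(StateAddDKGFinal)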
+func (g *Governance) Unprohibit(t StateChangeType) {
+ g.lock.Lock()
+ defer g.lock.Unlock()
+ delete(g.prohibitedTypes, t)
+}
+
+// isProhibited checks if a state change request is prohibited or not.
+func (g *Governance) isProhibited(t StateChangeType) (prohibited bool) {
+ g.lock.RLock()
+ defer g.lock.RUnlock()
+ _, prohibited = g.prohibitedTypes[t]
+ return
+}
diff --git a/dex/consensus/core/test/governance_test.go b/dex/consensus/core/test/governance_test.go
new file mode 100644
index 000000000..e07563841
--- /dev/null
+++ b/dex/consensus/core/test/governance_test.go
@@ -0,0 +1,188 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+ "testing"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/utils"
+ "github.com/stretchr/testify/suite"
+)
+
+type GovernanceTestSuite struct {
+ suite.Suite
+}
+
+func (s *GovernanceTestSuite) TestEqual() {
+ var req = s.Require()
+ // Setup a base governance.
+ _, genesisNodes, err := NewKeys(20)
+ req.NoError(err)
+ g1, err := NewGovernance(NewState(
+ 1, genesisNodes, 100*time.Millisecond, &common.NullLogger{}, true), 2)
+ req.NoError(err)
+ // Create a governance with a different lambda.
+ g2, err := NewGovernance(NewState(
+ 1, genesisNodes, 50*time.Millisecond, &common.NullLogger{}, true), 2)
+ req.NoError(err)
+ req.False(g1.Equal(g2, true))
+ // Create configs for 3 rounds for g1.
+ g1.CatchUpWithRound(3)
+ // Make a clone.
+ g3 := g1.Clone()
+ req.True(g1.Equal(g3, true))
+ // Create a new round for g1.
+ g1.CatchUpWithRound(4)
+ req.False(g1.Equal(g3, true))
+ // Make another clone.
+ g4 := g1.Clone()
+ req.True(g1.Equal(g4, true))
+ // Add a node to g4.
+ _, newNodes, err := NewKeys(1)
+ req.NoError(err)
+ g4.State().RequestChange(StateAddNode, newNodes[0])
+ g1.CatchUpWithRound(5)
+ g4.CatchUpWithRound(5)
+ req.False(g1.Equal(g4, true))
+ // Make a clone.
+ g5 := g1.Clone()
+ // Change its roundShift.
+ g5.roundShift = 3
+ req.False(g1.Equal(g5, true))
+ // Prohibit some change.
+ g1.Prohibit(StateAddDKGFinal)
+ // Make a clone; it should still be equal.
+ g6 := g1.Clone()
+ req.True(g1.Equal(g6, true))
+ g6.Unprohibit(StateAddDKGFinal)
+ req.False(g1.Equal(g6, true))
+}
+
+func (s *GovernanceTestSuite) TestRegisterChange() {
+ var (
+ req = s.Require()
+ roundLength uint64 = 100
+ )
+ _, genesisNodes, err := NewKeys(20)
+ req.NoError(err)
+ g, err := NewGovernance(NewState(
+ 1, genesisNodes, 100*time.Millisecond, &common.NullLogger{}, true), 2)
+ req.NoError(err)
+ req.NoError(g.State().RequestChange(StateChangeRoundLength,
+ uint64(roundLength)))
+ // Unable to register a change for a genesis round.
+ req.Error(g.RegisterConfigChange(0, StateChangeNotarySetSize, uint32(32)))
+ // Make some rounds prepared.
+ g.CatchUpWithRound(4)
+ req.Equal(g.Configuration(4).NotarySetSize, uint32(20))
+ // Unable to register a change for a prepared round.
+ req.Error(g.RegisterConfigChange(4, StateChangeNotarySetSize, uint32(32)))
+ // It's ok to make some changes when the conditions are met.
+ req.NoError(g.RegisterConfigChange(5, StateChangeNotarySetSize, uint32(32)))
+ req.NoError(g.RegisterConfigChange(6, StateChangeNotarySetSize, uint32(32)))
+ req.NoError(g.RegisterConfigChange(7, StateChangeNotarySetSize, uint32(40)))
+ // With roundShift == 2, notifying round r snapshots the configuration of
+ // round r+2 and applies pending changes registered for round r+3.
+ g.NotifyRound(2, roundLength*2)
+ g.NotifyRound(3, roundLength*3)
+ g.NotifyRound(4, roundLength*4)
+ // Notify governance to take a snapshot for round 7's configuration.
+ g.NotifyRound(5, roundLength*5)
+ req.Equal(g.Configuration(6).NotarySetSize, uint32(32))
+ req.Equal(g.Configuration(7).NotarySetSize, uint32(40))
+}
+
+func (s *GovernanceTestSuite) TestProhibit() {
+ round := uint64(1)
+ prvKeys, genesisNodes, err := NewKeys(4)
+ s.Require().NoError(err)
+ gov, err := NewGovernance(NewState(
+ 1, genesisNodes, 100*time.Millisecond, &common.NullLogger{}, true), 2)
+ s.Require().NoError(err)
+ // Test MPK.
+ proposeMPK := func(k crypto.PrivateKey) {
+ signer := utils.NewSigner(k)
+ _, pubShare := dkg.NewPrivateKeyShares(utils.GetDKGThreshold(
+ gov.Configuration(round)))
+ mpk := &typesDKG.MasterPublicKey{
+ Round: round,
+ DKGID: typesDKG.NewID(types.NewNodeID(k.PublicKey())),
+ PublicKeyShares: *pubShare.Move(),
+ }
+ s.Require().NoError(signer.SignDKGMasterPublicKey(mpk))
+ gov.AddDKGMasterPublicKey(mpk)
+ }
+ proposeMPK(prvKeys[0])
+ s.Require().Len(gov.DKGMasterPublicKeys(round), 1)
+ gov.Prohibit(StateAddDKGMasterPublicKey)
+ proposeMPK(prvKeys[1])
+ s.Require().Len(gov.DKGMasterPublicKeys(round), 1)
+ gov.Unprohibit(StateAddDKGMasterPublicKey)
+ proposeMPK(prvKeys[1])
+ s.Require().Len(gov.DKGMasterPublicKeys(round), 2)
+ // Test Complaint.
+ proposeComplaint := func(k crypto.PrivateKey) {
+ signer := utils.NewSigner(k)
+ comp := &typesDKG.Complaint{
+ ProposerID: types.NewNodeID(k.PublicKey()),
+ Round: round,
+ }
+ s.Require().NoError(signer.SignDKGComplaint(comp))
+ gov.AddDKGComplaint(comp)
+ }
+ proposeComplaint(prvKeys[0])
+ s.Require().Len(gov.DKGComplaints(round), 1)
+ gov.Prohibit(StateAddDKGComplaint)
+ proposeComplaint(prvKeys[1])
+ s.Require().Len(gov.DKGComplaints(round), 1)
+ gov.Unprohibit(StateAddDKGComplaint)
+ proposeComplaint(prvKeys[1])
+ s.Require().Len(gov.DKGComplaints(round), 2)
+ // Test DKG Final.
+ proposeFinal := func(k crypto.PrivateKey) {
+ signer := utils.NewSigner(k)
+ final := &typesDKG.Finalize{
+ Round: round,
+ ProposerID: types.NewNodeID(k.PublicKey()),
+ }
+ s.Require().NoError(signer.SignDKGFinalize(final))
+ gov.AddDKGFinalize(final)
+ }
+ gov.Prohibit(StateAddDKGFinal)
+ for _, k := range prvKeys {
+ proposeFinal(k)
+ }
+ s.Require().False(gov.IsDKGFinal(round))
+ gov.Unprohibit(StateAddDKGFinal)
+ for _, k := range prvKeys {
+ proposeFinal(k)
+ }
+ s.Require().True(gov.IsDKGFinal(round))
+}
+
+func TestGovernance(t *testing.T) {
+ suite.Run(t, new(GovernanceTestSuite))
+}
diff --git a/dex/consensus/core/test/interface.go b/dex/consensus/core/test/interface.go
new file mode 100644
index 000000000..58a3cedfa
--- /dev/null
+++ b/dex/consensus/core/test/interface.go
@@ -0,0 +1,108 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ "github.com/dexon-foundation/dexon-consensus/core/db"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+)
+
+// BlockRevealer defines the interface to reveal a group
+// of pre-generated blocks.
+type BlockRevealer interface {
+ db.BlockIterator
+
+ // Reset the revealing.
+ Reset()
+}
+
+// TransportPeerType defines the type of peer, either 'peer' or 'server'.
+type TransportPeerType string
+
+const (
+ // TransportPeerServer is the type of peer server.
+ TransportPeerServer TransportPeerType = "server"
+ // TransportPeer is the type of peer.
+ TransportPeer TransportPeerType = "peer"
+)
+
+// TransportEnvelope defines the payload format of a message when transporting.
+type TransportEnvelope struct {
+ // PeerType defines the type of source peer, could be either "peer" or
+ // "server".
+ PeerType TransportPeerType
+ // From defines the nodeID of the source peer.
+ From types.NodeID
+ // Msg is the actual payload of this message.
+ Msg interface{}
+}
+
+// TransportServer defines the peer server in the network.
+type TransportServer interface {
+ Transport
+ // Host the server; consider it a setup procedure. The
+ // returned channel could be used after 'WaitForPeers' returns.
+ Host() (chan *TransportEnvelope, error)
+ // WaitForPeers waits for all peers to join the network.
+ WaitForPeers(numPeers uint32) error
+
+ // SetDMoment sets the DMoment to be handed to joining peers.
+ SetDMoment(time.Time)
+}
+
+// TransportClient defines a peer in the network.
+type TransportClient interface {
+ Transport
+ // Report a message to the peer server.
+ Report(msg interface{}) error
+ // Join the network; it should block until joined.
+ Join(serverEndpoint interface{}) (<-chan *TransportEnvelope, error)
+
+ // DMoment returns the DMoment of the network.
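+ //
+ // The value is obtained from the peer server during the Join
+ // handshake (see fakeHandshake in fake-transport.go).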
+ DMoment() time.Time
+}
+
+// Transport defines the interface for basic transportation capabilities.
+type Transport interface {
+ // Broadcast a message to all peers in the network.
+ Broadcast(endpoints map[types.NodeID]struct{}, latency LatencyModel,
+ msg interface{}) error
+ // Send one message to a peer.
+ Send(endpoint types.NodeID, msg interface{}) error
+ // Close would clean up allocated resources.
+ Close() error
+
+ // Peers returns the public keys of all connected nodes in the p2p network.
+ // This method should be accessed only after either 'Join' or
+ // 'WaitForPeers' returned.
+ Peers() []crypto.PublicKey
+
+ Disconnect(endpoint types.NodeID)
+}
+
+// Marshaller defines an interface to convert between interface{} and []byte.
+type Marshaller interface {
+ // Unmarshal converts a []byte back to interface{} based on the type
+ // of message.
+ Unmarshal(msgType string, payload []byte) (msg interface{}, err error)
+ // Marshal converts a message to a byte string.
+ Marshal(msg interface{}) (msgType string, payload []byte, err error)
+}
diff --git a/dex/consensus/core/test/latency.go b/dex/consensus/core/test/latency.go
new file mode 100644
index 000000000..58669ae7f
--- /dev/null
+++ b/dex/consensus/core/test/latency.go
@@ -0,0 +1,54 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+ "math/rand"
+ "time"
+)
+
+// LatencyModel defines an interface to randomly decide latency
+// for one operation.
+type LatencyModel interface {
+ Delay() time.Duration
+}
+
+// NormalLatencyModel returns latencies in a normal distribution.
+type NormalLatencyModel struct {
+ Sigma float64
+ Mean float64
+}
+
+// Delay implements LatencyModel interface.
+func (m *NormalLatencyModel) Delay() time.Duration {
+ delay := rand.NormFloat64()*m.Sigma + m.Mean
+ if delay < 0 {
+ delay = m.Sigma / 2
+ }
+ return time.Duration(delay) * time.Millisecond
+}
+
+// FixedLatencyModel returns fixed latencies.
+type FixedLatencyModel struct {
+ Latency float64
+}
+
+// Delay implements LatencyModel interface.
+func (m *FixedLatencyModel) Delay() time.Duration {
+ return time.Duration(m.Latency) * time.Millisecond
+}
diff --git a/dex/consensus/core/test/marshaller.go b/dex/consensus/core/test/marshaller.go
new file mode 100644
index 000000000..91a3057ed
--- /dev/null
+++ b/dex/consensus/core/test/marshaller.go
@@ -0,0 +1,156 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+)
+
+// DefaultMarshaller is the default marshaller for testing core.Consensus.
+type DefaultMarshaller struct {
+ fallback Marshaller
+}
+
+// NewDefaultMarshaller constructs a DefaultMarshaller instance.
+func NewDefaultMarshaller(fallback Marshaller) *DefaultMarshaller {
+ return &DefaultMarshaller{
+ fallback: fallback,
+ }
+}
+
+// Unmarshal implements Marshaller interface.
+func (m *DefaultMarshaller) Unmarshal(
+ msgType string, payload []byte) (msg interface{}, err error) {
+ switch msgType {
+ case "block":
+ block := &types.Block{}
+ if err = json.Unmarshal(payload, block); err != nil {
+ break
+ }
+ msg = block
+ case "vote":
+ vote := &types.Vote{}
+ if err = json.Unmarshal(payload, vote); err != nil {
+ break
+ }
+ msg = vote
+ case "agreement-result":
+ result := &types.AgreementResult{}
+ if err = json.Unmarshal(payload, result); err != nil {
+ break
+ }
+ msg = result
+ case "dkg-private-share":
+ privateShare := &typesDKG.PrivateShare{}
+ if err = json.Unmarshal(payload, privateShare); err != nil {
+ break
+ }
+ msg = privateShare
+ case "dkg-master-public-key":
+ masterPublicKey := typesDKG.NewMasterPublicKey()
+ if err = json.Unmarshal(payload, masterPublicKey); err != nil {
+ break
+ }
+ msg = masterPublicKey
+ case "dkg-complaint":
+ complaint := &typesDKG.Complaint{}
+ if err = json.Unmarshal(payload, complaint); err != nil {
+ break
+ }
+ msg = complaint
+ case "dkg-partial-signature":
+ psig := &typesDKG.PartialSignature{}
+ if err = json.Unmarshal(payload, psig); err != nil {
+ break
+ }
+ msg = psig
+ case "dkg-finalize":
+ final := &typesDKG.Finalize{}
+ if err = json.Unmarshal(payload, final); err != nil {
+ break
+ }
+ msg = final
+ case "packed-state-changes":
+ packed := &packedStateChanges{}
+ if err = json.Unmarshal(payload, packed); err != nil {
+ break
+ }
+ msg = *packed
+ case "pull-request":
+ req := &PullRequest{}
+ if err = json.Unmarshal(payload, req); err != nil {
+ break
+ }
+ msg = req
+ default:
+ if m.fallback == nil {
+ err = fmt.Errorf("unknown msg type: %v", msgType)
+ break
+ }
+ msg, err = m.fallback.Unmarshal(msgType, payload)
+ }
+ return
+}
+
+// Marshal implements Marshaller interface.
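+//
+// Round-trip sketch (m is a *DefaultMarshaller; the listed types are
+// handled natively, anything else falls back to the wrapped Marshaller):
+//
+//   msgType, payload, _ := m.Marshal(&types.Vote{}) // msgType == "vote"
+//   msg, _ := m.Unmarshal(msgType, payload)         // msg.(*types.Vote)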
+func (m *DefaultMarshaller) Marshal(
+ msg interface{}) (msgType string, payload []byte, err error) {
+ switch msg.(type) {
+ case *types.Block:
+ msgType = "block"
+ payload, err = json.Marshal(msg)
+ case *types.Vote:
+ msgType = "vote"
+ payload, err = json.Marshal(msg)
+ case *types.AgreementResult:
+ msgType = "agreement-result"
+ payload, err = json.Marshal(msg)
+ case *typesDKG.PrivateShare:
+ msgType = "dkg-private-share"
+ payload, err = json.Marshal(msg)
+ case *typesDKG.MasterPublicKey:
+ msgType = "dkg-master-public-key"
+ payload, err = json.Marshal(msg)
+ case *typesDKG.Complaint:
+ msgType = "dkg-complaint"
+ payload, err = json.Marshal(msg)
+ case *typesDKG.PartialSignature:
+ msgType = "dkg-partial-signature"
+ payload, err = json.Marshal(msg)
+ case *typesDKG.Finalize:
+ msgType = "dkg-finalize"
+ payload, err = json.Marshal(msg)
+ case packedStateChanges:
+ msgType = "packed-state-changes"
+ payload, err = json.Marshal(msg)
+ case *PullRequest:
+ msgType = "pull-request"
+ payload, err = json.Marshal(msg)
+ default:
+ if m.fallback == nil {
+ err = fmt.Errorf("unknown message type: %v", msg)
+ break
+ }
+ msgType, payload, err = m.fallback.Marshal(msg)
+ }
+ return
+}
diff --git a/dex/consensus/core/test/network.go b/dex/consensus/core/test/network.go
new file mode 100644
index 000000000..c0ec255e7
--- /dev/null
+++ b/dex/consensus/core/test/network.go
@@ -0,0 +1,733 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/utils"
+)
+
+const (
+ // Maximum number of peers to pull votes from.
+ maxPullingPeerCount = 3
+ maxBlockCache = 1000
+ maxVoteCache = 128
+
+ // Gossiping parameters.
+ maxAgreementResultBroadcast = 3
+ gossipAgreementResultPercent = 33
+)
+
+// NetworkType is the simulation network type.
+type NetworkType string
+
+// NetworkType enums.
+const (
+ NetworkTypeTCP NetworkType = "tcp"
+ NetworkTypeTCPLocal NetworkType = "tcp-local"
+ NetworkTypeFake NetworkType = "fake"
+)
+
+// NetworkConfig is the configuration for the Network module.
+type NetworkConfig struct {
+ Type NetworkType
+ PeerServer string
+ PeerPort int
+ DirectLatency LatencyModel
+ GossipLatency LatencyModel
+ Marshaller Marshaller
+}
+
+// PullRequest is a generic request to pull everything (e.g. votes, blocks).
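+//
+// For example (sketch; h1 and h2 are common.Hash values), a pull request
+// for two blocks:
+//
+//   req := &PullRequest{
+//       Requester: nodeID,
+//       Type:      "block",
+//       Identity:  common.Hashes{h1, h2},
+//   }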
+type PullRequest struct {
+ Requester types.NodeID
+ Type string
+ Identity interface{}
+}
+
+// MarshalJSON implements json.Marshaler.
+func (req *PullRequest) MarshalJSON() (b []byte, err error) {
+ var idAsBytes []byte
+ // Make sure the caller prepares the correct identity for pull requests.
+ switch req.Type {
+ case "block":
+ idAsBytes, err = json.Marshal(req.Identity.(common.Hashes))
+ case "vote":
+ idAsBytes, err = json.Marshal(req.Identity.(types.Position))
+ default:
+ err = fmt.Errorf("unknown ID type for pull request: %v", req.Type)
+ }
+ if err != nil {
+ return
+ }
+ b, err = json.Marshal(&struct {
+ Requester types.NodeID `json:"req"`
+ Type string `json:"type"`
+ Identity []byte `json:"id"`
+ }{req.Requester, req.Type, idAsBytes})
+ return
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (req *PullRequest) UnmarshalJSON(data []byte) (err error) {
+ rawReq := &struct {
+ Requester types.NodeID `json:"req"`
+ Type string `json:"type"`
+ Identity []byte `json:"id"`
+ }{}
+ if err = json.Unmarshal(data, rawReq); err != nil {
+ return
+ }
+ var ID interface{}
+ switch rawReq.Type {
+ case "block":
+ hashes := common.Hashes{}
+ if err = json.Unmarshal(rawReq.Identity, &hashes); err != nil {
+ break
+ }
+ ID = hashes
+ case "vote":
+ pos := types.Position{}
+ if err = json.Unmarshal(rawReq.Identity, &pos); err != nil {
+ break
+ }
+ ID = pos
+ default:
+ err = fmt.Errorf("unknown pull request type: %v", rawReq.Type)
+ }
+ if err != nil {
+ return
+ }
+ req.Requester = rawReq.Requester
+ req.Type = rawReq.Type
+ req.Identity = ID
+ return
+}
+
+// NetworkCensor is an interface to determine if a message should be censored.
+type NetworkCensor interface {
+ Censor(interface{}) bool
+}
+
+type censorClient struct {
+ TransportClient
+
+ censor NetworkCensor
+ lock sync.RWMutex
+}
+
+func (cc *censorClient) Send(ID types.NodeID, msg interface{}) error {
+ if func() bool {
+ cc.lock.RLock()
+ defer cc.lock.RUnlock()
+ return cc.censor.Censor(msg)
+ }() {
+ return nil
+ }
+ return cc.TransportClient.Send(ID, msg)
+}
+
+func (cc *censorClient) Broadcast(
+ IDs map[types.NodeID]struct{}, latency LatencyModel, msg interface{}) error {
+ if func() bool {
+ cc.lock.RLock()
+ defer cc.lock.RUnlock()
+ return cc.censor.Censor(msg)
+ }() {
+ return nil
+ }
+ return cc.TransportClient.Broadcast(IDs, latency, msg)
+}
+
+type dummyCensor struct{}
+
+func (dc *dummyCensor) Censor(interface{}) bool { return false }
+
+// Network implements core.Network interface based on TransportClient.
+type Network struct {
+ ID types.NodeID
+ config NetworkConfig
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ trans *censorClient
+ dMoment time.Time
+ fromTransport <-chan *TransportEnvelope
+ toConsensus chan types.Msg
+ toNode chan interface{}
+ badPeerChan chan interface{}
+ sentAgreementLock sync.Mutex
+ sentAgreement map[common.Hash]struct{}
+ blockCacheLock sync.RWMutex
+ blockCache map[common.Hash]*types.Block
+ voteCacheLock sync.RWMutex
+ voteCache map[types.Position]map[types.VoteHeader]*types.Vote
+ voteCacheSize int
+ votePositions []types.Position
+ stateModule *State
+ peers map[types.NodeID]struct{}
+ unreceivedBlocksLock sync.RWMutex
+ unreceivedBlocks map[common.Hash]chan<- common.Hash
+ cache *utils.NodeSetCache
+ notarySetCachesLock sync.Mutex
+ notarySetCaches map[uint64]map[types.NodeID]struct{}
+ censor NetworkCensor
+ censorLock sync.RWMutex
+}
+
+// NewNetwork sets up a network module for a node, which provides an
+// implementation of core.Network based on TransportClient.
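+//
+// Construction sketch with the in-process fake transport (mirroring
+// setupNetworks in network_test.go):
+//
+//   n := NewNetwork(pubKey, NetworkConfig{
+//       Type:          NetworkTypeFake,
+//       DirectLatency: &FixedLatencyModel{},
+//       GossipLatency: &FixedLatencyModel{},
+//       Marshaller:    NewDefaultMarshaller(nil),
+//   })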
+func NewNetwork(pubKey crypto.PublicKey, config NetworkConfig) (
+ n *Network) {
+ // Construct basic network instance.
+ n = &Network{
+ ID: types.NewNodeID(pubKey),
+ config: config,
+ toConsensus: make(chan types.Msg, 1000),
+ toNode: make(chan interface{}, 1000),
+ badPeerChan: make(chan interface{}, 1000),
+ sentAgreement: make(map[common.Hash]struct{}),
+ blockCache: make(map[common.Hash]*types.Block, maxBlockCache),
+ unreceivedBlocks: make(map[common.Hash]chan<- common.Hash),
+ peers: make(map[types.NodeID]struct{}),
+ notarySetCaches: make(map[uint64]map[types.NodeID]struct{}),
+ voteCache: make(
+ map[types.Position]map[types.VoteHeader]*types.Vote),
+ censor: &dummyCensor{},
+ }
+ n.ctx, n.ctxCancel = context.WithCancel(context.Background())
+ // Construct transport layer.
+ var trans TransportClient
+ switch config.Type {
+ case NetworkTypeTCPLocal:
+ trans = NewTCPTransportClient(pubKey, config.Marshaller, true)
+ case NetworkTypeTCP:
+ trans = NewTCPTransportClient(pubKey, config.Marshaller, false)
+ case NetworkTypeFake:
+ trans = NewFakeTransportClient(pubKey)
+ default:
+ panic(fmt.Errorf("unknown network type: %v", config.Type))
+ }
+ n.trans = &censorClient{
+ TransportClient: trans,
+ censor: &dummyCensor{},
+ }
+ return
+}
+
+// SetCensor sets the censors applied to incoming (censorIn) and outgoing
+// (censorOut) messages of this network module; passing nil restores the
+// default no-op censor.
+func (n *Network) SetCensor(censorIn, censorOut NetworkCensor) {
+ if censorIn == nil {
+ censorIn = &dummyCensor{}
+ }
+ if censorOut == nil {
+ censorOut = &dummyCensor{}
+ }
+ func() {
+ n.censorLock.Lock()
+ defer n.censorLock.Unlock()
+ n.censor = censorIn
+ }()
+ func() {
+ n.trans.lock.Lock()
+ defer n.trans.lock.Unlock()
+ n.trans.censor = censorOut
+ }()
+}
+
+// PullBlocks implements core.Network interface.
+func (n *Network) PullBlocks(hashes common.Hashes) {
+ go n.pullBlocksAsync(hashes)
+}
+
+// PullVotes implements core.Network interface.
+func (n *Network) PullVotes(pos types.Position) {
+ go n.pullVotesAsync(pos)
+}
+
+// BroadcastVote implements core.Network interface.
+func (n *Network) BroadcastVote(vote *types.Vote) {
+ if err := n.trans.Broadcast(n.getNotarySet(vote.Position.Round),
+ n.config.DirectLatency, vote); err != nil {
+ panic(err)
+ }
+ n.addVoteToCache(vote)
+}
+
+// BroadcastBlock implements core.Network interface.
+func (n *Network) BroadcastBlock(block *types.Block) {
+ // Avoid data race in fake transport.
+ block = n.cloneForFake(block).(*types.Block)
+ notarySet := n.getNotarySet(block.Position.Round)
+ if !block.IsFinalized() {
+ if err := n.trans.Broadcast(
+ notarySet, n.config.DirectLatency, block); err != nil {
+ panic(err)
+ }
+ }
+ if err := n.trans.Broadcast(getComplementSet(n.peers, notarySet),
+ n.config.GossipLatency, block); err != nil {
+ panic(err)
+ }
+ n.addBlockToCache(block)
+ if block.IsFinalized() {
+ n.addBlockRandomnessToCache(block.Hash, block.Randomness)
+ }
+}
+
+// BroadcastAgreementResult implements core.Network interface.
+func (n *Network) BroadcastAgreementResult(
+ result *types.AgreementResult) {
+ if !n.markAgreementResultAsSent(result.BlockHash) {
+ return
+ }
+ n.addBlockRandomnessToCache(result.BlockHash, result.Randomness)
+ notarySet := n.getNotarySet(result.Position.Round)
+ count := maxAgreementResultBroadcast
+ for nID := range notarySet {
+ if count--; count < 0 {
+ break
+ }
+ if err := n.trans.Send(nID, result); err != nil {
+ panic(err)
+ }
+ }
+ // Gossip to other nodes.
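+ // getComplementSet picks every known peer outside the notary set, so
+ // the result still reaches the rest of the network, only with gossip
+ // latency instead of direct latency.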
+ if err := n.trans.Broadcast(getComplementSet(n.peers, notarySet), + n.config.GossipLatency, result); err != nil { + panic(err) + } +} + +// SendDKGPrivateShare implements core.Network interface. +func (n *Network) SendDKGPrivateShare( + recv crypto.PublicKey, prvShare *typesDKG.PrivateShare) { + n.send(types.NewNodeID(recv), prvShare) +} + +// BroadcastDKGPrivateShare implements core.Network interface. +func (n *Network) BroadcastDKGPrivateShare( + prvShare *typesDKG.PrivateShare) { + if err := n.trans.Broadcast(n.getNotarySet(prvShare.Round), + n.config.DirectLatency, prvShare); err != nil { + panic(err) + } +} + +// BroadcastDKGPartialSignature implements core.Network interface. +func (n *Network) BroadcastDKGPartialSignature( + psig *typesDKG.PartialSignature) { + if err := n.trans.Broadcast( + n.getNotarySet(psig.Round), n.config.DirectLatency, psig); err != nil { + panic(err) + } +} + +// ReceiveChan implements core.Network interface. +func (n *Network) ReceiveChan() <-chan types.Msg { + return n.toConsensus +} + +// Setup transport layer. +func (n *Network) Setup(serverEndpoint interface{}) (err error) { + // Join the p2p network. + switch n.config.Type { + case NetworkTypeTCP, NetworkTypeTCPLocal: + addr := net.JoinHostPort( + n.config.PeerServer, strconv.Itoa(n.config.PeerPort)) + n.fromTransport, err = n.trans.Join(addr) + case NetworkTypeFake: + n.fromTransport, err = n.trans.Join(serverEndpoint) + default: + err = fmt.Errorf("unknown network type: %v", n.config.Type) + } + if err != nil { + return + } + peerKeys := n.trans.Peers() + for _, k := range peerKeys { + n.peers[types.NewNodeID(k)] = struct{}{} + } + return +} + +func (n *Network) dispatchMsg(e *TransportEnvelope) { + if func() bool { + n.censorLock.RLock() + defer n.censorLock.RUnlock() + return n.censor.Censor(e.Msg) + }() { + return + } + msg := n.cloneForFake(e.Msg) + switch v := msg.(type) { + case *types.Block: + n.addBlockToCache(v) + // Notify pulling routine about the newly arrived block. + func() { + n.unreceivedBlocksLock.Lock() + defer n.unreceivedBlocksLock.Unlock() + if ch, exists := n.unreceivedBlocks[v.Hash]; exists { + ch <- v.Hash + } + delete(n.unreceivedBlocks, v.Hash) + }() + n.toConsensus <- types.Msg{ + PeerID: e.From, + Payload: v, + } + case *types.Vote: + // Add this vote to cache. 
+ n.addVoteToCache(v)
+ n.toConsensus <- types.Msg{
+ PeerID: e.From,
+ Payload: v,
+ }
+ case *types.AgreementResult,
+ *typesDKG.PrivateShare, *typesDKG.PartialSignature:
+ n.toConsensus <- types.Msg{
+ PeerID: e.From,
+ Payload: v,
+ }
+ case packedStateChanges:
+ if n.stateModule == nil {
+ panic(errors.New(
+ "receive packed state change request without state attached"))
+ }
+ if err := n.stateModule.AddRequestsFromOthers([]byte(v)); err != nil {
+ panic(err)
+ }
+ case *PullRequest:
+ go n.handlePullRequest(v)
+ default:
+ n.toNode <- v
+ }
+}
+
+func (n *Network) handlePullRequest(req *PullRequest) {
+ switch req.Type {
+ case "block":
+ hashes := req.Identity.(common.Hashes)
+ func() {
+ n.blockCacheLock.Lock()
+ defer n.blockCacheLock.Unlock()
+ All:
+ for _, h := range hashes {
+ b, exists := n.blockCache[h]
+ if !exists {
+ continue
+ }
+ select {
+ case <-n.ctx.Done():
+ break All
+ default:
+ }
+ n.send(req.Requester, b)
+ }
+ }()
+ case "vote":
+ pos := req.Identity.(types.Position)
+ func() {
+ n.voteCacheLock.Lock()
+ defer n.voteCacheLock.Unlock()
+ if votes, exists := n.voteCache[pos]; exists {
+ for _, v := range votes {
+ n.send(req.Requester, v)
+ }
+ }
+ }()
+ default:
+ panic(fmt.Errorf("unknown type of pull request: %v", req.Type))
+ }
+}
+
+// Run the main loop.
+func (n *Network) Run() {
+Loop:
+ for {
+ select {
+ case <-n.ctx.Done():
+ break Loop
+ default:
+ }
+ select {
+ case peer := <-n.badPeerChan:
+ if peer == nil {
+ continue Loop
+ }
+ n.trans.Disconnect(peer.(types.NodeID))
+ case <-n.ctx.Done():
+ break Loop
+ case e, ok := <-n.fromTransport:
+ if !ok {
+ break Loop
+ }
+ go n.dispatchMsg(e)
+ }
+ }
+}
+
+// Close stops the network.
+func (n *Network) Close() (err error) {
+ n.ctxCancel()
+ close(n.toConsensus)
+ n.toConsensus = nil
+ close(n.toNode)
+ n.toNode = nil
+ if err = n.trans.Close(); err != nil {
+ return
+ }
+ return
+}
+
+// Report exports 'Report' method of TransportClient.
+func (n *Network) Report(msg interface{}) error {
+ return n.trans.Report(msg)
+}
+
+// Broadcast a message to all peers.
+func (n *Network) Broadcast(msg interface{}) error {
+ return n.trans.Broadcast(n.peers, &FixedLatencyModel{}, msg)
+}
+
+// Peers exports 'Peers' method of Transport.
+func (n *Network) Peers() []crypto.PublicKey {
+ return n.trans.Peers()
+}
+
+// DMoment exports 'DMoment' method of Transport.
+func (n *Network) DMoment() time.Time {
+ return n.trans.DMoment()
+}
+
+// ReceiveChanForNode returns a channel for messages not handled by
+// core.Consensus.
+func (n *Network) ReceiveChanForNode() <-chan interface{} {
+ return n.toNode
+}
+
+// addStateModule attaches a State instance to this network.
+func (n *Network) addStateModule(s *State) {
+ // This variable should be attached before run; no lock to protect it.
+ n.stateModule = s
+}
+
+// AttachNodeSetCache attaches a utils.NodeSetCache to this module. Once
+// attached, the Broadcast-X methods switch to broadcasting to the correct
+// set of peers, instead of all peers.
+func (n *Network) AttachNodeSetCache(cache *utils.NodeSetCache) {
+ // This variable should be attached before run; no lock to protect it.
+ n.cache = cache
+}
+
+// PurgeNodeSetCache purges the cache of some round in the attached
+// utils.NodeSetCache.
+func (n *Network) PurgeNodeSetCache(round uint64) {
+ n.cache.Purge(round)
+}
+
+// ReportBadPeerChan returns a channel for reporting peers that send bad
+// messages.
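+//
+// Sketch: ask the run loop to disconnect a misbehaving peer (badID is a
+// types.NodeID):
+//
+//   n.ReportBadPeerChan() <- badID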
+func (n *Network) ReportBadPeerChan() chan<- interface{} {
+ return n.badPeerChan
+}
+
+func (n *Network) pullBlocksAsync(hashes common.Hashes) {
+ // Setup notification channels for each block hash.
+ notYetReceived := make(map[common.Hash]struct{})
+ ch := make(chan common.Hash, len(hashes))
+ func() {
+ n.unreceivedBlocksLock.Lock()
+ defer n.unreceivedBlocksLock.Unlock()
+ for _, h := range hashes {
+ if _, exists := n.unreceivedBlocks[h]; exists {
+ continue
+ }
+ n.unreceivedBlocks[h] = ch
+ notYetReceived[h] = struct{}{}
+ }
+ }()
+ req := &PullRequest{
+ Requester: n.ID,
+ Type: "block",
+ Identity: hashes,
+ }
+ // Randomly pick peers to send pull requests.
+Loop:
+ for nID := range n.peers {
+ if nID == n.ID {
+ continue
+ }
+ n.send(nID, req)
+ select {
+ case <-n.ctx.Done():
+ break Loop
+ case <-time.After(2 * n.config.DirectLatency.Delay()):
+ // Consume everything in the notification channel.
+ for {
+ select {
+ case h, ok := <-ch:
+ if !ok {
+ // This network module is closed.
+ break Loop
+ }
+ delete(notYetReceived, h)
+ if len(notYetReceived) == 0 {
+ break Loop
+ }
+ default:
+ continue Loop
+ }
+ }
+ }
+ }
+}
+
+func (n *Network) pullVotesAsync(pos types.Position) {
+ // Randomly pick several peers to pull votes from.
+ req := &PullRequest{
+ Requester: n.ID,
+ Type: "vote",
+ Identity: pos,
+ }
+ // Get the corresponding notary set.
+ notarySet := n.getNotarySet(pos.Round)
+ // Pick up to maxPullingPeerCount peers from the notary set and send
+ // them pull requests.
+ sentCount := 0
+ for nID := range notarySet {
+ n.send(nID, req)
+ sentCount++
+ if sentCount >= maxPullingPeerCount {
+ break
+ }
+ }
+}
+
+func (n *Network) addBlockToCache(b *types.Block) {
+ n.blockCacheLock.Lock()
+ defer n.blockCacheLock.Unlock()
+ if len(n.blockCache) > maxBlockCache {
+ // Randomly purge one block from cache.
+ for k := range n.blockCache {
+ delete(n.blockCache, k)
+ break
+ }
+ }
+ n.blockCache[b.Hash] = b.Clone()
+}
+
+func (n *Network) addBlockRandomnessToCache(hash common.Hash, rand []byte) {
+ n.blockCacheLock.Lock()
+ defer n.blockCacheLock.Unlock()
+ block, exist := n.blockCache[hash]
+ if !exist {
+ return
+ }
+ block.Randomness = rand
+}
+
+func (n *Network) addVoteToCache(v *types.Vote) {
+ n.voteCacheLock.Lock()
+ defer n.voteCacheLock.Unlock()
+ if n.voteCacheSize >= maxVoteCache {
+ pos := n.votePositions[0]
+ n.voteCacheSize -= len(n.voteCache[pos])
+ delete(n.voteCache, pos)
+ n.votePositions = n.votePositions[1:]
+ }
+ if _, exists := n.voteCache[v.Position]; !exists {
+ n.votePositions = append(n.votePositions, v.Position)
+ n.voteCache[v.Position] =
+ make(map[types.VoteHeader]*types.Vote)
+ }
+ if _, exists := n.voteCache[v.Position][v.VoteHeader]; exists {
+ return
+ }
+ n.voteCache[v.Position][v.VoteHeader] = v
+ n.voteCacheSize++
+}
+
+func (n *Network) markAgreementResultAsSent(blockHash common.Hash) bool {
+ n.sentAgreementLock.Lock()
+ defer n.sentAgreementLock.Unlock()
+ if _, exist := n.sentAgreement[blockHash]; exist {
+ return false
+ }
+ if len(n.sentAgreement) > 1000 {
+ // Randomly drop one entry.
+ for k := range n.sentAgreement {
+ delete(n.sentAgreement, k)
+ break
+ }
+ }
+ n.sentAgreement[blockHash] = struct{}{}
+ return true
+}
+
+func (n *Network) cloneForFake(v interface{}) interface{} {
+ if n.config.Type != NetworkTypeFake {
+ return v
+ }
+ switch val := v.(type) {
+ case *types.Block:
+ return val.Clone()
+ case *types.AgreementResult:
+ // Perform a deep copy for the randomness result.
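+ // Without the copy, peers sharing the in-process fake transport
+ // would mutate the same underlying randomness data.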
+ return cloneAgreementResult(val)
+ }
+ return v
+}
+
+// getNotarySet gets the notary set of a round from cache.
+func (n *Network) getNotarySet(round uint64) map[types.NodeID]struct{} {
+ if n.cache == nil {
+ // The default behavior is to broadcast to all peers, which makes it
+ // easier to use in simple test cases.
+ return n.peers
+ }
+ n.notarySetCachesLock.Lock()
+ defer n.notarySetCachesLock.Unlock()
+ set, exists := n.notarySetCaches[round]
+ if !exists {
+ var err error
+ set, err = n.cache.GetNotarySet(round)
+ if err != nil {
+ panic(err)
+ }
+ n.notarySetCaches[round] = set
+ }
+ return set
+}
+
+func (n *Network) send(endpoint types.NodeID, msg interface{}) {
+ go func() {
+ time.Sleep(n.config.DirectLatency.Delay())
+ if err := n.trans.Send(endpoint, msg); err != nil {
+ panic(err)
+ }
+ }()
+}
diff --git a/dex/consensus/core/test/network_test.go b/dex/consensus/core/test/network_test.go
new file mode 100644
index 000000000..27d25e6dc
--- /dev/null
+++ b/dex/consensus/core/test/network_test.go
@@ -0,0 +1,379 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+ "context"
+ "encoding/json"
+ "math/rand"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/utils"
+ "github.com/stretchr/testify/suite"
+)
+
+type NetworkTestSuite struct {
+ suite.Suite
+}
+
+func (s *NetworkTestSuite) setupNetworks(
+ pubKeys []crypto.PublicKey) map[types.NodeID]*Network {
+ var (
+ server = NewFakeTransportServer()
+ wg sync.WaitGroup
+ )
+ serverChannel, err := server.Host()
+ s.Require().NoError(err)
+ // Setup several network modules.
+ networks := make(map[types.NodeID]*Network)
+ for _, key := range pubKeys {
+ n := NewNetwork(key, NetworkConfig{
+ Type: NetworkTypeFake,
+ DirectLatency: &FixedLatencyModel{},
+ GossipLatency: &FixedLatencyModel{},
+ Marshaller: NewDefaultMarshaller(nil)})
+ networks[n.ID] = n
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ s.Require().NoError(n.Setup(serverChannel))
+ go n.Run()
+ }()
+ }
+ s.Require().NoError(server.WaitForPeers(uint32(len(pubKeys))))
+ wg.Wait()
+ return networks
+}
+
+func (s *NetworkTestSuite) TestPullRequestMarshaling() {
+ // Verify that a pull request for blocks can be marshalled.
+	blockHashes := common.Hashes{
+		common.NewRandomHash(),
+		common.NewRandomHash(),
+		common.NewRandomHash(),
+	}
+	req := &PullRequest{
+		Requester: GenerateRandomNodeIDs(1)[0],
+		Type:      "block",
+		Identity:  blockHashes,
+	}
+	b, err := json.Marshal(req)
+	s.Require().NoError(err)
+	req2 := &PullRequest{}
+	s.Require().NoError(json.Unmarshal(b, req2))
+	s.Require().Equal(req.Requester, req2.Requester)
+	s.Require().Equal(req.Type, req2.Type)
+	s.Require().Equal(blockHashes, req2.Identity)
+	// Verify that a pull request for votes can be marshaled.
+	req = &PullRequest{
+		Requester: GenerateRandomNodeIDs(1)[0],
+		Type:      "vote",
+		Identity: types.Position{
+			Round:  1,
+			Height: 3,
+		}}
+	b, err = json.Marshal(req)
+	s.Require().NoError(err)
+	req2 = &PullRequest{}
+	s.Require().NoError(json.Unmarshal(b, req2))
+	s.Require().Equal(req.Requester, req2.Requester)
+	s.Require().Equal(req.Type, req2.Type)
+	s.Require().Equal(req.Identity.(types.Position).Round,
+		req2.Identity.(types.Position).Round)
+	s.Require().Equal(req.Identity.(types.Position).Height,
+		req2.Identity.(types.Position).Height)
+}
+
+func (s *NetworkTestSuite) TestPullBlocks() {
+	var (
+		peerCount = 10
+		req       = s.Require()
+	)
+	_, pubKeys, err := NewKeys(peerCount)
+	req.NoError(err)
+	networks := s.setupNetworks(pubKeys)
+	// Generate several random hashes.
+	hashes := common.Hashes{}
+	for range networks {
+		hashes = append(hashes, common.NewRandomHash())
+	}
+	// Randomly pick one network instance as master.
+	var master *Network
+	for _, master = range networks {
+		break
+	}
+	// Send a fake block for each hash to every network except master, so
+	// those peers can serve pull requests later.
+	for _, h := range hashes {
+		for _, n := range networks {
+			if n.ID == master.ID {
+				continue
+			}
+			req.NoError(master.trans.Send(n.ID, &types.Block{Hash: h}))
+		}
+	}
+	// Make sure each node receives its blocks.
+	time.Sleep(1 * time.Second)
+	// Initiate a pull request from master for all hashes.
+	master.PullBlocks(hashes)
+	awaitMap := make(map[common.Hash]struct{})
+	for _, h := range hashes {
+		awaitMap[h] = struct{}{}
+	}
+	// We should be able to receive all hashes.
+	ctx, cancelFunc := context.WithTimeout(context.Background(), 3*time.Second)
+	defer func() { cancelFunc() }()
+	for {
+		select {
+		case v := <-master.ReceiveChan():
+			b, ok := v.Payload.(*types.Block)
+			if !ok {
+				break
+			}
+			delete(awaitMap, b.Hash)
+		case <-ctx.Done():
+			// This test case fails: we didn't receive all pulled blocks.
+			req.False(true)
+		}
+		if len(awaitMap) == 0 {
+			break
+		}
+	}
+}
+
+func (s *NetworkTestSuite) TestPullVotes() {
+	var (
+		peerCount     = maxPullingPeerCount
+		maxRound      = uint64(5)
+		voteCount     = maxVoteCache
+		voteTestCount = maxVoteCache / 2
+		req           = s.Require()
+	)
+	_, pubKeys, err := NewKeys(peerCount)
+	req.NoError(err)
+	networks := s.setupNetworks(pubKeys)
+	// Randomly pick one network instance as master.
+	var master *Network
+	for _, master = range networks {
+		break
+	}
+	// Prepare notary sets.
+	notarySets := []map[types.NodeID]struct{}{}
+	for i := uint64(0); i <= maxRound; i++ {
+		notarySets = append(notarySets, make(map[types.NodeID]struct{}))
+	}
+	// Randomly generate votes and send them to random peers, except master.
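+	// The votes below are delivered point-to-point over the transport, not
+	// broadcast, so only non-master peers hold them; master can then only
+	// recover them via PullVotes against the per-round notary sets that are
+	// assembled in this loop.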
+ votes := make(map[types.VoteHeader]*types.Vote) + randObj := rand.New(rand.NewSource(time.Now().UnixNano())) + for len(votes) < voteCount { + for _, n := range networks { + if n.ID == master.ID { + continue + } + v := types.NewVote( + types.VoteInit, common.NewRandomHash(), randObj.Uint64()) + v.Position = types.Position{ + Height: randObj.Uint64(), + Round: uint64(randObj.Intn(int(maxRound + 1))), + } + req.NoError(master.trans.Send(n.ID, v)) + votes[v.VoteHeader] = v + // Add this node to corresponding notary set for this vote. + notarySets[v.Position.Round][n.ID] = struct{}{} + } + } + // Randomly generate votes set to test. + votesToTest := make(map[types.VoteHeader]struct{}) + for len(votesToTest) < voteTestCount { + // Randomly pick a vote + for _, v := range votes { + votesToTest[v.VoteHeader] = struct{}{} + break + } + } + time.Sleep(1 * time.Second) + // Try to pull all votes with timeout. + for len(votesToTest) > 0 { + for vHeader := range votesToTest { + master.PullVotes(vHeader.Position) + break + } + ctx, cancelFunc := context.WithTimeout(context.Background(), 500*time.Millisecond) + defer cancelFunc() + select { + case v := <-master.ReceiveChan(): + vv, ok := v.Payload.(*types.Vote) + if !ok { + break + } + delete(votesToTest, vv.VoteHeader) + case <-ctx.Done(): + s.FailNow("PullVote Fail") + } + } +} + +func (s *NetworkTestSuite) TestBroadcastToSet() { + // Make sure when a network module attached to a utils.NodeSetCache, + // These function would broadcast to correct nodes, not all peers. + // - BroadcastVote, notary set. + // - BroadcastBlock, notary set. + // - BroadcastDKGPrivateShare, DKG set. + // - BroadcastDKGPartialSignature, DKG set. + var ( + req = s.Require() + peerCount = 5 + round = uint64(1) + ) + _, pubKeys, err := NewKeys(peerCount) + req.NoError(err) + gov, err := NewGovernance(NewState( + 1, pubKeys, time.Second, &common.NullLogger{}, true), 2) + req.NoError(err) + req.NoError(gov.State().RequestChange(StateChangeNotarySetSize, uint32(1))) + gov.NotifyRound(round, + utils.GetRoundHeight(gov, 0)+gov.Configuration(0).RoundLength) + networks := s.setupNetworks(pubKeys) + cache := utils.NewNodeSetCache(gov) + // Cache required set of nodeIDs. + notarySet, err := cache.GetNotarySet(round) + req.NoError(err) + req.Len(notarySet, 1) + var ( + // Some node don't belong to any set. + nerd *Network + notaryNode *Network + ) + for nID, n := range networks { + if _, exists := notarySet[nID]; exists { + continue + } + nerd = n + break + } + for nID := range notarySet { + notaryNode = networks[nID] + break + } + req.NotNil(nerd) + req.NotNil(notaryNode) + nerd.AttachNodeSetCache(cache) + pos := types.Position{Round: round, Height: types.GenesisHeight} + // Try broadcasting with datum from round 0, and make sure only node belongs + // to that set receiving the message. 
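+	// Expected routing: each broadcast below should be received by
+	// notaryNode only; nerd is outside the notary set and must not get a
+	// copy of its own broadcast back.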
+	nerd.BroadcastVote(&types.Vote{VoteHeader: types.VoteHeader{Position: pos}})
+	msg := <-notaryNode.ReceiveChan()
+	req.IsType(&types.Vote{}, msg.Payload)
+	nerd.BroadcastDKGPrivateShare(&typesDKG.PrivateShare{Round: pos.Round})
+	msg = <-notaryNode.ReceiveChan()
+	req.IsType(&typesDKG.PrivateShare{}, msg.Payload)
+	nerd.BroadcastDKGPartialSignature(&typesDKG.PartialSignature{Round: pos.Round})
+	msg = <-notaryNode.ReceiveChan()
+	req.IsType(&typesDKG.PartialSignature{}, msg.Payload)
+	nerd.BroadcastBlock(&types.Block{Position: pos})
+	msg = <-notaryNode.ReceiveChan()
+	req.IsType(&types.Block{}, msg.Payload)
+}
+
+type testVoteCensor struct{}
+
+func (vc *testVoteCensor) Censor(msg interface{}) bool {
+	if _, ok := msg.(*types.Vote); ok {
+		return true
+	}
+	return false
+}
+
+func (s *NetworkTestSuite) TestCensor() {
+	var (
+		req       = s.Require()
+		peerCount = 5
+	)
+	_, pubKeys, err := NewKeys(peerCount)
+	req.NoError(err)
+	networks := s.setupNetworks(pubKeys)
+	receiveChans := make(map[types.NodeID]<-chan types.Msg, peerCount)
+	for nID, node := range networks {
+		receiveChans[nID] = node.ReceiveChan()
+	}
+
+	censor := &testVoteCensor{}
+	vote := &types.Vote{}
+	censorNodeID := types.NewNodeID(pubKeys[0])
+	otherNodeID := types.NewNodeID(pubKeys[1])
+	censorNode := networks[censorNodeID]
+	otherNode := networks[otherNodeID]
+
+	// Censor incoming votes.
+	censorNode.SetCensor(censor, nil)
+	otherNode.BroadcastVote(vote)
+	time.Sleep(50 * time.Millisecond)
+	for nID, receiveChan := range receiveChans {
+		if nID == otherNodeID || nID == censorNodeID {
+			req.Equal(0, len(receiveChan))
+		} else {
+			req.Equal(1, len(receiveChan))
+			msg := <-receiveChan
+			req.IsType(&types.Vote{}, msg.Payload)
+		}
+	}
+
+	// Censor outgoing votes.
+	censorNode.SetCensor(nil, censor)
+	censorNode.BroadcastVote(vote)
+	time.Sleep(50 * time.Millisecond)
+	for _, receiveChan := range receiveChans {
+		req.Equal(0, len(receiveChan))
+	}
+
+	// No censorship.
+	censorNode.SetCensor(nil, nil)
+	otherNode.BroadcastVote(vote)
+	time.Sleep(50 * time.Millisecond)
+	for nID, receiveChan := range receiveChans {
+		if nID == otherNodeID {
+			req.Equal(0, len(receiveChan))
+		} else {
+			req.Equal(1, len(receiveChan))
+			msg := <-receiveChan
+			req.IsType(&types.Vote{}, msg.Payload)
+		}
+	}
+	censorNode.BroadcastVote(vote)
+	time.Sleep(50 * time.Millisecond)
+	for nID, receiveChan := range receiveChans {
+		if nID == censorNodeID {
+			req.Equal(0, len(receiveChan))
+		} else {
+			req.Equal(1, len(receiveChan))
+			msg := <-receiveChan
+			req.IsType(&types.Vote{}, msg.Payload)
+		}
+	}
+}
+
+func TestNetwork(t *testing.T) {
+	suite.Run(t, new(NetworkTestSuite))
+}
diff --git a/dex/consensus/core/test/state-change-request.go b/dex/consensus/core/test/state-change-request.go
new file mode 100644
index 000000000..4ddd40f29
--- /dev/null
+++ b/dex/consensus/core/test/state-change-request.go
@@ -0,0 +1,206 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/dexon-foundation/dexon-consensus/common"
+	"github.com/dexon-foundation/dexon-consensus/core/crypto"
+	"github.com/dexon-foundation/dexon-consensus/core/types"
+	typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+	"github.com/dexon-foundation/dexon/rlp"
+)
+
+// StateChangeType is the type of state change request.
+type StateChangeType uint8
+
+// Types of state change.
+const (
+	StateChangeNothing StateChangeType = iota
+	// DKG & CRS
+	StateAddCRS
+	StateAddDKGComplaint
+	StateAddDKGMasterPublicKey
+	StateAddDKGMPKReady
+	StateAddDKGFinal
+	StateAddDKGSuccess
+	StateResetDKG
+	// Configuration related.
+	StateChangeLambdaBA
+	StateChangeLambdaDKG
+	StateChangeRoundLength
+	StateChangeMinBlockInterval
+	StateChangeNotarySetSize
+	// Node set related.
+	StateAddNode
+)
+
+func (t StateChangeType) String() string {
+	switch t {
+	case StateChangeNothing:
+		return "Nothing"
+	case StateAddCRS:
+		return "AddCRS"
+	case StateAddDKGComplaint:
+		return "AddDKGComplaint"
+	case StateAddDKGMasterPublicKey:
+		return "AddDKGMasterPublicKey"
+	case StateAddDKGMPKReady:
+		return "AddDKGMPKReady"
+	case StateAddDKGFinal:
+		return "AddDKGFinal"
+	case StateAddDKGSuccess:
+		return "AddDKGSuccess"
+	case StateResetDKG:
+		return "ResetDKG"
+	case StateChangeLambdaBA:
+		return "ChangeLambdaBA"
+	case StateChangeLambdaDKG:
+		return "ChangeLambdaDKG"
+	case StateChangeRoundLength:
+		return "ChangeRoundLength"
+	case StateChangeMinBlockInterval:
+		return "ChangeMinBlockInterval"
+	case StateChangeNotarySetSize:
+		return "ChangeNotarySetSize"
+	case StateAddNode:
+		return "AddNode"
+	}
+	panic(fmt.Errorf("attempting to dump unknown type of state change: %d", t))
+}
+
+// StateChangeRequest carries information of a state change request.
+type StateChangeRequest struct {
+	Type    StateChangeType `json:"type"`
+	Payload interface{}     `json:"payload"`
+	// These fields provide a unique ID for each change request.
+	Hash      common.Hash
+	Timestamp uint64
+}
+
+// rawStateChangeRequest is the marshalling form of StateChangeRequest.
+type rawStateChangeRequest struct {
+	Type      StateChangeType
+	Payload   rlp.RawValue
+	Hash      common.Hash
+	Timestamp uint64
+}
+
+// NewStateChangeRequest constructs a StateChangeRequest instance.
+func NewStateChangeRequest(
+	t StateChangeType, payload interface{}) *StateChangeRequest {
+	now := uint64(time.Now().UTC().UnixNano())
+	b, err := rlp.EncodeToBytes(struct {
+		Type      StateChangeType
+		Payload   interface{}
+		Timestamp uint64
+	}{t, payload, now})
+	if err != nil {
+		panic(err)
+	}
+	return &StateChangeRequest{
+		Hash:      crypto.Keccak256Hash(b),
+		Type:      t,
+		Payload:   payload,
+		Timestamp: now,
+	}
+}
+
+// Clone a StateChangeRequest instance.
+func (req *StateChangeRequest) Clone() (copied *StateChangeRequest) {
+	copied = &StateChangeRequest{
+		Type:      req.Type,
+		Hash:      req.Hash,
+		Timestamp: req.Timestamp,
+	}
+	// NOTE: Cloned DKG structs would differ from their sources at the binary
+	// level, and thus produce a different hash than the source. We don't
+	// want different hashes for source/copied requests, so the hash is
+	// copied from the source directly.
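+	// Payload is an interface{}, so each concrete type below needs an
+	// explicit deep copy; a plain struct copy would leave source and clone
+	// sharing the same underlying pointers and slices.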
+	switch req.Type {
+	case StateAddNode:
+		srcBytes := req.Payload.([]byte)
+		copiedBytes := make([]byte, len(srcBytes))
+		copy(copiedBytes, srcBytes)
+		copied.Payload = copiedBytes
+	case StateAddCRS:
+		crsReq := req.Payload.(*crsAdditionRequest)
+		copied.Payload = &crsAdditionRequest{
+			Round: crsReq.Round,
+			CRS:   crsReq.CRS,
+		}
+	case StateAddDKGMPKReady:
+		copied.Payload = CloneDKGMPKReady(req.Payload.(*typesDKG.MPKReady))
+	case StateAddDKGFinal:
+		copied.Payload = CloneDKGFinalize(req.Payload.(*typesDKG.Finalize))
+	case StateAddDKGMasterPublicKey:
+		copied.Payload = CloneDKGMasterPublicKey(
+			req.Payload.(*typesDKG.MasterPublicKey))
+	case StateAddDKGComplaint:
+		copied.Payload = CloneDKGComplaint(req.Payload.(*typesDKG.Complaint))
+	default:
+		copied.Payload = req.Payload
+	}
+	return
+}
+
+// Equal checks equality between two StateChangeRequest instances.
+func (req *StateChangeRequest) Equal(other *StateChangeRequest) error {
+	if req.Hash == other.Hash {
+		return nil
+	}
+	return ErrStatePendingChangesNotEqual
+}
+
+// String dumps the state change request into string form.
+func (req *StateChangeRequest) String() (ret string) {
+	ret = fmt.Sprintf("stateChangeRequest{Type:%s", req.Type)
+	switch req.Type {
+	case StateChangeNothing:
+	case StateAddCRS:
+		crsReq := req.Payload.(*crsAdditionRequest)
+		ret += fmt.Sprintf(
+			"Round:%v CRS:%s", crsReq.Round, crsReq.CRS.String()[:6])
+	case StateAddDKGComplaint:
+		ret += fmt.Sprintf("%s", req.Payload.(*typesDKG.Complaint))
+	case StateAddDKGMasterPublicKey:
+		ret += fmt.Sprintf("%s", req.Payload.(*typesDKG.MasterPublicKey))
+	case StateAddDKGMPKReady:
+		ret += fmt.Sprintf("%s", req.Payload.(*typesDKG.MPKReady))
+	case StateAddDKGFinal:
+		ret += fmt.Sprintf("%s", req.Payload.(*typesDKG.Finalize))
+	case StateAddDKGSuccess:
+		ret += fmt.Sprintf("%s", req.Payload.(*typesDKG.Success))
+	case StateResetDKG:
+		ret += fmt.Sprintf("NewCRS:%s", req.Payload.(common.Hash).String()[:6])
+	case StateChangeLambdaBA:
+		ret += fmt.Sprintf("%v", time.Duration(req.Payload.(uint64)))
+	case StateChangeLambdaDKG:
+		ret += fmt.Sprintf("%v", time.Duration(req.Payload.(uint64)))
+	case StateChangeRoundLength:
+		ret += fmt.Sprintf("%v", time.Duration(req.Payload.(uint64)))
+	case StateChangeMinBlockInterval:
+		ret += fmt.Sprintf("%v", time.Duration(req.Payload.(uint64)))
+	case StateChangeNotarySetSize:
+		ret += fmt.Sprintf("%v", req.Payload.(uint32))
+	case StateAddNode:
+		ret += fmt.Sprintf(
+			"%s", types.NewNodeID(req.Payload.(crypto.PublicKey)).String()[:6])
+	default:
+		panic(fmt.Errorf(
+			"attempting to dump unknown type of state change request: %v",
+			req.Type))
+	}
+	ret += "}"
+	return
+}
diff --git a/dex/consensus/core/test/state-change-request_test.go b/dex/consensus/core/test/state-change-request_test.go
new file mode 100644
index 000000000..517a929bf
--- /dev/null
+++ b/dex/consensus/core/test/state-change-request_test.go
@@ -0,0 +1,54 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+	"testing"
+
+	typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+	"github.com/stretchr/testify/suite"
+)
+
+type StateChangeRequestTestSuite struct {
+	suite.Suite
+}
+
+func (s *StateChangeRequestTestSuite) TestEqual() {
+	// Basically, only the cloned one would be equal.
+	st00 := NewStateChangeRequest(StateChangeNotarySetSize, uint32(4))
+	st01 := NewStateChangeRequest(StateChangeNotarySetSize, uint32(4))
+	s.Error(ErrStatePendingChangesNotEqual, st00.Equal(st01))
+	// Even with identical payload, they would be different.
+	mKey := typesDKG.NewMasterPublicKey()
+	st10 := NewStateChangeRequest(StateAddDKGMasterPublicKey, mKey)
+	st11 := NewStateChangeRequest(StateAddDKGMasterPublicKey, mKey)
+	s.Error(ErrStatePendingChangesNotEqual, st10.Equal(st11))
+}
+
+func (s *StateChangeRequestTestSuite) TestClone() {
+	// The cloned one should compare equal via the 'Equal' method.
+	st00 := NewStateChangeRequest(StateChangeNotarySetSize, uint32(7))
+	s.NoError(st00.Equal(st00.Clone()))
+	st10 := NewStateChangeRequest(
+		StateAddDKGMasterPublicKey, typesDKG.NewMasterPublicKey())
+	s.NoError(st10.Equal(st10.Clone()))
+}
+
+func TestStateChangeRequest(t *testing.T) {
+	suite.Run(t, new(StateChangeRequestTestSuite))
+}
diff --git a/dex/consensus/core/test/state.go b/dex/consensus/core/test/state.go
new file mode 100644
index 000000000..759737793
--- /dev/null
+++ b/dex/consensus/core/test/state.go
@@ -0,0 +1,953 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+	"bytes"
+	"errors"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/dexon-foundation/dexon-consensus/common"
+	"github.com/dexon-foundation/dexon-consensus/core/crypto"
+	"github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa"
+	"github.com/dexon-foundation/dexon-consensus/core/types"
+	typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+	"github.com/dexon-foundation/dexon/rlp"
+)
+
+var (
+	// ErrDuplicatedChange means the change request is already applied.
+	ErrDuplicatedChange = errors.New("duplicated change")
+	// ErrForkedCRS means a different CRS for one round is proposed.
+	ErrForkedCRS = errors.New("forked CRS")
+	// ErrMissingPreviousCRS means previous CRS not found when
+	// proposing a specific round of CRS.
+	ErrMissingPreviousCRS = errors.New("missing previous CRS")
+	// ErrUnknownStateChangeType means a StateChangeType is not recognized.
+	ErrUnknownStateChangeType = errors.New("unknown state change type")
+	// ErrProposerMPKIsReady means the proposer of an MPK has already
+	// signalled MPK ready.
+	ErrProposerMPKIsReady = errors.New("proposer mpk is ready")
+	// ErrProposerIsFinal means the proposer of a complaint has already
+	// finalized.
+	ErrProposerIsFinal = errors.New("proposer is final")
+	// ErrStateConfigNotEqual means configuration part of two states is not
+	// equal.
+	ErrStateConfigNotEqual = errors.New("config not equal")
+	// ErrStateLocalFlagNotEqual means local flag of two states is not equal.
+	ErrStateLocalFlagNotEqual = errors.New("local flag not equal")
+	// ErrStateNodeSetNotEqual means node sets of two states are not equal.
+	ErrStateNodeSetNotEqual = errors.New("node set not equal")
+	// ErrStateDKGComplaintsNotEqual means DKG complaints for two states are not
+	// equal.
+	ErrStateDKGComplaintsNotEqual = errors.New("dkg complaints not equal")
+	// ErrStateDKGMasterPublicKeysNotEqual means DKG master public keys of two
+	// states are not equal.
+	ErrStateDKGMasterPublicKeysNotEqual = errors.New(
+		"dkg master public keys not equal")
+	// ErrStateDKGMPKReadysNotEqual means DKG readys of two states are not
+	// equal.
+	ErrStateDKGMPKReadysNotEqual = errors.New("dkg readys not equal")
+	// ErrStateDKGFinalsNotEqual means DKG finalizations of two states are not
+	// equal.
+	ErrStateDKGFinalsNotEqual = errors.New("dkg finalizations not equal")
+	// ErrStateDKGSuccessesNotEqual means DKG successes of two states are not
+	// equal.
+	ErrStateDKGSuccessesNotEqual = errors.New("dkg successes not equal")
+	// ErrStateCRSsNotEqual means CRSs of two states are not equal.
+	ErrStateCRSsNotEqual = errors.New("crs not equal")
+	// ErrStateDKGResetCountNotEqual means dkgResetCount of two states are not
+	// equal.
+	ErrStateDKGResetCountNotEqual = errors.New("dkg reset count not equal")
+	// ErrStatePendingChangesNotEqual means pending change requests of two
+	// states are not equal.
+	ErrStatePendingChangesNotEqual = errors.New("pending changes not equal")
+	// ErrChangeWontApply means the state change won't be applied for some
+	// reason.
+	ErrChangeWontApply = errors.New("change won't apply")
+	// ErrNotInRemoteMode means a caller attempts to call remote-mode
+	// functions while the State instance is still in local mode.
+	ErrNotInRemoteMode = errors.New(
+		"attempting to use remote functions in local mode")
+)
+
+type crsAdditionRequest struct {
+	Round uint64      `json:"round"`
+	CRS   common.Hash `json:"crs"`
+}
+
+// State emulates the global state of the governance contract on a full node.
+type State struct {
+	// Configuration related.
+	lambdaBA         time.Duration
+	lambdaDKG        time.Duration
+	notarySetSize    uint32
+	roundInterval    uint64
+	minBlockInterval time.Duration
+	// Nodes
+	nodes map[types.NodeID]crypto.PublicKey
+	// DKG & CRS
+	dkgComplaints       map[uint64]map[types.NodeID][]*typesDKG.Complaint
+	dkgMasterPublicKeys map[uint64]map[types.NodeID]*typesDKG.MasterPublicKey
+	dkgReadys           map[uint64]map[types.NodeID]*typesDKG.MPKReady
+	dkgFinals           map[uint64]map[types.NodeID]*typesDKG.Finalize
+	dkgSuccesses        map[uint64]map[types.NodeID]*typesDKG.Success
+	crs                 []common.Hash
+	dkgResetCount       map[uint64]uint64
+	// Other stuff
+	local           bool
+	logger          common.Logger
+	lock            sync.RWMutex
+	appliedRequests map[common.Hash]struct{}
+	// Pending change requests.
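+	// ownRequests hold requests proposed locally; they are drained into
+	// globalRequests by PackOwnRequests. globalRequests mirror what is
+	// known network-wide and are what PackRequests serializes. A sketch of
+	// the remote-mode flow using names from this file:
+	//   st.RequestChange(...)           -> queued in ownRequests
+	//   b, _ := st.PackOwnRequests()    -> moved to globalRequests, gossiped
+	//   peer.AddRequestsFromOthers(b)   -> lands in peer's globalRequests
+	//   payload, _ := st.PackRequests() -> packed into a block's payload
+	//   st.Apply(payload)               -> removed once applied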
+	ownRequests    map[common.Hash]*StateChangeRequest
+	globalRequests map[common.Hash]*StateChangeRequest
+}
+
+// NewState constructs a State instance with genesis information, including:
+//  - node set
+//  - crs
+func NewState(
+	dkgDelayRound uint64,
+	nodePubKeys []crypto.PublicKey,
+	lambda time.Duration,
+	logger common.Logger,
+	local bool) *State {
+	nodes := make(map[types.NodeID]crypto.PublicKey)
+	for _, key := range nodePubKeys {
+		nodes[types.NewNodeID(key)] = key
+	}
+	genesisCRS := crypto.Keccak256Hash([]byte("__ DEXON"))
+	crs := make([]common.Hash, dkgDelayRound+1)
+	for i := range crs {
+		crs[i] = genesisCRS
+		genesisCRS = crypto.Keccak256Hash(genesisCRS[:])
+	}
+	return &State{
+		local:            local,
+		logger:           logger,
+		lambdaBA:         lambda,
+		lambdaDKG:        lambda * 10,
+		roundInterval:    1000,
+		minBlockInterval: 4 * lambda,
+		crs:              crs,
+		nodes:            nodes,
+		notarySetSize:    uint32(len(nodes)),
+		ownRequests:      make(map[common.Hash]*StateChangeRequest),
+		globalRequests:   make(map[common.Hash]*StateChangeRequest),
+		dkgReadys: make(
+			map[uint64]map[types.NodeID]*typesDKG.MPKReady),
+		dkgFinals: make(
+			map[uint64]map[types.NodeID]*typesDKG.Finalize),
+		dkgSuccesses: make(
+			map[uint64]map[types.NodeID]*typesDKG.Success),
+		dkgComplaints: make(
+			map[uint64]map[types.NodeID][]*typesDKG.Complaint),
+		dkgMasterPublicKeys: make(
+			map[uint64]map[types.NodeID]*typesDKG.MasterPublicKey),
+		dkgResetCount:   make(map[uint64]uint64),
+		appliedRequests: make(map[common.Hash]struct{}),
+	}
+}
+
+// SwitchToRemoteMode turns this State instance into remote mode: all changes
+// are pending and need to be packed/unpacked to apply. Once the state
+// switches to remote mode, there is no way back to local mode.
+func (s *State) SwitchToRemoteMode() {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	s.local = false
+}
+
+// Snapshot returns the configuration that can be snapshotted.
+func (s *State) Snapshot() (*types.Config, []crypto.PublicKey) {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+	// Clone a node set.
+	nodes := make([]crypto.PublicKey, 0, len(s.nodes))
+	for _, key := range s.nodes {
+		nodes = append(nodes, key)
+	}
+	cfg := &types.Config{
+		LambdaBA:         s.lambdaBA,
+		LambdaDKG:        s.lambdaDKG,
+		NotarySetSize:    s.notarySetSize,
+		RoundLength:      s.roundInterval,
+		MinBlockInterval: s.minBlockInterval,
+	}
+	s.logger.Info("Snapshot config", "config", cfg)
+	return cfg, nodes
+}
+
+// AttachLogger attaches a custom logger.
+func (s *State) AttachLogger(logger common.Logger) { + s.logger = logger +} + +func (s *State) unpackPayload( + raw *rawStateChangeRequest) (v interface{}, err error) { + switch raw.Type { + case StateAddCRS: + v = &crsAdditionRequest{} + err = rlp.DecodeBytes(raw.Payload, v) + case StateAddDKGComplaint: + v = &typesDKG.Complaint{} + err = rlp.DecodeBytes(raw.Payload, v) + case StateAddDKGMasterPublicKey: + v = &typesDKG.MasterPublicKey{} + err = rlp.DecodeBytes(raw.Payload, v) + case StateAddDKGMPKReady: + v = &typesDKG.MPKReady{} + err = rlp.DecodeBytes(raw.Payload, v) + case StateAddDKGFinal: + v = &typesDKG.Finalize{} + err = rlp.DecodeBytes(raw.Payload, v) + case StateAddDKGSuccess: + v = &typesDKG.Success{} + err = rlp.DecodeBytes(raw.Payload, v) + case StateResetDKG: + var tmp common.Hash + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangeLambdaBA: + var tmp uint64 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangeLambdaDKG: + var tmp uint64 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangeRoundLength: + var tmp uint64 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangeMinBlockInterval: + var tmp uint64 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangeNotarySetSize: + var tmp uint32 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateAddNode: + var tmp []byte + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + default: + err = ErrUnknownStateChangeType + } + if err != nil { + return + } + return +} + +func (s *State) unpackRequests( + b []byte) (reqs []*StateChangeRequest, err error) { + // Try to unmarshal this byte stream into []*StateChangeRequest. + rawReqs := []*rawStateChangeRequest{} + if err = rlp.DecodeBytes(b, &rawReqs); err != nil { + return + } + for _, r := range rawReqs { + var payload interface{} + if payload, err = s.unpackPayload(r); err != nil { + return + } + reqs = append(reqs, &StateChangeRequest{ + Type: r.Type, + Payload: payload, + Hash: r.Hash, + Timestamp: r.Timestamp, + }) + } + return +} + +// Equal checks equality between State instance. +func (s *State) Equal(other *State) error { + // Check configuration part. + configEqual := s.lambdaBA == other.lambdaBA && + s.lambdaDKG == other.lambdaDKG && + s.notarySetSize == other.notarySetSize && + s.roundInterval == other.roundInterval && + s.minBlockInterval == other.minBlockInterval + if !configEqual { + return ErrStateConfigNotEqual + } + // Check local flag. + if s.local != other.local { + return ErrStateLocalFlagNotEqual + } + // Check node set. + if len(s.nodes) != len(other.nodes) { + return ErrStateNodeSetNotEqual + } + for nID, key := range s.nodes { + otherKey, exists := other.nodes[nID] + if !exists { + return ErrStateNodeSetNotEqual + } + if bytes.Compare(key.Bytes(), otherKey.Bytes()) != 0 { + return ErrStateNodeSetNotEqual + } + } + // Check DKG Complaints, here I assume the addition sequence of complaints + // proposed by one node would be identical on each node (this should be true + // when state change requests are carried by blocks and executed in order). 
+ if len(s.dkgComplaints) != len(other.dkgComplaints) { + return ErrStateDKGComplaintsNotEqual + } + for round, compsForRound := range s.dkgComplaints { + otherCompsForRound, exists := other.dkgComplaints[round] + if !exists { + return ErrStateDKGComplaintsNotEqual + } + if len(compsForRound) != len(otherCompsForRound) { + return ErrStateDKGComplaintsNotEqual + } + for nID, comps := range compsForRound { + otherComps, exists := otherCompsForRound[nID] + if !exists { + return ErrStateDKGComplaintsNotEqual + } + if len(comps) != len(otherComps) { + return ErrStateDKGComplaintsNotEqual + } + for idx, comp := range comps { + if !comp.Equal(otherComps[idx]) { + return ErrStateDKGComplaintsNotEqual + } + } + } + } + // Check DKG master public keys. + if len(s.dkgMasterPublicKeys) != len(other.dkgMasterPublicKeys) { + return ErrStateDKGMasterPublicKeysNotEqual + } + for round, mKeysForRound := range s.dkgMasterPublicKeys { + otherMKeysForRound, exists := other.dkgMasterPublicKeys[round] + if !exists { + return ErrStateDKGMasterPublicKeysNotEqual + } + if len(mKeysForRound) != len(otherMKeysForRound) { + return ErrStateDKGMasterPublicKeysNotEqual + } + for nID, mKey := range mKeysForRound { + otherMKey, exists := otherMKeysForRound[nID] + if !exists { + return ErrStateDKGMasterPublicKeysNotEqual + } + if !mKey.Equal(otherMKey) { + return ErrStateDKGMasterPublicKeysNotEqual + } + } + } + // Check DKG readys. + if len(s.dkgReadys) != len(other.dkgReadys) { + return ErrStateDKGMPKReadysNotEqual + } + for round, readysForRound := range s.dkgReadys { + otherReadysForRound, exists := other.dkgReadys[round] + if !exists { + return ErrStateDKGMPKReadysNotEqual + } + if len(readysForRound) != len(otherReadysForRound) { + return ErrStateDKGMPKReadysNotEqual + } + for nID, ready := range readysForRound { + otherReady, exists := otherReadysForRound[nID] + if !exists { + return ErrStateDKGMPKReadysNotEqual + } + if !ready.Equal(otherReady) { + return ErrStateDKGMPKReadysNotEqual + } + } + } + // Check DKG finals. + if len(s.dkgFinals) != len(other.dkgFinals) { + return ErrStateDKGFinalsNotEqual + } + for round, finalsForRound := range s.dkgFinals { + otherFinalsForRound, exists := other.dkgFinals[round] + if !exists { + return ErrStateDKGFinalsNotEqual + } + if len(finalsForRound) != len(otherFinalsForRound) { + return ErrStateDKGFinalsNotEqual + } + for nID, final := range finalsForRound { + otherFinal, exists := otherFinalsForRound[nID] + if !exists { + return ErrStateDKGFinalsNotEqual + } + if !final.Equal(otherFinal) { + return ErrStateDKGFinalsNotEqual + } + } + } + // Check DKG successes. + if len(s.dkgSuccesses) != len(other.dkgSuccesses) { + return ErrStateDKGSuccessesNotEqual + } + for round, successesForRound := range s.dkgSuccesses { + otherSuccessesForRound, exists := other.dkgSuccesses[round] + if !exists { + return ErrStateDKGSuccessesNotEqual + } + if len(successesForRound) != len(otherSuccessesForRound) { + return ErrStateDKGSuccessesNotEqual + } + for nID, success := range successesForRound { + otherSuccesse, exists := otherSuccessesForRound[nID] + if !exists { + return ErrStateDKGSuccessesNotEqual + } + if !success.Equal(otherSuccesse) { + return ErrStateDKGSuccessesNotEqual + } + } + } + // Check CRS part. + if len(s.crs) != len(other.crs) { + return ErrStateCRSsNotEqual + } + for idx, crs := range s.crs { + if crs != other.crs[idx] { + return ErrStateCRSsNotEqual + } + } + // Check dkgResetCount. 
+ if len(s.dkgResetCount) != len(other.dkgResetCount) { + return ErrStateDKGResetCountNotEqual + } + for idx, count := range s.dkgResetCount { + if count != other.dkgResetCount[idx] { + return ErrStateDKGResetCountNotEqual + } + } + // Check pending changes. + checkPending := func( + src, target map[common.Hash]*StateChangeRequest) error { + if len(src) != len(target) { + return ErrStatePendingChangesNotEqual + } + for k, v := range src { + otherV, exists := target[k] + if !exists { + return ErrStatePendingChangesNotEqual + } + if err := v.Equal(otherV); err != nil { + return err + } + } + return nil + } + if err := checkPending(s.ownRequests, other.ownRequests); err != nil { + return err + } + if err := checkPending(s.globalRequests, other.globalRequests); err != nil { + return err + } + return nil +} + +// Clone returns a copied State instance. +func (s *State) Clone() (copied *State) { + // Clone configuration parts. + copied = &State{ + lambdaBA: s.lambdaBA, + lambdaDKG: s.lambdaDKG, + notarySetSize: s.notarySetSize, + roundInterval: s.roundInterval, + minBlockInterval: s.minBlockInterval, + local: s.local, + logger: s.logger, + nodes: make(map[types.NodeID]crypto.PublicKey), + dkgComplaints: make( + map[uint64]map[types.NodeID][]*typesDKG.Complaint), + dkgMasterPublicKeys: make( + map[uint64]map[types.NodeID]*typesDKG.MasterPublicKey), + dkgReadys: make(map[uint64]map[types.NodeID]*typesDKG.MPKReady), + dkgFinals: make(map[uint64]map[types.NodeID]*typesDKG.Finalize), + dkgSuccesses: make(map[uint64]map[types.NodeID]*typesDKG.Success), + appliedRequests: make(map[common.Hash]struct{}), + } + // Nodes + for nID, key := range s.nodes { + copied.nodes[nID] = key + } + // DKG & CRS + for round, complaintsForRound := range s.dkgComplaints { + copied.dkgComplaints[round] = + make(map[types.NodeID][]*typesDKG.Complaint) + for nID, comps := range complaintsForRound { + tmpComps := []*typesDKG.Complaint{} + for _, comp := range comps { + tmpComps = append(tmpComps, CloneDKGComplaint(comp)) + } + copied.dkgComplaints[round][nID] = tmpComps + } + } + for round, mKeysForRound := range s.dkgMasterPublicKeys { + copied.dkgMasterPublicKeys[round] = + make(map[types.NodeID]*typesDKG.MasterPublicKey) + for nID, mKey := range mKeysForRound { + copied.dkgMasterPublicKeys[round][nID] = + CloneDKGMasterPublicKey(mKey) + } + } + for round, readysForRound := range s.dkgReadys { + copied.dkgReadys[round] = make(map[types.NodeID]*typesDKG.MPKReady) + for nID, ready := range readysForRound { + copied.dkgReadys[round][nID] = CloneDKGMPKReady(ready) + } + } + for round, finalsForRound := range s.dkgFinals { + copied.dkgFinals[round] = make(map[types.NodeID]*typesDKG.Finalize) + for nID, final := range finalsForRound { + copied.dkgFinals[round][nID] = CloneDKGFinalize(final) + } + } + for round, successesForRound := range s.dkgSuccesses { + copied.dkgSuccesses[round] = make(map[types.NodeID]*typesDKG.Success) + for nID, success := range successesForRound { + copied.dkgSuccesses[round][nID] = CloneDKGSuccess(success) + } + } + for _, crs := range s.crs { + copied.crs = append(copied.crs, crs) + } + copied.dkgResetCount = make(map[uint64]uint64, len(s.dkgResetCount)) + for round, count := range s.dkgResetCount { + copied.dkgResetCount[round] = count + } + for hash := range s.appliedRequests { + copied.appliedRequests[hash] = struct{}{} + } + // Pending Changes + copied.ownRequests = make(map[common.Hash]*StateChangeRequest) + for k, req := range s.ownRequests { + copied.ownRequests[k] = req.Clone() + } + 
copied.globalRequests = make(map[common.Hash]*StateChangeRequest)
+	for k, req := range s.globalRequests {
+		copied.globalRequests[k] = req.Clone()
+	}
+	return
+}
+
+type reqByTime []*StateChangeRequest
+
+func (req reqByTime) Len() int      { return len(req) }
+func (req reqByTime) Swap(i, j int) { req[i], req[j] = req[j], req[i] }
+func (req reqByTime) Less(i, j int) bool {
+	return req[i].Timestamp < req[j].Timestamp
+}
+
+// Apply applies change requests; it is also called when these requests are
+// extracted from delivered blocks.
+func (s *State) Apply(reqsAsBytes []byte) (err error) {
+	if len(reqsAsBytes) == 0 {
+		return
+	}
+	// Try to unmarshal this byte stream into []*StateChangeRequest.
+	reqs, err := s.unpackRequests(reqsAsBytes)
+	if err != nil {
+		return
+	}
+	sort.Sort(reqByTime(reqs))
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	for _, req := range reqs {
+		s.logger.Debug("Apply Request", "req", req)
+		// Remove this request from pending sets once it's about to apply.
+		delete(s.globalRequests, req.Hash)
+		delete(s.ownRequests, req.Hash)
+		if _, exist := s.appliedRequests[req.Hash]; exist {
+			continue
+		}
+		if err = s.isValidRequest(req); err != nil {
+			if err == ErrDuplicatedChange {
+				err = nil
+				continue
+			}
+			return
+		}
+		if err = s.applyRequest(req); err != nil {
+			return
+		}
+		s.appliedRequests[req.Hash] = struct{}{}
+	}
+	return
+}
+
+// AddRequestsFromOthers adds requests from other nodes; they won't be packed
+// by 'PackOwnRequests'.
+func (s *State) AddRequestsFromOthers(reqsAsBytes []byte) (err error) {
+	if s.local {
+		err = ErrNotInRemoteMode
+		return
+	}
+	reqs, err := s.unpackRequests(reqsAsBytes)
+	if err != nil {
+		return
+	}
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	for _, req := range reqs {
+		s.globalRequests[req.Hash] = req
+	}
+	return
+}
+
+// PackRequests packs all currently pending requests, including those from
+// others.
+func (s *State) PackRequests() (b []byte, err error) {
+	if s.local {
+		// Convert own requests to global ones for packing.
+		if _, err = s.PackOwnRequests(); err != nil {
+			return
+		}
+	}
+	// Pack requests in global pool.
+	packed := []*StateChangeRequest{}
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	for _, v := range s.globalRequests {
+		s.logger.Debug("Pack Request", "req", v)
+		packed = append(packed, v)
+	}
+	return rlp.EncodeToBytes(packed)
+}
+
+// PackOwnRequests packs this node's own pending requests as a byte slice,
+// which could be sent as a block's payload and unmarshalled back to apply.
+//
+// Once a request is packed as an own request, it is turned into a normal
+// pending request and won't be packed by this function again. This ensures
+// each request is broadcast (gossiped) only once.
+//
+// Calling this function is not required in local mode.
+func (s *State) PackOwnRequests() (b []byte, err error) {
+	packed := []*StateChangeRequest{}
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	for k, v := range s.ownRequests {
+		delete(s.ownRequests, k)
+		s.globalRequests[k] = v
+		packed = append(packed, v)
+	}
+	if b, err = rlp.EncodeToBytes(packed); err != nil {
+		return
+	}
+	return
+}
+
+// isValidRequest checks whether this request is valid to proceed.
+func (s *State) isValidRequest(req *StateChangeRequest) error {
+	// NOTE: this helper takes no lock; callers are responsible for
+	// acquiring the appropriate lock.
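+	// (Both Apply and RequestChange hold s.lock before calling this helper.)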
+	switch req.Type {
+	case StateAddDKGMPKReady:
+		ready := req.Payload.(*typesDKG.MPKReady)
+		if ready.Reset != s.dkgResetCount[ready.Round] {
+			return ErrChangeWontApply
+		}
+	case StateAddDKGFinal:
+		final := req.Payload.(*typesDKG.Finalize)
+		if final.Reset != s.dkgResetCount[final.Round] {
+			return ErrChangeWontApply
+		}
+	case StateAddDKGSuccess:
+		success := req.Payload.(*typesDKG.Success)
+		if success.Reset != s.dkgResetCount[success.Round] {
+			return ErrChangeWontApply
+		}
+	case StateAddDKGMasterPublicKey:
+		mpk := req.Payload.(*typesDKG.MasterPublicKey)
+		if mpk.Reset != s.dkgResetCount[mpk.Round] {
+			return ErrChangeWontApply
+		}
+		// If an MPK from that proposer already exists for this round,
+		// reject the new one.
+		mpkForRound, exists := s.dkgMasterPublicKeys[mpk.Round]
+		if exists {
+			if oldMpk, exists := mpkForRound[mpk.ProposerID]; exists {
+				if !oldMpk.Equal(mpk) {
+					return ErrDuplicatedChange
+				}
+				return ErrChangeWontApply
+			}
+		}
+		// If that proposer has already signalled MPK ready, its MPK can no
+		// longer be changed.
+		if _, exists := s.dkgReadys[mpk.Round][mpk.ProposerID]; exists {
+			return ErrProposerMPKIsReady
+		}
+	case StateAddDKGComplaint:
+		comp := req.Payload.(*typesDKG.Complaint)
+		if comp.Reset != s.dkgResetCount[comp.Round] {
+			return ErrChangeWontApply
+		}
+		// If we've received a DKG finalize from that proposer, ignore its
+		// complaint.
+		if _, exists := s.dkgFinals[comp.Round][comp.ProposerID]; exists {
+			return ErrProposerIsFinal
+		}
+		// If we've received an identical complaint, ignore it.
+		compForRound, exists := s.dkgComplaints[comp.Round]
+		if !exists {
+			break
+		}
+		comps, exists := compForRound[comp.ProposerID]
+		if !exists {
+			break
+		}
+		for _, tmpComp := range comps {
+			if tmpComp == comp {
+				return ErrDuplicatedChange
+			}
+		}
+	case StateAddCRS:
+		crsReq := req.Payload.(*crsAdditionRequest)
+		if uint64(len(s.crs)) > crsReq.Round {
+			if !s.crs[crsReq.Round].Equal(crsReq.CRS) {
+				return ErrForkedCRS
+			}
+			return ErrDuplicatedChange
+		} else if uint64(len(s.crs)) == crsReq.Round {
+			return nil
+		} else {
+			return ErrMissingPreviousCRS
+		}
+	case StateResetDKG:
+		newCRS := req.Payload.(common.Hash)
+		if s.crs[len(s.crs)-1].Equal(newCRS) {
+			return ErrDuplicatedChange
+		}
+		// TODO(mission): find a smart way to make sure the caller requests
+		// this change with the correct resetCount.
+	}
+	return nil
+}
+
+// applyRequest applies a single StateChangeRequest.
+func (s *State) applyRequest(req *StateChangeRequest) error {
+	// NOTE: this helper takes no lock; callers are responsible for
+	// acquiring the appropriate lock.
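+	// Requests reaching here have already passed isValidRequest (see Apply
+	// and RequestChange), so each case below only performs the actual
+	// mutation plus lazy map initialization.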
+	switch req.Type {
+	case StateAddNode:
+		pubKey, err := ecdsa.NewPublicKeyFromByteSlice(req.Payload.([]byte))
+		if err != nil {
+			return err
+		}
+		s.nodes[types.NewNodeID(pubKey)] = pubKey
+	case StateAddCRS:
+		crsRequest := req.Payload.(*crsAdditionRequest)
+		if crsRequest.Round != uint64(len(s.crs)) {
+			return ErrDuplicatedChange
+		}
+		s.crs = append(s.crs, crsRequest.CRS)
+	case StateAddDKGComplaint:
+		comp := req.Payload.(*typesDKG.Complaint)
+		if _, exists := s.dkgComplaints[comp.Round]; !exists {
+			s.dkgComplaints[comp.Round] = make(
+				map[types.NodeID][]*typesDKG.Complaint)
+		}
+		s.dkgComplaints[comp.Round][comp.ProposerID] = append(
+			s.dkgComplaints[comp.Round][comp.ProposerID], comp)
+	case StateAddDKGMasterPublicKey:
+		mKey := req.Payload.(*typesDKG.MasterPublicKey)
+		if _, exists := s.dkgMasterPublicKeys[mKey.Round]; !exists {
+			s.dkgMasterPublicKeys[mKey.Round] = make(
+				map[types.NodeID]*typesDKG.MasterPublicKey)
+		}
+		s.dkgMasterPublicKeys[mKey.Round][mKey.ProposerID] = mKey
+	case StateAddDKGMPKReady:
+		ready := req.Payload.(*typesDKG.MPKReady)
+		if _, exists := s.dkgReadys[ready.Round]; !exists {
+			s.dkgReadys[ready.Round] = make(map[types.NodeID]*typesDKG.MPKReady)
+		}
+		s.dkgReadys[ready.Round][ready.ProposerID] = ready
+	case StateAddDKGFinal:
+		final := req.Payload.(*typesDKG.Finalize)
+		if _, exists := s.dkgFinals[final.Round]; !exists {
+			s.dkgFinals[final.Round] = make(map[types.NodeID]*typesDKG.Finalize)
+		}
+		s.dkgFinals[final.Round][final.ProposerID] = final
+	case StateAddDKGSuccess:
+		success := req.Payload.(*typesDKG.Success)
+		if _, exists := s.dkgSuccesses[success.Round]; !exists {
+			s.dkgSuccesses[success.Round] =
+				make(map[types.NodeID]*typesDKG.Success)
+		}
+		s.dkgSuccesses[success.Round][success.ProposerID] = success
+	case StateResetDKG:
+		round := uint64(len(s.crs) - 1)
+		s.crs[round] = req.Payload.(common.Hash)
+		s.dkgResetCount[round]++
+		delete(s.dkgMasterPublicKeys, round)
+		delete(s.dkgReadys, round)
+		delete(s.dkgComplaints, round)
+		delete(s.dkgFinals, round)
+		delete(s.dkgSuccesses, round)
+	case StateChangeLambdaBA:
+		s.lambdaBA = time.Duration(req.Payload.(uint64))
+	case StateChangeLambdaDKG:
+		s.lambdaDKG = time.Duration(req.Payload.(uint64))
+	case StateChangeRoundLength:
+		s.roundInterval = req.Payload.(uint64)
+	case StateChangeMinBlockInterval:
+		s.minBlockInterval = time.Duration(req.Payload.(uint64))
+	case StateChangeNotarySetSize:
+		s.notarySetSize = req.Payload.(uint32)
+	default:
+		return errors.New("you are definitely kidding me")
+	}
+	return nil
+}
+
+// ProposeCRS proposes a new CRS for a specific round.
+func (s *State) ProposeCRS(round uint64, crs common.Hash) (err error) {
+	err = s.RequestChange(StateAddCRS, &crsAdditionRequest{
+		Round: round,
+		CRS:   crs,
+	})
+	return
+}
+
+// RequestChange submits a state change request.
+func (s *State) RequestChange(
+	t StateChangeType, payload interface{}) (err error) {
+	s.logger.Info("Request Change to State", "type", t, "value", payload)
+	// Patch input parameter's type.
+	switch t {
+	case StateAddNode:
+		payload = payload.(crypto.PublicKey).Bytes()
+	case StateChangeLambdaBA,
+		StateChangeLambdaDKG,
+		StateChangeMinBlockInterval:
+		payload = uint64(payload.(time.Duration))
+	// The following cases are purely for type assertion, to make sure
+	// callers pass the expected types.
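+	// (For example, a caller passing a time.Duration for StateChangeLambdaBA
+	// had it converted to uint64 above so the payload can be RLP-encoded.)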
+ case StateAddCRS: + payload = payload.(*crsAdditionRequest) + case StateAddDKGMPKReady: + payload = payload.(*typesDKG.MPKReady) + case StateAddDKGFinal: + payload = payload.(*typesDKG.Finalize) + case StateAddDKGSuccess: + payload = payload.(*typesDKG.Success) + case StateAddDKGMasterPublicKey: + payload = payload.(*typesDKG.MasterPublicKey) + case StateAddDKGComplaint: + payload = payload.(*typesDKG.Complaint) + case StateResetDKG: + payload = payload.(common.Hash) + } + req := NewStateChangeRequest(t, payload) + s.lock.Lock() + defer s.lock.Unlock() + if s.local { + if err = s.isValidRequest(req); err != nil { + return + } + err = s.applyRequest(req) + } else { + if err = s.isValidRequest(req); err != nil { + return + } + s.ownRequests[req.Hash] = req + } + return +} + +// CRS access crs proposed for that round. +func (s *State) CRS(round uint64) common.Hash { + s.lock.RLock() + defer s.lock.RUnlock() + if round >= uint64(len(s.crs)) { + return common.Hash{} + } + return s.crs[round] +} + +// DKGComplaints access current received dkg complaints for that round. +// This information won't be snapshot, thus can't be cached in test.Governance. +func (s *State) DKGComplaints(round uint64) []*typesDKG.Complaint { + s.lock.RLock() + defer s.lock.RUnlock() + comps, exists := s.dkgComplaints[round] + if !exists { + return nil + } + tmpComps := make([]*typesDKG.Complaint, 0, len(comps)) + for _, compProp := range comps { + for _, comp := range compProp { + tmpComps = append(tmpComps, CloneDKGComplaint(comp)) + } + } + return tmpComps +} + +// DKGMasterPublicKeys access current received dkg master public keys for that +// round. This information won't be snapshot, thus can't be cached in +// test.Governance. +func (s *State) DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey { + s.lock.RLock() + defer s.lock.RUnlock() + masterPublicKeys, exists := s.dkgMasterPublicKeys[round] + if !exists { + return nil + } + mpks := make([]*typesDKG.MasterPublicKey, 0, len(masterPublicKeys)) + for _, mpk := range masterPublicKeys { + mpks = append(mpks, CloneDKGMasterPublicKey(mpk)) + } + return mpks +} + +// IsDKGMPKReady checks if current received dkg readys exceeds threshold. +// This information won't be snapshot, thus can't be cached in test.Governance. +func (s *State) IsDKGMPKReady(round uint64, threshold int) bool { + s.lock.RLock() + defer s.lock.RUnlock() + return len(s.dkgReadys[round]) >= threshold +} + +// IsDKGFinal checks if current received dkg finals exceeds threshold. +// This information won't be snapshot, thus can't be cached in test.Governance. +func (s *State) IsDKGFinal(round uint64, threshold int) bool { + s.lock.RLock() + defer s.lock.RUnlock() + return len(s.dkgFinals[round]) >= threshold +} + +// IsDKGSuccess checks if current received dkg successes exceeds threshold. +// This information won't be snapshot, thus can't be cached in test.Governance. +func (s *State) IsDKGSuccess(round uint64, threshold int) bool { + s.lock.RLock() + defer s.lock.RUnlock() + return len(s.dkgSuccesses[round]) >= threshold +} + +// DKGResetCount returns the reset count for DKG of given round. 
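+// The count starts at zero and is increased by applyRequest for every
+// accepted StateResetDKG; DKG payloads carrying a stale Reset value are
+// rejected by isValidRequest with ErrChangeWontApply.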
+func (s *State) DKGResetCount(round uint64) uint64 { + s.lock.RLock() + defer s.lock.RUnlock() + return s.dkgResetCount[round] +} diff --git a/dex/consensus/core/test/state_test.go b/dex/consensus/core/test/state_test.go new file mode 100644 index 000000000..63f9d27b4 --- /dev/null +++ b/dex/consensus/core/test/state_test.go @@ -0,0 +1,492 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package test + +import ( + "sort" + "testing" + "time" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" + "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa" + "github.com/dexon-foundation/dexon-consensus/core/types" + typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" + "github.com/dexon-foundation/dexon-consensus/core/utils" + "github.com/stretchr/testify/suite" +) + +type StateTestSuite struct { + suite.Suite +} + +func (s *StateTestSuite) newDKGMasterPublicKey( + round uint64, reset uint64) *typesDKG.MasterPublicKey { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + pubKey := prvKey.PublicKey() + nodeID := types.NewNodeID(pubKey) + _, pubShare := dkg.NewPrivateKeyShares(3) + dID, err := dkg.BytesID(nodeID.Hash[:]) + s.Require().NoError(err) + return &typesDKG.MasterPublicKey{ + ProposerID: nodeID, + Round: round, + Reset: reset, + DKGID: dID, + PublicKeyShares: *pubShare.Move(), + } +} + +func (s *StateTestSuite) newDKGComplaint( + round uint64, reset uint64) *typesDKG.Complaint { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + nodeID := types.NewNodeID(prvKey.PublicKey()) + comp := &typesDKG.Complaint{ + Round: round, + Reset: reset, + PrivateShare: typesDKG.PrivateShare{ + ProposerID: nodeID, + ReceiverID: nodeID, + Round: round, + Reset: reset, + PrivateShare: *dkg.NewPrivateKey(), + }, + } + signer := utils.NewSigner(prvKey) + s.Require().NoError(signer.SignDKGComplaint(comp)) + s.Require().NoError(signer.SignDKGPrivateShare(&comp.PrivateShare)) + s.Require().False(comp.IsNack()) + return comp +} + +func (s *StateTestSuite) newDKGMPKReady( + round uint64, reset uint64) *typesDKG.MPKReady { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + ready := &typesDKG.MPKReady{Round: round, Reset: reset} + s.Require().NoError(utils.NewSigner(prvKey).SignDKGMPKReady(ready)) + return ready +} +func (s *StateTestSuite) newDKGFinal( + round uint64, reset uint64) *typesDKG.Finalize { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + final := &typesDKG.Finalize{Round: round, Reset: reset} + s.Require().NoError(utils.NewSigner(prvKey).SignDKGFinalize(final)) + return final +} + +func (s *StateTestSuite) 
compareNodes(node1, node2 []crypto.PublicKey) bool { + id1 := common.Hashes{} + for _, n := range node1 { + id1 = append(id1, types.NewNodeID(n).Hash) + } + sort.Sort(id1) + id2 := common.Hashes{} + for _, n := range node2 { + id2 = append(id2, types.NewNodeID(n).Hash) + } + sort.Sort(id2) + if len(id1) != len(id2) { + return false + } + for idx, id := range id1 { + if id != id2[idx] { + return false + } + } + return true +} + +func (s *StateTestSuite) findNode( + nodes []crypto.PublicKey, node crypto.PublicKey) bool { + nodeID := types.NewNodeID(node) + for _, n := range nodes { + nID := types.NewNodeID(n) + if nID == nodeID { + return true + } + } + return false +} + +func (s *StateTestSuite) makeDKGChanges( + st *State, + masterPubKey *typesDKG.MasterPublicKey, + ready *typesDKG.MPKReady, + complaint *typesDKG.Complaint, + final *typesDKG.Finalize) { + s.Require().NoError(st.RequestChange(StateAddDKGMasterPublicKey, + masterPubKey)) + s.Require().NoError(st.RequestChange(StateAddDKGMPKReady, ready)) + s.Require().NoError(st.RequestChange(StateAddDKGComplaint, complaint)) + s.Require().NoError(st.RequestChange(StateAddDKGFinal, final)) +} + +func (s *StateTestSuite) makeConfigChanges(st *State) { + st.RequestChange(StateChangeLambdaBA, time.Nanosecond) + st.RequestChange(StateChangeLambdaDKG, time.Millisecond) + st.RequestChange(StateChangeRoundLength, uint64(1001)) + st.RequestChange(StateChangeMinBlockInterval, time.Second) + st.RequestChange(StateChangeNotarySetSize, uint32(5)) +} + +func (s *StateTestSuite) checkConfigChanges(config *types.Config) { + req := s.Require() + req.Equal(config.LambdaBA, time.Nanosecond) + req.Equal(config.LambdaDKG, time.Millisecond) + req.Equal(config.RoundLength, uint64(1001)) + req.Equal(config.MinBlockInterval, time.Second) + req.Equal(config.NotarySetSize, uint32(5)) +} + +func (s *StateTestSuite) TestEqual() { + var ( + req = s.Require() + lambda = 250 * time.Millisecond + ) + _, genesisNodes, err := NewKeys(20) + req.NoError(err) + st := NewState(1, genesisNodes, lambda, &common.NullLogger{}, true) + req.NoError(st.Equal(st)) + // One node is missing. + st1 := NewState(1, genesisNodes, lambda, &common.NullLogger{}, true) + for nID := range st1.nodes { + delete(st1.nodes, nID) + break + } + req.Equal(st.Equal(st1), ErrStateNodeSetNotEqual) + // Make some changes. + st2 := st.Clone() + req.NoError(st.Equal(st2)) + s.makeConfigChanges(st) + req.EqualError(ErrStateConfigNotEqual, st.Equal(st2).Error()) + req.NoError(st.ProposeCRS(2, common.NewRandomHash())) + req.NoError(st.RequestChange(StateResetDKG, common.NewRandomHash())) + masterPubKey := s.newDKGMasterPublicKey(2, 1) + ready := s.newDKGMPKReady(2, 1) + comp := s.newDKGComplaint(2, 1) + final := s.newDKGFinal(2, 1) + s.makeDKGChanges(st, masterPubKey, ready, comp, final) + // Remove dkg complaints from cloned one to check if equal. + st3 := st.Clone() + req.NoError(st.Equal(st3)) + delete(st3.dkgComplaints, uint64(2)) + req.EqualError(ErrStateDKGComplaintsNotEqual, st.Equal(st3).Error()) + // Remove dkg master public key from cloned one to check if equal. + st4 := st.Clone() + req.NoError(st.Equal(st4)) + delete(st4.dkgMasterPublicKeys, uint64(2)) + req.EqualError(ErrStateDKGMasterPublicKeysNotEqual, st.Equal(st4).Error()) + // Remove dkg ready from cloned one to check if equal. + st4a := st.Clone() + req.NoError(st.Equal(st4a)) + delete(st4a.dkgReadys, uint64(2)) + req.EqualError(ErrStateDKGMPKReadysNotEqual, st.Equal(st4a).Error()) + // Remove dkg finalize from cloned one to check if equal. 
+ st5 := st.Clone() + req.NoError(st.Equal(st5)) + delete(st5.dkgFinals, uint64(2)) + req.EqualError(ErrStateDKGFinalsNotEqual, st.Equal(st5).Error()) + // Remove dkgResetCount from cloned one to check if equal. + st6 := st.Clone() + req.NoError(st.Equal(st6)) + delete(st6.dkgResetCount, uint64(2)) + req.EqualError(ErrStateDKGResetCountNotEqual, st.Equal(st6).Error()) + + // Switch to remote mode. + st.SwitchToRemoteMode() + // Make some change. + req.NoError(st.RequestChange(StateChangeNotarySetSize, uint32(100))) + str := st.Clone() + req.NoError(st.Equal(str)) + // Remove the pending change, should not be equal. + req.Len(str.ownRequests, 1) + for k := range str.ownRequests { + delete(str.ownRequests, k) + } + req.EqualError(ErrStatePendingChangesNotEqual, st.Equal(str).Error()) +} + +func (s *StateTestSuite) TestPendingChangesEqual() { + var ( + req = s.Require() + lambda = 250 * time.Millisecond + ) + // Setup a non-local mode State instance. + _, genesisNodes, err := NewKeys(20) + req.NoError(err) + st := NewState(1, genesisNodes, lambda, &common.NullLogger{}, false) + req.NoError(st.Equal(st)) + // Apply some changes. + s.makeConfigChanges(st) + crs := common.NewRandomHash() + req.NoError(st.ProposeCRS(2, crs)) + masterPubKey := s.newDKGMasterPublicKey(2, 0) + ready := s.newDKGMPKReady(2, 0) + comp := s.newDKGComplaint(2, 0) + final := s.newDKGFinal(2, 0) + s.makeDKGChanges(st, masterPubKey, ready, comp, final) +} + +func (s *StateTestSuite) TestLocalMode() { + // Test State with local mode. + var ( + req = s.Require() + lambda = 250 * time.Millisecond + ) + _, genesisNodes, err := NewKeys(20) + req.NoError(err) + st := NewState(1, genesisNodes, lambda, &common.NullLogger{}, true) + config1, nodes1 := st.Snapshot() + req.True(s.compareNodes(genesisNodes, nodes1)) + // Check settings of config1 affected by genesisNodes and lambda. + req.Equal(config1.LambdaBA, lambda) + req.Equal(config1.LambdaDKG, lambda*10) + req.Equal(config1.RoundLength, uint64(1000)) + req.Equal(config1.NotarySetSize, uint32(len(genesisNodes))) + // Request some changes, every fields for config should be affected. + s.makeConfigChanges(st) + // Add new node. + prvKey, err := ecdsa.NewPrivateKey() + req.NoError(err) + pubKey := prvKey.PublicKey() + st.RequestChange(StateAddNode, pubKey) + config2, newNodes := st.Snapshot() + // Check if config changes are applied. + s.checkConfigChanges(config2) + // Check if new node is added. + req.True(s.findNode(newNodes, pubKey)) + // Test adding CRS. + crs := common.NewRandomHash() + req.NoError(st.ProposeCRS(2, crs)) + req.Equal(st.CRS(2), crs) + // Test adding node set, DKG complaints, final, master public key. + // Make sure everything is empty before changed. + req.Empty(st.DKGMasterPublicKeys(2)) + req.False(st.IsDKGMPKReady(2, 1)) + req.Empty(st.DKGComplaints(2)) + req.False(st.IsDKGFinal(2, 1)) + // Add DKG stuffs. + masterPubKey := s.newDKGMasterPublicKey(2, 0) + ready := s.newDKGMPKReady(2, 0) + comp := s.newDKGComplaint(2, 0) + final := s.newDKGFinal(2, 0) + s.makeDKGChanges(st, masterPubKey, ready, comp, final) + // Check DKGMasterPublicKeys. + masterKeyForRound := st.DKGMasterPublicKeys(2) + req.Len(masterKeyForRound, 1) + req.True(masterKeyForRound[0].Equal(masterPubKey)) + // Check IsDKGMPKReady. + req.True(st.IsDKGMPKReady(2, 1)) + // Check DKGComplaints. + compForRound := st.DKGComplaints(2) + req.Len(compForRound, 1) + req.True(compForRound[0].Equal(comp)) + // Check IsDKGFinal. + req.True(st.IsDKGFinal(2, 1)) + // Test ResetDKG. 
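+	// Per applyRequest's StateResetDKG case, this should replace the last
+	// proposed CRS and clear every DKG container for that round.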
+ crs = common.NewRandomHash()
+ req.NoError(st.RequestChange(StateResetDKG, crs))
+ req.Equal(st.CRS(2), crs)
+ // Make sure all DKG fields are cleared.
+ req.Empty(st.DKGMasterPublicKeys(2))
+ req.False(st.IsDKGMPKReady(2, 1))
+ req.Empty(st.DKGComplaints(2))
+ req.False(st.IsDKGFinal(2, 1))
+}
+
+func (s *StateTestSuite) TestPacking() {
+ // Make sure everything works when requests are packed
+ // and unpacked before being applied.
+ var (
+ req = s.Require()
+ lambda = 250 * time.Millisecond
+ )
+ packAndApply := func(st *State) {
+ // In remote mode, we need to manually convert own requests to global ones.
+ _, err := st.PackOwnRequests()
+ req.NoError(err)
+ // Pack changes into bytes.
+ b, err := st.PackRequests()
+ req.NoError(err)
+ req.NotEmpty(b)
+ // Apply those bytes back.
+ req.NoError(st.Apply(b))
+ }
+ // Make config changes.
+ _, genesisNodes, err := NewKeys(20)
+ req.NoError(err)
+ st := NewState(1, genesisNodes, lambda, &common.NullLogger{}, false)
+ s.makeConfigChanges(st)
+ // Add a new CRS.
+ crs := common.NewRandomHash()
+ req.NoError(st.ProposeCRS(2, crs))
+ // Add a new node.
+ prvKey, err := ecdsa.NewPrivateKey()
+ req.NoError(err)
+ pubKey := prvKey.PublicKey()
+ st.RequestChange(StateAddNode, pubKey)
+ // Add DKG data.
+ masterPubKey := s.newDKGMasterPublicKey(2, 0)
+ ready := s.newDKGMPKReady(2, 0)
+ comp := s.newDKGComplaint(2, 0)
+ final := s.newDKGFinal(2, 0)
+ s.makeDKGChanges(st, masterPubKey, ready, comp, final)
+ // Make sure everything is empty before the changes are applied.
+ req.Empty(st.DKGMasterPublicKeys(2))
+ req.False(st.IsDKGMPKReady(2, 1))
+ req.Empty(st.DKGComplaints(2))
+ req.False(st.IsDKGFinal(2, 1))
+ packAndApply(st)
+ // Check if configs are changed.
+ config, nodes := st.Snapshot()
+ s.checkConfigChanges(config)
+ // Check if CRS is added.
+ req.Equal(st.CRS(2), crs)
+ // Check if the new node is added.
+ req.True(s.findNode(nodes, pubKey))
+ // Check DKGMasterPublicKeys.
+ masterKeyForRound := st.DKGMasterPublicKeys(2)
+ req.Len(masterKeyForRound, 1)
+ req.True(masterKeyForRound[0].Equal(masterPubKey))
+ // Check DKGComplaints.
+ compForRound := st.DKGComplaints(2)
+ req.Len(compForRound, 1)
+ req.True(compForRound[0].Equal(comp))
+ // Check IsDKGMPKReady.
+ req.True(st.IsDKGMPKReady(2, 1))
+ // Check IsDKGFinal.
+ req.True(st.IsDKGFinal(2, 1))
+
+ // Test ResetDKG.
+ crs = common.NewRandomHash()
+ req.NoError(st.RequestChange(StateResetDKG, crs))
+ packAndApply(st)
+ req.Equal(st.CRS(2), crs)
+ // Make sure all DKG fields are cleared.
+ req.Empty(st.DKGMasterPublicKeys(2))
+ req.False(st.IsDKGMPKReady(2, 1))
+ req.Empty(st.DKGComplaints(2))
+ req.False(st.IsDKGFinal(2, 1))
+}
+
+func (s *StateTestSuite) TestRequestBroadcastAndPack() {
+ // This test case aims to demonstrate this scenario:
+ // - a change request is pending at one node.
+ // - that request can be packed by PackOwnRequests and sent to other nodes.
+ // - when some other node is allowed to propose a block, it packs all those
+ // 'own' requests from others into the block's payload.
+ // - when all nodes receive that block, all pending requests (including
+ // those 'own' requests) are cleaned up.
+ var (
+ req = s.Require()
+ lambda = 250 * time.Millisecond
+ )
+ _, genesisNodes, err := NewKeys(20)
+ req.NoError(err)
+ st := NewState(1, genesisNodes, lambda, &common.NullLogger{}, false)
+ st1 := NewState(1, genesisNodes, lambda, &common.NullLogger{}, false)
+ req.NoError(st.Equal(st1))
+ // Make configuration changes.
+ s.makeConfigChanges(st)
+ // Add a new CRS.
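+ // (Illustrative aside on the request lifecycle this test drives — not
+ // part of the original change, using only names defined in this file:
+ //
+ //   packed, _ := st.PackOwnRequests()      // drain the 'own' queue
+ //   _ = st1.AddRequestsFromOthers(packed)  // sync pending requests
+ //   b, _ := st1.PackRequests()             // proposer packs everything
+ //   _ = st.Apply(b)                        // every node applies the block
+ // )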
+ crs := common.NewRandomHash()
+ req.NoError(st.ProposeCRS(2, crs))
+ // Add a new node.
+ prvKey, err := ecdsa.NewPrivateKey()
+ req.NoError(err)
+ pubKey := prvKey.PublicKey()
+ st.RequestChange(StateAddNode, pubKey)
+ // Add DKG data.
+ masterPubKey := s.newDKGMasterPublicKey(2, 0)
+ ready := s.newDKGMPKReady(2, 0)
+ comp := s.newDKGComplaint(2, 0)
+ final := s.newDKGFinal(2, 0)
+ s.makeDKGChanges(st, masterPubKey, ready, comp, final)
+ // Pack those changes into a byte stream, and pass it to the other State
+ // instance.
+ packed, err := st.PackOwnRequests()
+ req.NoError(err)
+ req.NotEmpty(packed)
+ // The second attempt to pack would get an empty result.
+ emptyPackedAsByte, err := st.PackOwnRequests()
+ req.NoError(err)
+ emptyPacked, err := st.unpackRequests(emptyPackedAsByte)
+ req.NoError(err)
+ req.Empty(emptyPacked)
+ // Pass it to others.
+ req.NoError(st1.AddRequestsFromOthers(packed))
+ // These two instances are equal now, because their pending change requests
+ // are synced.
+ req.NoError(st.Equal(st1))
+ // Make them apply those pending changes.
+ applyChangesForRemoteState := func(s *State) {
+ p, err := s.PackRequests()
+ req.NoError(err)
+ req.NotEmpty(p)
+ req.NoError(s.Apply(p))
+ }
+ applyChangesForRemoteState(st)
+ applyChangesForRemoteState(st1)
+ // They should be equal after applying those changes.
+ req.NoError(st.Equal(st1))
+}
+
+func (s *StateTestSuite) TestUnmatchedResetCount() {
+ _, genesisNodes, err := NewKeys(20)
+ s.Require().NoError(err)
+ st := NewState(1, genesisNodes, 100*time.Millisecond,
+ &common.NullLogger{}, true)
+ // Make sure the case from older versions without reset won't fail.
+ mpk := s.newDKGMasterPublicKey(1, 0)
+ ready := s.newDKGMPKReady(1, 0)
+ comp := s.newDKGComplaint(1, 0)
+ final := s.newDKGFinal(1, 0)
+ s.Require().NoError(st.RequestChange(StateAddDKGMasterPublicKey, mpk))
+ s.Require().NoError(st.RequestChange(StateAddDKGMPKReady, ready))
+ s.Require().NoError(st.RequestChange(StateAddDKGComplaint, comp))
+ s.Require().NoError(st.RequestChange(StateAddDKGFinal, final))
+ // Make round 1 reset twice.
+ s.Require().NoError(st.RequestChange(StateResetDKG, common.NewRandomHash()))
+ s.Require().NoError(st.RequestChange(StateResetDKG, common.NewRandomHash()))
+ s.Require().Equal(st.dkgResetCount[1], uint64(2))
+ s.Require().EqualError(ErrChangeWontApply, st.RequestChange(
+ StateAddDKGMasterPublicKey, mpk).Error())
+ s.Require().EqualError(ErrChangeWontApply, st.RequestChange(
+ StateAddDKGMPKReady, ready).Error())
+ s.Require().EqualError(ErrChangeWontApply, st.RequestChange(
+ StateAddDKGComplaint, comp).Error())
+ s.Require().EqualError(ErrChangeWontApply, st.RequestChange(
+ StateAddDKGFinal, final).Error())
+ mpk = s.newDKGMasterPublicKey(1, 2)
+ ready = s.newDKGMPKReady(1, 2)
+ comp = s.newDKGComplaint(1, 2)
+ final = s.newDKGFinal(1, 2)
+ s.Require().NoError(st.RequestChange(StateAddDKGMasterPublicKey, mpk))
+ s.Require().NoError(st.RequestChange(StateAddDKGMPKReady, ready))
+ s.Require().NoError(st.RequestChange(StateAddDKGComplaint, comp))
+ s.Require().NoError(st.RequestChange(StateAddDKGFinal, final))
+}
+
+func TestState(t *testing.T) {
+ suite.Run(t, new(StateTestSuite))
+}
diff --git a/dex/consensus/core/test/tcp-transport.go b/dex/consensus/core/test/tcp-transport.go
new file mode 100644
index 000000000..fdc47d5a8
--- /dev/null
+++ b/dex/consensus/core/test/tcp-transport.go
@@ -0,0 +1,916 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math"
+ "math/rand"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+)
+
+const (
+ tcpThroughputReportNum = 10
+)
+
+type tcpHandshake struct {
+ DMoment time.Time
+ Peers map[types.NodeID]string
+}
+
+type tcpPeerRecord struct {
+ conn string
+ sendChannel chan<- []byte
+ pubKey crypto.PublicKey
+}
+
+// tcpMessage is the general message between peers and the server.
+type tcpMessage struct {
+ NodeID types.NodeID `json:"nid"`
+ Type string `json:"type"`
+ Info string `json:"conn"`
+}
+
+// BlockEventMessage is for monitoring block events' time.
+type BlockEventMessage struct {
+ BlockHash common.Hash `json:"hash"`
+ Timestamps []time.Time `json:"timestamps"`
+}
+
+// buildPeerInfo is a tricky way to combine the connection string and the
+// base64-encoded public key bytes into a single string, separated by ';'.
+func buildPeerInfo(pubKey crypto.PublicKey, conn string) string {
+ return conn + ";" + base64.StdEncoding.EncodeToString(pubKey.Bytes())
+}
+
+// parsePeerInfo parses the connection string and base64-encoded public key
+// built via buildPeerInfo.
+func parsePeerInfo(info string) (key crypto.PublicKey, conn string) {
+ tokens := strings.Split(info, ";")
+ conn = tokens[0]
+ data, err := base64.StdEncoding.DecodeString(tokens[1])
+ if err != nil {
+ panic(err)
+ }
+ key, err = ecdsa.NewPublicKeyFromByteSlice(data)
+ if err != nil {
+ panic(err)
+ }
+ return
+}
+
+var (
+ // ErrTCPHandShakeFail is reported if the tcp handshake fails.
+ ErrTCPHandShakeFail = fmt.Errorf("tcp handshake fail")
+
+ // ErrConnectToUnexpectedPeer is reported if connecting to an unexpected peer.
+ ErrConnectToUnexpectedPeer = fmt.Errorf("connect to unexpected peer")
+
+ // ErrMessageOverflow is reported if the message is too long.
+ ErrMessageOverflow = fmt.Errorf("message size overflow")
+)
+
+// TCPTransport implements the Transport interface via TCP connections.
+type TCPTransport struct {
+ peerType TransportPeerType
+ nID types.NodeID
+ pubKey crypto.PublicKey
+ localPort int
+ peers map[types.NodeID]*tcpPeerRecord
+ peersLock sync.RWMutex
+ recvChannel chan *TransportEnvelope
+ ctx context.Context
+ cancel context.CancelFunc
+ marshaller Marshaller
+ throughputRecords []ThroughputRecord
+ throughputLock sync.Mutex
+ dMoment time.Time
+}
+
+// NewTCPTransport constructs a TCPTransport instance.
+func NewTCPTransport(peerType TransportPeerType, pubKey crypto.PublicKey, + marshaller Marshaller, localPort int) *TCPTransport { + ctx, cancel := context.WithCancel(context.Background()) + return &TCPTransport{ + peerType: peerType, + nID: types.NewNodeID(pubKey), + pubKey: pubKey, + peers: make(map[types.NodeID]*tcpPeerRecord), + recvChannel: make(chan *TransportEnvelope, 1000), + ctx: ctx, + cancel: cancel, + localPort: localPort, + marshaller: marshaller, + throughputRecords: []ThroughputRecord{}, + } +} + +const handshakeMsg = "Welcome to DEXON network for test." + +func (t *TCPTransport) serverHandshake(conn net.Conn) ( + nID types.NodeID, err error) { + if err := conn.SetDeadline(time.Now().Add(3 * time.Second)); err != nil { + panic(err) + } + msg := &tcpMessage{ + NodeID: t.nID, + Type: "handshake", + Info: handshakeMsg, + } + var payload []byte + payload, err = json.Marshal(msg) + if err != nil { + return + } + if err = t.write(conn, payload); err != nil { + return + } + if payload, err = t.read(conn); err != nil { + return + } + if err = json.Unmarshal(payload, &msg); err != nil { + return + } + if msg.Type != "handshake-ack" || msg.Info != handshakeMsg { + err = ErrTCPHandShakeFail + return + } + nID = msg.NodeID + return +} + +func (t *TCPTransport) clientHandshake(conn net.Conn) ( + nID types.NodeID, err error) { + if err := conn.SetDeadline(time.Now().Add(3 * time.Second)); err != nil { + panic(err) + } + var payload []byte + if payload, err = t.read(conn); err != nil { + return + } + msg := &tcpMessage{} + if err = json.Unmarshal(payload, &msg); err != nil { + return + } + if msg.Type != "handshake" || msg.Info != handshakeMsg { + err = ErrTCPHandShakeFail + return + } + nID = msg.NodeID + msg = &tcpMessage{ + NodeID: t.nID, + Type: "handshake-ack", + Info: handshakeMsg, + } + payload, err = json.Marshal(msg) + if err != nil { + return + } + if err = t.write(conn, payload); err != nil { + return + } + return +} + +// Disconnect implements Transport.Disconnect method. +func (t *TCPTransport) Disconnect(endpoint types.NodeID) { + delete(t.peers, endpoint) +} + +func (t *TCPTransport) send( + endpoint types.NodeID, msg interface{}, payload []byte) { + t.peersLock.RLock() + defer t.peersLock.RUnlock() + t.handleThroughputData(msg, payload) + t.peers[endpoint].sendChannel <- payload +} + +// Send implements Transport.Send method. +func (t *TCPTransport) Send( + endpoint types.NodeID, msg interface{}) (err error) { + + if _, exist := t.peers[endpoint]; !exist { + return fmt.Errorf("the endpoint does not exists: %v", endpoint) + } + + payload, err := t.marshalMessage(msg) + if err != nil { + return + } + go t.send(endpoint, msg, payload) + return +} + +// Broadcast implements Transport.Broadcast method. +func (t *TCPTransport) Broadcast(endpoints map[types.NodeID]struct{}, + latency LatencyModel, msg interface{}) (err error) { + payload, err := t.marshalMessage(msg) + if err != nil { + return + } + for nID := range endpoints { + if nID == t.nID { + continue + } + go func(ID types.NodeID) { + time.Sleep(latency.Delay()) + t.send(ID, msg, payload) + }(nID) + } + return +} + +// Close implements Transport.Close method. +func (t *TCPTransport) Close() (err error) { + // Tell all routines raised by us to die. + t.cancel() + // Reset peers. + t.peersLock.Lock() + defer t.peersLock.Unlock() + t.peers = make(map[types.NodeID]*tcpPeerRecord) + // Tell our user that this channel is closed. 
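+ // (Aside, illustrative only and not from the original patch: after the
+ // close below, recvChannel is set to nil; in Go a send on a nil channel
+ // blocks forever rather than panicking, so a connReader goroutine that
+ // is still draining a connection stalls instead of crashing once the
+ // transport is closed.)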
+ close(t.recvChannel) + t.recvChannel = nil + return +} + +// Peers implements Transport.Peers method. +func (t *TCPTransport) Peers() (peers []crypto.PublicKey) { + for _, rec := range t.peers { + peers = append(peers, rec.pubKey) + } + return +} + +func (t *TCPTransport) write(conn net.Conn, b []byte) (err error) { + if len(b) > math.MaxUint32 { + return ErrMessageOverflow + } + msgLength := make([]byte, 4) + binary.LittleEndian.PutUint32(msgLength, uint32(len(b))) + if _, err = conn.Write(msgLength); err != nil { + return + } + if _, err = conn.Write(b); err != nil { + return + } + return +} + +func (t *TCPTransport) read(conn net.Conn) (b []byte, err error) { + msgLength := make([]byte, 4) + if _, err = io.ReadFull(conn, msgLength); err != nil { + return + } + b = make([]byte, int(binary.LittleEndian.Uint32(msgLength))) + if _, err = io.ReadFull(conn, b); err != nil { + return + } + return +} + +func (t *TCPTransport) marshalMessage( + msg interface{}) (payload []byte, err error) { + + msgCarrier := struct { + PeerType TransportPeerType `json:"peer_type"` + From types.NodeID `json:"from"` + Type string `json:"type"` + Payload interface{} `json:"payload"` + }{ + PeerType: t.peerType, + From: t.nID, + Payload: msg, + } + switch msg.(type) { + case *tcpHandshake: + msgCarrier.Type = "tcp-handshake" + case *tcpMessage: + msgCarrier.Type = "trans-msg" + case []ThroughputRecord: + msgCarrier.Type = "throughput-record" + case *BlockEventMessage: + msgCarrier.Type = "block-event" + default: + if t.marshaller == nil { + err = fmt.Errorf("unknown msg type: %v", msg) + break + } + // Delegate to user defined marshaller. + var buff []byte + msgCarrier.Type, buff, err = t.marshaller.Marshal(msg) + if err != nil { + break + } + msgCarrier.Payload = json.RawMessage(buff) + } + if err != nil { + return + } + payload, err = json.Marshal(msgCarrier) + return +} + +func (t *TCPTransport) unmarshalMessage( + payload []byte) ( + peerType TransportPeerType, + from types.NodeID, + msg interface{}, + err error) { + + msgCarrier := struct { + PeerType TransportPeerType `json:"peer_type"` + From types.NodeID `json:"from"` + Type string `json:"type"` + Payload json.RawMessage `json:"payload"` + }{} + if err = json.Unmarshal(payload, &msgCarrier); err != nil { + return + } + peerType = msgCarrier.PeerType + from = msgCarrier.From + switch msgCarrier.Type { + case "tcp-handshake": + handshake := &tcpHandshake{} + if err = json.Unmarshal(msgCarrier.Payload, &handshake); err != nil { + return + } + msg = handshake + case "trans-msg": + m := &tcpMessage{} + if err = json.Unmarshal(msgCarrier.Payload, m); err != nil { + return + } + msg = m + case "throughput-record": + m := &[]ThroughputRecord{} + if err = json.Unmarshal(msgCarrier.Payload, m); err != nil { + return + } + msg = m + case "block-event": + m := &BlockEventMessage{} + if err = json.Unmarshal(msgCarrier.Payload, m); err != nil { + return + } + msg = m + default: + if t.marshaller == nil { + err = fmt.Errorf("unknown msg type: %v", msgCarrier.Type) + break + } + msg, err = t.marshaller.Unmarshal(msgCarrier.Type, msgCarrier.Payload) + } + return +} + +// connReader is a reader routine to read from a TCP connection. +func (t *TCPTransport) connReader(conn net.Conn) { + defer func() { + if err := conn.Close(); err != nil { + panic(err) + } + }() + + var ( + err error + payload []byte + ) + + checkErr := func(err error) (toBreak bool) { + if err == io.EOF { + toBreak = true + return + } + // Check if timeout. 
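+ // (Illustrative aside, not from the original patch: a more general
+ // form of the check below would use the net.Error interface, e.g.
+ //
+ //   var nErr net.Error
+ //   if errors.As(err, &nErr) && nErr.Timeout() { /* keep polling */ }
+ //
+ // which also matches wrapped errors; this test transport keeps the
+ // stricter panic-on-unknown-error behaviour.)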
+ nErr, ok := err.(*net.OpError)
+ if !ok {
+ panic(err)
+ }
+ if !nErr.Timeout() {
+ panic(err)
+ }
+ return
+ }
+Loop:
+ for {
+ select {
+ case <-t.ctx.Done():
+ break Loop
+ default:
+ }
+ // Add timeout when reading to check if shutdown.
+ if err := conn.SetReadDeadline(
+ time.Now().Add(2 * time.Second)); err != nil {
+
+ panic(err)
+ }
+ // Read message length.
+ if payload, err = t.read(conn); err != nil {
+ if checkErr(err) {
+ break
+ }
+ continue
+ }
+ peerType, from, msg, err := t.unmarshalMessage(payload)
+ if err != nil {
+ panic(err)
+ }
+ t.recvChannel <- &TransportEnvelope{
+ PeerType: peerType,
+ From: from,
+ Msg: msg,
+ }
+ }
+}
+
+// connWriter is a writer routine to write to a TCP connection.
+func (t *TCPTransport) connWriter(conn net.Conn) chan<- []byte {
+ // Disable write deadline.
+ if err := conn.SetWriteDeadline(time.Time{}); err != nil {
+ panic(err)
+ }
+
+ ch := make(chan []byte, 1000)
+ go func() {
+ defer func() {
+ close(ch)
+ if err := conn.Close(); err != nil {
+ panic(err)
+ }
+ }()
+ for {
+ select {
+ case <-t.ctx.Done():
+ return
+ default:
+ }
+ select {
+ case <-t.ctx.Done():
+ return
+ case msg := <-ch:
+ // Send message length in uint32.
+ if err := t.write(conn, msg); err != nil {
+ panic(err)
+ }
+ }
+ }
+ }()
+ return ch
+}
+
+// listenerRoutine is a routine to accept incoming requests for TCP connections.
+func (t *TCPTransport) listenerRoutine(listener *net.TCPListener) {
+ closed := false
+ defer func() {
+ if closed {
+ return
+ }
+ if err := listener.Close(); err != nil {
+ panic(err)
+ }
+ }()
+ for {
+ select {
+ case <-t.ctx.Done():
+ return
+ default:
+ }
+
+ if err := listener.SetDeadline(time.Now().Add(5 * time.Second)); err != nil {
+ panic(err)
+ }
+ conn, err := listener.Accept()
+ if err != nil {
+ // Check if the connection is closed.
+ if strings.Contains(err.Error(), "use of closed network connection") {
+ closed = true
+ return
+ }
+ // Check if timeout error.
+ nErr, ok := err.(*net.OpError)
+ if !ok {
+ panic(err)
+ }
+ if !nErr.Timeout() {
+ panic(err)
+ }
+ continue
+ }
+ if _, err := t.serverHandshake(conn); err != nil {
+ fmt.Println(err)
+ continue
+ }
+ go t.connReader(conn)
+ }
+}
+
+// buildConnectionsToPeers constructs TCP connections to each peer.
+// Although a TCP connection could be used for both read and write operations,
+// we only utilize the write part for simplicity.
+func (t *TCPTransport) buildConnectionsToPeers() (err error) {
+ var wg sync.WaitGroup
+ var errs []error
+ var errsLock sync.Mutex
+ addErr := func(err error) {
+ errsLock.Lock()
+ defer errsLock.Unlock()
+ errs = append(errs, err)
+ }
+ for nID, rec := range t.peers {
+ if nID == t.nID {
+ continue
+ }
+ wg.Add(1)
+ go func(nID types.NodeID, addr string) {
+ defer wg.Done()
+ conn, localErr := net.Dial("tcp", addr)
+ if localErr != nil {
+ addErr(localErr)
+ return
+ }
+ serverID, localErr := t.clientHandshake(conn)
+ if localErr != nil {
+ addErr(localErr)
+ return
+ }
+ if nID != serverID {
+ addErr(ErrConnectToUnexpectedPeer)
+ return
+ }
+ t.peersLock.Lock()
+ defer t.peersLock.Unlock()
+ t.peers[nID].sendChannel = t.connWriter(conn)
+ }(nID, rec.conn)
+ }
+ wg.Wait()
+ if len(errs) > 0 {
+ // Propagate this error to the outside; at least one error
+ // could be returned to the caller.
+ err = errs[0]
+ }
+ return
+}
+
+// ThroughputRecord records the network throughput data.
+type ThroughputRecord struct {
+ Type string `json:"type"`
+ Size int `json:"size"`
+ Time time.Time `json:"time"`
+}
+
+// TCPTransportClient implements TransportClient based on a TCP connection.
+type TCPTransportClient struct {
+ TCPTransport
+ local bool
+ serverWriteChannel chan<- []byte
+}
+
+// NewTCPTransportClient constructs a TCPTransportClient instance.
+func NewTCPTransportClient(
+ pubKey crypto.PublicKey,
+ marshaller Marshaller,
+ local bool) *TCPTransportClient {
+
+ return &TCPTransportClient{
+ TCPTransport: *NewTCPTransport(TransportPeer, pubKey, marshaller, 8080),
+ local: local,
+ }
+}
+
+// Report implements TransportClient.Report method.
+func (t *TCPTransportClient) Report(msg interface{}) (err error) {
+ payload, err := t.marshalMessage(msg)
+ if err != nil {
+ return
+ }
+ go func() {
+ t.serverWriteChannel <- payload
+ }()
+ return
+}
+
+// Join implements TransportClient.Join method.
+func (t *TCPTransportClient) Join(
+ serverEndpoint interface{}) (ch <-chan *TransportEnvelope, err error) {
+ // Initiate a TCP server.
+ // TODO(mission): config initial listening port.
+ var (
+ ln net.Listener
+ envelopes = []*TransportEnvelope{}
+ ok bool
+ addr string
+ conn string
+ )
+ for {
+ addr = net.JoinHostPort("0.0.0.0", strconv.Itoa(t.localPort))
+ ln, err = net.Listen("tcp", addr)
+ if err == nil {
+ go t.listenerRoutine(ln.(*net.TCPListener))
+ // It is possible to listen on the same port on some platforms.
+ // Check if this one is actually listening.
+ testConn, e := net.Dial("tcp", addr)
+ if e != nil {
+ err = e
+ return
+ }
+ nID, e := t.clientHandshake(testConn)
+ if e != nil {
+ err = e
+ return
+ }
+ if nID == t.nID {
+ break
+ }
+ // #nosec G104
+ ln.Close()
+ }
+ if err != nil {
+ if !t.local {
+ return
+ }
+ // In local-tcp, retry with another port when the address is in use.
+ operr, ok := err.(*net.OpError)
+ if !ok {
+ panic(err)
+ }
+ oserr, ok := operr.Err.(*os.SyscallError)
+ if !ok {
+ panic(operr)
+ }
+ errno, ok := oserr.Err.(syscall.Errno)
+ if !ok {
+ panic(oserr)
+ }
+ if errno != syscall.EADDRINUSE {
+ panic(errno)
+ }
+ }
+ // The port is in use; generate another port randomly.
+ t.localPort = 1024 + rand.Int()%1024 // #nosec G404
+ }
+
+ fmt.Println("Connecting to server", "endpoint", serverEndpoint)
+ serverConn, err := net.Dial("tcp", serverEndpoint.(string))
+ if err != nil {
+ return
+ }
+ _, err = t.clientHandshake(serverConn)
+ if err != nil {
+ return
+ }
+ t.serverWriteChannel = t.connWriter(serverConn)
+ if t.local {
+ conn = addr
+ } else {
+ // Find my IP.
+ var ip string
+ if ip, err = FindMyIP(); err != nil {
+ return
+ }
+ conn = net.JoinHostPort(ip, strconv.Itoa(t.localPort))
+ }
+ if err = t.Report(&tcpMessage{
+ NodeID: t.nID,
+ Type: "conn",
+ Info: buildPeerInfo(t.pubKey, conn),
+ }); err != nil {
+ return
+ }
+ // Wait for the peers list sent by the server.
+ e := <-t.recvChannel
+ handshake, ok := e.Msg.(*tcpHandshake)
+ if !ok {
+ panic(fmt.Errorf("expect handshake, not %v", e))
+ }
+ t.dMoment = handshake.DMoment
+ // Setup peers information.
+ for nID, info := range handshake.Peers {
+ pubKey, conn := parsePeerInfo(info)
+ t.peers[nID] = &tcpPeerRecord{
+ conn: conn,
+ pubKey: pubKey,
+ }
+ }
+ // Setup connections to other peers.
+ if err = t.buildConnectionsToPeers(); err != nil {
+ return
+ }
+ // Report to the server that the connections to other peers are ready.
+ if err = t.Report(&tcpMessage{
+ Type: "conn-ready",
+ NodeID: t.nID,
+ }); err != nil {
+ return
+ }
+ // Wait for the server to ack us that all peers are ready.
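+ // (Aside — an illustrative summary of the join protocol as implemented
+ // in this file, not an authoritative spec:
+ //
+ //   client                          server
+ //   ------                          ------
+ //   "conn" + peer info      -->
+ //                           <--     tcpHandshake{DMoment, Peers}
+ //   dial every other peer
+ //   "conn-ready"            -->
+ //                           <--     "all-ready"
+ // )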
+ for {
+ e := <-t.recvChannel
+ msg, ok := e.Msg.(*tcpMessage)
+ if !ok {
+ envelopes = append(envelopes, e)
+ continue
+ }
+ if msg.Type != "all-ready" {
+ err = fmt.Errorf("expected ready message, but %v", msg)
+ return
+ }
+ break
+ }
+ // Replay those messages sent before the peer list and ready-ack.
+ for _, e := range envelopes {
+ t.recvChannel <- e
+ }
+ ch = t.recvChannel
+ return
+}
+
+// Send calls TCPTransport's Send, and sends the throughput data to the
+// peer server.
+func (t *TCPTransportClient) Send(
+ endpoint types.NodeID, msg interface{}) (err error) {
+
+ if err := t.TCPTransport.Send(endpoint, msg); err != nil {
+ return err
+ }
+ if len(t.throughputRecords) > tcpThroughputReportNum {
+ t.throughputLock.Lock()
+ defer t.throughputLock.Unlock()
+ if err := t.Report(t.throughputRecords); err != nil {
+ panic(err)
+ }
+ t.throughputRecords = t.throughputRecords[:0]
+ }
+ return
+}
+
+// DMoment implements TransportClient.
+func (t *TCPTransportClient) DMoment() time.Time {
+ return t.dMoment
+}
+
+// TCPTransportServer implements TransportServer via TCP connections.
+type TCPTransportServer struct {
+ TCPTransport
+}
+
+// NewTCPTransportServer constructs a TCPTransportServer instance.
+func NewTCPTransportServer(
+ marshaller Marshaller,
+ serverPort int) *TCPTransportServer {
+
+ prvKey, err := ecdsa.NewPrivateKey()
+ if err != nil {
+ panic(err)
+ }
+ return &TCPTransportServer{
+ // NOTE: the assumption here is that the node IDs of peers
+ // won't be zero.
+ TCPTransport: *NewTCPTransport(
+ TransportPeerServer, prvKey.PublicKey(), marshaller, serverPort),
+ }
+}
+
+// Host implements TransportServer.Host method.
+func (t *TCPTransportServer) Host() (chan *TransportEnvelope, error) {
+ // The port of the peer server should be known to other peers;
+ // if we can listen on the pre-defined port, we don't have to
+ // retry with other random ports.
+ ln, err := net.Listen(
+ "tcp", net.JoinHostPort("0.0.0.0", strconv.Itoa(t.localPort)))
+ if err != nil {
+ return nil, err
+ }
+ go t.listenerRoutine(ln.(*net.TCPListener))
+ return t.recvChannel, nil
+}
+
+// SetDMoment implements TransportServer.SetDMoment method.
+func (t *TCPTransportServer) SetDMoment(dMoment time.Time) {
+ t.dMoment = dMoment
+}
+
+// WaitForPeers implements TransportServer.WaitForPeers method.
+func (t *TCPTransportServer) WaitForPeers(numPeers uint32) (err error) {
+ // Collect peers' info. Packets other than peer info are
+ // unexpected.
+ peersInfo := make(map[types.NodeID]string)
+ for {
+ // Wait for connection info reported by peers.
+ e := <-t.recvChannel
+ msg, ok := e.Msg.(*tcpMessage)
+ if !ok {
+ panic(fmt.Errorf("expect tcpMessage, not %v", e))
+ }
+ if msg.Type != "conn" {
+ panic(fmt.Errorf("expect connection report, not %v", e))
+ }
+ pubKey, conn := parsePeerInfo(msg.Info)
+ fmt.Println("Peer connected", "peer", conn)
+ t.peers[msg.NodeID] = &tcpPeerRecord{
+ conn: conn,
+ pubKey: pubKey,
+ }
+ peersInfo[msg.NodeID] = msg.Info
+ // Check if we have already collected enough peers.
+ if uint32(len(peersInfo)) == numPeers {
+ break
+ }
+ }
+ // Send collected peers back to them.
+ if err = t.buildConnectionsToPeers(); err != nil {
+ return
+ }
+ peers := make(map[types.NodeID]struct{})
+ for ID := range t.peers {
+ peers[ID] = struct{}{}
+ }
+ handshake := &tcpHandshake{
+ DMoment: t.dMoment,
+ Peers: peersInfo,
+ }
+ if err = t.Broadcast(peers, &FixedLatencyModel{}, handshake); err != nil {
+ return
+ }
+ // Wait for peers to send the 'ready' report.
+ readies := make(map[types.NodeID]struct{}) + for { + e := <-t.recvChannel + msg, ok := e.Msg.(*tcpMessage) + if !ok { + panic(fmt.Errorf("expect tcpMessage, not %v", e)) + } + if msg.Type != "conn-ready" { + panic(fmt.Errorf("expect connection ready, not %v", e)) + } + if _, reported := readies[msg.NodeID]; reported { + panic(fmt.Errorf("already report conn-ready message: %v", e)) + } + readies[msg.NodeID] = struct{}{} + if uint32(len(readies)) == numPeers { + break + } + } + // Ack all peers ready to go. + if err = t.Broadcast(peers, &FixedLatencyModel{}, + &tcpMessage{Type: "all-ready"}); err != nil { + return + } + return +} + +func (t *TCPTransport) handleThroughputData(msg interface{}, payload []byte) { + sentTime := time.Now() + t.throughputLock.Lock() + defer t.throughputLock.Unlock() + recordType := "" + switch msg.(type) { + case *types.Vote: + recordType = "vote" + case *types.Block: + recordType = "block" + case *types.AgreementResult: + recordType = "agreement_result" + case *dkg.PartialSignature: + recordType = "partial_sig" + } + if len(recordType) > 0 { + t.throughputRecords = append(t.throughputRecords, ThroughputRecord{ + Type: recordType, + Time: sentTime, + Size: len(payload), + }) + } +} diff --git a/dex/consensus/core/test/transport_test.go b/dex/consensus/core/test/transport_test.go new file mode 100644 index 000000000..9140649ac --- /dev/null +++ b/dex/consensus/core/test/transport_test.go @@ -0,0 +1,287 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. 
+
+package test
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ "github.com/stretchr/testify/suite"
+)
+
+type testPeer struct {
+ nID types.NodeID
+ trans TransportClient
+ recv <-chan *TransportEnvelope
+ expectedEchoHash common.Hash
+ echoBlock *types.Block
+ myBlock *types.Block
+ myBlockSentTime time.Time
+ blocks map[types.NodeID]*types.Block
+ blocksReceiveTime map[common.Hash]time.Time
+}
+
+type testPeerServer struct {
+ trans TransportServer
+ recv chan *TransportEnvelope
+ peerBlocks map[types.NodeID]*types.Block
+}
+
+type testMarshaller struct{}
+
+func (m *testMarshaller) Unmarshal(
+ msgType string, payload []byte) (msg interface{}, err error) {
+
+ switch msgType {
+ case "block":
+ block := &types.Block{}
+ if err = json.Unmarshal(payload, block); err != nil {
+ return
+ }
+ msg = block
+ default:
+ err = fmt.Errorf("unknown message type: %v", msgType)
+ }
+ return
+}
+
+func (m *testMarshaller) Marshal(
+ msg interface{}) (msgType string, payload []byte, err error) {
+
+ switch msg.(type) {
+ case *types.Block:
+ if payload, err = json.Marshal(msg); err != nil {
+ return
+ }
+ msgType = "block"
+ default:
+ err = fmt.Errorf("unknown message type: %v", msg)
+ }
+ return
+}
+
+type TransportTestSuite struct {
+ suite.Suite
+}
+
+func (s *TransportTestSuite) baseTest(
+ server *testPeerServer, peers map[types.NodeID]*testPeer, delay float64) {
+ var (
+ req = s.Require()
+ delayDuration = time.Duration(delay) * time.Millisecond
+ wg sync.WaitGroup
+ )
+
+ // For each peer, do the following:
+ // - broadcast 1 block.
+ // - report one random block to the server, along with its node ID.
+ // The server echoes the random block back to the peer.
+ handleServer := func(server *testPeerServer) {
+ defer wg.Done()
+ server.peerBlocks = make(map[types.NodeID]*types.Block)
+ for {
+ select {
+ case e := <-server.recv:
+ req.Equal(e.PeerType, TransportPeer)
+ switch v := e.Msg.(type) {
+ case *types.Block:
+ req.Equal(v.ProposerID, e.From)
+ server.peerBlocks[v.ProposerID] = v
+ // Echo the block back.
+ server.trans.Send(v.ProposerID, v)
+ }
+ }
+ // Upon receiving blocks from all peers, stop.
+ if len(server.peerBlocks) == len(peers) {
+ return
+ }
+ }
+ }
+ handlePeer := func(peer *testPeer) {
+ defer wg.Done()
+ peer.blocks = make(map[types.NodeID]*types.Block)
+ peer.blocksReceiveTime = make(map[common.Hash]time.Time)
+ for {
+ select {
+ case e := <-peer.recv:
+ switch v := e.Msg.(type) {
+ case *types.Block:
+ if v.ProposerID == peer.nID {
+ req.Equal(e.PeerType, TransportPeerServer)
+ peer.echoBlock = v
+ } else {
+ req.Equal(e.PeerType, TransportPeer)
+ req.Equal(e.From, v.ProposerID)
+ peer.blocks[v.ProposerID] = v
+ peer.blocksReceiveTime[v.Hash] = time.Now()
+ }
+ }
+ }
+ // Upon receiving blocks from all other peers, and the echo from
+ // the server, stop.
+ if peer.echoBlock != nil && len(peer.blocks) == len(peers)-1 {
+ return
+ }
+ }
+ }
+ wg.Add(len(peers) + 1)
+ go handleServer(server)
+ peersAsMap := make(map[types.NodeID]struct{})
+ for nID := range peers {
+ peersAsMap[nID] = struct{}{}
+ }
+ for nID, peer := range peers {
+ go handlePeer(peer)
+ // Broadcast a block.
+ peer.myBlock = &types.Block{
+ ProposerID: nID,
+ Hash: common.NewRandomHash(),
+ }
+ peer.myBlockSentTime = time.Now()
+ peer.trans.Broadcast(
+ peersAsMap, &FixedLatencyModel{Latency: delay}, peer.myBlock)
+ // Report a block to the server.
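+ // (Illustrative aside, not part of the original change: with
+ // FixedLatencyModel{Latency: delay}, each broadcast above appears to be
+ // held for `delay` milliseconds before hitting the wire — judging by
+ // how delayDuration is derived — which is what lets the tail of this
+ // test assert, for every peer pair,
+ //
+ //   blocksReceiveTime[hash].Sub(myBlockSentTime) >= delayDuration
+ // )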
+ peer.expectedEchoHash = common.NewRandomHash() + peer.trans.Report(&types.Block{ + ProposerID: nID, + Hash: peer.expectedEchoHash, + }) + } + wg.Wait() + // Make sure each sent block is received. + for nID, peer := range peers { + req.NotNil(peer.echoBlock) + req.Equal(peer.echoBlock.Hash, peer.expectedEchoHash) + for othernID, otherPeer := range peers { + if nID == othernID { + continue + } + req.Equal( + peer.myBlock.Hash, + otherPeer.blocks[peer.nID].Hash) + } + } + // Make sure the latency is expected. + for nID, peer := range peers { + for othernID, otherPeer := range peers { + if othernID == nID { + continue + } + req.True(otherPeer.blocksReceiveTime[peer.myBlock.Hash].Sub( + peer.myBlockSentTime) >= delayDuration) + } + } +} + +func (s *TransportTestSuite) TestFake() { + var ( + peerCount = 13 + req = s.Require() + peers = make(map[types.NodeID]*testPeer) + prvKeys = GenerateRandomPrivateKeys(peerCount) + err error + wg sync.WaitGroup + server = &testPeerServer{trans: NewFakeTransportServer()} + ) + // Setup PeerServer + server.recv, err = server.trans.Host() + req.Nil(err) + // Setup Peers + wg.Add(len(prvKeys)) + for _, key := range prvKeys { + nID := types.NewNodeID(key.PublicKey()) + peer := &testPeer{ + nID: nID, + trans: NewFakeTransportClient(key.PublicKey()), + } + peers[nID] = peer + go func() { + defer wg.Done() + recv, err := peer.trans.Join(server.recv) + req.Nil(err) + peer.recv = recv + }() + } + // Block here until we collect enough peers. + server.trans.WaitForPeers(uint32(peerCount)) + // Make sure all clients are ready. + wg.Wait() + s.baseTest(server, peers, 300) + req.Nil(server.trans.Close()) + for _, peer := range peers { + req.Nil(peer.trans.Close()) + } +} + +func (s *TransportTestSuite) TestTCPLocal() { + + var ( + peerCount = 13 + req = s.Require() + peers = make(map[types.NodeID]*testPeer) + prvKeys = GenerateRandomPrivateKeys(peerCount) + err error + wg sync.WaitGroup + serverPort = 8080 + serverAddr = net.JoinHostPort("127.0.0.1", strconv.Itoa(serverPort)) + server = &testPeerServer{ + trans: NewTCPTransportServer(&testMarshaller{}, serverPort)} + ) + // Setup PeerServer + server.recv, err = server.trans.Host() + req.Nil(err) + // Setup Peers + wg.Add(len(prvKeys)) + for _, prvKey := range prvKeys { + nID := types.NewNodeID(prvKey.PublicKey()) + peer := &testPeer{ + nID: nID, + trans: NewTCPTransportClient( + prvKey.PublicKey(), &testMarshaller{}, true), + } + peers[nID] = peer + go func() { + defer wg.Done() + + recv, err := peer.trans.Join(serverAddr) + req.Nil(err) + peer.recv = recv + }() + } + // Block here until we collect enough peers. + server.trans.WaitForPeers(uint32(peerCount)) + // Make sure all clients are ready. + wg.Wait() + + s.baseTest(server, peers, 300) + req.Nil(server.trans.Close()) + for _, peer := range peers { + req.Nil(peer.trans.Close()) + } +} + +func TestTransport(t *testing.T) { + suite.Run(t, new(TransportTestSuite)) +} diff --git a/dex/consensus/core/test/utils.go b/dex/consensus/core/test/utils.go new file mode 100644 index 000000000..e02e194eb --- /dev/null +++ b/dex/consensus/core/test/utils.go @@ -0,0 +1,253 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. 
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package test
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "net"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa"
+ "github.com/dexon-foundation/dexon-consensus/core/db"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+ "github.com/dexon-foundation/dexon/rlp"
+)
+
+// GenerateRandomNodeIDs randomly generates a slice of types.NodeID.
+func GenerateRandomNodeIDs(nodeCount int) (nIDs types.NodeIDs) {
+ nIDs = types.NodeIDs{}
+ for i := 0; i < nodeCount; i++ {
+ nIDs = append(nIDs, types.NodeID{Hash: common.NewRandomHash()})
+ }
+ return
+}
+
+// GenerateRandomPrivateKeys generates a set of private keys.
+func GenerateRandomPrivateKeys(nodeCount int) (prvKeys []crypto.PrivateKey) {
+ for i := 0; i < nodeCount; i++ {
+ prvKey, err := ecdsa.NewPrivateKey()
+ if err != nil {
+ panic(err)
+ }
+ prvKeys = append(prvKeys, prvKey)
+ }
+ return
+}
+
+// CalcLatencyStatistics calculates the average and deviation from a slice
+// of latencies.
+func CalcLatencyStatistics(latencies []time.Duration) (avg, dev time.Duration) {
+ var (
+ sum float64
+ sumOfSquareDiff float64
+ )
+
+ // Calculate average.
+ for _, v := range latencies {
+ sum += float64(v)
+ }
+ avgAsFloat := sum / float64(len(latencies))
+ avg = time.Duration(avgAsFloat)
+ // Calculate deviation.
+ for _, v := range latencies {
+ diff := math.Abs(float64(v) - avgAsFloat)
+ sumOfSquareDiff += diff * diff
+ }
+ dev = time.Duration(math.Sqrt(sumOfSquareDiff / float64(len(latencies)-1)))
+ return
+}
+
+// FindMyIP returns the local IP address.
+func FindMyIP() (ip string, err error) {
+ addrs, err := net.InterfaceAddrs()
+ if err != nil {
+ return
+ }
+ for _, a := range addrs {
+ ipnet, ok := a.(*net.IPNet)
+ if !ok {
+ continue
+ }
+ if ipnet.IP.IsLoopback() {
+ continue
+ }
+ if ipnet.IP.To4() != nil {
+ ip = ipnet.IP.String()
+ return
+ }
+ }
+ err = fmt.Errorf("unable to find IP")
+ return
+}
+
+// NewKeys creates private keys and corresponding public keys as slices.
+func NewKeys(count int) (
+ prvKeys []crypto.PrivateKey, pubKeys []crypto.PublicKey, err error) {
+ for i := 0; i < count; i++ {
+ var prvKey crypto.PrivateKey
+ if prvKey, err = ecdsa.NewPrivateKey(); err != nil {
+ return
+ }
+ prvKeys = append(prvKeys, prvKey)
+ pubKeys = append(pubKeys, prvKey.PublicKey())
+ }
+ return
+}
+
+// CloneDKGComplaint clones a typesDKG.Complaint instance.
+func CloneDKGComplaint(
+ comp *typesDKG.Complaint) (copied *typesDKG.Complaint) {
+ b, err := rlp.EncodeToBytes(comp)
+ if err != nil {
+ panic(err)
+ }
+ copied = &typesDKG.Complaint{}
+ if err = rlp.DecodeBytes(b, copied); err != nil {
+ panic(err)
+ }
+ return
+}
+
+// CloneDKGMasterPublicKey clones a typesDKG.MasterPublicKey instance.
+func CloneDKGMasterPublicKey(mpk *typesDKG.MasterPublicKey) (
+ copied *typesDKG.MasterPublicKey) {
+ b, err := rlp.EncodeToBytes(mpk)
+ if err != nil {
+ panic(err)
+ }
+ copied = typesDKG.NewMasterPublicKey()
+ if err = rlp.DecodeBytes(b, copied); err != nil {
+ panic(err)
+ }
+ return
+}
+
+// CloneDKGMPKReady clones a typesDKG.MPKReady instance.
+func CloneDKGMPKReady(ready *typesDKG.MPKReady) (
+ copied *typesDKG.MPKReady) {
+ b, err := rlp.EncodeToBytes(ready)
+ if err != nil {
+ panic(err)
+ }
+ copied = &typesDKG.MPKReady{}
+ if err = rlp.DecodeBytes(b, copied); err != nil {
+ panic(err)
+ }
+ return
+}
+
+// CloneDKGFinalize clones a typesDKG.Finalize instance.
+func CloneDKGFinalize(final *typesDKG.Finalize) (
+ copied *typesDKG.Finalize) {
+ b, err := rlp.EncodeToBytes(final)
+ if err != nil {
+ panic(err)
+ }
+ copied = &typesDKG.Finalize{}
+ if err = rlp.DecodeBytes(b, copied); err != nil {
+ panic(err)
+ }
+ return
+}
+
+// CloneDKGSuccess clones a typesDKG.Success instance.
+func CloneDKGSuccess(success *typesDKG.Success) (
+ copied *typesDKG.Success) {
+ b, err := rlp.EncodeToBytes(success)
+ if err != nil {
+ panic(err)
+ }
+ copied = &typesDKG.Success{}
+ if err = rlp.DecodeBytes(b, copied); err != nil {
+ panic(err)
+ }
+ return
+}
+
+// CloneDKGPrivateShare clones a typesDKG.PrivateShare instance.
+func CloneDKGPrivateShare(prvShare *typesDKG.PrivateShare) (
+ copied *typesDKG.PrivateShare) {
+ b, err := rlp.EncodeToBytes(prvShare)
+ if err != nil {
+ panic(err)
+ }
+ copied = &typesDKG.PrivateShare{}
+ if err = rlp.DecodeBytes(b, copied); err != nil {
+ panic(err)
+ }
+ return
+}
+
+func cloneAgreementResult(result *types.AgreementResult) (
+ copied *types.AgreementResult) {
+ b, err := rlp.EncodeToBytes(result)
+ if err != nil {
+ panic(err)
+ }
+ copied = &types.AgreementResult{}
+ if err = rlp.DecodeBytes(b, copied); err != nil {
+ panic(err)
+ }
+ return
+}
+
+var (
+ // ErrCompactionChainTipBlockNotExists is raised when the hash of the
+ // compaction chain tip doesn't match a block in the database.
+ ErrCompactionChainTipBlockNotExists = errors.New(
+ "compaction chain tip block not exists")
+ // ErrEmptyCompactionChainTipInfo is raised when the compaction chain tip
+ // info is empty.
+ ErrEmptyCompactionChainTipInfo = errors.New(
+ "empty compaction chain tip info")
+ // ErrMismatchBlockHash is raised when the block hash mismatches.
+ ErrMismatchBlockHash = errors.New("mismatched block hash")
+)
+
+// VerifyDB checks if a database is valid after the test.
+func VerifyDB(db db.Database) error {
+ hash, height := db.GetCompactionChainTipInfo()
+ if (hash == common.Hash{}) || height == 0 {
+ return ErrEmptyCompactionChainTipInfo
+ }
+ b, err := db.GetBlock(hash)
+ if err != nil {
+ return err
+ }
+ if b.Hash != hash {
+ return ErrMismatchBlockHash
+ }
+ return nil
+}
+
+func getComplementSet(
+ all, set map[types.NodeID]struct{}) map[types.NodeID]struct{} {
+ complement := make(map[types.NodeID]struct{})
+ for nID := range all {
+ if _, exists := set[nID]; exists {
+ continue
+ }
+ complement[nID] = struct{}{}
+ }
+ return complement
+}
diff --git a/dex/consensus/core/ticker.go b/dex/consensus/core/ticker.go
new file mode 100644
index 000000000..636fb8c49
--- /dev/null
+++ b/dex/consensus/core/ticker.go
@@ -0,0 +1,127 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus/core/utils"
+)
+
+// TickerType is the type of ticker.
+type TickerType int
+
+// TickerType enum.
+const (
+ TickerBA TickerType = iota
+ TickerDKG
+ TickerCRS
+)
+
+// defaultTicker is a wrapper to implement the ticker interface based on
+// time.Ticker.
+type defaultTicker struct {
+ ticker *time.Ticker
+ tickerChan chan time.Time
+ duration time.Duration
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ waitGroup sync.WaitGroup
+}
+
+// newDefaultTicker constructs a defaultTicker instance with the given interval.
+func newDefaultTicker(lambda time.Duration) *defaultTicker {
+ ticker := &defaultTicker{duration: lambda}
+ ticker.init()
+ return ticker
+}
+
+// Tick implements the Tick method of the ticker interface.
+func (t *defaultTicker) Tick() <-chan time.Time {
+ return t.tickerChan
+}
+
+// Stop implements the Stop method of the ticker interface.
+func (t *defaultTicker) Stop() {
+ t.ticker.Stop()
+ t.ctxCancel()
+ t.waitGroup.Wait()
+ t.ctx = nil
+ t.ctxCancel = nil
+ close(t.tickerChan)
+ t.tickerChan = nil
+}
+
+// Restart implements the Restart method of the ticker interface.
+func (t *defaultTicker) Restart() {
+ t.Stop()
+ t.init()
+}
+
+func (t *defaultTicker) init() {
+ t.ticker = time.NewTicker(t.duration)
+ t.tickerChan = make(chan time.Time)
+ t.ctx, t.ctxCancel = context.WithCancel(context.Background())
+ t.waitGroup.Add(1)
+ go t.monitor()
+}
+
+func (t *defaultTicker) monitor() {
+ defer t.waitGroup.Done()
+loop:
+ for {
+ select {
+ case <-t.ctx.Done():
+ break loop
+ case v := <-t.ticker.C:
+ select {
+ case t.tickerChan <- v:
+ default:
+ }
+ }
+ }
+}
+
+// newTicker is a helper to set up a ticker for the given Governance. If the
+// governance object implements a ticker generator, a ticker from that
+// generator is returned; otherwise a default one is constructed.
+func newTicker(gov Governance, round uint64, tickerType TickerType) (t Ticker) {
+ type tickerGenerator interface {
+ NewTicker(TickerType) Ticker
+ }
+
+ if gen, ok := gov.(tickerGenerator); ok {
+ t = gen.NewTicker(tickerType)
+ }
+ if t == nil {
+ var duration time.Duration
+ switch tickerType {
+ case TickerBA:
+ duration = utils.GetConfigWithPanic(gov, round, nil).LambdaBA
+ case TickerDKG:
+ duration = utils.GetConfigWithPanic(gov, round, nil).LambdaDKG
+ default:
+ panic(fmt.Errorf("unknown ticker type: %d", tickerType))
+ }
+ t = newDefaultTicker(duration)
+ }
+ return
+}
diff --git a/dex/consensus/core/types/block-randomness.go b/dex/consensus/core/types/block-randomness.go
new file mode 100644
index 000000000..1c7454398
--- /dev/null
+++ b/dex/consensus/core/types/block-randomness.go
@@ -0,0 +1,44 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "encoding/hex"
+ "fmt"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+)
+
+// AgreementResult describes an agreement result.
+type AgreementResult struct {
+ BlockHash common.Hash `json:"block_hash"`
+ Position Position `json:"position"`
+ Votes []Vote `json:"votes"`
+ IsEmptyBlock bool `json:"is_empty_block"`
+ Randomness []byte `json:"randomness"`
+}
+
+func (r *AgreementResult) String() string {
+ if len(r.Randomness) == 0 {
+ return fmt.Sprintf("agreementResult{Block:%s Pos:%s}",
+ r.BlockHash.String()[:6], r.Position)
+ }
+ return fmt.Sprintf("agreementResult{Block:%s Pos:%s Rand:%s}",
+ r.BlockHash.String()[:6], r.Position,
+ hex.EncodeToString(r.Randomness)[:6])
+}
diff --git a/dex/consensus/core/types/block.go b/dex/consensus/core/types/block.go
new file mode 100644
index 000000000..b55cfabf9
--- /dev/null
+++ b/dex/consensus/core/types/block.go
@@ -0,0 +1,297 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+// TODO(jimmy-dexon): remove comments of WitnessAck before open source.
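+// (Illustrative aside, not part of the original change: the rlpTimestamp
+// wrapper defined below round-trips a time.Time through a single uint64
+// of UnixNano, so encode/decode preserves UTC wall time to nanosecond
+// precision, e.g.
+//
+//   ts := rlpTimestamp{time.Unix(1, 5).UTC()}
+//   b, _ := rlp.EncodeToBytes(&ts)
+//   var dec rlpTimestamp
+//   _ = rlp.DecodeBytes(b, &dec)   // dec.Time.Equal(ts.Time) == true
+// )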
+ +package types + +import ( + "bytes" + "fmt" + "io" + "time" + + "github.com/dexon-foundation/dexon/rlp" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + dexCommon "github.com/dexon-foundation/dexon/common" +) + +// GenesisHeight refers to the initial height the genesis block should be. +const GenesisHeight uint64 = 1 + +// BlockVerifyStatus is the return code for core.Application.VerifyBlock +type BlockVerifyStatus int + +// Enums for return value of core.Application.VerifyBlock. +const ( + // VerifyOK: Block is verified. + VerifyOK BlockVerifyStatus = iota + // VerifyRetryLater: Block is unable to be verified at this moment. + // Try again later. + VerifyRetryLater + // VerifyInvalidBlock: Block is an invalid one. + VerifyInvalidBlock +) + +type rlpTimestamp struct { + time.Time +} + +func (t *rlpTimestamp) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, uint64(t.UTC().UnixNano())) +} + +func (t *rlpTimestamp) DecodeRLP(s *rlp.Stream) error { + var nano uint64 + err := s.Decode(&nano) + if err == nil { + sec := int64(nano) / 1000000000 + nsec := int64(nano) % 1000000000 + t.Time = time.Unix(sec, nsec).UTC() + } + return err +} + +// Witness represents the consensus information on the compaction chain. +type Witness struct { + Height uint64 `json:"height"` + Data []byte `json:"data"` +} + +// Block represents a single event broadcasted on the network. +type Block struct { + ProposerID NodeID `json:"proposer_id"` + ParentHash common.Hash `json:"parent_hash"` + Hash common.Hash `json:"hash"` + Position Position `json:"position"` + Timestamp time.Time `json:"timestamp"` + Payload []byte `json:"payload"` + PayloadHash common.Hash `json:"payload_hash"` + Witness Witness `json:"witness"` + Randomness []byte `json:"randomness"` + Signature crypto.Signature `json:"signature"` + + CRSSignature crypto.Signature `json:"crs_signature"` +} + +type rlpBlock struct { + ProposerID NodeID + ParentHash common.Hash + Hash common.Hash + Position Position + Timestamp *rlpTimestamp + Payload []byte + PayloadHash common.Hash + Witness *Witness + Randomness []byte + Signature crypto.Signature + + CRSSignature crypto.Signature +} + +// EncodeRLP implements rlp.Encoder +func (b *Block) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, rlpBlock{ + ProposerID: b.ProposerID, + ParentHash: b.ParentHash, + Hash: b.Hash, + Position: b.Position, + Timestamp: &rlpTimestamp{b.Timestamp}, + Payload: b.Payload, + PayloadHash: b.PayloadHash, + Witness: &b.Witness, + Randomness: b.Randomness, + Signature: b.Signature, + CRSSignature: b.CRSSignature, + }) +} + +// DecodeRLP implements rlp.Decoder +func (b *Block) DecodeRLP(s *rlp.Stream) error { + var dec rlpBlock + err := s.Decode(&dec) + if err == nil { + *b = Block{ + ProposerID: dec.ProposerID, + ParentHash: dec.ParentHash, + Hash: dec.Hash, + Position: dec.Position, + Timestamp: dec.Timestamp.Time, + Payload: dec.Payload, + PayloadHash: dec.PayloadHash, + Witness: *dec.Witness, + Randomness: dec.Randomness, + Signature: dec.Signature, + CRSSignature: dec.CRSSignature, + } + } + return err +} + +type TmpBlock struct { + ProposerID dexCommon.Address `json:"proposer_id"` + ParentHash dexCommon.Hash `json:"parent_hash"` + Hash dexCommon.Hash `json:"hash"` + Position Position `json:"position"` + Timestamp time.Time `json:"timestamp"` + Payload []byte `json:"payload"` + PayloadHash dexCommon.Hash `json:"payload_hash"` + Witness Witness `json:"witness"` + Randomness []byte `json:"randomness"` + 
Signature crypto.Signature `json:"signature"` + + CRSSignature crypto.Signature `json:"crs_signature"` +} + +type rlpTmpBlock struct { + ProposerID dexCommon.Address + ParentHash dexCommon.Hash + Hash dexCommon.Hash + Position Position + Timestamp *rlpTimestamp + Payload []byte + PayloadHash dexCommon.Hash + Witness *Witness + Randomness []byte + Signature crypto.Signature + + CRSSignature crypto.Signature +} + +// EncodeRLP implements rlp.Encoder +func (b *TmpBlock) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, rlpTmpBlock{ + ProposerID: b.ProposerID, + ParentHash: b.ParentHash, + Hash: b.Hash, + Position: b.Position, + Timestamp: &rlpTimestamp{b.Timestamp}, + Payload: b.Payload, + PayloadHash: b.PayloadHash, + Witness: &b.Witness, + Randomness: b.Randomness, + Signature: b.Signature, + CRSSignature: b.CRSSignature, + }) +} + +// DecodeRLP implements rlp.Decoder +func (b *TmpBlock) DecodeRLP(s *rlp.Stream) error { + var dec rlpTmpBlock + err := s.Decode(&dec) + if err == nil { + *b = TmpBlock{ + ProposerID: dec.ProposerID, + ParentHash: dec.ParentHash, + Hash: dec.Hash, + Position: dec.Position, + Timestamp: dec.Timestamp.Time, + Payload: dec.Payload, + PayloadHash: dec.PayloadHash, + Witness: *dec.Witness, + Randomness: dec.Randomness, + Signature: dec.Signature, + CRSSignature: dec.CRSSignature, + } + } + return err +} + +func (b *Block) String() string { + return fmt.Sprintf("Block{Hash:%v %s}", b.Hash.String()[:6], b.Position) +} + +// Clone returns a deep copy of a block. +func (b *Block) Clone() (bcopy *Block) { + bcopy = &Block{} + bcopy.ProposerID = b.ProposerID + bcopy.ParentHash = b.ParentHash + bcopy.Hash = b.Hash + bcopy.Position.Round = b.Position.Round + bcopy.Position.Height = b.Position.Height + bcopy.Signature = b.Signature.Clone() + bcopy.CRSSignature = b.CRSSignature.Clone() + bcopy.Witness.Height = b.Witness.Height + bcopy.Witness.Data = common.CopyBytes(b.Witness.Data) + bcopy.Timestamp = b.Timestamp + bcopy.Payload = common.CopyBytes(b.Payload) + bcopy.PayloadHash = b.PayloadHash + bcopy.Randomness = common.CopyBytes(b.Randomness) + return +} + +// IsGenesis checks if the block is a genesisBlock +func (b *Block) IsGenesis() bool { + return b.Position.Height == GenesisHeight && b.ParentHash == common.Hash{} +} + +// IsFinalized checks if the block is finalized. +func (b *Block) IsFinalized() bool { + return len(b.Randomness) > 0 +} + +// IsEmpty checks if the block is an 'empty block'. +func (b *Block) IsEmpty() bool { + return b.ProposerID.Hash == common.Hash{} +} + +// ByHash is the helper type for sorting slice of blocks by hash. +type ByHash []*Block + +func (b ByHash) Len() int { + return len(b) +} + +func (b ByHash) Less(i int, j int) bool { + return bytes.Compare([]byte(b[i].Hash[:]), []byte(b[j].Hash[:])) == -1 +} + +func (b ByHash) Swap(i int, j int) { + b[i], b[j] = b[j], b[i] +} + +// BlocksByPosition is the helper type for sorting slice of blocks by position. +type BlocksByPosition []*Block + +// Len implements Len method in sort.Sort interface. +func (bs BlocksByPosition) Len() int { + return len(bs) +} + +// Less implements Less method in sort.Sort interface. +func (bs BlocksByPosition) Less(i int, j int) bool { + return bs[j].Position.Newer(bs[i].Position) +} + +// Swap implements Swap method in sort.Sort interface. +func (bs BlocksByPosition) Swap(i int, j int) { + bs[i], bs[j] = bs[j], bs[i] +} + +// Push implements Push method in heap interface. 
+func (bs *BlocksByPosition) Push(x interface{}) { + *bs = append(*bs, x.(*Block)) +} + +// Pop implements Pop method in heap interface. +func (bs *BlocksByPosition) Pop() (ret interface{}) { + n := len(*bs) + *bs, ret = (*bs)[0:n-1], (*bs)[n-1] + return +} diff --git a/dex/consensus/core/types/block_test.go b/dex/consensus/core/types/block_test.go new file mode 100644 index 000000000..9ffbc4fbd --- /dev/null +++ b/dex/consensus/core/types/block_test.go @@ -0,0 +1,200 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package types + +import ( + "math/rand" + "reflect" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon/rlp" +) + +type BlockTestSuite struct { + suite.Suite +} + +func (s *BlockTestSuite) noZeroInStruct(v reflect.Value) { + t := v.Type() + for i := 0; i < t.NumField(); i++ { + tf := t.Field(i) + vf := v.FieldByName(tf.Name) + if vf.Type().Kind() == reflect.Struct { + s.noZeroInStruct(vf) + continue + } + if !vf.CanInterface() { + s.T().Log("unable to check private field", tf.Name) + continue + } + if reflect.DeepEqual( + vf.Interface(), reflect.Zero(vf.Type()).Interface()) { + s.Failf("", "should not be zero value %s", tf.Name) + } + } +} + +func (s *BlockTestSuite) createRandomBlock() *Block { + payload := common.GenerateRandomBytes() + b := &Block{ + ProposerID: NodeID{common.NewRandomHash()}, + ParentHash: common.NewRandomHash(), + Hash: common.NewRandomHash(), + Position: Position{ + Round: rand.Uint64(), + Height: rand.Uint64(), + }, + Timestamp: time.Now().UTC(), + Witness: Witness{ + Height: rand.Uint64(), + Data: common.GenerateRandomBytes(), + }, + Randomness: common.GenerateRandomBytes(), + Payload: payload, + PayloadHash: crypto.Keccak256Hash(payload), + Signature: crypto.Signature{ + Type: "some type", + Signature: common.GenerateRandomBytes()}, + CRSSignature: crypto.Signature{ + Type: "some type", + Signature: common.GenerateRandomBytes()}, + } + // Check if all fields are initialized with non zero values. 
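+ // (noZeroInStruct recurses into nested struct fields such as Position and
+ // Witness, and only logs unexported fields it cannot Interface().)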
+ s.noZeroInStruct(reflect.ValueOf(*b)) + return b +} + +func (s *BlockTestSuite) TestCreateRandomBlock() { + b1 := *s.createRandomBlock() + b2 := *s.createRandomBlock() + + v1 := reflect.ValueOf(b1) + v2 := reflect.ValueOf(b2) + for i := 0; i < v1.NumField(); i++ { + f1 := v1.Field(i) + f2 := v2.Field(i) + if reflect.DeepEqual(f1.Interface(), f2.Interface()) { + s.Failf("Non randomized field found", "Field %s is not randomized\n", + v1.Type().Field(i).Name) + } + } +} + +func (s *BlockTestSuite) TestSortByHash() { + hash := common.Hash{} + copy(hash[:], "aaaaaa") + b0 := &Block{Hash: hash} + copy(hash[:], "bbbbbb") + b1 := &Block{Hash: hash} + copy(hash[:], "cccccc") + b2 := &Block{Hash: hash} + copy(hash[:], "dddddd") + b3 := &Block{Hash: hash} + + blocks := []*Block{b3, b2, b1, b0} + sort.Sort(ByHash(blocks)) + s.Equal(blocks[0].Hash, b0.Hash) + s.Equal(blocks[1].Hash, b1.Hash) + s.Equal(blocks[2].Hash, b2.Hash) + s.Equal(blocks[3].Hash, b3.Hash) +} + +func (s *BlockTestSuite) TestSortBlocksByPosition() { + b00 := &Block{Hash: common.NewRandomHash(), Position: Position{Height: 0}} + b01 := &Block{Hash: common.NewRandomHash(), Position: Position{Height: 1}} + b02 := &Block{Hash: common.NewRandomHash(), Position: Position{Height: 2}} + b10 := &Block{Hash: common.NewRandomHash(), + Position: Position{Round: 1, Height: 0}} + b11 := &Block{Hash: common.NewRandomHash(), + Position: Position{Round: 1, Height: 1}} + b12 := &Block{Hash: common.NewRandomHash(), + Position: Position{Round: 1, Height: 2}} + + blocks := []*Block{b12, b11, b10, b02, b01, b00} + sort.Sort(BlocksByPosition(blocks)) + s.Equal(blocks[0].Hash, b00.Hash) + s.Equal(blocks[1].Hash, b01.Hash) + s.Equal(blocks[2].Hash, b02.Hash) + s.Equal(blocks[3].Hash, b10.Hash) + s.Equal(blocks[4].Hash, b11.Hash) + s.Equal(blocks[5].Hash, b12.Hash) +} + +func (s *BlockTestSuite) TestGenesisBlock() { + b0 := &Block{ + Position: Position{ + Height: GenesisHeight, + }, + ParentHash: common.Hash{}, + } + s.True(b0.IsGenesis()) + b1 := &Block{ + Position: Position{ + Height: GenesisHeight + 1, + }, + ParentHash: common.Hash{}, + } + s.False(b1.IsGenesis()) + b2 := &Block{ + Position: Position{ + Height: GenesisHeight, + }, + ParentHash: common.NewRandomHash(), + } + s.False(b2.IsGenesis()) +} + +func (s *BlockTestSuite) TestClone() { + b1 := *s.createRandomBlock() + b2 := *b1.Clone() + + // Use reflect here to better understand the error message. + v1 := reflect.ValueOf(b1) + v2 := reflect.ValueOf(b2) + for i := 0; i < v1.NumField(); i++ { + f1 := v1.Field(i) + f2 := v2.Field(i) + if !reflect.DeepEqual(f1.Interface(), f2.Interface()) { + s.Failf("Field Not Equal", "Field %s is not equal.\n-%v\n+%v\n", + v1.Type().Field(i).Name, + f1, f2) + } + } +} + +func (s *BlockTestSuite) TestRLPEncodeDecode() { + block := s.createRandomBlock() + b, err := rlp.EncodeToBytes(block) + s.Require().NoError(err) + + var dec Block + err = rlp.DecodeBytes(b, &dec) + s.Require().NoError(err) + + s.Require().True(reflect.DeepEqual(block, &dec)) +} + +func TestBlock(t *testing.T) { + suite.Run(t, new(BlockTestSuite)) +} diff --git a/dex/consensus/core/types/config.go b/dex/consensus/core/types/config.go new file mode 100644 index 000000000..dce38369e --- /dev/null +++ b/dex/consensus/core/types/config.go @@ -0,0 +1,75 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. 
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "encoding/binary"
+ "time"
+)
+
+// Config stands for the current configuration parameters.
+type Config struct {
+ // Lambda related.
+ LambdaBA time.Duration
+ LambdaDKG time.Duration
+
+ // Set related.
+ NotarySetSize uint32
+
+ // Time related.
+ RoundLength uint64
+ MinBlockInterval time.Duration
+}
+
+// Clone returns a copied configuration.
+func (c *Config) Clone() *Config {
+ return &Config{
+  LambdaBA: c.LambdaBA,
+  LambdaDKG: c.LambdaDKG,
+  NotarySetSize: c.NotarySetSize,
+  RoundLength: c.RoundLength,
+  MinBlockInterval: c.MinBlockInterval,
+ }
+}
+
+// Bytes returns the []byte representation of Config.
+func (c *Config) Bytes() []byte {
+ binaryLambdaBA := make([]byte, 8)
+ binary.LittleEndian.PutUint64(
+  binaryLambdaBA, uint64(c.LambdaBA.Nanoseconds()))
+ binaryLambdaDKG := make([]byte, 8)
+ binary.LittleEndian.PutUint64(
+  binaryLambdaDKG, uint64(c.LambdaDKG.Nanoseconds()))
+
+ binaryNotarySetSize := make([]byte, 4)
+ binary.LittleEndian.PutUint32(binaryNotarySetSize, c.NotarySetSize)
+
+ binaryRoundLength := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryRoundLength, c.RoundLength)
+ binaryMinBlockInterval := make([]byte, 8)
+ binary.LittleEndian.PutUint64(binaryMinBlockInterval,
+  uint64(c.MinBlockInterval.Nanoseconds()))
+
+ enc := make([]byte, 0, 40)
+ enc = append(enc, binaryLambdaBA...)
+ enc = append(enc, binaryLambdaDKG...)
+ enc = append(enc, binaryNotarySetSize...)
+ enc = append(enc, binaryRoundLength...)
+ enc = append(enc, binaryMinBlockInterval...)
+ return enc
+}
diff --git a/dex/consensus/core/types/config_test.go b/dex/consensus/core/types/config_test.go
new file mode 100644
index 000000000..b55004e2a
--- /dev/null
+++ b/dex/consensus/core/types/config_test.go
@@ -0,0 +1,44 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
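+
+// A note on Config.Bytes (config.go above): the five fields are packed
+// little-endian in declaration order,
+//
+//   LambdaBA(8) | LambdaDKG(8) | NotarySetSize(4) | RoundLength(8) | MinBlockInterval(8)
+//
+// which is 8+8+4+8+8 = 40 bytes, matching the preallocated capacity.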
+
+package types
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/suite"
+)
+
+type ConfigTestSuite struct {
+ suite.Suite
+}
+
+func (s *ConfigTestSuite) TestClone() {
+ c := &Config{
+  LambdaBA: 1 * time.Millisecond,
+  LambdaDKG: 2 * time.Hour,
+  NotarySetSize: 5,
+  RoundLength: 1000,
+  MinBlockInterval: 7 * time.Nanosecond,
+ }
+ s.Require().Equal(c, c.Clone())
+}
+
+func TestConfig(t *testing.T) {
+ suite.Run(t, new(ConfigTestSuite))
+}
diff --git a/dex/consensus/core/types/dkg/dkg.go b/dex/consensus/core/types/dkg/dkg.go
new file mode 100644
index 000000000..cb921e586
--- /dev/null
+++ b/dex/consensus/core/types/dkg/dkg.go
@@ -0,0 +1,485 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package dkg
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/dexon-foundation/dexon/rlp"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ cryptoDKG "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+)
+
+// Errors for typesDKG package.
+var (
+ ErrNotReachThreshold = fmt.Errorf("threshold not reached")
+ ErrInvalidThreshold = fmt.Errorf("invalid threshold")
+)
+
+// NewID creates a DKGID from NodeID.
+func NewID(ID types.NodeID) cryptoDKG.ID {
+ return cryptoDKG.NewID(ID.Hash[:])
+}
+
+// PrivateShare describes a secret share in the DKG protocol.
+type PrivateShare struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ ReceiverID types.NodeID `json:"receiver_id"`
+ Round uint64 `json:"round"`
+ Reset uint64 `json:"reset"`
+ PrivateShare cryptoDKG.PrivateKey `json:"private_share"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+// Equal checks equality between two PrivateShare instances.
+func (p *PrivateShare) Equal(other *PrivateShare) bool {
+ return p.ProposerID.Equal(other.ProposerID) &&
+  p.ReceiverID.Equal(other.ReceiverID) &&
+  p.Round == other.Round &&
+  p.Reset == other.Reset &&
+  p.Signature.Type == other.Signature.Type &&
+  bytes.Compare(p.Signature.Signature, other.Signature.Signature) == 0 &&
+  bytes.Compare(
+   p.PrivateShare.Bytes(), other.PrivateShare.Bytes()) == 0
+}
+
+// MasterPublicKey describes a master public key in the DKG protocol.
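+//
+// The DKGID is serialized as its little-endian bytes and re-derived with
+// cryptoDKG.BytesID on decode (see EncodeRLP/DecodeRLP below), so an RLP
+// round trip preserves it. A minimal sketch (assuming mpk is a valid
+// *MasterPublicKey):
+//
+//   b, _ := rlp.EncodeToBytes(mpk)
+//   var out MasterPublicKey
+//   _ = rlp.DecodeBytes(b, &out) // out.DKGID equals mpk.DKGID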
+type MasterPublicKey struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ Round uint64 `json:"round"`
+ Reset uint64 `json:"reset"`
+ DKGID cryptoDKG.ID `json:"dkg_id"`
+ PublicKeyShares cryptoDKG.PublicKeyShares `json:"public_key_shares"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+func (d *MasterPublicKey) String() string {
+ return fmt.Sprintf("MasterPublicKey{KP:%s Round:%d Reset:%d}",
+  d.ProposerID.String()[:6],
+  d.Round,
+  d.Reset)
+}
+
+// Equal checks equality of two DKG master public keys.
+func (d *MasterPublicKey) Equal(other *MasterPublicKey) bool {
+ return d.ProposerID.Equal(other.ProposerID) &&
+  d.Round == other.Round &&
+  d.Reset == other.Reset &&
+  d.DKGID.GetHexString() == other.DKGID.GetHexString() &&
+  d.PublicKeyShares.Equal(&other.PublicKeyShares) &&
+  d.Signature.Type == other.Signature.Type &&
+  bytes.Compare(d.Signature.Signature, other.Signature.Signature) == 0
+}
+
+type rlpMasterPublicKey struct {
+ ProposerID types.NodeID
+ Round uint64
+ Reset uint64
+ DKGID []byte
+ PublicKeyShares *cryptoDKG.PublicKeyShares
+ Signature crypto.Signature
+}
+
+// EncodeRLP implements rlp.Encoder
+func (d *MasterPublicKey) EncodeRLP(w io.Writer) error {
+ return rlp.Encode(w, rlpMasterPublicKey{
+  ProposerID: d.ProposerID,
+  Round: d.Round,
+  Reset: d.Reset,
+  DKGID: d.DKGID.GetLittleEndian(),
+  PublicKeyShares: &d.PublicKeyShares,
+  Signature: d.Signature,
+ })
+}
+
+// DecodeRLP implements rlp.Decoder
+func (d *MasterPublicKey) DecodeRLP(s *rlp.Stream) error {
+ var dec rlpMasterPublicKey
+ if err := s.Decode(&dec); err != nil {
+  return err
+ }
+
+ id, err := cryptoDKG.BytesID(dec.DKGID)
+ if err != nil {
+  return err
+ }
+
+ *d = MasterPublicKey{
+  ProposerID: dec.ProposerID,
+  Round: dec.Round,
+  Reset: dec.Reset,
+  DKGID: id,
+  PublicKeyShares: *dec.PublicKeyShares.Move(),
+  Signature: dec.Signature,
+ }
+ return err
+}
+
+// NewMasterPublicKey returns a new MasterPublicKey instance.
+func NewMasterPublicKey() *MasterPublicKey {
+ return &MasterPublicKey{
+  PublicKeyShares: *cryptoDKG.NewEmptyPublicKeyShares(),
+ }
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (d *MasterPublicKey) UnmarshalJSON(data []byte) error {
+ type innerMasterPublicKey MasterPublicKey
+ d.PublicKeyShares = *cryptoDKG.NewEmptyPublicKeyShares()
+ return json.Unmarshal(data, (*innerMasterPublicKey)(d))
+}
+
+// Complaint describes a complaint in the DKG protocol.
+type Complaint struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ Round uint64 `json:"round"`
+ Reset uint64 `json:"reset"`
+ PrivateShare PrivateShare `json:"private_share"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+func (c *Complaint) String() string {
+ if c.IsNack() {
+  return fmt.Sprintf("DKGNackComplaint{CP:%s Round:%d Reset:%d PSP:%s}",
+   c.ProposerID.String()[:6], c.Round, c.Reset,
+   c.PrivateShare.ProposerID.String()[:6])
+ }
+ return fmt.Sprintf("DKGComplaint{CP:%s Round:%d Reset:%d PrivateShare:%v}",
+  c.ProposerID.String()[:6], c.Round, c.Reset, c.PrivateShare)
+}
+
+// Equal checks equality between two Complaint instances.
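+//
+// Equality covers the embedded PrivateShare as well; a minimal sketch
+// (assuming a is a valid Complaint):
+//
+//   b := a                   // struct copy, share included
+//   b.PrivateShare.Round++
+//   fmt.Println(a.Equal(&b)) // false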
+func (c *Complaint) Equal(other *Complaint) bool {
+ return c.ProposerID.Equal(other.ProposerID) &&
+  c.Round == other.Round &&
+  c.Reset == other.Reset &&
+  c.PrivateShare.Equal(&other.PrivateShare) &&
+  c.Signature.Type == other.Signature.Type &&
+  bytes.Compare(c.Signature.Signature, other.Signature.Signature) == 0
+}
+
+type rlpComplaint struct {
+ ProposerID types.NodeID
+ Round uint64
+ Reset uint64
+ IsNack bool
+ PrivateShare []byte
+ Signature crypto.Signature
+}
+
+// EncodeRLP implements rlp.Encoder
+func (c *Complaint) EncodeRLP(w io.Writer) error {
+ if c.IsNack() {
+  return rlp.Encode(w, rlpComplaint{
+   ProposerID: c.ProposerID,
+   Round: c.Round,
+   Reset: c.Reset,
+   IsNack: true,
+   PrivateShare: c.PrivateShare.ProposerID.Hash[:],
+   Signature: c.Signature,
+  })
+ }
+ prvShare, err := rlp.EncodeToBytes(&c.PrivateShare)
+ if err != nil {
+  return err
+ }
+ return rlp.Encode(w, rlpComplaint{
+  ProposerID: c.ProposerID,
+  Round: c.Round,
+  Reset: c.Reset,
+  IsNack: false,
+  PrivateShare: prvShare,
+  Signature: c.Signature,
+ })
+}
+
+// DecodeRLP implements rlp.Decoder
+func (c *Complaint) DecodeRLP(s *rlp.Stream) error {
+ var dec rlpComplaint
+ if err := s.Decode(&dec); err != nil {
+  return err
+ }
+
+ var prvShare PrivateShare
+ if dec.IsNack {
+  copy(prvShare.ProposerID.Hash[:], dec.PrivateShare)
+  prvShare.Round = dec.Round
+  prvShare.Reset = dec.Reset
+ } else {
+  if err := rlp.DecodeBytes(dec.PrivateShare, &prvShare); err != nil {
+   return err
+  }
+ }
+
+ *c = Complaint{
+  ProposerID: dec.ProposerID,
+  Round: dec.Round,
+  Reset: dec.Reset,
+  PrivateShare: prvShare,
+  Signature: dec.Signature,
+ }
+ return nil
+}
+
+// IsNack returns true if it's a nack complaint in the DKG protocol.
+func (c *Complaint) IsNack() bool {
+ return len(c.PrivateShare.Signature.Signature) == 0
+}
+
+// PartialSignature describes a partial signature in the DKG protocol.
+type PartialSignature struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ Round uint64 `json:"round"`
+ Hash common.Hash `json:"hash"`
+ PartialSignature cryptoDKG.PartialSignature `json:"partial_signature"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+// MPKReady describes a DKG ready message in the DKG protocol.
+type MPKReady struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ Round uint64 `json:"round"`
+ Reset uint64 `json:"reset"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+func (ready *MPKReady) String() string {
+ return fmt.Sprintf("DKGMPKReady{RP:%s Round:%d Reset:%d}",
+  ready.ProposerID.String()[:6],
+  ready.Round,
+  ready.Reset)
+}
+
+// Equal checks equality of two MPKReady instances.
+func (ready *MPKReady) Equal(other *MPKReady) bool {
+ return ready.ProposerID.Equal(other.ProposerID) &&
+  ready.Round == other.Round &&
+  ready.Reset == other.Reset &&
+  ready.Signature.Type == other.Signature.Type &&
+  bytes.Compare(ready.Signature.Signature, other.Signature.Signature) == 0
+}
+
+// Finalize describes a DKG finalize message in the DKG protocol.
+type Finalize struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ Round uint64 `json:"round"`
+ Reset uint64 `json:"reset"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+func (final *Finalize) String() string {
+ return fmt.Sprintf("DKGFinal{FP:%s Round:%d Reset:%d}",
+  final.ProposerID.String()[:6],
+  final.Round,
+  final.Reset)
+}
+
+// Equal checks equality of two Finalize instances.
+func (final *Finalize) Equal(other *Finalize) bool {
+ return final.ProposerID.Equal(other.ProposerID) &&
+  final.Round == other.Round &&
+  final.Reset == other.Reset &&
+  final.Signature.Type == other.Signature.Type &&
+  bytes.Compare(final.Signature.Signature, other.Signature.Signature) == 0
+}
+
+// Success describes a DKG success message in the DKG protocol.
+type Success struct {
+ ProposerID types.NodeID `json:"proposer_id"`
+ Round uint64 `json:"round"`
+ Reset uint64 `json:"reset"`
+ Signature crypto.Signature `json:"signature"`
+}
+
+func (s *Success) String() string {
+ return fmt.Sprintf("DKGSuccess{SP:%s Round:%d Reset:%d}",
+  s.ProposerID.String()[:6],
+  s.Round,
+  s.Reset)
+}
+
+// Equal checks equality of two Success instances.
+func (s *Success) Equal(other *Success) bool {
+ return s.ProposerID.Equal(other.ProposerID) &&
+  s.Round == other.Round &&
+  s.Reset == other.Reset &&
+  s.Signature.Type == other.Signature.Type &&
+  bytes.Compare(s.Signature.Signature, other.Signature.Signature) == 0
+}
+
+// GroupPublicKey is the result of the DKG protocol.
+type GroupPublicKey struct {
+ Round uint64
+ QualifyIDs cryptoDKG.IDs
+ QualifyNodeIDs map[types.NodeID]struct{}
+ IDMap map[types.NodeID]cryptoDKG.ID
+ GroupPublicKey *cryptoDKG.PublicKey
+ Threshold int
+}
+
+// VerifySignature verifies if the signature is correct.
+func (gpk *GroupPublicKey) VerifySignature(
+ hash common.Hash, sig crypto.Signature) bool {
+ return gpk.GroupPublicKey.VerifySignature(hash, sig)
+}
+
+// CalcQualifyNodes returns the qualified nodes.
+func CalcQualifyNodes(
+ mpks []*MasterPublicKey, complaints []*Complaint, threshold int) (
+ qualifyIDs cryptoDKG.IDs, qualifyNodeIDs map[types.NodeID]struct{}, err error) {
+ if len(mpks) < threshold {
+  err = ErrInvalidThreshold
+  return
+ }
+
+ // Calculate qualified members.
+ disqualifyIDs := map[types.NodeID]struct{}{}
+ complaintsByID := map[types.NodeID]map[types.NodeID]struct{}{}
+ for _, complaint := range complaints {
+  if complaint.IsNack() {
+   if _, exist := complaintsByID[complaint.PrivateShare.ProposerID]; !exist {
+    complaintsByID[complaint.PrivateShare.ProposerID] =
+     make(map[types.NodeID]struct{})
+   }
+   complaintsByID[complaint.PrivateShare.ProposerID][complaint.ProposerID] =
+    struct{}{}
+  } else {
+   disqualifyIDs[complaint.PrivateShare.ProposerID] = struct{}{}
+  }
+ }
+ for nID, complaints := range complaintsByID {
+  if len(complaints) >= threshold {
+   disqualifyIDs[nID] = struct{}{}
+  }
+ }
+ qualifyIDs = make(cryptoDKG.IDs, 0, len(mpks)-len(disqualifyIDs))
+ if cap(qualifyIDs) < threshold {
+  err = ErrNotReachThreshold
+  return
+ }
+ qualifyNodeIDs = make(map[types.NodeID]struct{})
+ for _, mpk := range mpks {
+  if _, exist := disqualifyIDs[mpk.ProposerID]; exist {
+   continue
+  }
+  qualifyIDs = append(qualifyIDs, mpk.DKGID)
+  qualifyNodeIDs[mpk.ProposerID] = struct{}{}
+ }
+ return
+}
+
+// NewGroupPublicKey creates a GroupPublicKey instance.
+func NewGroupPublicKey(
+ round uint64,
+ mpks []*MasterPublicKey, complaints []*Complaint,
+ threshold int) (
+ *GroupPublicKey, error) {
+ qualifyIDs, qualifyNodeIDs, err :=
+  CalcQualifyNodes(mpks, complaints, threshold)
+ if err != nil {
+  return nil, err
+ }
+ mpkMap := make(map[cryptoDKG.ID]*MasterPublicKey, cap(qualifyIDs))
+ idMap := make(map[types.NodeID]cryptoDKG.ID)
+ for _, mpk := range mpks {
+  if _, exist := qualifyNodeIDs[mpk.ProposerID]; !exist {
+   continue
+  }
+  mpkMap[mpk.DKGID] = mpk
+  idMap[mpk.ProposerID] = mpk.DKGID
+ }
+ // Recover Group Public Key.
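+ // (Sketch of what follows: every qualified member's PublicKeyShares are
+ // collected and combined by RecoverGroupPublicKey; no private material is
+ // involved.)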
+ pubShares := make([]*cryptoDKG.PublicKeyShares, 0, len(qualifyIDs))
+ for _, id := range qualifyIDs {
+  pubShares = append(pubShares, &mpkMap[id].PublicKeyShares)
+ }
+ groupPK := cryptoDKG.RecoverGroupPublicKey(pubShares)
+ return &GroupPublicKey{
+  Round: round,
+  QualifyIDs: qualifyIDs,
+  QualifyNodeIDs: qualifyNodeIDs,
+  IDMap: idMap,
+  Threshold: threshold,
+  GroupPublicKey: groupPK,
+ }, nil
+}
+
+// NodePublicKeys is the result of the DKG protocol.
+type NodePublicKeys struct {
+ Round uint64
+ QualifyIDs cryptoDKG.IDs
+ QualifyNodeIDs map[types.NodeID]struct{}
+ IDMap map[types.NodeID]cryptoDKG.ID
+ PublicKeys map[types.NodeID]*cryptoDKG.PublicKey
+ Threshold int
+}
+
+// NewNodePublicKeys creates a NodePublicKeys instance.
+func NewNodePublicKeys(
+ round uint64,
+ mpks []*MasterPublicKey, complaints []*Complaint,
+ threshold int) (
+ *NodePublicKeys, error) {
+ qualifyIDs, qualifyNodeIDs, err :=
+  CalcQualifyNodes(mpks, complaints, threshold)
+ if err != nil {
+  return nil, err
+ }
+ mpkMap := make(map[cryptoDKG.ID]*MasterPublicKey, cap(qualifyIDs))
+ idMap := make(map[types.NodeID]cryptoDKG.ID)
+ for _, mpk := range mpks {
+  if _, exist := qualifyNodeIDs[mpk.ProposerID]; !exist {
+   continue
+  }
+  mpkMap[mpk.DKGID] = mpk
+  idMap[mpk.ProposerID] = mpk.DKGID
+ }
+ // Recover the qualified members' public keys.
+ pubKeys := make(map[types.NodeID]*cryptoDKG.PublicKey, len(qualifyIDs))
+ for _, recvID := range qualifyIDs {
+  pubShares := cryptoDKG.NewEmptyPublicKeyShares()
+  for _, id := range qualifyIDs {
+   pubShare, err := mpkMap[id].PublicKeyShares.Share(recvID)
+   if err != nil {
+    return nil, err
+   }
+   if err := pubShares.AddShare(id, pubShare); err != nil {
+    return nil, err
+   }
+  }
+  pubKey, err := pubShares.RecoverPublicKey(qualifyIDs)
+  if err != nil {
+   return nil, err
+  }
+  pubKeys[mpkMap[recvID].ProposerID] = pubKey
+ }
+ return &NodePublicKeys{
+  Round: round,
+  QualifyIDs: qualifyIDs,
+  QualifyNodeIDs: qualifyNodeIDs,
+  IDMap: idMap,
+  PublicKeys: pubKeys,
+  Threshold: threshold,
+ }, nil
+}
diff --git a/dex/consensus/core/types/dkg/dkg_test.go b/dex/consensus/core/types/dkg/dkg_test.go
new file mode 100644
index 000000000..ea6a56548
--- /dev/null
+++ b/dex/consensus/core/types/dkg/dkg_test.go
@@ -0,0 +1,435 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
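+
+// The qualification rules from CalcQualifyNodes above, in numbers: with
+// threshold t, a proposer is disqualified by one non-nack complaint or by t
+// or more distinct nack complaints; fewer than t master public keys yields
+// ErrInvalidThreshold, and fewer than t qualified members yields
+// ErrNotReachThreshold.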
+ +package dkg + +import ( + "math/rand" + "reflect" + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + cryptoDKG "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/dexon-foundation/dexon/rlp" +) + +type DKGTestSuite struct { + suite.Suite +} + +func (s *DKGTestSuite) genRandomBytes() []byte { + randomness := make([]byte, 32) + _, err := rand.Read(randomness) + s.Require().NoError(err) + return randomness +} + +func (s *DKGTestSuite) genID() cryptoDKG.ID { + dID, err := cryptoDKG.BytesID(s.genRandomBytes()) + s.Require().NoError(err) + return dID +} + +func (s *DKGTestSuite) clone(src, dst interface{}) { + b, err := rlp.EncodeToBytes(src) + s.Require().NoError(err) + s.Require().NoError(rlp.DecodeBytes(b, dst)) +} + +func (s *DKGTestSuite) TestRLPEncodeDecode() { + dID := s.genID() + // Prepare master public key for testing. + _, pubShare := cryptoDKG.NewPrivateKeyShares(10) + d := MasterPublicKey{ + ProposerID: types.NodeID{Hash: common.Hash{1, 2, 3}}, + Round: 10, + Reset: 11, + DKGID: dID, + PublicKeyShares: *pubShare.Clone(), + Signature: crypto.Signature{ + Type: "123", + Signature: []byte{4, 5, 6}, + }, + } + + b, err := rlp.EncodeToBytes(&d) + s.Require().NoError(err) + + var dd MasterPublicKey + err = rlp.DecodeBytes(b, &dd) + s.Require().NoError(err) + + bb, err := rlp.EncodeToBytes(&dd) + s.Require().NoError(err) + s.Require().True(reflect.DeepEqual(b, bb)) + s.Require().True(d.ProposerID.Equal(dd.ProposerID)) + s.Require().True(d.Round == dd.Round) + s.Require().True(reflect.DeepEqual(d.Signature, dd.Signature)) + s.Require().Equal(d.DKGID.GetHexString(), dd.DKGID.GetHexString()) + s.Require().True(d.PublicKeyShares.Equal(pubShare)) + + // Test DKGPrivateShare. + p := PrivateShare{ + ProposerID: types.NodeID{Hash: common.Hash{1, 3, 5}}, + Round: 10, + Reset: 11, + PrivateShare: *cryptoDKG.NewPrivateKey(), + Signature: crypto.Signature{ + Type: "123", + Signature: []byte{2, 4, 6}, + }, + } + + b, err = rlp.EncodeToBytes(&p) + s.Require().NoError(err) + + var pp PrivateShare + err = rlp.DecodeBytes(b, &pp) + s.Require().NoError(err) + + bb, err = rlp.EncodeToBytes(&pp) + s.Require().NoError(err) + s.Require().True(reflect.DeepEqual(b, bb)) + s.Require().True(p.ProposerID.Equal(pp.ProposerID)) + s.Require().True(p.Round == pp.Round) + s.Require().True(reflect.DeepEqual(p.PrivateShare, pp.PrivateShare)) + s.Require().True(reflect.DeepEqual(p.Signature, pp.Signature)) + + // Test DKG Nack Complaint. + c := Complaint{ + ProposerID: d.ProposerID, + Round: 10, + Reset: 11, + PrivateShare: PrivateShare{ + ProposerID: p.ProposerID, + Round: 10, + Reset: 11, + }, + Signature: crypto.Signature{ + Type: "123", + Signature: []byte{3, 3, 3}, + }, + } + s.Require().True(c.IsNack()) + + b, err = rlp.EncodeToBytes(&c) + s.Require().NoError(err) + + var cc Complaint + err = rlp.DecodeBytes(b, &cc) + s.Require().NoError(err) + + bb, err = rlp.EncodeToBytes(&cc) + s.Require().NoError(err) + s.Require().True(reflect.DeepEqual(c, cc)) + s.Require().True(c.ProposerID.Equal(cc.ProposerID)) + s.Require().True(c.Round == cc.Round) + s.Require().True(reflect.DeepEqual(c.PrivateShare, cc.PrivateShare)) + s.Require().True(reflect.DeepEqual(c.Signature, cc.Signature)) + + // Test DKG Complaint. 
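+ // (Non-nack case: the embedded share carries a signature, so EncodeRLP
+ // stores the whole RLP-encoded share rather than only the accused
+ // proposer's ID.)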
+ c = Complaint{ + ProposerID: d.ProposerID, + Round: 10, + Reset: 11, + PrivateShare: p, + Signature: crypto.Signature{ + Type: "123", + Signature: []byte{3, 3, 3}, + }, + } + s.Require().False(c.IsNack()) + + b, err = rlp.EncodeToBytes(&c) + s.Require().NoError(err) + + err = rlp.DecodeBytes(b, &cc) + s.Require().NoError(err) + + bb, err = rlp.EncodeToBytes(&cc) + s.Require().NoError(err) + s.Require().True(reflect.DeepEqual(c, cc)) + s.Require().True(c.ProposerID.Equal(cc.ProposerID)) + s.Require().True(c.Round == cc.Round) + s.Require().True(reflect.DeepEqual(c.PrivateShare, cc.PrivateShare)) + s.Require().True(reflect.DeepEqual(c.Signature, cc.Signature)) +} + +func (s *DKGTestSuite) TestMasterPublicKeyEquality() { + var req = s.Require() + // Prepare source master public key. + master1 := &MasterPublicKey{ + ProposerID: types.NodeID{Hash: common.NewRandomHash()}, + Round: 1234, + Reset: 5678, + DKGID: s.genID(), + Signature: crypto.Signature{ + Signature: s.genRandomBytes(), + }, + } + prvKey := cryptoDKG.NewPrivateKey() + pubKey := prvKey.PublicKey().(cryptoDKG.PublicKey) + _, pubShares := cryptoDKG.NewPrivateKeyShares(2) + req.NoError(pubShares.AddShare(s.genID(), &pubKey)) + master1.PublicKeyShares = *pubShares.Move() + // Prepare another master public key by copying every field. + master2 := &MasterPublicKey{} + s.clone(master1, master2) + // They should be equal. + req.True(master1.Equal(master2)) + // Change round. + master2.Round = 2345 + req.False(master1.Equal(master2)) + master2.Round = 1234 + // Change reset. + master2.Reset = 6789 + req.False(master1.Equal(master2)) + master2.Reset = 5678 + // Change proposerID. + master2.ProposerID = types.NodeID{Hash: common.NewRandomHash()} + req.False(master1.Equal(master2)) + master2.ProposerID = master1.ProposerID + // Change DKGID. + master2.DKGID = cryptoDKG.NewID(s.genRandomBytes()) + req.False(master1.Equal(master2)) + master2.DKGID = master1.DKGID + // Change signature. + master2.Signature = crypto.Signature{ + Signature: s.genRandomBytes(), + } + req.False(master1.Equal(master2)) + master2.Signature = master1.Signature + // Change public key share. + master2.PublicKeyShares = *cryptoDKG.NewEmptyPublicKeyShares() + req.False(master1.Equal(master2)) +} + +func (s *DKGTestSuite) TestPrivateShareEquality() { + var req = s.Require() + share1 := &PrivateShare{ + ProposerID: types.NodeID{Hash: common.NewRandomHash()}, + ReceiverID: types.NodeID{Hash: common.NewRandomHash()}, + Round: 1, + Reset: 2, + PrivateShare: *cryptoDKG.NewPrivateKey(), + Signature: crypto.Signature{ + Signature: s.genRandomBytes(), + }, + } + // Make a copy. + share2 := &PrivateShare{} + s.clone(share1, share2) + req.True(share1.Equal(share2)) + // Change proposer ID. + share2.ProposerID = types.NodeID{Hash: common.NewRandomHash()} + req.False(share1.Equal(share2)) + share2.ProposerID = share1.ProposerID + // Change receiver ID. + share2.ReceiverID = types.NodeID{Hash: common.NewRandomHash()} + req.False(share1.Equal(share2)) + share2.ReceiverID = share1.ReceiverID + // Change round. + share2.Round = share1.Round + 1 + req.False(share1.Equal(share2)) + share2.Round = share1.Round + // Change reset. + share2.Reset = share1.Reset + 1 + req.False(share1.Equal(share2)) + share2.Reset = share1.Reset + // Change signature. + share2.Signature = crypto.Signature{ + Signature: s.genRandomBytes(), + } + req.False(share1.Equal(share2)) + share2.Signature = share1.Signature + // Change private share. 
+ share2.PrivateShare = *cryptoDKG.NewPrivateKey()
+ req.False(share1.Equal(share2))
+ share2.PrivateShare = share1.PrivateShare
+ // They should be equal after changing fields back.
+ req.True(share1.Equal(share2))
+}
+
+func (s *DKGTestSuite) TestComplaintEquality() {
+ var req = s.Require()
+ comp1 := &Complaint{
+  ProposerID: types.NodeID{Hash: common.NewRandomHash()},
+  Round: 1,
+  Reset: 2,
+  PrivateShare: PrivateShare{
+   ProposerID: types.NodeID{Hash: common.NewRandomHash()},
+   ReceiverID: types.NodeID{Hash: common.NewRandomHash()},
+   Round: 2,
+   Reset: 3,
+   PrivateShare: *cryptoDKG.NewPrivateKey(),
+   Signature: crypto.Signature{
+    Signature: s.genRandomBytes(),
+   },
+  },
+  Signature: crypto.Signature{
+   Signature: s.genRandomBytes(),
+  },
+ }
+ // Make a copy.
+ comp2 := &Complaint{}
+ s.clone(comp1, comp2)
+ req.True(comp1.Equal(comp2))
+ // Change proposer ID.
+ comp2.ProposerID = types.NodeID{Hash: common.NewRandomHash()}
+ req.False(comp1.Equal(comp2))
+ comp2.ProposerID = comp1.ProposerID
+ // Change round.
+ comp2.Round = comp1.Round + 1
+ req.False(comp1.Equal(comp2))
+ comp2.Round = comp1.Round
+ // Change reset.
+ comp2.Reset = comp1.Reset + 1
+ req.False(comp1.Equal(comp2))
+ comp2.Reset = comp1.Reset
+ // Change signature.
+ comp2.Signature = crypto.Signature{
+  Signature: s.genRandomBytes(),
+ }
+ req.False(comp1.Equal(comp2))
+ comp2.Signature = comp1.Signature
+ // Change share's round.
+ comp2.PrivateShare.Round = comp1.PrivateShare.Round + 1
+ req.False(comp1.Equal(comp2))
+ comp2.PrivateShare.Round = comp1.PrivateShare.Round
+ // Change share's reset.
+ comp2.PrivateShare.Reset = comp1.PrivateShare.Reset + 1
+ req.False(comp1.Equal(comp2))
+ comp2.PrivateShare.Reset = comp1.PrivateShare.Reset
+ // After changing every field back, they should be equal.
+ req.True(comp1.Equal(comp2))
+}
+
+func (s *DKGTestSuite) TestMPKReadyEquality() {
+ var req = s.Require()
+ ready1 := &MPKReady{
+  ProposerID: types.NodeID{Hash: common.NewRandomHash()},
+  Round: 1,
+  Reset: 2,
+  Signature: crypto.Signature{
+   Signature: s.genRandomBytes(),
+  },
+ }
+ // Make a copy.
+ ready2 := &MPKReady{}
+ s.clone(ready1, ready2)
+ req.True(ready1.Equal(ready2))
+ // Change proposer ID.
+ ready2.ProposerID = types.NodeID{Hash: common.NewRandomHash()}
+ req.False(ready1.Equal(ready2))
+ ready2.ProposerID = ready1.ProposerID
+ // Change round.
+ ready2.Round = ready1.Round + 1
+ req.False(ready1.Equal(ready2))
+ ready2.Round = ready1.Round
+ // Change reset.
+ ready2.Reset = ready1.Reset + 1
+ req.False(ready1.Equal(ready2))
+ ready2.Reset = ready1.Reset
+ // Change signature.
+ ready2.Signature = crypto.Signature{
+  Signature: s.genRandomBytes(),
+ }
+ req.False(ready1.Equal(ready2))
+ ready2.Signature = ready1.Signature
+ // After changing every field back, they should be equal.
+ req.True(ready1.Equal(ready2))
+}
+
+func (s *DKGTestSuite) TestFinalizeEquality() {
+ var req = s.Require()
+ final1 := &Finalize{
+  ProposerID: types.NodeID{Hash: common.NewRandomHash()},
+  Round: 1,
+  Reset: 2,
+  Signature: crypto.Signature{
+   Signature: s.genRandomBytes(),
+  },
+ }
+ // Make a copy.
+ final2 := &Finalize{}
+ s.clone(final1, final2)
+ req.True(final1.Equal(final2))
+ // Change proposer ID.
+ final2.ProposerID = types.NodeID{Hash: common.NewRandomHash()}
+ req.False(final1.Equal(final2))
+ final2.ProposerID = final1.ProposerID
+ // Change round.
+ final2.Round = final1.Round + 1
+ req.False(final1.Equal(final2))
+ final2.Round = final1.Round
+ // Change reset.
+ final2.Reset = final1.Reset + 1 + req.False(final1.Equal(final2)) + final2.Reset = final1.Reset + // Change signature. + final2.Signature = crypto.Signature{ + Signature: s.genRandomBytes(), + } + req.False(final1.Equal(final2)) + final2.Signature = final1.Signature + // After changing every field back, they should be equal. + req.True(final1.Equal(final2)) +} + +func (s *DKGTestSuite) TestSuccessEquality() { + var req = s.Require() + success1 := &Success{ + ProposerID: types.NodeID{Hash: common.NewRandomHash()}, + Round: 1, + Reset: 2, + Signature: crypto.Signature{ + Signature: s.genRandomBytes(), + }, + } + // Make a copy + success2 := &Success{} + s.clone(success1, success2) + req.True(success1.Equal(success2)) + // Change proposer ID. + success2.ProposerID = types.NodeID{Hash: common.NewRandomHash()} + req.False(success1.Equal(success2)) + success2.ProposerID = success1.ProposerID + // Change round. + success2.Round = success1.Round + 1 + req.False(success1.Equal(success2)) + success2.Round = success1.Round + // Change reset. + success2.Reset = success1.Reset + 1 + req.False(success1.Equal(success2)) + success2.Reset = success1.Reset + // Change signature. + success2.Signature = crypto.Signature{ + Signature: s.genRandomBytes(), + } + req.False(success1.Equal(success2)) + success2.Signature = success1.Signature + // After changing every field back, they should be equal. + req.True(success1.Equal(success2)) +} + +func TestDKG(t *testing.T) { + suite.Run(t, new(DKGTestSuite)) +} diff --git a/dex/consensus/core/types/message.go b/dex/consensus/core/types/message.go new file mode 100644 index 000000000..0335cfaae --- /dev/null +++ b/dex/consensus/core/types/message.go @@ -0,0 +1,24 @@ +// Copyright 2019 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package types + +// Msg for the network ReceiveChan. +type Msg struct { + PeerID interface{} + Payload interface{} +} diff --git a/dex/consensus/core/types/node.go b/dex/consensus/core/types/node.go new file mode 100644 index 000000000..18b6831e0 --- /dev/null +++ b/dex/consensus/core/types/node.go @@ -0,0 +1,61 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package types + +import ( + "bytes" + "encoding/hex" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" +) + +// NodeID is the ID type for nodes. +type NodeID struct { + common.Hash +} + +// NewNodeID returns a NodeID with Hash set to the hash value of +// public key. +func NewNodeID(pubKey crypto.PublicKey) NodeID { + return NodeID{Hash: crypto.Keccak256Hash(pubKey.Bytes()[1:])} +} + +// Equal checks if the hash representation is the same NodeID. +func (v NodeID) Equal(v2 NodeID) bool { + return v.Hash == v2.Hash +} + +func (v NodeID) String() string { + return hex.EncodeToString(v.Hash[:])[:6] +} + +// NodeIDs implements sort.Interface for NodeID. +type NodeIDs []NodeID + +func (v NodeIDs) Len() int { + return len(v) +} + +func (v NodeIDs) Less(i int, j int) bool { + return bytes.Compare([]byte(v[i].Hash[:]), []byte(v[j].Hash[:])) == -1 +} + +func (v NodeIDs) Swap(i int, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/dex/consensus/core/types/nodeset.go b/dex/consensus/core/types/nodeset.go new file mode 100644 index 000000000..806000763 --- /dev/null +++ b/dex/consensus/core/types/nodeset.go @@ -0,0 +1,162 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package types + +import ( + "container/heap" + "encoding/binary" + "math/big" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" +) + +// NodeSet is the node set structure as defined in DEXON consensus core. +type NodeSet struct { + IDs map[NodeID]struct{} +} + +// SubSetTarget is the sub set target for GetSubSet(). +type SubSetTarget struct { + data [][]byte +} + +type subSetTargetType byte + +const ( + targetNotarySet subSetTargetType = iota + targetNodeLeader +) + +type nodeRank struct { + ID NodeID + rank *big.Int +} + +// rankHeap is a MaxHeap structure. +type rankHeap []*nodeRank + +func (h rankHeap) Len() int { return len(h) } +func (h rankHeap) Less(i, j int) bool { return h[i].rank.Cmp(h[j].rank) > 0 } +func (h rankHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } +func (h *rankHeap) Push(x interface{}) { + *h = append(*h, x.(*nodeRank)) +} +func (h *rankHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +// NewNodeSet creates a new NodeSet instance. +func NewNodeSet() *NodeSet { + return &NodeSet{ + IDs: make(map[NodeID]struct{}), + } +} + +// NewNodeSetFromMap creates a new NodeSet from NodeID map. 
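+// The input map is copied, so mutating the argument afterwards does not
+// affect the returned set; a minimal sketch (assuming id is a NodeID):
+//
+//   m := map[NodeID]struct{}{id: {}}
+//   ns := NewNodeSetFromMap(m)
+//   delete(m, id) // ns still contains id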
+func NewNodeSetFromMap(nodes map[NodeID]struct{}) *NodeSet { + nIDs := make(map[NodeID]struct{}, len(nodes)) + for nID := range nodes { + nIDs[nID] = struct{}{} + } + return &NodeSet{ + IDs: nIDs, + } +} + +// NewNotarySetTarget is the target for getting Notary Set. +func NewNotarySetTarget(crs common.Hash) *SubSetTarget { + return newTarget(targetNotarySet, crs[:]) +} + +// NewNodeLeaderTarget is the target for getting leader of fast BA. +func NewNodeLeaderTarget(crs common.Hash, height uint64) *SubSetTarget { + binaryHeight := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryHeight, height) + return newTarget(targetNodeLeader, crs[:], binaryHeight) +} + +// Add a NodeID to the set. +func (ns *NodeSet) Add(ID NodeID) { + ns.IDs[ID] = struct{}{} +} + +// Clone the NodeSet. +func (ns *NodeSet) Clone() *NodeSet { + nsCopy := NewNodeSet() + for ID := range ns.IDs { + nsCopy.Add(ID) + } + return nsCopy +} + +// GetSubSet returns the subset of given target. +func (ns *NodeSet) GetSubSet( + size int, target *SubSetTarget) map[NodeID]struct{} { + if size == 0 { + return make(map[NodeID]struct{}) + } + h := rankHeap{} + idx := 0 + for nID := range ns.IDs { + if idx < size { + h = append(h, newNodeRank(nID, target)) + } else if idx == size { + heap.Init(&h) + } + if idx >= size { + rank := newNodeRank(nID, target) + if rank.rank.Cmp(h[0].rank) < 0 { + h[0] = rank + heap.Fix(&h, 0) + } + } + idx++ + } + + nIDs := make(map[NodeID]struct{}, size) + for _, rank := range h { + nIDs[rank.ID] = struct{}{} + } + + return nIDs +} + +func newTarget(targetType subSetTargetType, data ...[]byte) *SubSetTarget { + data = append(data, []byte{byte(targetType)}) + return &SubSetTarget{ + data: data, + } +} + +func newNodeRank(ID NodeID, target *SubSetTarget) *nodeRank { + data := make([][]byte, 1, len(target.data)+1) + data[0] = make([]byte, len(ID.Hash)) + copy(data[0], ID.Hash[:]) + data = append(data, target.data...) + h := crypto.Keccak256Hash(data...) + num := new(big.Int).SetBytes(h[:]) + return &nodeRank{ + ID: ID, + rank: num, + } +} diff --git a/dex/consensus/core/types/nodeset_test.go b/dex/consensus/core/types/nodeset_test.go new file mode 100644 index 000000000..ef9ac244b --- /dev/null +++ b/dex/consensus/core/types/nodeset_test.go @@ -0,0 +1,70 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. 
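+
+// GetSubSet (nodeset.go above) ranks every node by interpreting
+// Keccak256(ID || target data) as a big integer and keeps the `size` smallest
+// ranks via a max-heap; the test below therefore expects each selected notary
+// to out-rank (have a smaller rank than) at least total-size candidates.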
+
+package types
+
+import (
+ "testing"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/stretchr/testify/suite"
+)
+
+type NodeSetTestSuite struct {
+ suite.Suite
+}
+
+func (s *NodeSetTestSuite) TestGetSubSet() {
+ total := 10
+ crs := common.NewRandomHash()
+ nodes := NewNodeSet()
+ for len(nodes.IDs) < total {
+  nodes.IDs[NodeID{common.NewRandomHash()}] = struct{}{}
+ }
+ target := NewNotarySetTarget(crs)
+ ranks := make(map[NodeID]*nodeRank, len(nodes.IDs))
+ for nID := range nodes.IDs {
+  ranks[nID] = newNodeRank(nID, target)
+ }
+ size := 4
+ notarySet := nodes.GetSubSet(size, target)
+ for notary := range notarySet {
+  win := 0
+  rank := ranks[notary].rank
+  for node := range nodes.IDs {
+   if rank.Cmp(ranks[node].rank) < 0 {
+    win++
+   }
+  }
+  s.True(win >= total-size)
+ }
+}
+
+func (s *NodeSetTestSuite) TestGetSubSetZeroSize() {
+ total := 10
+ nodes := NewNodeSet()
+ for len(nodes.IDs) < total {
+  nodes.IDs[NodeID{common.NewRandomHash()}] = struct{}{}
+ }
+ // Passing nil should not crash.
+ emptySet := nodes.GetSubSet(0, nil)
+ s.Len(emptySet, 0)
+}
+
+func TestNodeSet(t *testing.T) {
+ suite.Run(t, new(NodeSetTestSuite))
+}
diff --git a/dex/consensus/core/types/position.go b/dex/consensus/core/types/position.go
new file mode 100644
index 000000000..81d23c266
--- /dev/null
+++ b/dex/consensus/core/types/position.go
@@ -0,0 +1,51 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "fmt"
+)
+
+// Position describes the position in the block lattice of an entity.
+type Position struct {
+ Round uint64 `json:"round"`
+ Height uint64 `json:"height"`
+}
+
+func (pos Position) String() string {
+ return fmt.Sprintf("Position{Round:%d Height:%d}", pos.Round, pos.Height)
+}
+
+// Equal checks if two positions are equal.
+func (pos Position) Equal(other Position) bool {
+ return pos.Round == other.Round && pos.Height == other.Height
+}
+
+// Newer checks if one block is newer than another one on the same chain.
+// Comparing blocks on different chains with this function is not meaningful.
+func (pos Position) Newer(other Position) bool {
+ return pos.Round > other.Round ||
+  (pos.Round == other.Round && pos.Height > other.Height)
+}
+
+// Older checks if one block is older than another one on the same chain.
+// Comparing blocks on different chains with this function is not meaningful.
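+//
+// Both Newer and Older are strict, so each returns false for equal
+// positions. A minimal sketch sorting blocks oldest-first (assuming blocks
+// is a []*Block and "sort" is imported):
+//
+//   sort.Slice(blocks, func(i, j int) bool {
+//       return blocks[i].Position.Older(blocks[j].Position)
+//   })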
+func (pos Position) Older(other Position) bool {
+ return pos.Round < other.Round ||
+  (pos.Round == other.Round && pos.Height < other.Height)
+}
diff --git a/dex/consensus/core/types/position_test.go b/dex/consensus/core/types/position_test.go
new file mode 100644
index 000000000..d2f416543
--- /dev/null
+++ b/dex/consensus/core/types/position_test.go
@@ -0,0 +1,110 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package types
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+)
+
+type PositionTestSuite struct {
+ suite.Suite
+}
+
+func (s *PositionTestSuite) TestNewer() {
+ pos := Position{
+  Round: 1,
+  Height: 1,
+ }
+ s.False(pos.Newer(Position{
+  Round: 2,
+  Height: 0,
+ }))
+ s.False(pos.Newer(Position{
+  Round: 1,
+  Height: 2,
+ }))
+ s.True(pos.Newer(Position{
+  Round: 0,
+  Height: 100,
+ }))
+ s.True(pos.Newer(Position{
+  Round: 1,
+  Height: 0,
+ }))
+}
+
+func (s *PositionTestSuite) TestOlder() {
+ pos := Position{
+  Round: 1,
+  Height: 1,
+ }
+ s.False(pos.Older(Position{
+  Round: 0,
+  Height: 0,
+ }))
+ s.False(pos.Older(Position{
+  Round: 1,
+  Height: 0,
+ }))
+ s.True(pos.Older(Position{
+  Round: 2,
+  Height: 0,
+ }))
+ s.True(pos.Older(Position{
+  Round: 1,
+  Height: 100,
+ }))
+}
+
+func (s *PositionTestSuite) TestSearchInAscendingOrder() {
+ positions := []Position{
+  Position{Round: 0, Height: 1},
+  Position{Round: 0, Height: 2},
+  Position{Round: 0, Height: 3},
+  Position{Round: 2, Height: 0},
+  Position{Round: 2, Height: 1},
+  Position{Round: 2, Height: 2},
+  Position{Round: 4, Height: 0},
+  Position{Round: 4, Height: 1},
+  Position{Round: 4, Height: 2},
+ }
+ search := func(pos Position) int {
+  return sort.Search(len(positions), func(i int) bool {
+   return positions[i].Newer(pos) || positions[i].Equal(pos)
+  })
+ }
+ s.Equal(0, search(Position{Round: 0, Height: 0}))
+ s.Equal(len(positions), search(Position{Round: 4, Height: 4}))
+ s.Equal(0, search(Position{Round: 0, Height: 1}))
+ s.Equal(len(positions)-1, search(Position{Round: 4, Height: 2}))
+ s.Equal(2, search(Position{Round: 0, Height: 3}))
+}
+
+func (s *PositionTestSuite) TestEqual() {
+ pos := Position{}
+ s.True(pos.Equal(Position{}))
+ s.False(pos.Equal(Position{Round: 1}))
+ s.False(pos.Equal(Position{Height: 1}))
+}
+
+func TestPosition(t *testing.T) {
+ suite.Run(t, new(PositionTestSuite))
+}
diff --git a/dex/consensus/core/types/vote.go b/dex/consensus/core/types/vote.go
new file mode 100644
index 000000000..8bc0c78c2
--- /dev/null
+++ b/dex/consensus/core/types/vote.go
@@ -0,0 +1,100 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package types + +import ( + "fmt" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + cryptoDKG "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" +) + +// VoteType is the type of vote. +type VoteType byte + +// VoteType enum. +const ( + VoteInit VoteType = iota + VotePreCom + VoteCom + VoteFast + VoteFastCom + // Do not add any type below MaxVoteType. + MaxVoteType +) + +// NullBlockHash is the blockHash for ⊥ value. +var NullBlockHash common.Hash + +// SkipBlockHash is the blockHash for SKIP value. +var SkipBlockHash common.Hash + +func init() { + for idx := range SkipBlockHash { + SkipBlockHash[idx] = 0xff + } +} + +// VoteHeader is the header for vote, which can be used as map keys. +type VoteHeader struct { + ProposerID NodeID `json:"proposer_id"` + Type VoteType `json:"type"` + BlockHash common.Hash `json:"block_hash"` + Period uint64 `json:"period"` + Position Position `json:"position"` +} + +// Vote is the vote structure defined in Crypto Shuffle Algorithm. +type Vote struct { + VoteHeader `json:"header"` + PartialSignature cryptoDKG.PartialSignature `json:"partial_signature"` + Signature crypto.Signature `json:"signature"` +} + +func (v *Vote) String() string { + return fmt.Sprintf("Vote{VP:%s %s Period:%d Type:%d Hash:%s}", + v.ProposerID.String()[:6], + v.Position, v.Period, v.Type, v.BlockHash.String()[:6]) +} + +// NewVote constructs a Vote instance with header fields. +func NewVote(t VoteType, hash common.Hash, period uint64) *Vote { + return &Vote{ + VoteHeader: VoteHeader{ + Type: t, + BlockHash: hash, + Period: period, + }} +} + +// Clone returns a deep copy of a vote. +func (v *Vote) Clone() *Vote { + return &Vote{ + VoteHeader: VoteHeader{ + ProposerID: v.ProposerID, + Type: v.Type, + BlockHash: v.BlockHash, + Period: v.Period, + Position: v.Position, + }, + PartialSignature: cryptoDKG.PartialSignature( + crypto.Signature(v.PartialSignature).Clone()), + Signature: v.Signature.Clone(), + } +} diff --git a/dex/consensus/core/utils.go b/dex/consensus/core/utils.go new file mode 100644 index 000000000..c4d7b0fc3 --- /dev/null +++ b/dex/consensus/core/utils.go @@ -0,0 +1,255 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package core
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "sort"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ "github.com/dexon-foundation/dexon-consensus/core/utils"
+)
+
+// Errors for utils.
+var (
+ ErrIncorrectVoteBlockHash = fmt.Errorf(
+  "incorrect vote block hash")
+ ErrIncorrectVoteType = fmt.Errorf(
+  "incorrect vote type")
+ ErrIncorrectVotePosition = fmt.Errorf(
+  "incorrect vote position")
+ ErrIncorrectVoteProposer = fmt.Errorf(
+  "incorrect vote proposer")
+ ErrIncorrectVotePeriod = fmt.Errorf(
+  "incorrect vote period")
+)
+
+// NodeSetCache is a type alias to avoid a fullnode compile error when moving
+// it to the core/utils package.
+type NodeSetCache = utils.NodeSetCache
+
+// NewNodeSetCache is a function alias to avoid a fullnode compile error when
+// moving it to the core/utils package.
+var NewNodeSetCache = utils.NewNodeSetCache
+
+var (
+ debug = false
+ // ErrEmptyTimestamps would be reported if the timestamp vector is empty.
+ ErrEmptyTimestamps = errors.New("timestamp vector should not be empty")
+)
+
+func init() {
+ if os.Getenv("DEBUG") != "" {
+  debug = true
+ }
+}
+
+// Debugf is like fmt.Printf, but only outputs when we are in debug mode.
+func Debugf(format string, args ...interface{}) {
+ if debug {
+  fmt.Printf(format, args...)
+ }
+}
+
+// Debugln is like fmt.Println, but only outputs when we are in debug mode.
+func Debugln(args ...interface{}) {
+ if debug {
+  fmt.Println(args...)
+ }
+}
+
+func interpoTime(t1 time.Time, t2 time.Time, sep int) []time.Time {
+ if sep == 0 {
+  return []time.Time{}
+ }
+ if t1.After(t2) {
+  return interpoTime(t2, t1, sep)
+ }
+ timestamps := make([]time.Time, sep)
+ duration := t2.Sub(t1)
+ period := time.Duration(
+  (duration.Nanoseconds() / int64(sep+1))) * time.Nanosecond
+ prevTime := t1
+ for idx := range timestamps {
+  prevTime = prevTime.Add(period)
+  timestamps[idx] = prevTime
+ }
+ return timestamps
+}
+
+func getMedianTime(timestamps []time.Time) (t time.Time, err error) {
+ if len(timestamps) == 0 {
+  err = ErrEmptyTimestamps
+  return
+ }
+ tscopy := make([]time.Time, 0, len(timestamps))
+ for _, ts := range timestamps {
+  tscopy = append(tscopy, ts)
+ }
+ sort.Sort(common.ByTime(tscopy))
+ if len(tscopy)%2 == 0 {
+  t1 := tscopy[len(tscopy)/2-1]
+  t2 := tscopy[len(tscopy)/2]
+  t = interpoTime(t1, t2, 1)[0]
+ } else {
+  t = tscopy[len(tscopy)/2]
+ }
+ return
+}
+
+func removeFromSortedUint32Slice(xs []uint32, x uint32) []uint32 {
+ indexToRemove := sort.Search(len(xs), func(idx int) bool {
+  return xs[idx] >= x
+ })
+ if indexToRemove == len(xs) || xs[indexToRemove] != x {
+  // This value is not found.
+  return xs
+ }
+ return append(xs[:indexToRemove], xs[indexToRemove+1:]...)
+}
+
+// HashConfigurationBlock returns the hash value of a configuration block.
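+//
+// The notary IDs are sorted before hashing, so the digest is independent of
+// Go's randomized map iteration order: two calls with the same set, config,
+// and hashes always produce the same result.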
+// HashConfigurationBlock returns the hash value of a configuration block.
+func HashConfigurationBlock(
+	notarySet map[types.NodeID]struct{},
+	config *types.Config,
+	snapshotHash common.Hash,
+	prevHash common.Hash,
+) common.Hash {
+	notaryIDs := make(types.NodeIDs, 0, len(notarySet))
+	for nID := range notarySet {
+		notaryIDs = append(notaryIDs, nID)
+	}
+	sort.Sort(notaryIDs)
+	notarySetBytes := make([]byte, 0, len(notarySet)*len(common.Hash{}))
+	for _, nID := range notaryIDs {
+		notarySetBytes = append(notarySetBytes, nID.Hash[:]...)
+	}
+	configBytes := config.Bytes()
+
+	return crypto.Keccak256Hash(
+		notarySetBytes[:],
+		configBytes[:],
+		snapshotHash[:],
+		prevHash[:],
+	)
+}
+
+// VerifyAgreementResult performs sanity checks against a types.AgreementResult
+// instance.
+func VerifyAgreementResult(
+	res *types.AgreementResult, cache *NodeSetCache) error {
+	if res.Position.Round >= DKGDelayRound {
+		if len(res.Randomness) == 0 {
+			return ErrMissingRandomness
+		}
+		return nil
+	}
+	notarySet, err := cache.GetNotarySet(res.Position.Round)
+	if err != nil {
+		return err
+	}
+	if len(res.Votes) < len(notarySet)*2/3+1 {
+		return ErrNotEnoughVotes
+	}
+	voted := make(map[types.NodeID]struct{}, len(notarySet))
+	voteType := res.Votes[0].Type
+	votePeriod := res.Votes[0].Period
+	if voteType != types.VoteFastCom && voteType != types.VoteCom {
+		return ErrIncorrectVoteType
+	}
+	for _, vote := range res.Votes {
+		if vote.Period != votePeriod {
+			return ErrIncorrectVotePeriod
+		}
+		if res.IsEmptyBlock {
+			if (vote.BlockHash != common.Hash{}) {
+				return ErrIncorrectVoteBlockHash
+			}
+		} else {
+			if vote.BlockHash != res.BlockHash {
+				return ErrIncorrectVoteBlockHash
+			}
+		}
+		if vote.Type != voteType {
+			return ErrIncorrectVoteType
+		}
+		if vote.Position != res.Position {
+			return ErrIncorrectVotePosition
+		}
+		if _, exist := notarySet[vote.ProposerID]; !exist {
+			return ErrIncorrectVoteProposer
+		}
+		ok, err := utils.VerifyVoteSignature(&vote)
+		if err != nil {
+			return err
+		}
+		if !ok {
+			return ErrIncorrectVoteSignature
+		}
+		voted[vote.ProposerID] = struct{}{}
+	}
+	if len(voted) < len(notarySet)*2/3+1 {
+		return ErrNotEnoughVotes
+	}
+	return nil
+}
+
+// DiffUint64 calculates the difference between two uint64 values.
+func DiffUint64(a, b uint64) uint64 {
+	if a > b {
+		return a - b
+	}
+	return b - a
+}
+
+func isCI() bool {
+	return os.Getenv("CI") != ""
+}
+
+func isCircleCI() bool {
+	return isCI() && os.Getenv("CIRCLECI") == "true"
+}
+
+func isTravisCI() bool {
+	return isCI() && os.Getenv("TRAVIS") == "true"
+}
+
+// checkWithCancel is a helper to perform periodic checking with cancel.
+func checkWithCancel(parentCtx context.Context, interval time.Duration,
+	checker func() bool) (ret bool) {
+	ctx, cancel := context.WithCancel(parentCtx)
+	defer cancel()
+Loop:
+	for {
+		if ret = checker(); ret {
+			return
+		}
+		select {
+		case <-ctx.Done():
+			break Loop
+		case <-time.After(interval):
+		}
+	}
+	return
+}
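checkWithCancel polls checker every interval until it reports true or the context is cancelled, returning false on cancellation. A hypothetical wrapper (isDelivered is a made-up predicate) showing the intended call shape:

func waitForDelivery(parent context.Context, isDelivered func() bool) bool {
	// Bound the polling loop to ten seconds.
	ctx, cancel := context.WithTimeout(parent, 10*time.Second)
	defer cancel()
	return checkWithCancel(ctx, 100*time.Millisecond, isDelivered)
}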
diff --git a/dex/consensus/core/utils/crypto.go b/dex/consensus/core/utils/crypto.go
new file mode 100644
index 000000000..96bbe85a0
--- /dev/null
+++ b/dex/consensus/core/utils/crypto.go
@@ -0,0 +1,411 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"bytes"
+	"encoding/binary"
+
+	"github.com/dexon-foundation/dexon-consensus/common"
+	"github.com/dexon-foundation/dexon-consensus/core/crypto"
+	"github.com/dexon-foundation/dexon-consensus/core/types"
+	typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+	dexCommon "github.com/dexon-foundation/dexon/common"
+	dexCrypto "github.com/dexon-foundation/dexon/crypto"
+)
+
+func hashWitness(witness *types.Witness) (common.Hash, error) {
+	binaryHeight := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryHeight, witness.Height)
+	return crypto.Keccak256Hash(
+		binaryHeight,
+		witness.Data), nil
+}
+
+func hashWitnessTmp(witness *types.Witness) dexCommon.Hash {
+	binaryHeight := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryHeight, witness.Height)
+	return dexCrypto.Keccak256Hash(binaryHeight, witness.Data)
+}
+
+// HashBlock generates hash of a types.Block.
+func HashBlock(block *types.Block) (common.Hash, error) {
+	hashPosition := HashPosition(block.Position)
+	binaryTimestamp, err := block.Timestamp.UTC().MarshalBinary()
+	if err != nil {
+		return common.Hash{}, err
+	}
+	binaryWitness := hashWitnessTmp(&block.Witness)
+
+	hash := crypto.Keccak256Hash(
+		block.ProposerID.Hash[:],
+		block.ParentHash[:],
+		hashPosition[:],
+		binaryTimestamp[:],
+		block.PayloadHash[:],
+		binaryWitness[:])
+	return hash, nil
+}
+
+// HashTmpBlock generates hash of a types.TmpBlock.
+func HashTmpBlock(block *types.TmpBlock) (dexCommon.Hash, error) {
+	hashPosition := HashPositionTmp(block.Position)
+	binaryTimestamp, err := block.Timestamp.UTC().MarshalBinary()
+	if err != nil {
+		return dexCommon.Hash{}, err
+	}
+	binaryWitness := hashWitnessTmp(&block.Witness)
+	hash := dexCrypto.Keccak256Hash(
+		block.ProposerID.Bytes(),
+		block.ParentHash.Bytes(),
+		hashPosition.Bytes(),
+		binaryTimestamp[:],
+		block.PayloadHash.Bytes(),
+		binaryWitness.Bytes())
+	return hash, nil
+}
+
+// VerifyBlockSignature verifies the signature of types.Block.
+func VerifyBlockSignature(b *types.Block) (err error) {
+	payloadHash := crypto.Keccak256Hash(b.Payload)
+	if payloadHash != b.PayloadHash {
+		err = ErrIncorrectHash
+		return
+	}
+	return VerifyBlockSignatureWithoutPayload(b)
+}
+
+// VerifyBlockSignatureWithoutPayload verifies the signature of types.Block but
+// does not check if PayloadHash is correct.
+func VerifyBlockSignatureWithoutPayload(b *types.Block) (err error) {
+	hash, err := HashBlock(b)
+	if err != nil {
+		return
+	}
+	if hash != b.Hash {
+		err = ErrIncorrectHash
+		return
+	}
+	pubKey, err := crypto.SigToPub(b.Hash, b.Signature)
+	if err != nil {
+		return
+	}
+	if !b.ProposerID.Equal(types.NewNodeID(pubKey)) {
+		err = ErrIncorrectSignature
+		return
+	}
+	return
+}
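Both the proposer ID and the payload hash are inputs to HashBlock, so they must be set before hashing, and the signature is then made over the resulting block hash. A sketch of that order of operations (signAndVerify is a hypothetical helper written as if in this package, using the core/crypto/ecdsa package that the tests below also use):

func signAndVerify() error {
	prv, err := ecdsa.NewPrivateKey()
	if err != nil {
		return err
	}
	b := &types.Block{
		ProposerID: types.NewNodeID(prv.PublicKey()),
		Position:   types.Position{Height: types.GenesisHeight},
		Timestamp:  time.Now().UTC(),
	}
	// PayloadHash and ProposerID are hashed into the block hash.
	b.PayloadHash = crypto.Keccak256Hash(b.Payload)
	if b.Hash, err = HashBlock(b); err != nil {
		return err
	}
	if b.Signature, err = prv.Sign(b.Hash); err != nil {
		return err
	}
	return VerifyBlockSignature(b) // nil on success
}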
+// HashVote generates hash of a types.Vote.
+func HashVote(vote *types.Vote) common.Hash {
+	binaryPeriod := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryPeriod, vote.Period)
+
+	hashPosition := HashPosition(vote.Position)
+
+	hash := crypto.Keccak256Hash(
+		vote.ProposerID.Hash[:],
+		vote.BlockHash[:],
+		binaryPeriod,
+		hashPosition[:],
+		vote.PartialSignature.Signature[:],
+		[]byte{byte(vote.Type)},
+	)
+	return hash
+}
+
+// VerifyVoteSignature verifies the signature of types.Vote.
+func VerifyVoteSignature(vote *types.Vote) (bool, error) {
+	hash := HashVote(vote)
+	pubKey, err := crypto.SigToPub(hash, vote.Signature)
+	if err != nil {
+		return false, err
+	}
+	if vote.ProposerID != types.NewNodeID(pubKey) {
+		return false, nil
+	}
+	return true, nil
+}
+
+func hashCRS(block *types.Block, crs common.Hash) common.Hash {
+	hashPos := HashPosition(block.Position)
+	if block.Position.Round < dkgDelayRound {
+		return crypto.Keccak256Hash(crs[:], hashPos[:], block.ProposerID.Hash[:])
+	}
+	return crypto.Keccak256Hash(crs[:], hashPos[:])
+}
+
+// VerifyCRSSignature verifies the CRS signature of types.Block.
+func VerifyCRSSignature(
+	block *types.Block, crs common.Hash, npks *typesDKG.NodePublicKeys) bool {
+	hash := hashCRS(block, crs)
+	if block.Position.Round < dkgDelayRound {
+		return bytes.Equal(block.CRSSignature.Signature[:], hash[:])
+	}
+	if npks == nil {
+		return false
+	}
+	pubKey, exist := npks.PublicKeys[block.ProposerID]
+	if !exist {
+		return false
+	}
+	return pubKey.VerifySignature(hash, block.CRSSignature)
+}
+
+// HashPosition generates hash of a types.Position.
+func HashPosition(position types.Position) common.Hash {
+	binaryRound := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryRound, position.Round)
+
+	binaryHeight := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryHeight, position.Height)
+
+	return crypto.Keccak256Hash(
+		binaryRound,
+		binaryHeight,
+	)
+}
+
+// HashPositionTmp generates hash of a types.Position in fullnode's hash type.
+func HashPositionTmp(position types.Position) dexCommon.Hash {
+	binaryRound := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryRound, position.Round)
+
+	binaryHeight := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryHeight, position.Height)
+
+	return dexCrypto.Keccak256Hash(
+		binaryRound,
+		binaryHeight,
+	)
+}
+
+func hashDKGPrivateShare(prvShare *typesDKG.PrivateShare) common.Hash {
+	binaryRound := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryRound, prvShare.Round)
+	binaryReset := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryReset, prvShare.Reset)
+
+	return crypto.Keccak256Hash(
+		prvShare.ProposerID.Hash[:],
+		prvShare.ReceiverID.Hash[:],
+		binaryRound,
+		binaryReset,
+		prvShare.PrivateShare.Bytes(),
+	)
+}
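VerifyVoteSignature recovers the public key from the signature over HashVote and compares it with ProposerID. A round-trip sketch (voteRoundTrip is a hypothetical helper, written as if in this package):

func voteRoundTrip() (bool, error) {
	prv, err := ecdsa.NewPrivateKey()
	if err != nil {
		return false, err
	}
	v := types.NewVote(types.VoteCom, common.NewRandomHash(), 1)
	v.ProposerID = types.NewNodeID(prv.PublicKey())
	if v.Signature, err = prv.Sign(HashVote(v)); err != nil {
		return false, err
	}
	// Mutating any hashed field afterwards (e.g. v.Period++) flips this
	// to false, which is exactly what the tests below exercise.
	return VerifyVoteSignature(v)
}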
+func VerifyDKGPrivateShareSignature(
+	prvShare *typesDKG.PrivateShare) (bool, error) {
+	hash := hashDKGPrivateShare(prvShare)
+	pubKey, err := crypto.SigToPub(hash, prvShare.Signature)
+	if err != nil {
+		return false, err
+	}
+	if prvShare.ProposerID != types.NewNodeID(pubKey) {
+		return false, nil
+	}
+	return true, nil
+}
+
+func hashDKGMasterPublicKey(mpk *typesDKG.MasterPublicKey) common.Hash {
+	binaryRound := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryRound, mpk.Round)
+	binaryReset := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryReset, mpk.Reset)
+
+	return crypto.Keccak256Hash(
+		mpk.ProposerID.Hash[:],
+		mpk.DKGID.GetLittleEndian(),
+		mpk.PublicKeyShares.MasterKeyBytes(),
+		binaryRound,
+		binaryReset,
+	)
+}
+
+// VerifyDKGMasterPublicKeySignature verifies DKGMasterPublicKey signature.
+func VerifyDKGMasterPublicKeySignature(
+	mpk *typesDKG.MasterPublicKey) (bool, error) {
+	hash := hashDKGMasterPublicKey(mpk)
+	pubKey, err := crypto.SigToPub(hash, mpk.Signature)
+	if err != nil {
+		return false, err
+	}
+	if mpk.ProposerID != types.NewNodeID(pubKey) {
+		return false, nil
+	}
+	return true, nil
+}
+
+func hashDKGComplaint(complaint *typesDKG.Complaint) common.Hash {
+	binaryRound := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryRound, complaint.Round)
+	binaryReset := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryReset, complaint.Reset)
+
+	hashPrvShare := hashDKGPrivateShare(&complaint.PrivateShare)
+
+	return crypto.Keccak256Hash(
+		complaint.ProposerID.Hash[:],
+		binaryRound,
+		binaryReset,
+		hashPrvShare[:],
+	)
+}
+
+// VerifyDKGComplaintSignature verifies DKGComplaint signature.
+func VerifyDKGComplaintSignature(
+	complaint *typesDKG.Complaint) (bool, error) {
+	if complaint.Round != complaint.PrivateShare.Round {
+		return false, nil
+	}
+	if complaint.Reset != complaint.PrivateShare.Reset {
+		return false, nil
+	}
+	hash := hashDKGComplaint(complaint)
+	pubKey, err := crypto.SigToPub(hash, complaint.Signature)
+	if err != nil {
+		return false, err
+	}
+	if complaint.ProposerID != types.NewNodeID(pubKey) {
+		return false, nil
+	}
+	if !complaint.IsNack() {
+		return VerifyDKGPrivateShareSignature(&complaint.PrivateShare)
+	}
+	return true, nil
+}
+
+func hashDKGPartialSignature(psig *typesDKG.PartialSignature) common.Hash {
+	binaryRound := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryRound, psig.Round)
+
+	return crypto.Keccak256Hash(
+		psig.ProposerID.Hash[:],
+		binaryRound,
+		psig.Hash[:],
+		psig.PartialSignature.Signature[:],
+	)
+}
+
+// VerifyDKGPartialSignatureSignature verifies the signature of
+// typesDKG.PartialSignature.
+func VerifyDKGPartialSignatureSignature(
+	psig *typesDKG.PartialSignature) (bool, error) {
+	hash := hashDKGPartialSignature(psig)
+	pubKey, err := crypto.SigToPub(hash, psig.Signature)
+	if err != nil {
+		return false, err
+	}
+	if psig.ProposerID != types.NewNodeID(pubKey) {
+		return false, nil
+	}
+	return true, nil
+}
+
+func hashDKGMPKReady(ready *typesDKG.MPKReady) common.Hash {
+	binaryRound := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryRound, ready.Round)
+	binaryReset := make([]byte, 8)
+	binary.LittleEndian.PutUint64(binaryReset, ready.Reset)
+
+	return crypto.Keccak256Hash(
+		ready.ProposerID.Hash[:],
+		binaryRound,
+		binaryReset,
+	)
+}
+
+// VerifyDKGMPKReadySignature verifies DKGMPKReady signature.
+func VerifyDKGMPKReadySignature( + ready *typesDKG.MPKReady) (bool, error) { + hash := hashDKGMPKReady(ready) + pubKey, err := crypto.SigToPub(hash, ready.Signature) + if err != nil { + return false, err + } + if ready.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + return true, nil +} + +func hashDKGFinalize(final *typesDKG.Finalize) common.Hash { + binaryRound := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRound, final.Round) + binaryReset := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryReset, final.Reset) + + return crypto.Keccak256Hash( + final.ProposerID.Hash[:], + binaryRound, + binaryReset, + ) +} + +func hashDKGSuccess(success *typesDKG.Success) common.Hash { + binaryRound := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryRound, success.Round) + binaryReset := make([]byte, 8) + binary.LittleEndian.PutUint64(binaryReset, success.Reset) + + return crypto.Keccak256Hash( + success.ProposerID.Hash[:], + binaryRound, + binaryReset, + ) +} + +// VerifyDKGFinalizeSignature verifies DKGFinalize signature. +func VerifyDKGFinalizeSignature( + final *typesDKG.Finalize) (bool, error) { + hash := hashDKGFinalize(final) + pubKey, err := crypto.SigToPub(hash, final.Signature) + if err != nil { + return false, err + } + if final.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + return true, nil +} + +// VerifyDKGSuccessSignature verifies DKGSuccess signature. +func VerifyDKGSuccessSignature( + success *typesDKG.Success) (bool, error) { + hash := hashDKGSuccess(success) + pubKey, err := crypto.SigToPub(hash, success.Signature) + if err != nil { + return false, err + } + if success.ProposerID != types.NewNodeID(pubKey) { + return false, nil + } + return true, nil +} + +// Rehash hashes the hash again and again and again... +func Rehash(hash common.Hash, count uint) common.Hash { + result := hash + for i := uint(0); i < count; i++ { + result = crypto.Keccak256Hash(result[:]) + } + return result +} diff --git a/dex/consensus/core/utils/crypto_test.go b/dex/consensus/core/utils/crypto_test.go new file mode 100644 index 000000000..29396c59b --- /dev/null +++ b/dex/consensus/core/utils/crypto_test.go @@ -0,0 +1,332 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. 
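Rehash, at the end of crypto.go above, simply applies Keccak256 count times. A tiny equivalence sketch (rehashTwice is a hypothetical helper, written as if in the same package):

func rehashTwice() bool {
	h := common.NewRandomHash()
	once := crypto.Keccak256Hash(h[:])
	twice := crypto.Keccak256Hash(once[:])
	return Rehash(h, 2) == twice // always true
}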
+ +package utils + +import ( + "testing" + "time" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" + "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa" + "github.com/dexon-foundation/dexon-consensus/core/types" + typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" + "github.com/stretchr/testify/suite" +) + +type CryptoTestSuite struct { + suite.Suite +} + +var myNID = types.NodeID{Hash: common.NewRandomHash()} + +func (s *CryptoTestSuite) prepareBlock(prevBlock *types.Block) *types.Block { + now := time.Now().UTC() + if prevBlock == nil { + return &types.Block{ + Position: types.Position{Height: types.GenesisHeight}, + Hash: common.NewRandomHash(), + Timestamp: now, + } + } + s.Require().NotEqual(prevBlock.Hash, common.Hash{}) + return &types.Block{ + ParentHash: prevBlock.Hash, + Hash: common.NewRandomHash(), + Timestamp: now, + Position: types.Position{Height: prevBlock.Position.Height + 1}, + } +} + +func (s *CryptoTestSuite) newBlock(prevBlock *types.Block) *types.Block { + block := s.prepareBlock(prevBlock) + var err error + block.Hash, err = HashBlock(block) + s.Require().NoError(err) + return block +} + +func (s *CryptoTestSuite) generateCompactionChain( + length int, prv crypto.PrivateKey) []*types.Block { + blocks := make([]*types.Block, length) + var prevBlock *types.Block + for idx := range blocks { + block := s.newBlock(prevBlock) + prevBlock = block + blocks[idx] = block + } + return blocks +} + +func (s *CryptoTestSuite) generateBlockChain( + length int, signer *Signer) []*types.Block { + blocks := make([]*types.Block, length) + var prevBlock *types.Block + for idx := range blocks { + block := s.newBlock(prevBlock) + blocks[idx] = block + err := signer.SignBlock(block) + s.Require().NoError(err) + } + return blocks +} + +func (s *CryptoTestSuite) TestBlockSignature() { + prv, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + blocks := s.generateBlockChain(10, NewSigner(prv)) + blockMap := make(map[common.Hash]*types.Block) + for _, block := range blocks { + blockMap[block.Hash] = block + } + for _, block := range blocks { + if !block.IsGenesis() { + parentBlock, exist := blockMap[block.ParentHash] + s.Require().True(exist) + s.True(parentBlock.Position.Height == block.Position.Height-1) + hash, err := HashBlock(parentBlock) + s.Require().NoError(err) + s.Equal(hash, block.ParentHash) + } + s.NoError(VerifyBlockSignature(block)) + } +} + +func (s *CryptoTestSuite) TestVoteSignature() { + prv, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + pub := prv.PublicKey() + nID := types.NewNodeID(pub) + vote := types.NewVote(types.VoteInit, common.NewRandomHash(), 1) + vote.ProposerID = nID + vote.Signature, err = prv.Sign(HashVote(vote)) + s.Require().NoError(err) + ok, err := VerifyVoteSignature(vote) + s.Require().NoError(err) + s.True(ok) + vote.Type = types.VoteCom + ok, err = VerifyVoteSignature(vote) + s.Require().NoError(err) + s.False(ok) +} + +func (s *CryptoTestSuite) TestCRSSignature() { + dkgDelayRound = 1 + crs := common.NewRandomHash() + prv, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + pub := prv.PublicKey() + nID := types.NewNodeID(pub) + block := &types.Block{ + ProposerID: nID, + } + hash := hashCRS(block, crs) + block.CRSSignature.Signature = hash[:] + ok := VerifyCRSSignature(block, crs, nil) + s.True(ok) + block.Position.Height++ + ok = VerifyCRSSignature(block, crs, nil) + 
s.False(ok) +} + +func (s *CryptoTestSuite) TestDKGSignature() { + prv, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + nID := types.NewNodeID(prv.PublicKey()) + prvShare := &typesDKG.PrivateShare{ + ProposerID: nID, + Round: 5, + Reset: 6, + PrivateShare: *dkg.NewPrivateKey(), + } + prvShare.Signature, err = prv.Sign(hashDKGPrivateShare(prvShare)) + s.Require().NoError(err) + ok, err := VerifyDKGPrivateShareSignature(prvShare) + s.Require().NoError(err) + s.True(ok) + prvShare.Round++ + ok, err = VerifyDKGPrivateShareSignature(prvShare) + s.Require().NoError(err) + s.False(ok) + prvShare.Round-- + prvShare.Reset++ + ok, err = VerifyDKGPrivateShareSignature(prvShare) + s.Require().NoError(err) + s.False(ok) + prvShare.Reset-- + + id := dkg.NewID([]byte{13}) + _, pkShare := dkg.NewPrivateKeyShares(1) + mpk := &typesDKG.MasterPublicKey{ + ProposerID: nID, + Round: 5, + Reset: 6, + DKGID: id, + PublicKeyShares: *pkShare.Move(), + } + mpk.Signature, err = prv.Sign(hashDKGMasterPublicKey(mpk)) + s.Require().NoError(err) + ok, err = VerifyDKGMasterPublicKeySignature(mpk) + s.Require().NoError(err) + s.True(ok) + // Test incorrect round. + mpk.Round++ + ok, err = VerifyDKGMasterPublicKeySignature(mpk) + s.Require().NoError(err) + s.False(ok) + mpk.Round-- + // Test incorrect reset. + mpk.Reset++ + ok, err = VerifyDKGMasterPublicKeySignature(mpk) + s.Require().NoError(err) + s.False(ok) + mpk.Reset-- + + prvShare.Signature, err = prv.Sign(hashDKGPrivateShare(prvShare)) + s.Require().NoError(err) + complaint := &typesDKG.Complaint{ + ProposerID: nID, + Round: 5, + Reset: 6, + PrivateShare: *prvShare, + } + complaint.Signature, err = prv.Sign(hashDKGComplaint(complaint)) + s.Require().NoError(err) + ok, err = VerifyDKGComplaintSignature(complaint) + s.Require().NoError(err) + s.True(ok) + // Test incorrect complaint signature. + complaint.Round++ + ok, err = VerifyDKGComplaintSignature(complaint) + s.Require().NoError(err) + s.False(ok) + complaint.Round-- + // Test mismatch round. + complaint.PrivateShare.Round++ + complaint.Signature, err = prv.Sign(hashDKGComplaint(complaint)) + s.Require().NoError(err) + ok, err = VerifyDKGComplaintSignature(complaint) + s.Require().NoError(err) + s.False(ok) + complaint.PrivateShare.Round-- + // Test mismatch reset. + complaint.PrivateShare.Reset++ + complaint.Signature, err = prv.Sign(hashDKGComplaint(complaint)) + s.Require().NoError(err) + ok, err = VerifyDKGComplaintSignature(complaint) + s.Require().NoError(err) + s.False(ok) + complaint.PrivateShare.Reset-- + // Test incorrect private share signature. 
+ complaint.PrivateShare.Round-- + complaint.PrivateShare.ReceiverID = types.NodeID{Hash: common.NewRandomHash()} + complaint.Signature, err = prv.Sign(hashDKGComplaint(complaint)) + s.Require().NoError(err) + ok, err = VerifyDKGComplaintSignature(complaint) + s.Require().NoError(err) + s.False(ok) + + sig := &typesDKG.PartialSignature{ + ProposerID: nID, + Round: 5, + PartialSignature: dkg.PartialSignature{}, + } + sig.Signature, err = prv.Sign(hashDKGPartialSignature(sig)) + s.Require().NoError(err) + ok, err = VerifyDKGPartialSignatureSignature(sig) + s.Require().NoError(err) + s.True(ok) + sig.Round++ + ok, err = VerifyDKGPartialSignatureSignature(sig) + s.Require().NoError(err) + s.False(ok) + + ready := &typesDKG.MPKReady{ + ProposerID: nID, + Round: 5, + Reset: 6, + } + ready.Signature, err = prv.Sign(hashDKGMPKReady(ready)) + s.Require().NoError(err) + ok, err = VerifyDKGMPKReadySignature(ready) + s.Require().NoError(err) + s.True(ok) + // Test incorrect round. + ready.Round++ + ok, err = VerifyDKGMPKReadySignature(ready) + s.Require().NoError(err) + s.False(ok) + ready.Round-- + // Test incorrect reset. + ready.Reset++ + ok, err = VerifyDKGMPKReadySignature(ready) + s.Require().NoError(err) + s.False(ok) + ready.Reset-- + + final := &typesDKG.Finalize{ + ProposerID: nID, + Round: 5, + Reset: 6, + } + final.Signature, err = prv.Sign(hashDKGFinalize(final)) + s.Require().NoError(err) + ok, err = VerifyDKGFinalizeSignature(final) + s.Require().NoError(err) + s.True(ok) + // Test incorrect round. + final.Round++ + ok, err = VerifyDKGFinalizeSignature(final) + s.Require().NoError(err) + s.False(ok) + final.Round-- + // Test incorrect reset. + final.Reset++ + ok, err = VerifyDKGFinalizeSignature(final) + s.Require().NoError(err) + s.False(ok) + final.Reset-- + + success := &typesDKG.Success{ + ProposerID: nID, + Round: 5, + Reset: 6, + } + success.Signature, err = prv.Sign(hashDKGSuccess(success)) + s.Require().NoError(err) + ok, err = VerifyDKGSuccessSignature(success) + s.Require().NoError(err) + s.True(ok) + // Test incorrect round. + success.Round++ + ok, err = VerifyDKGSuccessSignature(success) + s.Require().NoError(err) + s.False(ok) + success.Round-- + // Test incorrect reset. + success.Reset++ + ok, err = VerifyDKGSuccessSignature(success) + s.Require().NoError(err) + s.False(ok) + success.Reset-- +} + +func TestCrypto(t *testing.T) { + suite.Run(t, new(CryptoTestSuite)) +} diff --git a/dex/consensus/core/utils/nodeset-cache.go b/dex/consensus/core/utils/nodeset-cache.go new file mode 100644 index 000000000..89dcfc86b --- /dev/null +++ b/dex/consensus/core/utils/nodeset-cache.go @@ -0,0 +1,245 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. 
+
+package utils
+
+import (
+	"errors"
+	"sync"
+
+	"github.com/dexon-foundation/dexon-consensus/common"
+	"github.com/dexon-foundation/dexon-consensus/core/crypto"
+	"github.com/dexon-foundation/dexon-consensus/core/types"
+)
+
+var (
+	// ErrNodeSetNotReady means we got a nil node set.
+	ErrNodeSetNotReady = errors.New("node set is not ready")
+	// ErrCRSNotReady means we got an empty CRS.
+	ErrCRSNotReady = errors.New("crs is not ready")
+	// ErrConfigurationNotReady means we got a nil configuration.
+	ErrConfigurationNotReady = errors.New("configuration is not ready")
+)
+
+type sets struct {
+	crs       common.Hash
+	nodeSet   *types.NodeSet
+	notarySet map[types.NodeID]struct{}
+}
+
+// NodeSetCacheInterface specifies the interface used by NodeSetCache.
+type NodeSetCacheInterface interface {
+	// Configuration returns the configuration at a given round.
+	// Return the genesis configuration if round == 0.
+	Configuration(round uint64) *types.Config
+
+	// CRS returns the CRS for a given round.
+	// Return the genesis CRS if round == 0.
+	CRS(round uint64) common.Hash
+
+	// NodeSet returns the node set at a given round.
+	// Return the genesis node set if round == 0.
+	NodeSet(round uint64) []crypto.PublicKey
+}
+
+// NodeSetCache caches node set information.
+//
+// NOTE: this module doesn't handle DKG resetting and can only be used along
+// with utils.RoundEvent.
+type NodeSetCache struct {
+	lock    sync.RWMutex
+	nsIntf  NodeSetCacheInterface
+	rounds  map[uint64]*sets
+	keyPool map[types.NodeID]*struct {
+		pubKey crypto.PublicKey
+		refCnt int
+	}
+}
+
+// NewNodeSetCache constructs a NodeSetCache instance.
+func NewNodeSetCache(nsIntf NodeSetCacheInterface) *NodeSetCache {
+	return &NodeSetCache{
+		nsIntf: nsIntf,
+		rounds: make(map[uint64]*sets),
+		keyPool: make(map[types.NodeID]*struct {
+			pubKey crypto.PublicKey
+			refCnt int
+		}),
+	}
+}
+
+// Exists checks if a node is in the node set of that round.
+func (cache *NodeSetCache) Exists(
+	round uint64, nodeID types.NodeID) (exists bool, err error) {
+
+	nIDs, exists := cache.get(round)
+	if !exists {
+		if nIDs, err = cache.update(round); err != nil {
+			return
+		}
+	}
+	_, exists = nIDs.nodeSet.IDs[nodeID]
+	return
+}
+
+// GetPublicKey returns the public key of that node.
+func (cache *NodeSetCache) GetPublicKey(
+	nodeID types.NodeID) (key crypto.PublicKey, exists bool) {
+
+	cache.lock.RLock()
+	defer cache.lock.RUnlock()
+
+	rec, exists := cache.keyPool[nodeID]
+	if exists {
+		key = rec.pubKey
+	}
+	return
+}
+
+// GetNodeSet returns the node set of the given round as a map of IDs.
+func (cache *NodeSetCache) GetNodeSet(round uint64) (*types.NodeSet, error) {
+	IDs, exists := cache.get(round)
+	if !exists {
+		var err error
+		if IDs, err = cache.update(round); err != nil {
+			return nil, err
+		}
+	}
+	return IDs.nodeSet.Clone(), nil
+}
+
+// GetNotarySet returns the notary set of the given round.
+func (cache *NodeSetCache) GetNotarySet(
+	round uint64) (map[types.NodeID]struct{}, error) {
+	IDs, err := cache.getOrUpdate(round)
+	if err != nil {
+		return nil, err
+	}
+	return cache.cloneMap(IDs.notarySet), nil
+}
+
+// Purge a specific round.
+func (cache *NodeSetCache) Purge(rID uint64) {
+	cache.lock.Lock()
+	defer cache.lock.Unlock()
+	nIDs, exist := cache.rounds[rID]
+	if !exist {
+		return
+	}
+	for nID := range nIDs.nodeSet.IDs {
+		rec := cache.keyPool[nID]
+		if rec.refCnt--; rec.refCnt == 0 {
+			delete(cache.keyPool, nID)
+		}
+	}
+	delete(cache.rounds, rID)
+}
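A sketch of plugging a governance source into the cache: staticGov is a hypothetical, fixed-answer implementation of NodeSetCacheInterface (a real fullnode answers these queries from on-chain data):

type staticGov struct {
	cfg  *types.Config
	crs  common.Hash
	keys []crypto.PublicKey
}

func (g *staticGov) Configuration(round uint64) *types.Config { return g.cfg }
func (g *staticGov) CRS(round uint64) common.Hash             { return g.crs }
func (g *staticGov) NodeSet(round uint64) []crypto.PublicKey  { return g.keys }

func notariesAt(gov *staticGov, round uint64) (map[types.NodeID]struct{}, error) {
	cache := NewNodeSetCache(gov)
	// The first access lazily pulls the round from governance and caches it.
	return cache.GetNotarySet(round)
}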
+// Touch updates the internal cache for a round.
+func (cache *NodeSetCache) Touch(round uint64) (err error) {
+	_, err = cache.update(round)
+	return
+}
+
+func (cache *NodeSetCache) cloneMap(
+	nIDs map[types.NodeID]struct{}) map[types.NodeID]struct{} {
+	nIDsCopy := make(map[types.NodeID]struct{}, len(nIDs))
+	for k := range nIDs {
+		nIDsCopy[k] = struct{}{}
+	}
+	return nIDsCopy
+}
+
+func (cache *NodeSetCache) getOrUpdate(round uint64) (nIDs *sets, err error) {
+	s, exists := cache.get(round)
+	if !exists {
+		if s, err = cache.update(round); err != nil {
+			return
+		}
+	}
+	nIDs = s
+	return
+}
+
+// update the node set for that round.
+//
+// This cache maintains the node sets of rounds no older than 5 rounds before
+// the updated one, and purges any round outside that window.
+func (cache *NodeSetCache) update(round uint64) (nIDs *sets, err error) {
+	cache.lock.Lock()
+	defer cache.lock.Unlock()
+	// Get information for the requested round.
+	keySet := cache.nsIntf.NodeSet(round)
+	if keySet == nil {
+		err = ErrNodeSetNotReady
+		return
+	}
+	crs := cache.nsIntf.CRS(round)
+	if (crs == common.Hash{}) {
+		err = ErrCRSNotReady
+		return
+	}
+	// Cache new round.
+	nodeSet := types.NewNodeSet()
+	for _, key := range keySet {
+		nID := types.NewNodeID(key)
+		nodeSet.Add(nID)
+		if rec, exists := cache.keyPool[nID]; exists {
+			rec.refCnt++
+		} else {
+			cache.keyPool[nID] = &struct {
+				pubKey crypto.PublicKey
+				refCnt int
+			}{key, 1}
+		}
+	}
+	cfg := cache.nsIntf.Configuration(round)
+	if cfg == nil {
+		err = ErrConfigurationNotReady
+		return
+	}
+	nIDs = &sets{
+		crs:       crs,
+		nodeSet:   nodeSet,
+		notarySet: make(map[types.NodeID]struct{}),
+	}
+	nIDs.notarySet = nodeSet.GetSubSet(
+		int(cfg.NotarySetSize), types.NewNotarySetTarget(crs))
+	cache.rounds[round] = nIDs
+	// Purge older rounds.
+	for rID, nIDs := range cache.rounds {
+		nodeSet := nIDs.nodeSet
+		if round-rID <= 5 {
+			continue
+		}
+		for nID := range nodeSet.IDs {
+			rec := cache.keyPool[nID]
+			if rec.refCnt--; rec.refCnt == 0 {
+				delete(cache.keyPool, nID)
+			}
+		}
+		delete(cache.rounds, rID)
+	}
+	return
+}
+
+func (cache *NodeSetCache) get(round uint64) (nIDs *sets, exists bool) {
+	cache.lock.RLock()
+	defer cache.lock.RUnlock()
+	nIDs, exists = cache.rounds[round]
+	return
+}
diff --git a/dex/consensus/core/utils/nodeset-cache_test.go b/dex/consensus/core/utils/nodeset-cache_test.go
new file mode 100644
index 000000000..b9052c888
--- /dev/null
+++ b/dex/consensus/core/utils/nodeset-cache_test.go
@@ -0,0 +1,149 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"testing"
+	"time"
+
+	"github.com/dexon-foundation/dexon-consensus/common"
+	"github.com/dexon-foundation/dexon-consensus/core/crypto"
+	"github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa"
+	"github.com/dexon-foundation/dexon-consensus/core/types"
+	"github.com/stretchr/testify/suite"
+)
+
+type nsIntf struct {
+	s       *NodeSetCacheTestSuite
+	crs     common.Hash
+	curKeys []crypto.PublicKey
+}
+
+func (g *nsIntf) Configuration(round uint64) (cfg *types.Config) {
+	return &types.Config{
+		NotarySetSize:    7,
+		RoundLength:      60,
+		LambdaBA:         250 * time.Millisecond,
+		MinBlockInterval: 1 * time.Second,
+	}
+}
+func (g *nsIntf) CRS(round uint64) (b common.Hash) { return g.crs }
+func (g *nsIntf) NodeSet(round uint64) []crypto.PublicKey {
+	// Generate keys randomly, and check them during verification.
+	g.curKeys = []crypto.PublicKey{}
+	for i := 0; i < 10; i++ {
+		prvKey, err := ecdsa.NewPrivateKey()
+		g.s.Require().NoError(err)
+		g.curKeys = append(g.curKeys, prvKey.PublicKey())
+	}
+	return g.curKeys
+}
+
+type NodeSetCacheTestSuite struct {
+	suite.Suite
+}
+
+func (s *NodeSetCacheTestSuite) TestBasicUsage() {
+	var (
+		nsIntf = &nsIntf{
+			s:   s,
+			crs: common.NewRandomHash(),
+		}
+		cache = NewNodeSetCache(nsIntf)
+		req   = s.Require()
+	)
+
+	chk := func(
+		cache *NodeSetCache, round uint64, nodeSet map[types.NodeID]struct{}) {
+
+		for nID := range nodeSet {
+			// It should exist.
+			exists, err := cache.Exists(round, nID)
+			req.NoError(err)
+			req.True(exists)
+			// We should be able to get its key.
+			key, exists := cache.GetPublicKey(nID)
+			req.NotNil(key)
+			req.True(exists)
+		}
+	}
+
+	// Try to get round 0.
+	nodeSet0, err := cache.GetNodeSet(0)
+	req.NoError(err)
+	chk(cache, 0, nodeSet0.IDs)
+	notarySet, err := cache.GetNotarySet(0)
+	req.NoError(err)
+	chk(cache, 0, notarySet)
+	// Try to get round 1.
+	nodeSet1, err := cache.GetNodeSet(1)
+	req.NoError(err)
+	chk(cache, 0, nodeSet0.IDs)
+	chk(cache, 1, nodeSet1.IDs)
+	// Try to get round 6, round 0 should be purged.
+	nodeSet6, err := cache.GetNodeSet(6)
+	req.NoError(err)
+	chk(cache, 1, nodeSet1.IDs)
+	chk(cache, 6, nodeSet6.IDs)
+	for nID := range nodeSet0.IDs {
+		_, exists := cache.GetPublicKey(nID)
+		req.False(exists)
+	}
+}
+
+func (s *NodeSetCacheTestSuite) TestTouch() {
+	var (
+		nsIntf = &nsIntf{
+			s:   s,
+			crs: common.NewRandomHash(),
+		}
+		cache = NewNodeSetCache(nsIntf)
+		req   = s.Require()
+	)
+
+	_, exists := cache.get(1)
+	req.False(exists)
+
+	err := cache.Touch(1)
+	req.NoError(err)
+
+	_, exists = cache.get(1)
+	req.True(exists)
+}
+
+func (s *NodeSetCacheTestSuite) TestPurge() {
+	var (
+		nsIntf = &nsIntf{
+			s:   s,
+			crs: common.NewRandomHash(),
+		}
+		cache = NewNodeSetCache(nsIntf)
+		req   = s.Require()
+	)
+	err := cache.Touch(1)
+	req.NoError(err)
+	_, exist := cache.get(1)
+	req.True(exist)
+	cache.Purge(1)
+	_, exist = cache.get(1)
+	req.False(exist)
+}
+
+func TestNodeSetCache(t *testing.T) {
+	suite.Run(t, new(NodeSetCacheTestSuite))
+}
diff --git a/dex/consensus/core/utils/penalty-helper.go b/dex/consensus/core/utils/penalty-helper.go
new file mode 100644
index 000000000..0b38474a6
--- /dev/null
+++ b/dex/consensus/core/utils/penalty-helper.go
@@ -0,0 +1,131 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"errors"
+
+	"github.com/dexon-foundation/dexon-consensus/core/types"
+	typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+)
+
+var (
+	// ErrInvalidDKGMasterPublicKey means the DKG MasterPublicKey is invalid.
+	ErrInvalidDKGMasterPublicKey = errors.New("invalid DKG master public key")
+	// ErrPayloadNotEmpty means the payload of the block is not empty.
+	ErrPayloadNotEmpty = errors.New("payload not empty")
+)
+
+// NeedPenaltyDKGPrivateShare checks if the proposer of a DKG private share
+// should be penalized.
+func NeedPenaltyDKGPrivateShare(
+	complaint *typesDKG.Complaint, mpk *typesDKG.MasterPublicKey) (bool, error) {
+	if complaint.IsNack() {
+		return false, nil
+	}
+	if mpk.ProposerID != complaint.PrivateShare.ProposerID {
+		return false, nil
+	}
+	ok, err := VerifyDKGMasterPublicKeySignature(mpk)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, ErrInvalidDKGMasterPublicKey
+	}
+	ok, err = VerifyDKGComplaintSignature(complaint)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, nil
+	}
+	ok, err = mpk.PublicKeyShares.VerifyPrvShare(
+		typesDKG.NewID(complaint.PrivateShare.ReceiverID),
+		&complaint.PrivateShare.PrivateShare)
+	if err != nil {
+		return false, err
+	}
+	return !ok, nil
+}
+
+// NeedPenaltyForkVote checks if two votes constitute a fork.
+func NeedPenaltyForkVote(vote1, vote2 *types.Vote) (bool, error) {
+	if vote1.ProposerID != vote2.ProposerID ||
+		vote1.Type != vote2.Type ||
+		vote1.Period != vote2.Period ||
+		vote1.Position != vote2.Position ||
+		vote1.BlockHash == vote2.BlockHash {
+		return false, nil
+	}
+	ok, err := VerifyVoteSignature(vote1)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, nil
+	}
+	ok, err = VerifyVoteSignature(vote2)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, nil
+	}
+	return true, nil
+}
+
+// NeedPenaltyForkBlock checks if two blocks constitute a fork.
+func NeedPenaltyForkBlock(block1, block2 *types.Block) (bool, error) { + if block1.ProposerID != block2.ProposerID || + block1.Position != block2.Position || + block1.Hash == block2.Hash { + return false, nil + } + if len(block1.Payload) != 0 || len(block2.Payload) != 0 { + return false, ErrPayloadNotEmpty + } + verifyBlock := func(block *types.Block) (bool, error) { + err := VerifyBlockSignatureWithoutPayload(block) + switch err { + case nil: + return true, nil + case ErrIncorrectSignature: + return false, nil + case ErrIncorrectHash: + return false, nil + default: + return false, err + } + } + ok, err := verifyBlock(block1) + if err != nil { + return false, err + } + if !ok { + return false, nil + } + ok, err = verifyBlock(block2) + if err != nil { + return false, err + } + if !ok { + return false, nil + } + return true, nil +} diff --git a/dex/consensus/core/utils/penalty-helper_test.go b/dex/consensus/core/utils/penalty-helper_test.go new file mode 100644 index 000000000..3e4f8b547 --- /dev/null +++ b/dex/consensus/core/utils/penalty-helper_test.go @@ -0,0 +1,222 @@ +// Copyright 2019 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package utils + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" + "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa" + "github.com/dexon-foundation/dexon-consensus/core/types" + typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" +) + +type PenaltyHelperTestSuite struct { + suite.Suite +} + +func (s *PenaltyHelperTestSuite) TestDKGComplaint() { + signComplaint := func(prv crypto.PrivateKey, complaint *typesDKG.Complaint) { + var err error + complaint.Signature, err = prv.Sign(hashDKGComplaint(complaint)) + s.Require().NoError(err) + } + prv1, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + nID1 := types.NewNodeID(prv1.PublicKey()) + + prv2, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + nID2 := types.NewNodeID(prv2.PublicKey()) + + prvShares, pubShares := dkg.NewPrivateKeyShares(3) + mpk := &typesDKG.MasterPublicKey{ + ProposerID: nID1, + DKGID: typesDKG.NewID(nID1), + PublicKeyShares: *pubShares.Move(), + } + mpk.Signature, err = prv1.Sign(hashDKGMasterPublicKey(mpk)) + s.Require().NoError(err) + + // NackComplaint should not be penalized. + complaint := &typesDKG.Complaint{ + ProposerID: nID2, + } + signComplaint(prv2, complaint) + s.Require().True(complaint.IsNack()) + ok, err := NeedPenaltyDKGPrivateShare(complaint, mpk) + s.Require().NoError(err) + s.False(ok) + + // Correct privateShare should not be penalized. 
+ prvShares.SetParticipants(dkg.IDs{typesDKG.NewID(nID1), typesDKG.NewID(nID2)}) + share, exist := prvShares.Share(typesDKG.NewID(nID2)) + s.Require().True(exist) + prvShare := &typesDKG.PrivateShare{ + ProposerID: nID1, + ReceiverID: nID2, + PrivateShare: *share, + } + prvShare.Signature, err = prv1.Sign(hashDKGPrivateShare(prvShare)) + s.Require().NoError(err) + complaint.PrivateShare = *prvShare + signComplaint(prv2, complaint) + ok, err = NeedPenaltyDKGPrivateShare(complaint, mpk) + s.Require().NoError(err) + s.False(ok) + + // Penalize incorrect privateShare. + share, exist = prvShares.Share(typesDKG.NewID(nID1)) + s.Require().True(exist) + prvShare.PrivateShare = *share + prvShare.Signature, err = prv1.Sign(hashDKGPrivateShare(prvShare)) + s.Require().NoError(err) + complaint.PrivateShare = *prvShare + signComplaint(prv2, complaint) + ok, err = NeedPenaltyDKGPrivateShare(complaint, mpk) + s.Require().NoError(err) + s.True(ok) + + // Should not penalize if mpk is incorrect. + mpk.Round++ + ok, err = NeedPenaltyDKGPrivateShare(complaint, mpk) + s.Equal(ErrInvalidDKGMasterPublicKey, err) + + // Should not penalize if mpk's proposer not match with prvShares'. + mpk.Round-- + mpk.ProposerID = nID2 + mpk.Signature, err = prv1.Sign(hashDKGMasterPublicKey(mpk)) + s.Require().NoError(err) + ok, err = NeedPenaltyDKGPrivateShare(complaint, mpk) + s.Require().NoError(err) + s.False(ok) +} + +func (s *PenaltyHelperTestSuite) TestForkVote() { + prv1, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + + vote1 := types.NewVote(types.VoteCom, common.NewRandomHash(), uint64(0)) + vote1.ProposerID = types.NewNodeID(prv1.PublicKey()) + + vote2 := vote1.Clone() + for vote2.BlockHash == vote1.BlockHash { + vote2.BlockHash = common.NewRandomHash() + } + vote1.Signature, err = prv1.Sign(HashVote(vote1)) + s.Require().NoError(err) + vote2.Signature, err = prv1.Sign(HashVote(vote2)) + s.Require().NoError(err) + + ok, err := NeedPenaltyForkVote(vote1, vote2) + s.Require().NoError(err) + s.True(ok) + + // Invalid signature should not be penalized. + vote2.VoteHeader.Period++ + ok, err = NeedPenaltyForkVote(vote1, vote2) + s.Require().NoError(err) + s.False(ok) + + // Period not matched. + vote2.Signature, err = prv1.Sign(HashVote(vote2)) + s.Require().NoError(err) + ok, err = NeedPenaltyForkVote(vote1, vote2) + s.Require().NoError(err) + s.False(ok) + + // Proposer not matched. 
+	vote2 = vote1.Clone()
+	for vote2.BlockHash == vote1.BlockHash {
+		vote2.BlockHash = common.NewRandomHash()
+	}
+	prv2, err := ecdsa.NewPrivateKey()
+	s.Require().NoError(err)
+	vote2.ProposerID = types.NewNodeID(prv2.PublicKey())
+	vote2.Signature, err = prv2.Sign(HashVote(vote2))
+	s.Require().NoError(err)
+	ok, err = NeedPenaltyForkVote(vote1, vote2)
+	s.Require().NoError(err)
+	s.False(ok)
+}
+
+func (s *PenaltyHelperTestSuite) TestForkBlock() {
+	hashBlock := func(block *types.Block) common.Hash {
+		block.PayloadHash = crypto.Keccak256Hash(block.Payload)
+		var err error
+		block.Hash, err = HashBlock(block)
+		s.Require().NoError(err)
+		return block.Hash
+	}
+	prv1, err := ecdsa.NewPrivateKey()
+	s.Require().NoError(err)
+
+	block1 := &types.Block{
+		ProposerID: types.NewNodeID(prv1.PublicKey()),
+		ParentHash: common.NewRandomHash(),
+	}
+
+	block2 := block1.Clone()
+	for block2.ParentHash == block1.ParentHash {
+		block2.ParentHash = common.NewRandomHash()
+	}
+	block1.Signature, err = prv1.Sign(hashBlock(block1))
+	s.Require().NoError(err)
+	block2.Signature, err = prv1.Sign(hashBlock(block2))
+	s.Require().NoError(err)
+
+	ok, err := NeedPenaltyForkBlock(block1, block2)
+	s.Require().NoError(err)
+	s.True(ok)
+
+	// Invalid signature should not be penalized.
+	block2.ParentHash[0]++
+	ok, err = NeedPenaltyForkBlock(block1, block2)
+	s.Require().NoError(err)
+	s.False(ok)
+
+	// Position not matched.
+	block2.Position.Height++
+	block2.Signature, err = prv1.Sign(hashBlock(block2))
+	s.Require().NoError(err)
+	ok, err = NeedPenaltyForkBlock(block1, block2)
+	s.Require().NoError(err)
+	s.False(ok)
+
+	// Proposer not matched.
+	block2 = block1.Clone()
+	for block2.ParentHash == block1.ParentHash {
+		block2.ParentHash = common.NewRandomHash()
+	}
+	prv2, err := ecdsa.NewPrivateKey()
+	s.Require().NoError(err)
+	block2.ProposerID = types.NewNodeID(prv2.PublicKey())
+	block2.Signature, err = prv2.Sign(hashBlock(block2))
+	s.Require().NoError(err)
+	ok, err = NeedPenaltyForkBlock(block1, block2)
+	s.Require().NoError(err)
+	s.False(ok)
+}
+
+func TestPenaltyHelper(t *testing.T) {
+	suite.Run(t, new(PenaltyHelperTestSuite))
+}
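A sketch of how a consumer might index votes to feed NeedPenaltyForkVote: keying on the VoteHeader with the block hash zeroed groups votes that agree on everything except the block voted for. The seen map and report callback are hypothetical; in practice vote pairs would come from gossip.

func recordVote(seen map[types.VoteHeader]*types.Vote, incoming *types.Vote,
	report func(fork1, fork2 *types.Vote)) error {
	// Two valid votes from the same proposer for the same
	// (type, period, position) but different blocks form a fork.
	key := incoming.VoteHeader
	key.BlockHash = common.Hash{}
	if prev, ok := seen[key]; ok {
		fork, err := NeedPenaltyForkVote(prev, incoming)
		if err != nil {
			return err
		}
		if fork {
			report(prev, incoming)
		}
		return nil
	}
	seen[key] = incoming.Clone()
	return nil
}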
diff --git a/dex/consensus/core/utils/round-based-config.go b/dex/consensus/core/utils/round-based-config.go
new file mode 100644
index 000000000..4c83d046b
--- /dev/null
+++ b/dex/consensus/core/utils/round-based-config.go
@@ -0,0 +1,112 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"fmt"
+
+	"github.com/dexon-foundation/dexon-consensus/core/types"
+)
+
+// RoundBasedConfig is a base config for rounds that provides boundary checking
+// on rounds.
+type RoundBasedConfig struct {
+	roundID          uint64
+	roundBeginHeight uint64
+	roundEndHeight   uint64
+	roundLength      uint64
+}
+
+// SetupRoundBasedFields sets up round based fields, including the round ID and
+// the length of the round.
+func (c *RoundBasedConfig) SetupRoundBasedFields(
+	roundID uint64, cfg *types.Config) {
+	if c.roundLength > 0 {
+		panic(fmt.Errorf("duplicated set round based fields: %d",
+			c.roundLength))
+	}
+	c.roundID = roundID
+	c.roundLength = cfg.RoundLength
+}
+
+// SetRoundBeginHeight sets the beginning height of the initial round provided
+// at construction.
+func (c *RoundBasedConfig) SetRoundBeginHeight(begin uint64) {
+	if c.roundBeginHeight != 0 {
+		panic(fmt.Errorf("duplicated set round begin height: %d",
+			c.roundBeginHeight))
+	}
+	c.roundBeginHeight = begin
+	c.roundEndHeight = begin + c.roundLength
+}
+
+// IsLastBlock checks if a block is the last block of this round.
+func (c *RoundBasedConfig) IsLastBlock(b *types.Block) bool {
+	if b.Position.Round != c.roundID {
+		panic(fmt.Errorf("attempt to compare by different round: %s, %d",
+			b, c.roundID))
+	}
+	return b.Position.Height+1 == c.roundEndHeight
+}
+
+// ExtendLength extends the round ending height by the length of the current
+// round.
+func (c *RoundBasedConfig) ExtendLength() {
+	c.roundEndHeight += c.roundLength
+}
+
+// Contains checks if a block height is in this round.
+func (c *RoundBasedConfig) Contains(h uint64) bool {
+	return c.roundBeginHeight <= h && c.roundEndHeight > h
+}
+
+// RoundID returns the round ID of this config.
+func (c *RoundBasedConfig) RoundID() uint64 {
+	if c.roundLength == 0 {
+		panic(fmt.Errorf("config is not initialized: %d", c.roundID))
+	}
+	return c.roundID
+}
+
+// RoundEndHeight returns the next checkpoint to verify whether this round has
+// ended.
+func (c *RoundBasedConfig) RoundEndHeight() uint64 {
+	if c.roundLength == 0 {
+		panic(fmt.Errorf("config is not initialized: %d", c.roundID))
+	}
+	return c.roundEndHeight
+}
+
+// AppendTo chains this config after the config of the previous round.
+func (c *RoundBasedConfig) AppendTo(other RoundBasedConfig) {
+	if c.roundID != other.roundID+1 {
+		panic(fmt.Errorf("round IDs of configs not continuous: %d %d",
+			c.roundID, other.roundID))
+	}
+	c.SetRoundBeginHeight(other.roundEndHeight)
+}
+
+// LastPeriodBeginHeight returns the begin height of the last period. For
+// example, if a round is extended twice, this method returns:
+//
+//   begin + 2 * roundLength
+//
+func (c *RoundBasedConfig) LastPeriodBeginHeight() uint64 {
+	if c.roundLength == 0 {
+		panic(fmt.Errorf("config is not initialized: %d", c.roundID))
+	}
+	return c.roundEndHeight - c.roundLength
+}
diff --git a/dex/consensus/core/utils/round-based-config_test.go b/dex/consensus/core/utils/round-based-config_test.go
new file mode 100644
index 000000000..084efe25b
--- /dev/null
+++ b/dex/consensus/core/utils/round-based-config_test.go
@@ -0,0 +1,57 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package utils + +import ( + "testing" + + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/stretchr/testify/suite" +) + +type RoundBasedConfigTestSuite struct { + suite.Suite +} + +func (s *RoundBasedConfigTestSuite) TestBasicUsage() { + c1 := RoundBasedConfig{} + c1.SetupRoundBasedFields(1, &types.Config{RoundLength: 100}) + c1.SetRoundBeginHeight(11) + s.Require().Equal(c1.RoundID(), uint64(1)) + s.Require().Equal(c1.roundLength, uint64(100)) + s.Require().Equal(c1.roundBeginHeight, uint64(11)) + s.Require().Equal(c1.roundEndHeight, uint64(111)) + s.Require().True(c1.Contains(110)) + s.Require().False(c1.Contains(111)) + c1.ExtendLength() + s.Require().True(c1.Contains(111)) + s.Require().True(c1.Contains(210)) + s.Require().False(c1.Contains(211)) + s.Require().Equal(c1.LastPeriodBeginHeight(), uint64(111)) + s.Require().Equal(c1.RoundEndHeight(), uint64(211)) + // Test AppendTo. + c2 := RoundBasedConfig{} + c2.SetupRoundBasedFields(2, &types.Config{RoundLength: 50}) + c2.AppendTo(c1) + s.Require().Equal(c2.roundBeginHeight, uint64(211)) + s.Require().Equal(c2.roundEndHeight, uint64(261)) +} + +func TestRoundBasedConfig(t *testing.T) { + suite.Run(t, new(RoundBasedConfigTestSuite)) +} diff --git a/dex/consensus/core/utils/round-event.go b/dex/consensus/core/utils/round-event.go new file mode 100644 index 000000000..bda4383fa --- /dev/null +++ b/dex/consensus/core/utils/round-event.go @@ -0,0 +1,358 @@ +// Copyright 2019 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package utils + +import ( + "context" + "fmt" + "sync" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/types" + typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" +) + +// ErrUnmatchedBlockHeightWithConfig is for invalid parameters for NewRoundEvent. +type ErrUnmatchedBlockHeightWithConfig struct { + round uint64 + reset uint64 + blockHeight uint64 +} + +func (e ErrUnmatchedBlockHeightWithConfig) Error() string { + return fmt.Sprintf("unsynced block height and cfg: round:%d reset:%d h:%d", + e.round, e.reset, e.blockHeight) +} + +// RoundEventParam defines the parameters passed to event handlers of +// RoundEvent. +type RoundEventParam struct { + // 'Round' of next checkpoint, might be identical to previous checkpoint. + Round uint64 + // the count of reset DKG for 'Round+1'. + Reset uint64 + // the begin block height of this event, the end block height of this event + // would be BeginHeight + config.RoundLength. 
+ BeginHeight uint64 + // The configuration for 'Round'. + Config *types.Config + // The CRS for 'Round'. + CRS common.Hash +} + +// NextRoundValidationHeight returns the height to check if the next round is +// ready. +func (e RoundEventParam) NextRoundValidationHeight() uint64 { + return e.BeginHeight + e.Config.RoundLength*9/10 +} + +// NextCRSProposingHeight returns the height to propose CRS for next round. +func (e RoundEventParam) NextCRSProposingHeight() uint64 { + return e.BeginHeight + e.Config.RoundLength/2 +} + +// NextDKGPreparationHeight returns the height to prepare DKG set for next +// round. +func (e RoundEventParam) NextDKGPreparationHeight() uint64 { + return e.BeginHeight + e.Config.RoundLength*2/3 +} + +// NextRoundHeight returns the height of the beginning of next round. +func (e RoundEventParam) NextRoundHeight() uint64 { + return e.BeginHeight + e.Config.RoundLength +} + +// NextTouchNodeSetCacheHeight returns the height to touch the node set cache. +func (e RoundEventParam) NextTouchNodeSetCacheHeight() uint64 { + return e.BeginHeight + e.Config.RoundLength/2 +} + +// NextDKGResetHeight returns the height to reset DKG for next period. +func (e RoundEventParam) NextDKGResetHeight() uint64 { + return e.BeginHeight + e.Config.RoundLength*85/100 +} + +// NextDKGRegisterHeight returns the height to register DKG. +func (e RoundEventParam) NextDKGRegisterHeight() uint64 { + return e.BeginHeight + e.Config.RoundLength/2 +} + +// RoundEndHeight returns the round ending height of this round event. +func (e RoundEventParam) RoundEndHeight() uint64 { + return e.BeginHeight + e.Config.RoundLength +} + +func (e RoundEventParam) String() string { + return fmt.Sprintf("roundEvtParam{Round:%d Reset:%d Height:%d}", + e.Round, + e.Reset, + e.BeginHeight) +} + +// roundEventFn defines the fingerprint of handlers of round events. +type roundEventFn func([]RoundEventParam) + +// governanceAccessor is a subset of core.Governance to break the dependency +// between core and utils package. +type governanceAccessor interface { + // Configuration returns the configuration at a given round. + // Return the genesis configuration if round == 0. + Configuration(round uint64) *types.Config + + // CRS returns the CRS for a given round. + // Return the genesis CRS if round == 0. + CRS(round uint64) common.Hash + + // DKGComplaints gets all the DKGComplaints of round. + DKGComplaints(round uint64) []*typesDKG.Complaint + + // DKGMasterPublicKeys gets all the DKGMasterPublicKey of round. + DKGMasterPublicKeys(round uint64) []*typesDKG.MasterPublicKey + + // IsDKGFinal checks if DKG is final. + IsDKGFinal(round uint64) bool + + // IsDKGSuccess checks if DKG is success. + IsDKGSuccess(round uint64) bool + + // DKGResetCount returns the reset count for DKG of given round. + DKGResetCount(round uint64) uint64 + + // Get the begin height of a round. + GetRoundHeight(round uint64) uint64 +} + +// RoundEventRetryHandlerGenerator generates a handler to common.Event, which +// would register itself to retry next round validation if round event is not +// triggered. +func RoundEventRetryHandlerGenerator( + rEvt *RoundEvent, hEvt *common.Event) func(uint64) { + var hEvtHandler func(uint64) + hEvtHandler = func(h uint64) { + if rEvt.ValidateNextRound(h) == 0 { + // Retry until at least one round event is triggered. + hEvt.RegisterHeight(h+1, hEvtHandler) + } + } + return hEvtHandler +} + +// RoundEvent would be triggered when either: +// - the next DKG set setup is ready. 
+// - the next DKG set setup failed, and the previous DKG set has already
+//   reset the CRS.
+type RoundEvent struct {
+	gov                     governanceAccessor
+	logger                  common.Logger
+	lock                    sync.Mutex
+	handlers                []roundEventFn
+	config                  RoundBasedConfig
+	lastTriggeredRound      uint64
+	lastTriggeredResetCount uint64
+	roundShift              uint64
+	gpkInvalid              bool
+	ctx                     context.Context
+	ctxCancel               context.CancelFunc
+}
+
+// NewRoundEvent creates a RoundEvent instance.
+func NewRoundEvent(parentCtx context.Context, gov governanceAccessor,
+	logger common.Logger, initPos types.Position, roundShift uint64) (
+	*RoundEvent, error) {
+	// We need to generate a valid ending block height for this round (taking
+	// the DKG reset count into consideration).
+	logger.Info("new RoundEvent", "position", initPos, "shift", roundShift)
+	initConfig := GetConfigWithPanic(gov, initPos.Round, logger)
+	e := &RoundEvent{
+		gov:                gov,
+		logger:             logger,
+		lastTriggeredRound: initPos.Round,
+		roundShift:         roundShift,
+	}
+	e.ctx, e.ctxCancel = context.WithCancel(parentCtx)
+	e.config = RoundBasedConfig{}
+	e.config.SetupRoundBasedFields(initPos.Round, initConfig)
+	e.config.SetRoundBeginHeight(GetRoundHeight(gov, initPos.Round))
+	// Make sure the DKG reset count in the current governance can cover the
+	// initial block height.
+	if initPos.Height >= types.GenesisHeight {
+		resetCount := gov.DKGResetCount(initPos.Round + 1)
+		remains := resetCount
+		for ; remains > 0 && !e.config.Contains(initPos.Height); remains-- {
+			e.config.ExtendLength()
+		}
+		if !e.config.Contains(initPos.Height) {
+			return nil, ErrUnmatchedBlockHeightWithConfig{
+				round:       initPos.Round,
+				reset:       resetCount,
+				blockHeight: initPos.Height,
+			}
+		}
+		e.lastTriggeredResetCount = resetCount - remains
+	}
+	return e, nil
+}
+
+// Register a handler to be called when a new round is confirmed or a new DKG
+// reset is detected.
+//
+// Handlers registered earlier have higher priority.
+func (e *RoundEvent) Register(h roundEventFn) {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	e.handlers = append(e.handlers, h)
+}
+
+// TriggerInitEvent triggers an event from the initial setting.
+func (e *RoundEvent) TriggerInitEvent() {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	events := []RoundEventParam{RoundEventParam{
+		Round:       e.lastTriggeredRound,
+		Reset:       e.lastTriggeredResetCount,
+		BeginHeight: e.config.LastPeriodBeginHeight(),
+		CRS:         GetCRSWithPanic(e.gov, e.lastTriggeredRound, e.logger),
+		Config:      GetConfigWithPanic(e.gov, e.lastTriggeredRound, e.logger),
+	}}
+	for _, h := range e.handlers {
+		h(events)
+	}
+}
+
+// ValidateNextRound validates whether the DKG set for the next round is ready
+// to go or failed to be set up; all registered handlers are called once some
+// decision is made on chain.
+//
+// The count of triggered events is returned.
+func (e *RoundEvent) ValidateNextRound(blockHeight uint64) (count uint) {
+	// To make triggers continuous and sequential, the next validation should
+	// wait for the previous one to finish. That's why a mutex is used here
+	// directly.
+	var events []RoundEventParam
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	e.logger.Trace("ValidateNextRound",
+		"height", blockHeight,
+		"round", e.lastTriggeredRound,
+		"count", e.lastTriggeredResetCount)
+	defer func() {
+		count = uint(len(events))
+		if count == 0 {
+			return
+		}
+		for _, h := range e.handlers {
+			// To make sure all handlers receive triggers sequentially, we
+			// can't spawn goroutines here.
+			h(events)
+		}
+	}()
+	var (
+		triggered   bool
+		param       RoundEventParam
+		beginHeight = blockHeight
+		startRound  = e.lastTriggeredRound
+	)
+	for {
+		param, triggered = e.check(beginHeight, startRound)
+		if !triggered {
+			break
+		}
+		events = append(events, param)
+		beginHeight = param.BeginHeight
+	}
+	return
+}
+
+func (e *RoundEvent) check(blockHeight, startRound uint64) (
+	param RoundEventParam, triggered bool) {
+	defer func() {
+		if !triggered {
+			return
+		}
+		// A simple assertion to make sure we didn't pick the wrong round.
+		if e.config.RoundID() != e.lastTriggeredRound {
+			panic(fmt.Errorf("Triggered round not matched: %d, %d",
+				e.config.RoundID(), e.lastTriggeredRound))
+		}
+		param.Round = e.lastTriggeredRound
+		param.Reset = e.lastTriggeredResetCount
+		param.BeginHeight = e.config.LastPeriodBeginHeight()
+		param.CRS = GetCRSWithPanic(e.gov, e.lastTriggeredRound, e.logger)
+		param.Config = GetConfigWithPanic(e.gov, e.lastTriggeredRound, e.logger)
+		e.logger.Info("New RoundEvent triggered",
+			"round", e.lastTriggeredRound,
+			"reset", e.lastTriggeredResetCount,
+			"begin-height", e.config.LastPeriodBeginHeight(),
+			"crs", param.CRS.String()[:6],
+		)
+	}()
+	nextRound := e.lastTriggeredRound + 1
+	if nextRound >= startRound+e.roundShift {
+		// Avoid accessing a configuration newer than the last confirmed one
+		// by more than 'roundShift' rounds. The fullnode might crash if we
+		// access it before it knows about it.
+		return
+	}
+	nextCfg := GetConfigWithPanic(e.gov, nextRound, e.logger)
+	resetCount := e.gov.DKGResetCount(nextRound)
+	if resetCount > e.lastTriggeredResetCount {
+		e.lastTriggeredResetCount++
+		e.config.ExtendLength()
+		e.gpkInvalid = false
+		triggered = true
+		return
+	}
+	if e.gpkInvalid {
+		// We know that DKG already failed; wait for the DKG set from the
+		// previous round to reset DKG, so we don't have to reconstruct the
+		// group public key again.
+		return
+	}
+	if nextRound >= dkgDelayRound {
+		var ok bool
+		ok, e.gpkInvalid = IsDKGValid(
+			e.gov, e.logger, nextRound, e.lastTriggeredResetCount)
+		if !ok {
+			return
+		}
+	}
+	// The DKG set for the next round is well prepared.
+	e.lastTriggeredRound = nextRound
+	e.lastTriggeredResetCount = 0
+	e.gpkInvalid = false
+	rCfg := RoundBasedConfig{}
+	rCfg.SetupRoundBasedFields(nextRound, nextCfg)
+	rCfg.AppendTo(e.config)
+	e.config = rCfg
+	triggered = true
+	return
+}
+
+// Stop the event source and block until the last trigger returns.
+func (e *RoundEvent) Stop() {
+	e.ctxCancel()
+}
+
+// LastPeriod returns block-height-related info of the last period, including
+// its begin height and round length.
+func (e *RoundEvent) LastPeriod() (begin uint64, length uint64) {
+	e.lock.Lock()
+	defer e.lock.Unlock()
+	begin = e.config.LastPeriodBeginHeight()
+	length = e.config.RoundEndHeight() - e.config.LastPeriodBeginHeight()
+	return
+}
diff --git a/dex/consensus/core/utils/signer.go b/dex/consensus/core/utils/signer.go
new file mode 100644
index 000000000..fe9ea9095
--- /dev/null
+++ b/dex/consensus/core/utils/signer.go
@@ -0,0 +1,167 @@
+// Copyright 2018 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"errors"
+
+	"github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa"
+
+	"github.com/dexon-foundation/dexon-consensus/common"
+	"github.com/dexon-foundation/dexon-consensus/core/crypto"
+	"github.com/dexon-foundation/dexon-consensus/core/types"
+	typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+	dexCrypto "github.com/dexon-foundation/dexon/crypto"
+)
+
+// Errors for signer.
+var (
+	ErrInvalidProposerID  = errors.New("invalid proposer id")
+	ErrIncorrectHash      = errors.New("hash of block is incorrect")
+	ErrIncorrectSignature = errors.New("signature of block is incorrect")
+	ErrNoBLSSigner        = errors.New("bls signer not set")
+)
+
+type blsSigner func(round uint64, hash common.Hash) (crypto.Signature, error)
+
+// Signer signs a segment of data.
+type Signer struct {
+	prvKey     crypto.PrivateKey
+	pubKey     crypto.PublicKey
+	proposerID types.NodeID
+	blsSign    blsSigner
+}
+
+// NewSigner constructs a Signer instance.
+func NewSigner(prvKey crypto.PrivateKey) (s *Signer) {
+	s = &Signer{
+		prvKey: prvKey,
+		pubKey: prvKey.PublicKey(),
+	}
+	s.proposerID = types.NewNodeID(s.pubKey)
+	return
+}
+
+// SetBLSSigner sets the function used to sign a CRSSignature.
+func (s *Signer) SetBLSSigner(signer blsSigner) {
+	s.blsSign = signer
+}
+
+// SignBlock signs a types.Block.
+func (s *Signer) SignBlock(b *types.Block) (err error) {
+	b.ProposerID = s.proposerID
+	b.PayloadHash = crypto.Keccak256Hash(b.Payload)
+	if b.Hash, err = HashBlock(b); err != nil {
+		return
+	}
+	if b.Signature, err = s.prvKey.Sign(b.Hash); err != nil {
+		return
+	}
+	return
+}
+
+// SignTmpBlock signs a types.TmpBlock.
+func (s *Signer) SignTmpBlock(b *types.TmpBlock) (err error) {
+	pk := s.prvKey.PublicKey().(*ecdsa.PublicKey).Oops()
+	b.ProposerID = dexCrypto.PubkeyToAddress(*pk)
+	b.PayloadHash = dexCrypto.Keccak256Hash(b.Payload)
+	if b.Hash, err = HashTmpBlock(b); err != nil {
+		return
+	}
+	return
+}
+
+// SignVote signs a types.Vote.
+func (s *Signer) SignVote(v *types.Vote) (err error) {
+	v.ProposerID = s.proposerID
+	v.Signature, err = s.prvKey.Sign(HashVote(v))
+	return
+}
+
+// SignCRS signs the CRS signature of a types.Block.
+func (s *Signer) SignCRS(b *types.Block, crs common.Hash) (err error) {
+	if b.ProposerID != s.proposerID {
+		err = ErrInvalidProposerID
+		return
+	}
+	if b.Position.Round < dkgDelayRound {
+		hash := hashCRS(b, crs)
+		b.CRSSignature = crypto.Signature{
+			Type:      "bls",
+			Signature: hash[:],
+		}
+		return
+	}
+	if s.blsSign == nil {
+		err = ErrNoBLSSigner
+		return
+	}
+	b.CRSSignature, err = s.blsSign(b.Position.Round, hashCRS(b, crs))
+	return
+}
+
+// SignDKGComplaint signs a DKG complaint.
+func (s *Signer) SignDKGComplaint(complaint *typesDKG.Complaint) (err error) {
+	complaint.ProposerID = s.proposerID
+	complaint.Signature, err = s.prvKey.Sign(hashDKGComplaint(complaint))
+	return
+}
+
+// SignDKGMasterPublicKey signs a DKG master public key.
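+//
+// A minimal usage sketch (illustrative; the key construction mirrors this
+// package's tests and is not part of this changeset):
+//
+//	prv, _ := ecdsa.NewPrivateKey()
+//	signer := NewSigner(prv)
+//	mpk := &typesDKG.MasterPublicKey{Round: 1}
+//	_ = signer.SignDKGMasterPublicKey(mpk) // fills ProposerID and Signature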
+func (s *Signer) SignDKGMasterPublicKey( + mpk *typesDKG.MasterPublicKey) (err error) { + mpk.ProposerID = s.proposerID + mpk.Signature, err = s.prvKey.Sign(hashDKGMasterPublicKey(mpk)) + return +} + +// SignDKGPrivateShare signs a DKG private share. +func (s *Signer) SignDKGPrivateShare( + prvShare *typesDKG.PrivateShare) (err error) { + prvShare.ProposerID = s.proposerID + prvShare.Signature, err = s.prvKey.Sign(hashDKGPrivateShare(prvShare)) + return +} + +// SignDKGPartialSignature signs a DKG partial signature. +func (s *Signer) SignDKGPartialSignature( + pSig *typesDKG.PartialSignature) (err error) { + pSig.ProposerID = s.proposerID + pSig.Signature, err = s.prvKey.Sign(hashDKGPartialSignature(pSig)) + return +} + +// SignDKGMPKReady signs a DKG ready message. +func (s *Signer) SignDKGMPKReady(ready *typesDKG.MPKReady) (err error) { + ready.ProposerID = s.proposerID + ready.Signature, err = s.prvKey.Sign(hashDKGMPKReady(ready)) + return +} + +// SignDKGFinalize signs a DKG finalize message. +func (s *Signer) SignDKGFinalize(final *typesDKG.Finalize) (err error) { + final.ProposerID = s.proposerID + final.Signature, err = s.prvKey.Sign(hashDKGFinalize(final)) + return +} + +// SignDKGSuccess signs a DKG success message. +func (s *Signer) SignDKGSuccess(success *typesDKG.Success) (err error) { + success.ProposerID = s.proposerID + success.Signature, err = s.prvKey.Sign(hashDKGSuccess(success)) + return +} diff --git a/dex/consensus/core/utils/signer_test.go b/dex/consensus/core/utils/signer_test.go new file mode 100644 index 000000000..b0faee4da --- /dev/null +++ b/dex/consensus/core/utils/signer_test.go @@ -0,0 +1,105 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. 
+ +package utils + +import ( + "fmt" + "testing" + "time" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/stretchr/testify/suite" +) + +type SignerTestSuite struct { + suite.Suite +} + +func (s *SignerTestSuite) setupSigner() *Signer { + k, err := ecdsa.NewPrivateKey() + s.NoError(err) + return NewSigner(k) +} + +func (s *SignerTestSuite) TestBlock() { + k := s.setupSigner() + b := &types.Block{ + ParentHash: common.NewRandomHash(), + Position: types.Position{ + Round: 2, + Height: 3, + }, + Timestamp: time.Now().UTC(), + } + s.NoError(k.SignBlock(b)) + s.NoError(VerifyBlockSignature(b)) +} + +func (s *SignerTestSuite) TestVote() { + k := s.setupSigner() + v := types.NewVote(types.VoteCom, common.NewRandomHash(), 123) + v.Position = types.Position{ + Round: 4, + Height: 6, + } + v.ProposerID = types.NodeID{Hash: common.NewRandomHash()} + s.NoError(k.SignVote(v)) + ok, err := VerifyVoteSignature(v) + s.True(ok) + s.NoError(err) +} + +func (s *SignerTestSuite) TestCRS() { + dkgDelayRound = 1 + k := s.setupSigner() + b := &types.Block{ + ParentHash: common.NewRandomHash(), + Position: types.Position{ + Round: 0, + Height: 9, + }, + Timestamp: time.Now().UTC(), + } + crs := common.NewRandomHash() + s.Error(k.SignCRS(b, crs)) + // Hash block before hash CRS. + s.NoError(k.SignBlock(b)) + s.NoError(k.SignCRS(b, crs)) + ok := VerifyCRSSignature(b, crs, nil) + s.True(ok) +} + +func (s *SignerTestSuite) TestTmp() { + k := s.setupSigner() + h0 := common.NewRandomHash() + p0 := h0[:] + b0 := &types.Block{Payload: p0} + s.Require().NoError(k.SignBlock(b0)) + + t0 := &types.TmpBlock{Payload: p0} + s.Require().NoError(k.SignTmpBlock(t0)) + fmt.Printf("%+v -- %+v\n", b0.ProposerID.Hash[:], t0.ProposerID) + fmt.Printf("%+v -- %+v\n", b0.PayloadHash[:], t0.PayloadHash) + fmt.Printf("%+v -- %+v\n", b0.Hash[:], t0.Hash) +} + +func TestSigner(t *testing.T) { + suite.Run(t, new(SignerTestSuite)) +} diff --git a/dex/consensus/core/utils/utils.go b/dex/consensus/core/utils/utils.go new file mode 100644 index 000000000..f259f34bb --- /dev/null +++ b/dex/consensus/core/utils/utils.go @@ -0,0 +1,207 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package utils + +import ( + "context" + "fmt" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/types" + typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" +) + +var dkgDelayRound uint64 + +// SetDKGDelayRound sets the variable. 
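+//
+// Rounds earlier than dkgDelayRound bypass DKG-based signing (see
+// Signer.SignCRS in signer.go). A usage sketch, with an illustrative value:
+//
+//	SetDKGDelayRound(1) // call once during initialization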
+func SetDKGDelayRound(delay uint64) {
+	dkgDelayRound = delay
+}
+
+type configAccessor interface {
+	Configuration(round uint64) *types.Config
+}
+
+// GetConfigWithPanic is a helper to access configs, and panic when the config
+// for that round is not ready yet.
+func GetConfigWithPanic(accessor configAccessor, round uint64,
+	logger common.Logger) *types.Config {
+	if logger != nil {
+		logger.Debug("Calling Governance.Configuration", "round", round)
+	}
+	c := accessor.Configuration(round)
+	if c == nil {
+		panic(fmt.Errorf("configuration is not ready %v", round))
+	}
+	return c
+}
+
+type crsAccessor interface {
+	CRS(round uint64) common.Hash
+}
+
+// GetCRSWithPanic is a helper to access the CRS, and panic when the CRS for
+// that round is not ready yet.
+func GetCRSWithPanic(accessor crsAccessor, round uint64,
+	logger common.Logger) common.Hash {
+	if logger != nil {
+		logger.Debug("Calling Governance.CRS", "round", round)
+	}
+	crs := accessor.CRS(round)
+	if (crs == common.Hash{}) {
+		panic(fmt.Errorf("CRS is not ready %v", round))
+	}
+	return crs
+}
+
+// VerifyDKGComplaint verifies if it is a valid DKGComplaint.
+func VerifyDKGComplaint(
+	complaint *typesDKG.Complaint, mpk *typesDKG.MasterPublicKey) (bool, error) {
+	ok, err := VerifyDKGComplaintSignature(complaint)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, nil
+	}
+	if complaint.IsNack() {
+		return true, nil
+	}
+	if complaint.Round != mpk.Round {
+		return false, nil
+	}
+	ok, err = VerifyDKGMasterPublicKeySignature(mpk)
+	if err != nil {
+		return false, err
+	}
+	if !ok {
+		return false, nil
+	}
+	ok, err = mpk.PublicKeyShares.VerifyPrvShare(
+		typesDKG.NewID(complaint.PrivateShare.ReceiverID),
+		&complaint.PrivateShare.PrivateShare)
+	if err != nil {
+		return false, err
+	}
+	return !ok, nil
+}
+
+// LaunchDummyReceiver launches a goroutine to receive from the receive
+// channel of a network module. A context is required to stop the goroutine
+// automatically. An optional message handler could be provided.
+func LaunchDummyReceiver(
+	ctx context.Context, recv <-chan types.Msg, handler func(types.Msg)) (
+	context.CancelFunc, <-chan struct{}) {
+	var (
+		dummyCtx, dummyCancel = context.WithCancel(ctx)
+		finishedChan          = make(chan struct{}, 1)
+	)
+	go func() {
+		defer func() {
+			finishedChan <- struct{}{}
+		}()
+	loop:
+		for {
+			select {
+			case <-dummyCtx.Done():
+				break loop
+			case v, ok := <-recv:
+				if !ok {
+					panic(fmt.Errorf(
+						"receive channel is closed before dummy receiver"))
+				}
+				if handler != nil {
+					handler(v)
+				}
+			}
+		}
+	}()
+	return dummyCancel, finishedChan
+}
+
+// GetDKGThreshold returns the expected threshold for a given DKG set size.
+func GetDKGThreshold(config *types.Config) int {
+	return int(config.NotarySetSize*2/3) + 1
+}
+
+// GetDKGValidThreshold returns the threshold for a DKG set to be considered
+// valid.
+func GetDKGValidThreshold(config *types.Config) int {
+	return int(config.NotarySetSize * 5 / 6)
+}
+
+// GetBAThreshold returns the threshold for BA votes.
+func GetBAThreshold(config *types.Config) int {
+	return int(config.NotarySetSize*2/3 + 1)
+}
+
+// GetNextRoundValidationHeight returns the block height to check if the next
+// round is ready.
+func GetNextRoundValidationHeight(begin, length uint64) uint64 {
+	return begin + length*9/10
+}
+
+// GetRoundHeight wraps the workaround for the round height logic in the
+// fullnode.
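+//
+// A sketch of the expected behaviour (assuming types.GenesisHeight == 1 and
+// that 'gov' implements GetRoundHeight):
+//
+//	_ = GetRoundHeight(gov, 0) // at least types.GenesisHeight, even if the
+//	                           // accessor reports 0 for the genesis round
+//	_ = GetRoundHeight(gov, 5) // whatever gov.GetRoundHeight(5) reports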
+func GetRoundHeight(accessor interface{}, round uint64) uint64 {
+	type roundHeightAccessor interface {
+		GetRoundHeight(round uint64) uint64
+	}
+	accessorInst := accessor.(roundHeightAccessor)
+	height := accessorInst.GetRoundHeight(round)
+	if round == 0 && height < types.GenesisHeight {
+		return types.GenesisHeight
+	}
+	return height
+}
+
+// IsDKGValid checks if DKG is correctly prepared.
+func IsDKGValid(
+	gov governanceAccessor, logger common.Logger, round, reset uint64) (
+	valid bool, gpkInvalid bool) {
+	if !gov.IsDKGFinal(round) {
+		logger.Debug("DKG is not final", "round", round, "reset", reset)
+		return
+	}
+	if !gov.IsDKGSuccess(round) {
+		logger.Debug("DKG is not successful", "round", round, "reset", reset)
+		return
+	}
+	cfg := GetConfigWithPanic(gov, round, logger)
+	gpk, err := typesDKG.NewGroupPublicKey(
+		round,
+		gov.DKGMasterPublicKeys(round),
+		gov.DKGComplaints(round),
+		GetDKGThreshold(cfg))
+	if err != nil {
+		logger.Debug("Group public key setup failed",
+			"round", round,
+			"reset", reset,
+			"error", err)
+		gpkInvalid = true
+		return
+	}
+	if len(gpk.QualifyNodeIDs) < GetDKGValidThreshold(cfg) {
+		logger.Debug("Group public key threshold not reached",
+			"round", round,
+			"reset", reset,
+			"qualified", len(gpk.QualifyNodeIDs))
+		gpkInvalid = true
+		return
+	}
+	valid = true
+	return
+}
diff --git a/dex/consensus/core/utils/utils_test.go b/dex/consensus/core/utils/utils_test.go
new file mode 100644
index 000000000..c6f85433d
--- /dev/null
+++ b/dex/consensus/core/utils/utils_test.go
@@ -0,0 +1,178 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+ +package utils + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg" + "github.com/dexon-foundation/dexon-consensus/core/crypto/ecdsa" + "github.com/dexon-foundation/dexon-consensus/core/types" + typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg" +) + +type UtilsTestSuite struct { + suite.Suite +} + +func (s *UtilsTestSuite) TestVerifyDKGComplaint() { + signComplaint := func(prv crypto.PrivateKey, complaint *typesDKG.Complaint) { + var err error + complaint.Signature, err = prv.Sign(hashDKGComplaint(complaint)) + s.Require().NoError(err) + } + prv1, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + nID1 := types.NewNodeID(prv1.PublicKey()) + + prv2, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + nID2 := types.NewNodeID(prv2.PublicKey()) + + prvShares, pubShares := dkg.NewPrivateKeyShares(3) + mpk := &typesDKG.MasterPublicKey{ + ProposerID: nID1, + DKGID: typesDKG.NewID(nID1), + PublicKeyShares: *pubShares.Move(), + } + mpk.Signature, err = prv1.Sign(hashDKGMasterPublicKey(mpk)) + s.Require().NoError(err) + + // Valid NackComplaint. + complaint := &typesDKG.Complaint{ + ProposerID: nID2, + } + signComplaint(prv2, complaint) + s.Require().True(complaint.IsNack()) + ok, err := VerifyDKGComplaint(complaint, mpk) + s.Require().NoError(err) + s.True(ok) + + // Correct privateShare. + prvShares.SetParticipants(dkg.IDs{typesDKG.NewID(nID1), typesDKG.NewID(nID2)}) + share, exist := prvShares.Share(typesDKG.NewID(nID2)) + s.Require().True(exist) + prvShare := &typesDKG.PrivateShare{ + ProposerID: nID1, + ReceiverID: nID2, + PrivateShare: *share, + } + prvShare.Signature, err = prv1.Sign(hashDKGPrivateShare(prvShare)) + s.Require().NoError(err) + complaint.PrivateShare = *prvShare + signComplaint(prv2, complaint) + ok, err = VerifyDKGComplaint(complaint, mpk) + s.Require().NoError(err) + s.False(ok) + + // Incorrect privateShare. + share, exist = prvShares.Share(typesDKG.NewID(nID1)) + s.Require().True(exist) + prvShare.PrivateShare = *share + prvShare.Signature, err = prv1.Sign(hashDKGPrivateShare(prvShare)) + s.Require().NoError(err) + complaint.PrivateShare = *prvShare + signComplaint(prv2, complaint) + ok, err = VerifyDKGComplaint(complaint, mpk) + s.Require().NoError(err) + s.True(ok) + + // MPK is incorrect. + mpk.Round++ + ok, err = VerifyDKGComplaint(complaint, mpk) + s.Require().NoError(err) + s.False(ok) + + // MPK's proposer not match with prvShares'. 
+	mpk.Round--
+	mpk.ProposerID = nID2
+	mpk.Signature, err = prv1.Sign(hashDKGMasterPublicKey(mpk))
+	s.Require().NoError(err)
+	ok, err = VerifyDKGComplaint(complaint, mpk)
+	s.Require().NoError(err)
+	s.False(ok)
+}
+
+func (s *UtilsTestSuite) TestDummyReceiver() {
+	var (
+		msgCount = 1000
+		fakeMsgs = make([]int, 0, msgCount)
+	)
+	for i := 0; i < msgCount; i++ {
+		fakeMsgs = append(fakeMsgs, i)
+	}
+	launchDummySender := func(msgs []int, inputChan chan<- types.Msg) {
+		finished := make(chan struct{}, 1)
+		go func() {
+			defer func() {
+				finished <- struct{}{}
+			}()
+			for _, v := range msgs {
+				inputChan <- types.Msg{
+					Payload: v,
+				}
+			}
+		}()
+		select {
+		case <-finished:
+		case <-time.After(1 * time.Second):
+			s.Require().FailNow("unable to deliver all messages in time")
+		}
+	}
+	checkBuffer := func(sent []int, buff []types.Msg) {
+		s.Require().Len(buff, len(sent))
+		for i := range sent {
+			s.Require().Equal(sent[i], buff[i].Payload.(int))
+		}
+	}
+	// Basic scenario: a dummy receiver with caching enabled.
+	recv := make(chan types.Msg)
+	buff := []types.Msg{}
+	cancel, finished := LaunchDummyReceiver(
+		context.Background(), recv, func(msg types.Msg) {
+			buff = append(buff, msg)
+		})
+	launchDummySender(fakeMsgs, recv)
+	cancel()
+	select {
+	case <-finished:
+	case <-time.After(1 * time.Second):
+		s.Require().FailNow("should finish after cancel is called")
+	}
+	checkBuffer(fakeMsgs, buff)
+	// The dummy receiver can be shut down along with the parent context, and
+	// caching is not enabled.
+	ctx, cancel := context.WithCancel(context.Background())
+	_, finished = LaunchDummyReceiver(ctx, recv, nil)
+	launchDummySender(fakeMsgs, recv)
+	cancel()
+	select {
+	case <-finished:
+	case <-time.After(1 * time.Second):
+		s.Require().FailNow("should finish after cancel is called")
+	}
+}
+
+func TestUtils(t *testing.T) {
+	suite.Run(t, new(UtilsTestSuite))
+}
diff --git a/dex/consensus/core/utils/vote-filter.go b/dex/consensus/core/utils/vote-filter.go
new file mode 100644
index 000000000..446d88a64
--- /dev/null
+++ b/dex/consensus/core/utils/vote-filter.go
@@ -0,0 +1,72 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+	"github.com/dexon-foundation/dexon-consensus/core/types"
+)
+
+// VoteFilter filters votes that are useless for now.
+// To maximize performance, this structure is not thread-safe and will never
+// be.
+type VoteFilter struct {
+	Voted    map[types.VoteHeader]struct{}
+	Position types.Position
+	LockIter uint64
+	Period   uint64
+	Confirm  bool
+}
+
+// NewVoteFilter creates a new vote filter instance.
+func NewVoteFilter() *VoteFilter {
+	return &VoteFilter{
+		Voted: make(map[types.VoteHeader]struct{}),
+	}
+}
+
+// Filter checks if the vote should be filtered out.
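+//
+// A usage sketch (illustrative; process() and the caller's bookkeeping are
+// assumptions, not part of this changeset):
+//
+//	vf := NewVoteFilter()
+//	vf.Position = currentPosition // advance as consensus progresses
+//	if !vf.Filter(vote) {
+//		process(vote)
+//		vf.AddVote(vote) // repeats of this vote are filtered afterwards
+//	}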
+func (vf *VoteFilter) Filter(vote *types.Vote) bool { + if vote.Type == types.VoteInit { + return true + } + if vote.Position.Older(vf.Position) { + return true + } else if vote.Position.Newer(vf.Position) { + // It's impossible to check the vote of other height. + return false + } + if vf.Confirm { + return true + } + if vote.Type == types.VotePreCom && vote.Period < vf.LockIter { + return true + } + if vote.Type == types.VoteCom && + vote.Period < vf.Period && + vote.BlockHash == types.SkipBlockHash { + return true + } + if _, exist := vf.Voted[vote.VoteHeader]; exist { + return true + } + return false +} + +// AddVote to the filter so the same vote will be filtered. +func (vf *VoteFilter) AddVote(vote *types.Vote) { + vf.Voted[vote.VoteHeader] = struct{}{} +} diff --git a/dex/consensus/core/utils/vote-filter_test.go b/dex/consensus/core/utils/vote-filter_test.go new file mode 100644 index 000000000..4e7a7b98a --- /dev/null +++ b/dex/consensus/core/utils/vote-filter_test.go @@ -0,0 +1,100 @@ +// Copyright 2019 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package utils + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/types" +) + +type VoteFilterTestSuite struct { + suite.Suite +} + +func (s *VoteFilterTestSuite) TestFilterVotePass() { + filter := NewVoteFilter() + filter.Position.Height = uint64(6) + filter.Period = uint64(3) + filter.LockIter = uint64(3) + // Pass with higher Height. + vote := types.NewVote(types.VotePreCom, common.NewRandomHash(), uint64(1)) + vote.Position.Height = filter.Position.Height + 1 + s.Require().False(filter.Filter(vote)) + // Pass with VotePreCom. + vote = types.NewVote(types.VotePreCom, common.NewRandomHash(), + filter.LockIter) + vote.Position.Height = filter.Position.Height + s.Require().False(filter.Filter(vote)) + // Pass with VoteCom. 
+ vote = types.NewVote(types.VoteCom, common.NewRandomHash(), + filter.Period) + vote.Position.Height = filter.Position.Height + s.Require().False(filter.Filter(vote)) + vote.Period-- + s.Require().False(filter.Filter(vote)) +} + +func (s *VoteFilterTestSuite) TestFilterVoteInit() { + filter := NewVoteFilter() + vote := types.NewVote(types.VoteInit, common.NewRandomHash(), uint64(1)) + s.True(filter.Filter(vote)) +} + +func (s *VoteFilterTestSuite) TestFilterVotePreCom() { + filter := NewVoteFilter() + filter.LockIter = uint64(3) + vote := types.NewVote(types.VotePreCom, common.NewRandomHash(), uint64(1)) + s.True(filter.Filter(vote)) +} + +func (s *VoteFilterTestSuite) TestFilterVoteCom() { + filter := NewVoteFilter() + filter.Period = uint64(3) + vote := types.NewVote(types.VoteCom, types.SkipBlockHash, uint64(1)) + s.True(filter.Filter(vote)) +} + +func (s *VoteFilterTestSuite) TestFilterConfirm() { + filter := NewVoteFilter() + filter.Confirm = true + vote := types.NewVote(types.VoteCom, common.NewRandomHash(), uint64(1)) + s.True(filter.Filter(vote)) +} +func (s *VoteFilterTestSuite) TestFilterLowerHeight() { + filter := NewVoteFilter() + filter.Position.Height = uint64(10) + vote := types.NewVote(types.VoteCom, common.NewRandomHash(), uint64(1)) + vote.Position.Height = filter.Position.Height - 1 + s.True(filter.Filter(vote)) +} + +func (s *VoteFilterTestSuite) TestFilterSameVote() { + filter := NewVoteFilter() + vote := types.NewVote(types.VoteCom, common.NewRandomHash(), uint64(5)) + s.False(filter.Filter(vote)) + filter.AddVote(vote) + s.True(filter.Filter(vote)) +} + +func TestVoteFilter(t *testing.T) { + suite.Run(t, new(VoteFilterTestSuite)) +} diff --git a/dex/consensus/core/utils_test.go b/dex/consensus/core/utils_test.go new file mode 100644 index 000000000..560e92369 --- /dev/null +++ b/dex/consensus/core/utils_test.go @@ -0,0 +1,124 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package core + +import ( + "testing" + "time" + + "github.com/stretchr/testify/suite" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core/test" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/dexon-foundation/dexon-consensus/core/utils" +) + +type UtilsTestSuite struct { + suite.Suite +} + +func (s *UtilsTestSuite) TestRemoveFromSortedUint32Slice() { + // Remove something exists. + xs := []uint32{1, 2, 3, 4, 5} + s.Equal( + removeFromSortedUint32Slice(xs, 3), + []uint32{1, 2, 4, 5}) + // Remove something not exists. + s.Equal(removeFromSortedUint32Slice(xs, 6), xs) + // Remove from empty slice, should not panic. 
+	s.Equal([]uint32{}, removeFromSortedUint32Slice([]uint32{}, 1))
+}
+
+func (s *UtilsTestSuite) TestVerifyAgreementResult() {
+	prvKeys, pubKeys, err := test.NewKeys(4)
+	s.Require().NoError(err)
+	gov, err := test.NewGovernance(test.NewState(DKGDelayRound,
+		pubKeys, time.Second, &common.NullLogger{}, true), ConfigRoundShift)
+	s.Require().NoError(err)
+	cache := utils.NewNodeSetCache(gov)
+	hash := common.NewRandomHash()
+	signers := make([]*utils.Signer, 0, len(prvKeys))
+	for _, prvKey := range prvKeys {
+		signers = append(signers, utils.NewSigner(prvKey))
+	}
+	pos := types.Position{
+		Round:  0,
+		Height: 20,
+	}
+	baResult := &types.AgreementResult{
+		BlockHash: hash,
+		Position:  pos,
+	}
+	for _, signer := range signers {
+		vote := types.NewVote(types.VoteCom, hash, 0)
+		vote.Position = pos
+		s.Require().NoError(signer.SignVote(vote))
+		baResult.Votes = append(baResult.Votes, *vote)
+	}
+	s.Require().NoError(VerifyAgreementResult(baResult, cache))
+
+	// Test negative cases.
+	// All periods should be the same.
+	baResult.Votes[1].Period++
+	s.Equal(ErrIncorrectVotePeriod, VerifyAgreementResult(baResult, cache))
+	baResult.Votes[1].Period--
+
+	// BlockHash should match the one in votes.
+	baResult.BlockHash = common.NewRandomHash()
+	s.Equal(ErrIncorrectVoteBlockHash, VerifyAgreementResult(baResult, cache))
+	baResult.BlockHash = hash
+
+	// Position should match.
+	baResult.Position.Height++
+	s.Equal(ErrIncorrectVotePosition, VerifyAgreementResult(baResult, cache))
+	baResult.Position = pos
+
+	// types.VotePreCom is not accepted in an agreement result.
+	baResult.Votes[0].Type = types.VotePreCom
+	s.Equal(ErrIncorrectVoteType, VerifyAgreementResult(baResult, cache))
+	baResult.Votes[0].Type = types.VoteCom
+
+	// Vote types should be the same.
+	baResult.Votes[1].Type = types.VoteFastCom
+	s.Equal(ErrIncorrectVoteType, VerifyAgreementResult(baResult, cache))
+	baResult.Votes[1].Type = types.VoteCom
+
+	// Only votes proposed by the notary set are valid.
+	baResult.Votes[0].ProposerID = types.NodeID{Hash: common.NewRandomHash()}
+	s.Equal(ErrIncorrectVoteProposer, VerifyAgreementResult(baResult, cache))
+	baResult.Votes[0].ProposerID = types.NewNodeID(pubKeys[0])
+
+	// Votes should have a valid signature.
+	baResult.Votes[0].Signature, err = prvKeys[0].Sign(common.NewRandomHash())
+	s.Require().NoError(err)
+	s.Equal(ErrIncorrectVoteSignature, VerifyAgreementResult(baResult, cache))
+	s.Require().NoError(signers[0].SignVote(&baResult.Votes[0]))
+
+	// Unique votes should be more than the threshold.
+	baResult.Votes = baResult.Votes[:1]
+	s.Equal(ErrNotEnoughVotes, VerifyAgreementResult(baResult, cache))
+	for range signers {
+		baResult.Votes = append(baResult.Votes, baResult.Votes[0])
+	}
+	s.Equal(ErrNotEnoughVotes, VerifyAgreementResult(baResult, cache))
+}
+
+func TestUtils(t *testing.T) {
+	suite.Run(t, new(UtilsTestSuite))
+}
diff --git a/dex/consensus/integration_test/byzantine_test.go b/dex/consensus/integration_test/byzantine_test.go
new file mode 100644
index 000000000..34c59f76c
--- /dev/null
+++ b/dex/consensus/integration_test/byzantine_test.go
@@ -0,0 +1,269 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package integration + +import ( + "context" + "fmt" + "log" + "os" + "sync" + "testing" + "time" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/db" + "github.com/dexon-foundation/dexon-consensus/core/test" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/dexon-foundation/dexon-consensus/core/utils" + "github.com/stretchr/testify/suite" +) + +// There is no scheduler in these tests, we need to wait a long period to make +// sure these tests are ok. +type ByzantineTestSuite struct { + suite.Suite + + directLatencyModel map[types.NodeID]test.LatencyModel +} + +func (s *ByzantineTestSuite) SetupTest() { + s.directLatencyModel = make(map[types.NodeID]test.LatencyModel) +} + +func (s *ByzantineTestSuite) setupNodes( + dMoment time.Time, + prvKeys []crypto.PrivateKey, + seedGov *test.Governance) map[types.NodeID]*node { + var ( + wg sync.WaitGroup + ) + // Setup peer server at transport layer. + server := test.NewFakeTransportServer() + serverChannel, err := server.Host() + s.Require().NoError(err) + // setup nodes. + nodes := make(map[types.NodeID]*node) + wg.Add(len(prvKeys)) + for i, k := range prvKeys { + dbInst, err := db.NewMemBackedDB() + s.Require().NoError(err) + nID := types.NewNodeID(k.PublicKey()) + // Prepare essential modules: app, gov, db. + var directLatencyModel test.LatencyModel + if model, exist := s.directLatencyModel[nID]; exist { + directLatencyModel = model + } else { + directLatencyModel = &test.FixedLatencyModel{} + } + networkModule := test.NewNetwork(k.PublicKey(), test.NetworkConfig{ + Type: test.NetworkTypeFake, + DirectLatency: directLatencyModel, + GossipLatency: &test.FixedLatencyModel{}, + Marshaller: test.NewDefaultMarshaller(nil)}, + ) + gov := seedGov.Clone() + gov.SwitchToRemoteMode(networkModule) + gov.NotifyRound(0, types.GenesisHeight) + networkModule.AttachNodeSetCache(utils.NewNodeSetCache(gov)) + f, err := os.Create(fmt.Sprintf("log.%d.log", i)) + if err != nil { + panic(err) + } + logger := common.NewCustomLogger(log.New(f, "", log.LstdFlags|log.Lmicroseconds)) + app := test.NewApp(1, gov, nil) + nodes[nID] = &node{ + ID: nID, + app: app, + gov: gov, + db: dbInst, + network: networkModule, + logger: logger, + } + go func() { + defer wg.Done() + s.Require().NoError(networkModule.Setup(serverChannel)) + go networkModule.Run() + }() + } + // Make sure transport layer is ready. + s.Require().NoError(server.WaitForPeers(uint32(len(prvKeys)))) + wg.Wait() + for _, k := range prvKeys { + node := nodes[types.NewNodeID(k.PublicKey())] + // Now is the consensus module. 
+ node.con = core.NewConsensus( + dMoment, + node.app, + node.gov, + node.db, + node.network, + k, + node.logger, + ) + } + return nodes +} + +func (s *ByzantineTestSuite) verifyNodes(nodes map[types.NodeID]*node) { + for ID, node := range nodes { + s.Require().NoError(test.VerifyDB(node.db)) + s.Require().NoError(node.app.Verify()) + for otherID, otherNode := range nodes { + if ID == otherID { + continue + } + s.Require().NoError(node.app.Compare(otherNode.app)) + } + } +} + +func (s *ByzantineTestSuite) TestOneSlowNodeOneDeadNode() { + // 4 nodes setup with one slow node and one dead node. + // The network of slow node is very slow. + var ( + req = s.Require() + peerCount = 4 + dMoment = time.Now().UTC() + untilRound = uint64(3) + ) + if testing.Short() { + untilRound = 1 + } + prvKeys, pubKeys, err := test.NewKeys(peerCount) + req.NoError(err) + // Setup seed governance instance. Give a short latency to make this test + // run faster. + lambda := 100 * time.Millisecond + seedGov, err := test.NewGovernance( + test.NewState(core.DKGDelayRound, + pubKeys, lambda, &common.NullLogger{}, true), + core.ConfigRoundShift) + req.NoError(err) + req.NoError(seedGov.State().RequestChange( + test.StateChangeRoundLength, uint64(100))) + slowNodeID := types.NewNodeID(pubKeys[0]) + deadNodeID := types.NewNodeID(pubKeys[1]) + s.directLatencyModel[slowNodeID] = &test.FixedLatencyModel{ + Latency: lambda.Seconds() * 1000 * 2, + } + nodes := s.setupNodes(dMoment, prvKeys, seedGov) + for _, n := range nodes { + if n.ID == deadNodeID { + continue + } + go n.con.Run() + defer n.con.Stop() + } + // Clean deadNode's network receive channel, or it might exceed the limit + // and block other go routines. + dummyReceiverCtxCancel, _ := utils.LaunchDummyReceiver( + context.Background(), nodes[deadNodeID].network.ReceiveChan(), nil) + defer dummyReceiverCtxCancel() +Loop: + for { + <-time.After(5 * time.Second) + fmt.Println("check latest position delivered by each node") + for _, n := range nodes { + if n.ID == deadNodeID { + continue + } + latestPos := n.app.GetLatestDeliveredPosition() + fmt.Println("latestPos", n.ID, &latestPos) + if latestPos.Round < untilRound { + continue Loop + } + } + // Oh ya. + break + } + delete(nodes, deadNodeID) + s.verifyNodes(nodes) +} + +type voteCensor struct{} + +func (vc *voteCensor) Censor(msg interface{}) bool { + _, ok := msg.(*types.Vote) + return ok +} + +func (s *ByzantineTestSuite) TestOneNodeWithoutVote() { + // 4 nodes setup with one node's votes been censored. + // so it will always do syncing BA. + var ( + req = s.Require() + peerCount = 4 + dMoment = time.Now().UTC() + untilRound = uint64(3) + tolerence = uint64(2) + ) + if testing.Short() { + untilRound = 2 + } + prvKeys, pubKeys, err := test.NewKeys(peerCount) + req.NoError(err) + // Setup seed governance instance. Give a short latency to make this test + // run faster. 
+ lambda := 100 * time.Millisecond + seedGov, err := test.NewGovernance( + test.NewState(core.DKGDelayRound, + pubKeys, lambda, &common.NullLogger{}, true), + core.ConfigRoundShift) + req.NoError(err) + req.NoError(seedGov.State().RequestChange( + test.StateChangeRoundLength, uint64(100))) + votelessNodeID := types.NewNodeID(pubKeys[0]) + nodes := s.setupNodes(dMoment, prvKeys, seedGov) + votelessNode := nodes[votelessNodeID] + votelessNode.network.SetCensor(&voteCensor{}, &voteCensor{}) + for _, n := range nodes { + go n.con.Run() + defer n.con.Stop() + } +Loop: + for { + <-time.After(5 * time.Second) + fmt.Println("check latest position delivered by voteless node") + latestPos := votelessNode.app.GetLatestDeliveredPosition() + fmt.Println("latestPos", votelessNode.ID, &latestPos) + for _, n := range nodes { + if n.ID == votelessNodeID { + continue + } + otherPos := n.app.GetLatestDeliveredPosition() + if otherPos.Newer(latestPos) { + fmt.Println("otherPos", n.ID, &otherPos) + s.Require().True( + otherPos.Height-latestPos.Height <= tolerence) + } + } + if latestPos.Round < untilRound { + continue Loop + } + // Oh ya. + break + } + s.verifyNodes(nodes) +} + +func TestByzantine(t *testing.T) { + suite.Run(t, new(ByzantineTestSuite)) +} diff --git a/dex/consensus/integration_test/consensus_test.go b/dex/consensus/integration_test/consensus_test.go new file mode 100644 index 000000000..bf2970ac9 --- /dev/null +++ b/dex/consensus/integration_test/consensus_test.go @@ -0,0 +1,772 @@ +// Copyright 2018 The dexon-consensus Authors +// This file is part of the dexon-consensus library. +// +// The dexon-consensus library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus library. If not, see +// <http://www.gnu.org/licenses/>. + +package integration + +import ( + "context" + "fmt" + "log" + "os" + "sync" + "testing" + "time" + + "github.com/dexon-foundation/dexon-consensus/common" + "github.com/dexon-foundation/dexon-consensus/core" + "github.com/dexon-foundation/dexon-consensus/core/crypto" + "github.com/dexon-foundation/dexon-consensus/core/db" + "github.com/dexon-foundation/dexon-consensus/core/syncer" + "github.com/dexon-foundation/dexon-consensus/core/test" + "github.com/dexon-foundation/dexon-consensus/core/types" + "github.com/dexon-foundation/dexon-consensus/core/utils" + "github.com/stretchr/testify/suite" +) + +// There is no scheduler in these tests, we need to wait a long period to make +// sure these tests are ok. +type ConsensusTestSuite struct { + suite.Suite +} + +// A round event handler to purge utils.NodeSetCache in test.Network. 
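+// Wiring sketch: tests below register it on each node's RoundEvent, e.g.
+//
+//	n.rEvt.Register(purgeHandlerGen(n.network))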
+func purgeHandlerGen(n *test.Network) func([]utils.RoundEventParam) { + return func(evts []utils.RoundEventParam) { + for _, e := range evts { + if e.Reset == 0 { + continue + } + n.PurgeNodeSetCache(e.Round + 1) + } + } +} + +func govHandlerGen( + round, reset uint64, + g *test.Governance, + doer func(*test.Governance)) func([]utils.RoundEventParam) { + return func(evts []utils.RoundEventParam) { + for _, e := range evts { + if e.Round == round && e.Reset == reset { + doer(g) + } + } + } + +} + +type node struct { + ID types.NodeID + con *core.Consensus + app *test.App + gov *test.Governance + rEvt *utils.RoundEvent + db db.Database + network *test.Network + logger common.Logger +} + +func prohibitDKG(gov *test.Governance) { + gov.Prohibit(test.StateAddDKGMasterPublicKey) + gov.Prohibit(test.StateAddDKGFinal) + gov.Prohibit(test.StateAddDKGComplaint) +} + +func prohibitDKGExceptFinalize(gov *test.Governance) { + gov.Prohibit(test.StateAddDKGMasterPublicKey) + gov.Prohibit(test.StateAddDKGComplaint) +} + +func unprohibitDKG(gov *test.Governance) { + gov.Unprohibit(test.StateAddDKGMasterPublicKey) + gov.Unprohibit(test.StateAddDKGFinal) + gov.Unprohibit(test.StateAddDKGComplaint) +} + +func (s *ConsensusTestSuite) setupNodes( + dMoment time.Time, + prvKeys []crypto.PrivateKey, + seedGov *test.Governance) map[types.NodeID]*node { + var ( + wg sync.WaitGroup + initRound uint64 + ) + // Setup peer server at transport layer. + server := test.NewFakeTransportServer() + serverChannel, err := server.Host() + s.Require().NoError(err) + // setup nodes. + nodes := make(map[types.NodeID]*node) + wg.Add(len(prvKeys)) + for i, k := range prvKeys { + dbInst, err := db.NewMemBackedDB() + s.Require().NoError(err) + // Prepare essential modules: app, gov, db. + networkModule := test.NewNetwork(k.PublicKey(), test.NetworkConfig{ + Type: test.NetworkTypeFake, + DirectLatency: &test.FixedLatencyModel{}, + GossipLatency: &test.FixedLatencyModel{}, + Marshaller: test.NewDefaultMarshaller(nil)}, + ) + gov := seedGov.Clone() + gov.SwitchToRemoteMode(networkModule) + gov.NotifyRound(initRound, types.GenesisHeight) + networkModule.AttachNodeSetCache(utils.NewNodeSetCache(gov)) + f, err := os.Create(fmt.Sprintf("log.%d.log", i)) + if err != nil { + panic(err) + } + logger := common.NewCustomLogger(log.New(f, "", log.LstdFlags|log.Lmicroseconds)) + rEvt, err := utils.NewRoundEvent(context.Background(), gov, logger, + types.Position{Height: types.GenesisHeight}, core.ConfigRoundShift) + s.Require().NoError(err) + nID := types.NewNodeID(k.PublicKey()) + nodes[nID] = &node{ + ID: nID, + app: test.NewApp(initRound+1, gov, rEvt), + gov: gov, + db: dbInst, + logger: logger, + rEvt: rEvt, + network: networkModule, + } + go func() { + defer wg.Done() + s.Require().NoError(networkModule.Setup(serverChannel)) + go networkModule.Run() + }() + } + // Make sure transport layer is ready. + s.Require().NoError(server.WaitForPeers(uint32(len(prvKeys)))) + wg.Wait() + for _, k := range prvKeys { + node := nodes[types.NewNodeID(k.PublicKey())] + // Now is the consensus module. 
+ node.con = core.NewConsensus( + dMoment, + node.app, + node.gov, + node.db, + node.network, + k, + node.logger, + ) + } + return nodes +} + +func (s *ConsensusTestSuite) verifyNodes(nodes map[types.NodeID]*node) { + for ID, node := range nodes { + s.Require().NoError(test.VerifyDB(node.db)) + s.Require().NoError(node.app.Verify()) + for otherID, otherNode := range nodes { + if ID == otherID { + continue + } + s.Require().NoError(node.app.Compare(otherNode.app)) + } + } +} + +func (s *ConsensusTestSuite) syncBlocksWithSomeNode( + sourceNode, syncNode *node, + syncerObj *syncer.Consensus, + nextSyncHeight uint64) ( + syncedCon *core.Consensus, syncerHeight uint64, err error) { + syncerHeight = nextSyncHeight + // Setup revealer. + DBAll, err := sourceNode.db.GetAllBlocks() + if err != nil { + return + } + r, err := test.NewBlockRevealerByPosition(DBAll, nextSyncHeight) + if err != nil { + return + } + // Load all blocks from revealer and dump them into syncer. + var compactionChainBlocks []*types.Block + syncBlocks := func() (done bool) { + // Apply txs in blocks to make sure our governance instance is ready. + // This action should be performed by fullnode in production mode. + for _, b := range compactionChainBlocks { + if err = syncNode.gov.State().Apply(b.Payload); err != nil { + if err != test.ErrDuplicatedChange { + return + } + err = nil + } + // Sync app. + syncNode.app.BlockConfirmed(*b) + syncNode.app.BlockDelivered(b.Hash, b.Position, b.Randomness) + // Sync gov. + syncNode.gov.CatchUpWithRound( + b.Position.Round + core.ConfigRoundShift) + } + var synced bool + synced, err = syncerObj.SyncBlocks(compactionChainBlocks, true) + if err != nil { + done = true + } + if synced { + syncedCon, err = syncerObj.GetSyncedConsensus() + done = true + } + compactionChainBlocks = nil + return + } + for { + var b types.Block + b, err = r.NextBlock() + if err != nil { + if err == db.ErrIterationFinished { + err = nil + if syncBlocks() { + break + } + } + break + } + syncerHeight = b.Position.Height + 1 + compactionChainBlocks = append(compactionChainBlocks, &b) + if len(compactionChainBlocks) >= 20 { + if syncBlocks() { + break + } + } + } + return +} + +func (s *ConsensusTestSuite) TestSimple() { + if testing.Short() { + // All other tests will cover this basic case. To speed up CI process, + // ignore this test in short mode. + return + } + // The simplest test case: + // - Node set is equals to DKG set and notary set for each chain in each + // round. + // - No configuration change. + // - 4 rounds (0, 1 are genesis rounds, round 2 would be ready when the + // first block delivered. Test until round 3 should be enough. + var ( + req = s.Require() + peerCount = 4 + dMoment = time.Now().UTC() + untilRound = uint64(5) + ) + if testing.Short() { + untilRound = 2 + } + prvKeys, pubKeys, err := test.NewKeys(peerCount) + req.NoError(err) + // Setup seed governance instance. Give a short latency to make this test + // run faster. + seedGov, err := test.NewGovernance( + test.NewState(core.DKGDelayRound, + pubKeys, 100*time.Millisecond, &common.NullLogger{}, true), + core.ConfigRoundShift) + req.NoError(err) + req.NoError(seedGov.State().RequestChange( + test.StateChangeRoundLength, uint64(100))) + // A short round interval. 
+ nodes := s.setupNodes(dMoment, prvKeys, seedGov) + for _, n := range nodes { + go n.con.Run() + defer n.con.Stop() + } +Loop: + for { + <-time.After(5 * time.Second) + for _, n := range nodes { + latestPos := n.app.GetLatestDeliveredPosition() + fmt.Println("latestPos", n.ID, &latestPos) + if latestPos.Round < untilRound { + continue Loop + } + } + // Oh ya. + break + } + s.verifyNodes(nodes) +} + +func (s *ConsensusTestSuite) TestSetSizeChange() { + var ( + req = s.Require() + peerCount = 7 + dMoment = time.Now().UTC() + untilRound = uint64(5) + ) + if testing.Short() { + // Short test won't test configuration change packed as payload of + // blocks and applied when delivered. + untilRound = 5 + } + prvKeys, pubKeys, err := test.NewKeys(peerCount) + req.NoError(err) + // Setup seed governance instance. + seedGov, err := test.NewGovernance( + test.NewState(core.DKGDelayRound, pubKeys, 100*time.Millisecond, &common.NullLogger{}, true), + core.ConfigRoundShift) + req.NoError(err) + req.NoError(seedGov.State().RequestChange( + test.StateChangeRoundLength, uint64(100))) + req.NoError(seedGov.State().RequestChange( + test.StateChangeNotarySetSize, uint32(4))) + seedGov.CatchUpWithRound(0) + // Setup configuration for round 0 and round 1. + req.NoError(seedGov.State().RequestChange( + test.StateChangeRoundLength, uint64(100))) + req.NoError(seedGov.State().RequestChange( + test.StateChangeNotarySetSize, uint32(5))) + seedGov.CatchUpWithRound(1) + // Setup configuration for round 2. + req.NoError(seedGov.State().RequestChange( + test.StateChangeRoundLength, uint64(100))) + req.NoError(seedGov.State().RequestChange( + test.StateChangeNotarySetSize, uint32(6))) + seedGov.CatchUpWithRound(2) + // Setup configuration for round 3. + req.NoError(seedGov.State().RequestChange( + test.StateChangeRoundLength, uint64(100))) + req.NoError(seedGov.State().RequestChange( + test.StateChangeNotarySetSize, uint32(4))) + seedGov.CatchUpWithRound(3) + // Setup nodes. + nodes := s.setupNodes(dMoment, prvKeys, seedGov) + // Pick master node, and register changes on it. + var pickedNode *node + for _, pickedNode = range nodes { + break + } + // Register configuration changes for round 4. + req.NoError(pickedNode.gov.RegisterConfigChange( + 4, test.StateChangeRoundLength, uint64(100))) + req.NoError(pickedNode.gov.RegisterConfigChange( + 4, test.StateChangeNotarySetSize, uint32(5))) + // Register configuration changes for round 5. + req.NoError(pickedNode.gov.RegisterConfigChange( + 5, test.StateChangeRoundLength, uint64(60))) + req.NoError(pickedNode.gov.RegisterConfigChange( + 5, test.StateChangeNotarySetSize, uint32(4))) + // Run test. + for _, n := range nodes { + go n.con.Run() + defer n.con.Stop() + } +Loop: + for { + <-time.After(5 * time.Second) + for _, n := range nodes { + latestPos := n.app.GetLatestDeliveredPosition() + fmt.Println("latestPos", n.ID, &latestPos) + if latestPos.Round < untilRound { + continue Loop + } + } + // Oh ya. + break + } + s.verifyNodes(nodes) +} + +func (s *ConsensusTestSuite) TestSync() { + // The sync test case: + // - No configuration change. + // - One node does not run when all others starts until aliveRound exceeded. + // - One DKG reset happened before syncing. + var ( + req = s.Require() + peerCount = 4 + dMoment = time.Now().UTC() + untilRound = uint64(6) + stopRound = uint64(4) + // aliveRound should be large enough to test round event handling in + // syncer. 
+ aliveRound = uint64(2) + errChan = make(chan error, 100) + ) + prvKeys, pubKeys, err := test.NewKeys(peerCount) + req.NoError(err) + // Setup seed governance instance. Give a short latency to make this test + // run faster. + seedGov, err := test.NewGovernance( + test.NewState(core.DKGDelayRound, + pubKeys, 100*time.Millisecond, &common.NullLogger{}, true), + core.ConfigRoundShift) + req.NoError(err) + req.NoError(seedGov.State().RequestChange( + test.StateChangeRoundLength, uint64(100))) + seedGov.CatchUpWithRound(0) + seedGov.CatchUpWithRound(1) + // A short round interval. + nodes := s.setupNodes(dMoment, prvKeys, seedGov) + // Choose the first node as "syncNode" that its consensus' Run() is called + // later. + syncNode := nodes[types.NewNodeID(pubKeys[0])] + syncNode.con = nil + // Pick a node to stop when synced. + stoppedNode := nodes[types.NewNodeID(pubKeys[1])] + for _, n := range nodes { + n.rEvt.Register(purgeHandlerGen(n.network)) + // Round Height reference table: + // - Round:1 Reset:0 -- 100 + // - Round:1 Reset:1 -- 200 + // - Round:2 Reset:0 -- 300 + n.rEvt.Register(govHandlerGen(1, 0, n.gov, prohibitDKG)) + n.rEvt.Register(govHandlerGen(1, 1, n.gov, unprohibitDKG)) + if n.ID != syncNode.ID { + go n.con.Run() + if n.ID != stoppedNode.ID { + defer n.con.Stop() + } + } + } + // Clean syncNode's network receive channel, or it might exceed the limit + // and block other go routines. + dummyReceiverCtxCancel, dummyFinished := utils.LaunchDummyReceiver( + context.Background(), syncNode.network.ReceiveChan(), nil) +ReachAlive: + for { + // Check if any error happened or sleep for a period of time. + select { + case err := <-errChan: + req.NoError(err) + case <-time.After(5 * time.Second): + } + // If all nodes excepts syncNode have reached aliveRound, call syncNode's + // Run() and send it all blocks in one of normal node's compaction chain. + for id, n := range nodes { + if id == syncNode.ID { + continue + } + pos := n.app.GetLatestDeliveredPosition() + if pos.Round < aliveRound { + fmt.Println("latestPos", n.ID, &pos) + continue ReachAlive + } + } + dummyReceiverCtxCancel() + <-dummyFinished + break + } + // Initiate Syncer. + runnerCtx, runnerCtxCancel := context.WithCancel(context.Background()) + defer runnerCtxCancel() + f, err := os.Create("log.sync.log") + if err != nil { + panic(err) + } + logger := common.NewCustomLogger(log.New(f, "", log.LstdFlags|log.Lmicroseconds)) + syncerObj := syncer.NewConsensus( + 0, + dMoment, + syncNode.app, + syncNode.gov, + syncNode.db, + syncNode.network, + prvKeys[0], + logger, + ) + // Initialize communication channel, it's not recommended to assertion in + // another go routine. + go func() { + var ( + syncedHeight uint64 = 1 + err error + syncedCon *core.Consensus + ) + SyncLoop: + for { + syncedCon, syncedHeight, err = s.syncBlocksWithSomeNode( + stoppedNode, syncNode, syncerObj, syncedHeight) + if syncedCon != nil { + syncNode.con = syncedCon + go syncNode.con.Run() + go func() { + <-runnerCtx.Done() + syncNode.con.Stop() + }() + break SyncLoop + } + if err != nil { + errChan <- err + break SyncLoop + } + select { + case <-runnerCtx.Done(): + break SyncLoop + case <-time.After(4 * time.Second): + } + } + }() + // Wait until all nodes reach 'untilRound'. 
+ var stoppedRound uint64
+ go func() {
+ n, pos := stoppedNode, stoppedNode.app.GetLatestDeliveredPosition()
+ ReachFinished:
+ for {
+ fmt.Println("latestPos", n.ID, &pos)
+ time.Sleep(5 * time.Second)
+ if stoppedNode.con != nil {
+ pos = n.app.GetLatestDeliveredPosition()
+ if pos.Round >= stopRound {
+ // Stop a node; the rest should still be able to proceed.
+ stoppedNode.con.Stop()
+ stoppedNode.con = nil
+ stoppedRound = pos.Round
+ fmt.Println("one node stopped", stoppedNode.ID)
+ utils.LaunchDummyReceiver(
+ runnerCtx, stoppedNode.network.ReceiveChan(), nil)
+ }
+ }
+ for _, n = range nodes {
+ if n.ID == stoppedNode.ID {
+ continue
+ }
+ pos = n.app.GetLatestDeliveredPosition()
+ if pos.Round < untilRound {
+ continue ReachFinished
+ }
+ }
+ break
+ }
+ runnerCtxCancel()
+ }()
+ // Block until a test milestone is reached.
+ select {
+ case err := <-errChan:
+ req.NoError(err)
+ case <-runnerCtx.Done():
+ // This test passed.
+ }
+ s.Require().Equal(stoppedRound, stopRound)
+}
+
+func (s *ConsensusTestSuite) TestForceSync() {
+ // The force-sync test case:
+ // - No configuration change.
+ // - All nodes are stopped once one of them reaches stopRound, then are
+ // force-synced from the node with the highest compaction chain tip.
+ var (
+ req = s.Require()
+ peerCount = 4
+ dMoment = time.Now().UTC()
+ untilRound = uint64(3)
+ stopRound = uint64(1)
+ errChan = make(chan error, 100)
+ )
+ prvKeys, pubKeys, err := test.NewKeys(peerCount)
+ req.NoError(err)
+ // Setup seed governance instance. Give a short latency to make this test
+ // run faster.
+ seedGov, err := test.NewGovernance(
+ test.NewState(core.DKGDelayRound,
+ pubKeys, 100*time.Millisecond, &common.NullLogger{}, true),
+ core.ConfigRoundShift)
+ req.NoError(err)
+ req.NoError(seedGov.State().RequestChange(
+ test.StateChangeRoundLength, uint64(100)))
+ seedGov.CatchUpWithRound(0)
+ seedGov.CatchUpWithRound(1)
+ // A short round interval.
+ nodes := s.setupNodes(dMoment, prvKeys, seedGov)
+ for _, n := range nodes {
+ go n.con.Run()
+ }
+ReachStop:
+ for {
+ // Check if any error happened, or sleep for a period of time.
+ select {
+ case err := <-errChan:
+ req.NoError(err)
+ case <-time.After(5 * time.Second):
+ }
+ // If one of the nodes has reached stopRound, stop all nodes to simulate
+ // a crash.
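+ // Only one node needs to cross stopRound; the others are assumed to be
+ // at most a few blocks behind when they are stopped.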
+ for _, n := range nodes {
+ pos := n.app.GetLatestDeliveredPosition()
+ if pos.Round >= stopRound {
+ break ReachStop
+ } else {
+ fmt.Println("latestPos", n.ID, &pos)
+ }
+ }
+ }
+
+ // Stop every node, then find the one with the highest compaction chain
+ // tip; it becomes the source the others sync from.
+ var latestHeight uint64
+ var latestNodeID types.NodeID
+ for _, n := range nodes {
+ n.con.Stop()
+ time.Sleep(1 * time.Second)
+ }
+ for nID, n := range nodes {
+ _, height := n.db.GetCompactionChainTipInfo()
+ if height > latestHeight {
+ fmt.Println("Newer height", nID, height)
+ latestNodeID = nID
+ latestHeight = height
+ }
+ }
+ fmt.Println("Latest node", latestNodeID, latestHeight)
+ for nID, node := range nodes {
+ if nID == latestNodeID {
+ continue
+ }
+ fmt.Printf("[%p] Clearing %s %s\n", node.app, nID, node.app.GetLatestDeliveredPosition())
+ node.app.ClearUndeliveredBlocks()
+ }
+ syncerCon := make(map[types.NodeID]*syncer.Consensus, len(nodes))
+ for i, prvKey := range prvKeys {
+ f, err := os.Create(fmt.Sprintf("log.sync.%d.log", i))
+ if err != nil {
+ panic(err)
+ }
+ logger := common.NewCustomLogger(log.New(f, "", log.LstdFlags|log.Lmicroseconds))
+ nID := types.NewNodeID(prvKey.PublicKey())
+ node := nodes[nID]
+ syncerCon[nID] = syncer.NewConsensus(
+ latestHeight,
+ dMoment,
+ node.app,
+ node.gov,
+ node.db,
+ node.network,
+ prvKey,
+ logger,
+ )
+ }
+ targetNode := nodes[latestNodeID]
+ for nID, node := range nodes {
+ if nID == latestNodeID {
+ continue
+ }
+ // Start syncing from the block right after the latest delivered one.
+ syncedHeight := node.app.GetLatestDeliveredPosition().Height
+ syncedHeight++
+ var err error
+ for {
+ fmt.Println("Syncing", nID, syncedHeight)
+ if syncedHeight >= latestHeight {
+ break
+ }
+ _, syncedHeight, err = s.syncBlocksWithSomeNode(
+ targetNode, node, syncerCon[nID], syncedHeight)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Println("Syncing after", nID, syncedHeight)
+ }
+ fmt.Println("Synced", nID, syncedHeight)
+ }
+ // Make sure all nodes are synced in DB and app.
+ _, latestHeight = targetNode.db.GetCompactionChainTipInfo()
+ latestPos := targetNode.app.GetLatestDeliveredPosition()
+ for _, node := range nodes {
+ _, height := node.db.GetCompactionChainTipInfo()
+ s.Require().Equal(height, latestHeight)
+ pos := node.app.GetLatestDeliveredPosition()
+ s.Require().Equal(latestPos, pos)
+ }
+ for _, con := range syncerCon {
+ con.ForceSync(latestPos, true)
+ }
+ for nID := range nodes {
+ con, err := syncerCon[nID].GetSyncedConsensus()
+ s.Require().NoError(err)
+ nodes[nID].con = con
+ }
+ for _, node := range nodes {
+ go node.con.Run()
+ defer node.con.Stop()
+ }
+
+Loop:
+ for {
+ <-time.After(5 * time.Second)
+ for _, n := range nodes {
+ latestPos := n.app.GetLatestDeliveredPosition()
+ fmt.Println("latestPos", n.ID, &latestPos)
+ if latestPos.Round < untilRound {
+ continue Loop
+ }
+ }
+ // All nodes have passed untilRound; stop polling.
+ break
+ }
+ s.verifyNodes(nodes)
+}
+
+func (s *ConsensusTestSuite) TestResetDKG() {
+ var (
+ req = s.Require()
+ peerCount = 5
+ dMoment = time.Now().UTC()
+ untilRound = uint64(3)
+ )
+ prvKeys, pubKeys, err := test.NewKeys(peerCount)
+ req.NoError(err)
+ // Setup seed governance instance. Give a short latency to make this test
+ // run faster.
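+ // (The 100*time.Millisecond below is the simulated latency handed to
+ // test.NewState; round timing is presumably derived from it, so smaller
+ // values make rounds advance faster at the cost of stability.)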
+ seedGov, err := test.NewGovernance(
+ test.NewState(core.DKGDelayRound,
+ pubKeys, 100*time.Millisecond, &common.NullLogger{}, true),
+ core.ConfigRoundShift)
+ req.NoError(err)
+ req.NoError(seedGov.State().RequestChange(
+ test.StateChangeRoundLength, uint64(100)))
+ req.NoError(seedGov.State().RequestChange(
+ test.StateChangeNotarySetSize, uint32(4)))
+ nodes := s.setupNodes(dMoment, prvKeys, seedGov)
+ for _, n := range nodes {
+ n.rEvt.Register(purgeHandlerGen(n.network))
+ // Round Height reference table:
+ // - Round:1 Reset:0 -- 100
+ // - Round:1 Reset:1 -- 200
+ // - Round:1 Reset:2 -- 300
+ // - Round:2 Reset:0 -- 400
+ // - Round:2 Reset:1 -- 500
+ // - Round:3 Reset:0 -- 600
+ n.rEvt.Register(govHandlerGen(1, 0, n.gov, prohibitDKG))
+ n.rEvt.Register(govHandlerGen(1, 2, n.gov, unprohibitDKG))
+ n.rEvt.Register(govHandlerGen(2, 0, n.gov, prohibitDKGExceptFinalize))
+ n.rEvt.Register(govHandlerGen(2, 1, n.gov, unprohibitDKG))
+ go n.con.Run()
+ }
+Loop:
+ for {
+ <-time.After(5 * time.Second)
+ for _, n := range nodes {
+ latestPos := n.app.GetLatestDeliveredPosition()
+ fmt.Println("latestPos", n.ID, &latestPos)
+ if latestPos.Round < untilRound {
+ continue Loop
+ }
+ }
+ // All nodes have passed untilRound; stop polling.
+ break
+ }
+ s.verifyNodes(nodes)
+ for _, n := range nodes {
+ n.con.Stop()
+ req.Equal(n.gov.DKGResetCount(2), uint64(2))
+ req.Equal(n.gov.DKGResetCount(3), uint64(1))
+ }
+}
+
+func TestConsensus(t *testing.T) {
+ suite.Run(t, new(ConsensusTestSuite))
+}
diff --git a/dex/consensus/integration_test/round-event_test.go b/dex/consensus/integration_test/round-event_test.go
new file mode 100644
index 000000000..dca0834dc
--- /dev/null
+++ b/dex/consensus/integration_test/round-event_test.go
@@ -0,0 +1,267 @@
+// Copyright 2019 The dexon-consensus Authors
+// This file is part of the dexon-consensus library.
+//
+// The dexon-consensus library is free software: you can redistribute it
+// and/or modify it under the terms of the GNU Lesser General Public License as
+// published by the Free Software Foundation, either version 3 of the License,
+// or (at your option) any later version.
+//
+// The dexon-consensus library is distributed in the hope that it will be
+// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+// General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the dexon-consensus library. If not, see
+// <http://www.gnu.org/licenses/>.
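+
+// This file exercises utils.RoundEvent against a test governance: round
+// progression from genesis and from a later round, DKG resets, last-period
+// queries, and the initial event trigger.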
+
+package integration
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/dexon-foundation/dexon-consensus/common"
+ "github.com/dexon-foundation/dexon-consensus/core"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto"
+ "github.com/dexon-foundation/dexon-consensus/core/crypto/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/test"
+ "github.com/dexon-foundation/dexon-consensus/core/types"
+ typesDKG "github.com/dexon-foundation/dexon-consensus/core/types/dkg"
+ "github.com/dexon-foundation/dexon-consensus/core/utils"
+ "github.com/stretchr/testify/suite"
+)
+
+func getCRS(round, reset uint64) []byte {
+ return []byte(fmt.Sprintf("r#%d,reset#%d", round, reset))
+}
+
+type evtParamToCheck struct {
+ round uint64
+ reset uint64
+ height uint64
+ crs common.Hash
+}
+
+type RoundEventTestSuite struct {
+ suite.Suite
+
+ pubKeys []crypto.PublicKey
+ signers []*utils.Signer
+ logger common.Logger
+}
+
+func (s *RoundEventTestSuite) SetupSuite() {
+ prvKeys, pubKeys, err := test.NewKeys(4)
+ s.Require().NoError(err)
+ s.pubKeys = pubKeys
+ for _, k := range prvKeys {
+ s.signers = append(s.signers, utils.NewSigner(k))
+ }
+ s.logger = &common.NullLogger{}
+}
+
+func (s *RoundEventTestSuite) prepareGov() *test.Governance {
+ gov, err := test.NewGovernance(
+ test.NewState(1, s.pubKeys, 100*time.Millisecond, s.logger, true),
+ core.ConfigRoundShift)
+ s.Require().NoError(err)
+ return gov
+}
+
+func (s *RoundEventTestSuite) proposeMPK(
+ gov *test.Governance,
+ round, reset uint64,
+ count int) {
+ for idx, pubKey := range s.pubKeys[:count] {
+ _, pubShare := dkg.NewPrivateKeyShares(utils.GetDKGThreshold(
+ gov.Configuration(round)))
+ mpk := &typesDKG.MasterPublicKey{
+ Round: round,
+ Reset: reset,
+ DKGID: typesDKG.NewID(types.NewNodeID(pubKey)),
+ PublicKeyShares: *pubShare.Move(),
+ }
+ s.Require().NoError(s.signers[idx].SignDKGMasterPublicKey(mpk))
+ gov.AddDKGMasterPublicKey(mpk)
+ }
+}
+
+func (s *RoundEventTestSuite) proposeFinalize(
+ gov *test.Governance,
+ round, reset uint64,
+ count int) {
+ for idx, pubKey := range s.pubKeys[:count] {
+ final := &typesDKG.Finalize{
+ ProposerID: types.NewNodeID(pubKey),
+ Round: round,
+ Reset: reset,
+ }
+ s.Require().NoError(s.signers[idx].SignDKGFinalize(final))
+ gov.AddDKGFinalize(final)
+ }
+}
+
+func (s *RoundEventTestSuite) TestFromRound0() {
+ // Prepare test.Governance.
+ gov := s.prepareGov()
+ s.Require().NoError(gov.State().RequestChange(test.StateChangeRoundLength,
+ uint64(100)))
+ gov.CatchUpWithRound(0)
+ s.Require().NoError(gov.State().RequestChange(test.StateChangeRoundLength,
+ uint64(200)))
+ gov.CatchUpWithRound(1)
+ // Prepare utils.RoundEvent, starting from genesis.
+ rEvt, err := utils.NewRoundEvent(context.Background(), gov, s.logger,
+ types.Position{Height: types.GenesisHeight}, core.ConfigRoundShift)
+ s.Require().NoError(err)
+ // Register a handler to collect triggered events.
+ var evts []evtParamToCheck
+ rEvt.Register(func(params []utils.RoundEventParam) {
+ for _, p := range params {
+ evts = append(evts, evtParamToCheck{
+ round: p.Round,
+ reset: p.Reset,
+ height: p.BeginHeight,
+ crs: p.CRS,
+ })
+ // A tricky check to make sure the config passed in is correct.
+ s.Require().Equal((p.Round+1)*100, p.Config.RoundLength)
+ }
+ })
+ // Reset round#1 twice, then make it ready.
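+ // Each reset extends the current round by one more round length, so with
+ // roundLength=100 for round 0 the expected event heights are 101
+ // (reset#1), 201 (reset#2) and 301 (round 1 begins), matching the
+ // assertions below.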
+ gov.ResetDKG([]byte("DKG round 1 reset 1"))
+ gov.ResetDKG([]byte("DKG round 1 reset 2"))
+ s.proposeMPK(gov, 1, 2, 3)
+ s.proposeFinalize(gov, 1, 2, 3)
+ s.Require().Equal(rEvt.ValidateNextRound(80), uint(3))
+ // Check collected events.
+ s.Require().Len(evts, 3)
+ s.Require().Equal(evts[0], evtParamToCheck{0, 1, 101, gov.CRS(0)})
+ s.Require().Equal(evts[1], evtParamToCheck{0, 2, 201, gov.CRS(0)})
+ s.Require().Equal(evts[2], evtParamToCheck{1, 0, 301, gov.CRS(1)})
+}
+
+func (s *RoundEventTestSuite) TestFromRoundN() {
+ // Prepare test.Governance.
+ var (
+ gov = s.prepareGov()
+ roundLength = uint64(100)
+ )
+ s.Require().NoError(gov.State().RequestChange(test.StateChangeRoundLength,
+ roundLength))
+ for r := uint64(2); r <= uint64(20); r++ {
+ gov.ProposeCRS(r, getCRS(r, 0))
+ }
+ for r := uint64(1); r <= uint64(19); r++ {
+ gov.NotifyRound(r, utils.GetRoundHeight(gov, r-1)+roundLength)
+ }
+ gov.NotifyRound(20, 2201)
+ // Reset round#20 twice, then let it finish DKG preparation.
+ gov.ResetDKG(getCRS(20, 1))
+ gov.ResetDKG(getCRS(20, 2))
+ s.proposeMPK(gov, 20, 2, 3)
+ s.proposeFinalize(gov, 20, 2, 3)
+ s.Require().Equal(gov.DKGResetCount(20), uint64(2))
+ // Propose CRS for round#21; it works without a reset.
+ gov.ProposeCRS(21, getCRS(21, 0))
+ s.proposeMPK(gov, 21, 0, 3)
+ s.proposeFinalize(gov, 21, 0, 3)
+ // Propose CRS for round#22; it works without a reset.
+ gov.ProposeCRS(22, getCRS(22, 0))
+ s.proposeMPK(gov, 22, 0, 3)
+ s.proposeFinalize(gov, 22, 0, 3)
+ // Prepare utils.RoundEvent, starting from round#19, reset(for round#20)#1.
+ rEvt, err := utils.NewRoundEvent(context.Background(), gov, s.logger,
+ types.Position{Round: 19, Height: 2019}, core.ConfigRoundShift)
+ s.Require().NoError(err)
+ // Register a handler to collect triggered events.
+ var evts []evtParamToCheck
+ rEvt.Register(func(params []utils.RoundEventParam) {
+ for _, p := range params {
+ evts = append(evts, evtParamToCheck{
+ round: p.Round,
+ reset: p.Reset,
+ height: p.BeginHeight,
+ crs: p.CRS,
+ })
+ }
+ })
+ // Check for round#19, reset(for round#20)#2 at height=2080.
+ s.Require().Equal(rEvt.ValidateNextRound(2080), uint(2))
+ // Check collected events.
+ s.Require().Len(evts, 2)
+ s.Require().Equal(evts[0], evtParamToCheck{19, 2, 2101, gov.CRS(19)})
+ s.Require().Equal(evts[1], evtParamToCheck{20, 0, 2201, gov.CRS(20)})
+ // Rounds that exceed the round-shift limitation would not be triggered.
+ s.Require().Equal(rEvt.ValidateNextRound(2280), uint(1))
+ s.Require().Len(evts, 3)
+ s.Require().Equal(evts[2], evtParamToCheck{21, 0, 2301, gov.CRS(21)})
+ s.Require().Equal(rEvt.ValidateNextRound(2380), uint(1))
+ s.Require().Equal(evts[3], evtParamToCheck{22, 0, 2401, gov.CRS(22)})
+}
+
+func (s *RoundEventTestSuite) TestLastPeriod() {
+ gov := s.prepareGov()
+ s.Require().NoError(gov.State().RequestChange(test.StateChangeRoundLength,
+ uint64(100)))
+ gov.CatchUpWithRound(0)
+ s.Require().NoError(gov.State().RequestChange(test.StateChangeRoundLength,
+ uint64(200)))
+ gov.CatchUpWithRound(1)
+ // Prepare utils.RoundEvent, starting from genesis.
+ rEvt, err := utils.NewRoundEvent(context.Background(), gov, s.logger,
+ types.Position{Height: types.GenesisHeight}, core.ConfigRoundShift)
+ s.Require().NoError(err)
+ begin, length := rEvt.LastPeriod()
+ s.Require().Equal(begin, uint64(1))
+ s.Require().Equal(length, uint64(100))
+ // Reset round#1 twice, then make it ready.
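+ // After the two resets, ValidateNextRound(80) should move the last
+ // period's beginning from 1 to 201 with length still 100; once the MPKs
+ // and finalizes are in place, another call settles round 1 at height 301
+ // with its new length of 200, matching the assertions below.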
+ gov.ResetDKG([]byte("DKG round 1 reset 1"))
+ gov.ResetDKG([]byte("DKG round 1 reset 2"))
+ rEvt.ValidateNextRound(80)
+ begin, length = rEvt.LastPeriod()
+ s.Require().Equal(begin, uint64(201))
+ s.Require().Equal(length, uint64(100))
+ s.proposeMPK(gov, 1, 2, 3)
+ s.proposeFinalize(gov, 1, 2, 3)
+ rEvt.ValidateNextRound(80)
+ begin, length = rEvt.LastPeriod()
+ s.Require().Equal(begin, uint64(301))
+ s.Require().Equal(length, uint64(200))
+}
+
+func (s *RoundEventTestSuite) TestTriggerInitEvent() {
+ gov := s.prepareGov()
+ s.Require().NoError(gov.State().RequestChange(test.StateChangeRoundLength,
+ uint64(100)))
+ gov.CatchUpWithRound(0)
+ s.Require().NoError(gov.State().RequestChange(test.StateChangeRoundLength,
+ uint64(200)))
+ gov.CatchUpWithRound(1)
+ // Prepare utils.RoundEvent, starting from genesis.
+ rEvt, err := utils.NewRoundEvent(context.Background(), gov, s.logger,
+ types.Position{Height: types.GenesisHeight}, core.ConfigRoundShift)
+ s.Require().NoError(err)
+ // Register a handler to collect triggered events.
+ var evts []evtParamToCheck
+ rEvt.Register(func(params []utils.RoundEventParam) {
+ for _, p := range params {
+ evts = append(evts, evtParamToCheck{
+ round: p.Round,
+ reset: p.Reset,
+ height: p.BeginHeight,
+ crs: p.CRS,
+ })
+ }
+ })
+ rEvt.TriggerInitEvent()
+ s.Require().Len(evts, 1)
+ s.Require().Equal(evts[0], evtParamToCheck{0, 0, 1, gov.CRS(0)})
+}
+
+func TestRoundEvent(t *testing.T) {
+ suite.Run(t, new(RoundEventTestSuite))
+}
|