author     Mission Liao <mission.liao@dexon.org>      2018-10-24 17:50:09 +0800
committer  GitHub <noreply@github.com>                2018-10-24 17:50:09 +0800
commit     dbee0586b0a565ae9a31a3c2d967f5c2af76f60d (patch)
tree       cdfaef7754aedfda8d02c7023364a645e391e59e
parent     f90c15fcfa575e138355a449c49cd784ba54db17 (diff)
test: add test.State (#239)

* Separate the test utilities from the interface implementation
  in test.Governance.
* Add test.State.
* Integrate test.State into test.Governance. test.State is mainly
  used to emulate state propagation on full nodes.
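
The most visible change for callers is that test.Governance no longer generates keys internally (the PrivateKeys() helper is removed); tests now create key pairs with test.NewKeys and hand the public keys to NewGovernance. Below is a minimal sketch of the new setup pattern, inferred from the updated tests in this diff; the standalone main wrapper and the panic-style error handling are illustrative assumptions, only the test.NewKeys / test.NewGovernance calls come from the commit.

```go
package main

import (
	"fmt"
	"time"

	"github.com/dexon-foundation/dexon-consensus-core/core/test"
)

func main() {
	// Keys are generated by the caller; private keys are no longer reachable
	// through Governance.
	prvKeys, pubKeys, err := test.NewKeys(4)
	if err != nil {
		panic(err)
	}
	// Governance is now constructed from the genesis public keys plus lambda.
	gov, err := test.NewGovernance(pubKeys, 100*time.Millisecond)
	if err != nil {
		panic(err)
	}
	// Round 0/1 configurations are snapshotted on demand from the embedded State.
	cfg := gov.Configuration(0)
	fmt.Println(len(prvKeys), cfg.NumChains, cfg.LambdaBA)
}
```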
-rw-r--r--  core/compaction-chain_test.go    |   4
-rw-r--r--  core/configuration-chain_test.go |   2
-rw-r--r--  core/consensus_test.go           |  49
-rw-r--r--  core/dkg-tsig-protocol_test.go   |  33
-rw-r--r--  core/lattice_test.go             |   4
-rw-r--r--  core/nodeset-cache_test.go       |   4
-rw-r--r--  core/test/governance.go          | 199
-rw-r--r--  core/test/state.go               | 575
-rw-r--r--  core/test/state_test.go          | 277
-rw-r--r--  core/test/utils.go               |  14
-rw-r--r--  integration_test/utils.go        |  12
11 files changed, 1019 insertions(+), 154 deletions(-)
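
Regarding the new core/test/state.go introduced in the diff below: in local mode, State applies change requests immediately, while in non-local mode it queues them so they can be packed into a byte payload and applied elsewhere, which is how it emulates state propagation on a full node. The following is a rough sketch of that round trip, modeled on TestPacking in core/test/state_test.go; driving two separate State instances this way is my extrapolation (the test applies the packed bytes back onto the same instance), and the main wrapper is illustrative.

```go
package main

import (
	"fmt"
	"time"

	"github.com/dexon-foundation/dexon-consensus-core/core/test"
)

func main() {
	_, pubKeys, err := test.NewKeys(4)
	if err != nil {
		panic(err)
	}
	// local=false: RequestChange queues the change instead of applying it.
	proposer := test.NewState(pubKeys, 250*time.Millisecond, false)
	receiver := test.NewState(pubKeys, 250*time.Millisecond, false)
	if err := proposer.RequestChange(test.StateChangeNumChains, uint32(7)); err != nil {
		panic(err)
	}
	// Pack pending requests into bytes, as if carried in a block payload...
	payload, err := proposer.PackRequests()
	if err != nil {
		panic(err)
	}
	// ...then apply them on the receiving side.
	if err := receiver.Apply(payload); err != nil {
		panic(err)
	}
	cfg, _ := receiver.Snapshot()
	fmt.Println(cfg.NumChains) // 7
}
```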
diff --git a/core/compaction-chain_test.go b/core/compaction-chain_test.go index e73482f..f4860fb 100644 --- a/core/compaction-chain_test.go +++ b/core/compaction-chain_test.go @@ -36,7 +36,9 @@ func (s *CompactionChainTestSuite) SetupTest() { } func (s *CompactionChainTestSuite) newCompactionChain() *compactionChain { - gov, err := test.NewGovernance(4, 100*time.Millisecond) + _, pubKeys, err := test.NewKeys(4) + s.Require().NoError(err) + gov, err := test.NewGovernance(pubKeys, 100*time.Millisecond) s.Require().NoError(err) cc := newCompactionChain(gov) cc.init(&types.Block{}) diff --git a/core/configuration-chain_test.go b/core/configuration-chain_test.go index 19d50a3..ec2d6f2 100644 --- a/core/configuration-chain_test.go +++ b/core/configuration-chain_test.go @@ -166,7 +166,7 @@ func (s *ConfigurationChainTestSuite) runDKG( recv := newTestCCReceiver(s) for _, nID := range s.nIDs { - gov, err := test.NewGovernance(0, 50*time.Millisecond) + gov, err := test.NewGovernance(nil, 50*time.Millisecond) s.Require().NoError(err) cfgChains[nID] = newConfigurationChain( nID, recv, gov, &common.NullLogger{}) diff --git a/core/consensus_test.go b/core/consensus_test.go index 31cb28a..6414f43 100644 --- a/core/consensus_test.go +++ b/core/consensus_test.go @@ -165,7 +165,7 @@ func (s *ConsensusTestSuite) prepareConsensus( app := test.NewApp() db, err := blockdb.NewMemBackedBlockDB() - s.Require().Nil(err) + s.Require().NoError(err) nID := types.NewNodeID(prvKey.PublicKey()) network := conn.newNetwork(nID) con := NewConsensus( @@ -190,14 +190,15 @@ func (s *ConsensusTestSuite) TestSimpleDeliverBlock() { // - Byzantine Agreement layer is not taken into consideration, every // block is passed to lattice module directly. var ( - gov, err = test.NewGovernance(4, time.Second) - minInterval = gov.Configuration(0).MinBlockInterval - req = s.Require() - prvKeys = gov.PrivateKeys() - nodes []types.NodeID - conn = s.newNetworkConnection() + req = s.Require() + nodes []types.NodeID + conn = s.newNetworkConnection() ) - s.Require().Nil(err) + prvKeys, pubKeys, err := test.NewKeys(4) + s.Require().NoError(err) + gov, err := test.NewGovernance(pubKeys, time.Second) + s.Require().NoError(err) + minInterval := gov.Configuration(0).MinBlockInterval // Setup core.Consensus and test.App. objs := map[types.NodeID]*struct { app *test.App @@ -409,13 +410,14 @@ func (s *ConsensusTestSuite) TestPrepareBlock() { // - Make sure Consensus.prepareBlock would only attempt to // ack the prepared block. var ( - gov, err = test.NewGovernance(4, time.Second) - req = s.Require() - nodes []types.NodeID - prvKeys = gov.PrivateKeys() - conn = s.newNetworkConnection() + req = s.Require() + nodes []types.NodeID + conn = s.newNetworkConnection() ) - s.Require().Nil(err) + prvKeys, pubKeys, err := test.NewKeys(4) + s.Require().NoError(err) + gov, err := test.NewGovernance(pubKeys, time.Second) + s.Require().NoError(err) dMoment := time.Now().UTC() // Setup core.Consensus and test.App. 
cons := map[types.NodeID]*Consensus{} @@ -454,9 +456,11 @@ func (s *ConsensusTestSuite) TestPrepareBlock() { func (s *ConsensusTestSuite) TestPrepareGenesisBlock() { conn := s.newNetworkConnection() - gov, err := test.NewGovernance(4, time.Second) + prvKeys, pubKeys, err := test.NewKeys(4) + s.Require().NoError(err) + gov, err := test.NewGovernance(pubKeys, time.Second) s.Require().NoError(err) - prvKey := gov.PrivateKeys()[0] + prvKey := prvKeys[0] _, con := s.prepareConsensus(time.Now().UTC(), gov, prvKey, conn) block := &types.Block{ Position: types.Position{ChainID: 0}, @@ -468,16 +472,17 @@ func (s *ConsensusTestSuite) TestPrepareGenesisBlock() { func (s *ConsensusTestSuite) TestDKGCRS() { n := 21 - lambda := time.Duration(200) + lambda := 200 * time.Millisecond if testing.Short() { n = 7 - lambda = 100 + lambda = 100 * time.Millisecond } conn := s.newNetworkConnection() - gov, err := test.NewGovernance(n, lambda*time.Millisecond) - s.Require().Nil(err) - gov.RoundInterval = 200 * lambda * time.Millisecond - prvKeys := gov.PrivateKeys() + prvKeys, pubKeys, err := test.NewKeys(n) + s.Require().NoError(err) + gov, err := test.NewGovernance(pubKeys, lambda) + s.Require().NoError(err) + gov.State().RequestChange(test.StateChangeRoundInterval, 200*lambda) cons := map[types.NodeID]*Consensus{} dMoment := time.Now().UTC() for _, key := range prvKeys { diff --git a/core/dkg-tsig-protocol_test.go b/core/dkg-tsig-protocol_test.go index e533a2f..5c60a2f 100644 --- a/core/dkg-tsig-protocol_test.go +++ b/core/dkg-tsig-protocol_test.go @@ -139,7 +139,9 @@ func (s *DKGTSIGProtocolTestSuite) TestDKGTSIGProtocol() { k := 2 n := 10 round := uint64(1) - gov, err := test.NewGovernance(5, 100) + _, pubKeys, err := test.NewKeys(5) + s.Require().NoError(err) + gov, err := test.NewGovernance(pubKeys, 100) s.Require().NoError(err) receivers, protocols := s.newProtocols(k, n, round) @@ -242,7 +244,9 @@ func (s *DKGTSIGProtocolTestSuite) TestNackComplaint() { k := 3 n := 10 round := uint64(1) - gov, err := test.NewGovernance(5, 100) + _, pubKeys, err := test.NewKeys(5) + s.Require().NoError(err) + gov, err := test.NewGovernance(pubKeys, 100) s.Require().NoError(err) receivers, protocols := s.newProtocols(k, n, round) @@ -286,7 +290,9 @@ func (s *DKGTSIGProtocolTestSuite) TestComplaint() { k := 3 n := 10 round := uint64(1) - gov, err := test.NewGovernance(5, 100) + _, pubKeys, err := test.NewKeys(5) + s.Require().NoError(err) + gov, err := test.NewGovernance(pubKeys, 100) s.Require().NoError(err) receivers, protocols := s.newProtocols(k, n, round) @@ -349,7 +355,9 @@ func (s *DKGTSIGProtocolTestSuite) TestAntiComplaint() { k := 3 n := 10 round := uint64(1) - gov, err := test.NewGovernance(5, 100) + _, pubKeys, err := test.NewKeys(5) + s.Require().NoError(err) + gov, err := test.NewGovernance(pubKeys, 100) s.Require().NoError(err) receivers, protocols := s.newProtocols(k, n, round) @@ -404,7 +412,9 @@ func (s *DKGTSIGProtocolTestSuite) TestEncorceNackComplaint() { k := 3 n := 10 round := uint64(1) - gov, err := test.NewGovernance(5, 100) + _, pubKeys, err := test.NewKeys(5) + s.Require().NoError(err) + gov, err := test.NewGovernance(pubKeys, 100) s.Require().NoError(err) receivers, protocols := s.newProtocols(k, n, round) @@ -456,7 +466,9 @@ func (s *DKGTSIGProtocolTestSuite) TestQualifyIDs() { k := 3 n := 10 round := uint64(1) - gov, err := test.NewGovernance(5, 100) + _, pubKeys, err := test.NewKeys(5) + s.Require().NoError(err) + gov, err := test.NewGovernance(pubKeys, 100) s.Require().NoError(err) receivers, 
_ := s.newProtocols(k, n, round) @@ -519,7 +531,9 @@ func (s *DKGTSIGProtocolTestSuite) TestPartialSignature() { k := 3 n := 10 round := uint64(1) - gov, err := test.NewGovernance(5, 100) + _, pubKeys, err := test.NewKeys(5) + s.Require().NoError(err) + gov, err := test.NewGovernance(pubKeys, 100) s.Require().NoError(err) receivers, protocols := s.newProtocols(k, n, round) @@ -639,8 +653,11 @@ func (s *DKGTSIGProtocolTestSuite) TestProposeFinalize() { func (s *DKGTSIGProtocolTestSuite) TestTSigVerifierCache() { k := 3 n := 10 - gov, err := test.NewGovernance(n, 100) + _, pubKeys, err := test.NewKeys(n) + s.Require().NoError(err) + gov, err := test.NewGovernance(pubKeys, 100) s.Require().NoError(err) + gov.CatchUpWithRound(10) for i := 0; i < 10; i++ { round := uint64(i + 1) receivers, protocols := s.newProtocols(k, n, round) diff --git a/core/lattice_test.go b/core/lattice_test.go index 8fac592..bf4138a 100644 --- a/core/lattice_test.go +++ b/core/lattice_test.go @@ -105,7 +105,9 @@ func (s *LatticeTestSuite) newTestLatticeMgr( // Setup application. app := test.NewApp() // Setup governance. - gov, err := test.NewGovernance(int(cfg.NotarySetSize), cfg.LambdaBA) + _, pubKeys, err := test.NewKeys(int(cfg.NotarySetSize)) + req.NoError(err) + gov, err := test.NewGovernance(pubKeys, cfg.LambdaBA) req.NoError(err) // Setup compaction chain. cc := newCompactionChain(gov) diff --git a/core/nodeset-cache_test.go b/core/nodeset-cache_test.go index 0b69ee3..5934e21 100644 --- a/core/nodeset-cache_test.go +++ b/core/nodeset-cache_test.go @@ -61,7 +61,9 @@ type NodeSetCacheTestSuite struct { func (s *NodeSetCacheTestSuite) TestGovernanceIntf() { // NodeSetCacheInterface should let Governance implement it. var gov Governance - gov, err := test.NewGovernance(7, 250*time.Millisecond) + _, pubKeys, err := test.NewKeys(7) + s.Require().NoError(err) + gov, err = test.NewGovernance(pubKeys, 250*time.Millisecond) s.Require().NoError(err) _, ok := gov.(NodeSetCacheInterface) s.True(ok) diff --git a/core/test/governance.go b/core/test/governance.go index 21c04f6..94dcb6c 100644 --- a/core/test/governance.go +++ b/core/test/governance.go @@ -18,14 +18,12 @@ package test import ( - "encoding/json" "fmt" "sync" "time" "github.com/dexon-foundation/dexon-consensus-core/common" "github.com/dexon-foundation/dexon-consensus-core/core/crypto" - "github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa" "github.com/dexon-foundation/dexon-consensus-core/core/types" ) @@ -37,86 +35,65 @@ var ( // Governance is an implementation of Goverance for testing purpose. type Governance struct { - lambdaBA time.Duration - lambdaDKG time.Duration - privateKeys map[types.NodeID]crypto.PrivateKey - crs []common.Hash - tsig map[uint64]crypto.Signature - DKGComplaint map[uint64][]*types.DKGComplaint - DKGMasterPublicKey map[uint64][]*types.DKGMasterPublicKey - DKGFinal map[uint64]map[types.NodeID]struct{} - RoundInterval time.Duration - MinBlockInterval time.Duration - MaxBlockInterval time.Duration - lock sync.RWMutex + privateKeys map[types.NodeID]crypto.PrivateKey + configs []*types.Config + nodeSets [][]crypto.PublicKey + state *State + lock sync.RWMutex } // NewGovernance constructs a Governance instance. -func NewGovernance(nodeCount int, lambda time.Duration) ( - g *Governance, err error) { - hashCRS := crypto.Keccak256Hash([]byte("__ DEXON")) +func NewGovernance(genesisNodes []crypto.PublicKey, + lambda time.Duration) (g *Governance, err error) { + // Setup a State instance. 
+ // TODO(mission): it's not a good idea to embed initialization of one + // public class in another, I did this to make the range of + // modification smaller. g = &Governance{ - lambdaBA: lambda, - lambdaDKG: lambda * 10, - privateKeys: make(map[types.NodeID]crypto.PrivateKey), - crs: []common.Hash{hashCRS}, - tsig: make(map[uint64]crypto.Signature), - DKGComplaint: make(map[uint64][]*types.DKGComplaint), - DKGMasterPublicKey: make(map[uint64][]*types.DKGMasterPublicKey), - DKGFinal: make(map[uint64]map[types.NodeID]struct{}), - RoundInterval: 365 * 86400 * time.Second, - MinBlockInterval: 1 * time.Millisecond, - MaxBlockInterval: lambda * 8, - } - for i := 0; i < nodeCount; i++ { - prv, err := ecdsa.NewPrivateKey() - if err != nil { - return nil, err - } - nID := types.NewNodeID(prv.PublicKey()) - g.privateKeys[nID] = prv + state: NewState(genesisNodes, lambda, true), } return } // NodeSet implements Governance interface to return current // notary set. -func (g *Governance) NodeSet(_ uint64) ( - ret []crypto.PublicKey) { - for _, key := range g.privateKeys { - ret = append(ret, key.PublicKey()) +func (g *Governance) NodeSet(round uint64) []crypto.PublicKey { + if round == 0 || round == 1 { + // Round 0, 1 are genesis round, their configs should be created + // by default. + g.CatchUpWithRound(round) } - return + g.lock.RLock() + defer g.lock.RUnlock() + if round >= uint64(len(g.nodeSets)) { + return nil + } + return g.nodeSets[round] } // Configuration returns the configuration at a given block height. -func (g *Governance) Configuration(_ uint64) *types.Config { - return &types.Config{ - NumChains: uint32(len(g.privateKeys)), - LambdaBA: g.lambdaBA, - LambdaDKG: g.lambdaDKG, - K: 0, - PhiRatio: 0.667, - NotarySetSize: uint32(len(g.privateKeys)), - DKGSetSize: uint32(len(g.privateKeys)), - RoundInterval: g.RoundInterval, - MinBlockInterval: g.MinBlockInterval, - MaxBlockInterval: g.MaxBlockInterval, +func (g *Governance) Configuration(round uint64) *types.Config { + if round == 0 || round == 1 { + // Round 0, 1 are genesis round, their configs should be created + // by default. + g.CatchUpWithRound(round) } + g.lock.RLock() + defer g.lock.RUnlock() + if round >= uint64(len(g.nodeSets)) { + return nil + } + return g.configs[round] } // CRS returns the CRS for a given round. func (g *Governance) CRS(round uint64) common.Hash { - g.lock.RLock() - defer g.lock.RUnlock() - if round >= uint64(len(g.crs)) { - return common.Hash{} - } - return g.crs[round] + return g.state.CRS(round) } // NotifyRoundHeight notifies governace contract to snapshot config. func (g *Governance) NotifyRoundHeight(round, height uint64) { + g.CatchUpWithRound(round) } // ProposeCRS propose a CRS. @@ -124,19 +101,13 @@ func (g *Governance) ProposeCRS(round uint64, signedCRS []byte) { g.lock.Lock() defer g.lock.Unlock() crs := crypto.Keccak256Hash(signedCRS) - if g.crs[len(g.crs)-1].Equal(crs) { - return - } - g.crs = append(g.crs, crs) -} - -// PrivateKeys return the private key for that node, this function -// is a test utility and not a general Governance interface. -func (g *Governance) PrivateKeys() (keys []crypto.PrivateKey) { - for _, k := range g.privateKeys { - keys = append(keys, k) + if err := g.state.ProposeCRS(round, crs); err != nil { + // CRS can be proposed multiple times, other errors are not + // accepted. + if err != ErrDuplicatedChange { + panic(err) + } } - return } // AddDKGComplaint add a DKGComplaint. 
@@ -148,29 +119,12 @@ func (g *Governance) AddDKGComplaint( if g.IsDKGFinal(complaint.Round) { return } - g.lock.Lock() - defer g.lock.Unlock() - if _, exist := g.DKGFinal[complaint.Round][complaint.ProposerID]; exist { - return - } - for _, comp := range g.DKGComplaint[complaint.Round] { - if comp == complaint { - return - } - } - g.DKGComplaint[complaint.Round] = append(g.DKGComplaint[complaint.Round], - complaint) + g.state.RequestChange(StateAddDKGComplaint, complaint) } // DKGComplaints returns the DKGComplaints of round. func (g *Governance) DKGComplaints(round uint64) []*types.DKGComplaint { - g.lock.RLock() - defer g.lock.RUnlock() - complaints, exist := g.DKGComplaint[round] - if !exist { - return []*types.DKGComplaint{} - } - return complaints + return g.state.DKGComplaints(round) } // AddDKGMasterPublicKey adds a DKGMasterPublicKey. @@ -179,29 +133,13 @@ func (g *Governance) AddDKGMasterPublicKey( if round != masterPublicKey.Round { return } - g.lock.Lock() - defer g.lock.Unlock() - g.DKGMasterPublicKey[masterPublicKey.Round] = append( - g.DKGMasterPublicKey[masterPublicKey.Round], masterPublicKey) + g.state.RequestChange(StateAddDKGMasterPublicKey, masterPublicKey) } // DKGMasterPublicKeys returns the DKGMasterPublicKeys of round. func (g *Governance) DKGMasterPublicKeys( round uint64) []*types.DKGMasterPublicKey { - g.lock.RLock() - defer g.lock.RUnlock() - masterPublicKeys, exist := g.DKGMasterPublicKey[round] - if !exist { - return []*types.DKGMasterPublicKey{} - } - mpks := make([]*types.DKGMasterPublicKey, 0, len(masterPublicKeys)) - for _, mpk := range masterPublicKeys { - bytes, _ := json.Marshal(mpk) - mpkCopy := types.NewDKGMasterPublicKey() - json.Unmarshal(bytes, mpkCopy) - mpks = append(mpks, mpkCopy) - } - return mpks + return g.state.DKGMasterPublicKeys(round) } // AddDKGFinalize adds a DKG finalize message. @@ -209,17 +147,48 @@ func (g *Governance) AddDKGFinalize(round uint64, final *types.DKGFinalize) { if round != final.Round { return } - g.lock.Lock() - defer g.lock.Unlock() - if _, exist := g.DKGFinal[final.Round]; !exist { - g.DKGFinal[final.Round] = make(map[types.NodeID]struct{}) - } - g.DKGFinal[final.Round][final.ProposerID] = struct{}{} + g.state.RequestChange(StateAddDKGFinal, final) } // IsDKGFinal checks if DKG is final. func (g *Governance) IsDKGFinal(round uint64) bool { + if round == 0 || round == 1 { + // Round 0, 1 are genesis round, their configs should be created + // by default. + g.CatchUpWithRound(round) + } g.lock.RLock() defer g.lock.RUnlock() - return len(g.DKGFinal[round]) > int(g.Configuration(round).DKGSetSize)/3*2 + if round >= uint64(len(g.configs)) { + return false + } + return g.state.IsDKGFinal(round, int(g.configs[round].DKGSetSize)/3*2) +} + +// +// Test Utilities +// + +// State allows to access embed State instance. +func (g *Governance) State() *State { + return g.state +} + +// CatchUpWithRound attempts to perform state snapshot to +// provide configuration/nodeSet for round R. 
+func (g *Governance) CatchUpWithRound(round uint64) { + if func() bool { + g.lock.RLock() + defer g.lock.RUnlock() + return uint64(len(g.configs)) > round + }() { + return + } + g.lock.Lock() + defer g.lock.Unlock() + for uint64(len(g.configs)) <= round { + config, nodeSet := g.state.Snapshot() + g.configs = append(g.configs, config) + g.nodeSets = append(g.nodeSets, nodeSet) + } } diff --git a/core/test/state.go b/core/test/state.go new file mode 100644 index 0000000..59b3dc5 --- /dev/null +++ b/core/test/state.go @@ -0,0 +1,575 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package test + +import ( + "errors" + "math" + "sync" + "time" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa" + "github.com/dexon-foundation/dexon-consensus-core/core/types" + "github.com/dexon-foundation/dexon/rlp" +) + +// StateChangeType is the type of state change request. +type StateChangeType uint8 + +var ( + // ErrDuplicatedChange means the change request is already applied. + ErrDuplicatedChange = errors.New("duplicated change") + // ErrForkedCRS means a different CRS for one round is proposed. + ErrForkedCRS = errors.New("forked CRS") + // ErrMissingPreviousCRS means previous CRS not found when + // proposing a specific round of CRS. + ErrMissingPreviousCRS = errors.New("missing previous CRS") + // ErrUnknownStateChangeType means a StateChangeType is not recognized. + ErrUnknownStateChangeType = errors.New("unknown state change type") + // ErrProposerIsFinal means a proposer of one complaint is finalized. + ErrProposerIsFinal = errors.New("proposer is final") +) + +// Types of state change. +const ( + StateChangeNothing StateChangeType = iota + // DKG & CRS + StateAddCRS + StateAddDKGComplaint + StateAddDKGMasterPublicKey + StateAddDKGFinal + // Configuration related. + StateChangeNumChains + StateChangeLambdaBA + StateChangeLambdaDKG + StateChangeRoundInterval + StateChangeMinBlockInterval + StateChangeMaxBlockInterval + StateChangeK + StateChangePhiRatio + StateChangeNotarySetSize + StateChangeDKGSetSize + // Node set related. + StateAddNode +) + +type crsAdditionRequest struct { + Round uint64 `json:"round"` + CRS common.Hash `json:"crs"` +} + +// StateChangeRequest carries information of state change request. +type StateChangeRequest struct { + Type StateChangeType `json:"type"` + Payload interface{} `json:"payload"` +} + +type rawStateChangeRequest struct { + Type StateChangeType + Payload rlp.RawValue +} + +// State emulates what the global state in governace contract on a fullnode. +type State struct { + // Configuration related. 
+ numChains uint32 + lambdaBA time.Duration + lambdaDKG time.Duration + k int + phiRatio float32 + notarySetSize uint32 + dkgSetSize uint32 + roundInterval time.Duration + minBlockInterval time.Duration + maxBlockInterval time.Duration + // Nodes + nodes map[types.NodeID]crypto.PublicKey + // DKG & CRS + dkgComplaints map[uint64]map[types.NodeID][]*types.DKGComplaint + dkgMasterPublicKeys map[uint64]map[types.NodeID]*types.DKGMasterPublicKey + dkgFinals map[uint64]map[types.NodeID]*types.DKGFinalize + crs []common.Hash + // Other stuffs + local bool + lock sync.RWMutex + // ChangeRequest(s) are organized as map, indexed by type of state change. + // For each time to apply state change, only the last request would be + // applied. + pendingChangedConfigs map[StateChangeType]interface{} + pendingNodes [][]byte + pendingDKGComplaints []*types.DKGComplaint + pendingDKGFinals []*types.DKGFinalize + pendingDKGMasterPublicKeys []*types.DKGMasterPublicKey + pendingCRS []*crsAdditionRequest + pendingChangesLock sync.Mutex +} + +// NewState constructs an State instance with genesis information, including: +// - node set +// - crs +func NewState( + nodePubKeys []crypto.PublicKey, lambda time.Duration, local bool) *State { + nodes := make(map[types.NodeID]crypto.PublicKey) + for _, key := range nodePubKeys { + nodes[types.NewNodeID(key)] = key + } + genesisCRS := crypto.Keccak256Hash([]byte("__ DEXON")) + return &State{ + local: local, + numChains: uint32(len(nodes)), + lambdaBA: lambda, + lambdaDKG: lambda * 10, + roundInterval: lambda * 10000, + minBlockInterval: time.Millisecond * 1, + maxBlockInterval: lambda * 8, + crs: []common.Hash{genesisCRS}, + nodes: nodes, + phiRatio: 0.667, + k: 0, + notarySetSize: uint32(len(nodes)), + dkgSetSize: uint32(len(nodes)), + pendingChangedConfigs: make(map[StateChangeType]interface{}), + dkgFinals: make( + map[uint64]map[types.NodeID]*types.DKGFinalize), + dkgComplaints: make( + map[uint64]map[types.NodeID][]*types.DKGComplaint), + dkgMasterPublicKeys: make( + map[uint64]map[types.NodeID]*types.DKGMasterPublicKey), + } +} + +// Snapshot returns configration that could be snapshotted. +func (s *State) Snapshot() (*types.Config, []crypto.PublicKey) { + s.lock.RLock() + defer s.lock.RUnlock() + // Clone a node set. 
+ nodes := make([]crypto.PublicKey, 0, len(s.nodes)) + for _, key := range s.nodes { + nodes = append(nodes, key) + } + return &types.Config{ + NumChains: s.numChains, + LambdaBA: s.lambdaBA, + LambdaDKG: s.lambdaDKG, + K: s.k, + PhiRatio: s.phiRatio, + NotarySetSize: s.notarySetSize, + DKGSetSize: s.dkgSetSize, + RoundInterval: s.roundInterval, + MinBlockInterval: s.minBlockInterval, + MaxBlockInterval: s.maxBlockInterval, + }, nodes +} + +func (s *State) unpackPayload( + raw *rawStateChangeRequest) (v interface{}, err error) { + switch raw.Type { + case StateAddCRS: + v = &crsAdditionRequest{} + err = rlp.DecodeBytes(raw.Payload, v) + case StateAddDKGComplaint: + v = &types.DKGComplaint{} + err = rlp.DecodeBytes(raw.Payload, v) + case StateAddDKGMasterPublicKey: + v = &types.DKGMasterPublicKey{} + err = rlp.DecodeBytes(raw.Payload, v) + case StateAddDKGFinal: + v = &types.DKGFinalize{} + err = rlp.DecodeBytes(raw.Payload, v) + case StateChangeNumChains: + var tmp uint32 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangeLambdaBA: + var tmp uint64 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangeLambdaDKG: + var tmp uint64 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangeRoundInterval: + var tmp uint64 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangeMinBlockInterval: + var tmp uint64 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangeMaxBlockInterval: + var tmp uint64 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangeK: + var tmp uint64 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangePhiRatio: + var tmp uint32 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangeNotarySetSize: + var tmp uint32 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateChangeDKGSetSize: + var tmp uint32 + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + case StateAddNode: + var tmp []byte + err = rlp.DecodeBytes(raw.Payload, &tmp) + v = tmp + default: + err = ErrUnknownStateChangeType + } + if err != nil { + return + } + return +} + +// Apply change requests, this function would also +// be called when we extract these request from delivered blocks. +func (s *State) Apply(reqsAsBytes []byte) (err error) { + // Try to unmarshal this byte stream into []*StateChangeRequest. + rawReqs := []*rawStateChangeRequest{} + if err = rlp.DecodeBytes(reqsAsBytes, &rawReqs); err != nil { + return + } + var reqs []*StateChangeRequest + for _, r := range rawReqs { + var payload interface{} + if payload, err = s.unpackPayload(r); err != nil { + return + } + reqs = append(reqs, &StateChangeRequest{ + Type: r.Type, + Payload: payload, + }) + } + s.lock.Lock() + defer s.lock.Unlock() + for _, req := range reqs { + if err = s.applyRequest(req); err != nil { + return + } + } + return +} + +// PackRequests pack current pending requests as byte slice, which +// could be sent as blocks' payload and unmarshall back to apply. +func (s *State) PackRequests() (b []byte, err error) { + packed := []*StateChangeRequest{} + s.pendingChangesLock.Lock() + defer s.pendingChangesLock.Unlock() + // Pack simple configuration changes first. There should be no + // validity problems for those changes. + for k, v := range s.pendingChangedConfigs { + packed = append(packed, &StateChangeRequest{ + Type: k, + Payload: v, + }) + } + s.pendingChangedConfigs = make(map[StateChangeType]interface{}) + // For other changes, we need to check their validity. 
+ s.lock.RLock() + defer s.lock.RUnlock() + for _, bytesOfKey := range s.pendingNodes { + packed = append(packed, &StateChangeRequest{ + Type: StateAddNode, + Payload: bytesOfKey, + }) + } + for _, comp := range s.pendingDKGComplaints { + packed = append(packed, &StateChangeRequest{ + Type: StateAddDKGComplaint, + Payload: comp, + }) + } + for _, final := range s.pendingDKGFinals { + packed = append(packed, &StateChangeRequest{ + Type: StateAddDKGFinal, + Payload: final, + }) + } + for _, masterPubKey := range s.pendingDKGMasterPublicKeys { + packed = append(packed, &StateChangeRequest{ + Type: StateAddDKGMasterPublicKey, + Payload: masterPubKey, + }) + } + for _, crs := range s.pendingCRS { + packed = append(packed, &StateChangeRequest{ + Type: StateAddCRS, + Payload: crs, + }) + } + if b, err = rlp.EncodeToBytes(packed); err != nil { + return + } + return +} + +// isValidRequest checks if this request is valid to proceed or not. +func (s *State) isValidRequest(req *StateChangeRequest) (err error) { + // NOTE: there would be no lock in this helper, callers should be + // responsible for acquiring appropriate lock. + switch req.Type { + case StateAddDKGComplaint: + comp := req.Payload.(*types.DKGComplaint) + // If we've received DKG final from that proposer, we would ignore + // its complaint. + if _, exists := s.dkgFinals[comp.Round][comp.ProposerID]; exists { + return ErrProposerIsFinal + } + // If we've received identical complaint, ignore it. + compForRound, exists := s.dkgComplaints[comp.Round] + if !exists { + break + } + comps, exists := compForRound[comp.ProposerID] + if !exists { + break + } + for _, tmpComp := range comps { + if tmpComp == comp { + return ErrDuplicatedChange + } + } + case StateAddCRS: + crsReq := req.Payload.(*crsAdditionRequest) + if uint64(len(s.crs)) > crsReq.Round { + if !s.crs[crsReq.Round].Equal(crsReq.CRS) { + return ErrForkedCRS + } + return ErrDuplicatedChange + } else if uint64(len(s.crs)) == crsReq.Round { + return nil + } else { + return ErrMissingPreviousCRS + } + } + return nil +} + +// applyRequest applies a single StateChangeRequest. +func (s *State) applyRequest(req *StateChangeRequest) error { + // NOTE: there would be no lock in this helper, callers should be + // responsible for acquiring appropriate lock. 
+ switch req.Type { + case StateAddNode: + pubKey, err := ecdsa.NewPublicKeyFromByteSlice(req.Payload.([]byte)) + if err != nil { + return err + } + s.nodes[types.NewNodeID(pubKey)] = pubKey + case StateAddCRS: + crsRequest := req.Payload.(*crsAdditionRequest) + if crsRequest.Round != uint64(len(s.crs)) { + return ErrDuplicatedChange + } + s.crs = append(s.crs, crsRequest.CRS) + case StateAddDKGComplaint: + comp := req.Payload.(*types.DKGComplaint) + if _, exists := s.dkgComplaints[comp.Round]; !exists { + s.dkgComplaints[comp.Round] = make( + map[types.NodeID][]*types.DKGComplaint) + } + s.dkgComplaints[comp.Round][comp.ProposerID] = append( + s.dkgComplaints[comp.Round][comp.ProposerID], comp) + case StateAddDKGMasterPublicKey: + mKey := req.Payload.(*types.DKGMasterPublicKey) + if _, exists := s.dkgMasterPublicKeys[mKey.Round]; !exists { + s.dkgMasterPublicKeys[mKey.Round] = make( + map[types.NodeID]*types.DKGMasterPublicKey) + } + s.dkgMasterPublicKeys[mKey.Round][mKey.ProposerID] = mKey + case StateAddDKGFinal: + final := req.Payload.(*types.DKGFinalize) + if _, exists := s.dkgFinals[final.Round]; !exists { + s.dkgFinals[final.Round] = make(map[types.NodeID]*types.DKGFinalize) + } + s.dkgFinals[final.Round][final.ProposerID] = final + case StateChangeNumChains: + s.numChains = req.Payload.(uint32) + case StateChangeLambdaBA: + s.lambdaBA = time.Duration(req.Payload.(uint64)) + case StateChangeLambdaDKG: + s.lambdaDKG = time.Duration(req.Payload.(uint64)) + case StateChangeRoundInterval: + s.roundInterval = time.Duration(req.Payload.(uint64)) + case StateChangeMinBlockInterval: + s.minBlockInterval = time.Duration(req.Payload.(uint64)) + case StateChangeMaxBlockInterval: + s.maxBlockInterval = time.Duration(req.Payload.(uint64)) + case StateChangeK: + s.k = int(req.Payload.(uint64)) + case StateChangePhiRatio: + s.phiRatio = math.Float32frombits(req.Payload.(uint32)) + case StateChangeNotarySetSize: + s.notarySetSize = req.Payload.(uint32) + case StateChangeDKGSetSize: + s.dkgSetSize = req.Payload.(uint32) + default: + return errors.New("you are definitely kidding me") + } + return nil +} + +// ProposeCRS propose a new CRS for a specific round. +func (s *State) ProposeCRS(round uint64, crs common.Hash) (err error) { + err = s.RequestChange(StateAddCRS, &crsAdditionRequest{ + Round: round, + CRS: crs, + }) + return +} + +// RequestChange submits a state change request. +func (s *State) RequestChange( + t StateChangeType, payload interface{}) (err error) { + // Patch input parameter's type. 
+ switch t { + case StateAddNode: + payload = payload.(crypto.PublicKey).Bytes() + case StateChangeLambdaBA, + StateChangeLambdaDKG, + StateChangeRoundInterval, + StateChangeMinBlockInterval, + StateChangeMaxBlockInterval: + payload = uint64(payload.(time.Duration)) + case StateChangeK: + payload = uint64(payload.(int)) + case StateChangePhiRatio: + payload = math.Float32bits(payload.(float32)) + } + req := &StateChangeRequest{ + Type: t, + Payload: payload, + } + if s.local { + err = func() error { + s.lock.Lock() + defer s.lock.Unlock() + if err := s.isValidRequest(req); err != nil { + return err + } + return s.applyRequest(req) + }() + return + } + s.lock.RLock() + defer s.lock.RUnlock() + if err = s.isValidRequest(req); err != nil { + return + } + s.pendingChangesLock.Lock() + defer s.pendingChangesLock.Unlock() + switch t { + case StateAddNode: + s.pendingNodes = append(s.pendingNodes, payload.([]byte)) + case StateAddCRS: + s.pendingCRS = append(s.pendingCRS, payload.(*crsAdditionRequest)) + case StateAddDKGComplaint: + s.pendingDKGComplaints = append( + s.pendingDKGComplaints, payload.(*types.DKGComplaint)) + case StateAddDKGMasterPublicKey: + s.pendingDKGMasterPublicKeys = append( + s.pendingDKGMasterPublicKeys, payload.(*types.DKGMasterPublicKey)) + case StateAddDKGFinal: + s.pendingDKGFinals = append( + s.pendingDKGFinals, payload.(*types.DKGFinalize)) + default: + s.pendingChangedConfigs[t] = payload + } + return +} + +// CRS access crs proposed for that round. +func (s *State) CRS(round uint64) common.Hash { + s.lock.RLock() + defer s.lock.RUnlock() + if round >= uint64(len(s.crs)) { + return common.Hash{} + } + return s.crs[round] +} + +// DKGComplaints access current received dkg complaints for that round. +// This information won't be snapshot, thus can't be cached in test.Governance. +func (s *State) DKGComplaints(round uint64) []*types.DKGComplaint { + s.lock.RLock() + defer s.lock.RUnlock() + comps, exists := s.dkgComplaints[round] + if !exists { + return nil + } + tmpComps := make([]*types.DKGComplaint, 0, len(comps)) + for _, compProp := range comps { + for _, comp := range compProp { + bytes, err := rlp.EncodeToBytes(comp) + if err != nil { + panic(err) + } + compCopy := &types.DKGComplaint{} + if err = rlp.DecodeBytes(bytes, compCopy); err != nil { + panic(err) + } + tmpComps = append(tmpComps, compCopy) + } + } + return tmpComps +} + +// DKGMasterPublicKeys access current received dkg master public keys for that +// round. This information won't be snapshot, thus can't be cached in +// test.Governance. +func (s *State) DKGMasterPublicKeys(round uint64) []*types.DKGMasterPublicKey { + s.lock.RLock() + defer s.lock.RUnlock() + masterPublicKeys, exists := s.dkgMasterPublicKeys[round] + if !exists { + return nil + } + mpks := make([]*types.DKGMasterPublicKey, 0, len(masterPublicKeys)) + for _, mpk := range masterPublicKeys { + // Return a deep copied master public keys. + b, err := rlp.EncodeToBytes(mpk) + if err != nil { + panic(err) + } + mpkCopy := types.NewDKGMasterPublicKey() + if err = rlp.DecodeBytes(b, mpkCopy); err != nil { + panic(err) + } + mpks = append(mpks, mpkCopy) + } + return mpks +} + +// IsDKGFinal checks if current received dkg finals exceeds threshold. +// This information won't be snapshot, thus can't be cached in test.Governance. 
+func (s *State) IsDKGFinal(round uint64, threshold int) bool { + s.lock.RLock() + defer s.lock.RUnlock() + return len(s.dkgFinals[round]) > threshold +} diff --git a/core/test/state_test.go b/core/test/state_test.go new file mode 100644 index 0000000..b5ed383 --- /dev/null +++ b/core/test/state_test.go @@ -0,0 +1,277 @@ +// Copyright 2018 The dexon-consensus-core Authors +// This file is part of the dexon-consensus-core library. +// +// The dexon-consensus-core library is free software: you can redistribute it +// and/or modify it under the terms of the GNU Lesser General Public License as +// published by the Free Software Foundation, either version 3 of the License, +// or (at your option) any later version. +// +// The dexon-consensus-core library is distributed in the hope that it will be +// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +// General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the dexon-consensus-core library. If not, see +// <http://www.gnu.org/licenses/>. + +package test + +import ( + "sort" + "testing" + "time" + + "github.com/dexon-foundation/dexon-consensus-core/common" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto/dkg" + "github.com/dexon-foundation/dexon-consensus-core/core/crypto/ecdsa" + "github.com/dexon-foundation/dexon-consensus-core/core/types" + "github.com/stretchr/testify/suite" +) + +type StateTestSuite struct { + suite.Suite +} + +func (s *StateTestSuite) newDKGMasterPublicKey( + round uint64) *types.DKGMasterPublicKey { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + pubKey := prvKey.PublicKey() + nodeID := types.NewNodeID(pubKey) + _, pubShare := dkg.NewPrivateKeyShares(3) + dID, err := dkg.BytesID(nodeID.Hash[:]) + s.Require().NoError(err) + return &types.DKGMasterPublicKey{ + ProposerID: nodeID, + Round: round, + DKGID: dID, + PublicKeyShares: *pubShare, + } +} + +func (s *StateTestSuite) newDKGComplaint(round uint64) *types.DKGComplaint { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + pubKey := prvKey.PublicKey() + nodeID := types.NewNodeID(pubKey) + // TODO(mission): sign it, and it doesn't make sense to complaint self. + return &types.DKGComplaint{ + ProposerID: nodeID, + Round: round, + PrivateShare: types.DKGPrivateShare{ + ProposerID: nodeID, + ReceiverID: nodeID, + Round: round, + PrivateShare: *dkg.NewPrivateKey(), + }, + } +} + +func (s *StateTestSuite) newDKGFinal(round uint64) *types.DKGFinalize { + prvKey, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + pubKey := prvKey.PublicKey() + nodeID := types.NewNodeID(pubKey) + // TODO(mission): sign it. 
+ return &types.DKGFinalize{ + ProposerID: nodeID, + Round: round, + } +} + +func (s *StateTestSuite) genNodes(count int) (nodes []crypto.PublicKey) { + for i := 0; i < count; i++ { + prv, err := ecdsa.NewPrivateKey() + s.Require().NoError(err) + nodes = append(nodes, prv.PublicKey()) + } + return +} + +func (s *StateTestSuite) compareNodes(node1, node2 []crypto.PublicKey) bool { + id1 := common.Hashes{} + for _, n := range node1 { + id1 = append(id1, types.NewNodeID(n).Hash) + } + sort.Sort(id1) + id2 := common.Hashes{} + for _, n := range node2 { + id2 = append(id2, types.NewNodeID(n).Hash) + } + sort.Sort(id2) + if len(id1) != len(id2) { + return false + } + for idx, id := range id1 { + if id != id2[idx] { + return false + } + } + return true +} + +func (s *StateTestSuite) findNode( + nodes []crypto.PublicKey, node crypto.PublicKey) bool { + nodeID := types.NewNodeID(node) + for _, n := range nodes { + nID := types.NewNodeID(n) + if nID == nodeID { + return true + } + } + return false +} + +func (s *StateTestSuite) makeDKGChanges( + st *State, + masterPubKey *types.DKGMasterPublicKey, + complaint *types.DKGComplaint, + final *types.DKGFinalize) { + st.RequestChange(StateAddDKGMasterPublicKey, masterPubKey) + st.RequestChange(StateAddDKGComplaint, complaint) + st.RequestChange(StateAddDKGFinal, final) +} + +func (s *StateTestSuite) makeConfigChanges(st *State) { + st.RequestChange(StateChangeNumChains, uint32(7)) + st.RequestChange(StateChangeLambdaBA, time.Nanosecond) + st.RequestChange(StateChangeLambdaDKG, time.Millisecond) + st.RequestChange(StateChangeRoundInterval, time.Hour) + st.RequestChange(StateChangeMinBlockInterval, time.Second) + st.RequestChange(StateChangeMaxBlockInterval, time.Minute) + st.RequestChange(StateChangeK, 1) + st.RequestChange(StateChangePhiRatio, float32(0.5)) + st.RequestChange(StateChangeNotarySetSize, uint32(5)) + st.RequestChange(StateChangeDKGSetSize, uint32(6)) +} + +func (s *StateTestSuite) checkConfigChanges(config *types.Config) { + req := s.Require() + req.Equal(config.NumChains, uint32(7)) + req.Equal(config.LambdaBA, time.Nanosecond) + req.Equal(config.LambdaDKG, time.Millisecond) + req.Equal(config.RoundInterval, time.Hour) + req.Equal(config.MinBlockInterval, time.Second) + req.Equal(config.MaxBlockInterval, time.Minute) + req.Equal(config.K, 1) + req.Equal(config.PhiRatio, float32(0.5)) + req.Equal(config.NotarySetSize, uint32(5)) + req.Equal(config.DKGSetSize, uint32(6)) +} + +func (s *StateTestSuite) TestLocalMode() { + // Test State with local mode. + var ( + req = s.Require() + lambda = 250 * time.Millisecond + ) + genesisNodes := s.genNodes(20) + st := NewState(genesisNodes, lambda, true) + config1, nodes1 := st.Snapshot() + req.True(s.compareNodes(genesisNodes, nodes1)) + // Check settings of config1 affected by genesisNodes and lambda. + req.Equal(config1.NumChains, uint32(len(genesisNodes))) + req.Equal(config1.LambdaBA, lambda) + req.Equal(config1.LambdaDKG, lambda*10) + req.Equal(config1.RoundInterval, lambda*10000) + req.Equal(config1.MaxBlockInterval, lambda*8) + req.Equal(config1.NotarySetSize, uint32(len(genesisNodes))) + req.Equal(config1.DKGSetSize, uint32(len(genesisNodes))) + req.Equal(config1.K, 0) + req.Equal(config1.PhiRatio, float32(0.667)) + // Request some changes, every fields for config should be affected. + s.makeConfigChanges(st) + // Add new node. 
+ prvKey, err := ecdsa.NewPrivateKey() + req.NoError(err) + pubKey := prvKey.PublicKey() + st.RequestChange(StateAddNode, pubKey) + config2, newNodes := st.Snapshot() + // Check if config changes are applied. + s.checkConfigChanges(config2) + // Check if new node is added. + req.True(s.findNode(newNodes, pubKey)) + // Test adding CRS. + crs := common.NewRandomHash() + req.NoError(st.ProposeCRS(1, crs)) + req.Equal(st.CRS(1), crs) + // Test adding node set, DKG complaints, final, master public key. + // Make sure everything is empty before changed. + req.Empty(st.DKGMasterPublicKeys(2)) + req.Empty(st.DKGComplaints(2)) + req.False(st.IsDKGFinal(2, 0)) + // Add DKG stuffs. + masterPubKey := s.newDKGMasterPublicKey(2) + comp := s.newDKGComplaint(2) + final := s.newDKGFinal(2) + s.makeDKGChanges(st, masterPubKey, comp, final) + // Check DKGMasterPublicKeys. + masterKeyForRound := st.DKGMasterPublicKeys(2) + req.Len(masterKeyForRound, 1) + req.True(masterKeyForRound[0].Equal(masterPubKey)) + // Check DKGComplaints. + compForRound := st.DKGComplaints(2) + req.Len(compForRound, 1) + req.True(compForRound[0].Equal(comp)) + // Check IsDKGFinal. + req.True(st.IsDKGFinal(2, 0)) +} + +func (s *StateTestSuite) TestPacking() { + // Make sure everything works when requests are packing + // and unpacked to apply. + var ( + req = s.Require() + lambda = 250 * time.Millisecond + ) + // Make config changes. + genesisNodes := s.genNodes(20) + st := NewState(genesisNodes, lambda, false) + s.makeConfigChanges(st) + // Add new CRS. + crs := common.NewRandomHash() + req.NoError(st.ProposeCRS(1, crs)) + // Add new node. + prvKey, err := ecdsa.NewPrivateKey() + req.NoError(err) + pubKey := prvKey.PublicKey() + st.RequestChange(StateAddNode, pubKey) + // Add DKG stuffs. + masterPubKey := s.newDKGMasterPublicKey(2) + comp := s.newDKGComplaint(2) + final := s.newDKGFinal(2) + s.makeDKGChanges(st, masterPubKey, comp, final) + // Make sure everything is empty before changed. + req.Empty(st.DKGMasterPublicKeys(2)) + req.Empty(st.DKGComplaints(2)) + req.False(st.IsDKGFinal(2, 0)) + // Pack changes into bytes. + b, err := st.PackRequests() + req.NoError(err) + req.NotEmpty(b) + // Apply those bytes back. + req.NoError(st.Apply(b)) + // Check if configs are changed. + config, nodes := st.Snapshot() + s.checkConfigChanges(config) + // Check if CRS is added. + req.Equal(st.CRS(1), crs) + // Check if new node is added. + req.True(s.findNode(nodes, pubKey)) + // Check DKGMasterPublicKeys. + masterKeyForRound := st.DKGMasterPublicKeys(2) + req.Len(masterKeyForRound, 1) + req.True(masterKeyForRound[0].Equal(masterPubKey)) + // Check DKGComplaints. + compForRound := st.DKGComplaints(2) + req.Len(compForRound, 1) + req.True(compForRound[0].Equal(comp)) + // Check IsDKGFinal. + req.True(st.IsDKGFinal(2, 0)) +} + +func TestState(t *testing.T) { + suite.Run(t, new(StateTestSuite)) +} diff --git a/core/test/utils.go b/core/test/utils.go index 2fc21ce..0bb82b8 100644 --- a/core/test/utils.go +++ b/core/test/utils.go @@ -102,3 +102,17 @@ func FindMyIP() (ip string, err error) { err = fmt.Errorf("unable to find IP") return } + +// NewKeys creates private keys and corresponding public keys as slice. 
+func NewKeys(count int) ( + prvKeys []crypto.PrivateKey, pubKeys []crypto.PublicKey, err error) { + for i := 0; i < count; i++ { + var prvKey crypto.PrivateKey + if prvKey, err = ecdsa.NewPrivateKey(); err != nil { + return + } + prvKeys = append(prvKeys, prvKey) + pubKeys = append(pubKeys, prvKey.PublicKey()) + } + return +} diff --git a/integration_test/utils.go b/integration_test/utils.go index d9c4995..df6c215 100644 --- a/integration_test/utils.go +++ b/integration_test/utils.go @@ -16,18 +16,20 @@ func PrepareNodes( dbs map[types.NodeID]blockdb.BlockDatabase, nodes map[types.NodeID]*Node, err error) { - apps = make(map[types.NodeID]*test.App) dbs = make(map[types.NodeID]blockdb.BlockDatabase) nodes = make(map[types.NodeID]*Node) - - gov, err := test.NewGovernance(nodeCount, 700*time.Millisecond) + prvKeys, pubKeys, err := test.NewKeys(nodeCount) + if err != nil { + return + } + gov, err := test.NewGovernance(pubKeys, 700*time.Millisecond) if err != nil { return } dMoment := time.Now().UTC() - for _, prvKey := range gov.PrivateKeys() { - nID := types.NewNodeID(prvKey.PublicKey()) + for idx, prvKey := range prvKeys { + nID := types.NewNodeID(pubKeys[idx]) apps[nID] = test.NewApp() dbs[nID], err = blockdb.NewMemBackedBlockDB() if err != nil { |