author     Mission Liao <mission.liao@dexon.org>    2018-08-28 13:13:21 +0800
committer  GitHub <noreply@github.com>              2018-08-28 13:13:21 +0800
commit     7e9d2db5576d697b578669c935b2e7bbf9422ec7 (patch)
tree       e4fb9f4b95b23934a142a88ee05fbd49dff50b3c /simulation
parent     9c8f9a447bfd768a7b29db904bd604410ec66a09 (diff)
download   tangerine-consensus-7e9d2db5576d697b578669c935b2e7bbf9422ec7.tar.gz
           tangerine-consensus-7e9d2db5576d697b578669c935b2e7bbf9422ec7.tar.zst
           tangerine-consensus-7e9d2db5576d697b578669c935b2e7bbf9422ec7.zip
core: tune performance (#73)
- Avoid using recursive functions in the critical path.
- Do not write through when using levelDB; data written to levelDB stays safe across a panic even without forcing a write-through every time.
- Dump the count of confirmed blocks proposed by the node itself.
- Avoid allocating variables inside loops.
- Return only the length of the acking node set; that is all total ordering needs.
- Fix a potential bug: make sure win records are updated when the acking height vectors of candidates change.
- Keep dirty validators in a slice.
- Add a cache for objects to ease the pressure on the garbage collector (see the sketch after this message).
- Cache global acking status during total ordering.
- Add a method to recycle blocks.
- Marshal JSON once per broadcast instead of once per send.
- Call updateWinRecord in parallel.
- Log the average / deviation of latencies when the simulation finishes.
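For the object cache and block recycling mentioned above, here is a minimal sketch of the pattern using sync.Pool. The Block struct and function names below are stand-ins for illustration only, not the actual core implementation.

package main

import (
	"fmt"
	"sync"
)

// Block is a stand-in for types.Block; only the pooling pattern matters here.
type Block struct {
	Hash    string
	Payload []byte
}

// blockPool caches Block objects so hot paths reuse allocations instead of
// producing garbage on every message.
var blockPool = sync.Pool{
	New: func() interface{} { return new(Block) },
}

// recycleBlock resets a block and hands it back to the pool, mirroring the
// role of the types.RecycleBlock call added in simulation/validator.go.
func recycleBlock(b *Block) {
	*b = Block{}
	blockPool.Put(b)
}

func main() {
	b := blockPool.Get().(*Block) // reuse a cached block if one is available
	b.Hash = "deadbeef"
	fmt.Println("processing block", b.Hash)
	recycleBlock(b) // return it once processing is done
}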
Diffstat (limited to 'simulation')
-rw-r--r--  simulation/app.go           |  2
-rw-r--r--  simulation/tcp-network.go   | 70
-rw-r--r--  simulation/validator.go     |  2
-rw-r--r--  simulation/verification.go  | 33
4 files changed, 70 insertions, 37 deletions
diff --git a/simulation/app.go b/simulation/app.go
index c7a7ccf..635b071 100644
--- a/simulation/app.go
+++ b/simulation/app.go
@@ -106,7 +106,7 @@ func (a *simApp) TotalOrderingDeliver(blockHashes common.Hashes, early bool) {
 	}
 	a.Outputs = blocks
 	a.Early = early
-	fmt.Println("OUTPUT", a.ValidatorID, a.Early, a.Outputs)
+	//fmt.Println("OUTPUT", a.ValidatorID, a.Early, a.Outputs)
 	confirmLatency := []time.Duration{}
diff --git a/simulation/tcp-network.go b/simulation/tcp-network.go
index bb63bd1..f30284b 100644
--- a/simulation/tcp-network.go
+++ b/simulation/tcp-network.go
@@ -239,12 +239,40 @@ func (n *TCPNetwork) Join(endpoint Endpoint) chan interface{} {
 }
 // Send sends a msg to another client.
-func (n *TCPNetwork) Send(destID types.ValidatorID, msg interface{}) {
+func (n *TCPNetwork) Send(destID types.ValidatorID, messageJSON []byte) {
 	clientAddr, exists := n.endpoints[destID]
 	if !exists {
 		return
 	}
+	msgURL := fmt.Sprintf("http://%s/msg", clientAddr)
+	go func() {
+		time.Sleep(n.model.Delay())
+		for i := 0; i < retries; i++ {
+			req, err := http.NewRequest(
+				http.MethodPost, msgURL, strings.NewReader(string(messageJSON)))
+			if err != nil {
+				continue
+			}
+			req.Header.Add("ID", n.endpoint.GetID().String())
+
+			resp, err := n.client.Do(req)
+			if err == nil {
+				defer resp.Body.Close()
+				io.Copy(ioutil.Discard, resp.Body)
+			}
+			if err == nil && resp.StatusCode == http.StatusOK {
+				runtime.Goexit()
+			}
+
+			fmt.Printf("failed to submit message: %s\n", err)
+			time.Sleep(1 * time.Second)
+		}
+		fmt.Printf("failed to send message: %v\n", string(messageJSON))
+	}()
+}
+
+func (n *TCPNetwork) marshalMessage(msg interface{}) (messageJSON []byte) {
 	message := struct {
 		Type    string      `json:"type"`
 		Payload interface{} `json:"payload"`
@@ -270,65 +298,39 @@ func (n *TCPNetwork) Send(destID types.ValidatorID, msg interface{}) {
fmt.Printf("error: failed to marshal json: %v\n%+v\n", err, message)
return
}
-
- msgURL := fmt.Sprintf("http://%s/msg", clientAddr)
-
- go func() {
- time.Sleep(n.model.Delay())
- for i := 0; i < retries; i++ {
- req, err := http.NewRequest(
- http.MethodPost, msgURL, strings.NewReader(string(messageJSON)))
- if err != nil {
- continue
- }
- req.Header.Add("ID", n.endpoint.GetID().String())
-
- resp, err := n.client.Do(req)
- if err == nil {
- defer resp.Body.Close()
- io.Copy(ioutil.Discard, resp.Body)
- }
- if err == nil && resp.StatusCode == http.StatusOK {
- runtime.Goexit()
- }
-
- fmt.Printf("failed to submit message: %s\n", err)
- time.Sleep(1 * time.Second)
- }
- fmt.Printf("failed to send message: %v\n", msg)
- }()
+ return
}
// BroadcastBlock broadcast blocks into the network.
func (n *TCPNetwork) BroadcastBlock(block *types.Block) {
- block = block.Clone()
+ payload := n.marshalMessage(block)
for endpoint := range n.endpoints {
if endpoint == block.ProposerID {
continue
}
- n.Send(endpoint, block)
+ n.Send(endpoint, payload)
}
}
// BroadcastNotaryAck broadcast notaryAck into the network.
func (n *TCPNetwork) BroadcastNotaryAck(notaryAck *types.NotaryAck) {
- notaryAck = notaryAck.Clone()
+ payload := n.marshalMessage(notaryAck)
for endpoint := range n.endpoints {
if endpoint == notaryAck.ProposerID {
continue
}
- n.Send(endpoint, notaryAck)
+ n.Send(endpoint, payload)
}
}
// BroadcastVote broadcast vote into the network.
func (n *TCPNetwork) BroadcastVote(vote *types.Vote) {
- vote = vote.Clone()
+ payload := n.marshalMessage(vote)
for endpoint := range n.endpoints {
if endpoint == vote.ProposerID {
continue
}
- n.Send(endpoint, vote)
+ n.Send(endpoint, payload)
}
}
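The tcp-network.go change above splits message marshaling out of Send, so each Broadcast* call encodes the message once and hands the same byte slice to every peer instead of cloning and re-encoding per destination. A self-contained illustration of that pattern follows; the envelope field names mirror the diff, while the block fields and the "block" type string are placeholder assumptions.

package main

import (
	"encoding/json"
	"fmt"
)

// envelope mirrors the anonymous {type, payload} struct used in marshalMessage.
type envelope struct {
	Type    string      `json:"type"`
	Payload interface{} `json:"payload"`
}

// block is a placeholder for types.Block.
type block struct {
	Hash       string `json:"hash"`
	ProposerID string `json:"proposer_id"`
}

func main() {
	b := &block{Hash: "deadbeef", ProposerID: "v0"}
	// Marshal the message exactly once per broadcast...
	payload, err := json.Marshal(envelope{Type: "block", Payload: b})
	if err != nil {
		panic(err)
	}
	// ...then reuse the same bytes for every destination instead of
	// cloning the block and re-encoding it per peer.
	for _, peer := range []string{"v1", "v2", "v3"} {
		fmt.Printf("send %d bytes to %s\n", len(payload), peer)
	}
}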
diff --git a/simulation/validator.go b/simulation/validator.go
index b26603a..24de155 100644
--- a/simulation/validator.go
+++ b/simulation/validator.go
@@ -153,8 +153,8 @@ func (v *Validator) MsgServer(
 			v.app.addBlock(val)
 			if err := v.consensus.ProcessBlock(val); err != nil {
 				fmt.Println(err)
-				//panic(err)
 			}
+			types.RecycleBlock(val)
 		case *types.NotaryAck:
 			if err := v.consensus.ProcessNotaryAck(val); err != nil {
 				fmt.Println(err)
diff --git a/simulation/verification.go b/simulation/verification.go
index 574f3c5..ad2c911 100644
--- a/simulation/verification.go
+++ b/simulation/verification.go
@@ -24,6 +24,7 @@ import (
"time"
"github.com/dexon-foundation/dexon-consensus-core/common"
+ "github.com/dexon-foundation/dexon-consensus-core/core/test"
"github.com/dexon-foundation/dexon-consensus-core/core/types"
)
@@ -201,7 +202,7 @@ func VerifyTotalOrder(id types.ValidatorID,
 		if hasError {
 			log.Printf("[%d] Hash is %v from %v\n", i, hash, id)
 		} else {
-			log.Printf("Block %v confirmed\n", hash)
+			//log.Printf("Block %v confirmed\n", hash)
 		}
 	}
@@ -223,8 +224,38 @@ func LogStatus(peerTotalOrder PeerTotalOrder) {
 			totalOrder.CalculateBlocksPerSecond())
 		log.Printf(" Confirm Latency: %.2fms\n",
 			totalOrder.CalculateAverageConfirmLatency()*1000)
+		log.Printf(" Confirm Blocks: %v\n", len(totalOrder.status.confirmLatency))
 		intLatency, extLatency := totalOrder.CalculateAverageTimestampLatency()
 		log.Printf(" Internal Timestamp Latency: %.2fms\n", intLatency*1000)
 		log.Printf(" External Timestamp Latency: %.2fms\n", extLatency*1000)
 	}
+	logOverallLatency(peerTotalOrder)
+}
+
+// logOverallLatency prints overall status related to latency.
+func logOverallLatency(peerTotalOrder PeerTotalOrder) {
+	// Let's use the brute-force way since the simulation should be done
+	// at this moment.
+	var (
+		overallConfirmLatency           []time.Duration
+		overallInternalTimestampLatency []time.Duration
+		overallExternalTimestampLatency []time.Duration
+	)
+	for _, totalOrder := range peerTotalOrder {
+		overallConfirmLatency = append(
+			overallConfirmLatency, totalOrder.status.confirmLatency...)
+		overallInternalTimestampLatency = append(
+			overallInternalTimestampLatency,
+			totalOrder.status.internalTimestampLatency...)
+		overallExternalTimestampLatency = append(
+			overallExternalTimestampLatency,
+			totalOrder.status.externalTimestampLatency...)
+	}
+	log.Print("[Overall]\n")
+	avg, dev := test.CalcLatencyStatistics(overallConfirmLatency)
+	log.Printf(" Confirm Latency: %v, dev: %v\n", avg, dev)
+	avg, dev = test.CalcLatencyStatistics(overallInternalTimestampLatency)
+	log.Printf(" Internal Timestamp Latency: %v, dev: %v\n", avg, dev)
+	avg, dev = test.CalcLatencyStatistics(overallExternalTimestampLatency)
+	log.Printf(" External Timestamp Latency: %v, dev: %v\n", avg, dev)
 }
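logOverallLatency relies on test.CalcLatencyStatistics for the average and deviation figures. As a rough, self-contained sketch of that kind of computation (a hypothetical stand-in, not the actual core/test implementation):

package main

import (
	"fmt"
	"math"
	"time"
)

// calcLatencyStatistics returns the mean and standard deviation of a set of
// latencies. Hypothetical stand-in for test.CalcLatencyStatistics.
func calcLatencyStatistics(latencies []time.Duration) (avg, dev time.Duration) {
	if len(latencies) == 0 {
		return 0, 0
	}
	var sum time.Duration
	for _, l := range latencies {
		sum += l
	}
	avg = sum / time.Duration(len(latencies))
	var varSum float64
	for _, l := range latencies {
		d := float64(l - avg)
		varSum += d * d
	}
	dev = time.Duration(math.Sqrt(varSum / float64(len(latencies))))
	return avg, dev
}

func main() {
	samples := []time.Duration{
		120 * time.Millisecond,
		150 * time.Millisecond,
		90 * time.Millisecond,
	}
	avg, dev := calcLatencyStatistics(samples)
	fmt.Printf("Confirm Latency: %v, dev: %v\n", avg, dev)
}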