author    Péter Szilágyi <peterke@gmail.com>  2019-04-04 19:39:11 +0800
committer Péter Szilágyi <peterke@gmail.com>  2019-04-08 17:04:31 +0800
commit    0e63a70505e0011d8f668dba86c99071cee9790e (patch)
tree      084ea0bc35e2620e154a790e360df8c6994e5ff7 /core
parent    f1b00cffc828105c17c0ecacb2074874b752a9a0 (diff)
core: minor code polishes + rebase fixes
Diffstat (limited to 'core')
-rw-r--r--  core/blockchain.go       | 85
-rw-r--r--  core/blockchain_test.go  | 10
2 files changed, 54 insertions, 41 deletions
diff --git a/core/blockchain.go b/core/blockchain.go
index 117be8c72..bd55acf7f 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -1391,17 +1391,21 @@ func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (i
return 0, nil, nil, nil
}
-// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
-// to be part of the new canonical chain and accumulates potential missing transactions and post an
-// event about them
+// reorg takes two blocks, an old chain and a new chain, and reconstructs the
+// blocks to be part of the new canonical chain. It also accumulates potential
+// missing transactions and posts an event about them.
func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
var (
newChain types.Blocks
oldChain types.Blocks
commonBlock *types.Block
- deletedTxs types.Transactions
+
+ deletedTxs types.Transactions
+ addedTxs types.Transactions
+
deletedLogs []*types.Log
rebirthLogs []*types.Log
+
// collectLogs collects the logs that were generated during the
// processing of the block that corresponds with the given hash.
// These logs are later announced as deleted or reborn
@@ -1424,46 +1428,49 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
}
}
)
-
- // first reduce whoever is higher bound
+ // Reduce the longer chain to the same number as the shorter one
if oldBlock.NumberU64() > newBlock.NumberU64() {
- // reduce old chain
+ // Old chain is longer, gather all transactions and logs as deleted ones
for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
oldChain = append(oldChain, oldBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
-
collectLogs(oldBlock.Hash(), true)
}
} else {
- // reduce new chain and append new chain blocks for inserting later on
+ // New chain is longer, stash all blocks away for subsequent insertion
for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
newChain = append(newChain, newBlock)
}
}
if oldBlock == nil {
- return fmt.Errorf("Invalid old chain")
+ return fmt.Errorf("invalid old chain")
}
if newBlock == nil {
- return fmt.Errorf("Invalid new chain")
+ return fmt.Errorf("invalid new chain")
}
-
+ // Both sides of the reorg are at the same number, reduce both until the common
+ // ancestor is found
for {
+ // If the common ancestor was found, bail out
if oldBlock.Hash() == newBlock.Hash() {
commonBlock = oldBlock
break
}
-
+ // Remove an old block as well as stash away a new block
oldChain = append(oldChain, oldBlock)
- newChain = append(newChain, newBlock)
deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
collectLogs(oldBlock.Hash(), true)
- oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
+ newChain = append(newChain, newBlock)
+
+ // Step back with both chains
+ oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
if oldBlock == nil {
- return fmt.Errorf("Invalid old chain")
+ return fmt.Errorf("invalid old chain")
}
+ newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
if newBlock == nil {
- return fmt.Errorf("Invalid new chain")
+ return fmt.Errorf("invalid new chain")
}
}
// Ensure the user sees large reorgs
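For orientation, the loop in the hunk above is a lock-step walk back through two equal-height chains until the hashes match. The sketch below is a toy illustration only: the block type and the commonAncestor helper are hypothetical stand-ins, not core's types.Block or any real go-ethereum API.

// Toy illustration of the common-ancestor search performed by the reorg loop.
package main

import "fmt"

type block struct {
	hash   string
	parent *block
}

// commonAncestor steps both chains back to their parents in lock-step,
// assuming they already sit at the same height, until the hashes match.
func commonAncestor(oldBlock, newBlock *block) (*block, error) {
	for oldBlock != nil && newBlock != nil {
		if oldBlock.hash == newBlock.hash {
			return oldBlock, nil
		}
		oldBlock, newBlock = oldBlock.parent, newBlock.parent
	}
	return nil, fmt.Errorf("invalid chain")
}

func main() {
	genesis := &block{hash: "genesis"}
	oldTip := &block{hash: "a2", parent: &block{hash: "a1", parent: genesis}}
	newTip := &block{hash: "b2", parent: &block{hash: "b1", parent: genesis}}

	ancestor, err := commonAncestor(oldTip, newTip)
	if err != nil {
		panic(err)
	}
	fmt.Println(ancestor.hash) // prints "genesis"
}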
@@ -1478,42 +1485,46 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
}
// Insert the new chain, taking care of the proper incremental order
- var addedTxs types.Transactions
for i := len(newChain) - 1; i >= 0; i-- {
- // insert the block in the canonical way, re-writing history
+ // Insert the block in the canonical way, re-writing history
bc.insert(newChain[i])
- // collect reborn logs due to chain reorg(except head block)
+
+ // Collect reborn logs due to chain reorg (except the head block, iterating in reverse order)
if i != 0 {
collectLogs(newChain[i].Hash(), false)
}
- // write lookup entries for hash based transaction/receipt searches
+ // Write lookup entries for hash based transaction/receipt searches
rawdb.WriteTxLookupEntries(bc.db, newChain[i])
addedTxs = append(addedTxs, newChain[i].Transactions()...)
}
- // calculate the difference between deleted and added transactions
- diff := types.TxDifference(deletedTxs, addedTxs)
- // When transactions get deleted from the database that means the
- // receipts that were created in the fork must also be deleted
+ // When transactions get deleted from the database, the receipts that were
+ // created in the fork must also be deleted
batch := bc.db.NewBatch()
- for _, tx := range diff {
+ for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
rawdb.DeleteTxLookupEntry(batch, tx.Hash())
}
batch.Write()
- if len(deletedLogs) > 0 {
- go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
- }
- if len(rebirthLogs) > 0 {
- go bc.logsFeed.Send(rebirthLogs)
- }
- if len(oldChain) > 0 {
- go func() {
+ // If any logs need to be fired, do it now. In theory we could avoid creating
+ // this goroutine if there are no events to fire, but realistically that only
+ // ever happens if we're reorging empty blocks, which will only happen on idle
+ // networks where performance is not an issue either way.
+ //
+ // TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
+ // event ordering?
+ go func() {
+ if len(deletedLogs) > 0 {
+ bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
+ }
+ if len(rebirthLogs) > 0 {
+ bc.logsFeed.Send(rebirthLogs)
+ }
+ if len(oldChain) > 0 {
for _, block := range oldChain {
bc.chainSideFeed.Send(ChainSideEvent{Block: block})
}
- }()
- }
-
+ }
+ }()
return nil
}
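The batch delete above keys off types.TxDifference(deletedTxs, addedTxs), i.e. the transactions that lived only on the now-abandoned chain and whose lookup entries must go. As a rough sketch of that set difference (the tx type and txDifference helper below are hypothetical stand-ins, not the go-ethereum implementation):

// Conceptual sketch of a hash-keyed set difference, mirroring what the reorg
// computes before deleting transaction lookup entries.
package main

import "fmt"

// tx stands in for types.Transaction, keyed by a fake hash string.
type tx struct{ hash string }

// txDifference returns the transactions in a that do not appear in b.
func txDifference(a, b []tx) []tx {
	keep := make(map[string]struct{}, len(b))
	for _, t := range b {
		keep[t.hash] = struct{}{}
	}
	var diff []tx
	for _, t := range a {
		if _, ok := keep[t.hash]; !ok {
			diff = append(diff, t)
		}
	}
	return diff
}

func main() {
	deleted := []tx{{"0xaa"}, {"0xbb"}, {"0xcc"}} // txs on the old chain
	added := []tx{{"0xbb"}, {"0xcc"}, {"0xdd"}}   // txs re-included on the new chain
	fmt.Println(txDifference(deleted, added))     // [{0xaa}]
}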
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 7c76f1fc4..e1a0f33b7 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -934,6 +934,7 @@ func TestLogRebirth(t *testing.T) {
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
db = ethdb.NewMemDatabase()
+
// this code generates a log
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
@@ -1035,10 +1036,6 @@ func TestLogRebirth(t *testing.T) {
if _, err := blockchain.InsertChain(newBlocks); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
- // Rebirth logs should omit a newLogEvent
- if !<-newLogCh {
- t.Fatalf("failed to receive new log event")
- }
// Ensure removedLog events received
select {
case ev := <-rmLogsCh:
@@ -1048,6 +1045,10 @@ func TestLogRebirth(t *testing.T) {
case <-time.NewTimer(1 * time.Second).C:
t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
}
+ // Rebirth logs should emit a newLogEvent
+ if !<-newLogCh {
+ t.Fatalf("failed to receive new log event")
+ }
}
func TestSideLogRebirth(t *testing.T) {
@@ -1055,6 +1056,7 @@ func TestSideLogRebirth(t *testing.T) {
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
db = ethdb.NewMemDatabase()
+
// this code generates a log
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
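The swapped checks in TestLogRebirth line up with the single sender goroutine introduced in blockchain.go: removed-log events are now posted before rebirth logs, so the test waits for the RemovedLogsEvent first. A toy sketch of that ordering, using plain channels as stand-ins for the real event feeds and test harness:

// Toy sketch: one sender goroutine with a fixed send order means a consumer
// reading in that same order always succeeds, mirroring the reordered test.
package main

import "fmt"

func main() {
	rmLogsCh := make(chan string) // stand-in for the RemovedLogsEvent subscription
	newLogCh := make(chan string) // stand-in for the rebirth log subscription

	go func() {
		rmLogsCh <- "RemovedLogsEvent" // sent first, like deletedLogs
		newLogCh <- "rebirth log"      // sent second, like rebirthLogs
	}()

	fmt.Println(<-rmLogsCh) // consumed first, matching the moved check
	fmt.Println(<-newLogCh) // consumed second
}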