core: minor code polishes + rebase fixes

Péter Szilágyi 2019-04-04 14:39:11 +03:00
parent 690bd8a417
commit 43631aa1d6
No known key found for this signature in database
GPG Key ID: E9AE538CEDF8293D
2 changed files with 57 additions and 43 deletions

core/blockchain.go

@@ -1465,17 +1465,21 @@ func (bc *BlockChain) insertSidechain(block *types.Block, it *insertIterator) (i
     return 0, nil, nil, nil
 }
 
-// reorgs takes two blocks, an old chain and a new chain and will reconstruct the blocks and inserts them
-// to be part of the new canonical chain and accumulates potential missing transactions and post an
-// event about them
+// reorg takes two blocks, an old chain and a new chain and will reconstruct the
+// blocks and inserts them to be part of the new canonical chain and accumulates
+// potential missing transactions and post an event about them.
 func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
     var (
         newChain    types.Blocks
         oldChain    types.Blocks
         commonBlock *types.Block
-        deletedTxs  types.Transactions
+
+        deletedTxs types.Transactions
+        addedTxs   types.Transactions
+
         deletedLogs []*types.Log
         rebirthLogs []*types.Log
+
         // collectLogs collects the logs that were generated during the
         // processing of the block that corresponds with the given hash.
         // These logs are later announced as deleted or reborn
@@ -1498,46 +1502,49 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
             }
         }
     )
-    // first reduce whoever is higher bound
+    // Reduce the longer chain to the same number as the shorter one
     if oldBlock.NumberU64() > newBlock.NumberU64() {
-        // reduce old chain
+        // Old chain is longer, gather all transactions and logs as deleted ones
         for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
             oldChain = append(oldChain, oldBlock)
             deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
             collectLogs(oldBlock.Hash(), true)
         }
     } else {
-        // reduce new chain and append new chain blocks for inserting later on
+        // New chain is longer, stash all blocks away for subsequent insertion
         for ; newBlock != nil && newBlock.NumberU64() != oldBlock.NumberU64(); newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1) {
             newChain = append(newChain, newBlock)
         }
     }
     if oldBlock == nil {
-        return fmt.Errorf("Invalid old chain")
+        return fmt.Errorf("invalid old chain")
     }
     if newBlock == nil {
-        return fmt.Errorf("Invalid new chain")
+        return fmt.Errorf("invalid new chain")
     }
+    // Both sides of the reorg are at the same number, reduce both until the common
+    // ancestor is found
     for {
+        // If the common ancestor was found, bail out
         if oldBlock.Hash() == newBlock.Hash() {
             commonBlock = oldBlock
             break
         }
+        // Remove an old block as well as stash away a new block
         oldChain = append(oldChain, oldBlock)
-        newChain = append(newChain, newBlock)
         deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
         collectLogs(oldBlock.Hash(), true)
-        oldBlock, newBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1), bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
+        newChain = append(newChain, newBlock)
+
+        // Step back with both chains
+        oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
         if oldBlock == nil {
-            return fmt.Errorf("Invalid old chain")
+            return fmt.Errorf("invalid old chain")
         }
+        newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
         if newBlock == nil {
-            return fmt.Errorf("Invalid new chain")
+            return fmt.Errorf("invalid new chain")
         }
     }
     // Ensure the user sees large reorgs
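The hunk above restructures the common-ancestor search: the longer chain is first reduced to the height of the shorter one, then both chains are walked back in lockstep until their hashes match. The following standalone sketch shows that traversal in isolation, using a hypothetical block type with parent pointers in place of geth's BlockChain.GetBlock lookups:

package main

import (
    "errors"
    "fmt"
)

type block struct {
    number uint64
    hash   string
    parent *block
}

// findCommonAncestor mirrors the two phases of reorg: reduce whichever chain
// is higher down to the other's height, then step both chains back together
// until a shared hash is found.
func findCommonAncestor(oldBlock, newBlock *block) (*block, error) {
    if oldBlock.number > newBlock.number {
        for ; oldBlock != nil && oldBlock.number != newBlock.number; oldBlock = oldBlock.parent {
        }
    } else {
        for ; newBlock != nil && newBlock.number != oldBlock.number; newBlock = newBlock.parent {
        }
    }
    if oldBlock == nil || newBlock == nil {
        return nil, errors.New("invalid chain")
    }
    for oldBlock.hash != newBlock.hash {
        oldBlock, newBlock = oldBlock.parent, newBlock.parent
        if oldBlock == nil || newBlock == nil {
            return nil, errors.New("invalid chain")
        }
    }
    return oldBlock, nil
}

func main() {
    genesis := &block{number: 0, hash: "g0"}
    a1 := &block{number: 1, hash: "a1", parent: genesis}
    a2 := &block{number: 2, hash: "a2", parent: a1}
    b1 := &block{number: 1, hash: "b1", parent: genesis}
    b2 := &block{number: 2, hash: "b2", parent: b1}
    b3 := &block{number: 3, hash: "b3", parent: b2}

    ancestor, err := findCommonAncestor(a2, b3)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("common ancestor:", ancestor.hash) // common ancestor: g0
}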
@@ -1552,42 +1559,46 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
         log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "newnum", newBlock.Number(), "newhash", newBlock.Hash())
     }
     // Insert the new chain, taking care of the proper incremental order
-    var addedTxs types.Transactions
     for i := len(newChain) - 1; i >= 0; i-- {
-        // insert the block in the canonical way, re-writing history
+        // Insert the block in the canonical way, re-writing history
         bc.insert(newChain[i])
-        // collect reborn logs due to chain reorg(except head block)
+
+        // Collect reborn logs due to chain reorg (except head block (reverse order))
         if i != 0 {
             collectLogs(newChain[i].Hash(), false)
         }
-        // write lookup entries for hash based transaction/receipt searches
+        // Write lookup entries for hash based transaction/receipt searches
         rawdb.WriteTxLookupEntries(bc.db, newChain[i])
         addedTxs = append(addedTxs, newChain[i].Transactions()...)
     }
-    // calculate the difference between deleted and added transactions
-    diff := types.TxDifference(deletedTxs, addedTxs)
-    // When transactions get deleted from the database that means the
-    // receipts that were created in the fork must also be deleted
+    // When transactions get deleted from the database, the receipts that were
+    // created in the fork must also be deleted
     batch := bc.db.NewBatch()
-    for _, tx := range diff {
+    for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
         rawdb.DeleteTxLookupEntry(batch, tx.Hash())
     }
     batch.Write()
-    if len(deletedLogs) > 0 {
-        go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
-    }
-    if len(rebirthLogs) > 0 {
-        go bc.logsFeed.Send(rebirthLogs)
-    }
-    if len(oldChain) > 0 {
-        go func() {
+
+    // If any logs need to be fired, do it now. In theory we could avoid creating
+    // this goroutine if there are no events to fire, but realistcally that only
+    // ever happens if we're reorging empty blocks, which will only happen on idle
+    // networks where performance is not an issue either way.
+    //
+    // TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
+    // event ordering?
+    go func() {
+        if len(deletedLogs) > 0 {
+            bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
+        }
+        if len(rebirthLogs) > 0 {
+            bc.logsFeed.Send(rebirthLogs)
+        }
+        if len(oldChain) > 0 {
             for _, block := range oldChain {
                 bc.chainSideFeed.Send(ChainSideEvent{Block: block})
             }
-        }()
-    }
+        }
+    }()
     return nil
 }
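The last hunk folds the three separate event goroutines into a single one, so the RemovedLogsEvent, the rebirth logs and the chain-side events are always handed to their feeds in that order. A rough, self-contained illustration of why one sender goroutine fixes the relative ordering, with illustrative feeds and string payloads standing in for geth's actual wiring:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/event"
)

func main() {
    var (
        rmLogsFeed event.Feed // stand-in for bc.rmLogsFeed
        logsFeed   event.Feed // stand-in for bc.logsFeed

        rmLogsCh = make(chan string, 1)
        logsCh   = make(chan string, 1)
    )
    rmSub := rmLogsFeed.Subscribe(rmLogsCh)
    defer rmSub.Unsubscribe()
    logSub := logsFeed.Subscribe(logsCh)
    defer logSub.Unsubscribe()

    // One goroutine, sequential sends: Feed.Send only returns once every
    // subscriber has received the value, so the removal notification is
    // delivered before the rebirth one is even sent. With two separate
    // `go feed.Send(...)` calls, the scheduler could flip that order.
    go func() {
        rmLogsFeed.Send("deleted logs")
        logsFeed.Send("rebirth logs")
    }()

    fmt.Println(<-rmLogsCh) // deleted logs
    fmt.Println(<-logsCh)   // rebirth logs
}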

core/blockchain_test.go

@@ -32,6 +32,7 @@ import (
     "github.com/ethereum/go-ethereum/core/vm"
     "github.com/ethereum/go-ethereum/crypto"
     "github.com/ethereum/go-ethereum/ethdb"
+    "github.com/ethereum/go-ethereum/ethdb/memorydb"
     "github.com/ethereum/go-ethereum/params"
 )
@@ -916,7 +917,8 @@ func TestLogRebirth(t *testing.T) {
     var (
         key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
         addr1   = crypto.PubkeyToAddress(key1.PublicKey)
-        db      = ethdb.NewMemDatabase()
+        db      = memorydb.New()
         // this code generates a log
         code  = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
         gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
@@ -1018,10 +1020,6 @@ func TestLogRebirth(t *testing.T) {
     if _, err := blockchain.InsertChain(newBlocks); err != nil {
         t.Fatalf("failed to insert forked chain: %v", err)
     }
-    // Rebirth logs should omit a newLogEvent
-    if !<-newLogCh {
-        t.Fatalf("failed to receive new log event")
-    }
     // Ensure removedLog events received
     select {
     case ev := <-rmLogsCh:
@@ -1031,13 +1029,18 @@ func TestLogRebirth(t *testing.T) {
     case <-time.NewTimer(1 * time.Second).C:
         t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
     }
+    // Rebirth logs should omit a newLogEvent
+    if !<-newLogCh {
+        t.Fatalf("failed to receive new log event")
+    }
 }
 
 func TestSideLogRebirth(t *testing.T) {
     var (
         key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
         addr1   = crypto.PubkeyToAddress(key1.PublicKey)
-        db      = ethdb.NewMemDatabase()
+        db      = memorydb.New()
         // this code generates a log
         code  = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
         gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
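In the test, the newLogCh assertion moves after the RemovedLogsEvent check, matching the strict removal-before-rebirth ordering that the single reorg goroutine now guarantees. A minimal sketch of that assertion pattern, with plain channels and hypothetical names standing in for the test's actual feed subscriptions and helpers:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Stand-ins for the rmLogsCh and newLogCh channels used by TestLogRebirth.
    rmLogsCh := make(chan string, 1)
    newLogCh := make(chan bool, 1)

    // Simulated reorg notifications: removal first, rebirth second.
    go func() {
        rmLogsCh <- "RemovedLogsEvent"
        newLogCh <- true
    }()

    // Check the removal event first ...
    select {
    case ev := <-rmLogsCh:
        fmt.Println("got", ev)
    case <-time.NewTimer(1 * time.Second).C:
        fmt.Println("timeout waiting for RemovedLogsEvent")
        return
    }
    // ... and only then expect the rebirth logs, as the reordered test does.
    if !<-newLogCh {
        fmt.Println("failed to receive new log event")
    }
}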