// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"errors"
	"fmt"
	"math/big"
	"math/rand"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/beacon"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/tracers/logger"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
)

// So we can deterministically seed different blockchains
var (
	canonicalSeed = 1
	forkSeed      = 2
)

// newCanonical creates a chain database, and injects a deterministic canonical
// chain. Depending on the full flag, it creates either a full block chain or a
// header only chain. The database and genesis specification for block generation
// are also returned in case more test blocks are needed later.
func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *Genesis, *BlockChain, error) {
	var (
		genesis = &Genesis{
			BaseFee: big.NewInt(params.InitialBaseFee),
			Config:  params.AllEthashProtocolChanges,
		}
	)
	// Initialize a fresh chain with only a genesis block
	blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)

	// Create and inject the requested chain
	if n == 0 {
		return rawdb.NewMemoryDatabase(), genesis, blockchain, nil
	}
	if full {
		// Full block-chain requested
		genDb, blocks := makeBlockChainWithGenesis(genesis, n, engine, canonicalSeed)
		_, err := blockchain.InsertChain(blocks)
		return genDb, genesis, blockchain, err
	}
	// Header-only chain requested
	genDb, headers := makeHeaderChainWithGenesis(genesis, n, engine, canonicalSeed)
	_, err := blockchain.InsertHeaderChain(headers)
	return genDb, genesis, blockchain, err
}
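
// Minimal usage sketch for newCanonical, assuming the ethash faker and the chain
// generation helpers used throughout this file: build a short full chain, then
// extend it with a couple of generated blocks.
//
//	genDb, _, chain, err := newCanonical(ethash.NewFaker(), 8, true)
//	if err != nil {
//		t.Fatalf("failed to create canonical chain: %v", err)
//	}
//	defer chain.Stop()
//
//	parent := chain.GetBlockByHash(chain.CurrentBlock().Hash())
//	blocks := makeBlockChain(chain.chainConfig, parent, 2, ethash.NewFaker(), genDb, forkSeed)
//	if _, err := chain.InsertChain(blocks); err != nil {
//		t.Fatalf("failed to extend canonical chain: %v", err)
//	}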

func newGwei(n int64) *big.Int {
	return new(big.Int).Mul(big.NewInt(n), big.NewInt(params.GWei))
}

// Test fork of length N starting from block i
func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
	// Copy old chain up to #i into a new db
	genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full)
	if err != nil {
		t.Fatal("could not make new canonical in testFork", err)
	}
	defer blockchain2.Stop()

	// Assert the chains have the same header/block at #i
	var hash1, hash2 common.Hash
	if full {
		hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash()
		hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash()
	} else {
		hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash()
		hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash()
	}
	if hash1 != hash2 {
		t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
	}
	// Extend the newly created chain
	var (
		blockChainB  []*types.Block
		headerChainB []*types.Header
	)
	if full {
		blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed)
		if _, err := blockchain2.InsertChain(blockChainB); err != nil {
			t.Fatalf("failed to insert forking chain: %v", err)
		}
	} else {
		headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
		if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil {
			t.Fatalf("failed to insert forking chain: %v", err)
		}
	}
	// Sanity check that the forked chain can be imported into the original
	var tdPre, tdPost *big.Int

	if full {
		cur := blockchain.CurrentBlock()
		tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
		if err := testBlockChainImport(blockChainB, blockchain); err != nil {
			t.Fatalf("failed to import forked block chain: %v", err)
		}
		last := blockChainB[len(blockChainB)-1]
		tdPost = blockchain.GetTd(last.Hash(), last.NumberU64())
	} else {
		cur := blockchain.CurrentHeader()
		tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
		if err := testHeaderChainImport(headerChainB, blockchain); err != nil {
			t.Fatalf("failed to import forked header chain: %v", err)
		}
		last := headerChainB[len(headerChainB)-1]
		tdPost = blockchain.GetTd(last.Hash(), last.Number.Uint64())
	}
	// Compare the total difficulties of the chains
	comparator(tdPre, tdPost)
}
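
// How testFork is typically driven (this is the pattern used by testExtendCanonical
// and friends below): the comparator encodes the expected total difficulty relation
// between the original head and the forked head.
//
//	heavier := func(td1, td2 *big.Int) {
//		if td2.Cmp(td1) <= 0 {
//			t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
//		}
//	}
//	testFork(t, processor, length, 10, full, heavier)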

// testBlockChainImport tries to process a chain of blocks, writing them into
// the database if successful.
func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
	for _, block := range chain {
		// Try and process the block
		err := blockchain.engine.VerifyHeader(blockchain, block.Header())
		if err == nil {
			err = blockchain.validator.ValidateBody(block)
		}
		if err != nil {
			if err == ErrKnownBlock {
				continue
			}
			return err
		}
		statedb, err := state.New(blockchain.GetBlockByHash(block.ParentHash()).Root(), blockchain.stateCache, nil)
		if err != nil {
			return err
		}
		receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{})
		if err != nil {
			blockchain.reportBlock(block, receipts, err)
			return err
		}
		err = blockchain.validator.ValidateState(block, statedb, receipts, usedGas)
		if err != nil {
			blockchain.reportBlock(block, receipts, err)
			return err
		}

		blockchain.chainmu.MustLock()
		rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)))
		rawdb.WriteBlock(blockchain.db, block)
		statedb.Commit(false)
		blockchain.chainmu.Unlock()
	}
	return nil
}
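
// testBlockChainImport deliberately bypasses BlockChain.InsertChain: it verifies,
// processes and validates every block itself and then writes the block and its
// total difficulty straight into the database under chainmu. The canonical head
// is left untouched, which lets callers import side chains for later comparison.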

// testHeaderChainImport tries to process a chain of headers, writing them into
// the database if successful.
func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
	for _, header := range chain {
		// Try and validate the header
		if err := blockchain.engine.VerifyHeader(blockchain, header); err != nil {
			return err
		}
		// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
		blockchain.chainmu.MustLock()
		rawdb.WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTd(header.ParentHash, header.Number.Uint64()-1)))
		rawdb.WriteHeader(blockchain.db, header)
		blockchain.chainmu.Unlock()
	}
	return nil
}
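
// testHeaderChainImport mirrors testBlockChainImport for header-only chains: each
// header is verified and then written, together with its total difficulty, directly
// into the database under chainmu, without reorganising the canonical chain.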

func TestLastBlock(t *testing.T) {
	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
	if err != nil {
		t.Fatalf("failed to create pristine chain: %v", err)
	}
	defer blockchain.Stop()

	blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 1, ethash.NewFullFaker(), genDb, 0)
	if _, err := blockchain.InsertChain(blocks); err != nil {
		t.Fatalf("Failed to insert block: %v", err)
	}
	if blocks[len(blocks)-1].Hash() != rawdb.ReadHeadBlockHash(blockchain.db) {
		t.Fatalf("Write/Get HeadBlockHash failed")
	}
}

// testInsertAfterMerge inserts blocks/headers after the fork choice rule has been
// changed. The chain is expected to reorg to whatever chain is specified.
func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool) {
	// Copy old chain up to #i into a new db
	genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full)
	if err != nil {
		t.Fatal("could not make new canonical in testInsertAfterMerge", err)
	}
	defer blockchain2.Stop()

	// Assert the chains have the same header/block at #i
	var hash1, hash2 common.Hash
	if full {
		hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash()
		hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash()
	} else {
		hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash()
		hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash()
	}
	if hash1 != hash2 {
		t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
	}

	// Extend the newly created chain
	if full {
		blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed)
		if _, err := blockchain2.InsertChain(blockChainB); err != nil {
			t.Fatalf("failed to insert forking chain: %v", err)
		}
		if blockchain2.CurrentBlock().Number.Uint64() != blockChainB[len(blockChainB)-1].NumberU64() {
			t.Fatalf("failed to reorg to the given chain")
		}
		if blockchain2.CurrentBlock().Hash() != blockChainB[len(blockChainB)-1].Hash() {
			t.Fatalf("failed to reorg to the given chain")
		}
	} else {
		headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
		if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil {
			t.Fatalf("failed to insert forking chain: %v", err)
		}
		if blockchain2.CurrentHeader().Number.Uint64() != headerChainB[len(headerChainB)-1].Number.Uint64() {
			t.Fatalf("failed to reorg to the given chain")
		}
		if blockchain2.CurrentHeader().Hash() != headerChainB[len(headerChainB)-1].Hash() {
			t.Fatalf("failed to reorg to the given chain")
		}
	}
}
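
// Illustrative sketch (this is how the *AfterMerge tests below use it): with the
// total difficulty based fork choice rule out of the picture, any extension that
// testInsertAfterMerge builds on top of block i is expected to become the new head,
// regardless of whether it is shorter, longer or equal to the original chain.
//
//	testInsertAfterMerge(t, processor, 0, 3, full) // fork at genesis, extend by 3
//	testInsertAfterMerge(t, processor, 5, 4, full) // fork at block 5, extend by 4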

// Tests that given a starting canonical chain of a given size, it can be extended
// with various length chains.
func TestExtendCanonicalHeaders(t *testing.T) { testExtendCanonical(t, false) }
func TestExtendCanonicalBlocks(t *testing.T)  { testExtendCanonical(t, true) }

func testExtendCanonical(t *testing.T, full bool) {
	length := 5

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	// Define the difficulty comparator
	better := func(td1, td2 *big.Int) {
		if td2.Cmp(td1) <= 0 {
			t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
		}
	}
	// Start fork from current height
	testFork(t, processor, length, 1, full, better)
	testFork(t, processor, length, 2, full, better)
	testFork(t, processor, length, 5, full, better)
	testFork(t, processor, length, 10, full, better)
}

// Tests that given a starting canonical chain of a given size, it can be extended
// with various length chains.
func TestExtendCanonicalHeadersAfterMerge(t *testing.T) { testExtendCanonicalAfterMerge(t, false) }
func TestExtendCanonicalBlocksAfterMerge(t *testing.T)  { testExtendCanonicalAfterMerge(t, true) }

func testExtendCanonicalAfterMerge(t *testing.T, full bool) {
	length := 5

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	testInsertAfterMerge(t, processor, length, 1, full)
	testInsertAfterMerge(t, processor, length, 10, full)
}

// Tests that given a starting canonical chain of a given size, creating shorter
// forks does not take canonical ownership.
func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false) }
func TestShorterForkBlocks(t *testing.T)  { testShorterFork(t, true) }

func testShorterFork(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	// Define the difficulty comparator
	worse := func(td1, td2 *big.Int) {
		if td2.Cmp(td1) >= 0 {
			t.Errorf("total difficulty mismatch: have %v, expected less than %v", td2, td1)
		}
	}
	// Sum of numbers must be less than `length` for this to be a shorter fork
	testFork(t, processor, 0, 3, full, worse)
	testFork(t, processor, 0, 7, full, worse)
	testFork(t, processor, 1, 1, full, worse)
	testFork(t, processor, 1, 7, full, worse)
	testFork(t, processor, 5, 3, full, worse)
	testFork(t, processor, 5, 4, full, worse)
}

// Tests that given a starting canonical chain of a given size, creating shorter
// forks does not take canonical ownership.
func TestShorterForkHeadersAfterMerge(t *testing.T) { testShorterForkAfterMerge(t, false) }
func TestShorterForkBlocksAfterMerge(t *testing.T)  { testShorterForkAfterMerge(t, true) }

func testShorterForkAfterMerge(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	testInsertAfterMerge(t, processor, 0, 3, full)
	testInsertAfterMerge(t, processor, 0, 7, full)
	testInsertAfterMerge(t, processor, 1, 1, full)
	testInsertAfterMerge(t, processor, 1, 7, full)
	testInsertAfterMerge(t, processor, 5, 3, full)
	testInsertAfterMerge(t, processor, 5, 4, full)
}

// Tests that given a starting canonical chain of a given size, creating longer
// forks does take canonical ownership.
func TestLongerForkHeaders(t *testing.T) { testLongerFork(t, false) }
func TestLongerForkBlocks(t *testing.T)  { testLongerFork(t, true) }

func testLongerFork(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	testInsertAfterMerge(t, processor, 0, 11, full)
	testInsertAfterMerge(t, processor, 0, 15, full)
	testInsertAfterMerge(t, processor, 1, 10, full)
	testInsertAfterMerge(t, processor, 1, 12, full)
	testInsertAfterMerge(t, processor, 5, 6, full)
	testInsertAfterMerge(t, processor, 5, 8, full)
}

// Tests that given a starting canonical chain of a given size, creating longer
// forks does take canonical ownership.
func TestLongerForkHeadersAfterMerge(t *testing.T) { testLongerForkAfterMerge(t, false) }
func TestLongerForkBlocksAfterMerge(t *testing.T)  { testLongerForkAfterMerge(t, true) }

func testLongerForkAfterMerge(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	testInsertAfterMerge(t, processor, 0, 11, full)
	testInsertAfterMerge(t, processor, 0, 15, full)
	testInsertAfterMerge(t, processor, 1, 10, full)
	testInsertAfterMerge(t, processor, 1, 12, full)
	testInsertAfterMerge(t, processor, 5, 6, full)
	testInsertAfterMerge(t, processor, 5, 8, full)
}

// Tests that given a starting canonical chain of a given size, creating equal
// forks does take canonical ownership.
func TestEqualForkHeaders(t *testing.T) { testEqualFork(t, false) }
func TestEqualForkBlocks(t *testing.T)  { testEqualFork(t, true) }

func testEqualFork(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	// Define the difficulty comparator
	equal := func(td1, td2 *big.Int) {
		if td2.Cmp(td1) != 0 {
			t.Errorf("total difficulty mismatch: have %v, want %v", td2, td1)
		}
	}
	// Sum of numbers must be equal to `length` for this to be an equal fork
	testFork(t, processor, 0, 10, full, equal)
	testFork(t, processor, 1, 9, full, equal)
	testFork(t, processor, 2, 8, full, equal)
	testFork(t, processor, 5, 5, full, equal)
	testFork(t, processor, 6, 4, full, equal)
	testFork(t, processor, 9, 1, full, equal)
}

// Tests that given a starting canonical chain of a given size, creating equal
// forks does take canonical ownership.
func TestEqualForkHeadersAfterMerge(t *testing.T) { testEqualForkAfterMerge(t, false) }
func TestEqualForkBlocksAfterMerge(t *testing.T)  { testEqualForkAfterMerge(t, true) }

func testEqualForkAfterMerge(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	testInsertAfterMerge(t, processor, 0, 10, full)
	testInsertAfterMerge(t, processor, 1, 9, full)
	testInsertAfterMerge(t, processor, 2, 8, full)
	testInsertAfterMerge(t, processor, 5, 5, full)
	testInsertAfterMerge(t, processor, 6, 4, full)
	testInsertAfterMerge(t, processor, 9, 1, full)
}

// Tests that chains missing links do not get accepted by the processor.
func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false) }
func TestBrokenBlockChain(t *testing.T)  { testBrokenChain(t, true) }

func testBrokenChain(t *testing.T, full bool) {
	// Make chain starting from genesis
	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer blockchain.Stop()

	// Create a forked chain, and try to insert with a missing link
	if full {
		chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
		if err := testBlockChainImport(chain, blockchain); err == nil {
			t.Errorf("broken block chain not reported")
		}
	} else {
		chain := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
		if err := testHeaderChainImport(chain, blockchain); err == nil {
			t.Errorf("broken header chain not reported")
		}
	}
}

// Tests that reorganising a long difficult chain after a short easy one
// overwrites the canonical numbers and links in the database.
func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
func TestReorgLongBlocks(t *testing.T)  { testReorgLong(t, true) }

func testReorgLong(t *testing.T, full bool) {
	testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full)
}

// Tests that reorganising a short difficult chain after a long easy one
// overwrites the canonical numbers and links in the database.
func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false) }
func TestReorgShortBlocks(t *testing.T)  { testReorgShort(t, true) }

func testReorgShort(t *testing.T, full bool) {
	// Create a long easy chain vs. a short heavy one. Due to difficulty adjustment
	// we need a fairly long chain of blocks with different difficulties for a short
	// one to become heavier than a long one. The 96 is an empirical value.
	easy := make([]int64, 96)
	for i := 0; i < len(easy); i++ {
		easy[i] = 60
	}
	diff := make([]int64, len(easy)-1)
	for i := 0; i < len(diff); i++ {
		diff[i] = -9
	}
	testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full)
}
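
// Note on the offsets used in the two reorg tests above: OffsetTime shifts a
// generated block's timestamp, and the difficulty adjustment reacts to it.
// Positive offsets (e.g. 60) stretch the block times apart and lower the
// per-block difficulty, while negative offsets (e.g. -9) compress them and
// raise it, which is how a shorter chain can end up with the larger total
// difficulty. The expected totals (393280 and 12615120 above) are empirical
// values for these exact offset vectors, added on top of the genesis
// difficulty.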

func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
	// Create a pristine chain and database
	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
	if err != nil {
		t.Fatalf("failed to create pristine chain: %v", err)
	}
	defer blockchain.Stop()

	// Insert an easy and a difficult chain afterwards
	easyBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), genDb, len(first), func(i int, b *BlockGen) {
		b.OffsetTime(first[i])
	})
	diffBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), genDb, len(second), func(i int, b *BlockGen) {
		b.OffsetTime(second[i])
	})
	if full {
		if _, err := blockchain.InsertChain(easyBlocks); err != nil {
			t.Fatalf("failed to insert easy chain: %v", err)
		}
		if _, err := blockchain.InsertChain(diffBlocks); err != nil {
			t.Fatalf("failed to insert difficult chain: %v", err)
		}
	} else {
		easyHeaders := make([]*types.Header, len(easyBlocks))
		for i, block := range easyBlocks {
			easyHeaders[i] = block.Header()
		}
		diffHeaders := make([]*types.Header, len(diffBlocks))
		for i, block := range diffBlocks {
			diffHeaders[i] = block.Header()
		}
		if _, err := blockchain.InsertHeaderChain(easyHeaders); err != nil {
			t.Fatalf("failed to insert easy chain: %v", err)
		}
		if _, err := blockchain.InsertHeaderChain(diffHeaders); err != nil {
			t.Fatalf("failed to insert difficult chain: %v", err)
		}
	}
	// Check that the chain is valid number and link wise
	if full {
		prev := blockchain.CurrentBlock()
		for block := blockchain.GetBlockByNumber(blockchain.CurrentBlock().Number.Uint64() - 1); block.NumberU64() != 0; prev, block = block.Header(), blockchain.GetBlockByNumber(block.NumberU64()-1) {
			if prev.ParentHash != block.Hash() {
				t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash, block.Hash())
			}
		}
	} else {
		prev := blockchain.CurrentHeader()
		for header := blockchain.GetHeaderByNumber(blockchain.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, blockchain.GetHeaderByNumber(header.Number.Uint64()-1) {
			if prev.ParentHash != header.Hash() {
				t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
			}
		}
	}
	// Make sure the chain total difficulty is the correct one
	want := new(big.Int).Add(blockchain.genesisBlock.Difficulty(), big.NewInt(td))
	if full {
		cur := blockchain.CurrentBlock()
		if have := blockchain.GetTd(cur.Hash(), cur.Number.Uint64()); have.Cmp(want) != 0 {
			t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
		}
	} else {
		cur := blockchain.CurrentHeader()
		if have := blockchain.GetTd(cur.Hash(), cur.Number.Uint64()); have.Cmp(want) != 0 {
			t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
		}
	}
}
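
// The reorg helper above deliberately grows both forks from the same pristine
// (genesis-only) chain: whichever fork accumulates the higher total difficulty
// becomes canonical, and the checks then walk the canonical chain backwards
// from the head to make sure the parent links are intact and the accumulated
// total difficulty matches the expected value.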

// Tests that the insertion functions detect banned hashes.
func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
func TestBadBlockHashes(t *testing.T)  { testBadHashes(t, true) }

func testBadHashes(t *testing.T, full bool) {
	// Create a pristine chain and database
	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
	if err != nil {
		t.Fatalf("failed to create pristine chain: %v", err)
	}
	defer blockchain.Stop()

	// Create a chain, ban a hash and try to import
	if full {
		blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 3, ethash.NewFaker(), genDb, 10)

		BadHashes[blocks[2].Header().Hash()] = true
		defer func() { delete(BadHashes, blocks[2].Header().Hash()) }()

		_, err = blockchain.InsertChain(blocks)
	} else {
		headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 3, ethash.NewFaker(), genDb, 10)

		BadHashes[headers[2].Hash()] = true
		defer func() { delete(BadHashes, headers[2].Hash()) }()

		_, err = blockchain.InsertHeaderChain(headers)
	}
	if !errors.Is(err, ErrBannedHash) {
		t.Errorf("error mismatch: have: %v, want: %v", err, ErrBannedHash)
	}
}

// Tests that bad hashes are detected on boot, and the chain rolled back to a
// good state prior to the bad hash.
func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
func TestReorgBadBlockHashes(t *testing.T)  { testReorgBadHashes(t, true) }

func testReorgBadHashes(t *testing.T, full bool) {
	// Create a pristine chain and database
	genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
	if err != nil {
		t.Fatalf("failed to create pristine chain: %v", err)
	}
	// Create a chain, import and ban afterwards
	headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 4, ethash.NewFaker(), genDb, 10)
	blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 4, ethash.NewFaker(), genDb, 10)

	if full {
		if _, err = blockchain.InsertChain(blocks); err != nil {
			t.Errorf("failed to import blocks: %v", err)
		}
		if blockchain.CurrentBlock().Hash() != blocks[3].Hash() {
			t.Errorf("last block hash mismatch: have: %x, want %x", blockchain.CurrentBlock().Hash(), blocks[3].Header().Hash())
		}
		BadHashes[blocks[3].Header().Hash()] = true
		defer func() { delete(BadHashes, blocks[3].Header().Hash()) }()
	} else {
		if _, err = blockchain.InsertHeaderChain(headers); err != nil {
			t.Errorf("failed to import headers: %v", err)
		}
		if blockchain.CurrentHeader().Hash() != headers[3].Hash() {
			t.Errorf("last header hash mismatch: have: %x, want %x", blockchain.CurrentHeader().Hash(), headers[3].Hash())
		}
		BadHashes[headers[3].Hash()] = true
		defer func() { delete(BadHashes, headers[3].Hash()) }()
	}
	blockchain.Stop()

	// Create a new BlockChain and check that it rolled back the state.
	ncm, err := NewBlockChain(blockchain.db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create new chain manager: %v", err)
	}
	if full {
		if ncm.CurrentBlock().Hash() != blocks[2].Header().Hash() {
			t.Errorf("last block hash mismatch: have: %x, want %x", ncm.CurrentBlock().Hash(), blocks[2].Header().Hash())
		}
		if blocks[2].Header().GasLimit != ncm.GasLimit() {
			t.Errorf("last block gasLimit mismatch: have: %d, want %d", ncm.GasLimit(), blocks[2].Header().GasLimit)
		}
	} else {
		if ncm.CurrentHeader().Hash() != headers[2].Hash() {
			t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), headers[2].Hash())
		}
	}
	ncm.Stop()
}
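
// After the restart above, the new BlockChain spots the freshly banned hash at
// the head and rewinds below it, which is why the checks expect
// blocks[2]/headers[2] (one entry below the banned index 3) as the new head.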

// Tests chain insertions in the face of one entity containing an invalid nonce.
func TestHeadersInsertNonceError(t *testing.T) { testInsertNonceError(t, false) }
func TestBlocksInsertNonceError(t *testing.T)  { testInsertNonceError(t, true) }

func testInsertNonceError(t *testing.T, full bool) {
	doTest := func(i int) {
		// Create a pristine chain and database
		genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
		if err != nil {
			t.Fatalf("failed to create pristine chain: %v", err)
		}
		defer blockchain.Stop()

		// Create and insert a chain with a failing nonce
		var (
			failAt  int
			failRes int
			failNum uint64
		)
		if full {
			blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), i, ethash.NewFaker(), genDb, 0)

			failAt = rand.Int() % len(blocks)
			failNum = blocks[failAt].NumberU64()

			blockchain.engine = ethash.NewFakeFailer(failNum)
			failRes, err = blockchain.InsertChain(blocks)
		} else {
			headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), i, ethash.NewFaker(), genDb, 0)

			failAt = rand.Int() % len(headers)
			failNum = headers[failAt].Number.Uint64()

			blockchain.engine = ethash.NewFakeFailer(failNum)
			blockchain.hc.engine = blockchain.engine
			failRes, err = blockchain.InsertHeaderChain(headers)
		}
		// Check that the returned error indicates the failure
		if failRes != failAt {
			t.Errorf("test %d: failure (%v) index mismatch: have %d, want %d", i, err, failRes, failAt)
		}
		// Check that no blocks after the failing block have been inserted
		for j := 0; j < i-failAt; j++ {
			if full {
				if block := blockchain.GetBlockByNumber(failNum + uint64(j)); block != nil {
					t.Errorf("test %d: invalid block in chain: %v", i, block)
				}
			} else {
				if header := blockchain.GetHeaderByNumber(failNum + uint64(j)); header != nil {
					t.Errorf("test %d: invalid header in chain: %v", i, header)
				}
			}
		}
	}
	for i := 1; i < 25 && !t.Failed(); i++ {
		doTest(i)
	}
}
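
// ethash.NewFakeFailer used above is a fake-PoW engine that accepts every
// block except the one with the designated number, so the inserts abort at a
// known, randomised point and the loop can verify nothing past it was written.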

// Tests that fast importing a block chain produces the same chain data as the
// classical full block processing.
func TestFastVsFullChains(t *testing.T) {
	// Configure and generate a sample block chain
	var (
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(1000000000000000)
		gspec   = &Genesis{
			Config:  params.TestChainConfig,
			Alloc:   GenesisAlloc{address: {Balance: funds}},
			BaseFee: big.NewInt(params.InitialBaseFee),
		}
		signer = types.LatestSigner(gspec.Config)
	)
	_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 1024, func(i int, block *BlockGen) {
		block.SetCoinbase(common.Address{0x00})

		// If the block number is a multiple of 3, send a few bonus transactions to the miner
		if i%3 == 2 {
			for j := 0; j < i%4+1; j++ {
				tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
				if err != nil {
					panic(err)
				}
				block.AddTx(tx)
			}
		}
		// If the block number is a multiple of 5, add an uncle to the block
		if i%5 == 4 {
			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i))})
		}
	})
	// Import the chain as an archive node for the comparison baseline
	archiveDb := rawdb.NewMemoryDatabase()
	archive, _ := NewBlockChain(archiveDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer archive.Stop()

	if n, err := archive.InsertChain(blocks); err != nil {
		t.Fatalf("failed to process block %d: %v", n, err)
	}
	// Fast import the chain as a non-archive node to test
	fastDb := rawdb.NewMemoryDatabase()
	fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer fast.Stop()

	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	if n, err := fast.InsertHeaderChain(headers); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil {
		t.Fatalf("failed to insert receipt %d: %v", n, err)
	}
	// Freezer style fast import the chain.
	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
	if err != nil {
		t.Fatalf("failed to create temp freezer db: %v", err)
	}
	defer ancientDb.Close()
	ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer ancient.Stop()

	if n, err := ancient.InsertHeaderChain(headers); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(len(blocks)/2)); err != nil {
		t.Fatalf("failed to insert receipt %d: %v", n, err)
	}

	// Iterate over all chain data components, and cross reference
	for i := 0; i < len(blocks); i++ {
		num, hash, time := blocks[i].NumberU64(), blocks[i].Hash(), blocks[i].Time()

		if ftd, atd := fast.GetTd(hash, num), archive.GetTd(hash, num); ftd.Cmp(atd) != 0 {
			t.Errorf("block #%d [%x]: td mismatch: fastdb %v, archivedb %v", num, hash, ftd, atd)
		}
		if antd, artd := ancient.GetTd(hash, num), archive.GetTd(hash, num); antd.Cmp(artd) != 0 {
			t.Errorf("block #%d [%x]: td mismatch: ancientdb %v, archivedb %v", num, hash, antd, artd)
		}
		if fheader, aheader := fast.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); fheader.Hash() != aheader.Hash() {
			t.Errorf("block #%d [%x]: header mismatch: fastdb %v, archivedb %v", num, hash, fheader, aheader)
		}
		if anheader, arheader := ancient.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); anheader.Hash() != arheader.Hash() {
			t.Errorf("block #%d [%x]: header mismatch: ancientdb %v, archivedb %v", num, hash, anheader, arheader)
		}
		if fblock, arblock, anblock := fast.GetBlockByHash(hash), archive.GetBlockByHash(hash), ancient.GetBlockByHash(hash); fblock.Hash() != arblock.Hash() || anblock.Hash() != arblock.Hash() {
			t.Errorf("block #%d [%x]: block mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock, anblock, arblock)
		} else if types.DeriveSha(fblock.Transactions(), trie.NewStackTrie(nil)) != types.DeriveSha(arblock.Transactions(), trie.NewStackTrie(nil)) || types.DeriveSha(anblock.Transactions(), trie.NewStackTrie(nil)) != types.DeriveSha(arblock.Transactions(), trie.NewStackTrie(nil)) {
			t.Errorf("block #%d [%x]: transactions mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Transactions(), anblock.Transactions(), arblock.Transactions())
		} else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) {
			t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock, arblock.Uncles())
		}

		// Check receipts.
		freceipts := rawdb.ReadReceipts(fastDb, hash, num, time, fast.Config())
		anreceipts := rawdb.ReadReceipts(ancientDb, hash, num, time, fast.Config())
		areceipts := rawdb.ReadReceipts(archiveDb, hash, num, time, fast.Config())
		if types.DeriveSha(freceipts, trie.NewStackTrie(nil)) != types.DeriveSha(areceipts, trie.NewStackTrie(nil)) {
			t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts)
		}

		// Check that hash-to-number mappings are present in all databases.
		if m := rawdb.ReadHeaderNumber(fastDb, hash); m == nil || *m != num {
			t.Errorf("block #%d [%x]: wrong hash-to-number mapping in fastdb: %v", num, hash, m)
		}
		if m := rawdb.ReadHeaderNumber(ancientDb, hash); m == nil || *m != num {
			t.Errorf("block #%d [%x]: wrong hash-to-number mapping in ancientdb: %v", num, hash, m)
		}
		if m := rawdb.ReadHeaderNumber(archiveDb, hash); m == nil || *m != num {
			t.Errorf("block #%d [%x]: wrong hash-to-number mapping in archivedb: %v", num, hash, m)
		}
	}

	// Check that the canonical chains are the same between the databases
	for i := 0; i < len(blocks)+1; i++ {
		if fhash, ahash := rawdb.ReadCanonicalHash(fastDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
			t.Errorf("block #%d: canonical hash mismatch: fastdb %v, archivedb %v", i, fhash, ahash)
		}
		if anhash, arhash := rawdb.ReadCanonicalHash(ancientDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); anhash != arhash {
			t.Errorf("block #%d: canonical hash mismatch: ancientdb %v, archivedb %v", i, anhash, arhash)
		}
	}
}
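
// The three databases compared above correspond to three import strategies:
// the archive node executes every block, the "fast" node imports headers plus
// receipt data without re-executing transactions, and the ancient node does
// the same but pushes the older half of the chain straight into the freezer.
// The cross-reference loop asserts that all of them end up with identical
// chain data regardless of how it was imported.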

// Tests that various import methods move the chain head pointers to the correct
// positions.
func TestLightVsFastVsFullChainHeads(t *testing.T) {
	// Configure and generate a sample block chain
	var (
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(1000000000000000)
		gspec   = &Genesis{
			Config:  params.TestChainConfig,
			Alloc:   GenesisAlloc{address: {Balance: funds}},
			BaseFee: big.NewInt(params.InitialBaseFee),
		}
	)
	height := uint64(1024)
	_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)

	// makeDb creates a db instance for testing.
	makeDb := func() ethdb.Database {
		db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
		if err != nil {
			t.Fatalf("failed to create temp freezer db: %v", err)
		}
		return db
	}
	// Configure a subchain to roll back
	remove := blocks[height/2].NumberU64()

	// Create a small assertion method to check the three heads
	assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) {
		t.Helper()

		if num := chain.CurrentBlock().Number.Uint64(); num != block {
			t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
		}
		if num := chain.CurrentSnapBlock().Number.Uint64(); num != fast {
			t.Errorf("%s head snap-block mismatch: have #%v, want #%v", kind, num, fast)
		}
		if num := chain.CurrentHeader().Number.Uint64(); num != header {
			t.Errorf("%s head header mismatch: have #%v, want #%v", kind, num, header)
		}
	}
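
	// The triple passed to assert mirrors the three head pointers a node
	// tracks: the header chain head, the snap/fast-sync block head and the
	// fully processed block head. Each import mode below is expected to
	// advance only the pointers it actually has data for.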
	// Import the chain as an archive node and ensure all pointers are updated
	archiveDb := makeDb()
	defer archiveDb.Close()

	archiveCaching := *defaultCacheConfig
	archiveCaching.TrieDirtyDisabled = true

	archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	if n, err := archive.InsertChain(blocks); err != nil {
		t.Fatalf("failed to process block %d: %v", n, err)
	}
	defer archive.Stop()

	assert(t, "archive", archive, height, height, height)
	archive.SetHead(remove - 1)
	assert(t, "archive", archive, height/2, height/2, height/2)

	// Import the chain as a non-archive node and ensure all pointers are updated
	fastDb := makeDb()
	defer fastDb.Close()
	fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer fast.Stop()

	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	if n, err := fast.InsertHeaderChain(headers); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil {
		t.Fatalf("failed to insert receipt %d: %v", n, err)
	}
	assert(t, "fast", fast, height, height, 0)
	fast.SetHead(remove - 1)
	assert(t, "fast", fast, height/2, height/2, 0)

	// Import the chain as an ancient-first node and ensure all pointers are updated
	ancientDb := makeDb()
	defer ancientDb.Close()
	ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer ancient.Stop()

	if n, err := ancient.InsertHeaderChain(headers); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
		t.Fatalf("failed to insert receipt %d: %v", n, err)
	}
	assert(t, "ancient", ancient, height, height, 0)
	ancient.SetHead(remove - 1)
	assert(t, "ancient", ancient, 0, 0, 0)

	if frozen, err := ancientDb.Ancients(); err != nil || frozen != 1 {
		t.Fatalf("failed to truncate ancient store, want %v, have %v", 1, frozen)
	}
	// Import the chain as a light node and ensure all pointers are updated
	lightDb := makeDb()
	defer lightDb.Close()
	light, _ := NewBlockChain(lightDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	if n, err := light.InsertHeaderChain(headers); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	defer light.Stop()

	assert(t, "light", light, height, 0, 0)
	light.SetHead(remove - 1)
	assert(t, "light", light, height/2, 0, 0)
}

// Tests that chain reorganisations handle transaction removals and reinsertions.
func TestChainTxReorgs(t *testing.T) {
	var (
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
		key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		addr2   = crypto.PubkeyToAddress(key2.PublicKey)
		addr3   = crypto.PubkeyToAddress(key3.PublicKey)
		gspec   = &Genesis{
			Config:   params.TestChainConfig,
			GasLimit: 3141592,
			Alloc: GenesisAlloc{
				addr1: {Balance: big.NewInt(1000000000000000)},
				addr2: {Balance: big.NewInt(1000000000000000)},
				addr3: {Balance: big.NewInt(1000000000000000)},
			},
		}
		signer = types.LatestSigner(gspec.Config)
	)

	// Create two transactions shared between the chains:
	//  - postponed: transaction included at a later block in the forked chain
	//  - swapped: transaction included at the same block number in the forked chain
	postponed, _ := types.SignTx(types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, key1)
	swapped, _ := types.SignTx(types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, key1)

	// Create two transactions that will be dropped by the forked chain:
	//  - pastDrop: transaction dropped retroactively from a past block
	//  - freshDrop: transaction dropped exactly at the block where the reorg is detected
	var pastDrop, freshDrop *types.Transaction

	// Create three transactions that will be added in the forked chain:
	//  - pastAdd: transaction added before the reorganization is detected
	//  - freshAdd: transaction added at the exact block the reorg is detected
	//  - futureAdd: transaction added after the reorg has already finished
	var pastAdd, freshAdd, futureAdd *types.Transaction

	_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {
		switch i {
		case 0:
			pastDrop, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key2)

			gen.AddTx(pastDrop)  // This transaction will be dropped in the fork from below the split point
			gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork

		case 2:
			freshDrop, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key2)

			gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point
			gen.AddTx(swapped)   // This transaction will be swapped out at the exact height

			gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain
		}
	})
	// Import the chain. This runs all block validation rules.
	db := rawdb.NewMemoryDatabase()
	blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	if i, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert original chain[%d]: %v", i, err)
	}
	defer blockchain.Stop()

	// overwrite the old chain
	_, chain, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 5, func(i int, gen *BlockGen) {
		switch i {
		case 0:
			pastAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3)
			gen.AddTx(pastAdd) // This transaction needs to be injected during reorg

		case 2:
			gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain
			gen.AddTx(swapped)   // This transaction was swapped from the exact current spot in the original chain

			freshAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3)
			gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time

		case 3:
			futureAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3)
			gen.AddTx(futureAdd) // This transaction will be added after a full reorg
		}
	})
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert forked chain: %v", err)
	}

	// removed tx
	for i, tx := range (types.Transactions{pastDrop, freshDrop}) {
		if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn != nil {
			t.Errorf("drop %d: tx %v found while shouldn't have been", i, txn)
		}
		if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt != nil {
			t.Errorf("drop %d: receipt %v found while shouldn't have been", i, rcpt)
		}
	}
	// added tx
	for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) {
		if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil {
			t.Errorf("add %d: expected tx to be found", i)
		}
		if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt == nil {
			t.Errorf("add %d: expected receipt to be found", i)
		}
	}
	// shared tx
	for i, tx := range (types.Transactions{postponed, swapped}) {
		if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil {
			t.Errorf("share %d: expected tx to be found", i)
		}
		if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt == nil {
			t.Errorf("share %d: expected receipt to be found", i)
		}
	}
}

func TestLogReorgs(t *testing.T) {
	var (
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)

		// this code generates a log
		code   = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
		gspec  = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
		signer = types.LatestSigner(gspec.Config)
	)

	blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer blockchain.Stop()

	rmLogsCh := make(chan RemovedLogsEvent)
	blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
	_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
		if i == 1 {
			tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, code), signer, key1)
			if err != nil {
				t.Fatalf("failed to create tx: %v", err)
			}
			gen.AddTx(tx)
		}
	})
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert chain: %v", err)
	}

	_, chain, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {})
	done := make(chan struct{})
	go func() {
		ev := <-rmLogsCh
		if len(ev.Logs) == 0 {
			t.Error("expected logs")
		}
		close(done)
	}()
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert forked chain: %v", err)
	}
	timeout := time.NewTimer(1 * time.Second)
	defer timeout.Stop()
	select {
	case <-done:
	case <-timeout.C:
		t.Fatal("timeout: no RemovedLogsEvent has been sent")
	}
}
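
// The empty three-block chain inserted above out-weighs the original two-block
// chain, so the block carrying the contract-creation log is unwound and its
// logs are delivered on rmLogsCh; the goroutine plus the one second timer only
// guard against that RemovedLogsEvent never arriving.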

// This EVM code generates a log when the contract is created.
var logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")

// This test checks that log events and RemovedLogsEvent are sent
// when the chain reorganizes.
func TestLogRebirth(t *testing.T) {
	var (
		key1, _       = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		addr1         = crypto.PubkeyToAddress(key1.PublicKey)
		gspec         = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
		signer        = types.LatestSigner(gspec.Config)
		engine        = ethash.NewFaker()
		blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
	)
	defer blockchain.Stop()

	// The event channels.
	newLogCh := make(chan []*types.Log, 10)
	rmLogsCh := make(chan RemovedLogsEvent, 10)
	blockchain.SubscribeLogsEvent(newLogCh)
	blockchain.SubscribeRemovedLogsEvent(rmLogsCh)

	// This chain contains 10 logs.
	genDb, chain, _ := GenerateChainWithGenesis(gspec, engine, 3, func(i int, gen *BlockGen) {
		if i < 2 {
			for ii := 0; ii < 5; ii++ {
				tx, err := types.SignNewTx(key1, signer, &types.LegacyTx{
					Nonce:    gen.TxNonce(addr1),
					GasPrice: gen.header.BaseFee,
					Gas:      uint64(1000001),
					Data:     logCode,
				})
				if err != nil {
					t.Fatalf("failed to create tx: %v", err)
				}
				gen.AddTx(tx)
			}
		}
	})
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert chain: %v", err)
	}
	checkLogEvents(t, newLogCh, rmLogsCh, 10, 0)

	// Generate a heavier reorg chain containing more logs. Inserting it
	// removes the original chain's ten logs and adds ten new ones.
	_, forkChain, _ := GenerateChainWithGenesis(gspec, engine, 3, func(i int, gen *BlockGen) {
		if i == 2 {
			// The last (head) block is not part of the reorg-chain, we can ignore it
			return
		}
		for ii := 0; ii < 5; ii++ {
			tx, err := types.SignNewTx(key1, signer, &types.LegacyTx{
				Nonce:    gen.TxNonce(addr1),
				GasPrice: gen.header.BaseFee,
				Gas:      uint64(1000000),
				Data:     logCode,
			})
			if err != nil {
				t.Fatalf("failed to create tx: %v", err)
			}
			gen.AddTx(tx)
		}
		gen.OffsetTime(-9) // higher block difficulty
	})
	if _, err := blockchain.InsertChain(forkChain); err != nil {
		t.Fatalf("failed to insert forked chain: %v", err)
	}
	checkLogEvents(t, newLogCh, rmLogsCh, 10, 10)

	// This chain segment is rooted in the original chain, but doesn't contain any logs.
	// When inserting it, the canonical chain switches away from forkChain and re-emits
	// the log events for the old chain, as well as a RemovedLogsEvent for forkChain.
	newBlocks, _ := GenerateChain(gspec.Config, chain[len(chain)-1], engine, genDb, 1, func(i int, gen *BlockGen) {})
	if _, err := blockchain.InsertChain(newBlocks); err != nil {
		t.Fatalf("failed to insert forked chain: %v", err)
	}
	checkLogEvents(t, newLogCh, rmLogsCh, 10, 10)
}
|
|
|
|
|
2020-04-28 08:06:49 +00:00
|
|
|
// This test is a variation of TestLogRebirth. It verifies that log events are emitted
// when a side chain containing log events overtakes the canonical chain.
func TestSideLogRebirth(t *testing.T) {
	var (
		key1, _       = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		addr1         = crypto.PubkeyToAddress(key1.PublicKey)
		gspec         = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
		signer        = types.LatestSigner(gspec.Config)
		blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	)
	defer blockchain.Stop()

	newLogCh := make(chan []*types.Log, 10)
	rmLogsCh := make(chan RemovedLogsEvent, 10)
	blockchain.SubscribeLogsEvent(newLogCh)
	blockchain.SubscribeRemovedLogsEvent(rmLogsCh)

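	// Generate the canonical chain: two empty blocks, the second mined with an earlier
	// timestamp so it carries a higher difficulty. No logs are produced here.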
	_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
		if i == 1 {
			gen.OffsetTime(-9) // higher block difficulty
		}
	})
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert forked chain: %v", err)
	}
	checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)

	// Generate side chain with lower difficulty
	genDb, sideChain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
		if i == 1 {
			tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, logCode), signer, key1)
			if err != nil {
				t.Fatalf("failed to create tx: %v", err)
			}
			gen.AddTx(tx)
		}
	})
	if _, err := blockchain.InsertChain(sideChain); err != nil {
		t.Fatalf("failed to insert forked chain: %v", err)
	}
	checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)

	// Generate a new block based on side chain.
	newBlocks, _ := GenerateChain(gspec.Config, sideChain[len(sideChain)-1], ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
	if _, err := blockchain.InsertChain(newBlocks); err != nil {
		t.Fatalf("failed to insert forked chain: %v", err)
	}
	checkLogEvents(t, newLogCh, rmLogsCh, 1, 0)
}

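// checkLogEvents drains the subscribed log and removed-log channels and verifies that
// the number of delivered events matches the caller's expectations, and that the events
// within each channel arrive in ascending block/transaction order.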
func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan RemovedLogsEvent, wantNew, wantRemoved int) {
	t.Helper()
	var (
		countNew int
		countRm  int
		prev     int
	)
	// Drain events.
	for len(logsCh) > 0 {
		x := <-logsCh
		countNew += len(x)
		for _, log := range x {
			// We expect added logs to be in ascending order: 0:0, 0:1, 1:0 ...
			have := 100*int(log.BlockNumber) + int(log.TxIndex)
			if have < prev {
				t.Fatalf("Expected new logs to arrive in ascending order (%d < %d)", have, prev)
			}
			prev = have
		}
	}
	prev = 0
	for len(rmLogsCh) > 0 {
		x := <-rmLogsCh
		countRm += len(x.Logs)
		for _, log := range x.Logs {
			// We expect removed logs to be in ascending order: 0:0, 0:1, 1:0 ...
			have := 100*int(log.BlockNumber) + int(log.TxIndex)
			if have < prev {
				t.Fatalf("Expected removed logs to arrive in ascending order (%d < %d)", have, prev)
			}
			prev = have
		}
	}

	if countNew != wantNew {
		t.Fatalf("wrong number of log events: got %d, want %d", countNew, wantNew)
	}
	if countRm != wantRemoved {
		t.Fatalf("wrong number of removed log events: got %d, want %d", countRm, wantRemoved)
	}
}

func TestReorgSideEvent(t *testing.T) {
	var (
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		gspec   = &Genesis{
			Config: params.TestChainConfig,
			Alloc:  GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
		}
		signer = types.LatestSigner(gspec.Config)
	)
	blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer blockchain.Stop()

	_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {})
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert chain: %v", err)
	}

	_, replacementBlocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, gen *BlockGen) {
		tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, nil), signer, key1)
		if i == 2 {
			gen.OffsetTime(-9)
		}
		if err != nil {
			t.Fatalf("failed to create tx: %v", err)
		}
		gen.AddTx(tx)
	})

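	// Subscribe to side-chain events before importing the replacement chain so that
	// none of the reorg notifications are missed.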
	chainSideCh := make(chan ChainSideEvent, 64)
	blockchain.SubscribeChainSideEvent(chainSideCh)
	if _, err := blockchain.InsertChain(replacementBlocks); err != nil {
		t.Fatalf("failed to insert chain: %v", err)
	}

	// The first two blocks of the secondary chain are for a brief moment considered
	// side chains because up to that point the first one is considered the
	// heavier chain.
	expectedSideHashes := map[common.Hash]bool{
		replacementBlocks[0].Hash(): true,
		replacementBlocks[1].Hash(): true,
		chain[0].Hash():             true,
		chain[1].Hash():             true,
		chain[2].Hash():             true,
	}

	i := 0

	const timeoutDura = 10 * time.Second
	timeout := time.NewTimer(timeoutDura)
done:
	for {
		select {
		case ev := <-chainSideCh:
			block := ev.Block
			if _, ok := expectedSideHashes[block.Hash()]; !ok {
				t.Errorf("%d: didn't expect %x to be in side chain", i, block.Hash())
			}
			i++

			if i == len(expectedSideHashes) {
				timeout.Stop()

				break done
			}
			timeout.Reset(timeoutDura)

		case <-timeout.C:
			t.Fatal("timeout: possibly not all blocks were triggered for a side event")
		}
	}

	// make sure no more events are fired
	select {
	case e := <-chainSideCh:
		t.Errorf("unexpected event fired: %v", e)
	case <-time.After(250 * time.Millisecond):
	}
}

// Tests if the canonical block can be fetched from the database during chain insertion.
func TestCanonicalBlockRetrieval(t *testing.T) {
	_, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
	if err != nil {
		t.Fatalf("failed to create pristine chain: %v", err)
	}
	defer blockchain.Stop()

	_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 10, func(i int, gen *BlockGen) {})

	var pend sync.WaitGroup
	pend.Add(len(chain))

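	// Spawn a concurrent reader for every block; each reader busy-waits until its
	// block has been written to the canonical index, then fetches and verifies it.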
	for i := range chain {
		go func(block *types.Block) {
			defer pend.Done()

			// Try to retrieve a block by its canonical hash and see if the block data can be retrieved.
			for {
				ch := rawdb.ReadCanonicalHash(blockchain.db, block.NumberU64())
				if ch == (common.Hash{}) {
					continue // busy wait for canonical hash to be written
				}
				if ch != block.Hash() {
					t.Errorf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex())
					return
				}
				fb := rawdb.ReadBlock(blockchain.db, ch, block.NumberU64())
				if fb == nil {
					t.Errorf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex())
					return
				}
				if fb.Hash() != block.Hash() {
					t.Errorf("invalid block hash for block %d, want %s, got %s", block.NumberU64(), block.Hash().Hex(), fb.Hash().Hex())
					return
				}
				return
			}
		}(chain[i])

		if _, err := blockchain.InsertChain(types.Blocks{chain[i]}); err != nil {
			t.Fatalf("failed to insert block %d: %v", i, err)
		}
	}
	pend.Wait()
}

func TestEIP155Transition(t *testing.T) {
	// Configure and generate a sample block chain
	var (
		key, _     = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address    = crypto.PubkeyToAddress(key.PublicKey)
		funds      = big.NewInt(1000000000)
		deleteAddr = common.Address{1}
		gspec      = &Genesis{
			Config: &params.ChainConfig{
				ChainID:        big.NewInt(1),
				EIP150Block:    big.NewInt(0),
				EIP155Block:    big.NewInt(2),
				HomesteadBlock: new(big.Int),
			},
			Alloc: GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
		}
	)
	genDb, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, block *BlockGen) {
		var (
			tx      *types.Transaction
			err     error
			basicTx = func(signer types.Signer) (*types.Transaction, error) {
				return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), 21000, new(big.Int), nil), signer, key)
			}
		)
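		// Block index 0 adds only an unprotected Homestead transaction; indexes 2 and 3
		// pair an unprotected transaction with an EIP-155 replay-protected one, since the
		// EIP-155 fork is configured to activate at block 2.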
		switch i {
		case 0:
			tx, err = basicTx(types.HomesteadSigner{})
			if err != nil {
				t.Fatal(err)
			}
			block.AddTx(tx)
		case 2:
			tx, err = basicTx(types.HomesteadSigner{})
			if err != nil {
				t.Fatal(err)
			}
			block.AddTx(tx)

			tx, err = basicTx(types.LatestSigner(gspec.Config))
			if err != nil {
				t.Fatal(err)
			}
			block.AddTx(tx)
		case 3:
			tx, err = basicTx(types.HomesteadSigner{})
			if err != nil {
				t.Fatal(err)
			}
			block.AddTx(tx)

			tx, err = basicTx(types.LatestSigner(gspec.Config))
			if err != nil {
				t.Fatal(err)
			}
			block.AddTx(tx)
		}
	})

	blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer blockchain.Stop()

	if _, err := blockchain.InsertChain(blocks); err != nil {
		t.Fatal(err)
	}
	block := blockchain.GetBlockByNumber(1)
	if block.Transactions()[0].Protected() {
		t.Error("Expected block[0].txs[0] to not be replay protected")
	}

	block = blockchain.GetBlockByNumber(3)
	if block.Transactions()[0].Protected() {
		t.Error("Expected block[3].txs[0] to not be replay protected")
	}
	if !block.Transactions()[1].Protected() {
		t.Error("Expected block[3].txs[1] to be replay protected")
	}
	if _, err := blockchain.InsertChain(blocks[4:]); err != nil {
		t.Fatal(err)
	}

	// generate an invalid chain id transaction
	config := &params.ChainConfig{
		ChainID:        big.NewInt(2),
		EIP150Block:    big.NewInt(0),
		EIP155Block:    big.NewInt(2),
		HomesteadBlock: new(big.Int),
	}
	blocks, _ = GenerateChain(config, blocks[len(blocks)-1], ethash.NewFaker(), genDb, 4, func(i int, block *BlockGen) {
		var (
			tx      *types.Transaction
			err     error
			basicTx = func(signer types.Signer) (*types.Transaction, error) {
				return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), 21000, new(big.Int), nil), signer, key)
			}
		)
		if i == 0 {
			tx, err = basicTx(types.LatestSigner(config))
			if err != nil {
				t.Fatal(err)
			}
			block.AddTx(tx)
		}
	})
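	// The import must fail: the transaction above is signed for chain ID 2, while the
	// chain being extended is configured with chain ID 1.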
	_, err := blockchain.InsertChain(blocks)
	if have, want := err, types.ErrInvalidChainId; !errors.Is(have, want) {
		t.Errorf("have %v, want %v", have, want)
	}
}

func TestEIP161AccountRemoval(t *testing.T) {
	// Configure and generate a sample block chain
	var (
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(1000000000)
		theAddr = common.Address{1}
		gspec   = &Genesis{
			Config: &params.ChainConfig{
				ChainID:        big.NewInt(1),
				HomesteadBlock: new(big.Int),
				EIP155Block:    new(big.Int),
				EIP150Block:    new(big.Int),
				EIP158Block:    big.NewInt(2),
			},
			Alloc: GenesisAlloc{address: {Balance: funds}},
		}
	)
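	// Every generated block sends a zero-value transaction to theAddr, touching the
	// account without funding it.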
	_, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, block *BlockGen) {
		var (
			tx     *types.Transaction
			err    error
			signer = types.LatestSigner(gspec.Config)
		)
		switch i {
		case 0:
			tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), 21000, new(big.Int), nil), signer, key)
		case 1:
			tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), 21000, new(big.Int), nil), signer, key)
		case 2:
			tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), 21000, new(big.Int), nil), signer, key)
		}
		if err != nil {
			t.Fatal(err)
		}
		block.AddTx(tx)
	})
	// account must exist pre EIP-161
	blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer blockchain.Stop()

	if _, err := blockchain.InsertChain(types.Blocks{blocks[0]}); err != nil {
		t.Fatal(err)
	}
	if st, _ := blockchain.State(); !st.Exist(theAddr) {
		t.Error("expected account to exist")
	}

	// account needs to be deleted post EIP-161
	if _, err := blockchain.InsertChain(types.Blocks{blocks[1]}); err != nil {
		t.Fatal(err)
	}
	if st, _ := blockchain.State(); st.Exist(theAddr) {
		t.Error("account should not exist")
	}

	// account mustn't be created post EIP-161
	if _, err := blockchain.InsertChain(types.Blocks{blocks[2]}); err != nil {
		t.Fatal(err)
	}
	if st, _ := blockchain.State(); st.Exist(theAddr) {
		t.Error("account should not exist")
	}
}

// This is a regression test (i.e. as weird as it is, don't delete it ever), which
// tests that under weird reorg conditions the blockchain and its internal header-
// chain return the same latest block/header.
//
// https://github.com/ethereum/go-ethereum/pull/15941
func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
	// Generate a canonical chain to act as the main dataset
	engine := ethash.NewFaker()
	genesis := &Genesis{
		Config:  params.TestChainConfig,
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })

	// Generate a bunch of fork blocks, each side forking from the canonical chain
	forks := make([]*types.Block, len(blocks))
	for i := 0; i < len(forks); i++ {
		parent := genesis.ToBlock()
		if i > 0 {
			parent = blocks[i-1]
		}
		fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
		forks[i] = fork[0]
	}
	// Import the canonical and fork chain side by side, verifying the current block
	// and current header consistency
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.Stop()

	for i := 0; i < len(blocks); i++ {
		if _, err := chain.InsertChain(blocks[i : i+1]); err != nil {
			t.Fatalf("block %d: failed to insert into chain: %v", i, err)
		}
		if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() {
			t.Errorf("block %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number, chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
		}
		if _, err := chain.InsertChain(forks[i : i+1]); err != nil {
			t.Fatalf(" fork %d: failed to insert into chain: %v", i, err)
		}
		if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() {
			t.Errorf(" fork %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number, chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
		}
	}
}

// Tests that importing small side forks doesn't leave junk in the trie database
// cache (which would eventually cause memory issues).
func TestTrieForkGC(t *testing.T) {
	// Generate a canonical chain to act as the main dataset
	engine := ethash.NewFaker()
	genesis := &Genesis{
		Config:  params.TestChainConfig,
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })

	// Generate a bunch of fork blocks, each side forking from the canonical chain
	forks := make([]*types.Block, len(blocks))
	for i := 0; i < len(forks); i++ {
		parent := genesis.ToBlock()
		if i > 0 {
			parent = blocks[i-1]
		}
		fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
		forks[i] = fork[0]
	}
	// Import the canonical and fork chain side by side, forcing the trie cache to cache both
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.Stop()

	for i := 0; i < len(blocks); i++ {
		if _, err := chain.InsertChain(blocks[i : i+1]); err != nil {
			t.Fatalf("block %d: failed to insert into chain: %v", i, err)
		}
		if _, err := chain.InsertChain(forks[i : i+1]); err != nil {
			t.Fatalf("fork %d: failed to insert into chain: %v", i, err)
		}
	}
	// Dereference all the recent tries and ensure no past trie is left in memory
	for i := 0; i < TriesInMemory; i++ {
		chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
		chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
	}
	if len(chain.stateCache.TrieDB().Nodes()) > 0 {
		t.Fatalf("stale tries still alive after garbage collection")
	}
}

// Tests that doing large reorgs works even if the state associated with the
// forking point is not available any more.
func TestLargeReorgTrieGC(t *testing.T) {
	// Generate the original common chain segment and the two competing forks
	engine := ethash.NewFaker()
	genesis := &Genesis{
		Config:  params.TestChainConfig,
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	genDb, shared, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
	original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
	competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })

	// Import the shared chain and the original canonical one
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.Stop()

	if _, err := chain.InsertChain(shared); err != nil {
		t.Fatalf("failed to insert shared chain: %v", err)
	}
	if _, err := chain.InsertChain(original); err != nil {
		t.Fatalf("failed to insert original chain: %v", err)
	}
	// Ensure that the state associated with the forking point is pruned away
	if node, _ := chain.stateCache.TrieDB().Node(shared[len(shared)-1].Root()); node != nil {
		t.Fatalf("common-but-old ancestor still cached")
	}
	// Import the competitor chain without exceeding the canonical's TD and ensure
	// we have not processed any of the blocks (protection against malicious blocks)
	if _, err := chain.InsertChain(competitor[:len(competitor)-2]); err != nil {
		t.Fatalf("failed to insert competitor chain: %v", err)
	}
	for i, block := range competitor[:len(competitor)-2] {
		if node, _ := chain.stateCache.TrieDB().Node(block.Root()); node != nil {
			t.Fatalf("competitor %d: low TD chain became processed", i)
		}
	}
	// Import the head of the competitor chain, triggering the reorg and ensure we
	// successfully reprocess all the stashed away blocks.
	if _, err := chain.InsertChain(competitor[len(competitor)-2:]); err != nil {
		t.Fatalf("failed to finalize competitor chain: %v", err)
	}
	for i, block := range competitor[:len(competitor)-TriesInMemory] {
		if node, _ := chain.stateCache.TrieDB().Node(block.Root()); node != nil {
			t.Fatalf("competitor %d: competing chain state missing", i)
		}
	}
}

func TestBlockchainRecovery(t *testing.T) {
	// Configure and generate a sample block chain
	var (
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(1000000000)
		gspec   = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
	)
	height := uint64(1024)
	_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)

	// Import the chain as an ancient-first node and ensure all pointers are updated
	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
	if err != nil {
		t.Fatalf("failed to create temp freezer db: %v", err)
	}
	defer ancientDb.Close()
	ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)

	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	if n, err := ancient.InsertHeaderChain(headers); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
		t.Fatalf("failed to insert receipt %d: %v", n, err)
	}
	rawdb.WriteLastPivotNumber(ancientDb, blocks[len(blocks)-1].NumberU64()) // Force fast sync behavior
	ancient.Stop()

	// Destroy head fast block manually
	midBlock := blocks[len(blocks)/2]
	rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash())

	// Reopen broken blockchain again
	ancient, _ = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer ancient.Stop()
	if num := ancient.CurrentBlock().Number.Uint64(); num != 0 {
		t.Errorf("head block mismatch: have #%v, want #%v", num, 0)
	}
	if num := ancient.CurrentSnapBlock().Number.Uint64(); num != midBlock.NumberU64() {
		t.Errorf("head snap-block mismatch: have #%v, want #%v", num, midBlock.NumberU64())
	}
	if num := ancient.CurrentHeader().Number.Uint64(); num != midBlock.NumberU64() {
		t.Errorf("head header mismatch: have #%v, want #%v", num, midBlock.NumberU64())
	}
}

// This test checks that InsertReceiptChain will roll back correctly when attempting to insert a side chain.
func TestInsertReceiptChainRollback(t *testing.T) {
	// Generate forked chain. The returned BlockChain object is used to process the side chain blocks.
	tmpChain, sideblocks, canonblocks, gspec, err := getLongAndShortChains()
	if err != nil {
		t.Fatal(err)
	}
	defer tmpChain.Stop()
	// Get the side chain receipts.
	if _, err := tmpChain.InsertChain(sideblocks); err != nil {
		t.Fatal("processing side chain failed:", err)
	}
	t.Log("sidechain head:", tmpChain.CurrentBlock().Number, tmpChain.CurrentBlock().Hash())
	sidechainReceipts := make([]types.Receipts, len(sideblocks))
	for i, block := range sideblocks {
		sidechainReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
	}
	// Get the canon chain receipts.
	if _, err := tmpChain.InsertChain(canonblocks); err != nil {
		t.Fatal("processing canon chain failed:", err)
	}
	t.Log("canon head:", tmpChain.CurrentBlock().Number, tmpChain.CurrentBlock().Hash())
	canonReceipts := make([]types.Receipts, len(canonblocks))
	for i, block := range canonblocks {
		canonReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
	}

	// Set up a BlockChain that uses the ancient store.
	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
	if err != nil {
		t.Fatalf("failed to create temp freezer db: %v", err)
	}
	defer ancientDb.Close()

	ancientChain, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer ancientChain.Stop()

	// Import the canonical header chain.
	canonHeaders := make([]*types.Header, len(canonblocks))
	for i, block := range canonblocks {
		canonHeaders[i] = block.Header()
	}
	if _, err = ancientChain.InsertHeaderChain(canonHeaders); err != nil {
		t.Fatal("can't import canon headers:", err)
	}

	// Try to insert blocks/receipts of the side chain.
	_, err = ancientChain.InsertReceiptChain(sideblocks, sidechainReceipts, uint64(len(sideblocks)))
	if err == nil {
		t.Fatal("expected error from InsertReceiptChain.")
	}
	if ancientChain.CurrentSnapBlock().Number.Uint64() != 0 {
		t.Fatalf("failed to rollback ancient data, want %d, have %d", 0, ancientChain.CurrentSnapBlock().Number)
	}
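	// After the rollback only the genesis block should remain in the freezer.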
	if frozen, err := ancientChain.db.Ancients(); err != nil || frozen != 1 {
		t.Fatalf("failed to truncate ancient data, frozen index is %d", frozen)
	}

	// Insert blocks/receipts of the canonical chain.
	_, err = ancientChain.InsertReceiptChain(canonblocks, canonReceipts, uint64(len(canonblocks)))
	if err != nil {
		t.Fatalf("can't import canon chain receipts: %v", err)
	}
	if ancientChain.CurrentSnapBlock().Number.Uint64() != canonblocks[len(canonblocks)-1].NumberU64() {
		t.Fatalf("failed to insert ancient receipt chain after rollback")
	}
	if frozen, _ := ancientChain.db.Ancients(); frozen != uint64(len(canonblocks))+1 {
		t.Fatalf("wrong ancients count %d", frozen)
	}
}

// Tests that importing a very large side fork, which is larger than the canon chain
// but where the difficulty per block is kept low, works: the fork will not overtake
// the 'canon' chain until after it has passed canon by about 200 blocks.
//
// Details at:
//   - https://github.com/ethereum/go-ethereum/issues/18977
//   - https://github.com/ethereum/go-ethereum/pull/18988
func TestLowDiffLongChain(t *testing.T) {
	// Generate a canonical chain to act as the main dataset
	engine := ethash.NewFaker()
	genesis := &Genesis{
		Config:  params.TestChainConfig,
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	// We must use a pretty long chain to ensure that the fork doesn't overtake us
	// until after at least 128 blocks post tip
	genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*TriesInMemory, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})
		b.OffsetTime(-9)
	})

	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.stopWithoutSaving()

	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}
	// Generate fork chain, starting from an early block
	parent := blocks[10]
	fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*TriesInMemory, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{2})
	})

	// And now import the fork
	if i, err := chain.InsertChain(fork); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", i, err)
	}
	head := chain.CurrentBlock()
	if got := fork[len(fork)-1].Hash(); got != head.Hash() {
		t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
	}
	// Sanity check that all the canonical numbers are present
	header := chain.CurrentHeader()
	for number := head.Number.Uint64(); number > 0; number-- {
		if hash := chain.GetHeaderByNumber(number).Hash(); hash != header.Hash() {
			t.Fatalf("header %d: canonical hash mismatch: have %x, want %x", number, hash, header.Hash())
		}
		header = chain.GetHeader(header.ParentHash, number-1)
	}
}

// Tests that importing a sidechain (S), where
//   - S is sidechain, containing blocks [Sn...Sm]
//   - C is canon chain, containing blocks [G..Cn..Cm]
//   - A common ancestor is placed at prune-point + blocksBetweenCommonAncestorAndPruneblock
//   - The sidechain S is prepended with numCanonBlocksInSidechain blocks from the canon chain
//
// The mergePoint can be these values:
//   - -1: the transition won't happen
//   - 0:  the transition happens since genesis
//   - 1:  the transition happens after some chain segments
func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommonAncestorAndPruneblock int, mergePoint int) {
|
2019-02-21 10:36:49 +00:00
|
|
|
// Generate a canonical chain to act as the main dataset
|
2022-09-07 18:21:59 +00:00
|
|
|
chainConfig := *params.TestChainConfig
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 11:23:02 +00:00
|
|
|
var (
|
2022-09-07 18:21:59 +00:00
|
|
|
merger = consensus.NewMerger(rawdb.NewMemoryDatabase())
|
|
|
|
engine = beacon.New(ethash.NewFaker())
|
2021-11-26 11:23:02 +00:00
|
|
|
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
|
|
addr = crypto.PubkeyToAddress(key.PublicKey)
|
|
|
|
nonce = uint64(0)
|
|
|
|
|
|
|
|
gspec = &Genesis{
|
|
|
|
Config: &chainConfig,
|
|
|
|
Alloc: GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
|
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
|
|
|
}
|
2022-12-20 14:56:52 +00:00
|
|
|
signer = types.LatestSigner(gspec.Config)
|
|
|
|
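// mergeBlock is the first block number treated as post-merge; math.MaxInt32 effectively means "never" until one of the merge points below lowers it.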
mergeBlock = math.MaxInt32
|
2021-11-26 11:23:02 +00:00
|
|
|
)
|
2019-02-21 10:36:49 +00:00
|
|
|
// Generate and import the canonical chain
|
2022-09-07 18:21:59 +00:00
|
|
|
chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
|
2019-02-21 10:36:49 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
2022-10-06 11:39:20 +00:00
|
|
|
defer chain.Stop()
|
|
|
|
|
2021-11-26 11:23:02 +00:00
|
|
|
// Activate the transition since genesis if required
|
|
|
|
if mergePoint == 0 {
|
2022-12-20 14:56:52 +00:00
|
|
|
mergeBlock = 0
|
2021-11-26 11:23:02 +00:00
|
|
|
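// Mark the transition as reached and finalized so everything generated from here on is treated as post-merge.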
merger.ReachTTD()
|
|
|
|
merger.FinalizePoS()
|
|
|
|
|
|
|
|
// Set the terminal total difficulty in the config
|
|
|
|
gspec.Config.TerminalTotalDifficulty = big.NewInt(0)
|
|
|
|
}
|
2022-09-07 18:21:59 +00:00
|
|
|
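// Generate 2*TriesInMemory blocks so the state of the oldest ones is guaranteed to have been evicted from the in-memory trie cache by the time pruning is checked below.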
genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
|
2021-11-26 11:23:02 +00:00
|
|
|
tx, err := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("deadbeef"), big.NewInt(100), 21000, big.NewInt(int64(i+1)*params.GWei), nil), signer, key)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tx: %v", err)
|
|
|
|
}
|
|
|
|
gen.AddTx(tx)
|
2022-12-20 14:56:52 +00:00
|
|
|
if int(gen.header.Number.Uint64()) >= mergeBlock {
|
|
|
|
gen.SetPoS()
|
|
|
|
}
|
2021-11-26 11:23:02 +00:00
|
|
|
nonce++
|
|
|
|
})
|
2019-02-21 10:36:49 +00:00
|
|
|
if n, err := chain.InsertChain(blocks); err != nil {
|
|
|
|
t.Fatalf("block %d: failed to insert into chain: %v", n, err)
|
|
|
|
}
|
|
|
|
|
2019-05-30 18:51:13 +00:00
|
|
|
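// Only the most recent TriesInMemory states are kept in memory, so anything older should have been garbage collected by now.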
lastPrunedIndex := len(blocks) - TriesInMemory - 1
|
2019-02-21 10:36:49 +00:00
|
|
|
lastPrunedBlock := blocks[lastPrunedIndex]
|
2019-05-30 18:51:13 +00:00
|
|
|
firstNonPrunedBlock := blocks[len(blocks)-TriesInMemory]
|
2019-02-21 10:36:49 +00:00
|
|
|
|
|
|
|
// Verify pruning of lastPrunedBlock
|
|
|
|
if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
|
|
|
|
t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64())
|
|
|
|
}
|
|
|
|
// Verify firstNonPrunedBlock is not pruned
|
|
|
|
if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
|
|
|
|
t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64())
|
|
|
|
}
|
2021-11-26 11:23:02 +00:00
|
|
|
|
|
|
|
// Activate the transition in the middle of the chain
|
|
|
|
if mergePoint == 1 {
|
|
|
|
merger.ReachTTD()
|
|
|
|
merger.FinalizePoS()
|
|
|
|
// Set the terminal total difficulty in the config
|
2022-12-20 14:56:52 +00:00
|
|
|
ttd := big.NewInt(int64(len(blocks)))
|
|
|
|
ttd.Mul(ttd, params.GenesisDifficulty)
|
|
|
|
gspec.Config.TerminalTotalDifficulty = ttd
|
|
|
|
mergeBlock = len(blocks)
|
2021-11-26 11:23:02 +00:00
|
|
|
}
|
|
|
|
|
2019-02-21 10:36:49 +00:00
|
|
|
// Generate the sidechain
|
|
|
|
// First block should be a known block, block after should be a pruned block. So
|
|
|
|
// canon(pruned), side, side...
|
|
|
|
|
|
|
|
// Generate fork chain, make it longer than canon
|
|
|
|
parentIndex := lastPrunedIndex + blocksBetweenCommonAncestorAndPruneblock
|
|
|
|
parent := blocks[parentIndex]
|
2022-09-07 18:21:59 +00:00
|
|
|
fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) {
|
2019-02-21 10:36:49 +00:00
|
|
|
b.SetCoinbase(common.Address{2})
|
2022-12-20 14:56:52 +00:00
|
|
|
if int(b.header.Number.Uint64()) >= mergeBlock {
|
|
|
|
b.SetPoS()
|
|
|
|
}
|
2019-02-21 10:36:49 +00:00
|
|
|
})
|
|
|
|
// Prepend the parent(s)
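// These are canonical blocks the chain already knows about (their state may be pruned), ending at the fork parent.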
|
|
|
|
var sidechain []*types.Block
|
|
|
|
for i := numCanonBlocksInSidechain; i > 0; i-- {
|
|
|
|
sidechain = append(sidechain, blocks[parentIndex+1-i])
|
|
|
|
}
|
|
|
|
sidechain = append(sidechain, fork...)
|
2021-11-26 11:23:02 +00:00
|
|
|
n, err := chain.InsertChain(sidechain)
|
2019-02-21 10:36:49 +00:00
|
|
|
if err != nil {
|
2021-11-26 11:23:02 +00:00
|
|
|
t.Errorf("Got error, %v number %d - %d", err, sidechain[n].NumberU64(), n)
|
2019-02-21 10:36:49 +00:00
|
|
|
}
|
|
|
|
head := chain.CurrentBlock()
|
|
|
|
if got := fork[len(fork)-1].Hash(); got != head.Hash() {
|
|
|
|
t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tests that importing a sidechain (S), where
|
2022-09-10 11:25:40 +00:00
|
|
|
// - S is sidechain, containing blocks [Sn...Sm]
|
|
|
|
// - C is canon chain, containing blocks [G..Cn..Cm]
|
|
|
|
// - The common ancestor Cc is pruned
|
|
|
|
// - The first block in S: Sn, is == Cn
|
|
|
|
//
|
2019-02-21 10:36:49 +00:00
|
|
|
// That is: the sidechain for import contains some blocks already present in canon chain.
|
2022-09-10 11:25:40 +00:00
|
|
|
// So the blocks are:
|
|
|
|
//
|
|
|
|
// [ Cn, Cn+1, Cc, Sn+3 ... Sm]
|
|
|
|
// ^ ^ ^ pruned
|
2019-02-21 10:36:49 +00:00
|
|
|
func TestPrunedImportSide(t *testing.T) {
|
|
|
|
//glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
|
|
|
|
//glogger.Verbosity(3)
|
|
|
|
//log.Root().SetHandler(log.Handler(glogger))
|
2021-11-26 11:23:02 +00:00
|
|
|
testSideImport(t, 3, 3, -1)
|
|
|
|
testSideImport(t, 3, -3, -1)
|
|
|
|
testSideImport(t, 10, 0, -1)
|
|
|
|
testSideImport(t, 1, 10, -1)
|
|
|
|
testSideImport(t, 1, -10, -1)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestPrunedImportSideWithMerging(t *testing.T) {
|
|
|
|
//glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
|
|
|
|
//glogger.Verbosity(3)
|
|
|
|
//log.Root().SetHandler(log.Handler(glogger))
|
|
|
|
testSideImport(t, 3, 3, 0)
|
|
|
|
testSideImport(t, 3, -3, 0)
|
|
|
|
testSideImport(t, 10, 0, 0)
|
|
|
|
testSideImport(t, 1, 10, 0)
|
|
|
|
testSideImport(t, 1, -10, 0)
|
|
|
|
|
|
|
|
testSideImport(t, 3, 3, 1)
|
|
|
|
testSideImport(t, 3, -3, 1)
|
|
|
|
testSideImport(t, 10, 0, 1)
|
|
|
|
testSideImport(t, 1, 10, 1)
|
|
|
|
testSideImport(t, 1, -10, 1)
|
2019-02-21 10:36:49 +00:00
|
|
|
}
|
2019-05-07 12:26:00 +00:00
|
|
|
|
2019-05-08 11:30:36 +00:00
|
|
|
func TestInsertKnownHeaders(t *testing.T) { testInsertKnownChainData(t, "headers") }
|
|
|
|
func TestInsertKnownReceiptChain(t *testing.T) { testInsertKnownChainData(t, "receipts") }
|
|
|
|
func TestInsertKnownBlocks(t *testing.T) { testInsertKnownChainData(t, "blocks") }
|
|
|
|
|
|
|
|
func testInsertKnownChainData(t *testing.T, typ string) {
|
|
|
|
engine := ethash.NewFaker()
|
2022-09-07 18:21:59 +00:00
|
|
|
genesis := &Genesis{
|
2022-08-30 16:22:28 +00:00
|
|
|
Config: params.TestChainConfig,
|
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
|
|
|
}
|
2022-09-07 18:21:59 +00:00
|
|
|
genDb, blocks, receipts := GenerateChainWithGenesis(genesis, engine, 32, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
|
2019-05-08 11:30:36 +00:00
|
|
|
|
|
|
|
// A longer chain but total difficulty is lower.
|
2022-09-07 18:21:59 +00:00
|
|
|
blocks2, receipts2 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 65, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
|
|
|
|
|
2019-05-08 11:30:36 +00:00
|
|
|
// A shorter chain but total difficulty is higher.
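// Shifting the timestamps back makes ethash assign a higher per-block difficulty, so 64 blocks outweigh the 65-block chain above.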
|
2022-09-07 18:21:59 +00:00
|
|
|
blocks3, receipts3 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 64, func(i int, b *BlockGen) {
|
2019-05-08 11:30:36 +00:00
|
|
|
b.SetCoinbase(common.Address{1})
|
|
|
|
b.OffsetTime(-9) // A higher difficulty
|
|
|
|
})
|
|
|
|
// Import the shared chain and the original canonical one
|
2022-08-30 16:22:28 +00:00
|
|
|
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
|
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forcibly
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
2019-05-14 14:07:44 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create temp freezer db: %v", err)
|
|
|
|
}
|
2022-04-08 13:44:55 +00:00
|
|
|
defer chaindb.Close()
|
2019-05-08 11:30:36 +00:00
|
|
|
|
2022-09-07 18:21:59 +00:00
|
|
|
chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil, nil)
|
2019-05-08 11:30:36 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
2022-10-06 11:39:20 +00:00
|
|
|
defer chain.Stop()
|
2019-05-08 11:30:36 +00:00
|
|
|
|
|
|
|
var (
|
|
|
|
inserter func(blocks []*types.Block, receipts []types.Receipts) error
|
|
|
|
asserter func(t *testing.T, block *types.Block)
|
|
|
|
)
|
|
|
|
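// Pick the insert/assert helpers for the requested mode: header-only import, receipt-chain import, or full block import.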
if typ == "headers" {
|
|
|
|
inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
|
|
|
|
headers := make([]*types.Header, 0, len(blocks))
|
|
|
|
for _, block := range blocks {
|
|
|
|
headers = append(headers, block.Header())
|
|
|
|
}
|
2023-05-03 09:58:39 +00:00
|
|
|
_, err := chain.InsertHeaderChain(headers)
|
2019-05-08 11:30:36 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
asserter = func(t *testing.T, block *types.Block) {
|
|
|
|
if chain.CurrentHeader().Hash() != block.Hash() {
|
|
|
|
t.Fatalf("current head header mismatch, have %v, want %v", chain.CurrentHeader().Hash().Hex(), block.Hash().Hex())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else if typ == "receipts" {
|
|
|
|
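// Receipt-chain import mimics snap/fast sync: headers go in first, then bodies and receipts via InsertReceiptChain.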
inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
|
|
|
|
headers := make([]*types.Header, 0, len(blocks))
|
|
|
|
for _, block := range blocks {
|
|
|
|
headers = append(headers, block.Header())
|
|
|
|
}
|
2023-05-03 09:58:39 +00:00
|
|
|
_, err := chain.InsertHeaderChain(headers)
|
2019-05-08 11:30:36 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-04-25 14:59:48 +00:00
|
|
|
_, err = chain.InsertReceiptChain(blocks, receipts, 0)
|
2019-05-08 11:30:36 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
asserter = func(t *testing.T, block *types.Block) {
|
2023-03-02 06:29:15 +00:00
|
|
|
if chain.CurrentSnapBlock().Hash() != block.Hash() {
|
|
|
|
t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentSnapBlock().Hash().Hex(), block.Hash().Hex())
|
2019-05-08 11:30:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
|
|
|
|
_, err := chain.InsertChain(blocks)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
asserter = func(t *testing.T, block *types.Block) {
|
|
|
|
if chain.CurrentBlock().Hash() != block.Hash() {
|
|
|
|
t.Fatalf("current head block mismatch, have %v, want %v", chain.CurrentBlock().Hash().Hex(), block.Hash().Hex())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := inserter(blocks, receipts); err != nil {
|
|
|
|
t.Fatalf("failed to insert chain data: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reimport the chain data again. All the imported
|
|
|
|
// chain data is regarded as "known" data.
|
|
|
|
if err := inserter(blocks, receipts); err != nil {
|
|
|
|
t.Fatalf("failed to insert chain data: %v", err)
|
|
|
|
}
|
|
|
|
asserter(t, blocks[len(blocks)-1])
|
|
|
|
|
|
|
|
// Import a long canonical chain with some known data as prefix.
|
2020-08-20 10:01:24 +00:00
|
|
|
rollback := blocks[len(blocks)/2].NumberU64()
|
|
|
|
|
|
|
|
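// Rewind the head below the midpoint so the first half of the already-imported blocks becomes the known prefix for the next import.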
chain.SetHead(rollback - 1)
|
2019-05-08 11:30:36 +00:00
|
|
|
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
|
|
|
|
t.Fatalf("failed to insert chain data: %v", err)
|
|
|
|
}
|
|
|
|
asserter(t, blocks2[len(blocks2)-1])
|
|
|
|
|
|
|
|
// Import a shorter but heavier (higher total difficulty) chain with some known data as prefix.
|
|
|
|
if err := inserter(append(blocks, blocks3...), append(receipts, receipts3...)); err != nil {
|
|
|
|
t.Fatalf("failed to insert chain data: %v", err)
|
|
|
|
}
|
|
|
|
asserter(t, blocks3[len(blocks3)-1])
|
|
|
|
|
|
|
|
// Import a longer but lower total difficulty chain with some known data as prefix.
|
|
|
|
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
|
|
|
|
t.Fatalf("failed to insert chain data: %v", err)
|
|
|
|
}
|
|
|
|
// The head shouldn't change.
|
|
|
|
asserter(t, blocks3[len(blocks3)-1])
|
|
|
|
|
2019-05-14 14:07:44 +00:00
|
|
|
// Rollback the heavier chain and re-insert the longer chain again
|
2020-08-20 10:01:24 +00:00
|
|
|
chain.SetHead(rollback - 1)
|
2019-05-14 14:07:44 +00:00
|
|
|
if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
|
|
|
|
t.Fatalf("failed to insert chain data: %v", err)
|
2019-05-08 11:30:36 +00:00
|
|
|
}
|
2019-05-14 14:07:44 +00:00
|
|
|
asserter(t, blocks2[len(blocks2)-1])
|
2019-05-08 11:30:36 +00:00
|
|
|
}
|
|
|
|
|
2021-11-26 11:23:02 +00:00
|
|
|
func TestInsertKnownHeadersWithMerging(t *testing.T) {
|
|
|
|
testInsertKnownChainDataWithMerging(t, "headers", 0)
|
|
|
|
}
|
|
|
|
func TestInsertKnownReceiptChainWithMerging(t *testing.T) {
|
|
|
|
testInsertKnownChainDataWithMerging(t, "receipts", 0)
|
|
|
|
}
|
|
|
|
func TestInsertKnownBlocksWithMerging(t *testing.T) {
|
|
|
|
testInsertKnownChainDataWithMerging(t, "blocks", 0)
|
|
|
|
}
|
|
|
|
func TestInsertKnownHeadersAfterMerging(t *testing.T) {
|
|
|
|
testInsertKnownChainDataWithMerging(t, "headers", 1)
|
|
|
|
}
|
|
|
|
func TestInsertKnownReceiptChainAfterMerging(t *testing.T) {
|
|
|
|
testInsertKnownChainDataWithMerging(t, "receipts", 1)
|
|
|
|
}
|
|
|
|
func TestInsertKnownBlocksAfterMerging(t *testing.T) {
|
|
|
|
testInsertKnownChainDataWithMerging(t, "blocks", 1)
|
|
|
|
}
|
|
|
|
|
|
|
|
// mergeHeight can take one of these values:
|
|
|
|
// 0: means the merging is applied since genesis
|
|
|
|
// 1: means the merging is applied after the first segment
|
|
|
|
func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight int) {
|
|
|
|
// Copy the TestChainConfig so we can modify it during tests
|
|
|
|
chainConfig := *params.TestChainConfig
|
|
|
|
var (
|
2022-09-07 18:21:59 +00:00
|
|
|
genesis = &Genesis{
|
2022-08-30 16:22:28 +00:00
|
|
|
BaseFee: big.NewInt(params.InitialBaseFee),
|
|
|
|
Config: &chainConfig,
|
|
|
|
}
|
2022-12-20 14:56:52 +00:00
|
|
|
engine = beacon.New(ethash.NewFaker())
|
|
|
|
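// math.MaxUint64 again means "never merge" unless one of the merge heights below lowers it.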
mergeBlock = uint64(math.MaxUint64)
|
2021-11-26 11:23:02 +00:00
|
|
|
)
|
|
|
|
// Apply merging since genesis
|
|
|
|
if mergeHeight == 0 {
|
2022-09-07 18:21:59 +00:00
|
|
|
genesis.Config.TerminalTotalDifficulty = big.NewInt(0)
|
2022-12-20 14:56:52 +00:00
|
|
|
mergeBlock = uint64(0)
|
2021-11-26 11:23:02 +00:00
|
|
|
}
|
2022-12-20 14:56:52 +00:00
|
|
|
|
|
|
|
genDb, blocks, receipts := GenerateChainWithGenesis(genesis, engine, 32,
|
|
|
|
func(i int, b *BlockGen) {
|
|
|
|
if b.header.Number.Uint64() >= mergeBlock {
|
|
|
|
b.SetPoS()
|
|
|
|
}
|
|
|
|
b.SetCoinbase(common.Address{1})
|
|
|
|
})
|
2021-11-26 11:23:02 +00:00
|
|
|
|
|
|
|
// Apply merging after the first segment
|
|
|
|
if mergeHeight == 1 {
|
2022-12-20 14:56:52 +00:00
|
|
|
// TTD is genesis diff + blocks
|
|
|
|
ttd := big.NewInt(1 + int64(len(blocks)))
|
|
|
|
ttd.Mul(ttd, params.GenesisDifficulty)
|
|
|
|
genesis.Config.TerminalTotalDifficulty = ttd
|
|
|
|
mergeBlock = uint64(len(blocks))
|
2021-11-26 11:23:02 +00:00
|
|
|
}
|
|
|
|
// Longer chain and shorter chain
|
2022-12-20 14:56:52 +00:00
|
|
|
blocks2, receipts2 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 65, func(i int, b *BlockGen) {
|
|
|
|
b.SetCoinbase(common.Address{1})
|
|
|
|
if b.header.Number.Uint64() >= mergeBlock {
|
|
|
|
b.SetPoS()
|
|
|
|
}
|
|
|
|
})
|
2022-09-07 18:21:59 +00:00
|
|
|
blocks3, receipts3 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 64, func(i int, b *BlockGen) {
|
2021-11-26 11:23:02 +00:00
|
|
|
b.SetCoinbase(common.Address{1})
|
|
|
|
b.OffsetTime(-9) // Time shifted, difficulty shouldn't be changed
|
2022-12-20 14:56:52 +00:00
|
|
|
if b.header.Number.Uint64() >= mergeBlock {
|
|
|
|
b.SetPoS()
|
|
|
|
}
|
2021-11-26 11:23:02 +00:00
|
|
|
})
|
|
|
|
// Import the shared chain and the original canonical one
|
2022-09-07 18:21:59 +00:00
|
|
|
chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
|
2021-11-26 11:23:02 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create temp freezer db: %v", err)
|
|
|
|
}
|
2022-04-08 13:44:55 +00:00
|
|
|
defer chaindb.Close()
|
2021-11-26 11:23:02 +00:00
|
|
|
|
2022-09-07 18:21:59 +00:00
|
|
|
chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil, nil)
|
2021-11-26 11:23:02 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("failed to create tester chain: %v", err)
|
|
|
|
}
|
2022-10-06 11:39:20 +00:00
|
|
|
defer chain.Stop()
|
|
|
|
|
2021-11-26 11:23:02 +00:00
|
|
|
var (
|
|
|
|
inserter func(blocks []*types.Block, receipts []types.Receipts) error
|
|
|
|
asserter func(t *testing.T, block *types.Block)
|
|
|
|
)
|
|
|
|
if typ == "headers" {
|
|
|
|
inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
|
|
|
|
headers := make([]*types.Header, 0, len(blocks))
|
|
|
|
for _, block := range blocks {
|
|
|
|
headers = append(headers, block.Header())
|
|
|
|
}
|
2023-05-03 09:58:39 +00:00
|
|
|
i, err := chain.InsertHeaderChain(headers)
|
2022-12-20 14:56:52 +00:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("index %d, number %d: %w", i, headers[i].Number, err)
|
|
|
|
}
|
2021-11-26 11:23:02 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
asserter = func(t *testing.T, block *types.Block) {
|
|
|
|
if chain.CurrentHeader().Hash() != block.Hash() {
|
|
|
|
t.Fatalf("current head header mismatch, have %v, want %v", chain.CurrentHeader().Hash().Hex(), block.Hash().Hex())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else if typ == "receipts" {
|
|
|
|
inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
|
|
|
|
headers := make([]*types.Header, 0, len(blocks))
|
|
|
|
for _, block := range blocks {
|
|
|
|
headers = append(headers, block.Header())
|
|
|
|
}
|
2023-05-03 09:58:39 +00:00
|
|
|
i, err := chain.InsertHeaderChain(headers)
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 11:23:02 +00:00
|
|
|
if err != nil {
|
2022-12-20 14:56:52 +00:00
|
|
|
return fmt.Errorf("index %d: %w", i, err)
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 11:23:02 +00:00
|
|
|
}
|
|
|
|
_, err = chain.InsertReceiptChain(blocks, receipts, 0)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
asserter = func(t *testing.T, block *types.Block) {
|
2023-03-02 06:29:15 +00:00
|
|
|
if chain.CurrentSnapBlock().Hash() != block.Hash() {
|
|
|
|
t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentSnapBlock().Hash().Hex(), block.Hash().Hex())
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 11:23:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
|
2022-12-20 14:56:52 +00:00
|
|
|
i, err := chain.InsertChain(blocks)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("index %d: %w", i, err)
|
|
|
|
}
|
|
|
|
return nil
|
all: core rework for the merge transition (#23761)
* all: work for eth1/2 transtition
* consensus/beacon, eth: change beacon difficulty to 0
* eth: updates
* all: add terminalBlockDifficulty config, fix rebasing issues
* eth: implemented merge interop spec
* internal/ethapi: update to v1.0.0.alpha.2
This commit updates the code to the new spec, moving payloadId into
it's own object. It also fixes an issue with finalizing an empty blockhash.
It also properly sets the basefee
* all: sync polishes, other fixes + refactors
* core, eth: correct semantics for LeavePoW, EnterPoS
* core: fixed rebasing artifacts
* core: light: performance improvements
* core: use keyed field (f)
* core: eth: fix compilation issues + tests
* eth/catalyst: dbetter error codes
* all: move Merger to consensus/, remove reliance on it in bc
* all: renamed EnterPoS and LeavePoW to ReachTDD and FinalizePoS
* core: make mergelogs a function
* core: use InsertChain instead of InsertBlock
* les: drop merger from lightchain object
* consensus: add merger
* core: recoverAncestors in catalyst mode
* core: fix nitpick
* all: removed merger from beacon, use TTD, nitpicks
* consensus: eth: add docstring, removed unnecessary code duplication
* consensus/beacon: better comment
* all: easy to fix nitpicks by karalabe
* consensus/beacon: verify known headers to be sure
* core: comments
* core: eth: don't drop peers who advertise blocks, nitpicks
* core: never add beacon blocks to the future queue
* core: fixed nitpicks
* consensus/beacon: simplify IsTTDReached check
* consensus/beacon: correct IsTTDReached check
Co-authored-by: rjl493456442 <garyrong0905@gmail.com>
Co-authored-by: Péter Szilágyi <peterke@gmail.com>
2021-11-26 11:23:02 +00:00
|
|
|
}
|
|
|
|
asserter = func(t *testing.T, block *types.Block) {
|
|
|
|
if chain.CurrentBlock().Hash() != block.Hash() {
|
|
|
|
t.Fatalf("current head block mismatch, have %v, want %v", chain.CurrentBlock().Hash().Hex(), block.Hash().Hex())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if err := inserter(blocks, receipts); err != nil {
|
|
|
|
t.Fatalf("failed to insert chain data: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reimport the chain data again. All the imported
|
|
|
|
// chain data are regarded "known" data.
|
|
|
|
if err := inserter(blocks, receipts); err != nil {
|
|
|
|
t.Fatalf("failed to insert chain data: %v", err)
|
|
|
|
}
|
|
|
|
asserter(t, blocks[len(blocks)-1])
|
|
|
|
|
|
|
|
// Import a long canonical chain with some known data as prefix.
|
|
|
|
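	// Roll the chain back to the middle and reimport the full chain, so the first
	// half is known data and the second half is new again.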
	rollback := blocks[len(blocks)/2].NumberU64()
	chain.SetHead(rollback - 1)
	if err := inserter(blocks, receipts); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	asserter(t, blocks[len(blocks)-1])

	// Import a longer chain with some known data as prefix.
	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	asserter(t, blocks2[len(blocks2)-1])

	// Import a shorter chain with some known data as prefix.
	// The reorg is expected since the fork choice rule is
	// already changed.
	if err := inserter(append(blocks, blocks3...), append(receipts, receipts3...)); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	// The head shouldn't change.
	asserter(t, blocks3[len(blocks3)-1])

	// Reimport the longer chain again, the reorg is still expected
	chain.SetHead(rollback - 1)
	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
		t.Fatalf("failed to insert chain data: %v", err)
	}
	asserter(t, blocks2[len(blocks2)-1])
}

// getLongAndShortChains returns two chains: a longer canonical chain and a
// shorter but heavier fork.
func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, *Genesis, error) {
	// Generate a canonical chain to act as the main dataset
	engine := ethash.NewFaker()
	genesis := &Genesis{
		Config:  params.TestChainConfig,
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	// Generate and import the canonical chain,
	// offsetting the time to keep the difficulty low.
	genDb, longChain, _ := GenerateChainWithGenesis(genesis, engine, 80, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})
	})
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err)
	}
	// Generate fork chain, make it shorter than canon, with common ancestor pretty early
	parentIndex := 3
	parent := longChain[parentIndex]
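	// OffsetTime(-9) shortens the block times on the fork, which raises each block's
	// difficulty, so the shorter chain ends up heavier in total difficulty.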
	heavyChainExt, _ := GenerateChain(genesis.Config, parent, engine, genDb, 75, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{2})
		b.OffsetTime(-9)
	})
	var heavyChain []*types.Block
	heavyChain = append(heavyChain, longChain[:parentIndex+1]...)
	heavyChain = append(heavyChain, heavyChainExt...)

	// Verify that the test is sane
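	// i.e. the heavy chain must carry strictly more total difficulty while having
	// strictly fewer blocks than the long chain.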
	var (
		longerTd  = new(big.Int)
		shorterTd = new(big.Int)
	)
	for index, b := range longChain {
		longerTd.Add(longerTd, b.Difficulty())
		if index <= parentIndex {
			shorterTd.Add(shorterTd, b.Difficulty())
		}
	}
	for _, b := range heavyChain {
		shorterTd.Add(shorterTd, b.Difficulty())
	}
	if shorterTd.Cmp(longerTd) <= 0 {
		return nil, nil, nil, nil, fmt.Errorf("test is moot, heavyChain td (%v) must be larger than canon td (%v)", shorterTd, longerTd)
	}
	longerNum := longChain[len(longChain)-1].NumberU64()
	shorterNum := heavyChain[len(heavyChain)-1].NumberU64()
	if shorterNum >= longerNum {
		return nil, nil, nil, nil, fmt.Errorf("test is moot, heavyChain num (%v) must be lower than canon num (%v)", shorterNum, longerNum)
	}
	return chain, longChain, heavyChain, genesis, nil
}

// TestReorgToShorterRemovesCanonMapping tests that if we
// 1. Have a chain [0 ... N .. X]
// 2. Reorg to shorter but heavier chain [0 ... N ... Y]
// 3. Then there should be no canon mapping for the block at height X
// 4. The forked block should still be retrievable by hash
func TestReorgToShorterRemovesCanonMapping(t *testing.T) {
	chain, canonblocks, sideblocks, _, err := getLongAndShortChains()
	if err != nil {
		t.Fatal(err)
	}
	defer chain.Stop()

	if n, err := chain.InsertChain(canonblocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}
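	// Remember the canonical head before importing the side chain; after the reorg
	// it should only be reachable by hash, not by number.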
	canonNum := chain.CurrentBlock().Number.Uint64()
	canonHash := chain.CurrentBlock().Hash()
	_, err = chain.InsertChain(sideblocks)
	if err != nil {
		t.Errorf("Got error, %v", err)
	}
	head := chain.CurrentBlock()
	if got := sideblocks[len(sideblocks)-1].Hash(); got != head.Hash() {
		t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
	}
	// We have now inserted a sidechain.
	if blockByNum := chain.GetBlockByNumber(canonNum); blockByNum != nil {
		t.Errorf("expected block to be gone: %v", blockByNum.NumberU64())
	}
	if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil {
		t.Errorf("expected header to be gone: %v", headerByNum.Number)
	}
	if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil {
		t.Errorf("expected block %x to be present", canonHash)
	}
	if headerByHash := chain.GetHeaderByHash(canonHash); headerByHash == nil {
		t.Errorf("expected header %x to be present", canonHash)
	}
}

// TestReorgToShorterRemovesCanonMappingHeaderChain is the same scenario
// as TestReorgToShorterRemovesCanonMapping, but applied on headerchain
// imports -- that is, for fast sync
func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) {
	chain, canonblocks, sideblocks, _, err := getLongAndShortChains()
	if err != nil {
		t.Fatal(err)
	}
	defer chain.Stop()

	// Convert into headers
	canonHeaders := make([]*types.Header, len(canonblocks))
	for i, block := range canonblocks {
		canonHeaders[i] = block.Header()
	}
	if n, err := chain.InsertHeaderChain(canonHeaders); err != nil {
		t.Fatalf("header %d: failed to insert into chain: %v", n, err)
	}
	canonNum := chain.CurrentHeader().Number.Uint64()
	canonHash := chain.CurrentBlock().Hash()
	sideHeaders := make([]*types.Header, len(sideblocks))
	for i, block := range sideblocks {
		sideHeaders[i] = block.Header()
	}
	if n, err := chain.InsertHeaderChain(sideHeaders); err != nil {
		t.Fatalf("header %d: failed to insert into chain: %v", n, err)
	}
	head := chain.CurrentHeader()
	if got := sideblocks[len(sideblocks)-1].Hash(); got != head.Hash() {
		t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
	}
	// We have now inserted a sidechain.
	if blockByNum := chain.GetBlockByNumber(canonNum); blockByNum != nil {
		t.Errorf("expected block to be gone: %v", blockByNum.NumberU64())
	}
	if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil {
		t.Errorf("expected header to be gone: %v", headerByNum.Number.Uint64())
	}
	if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil {
		t.Errorf("expected block %x to be present", canonHash)
	}
	if headerByHash := chain.GetHeaderByHash(canonHash); headerByHash == nil {
		t.Errorf("expected header %x to be present", canonHash)
	}
}

func TestTransactionIndices(t *testing.T) {
	// Configure and generate a sample block chain
	var (
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(100000000000000000)
		gspec   = &Genesis{
			Config:  params.TestChainConfig,
			Alloc:   GenesisAlloc{address: {Balance: funds}},
			BaseFee: big.NewInt(params.InitialBaseFee),
		}
		signer = types.LatestSigner(gspec.Config)
	)
	_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 128, func(i int, block *BlockGen) {
		tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
		if err != nil {
			panic(err)
		}
		block.AddTx(tx)
	})

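	// check verifies that the stored tx index tail matches the expectation, that every
	// transaction from the tail upwards has a lookup entry, and that entries below the
	// tail have been pruned.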
	check := func(tail *uint64, chain *BlockChain) {
		stored := rawdb.ReadTxIndexTail(chain.db)
		if tail == nil && stored != nil {
			t.Fatalf("Oldest indexed block mismatch, want nil, have %d", *stored)
		}
		if tail != nil && *stored != *tail {
			t.Fatalf("Oldest indexed block mismatch, want %d, have %d", *tail, *stored)
		}
		if tail != nil {
			for i := *tail; i <= chain.CurrentBlock().Number.Uint64(); i++ {
				block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
				if block.Transactions().Len() == 0 {
					continue
				}
				for _, tx := range block.Transactions() {
					if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
						t.Fatalf("Missing transaction index, number %d hash %s", i, tx.Hash().Hex())
					}
				}
			}
			for i := uint64(0); i < *tail; i++ {
				block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
				if block.Transactions().Len() == 0 {
					continue
				}
				for _, tx := range block.Transactions() {
					if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
						t.Fatalf("Transaction index should have been deleted, number %d hash %s", i, tx.Hash().Hex())
					}
				}
			}
		}
	}
	// Init block chain with external ancients, check all needed indices have been indexed.
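	// A limit of 0 keeps the whole chain indexed; a non-zero limit keeps lookup
	// entries only for roughly the most recent `limit` blocks (hence the expected
	// tail of 128 - limit + 1 below).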
	limit := []uint64{0, 32, 64, 128}
	for _, l := range limit {
		frdir := t.TempDir()
		ancientDb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
		rawdb.WriteAncientBlocks(ancientDb, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))

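		// Re-declare l so each iteration gets its own copy, since its address is
		// handed to NewBlockChain below.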
		l := l
		chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
		if err != nil {
			t.Fatalf("failed to create tester chain: %v", err)
		}
		chain.indexBlocks(rawdb.ReadTxIndexTail(ancientDb), 128, make(chan struct{}))

		var tail uint64
		if l != 0 {
			tail = uint64(128) - l + 1
		}
		check(&tail, chain)
		chain.Stop()
		ancientDb.Close()
		os.RemoveAll(frdir)
	}

	// Reconstruct a block chain which only reserves HEAD-64 tx indices
	ancientDb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
	defer ancientDb.Close()

	rawdb.WriteAncientBlocks(ancientDb, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))
	limit = []uint64{0, 64 /* drop stale */, 32 /* shorten history */, 64 /* extend history */, 0 /* restore all */}
	for _, l := range limit {
		l := l
		chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
		if err != nil {
			t.Fatalf("failed to create tester chain: %v", err)
		}
		var tail uint64
		if l != 0 {
			tail = uint64(128) - l + 1
		}
		chain.indexBlocks(rawdb.ReadTxIndexTail(ancientDb), 128, make(chan struct{}))
		check(&tail, chain)
		chain.Stop()
	}
}

func TestSkipStaleTxIndicesInSnapSync(t *testing.T) {
	// Configure and generate a sample block chain
	var (
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(100000000000000000)
		gspec   = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
		signer  = types.LatestSigner(gspec.Config)
	)
	_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 128, func(i int, block *BlockGen) {
		tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
		if err != nil {
			panic(err)
		}
		block.AddTx(tx)
	})

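	// Same helper as in TestTransactionIndices: the tail must match, and lookup
	// entries must exist at or above the tail and be absent below it.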
	check := func(tail *uint64, chain *BlockChain) {
		stored := rawdb.ReadTxIndexTail(chain.db)
		if tail == nil && stored != nil {
			t.Fatalf("Oldest indexed block mismatch, want nil, have %d", *stored)
		}
		if tail != nil && *stored != *tail {
			t.Fatalf("Oldest indexed block mismatch, want %d, have %d", *tail, *stored)
		}
		if tail != nil {
			for i := *tail; i <= chain.CurrentBlock().Number.Uint64(); i++ {
				block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
				if block.Transactions().Len() == 0 {
					continue
				}
				for _, tx := range block.Transactions() {
					if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
						t.Fatalf("Missing transaction index, number %d hash %s", i, tx.Hash().Hex())
					}
				}
			}
			for i := uint64(0); i < *tail; i++ {
				block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
				if block.Transactions().Len() == 0 {
					continue
				}
				for _, tx := range block.Transactions() {
					if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
						t.Fatalf("Transaction index should have been deleted, number %d hash %s", i, tx.Hash().Hex())
					}
				}
			}
		}
	}

	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
	if err != nil {
		t.Fatalf("failed to create temp freezer db: %v", err)
	}
	defer ancientDb.Close()

	// Import all blocks into ancient db, only HEAD-32 indices are kept.
	l := uint64(32)
	chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.Stop()

	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	if n, err := chain.InsertHeaderChain(headers); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	// The indices before ancient-N(32) should be ignored. After that all blocks should be indexed.
	if n, err := chain.InsertReceiptChain(blocks, receipts, 64); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}
	tail := uint64(32)
	check(&tail, chain)
}

// Benchmarks large blocks with value transfers to non-existing accounts
func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks int, recipientFn func(uint64) common.Address, dataFn func(uint64) []byte) {
	var (
		signer          = types.HomesteadSigner{}
		testBankKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
		bankFunds       = big.NewInt(100000000000000000)
		gspec           = &Genesis{
			Config: params.TestChainConfig,
			Alloc: GenesisAlloc{
				testBankAddress: {Balance: bankFunds},
				common.HexToAddress("0xc0de"): {
					Code:    []byte{0x60, 0x01, 0x50},
					Balance: big.NewInt(0),
				}, // push 1, pop
			},
			GasLimit: 100e6, // 100 M
		}
	)
	// Generate the original common chain segment and the two competing forks
	engine := ethash.NewFaker()

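	// blockGenerator stuffs numTxs value transfers into every generated block; the
	// running counter doubles as nonce and as the input for recipientFn.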
	blockGenerator := func(i int, block *BlockGen) {
		block.SetCoinbase(common.Address{1})
		for txi := 0; txi < numTxs; txi++ {
			uniq := uint64(i*numTxs + txi)
			recipient := recipientFn(uniq)
			tx, err := types.SignTx(types.NewTransaction(uniq, recipient, big.NewInt(1), params.TxGas, block.header.BaseFee, nil), signer, testBankKey)
			if err != nil {
				b.Error(err)
			}
			block.AddTx(tx)
		}
	}

	_, shared, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, blockGenerator)
	b.StopTimer()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Import the shared chain and the original canonical one
		chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
		if err != nil {
			b.Fatalf("failed to create tester chain: %v", err)
		}
		b.StartTimer()
		if _, err := chain.InsertChain(shared); err != nil {
			b.Fatalf("failed to insert shared chain: %v", err)
		}
		b.StopTimer()
		block := chain.GetBlockByHash(chain.CurrentBlock().Hash())
		if got := block.Transactions().Len(); got != numTxs*numBlocks {
			b.Fatalf("Transactions were not included, expected %d, got %d", numTxs*numBlocks, got)
		}
	}
}

func BenchmarkBlockChain_1x1000ValueTransferToNonexisting(b *testing.B) {
	var (
		numTxs    = 1000
		numBlocks = 1
	)
	recipientFn := func(nonce uint64) common.Address {
		return common.BigToAddress(new(big.Int).SetUint64(1337 + nonce))
	}
	dataFn := func(nonce uint64) []byte {
		return nil
	}
	benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
}

func BenchmarkBlockChain_1x1000ValueTransferToExisting(b *testing.B) {
	var (
		numTxs    = 1000
		numBlocks = 1
	)
	b.StopTimer()
	b.ResetTimer()

	recipientFn := func(nonce uint64) common.Address {
		return common.BigToAddress(new(big.Int).SetUint64(1337))
	}
	dataFn := func(nonce uint64) []byte {
		return nil
	}
	benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
}

func BenchmarkBlockChain_1x1000Executions(b *testing.B) {
	var (
		numTxs    = 1000
		numBlocks = 1
	)
	b.StopTimer()
	b.ResetTimer()

	recipientFn := func(nonce uint64) common.Address {
		return common.BigToAddress(new(big.Int).SetUint64(0xc0de))
	}
	dataFn := func(nonce uint64) []byte {
		return nil
	}
	benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
}

// Tests that importing some old blocks, where all blocks are before the
// pruning point, works.
// This internally leads to a sidechain import, since the blocks trigger an
// ErrPrunedAncestor error.
// This may e.g. happen if
// 1. Downloader rolls back a batch of inserted blocks and exits
// 2. Downloader starts to sync again
// 3. The blocks fetched are all known and canonical blocks
func TestSideImportPrunedBlocks(t *testing.T) {
	// Generate a canonical chain to act as the main dataset
	engine := ethash.NewFaker()
	genesis := &Genesis{
		Config:  params.TestChainConfig,
		BaseFee: big.NewInt(params.InitialBaseFee),
	}
	// Generate and import the canonical chain
	_, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, nil)

	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.Stop()

	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}

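	// With 2*TriesInMemory blocks imported, only the most recent TriesInMemory states
	// are retained, so this index is the newest block whose state has been pruned.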
	lastPrunedIndex := len(blocks) - TriesInMemory - 1
	lastPrunedBlock := blocks[lastPrunedIndex]

	// Verify pruning of lastPrunedBlock
	if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
		t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64())
	}
	firstNonPrunedBlock := blocks[len(blocks)-TriesInMemory]
	// Verify firstNonPrunedBlock is not pruned
	if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
		t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64())
	}
	// Now re-import some old blocks
	blockToReimport := blocks[5:8]
	_, err = chain.InsertChain(blockToReimport)
	if err != nil {
		t.Errorf("Got error, %v", err)
	}
}

// TestDeleteCreateRevert tests a weird state transition corner case that we hit
// while changing the internals of statedb. The workflow is that a contract is
// self-destructed, then in a follow-up transaction (but same block) it's created
// again and the transaction reverted.
//
// The original statedb implementation flushed dirty objects to the tries after
// each transaction, so this works ok. The rework accumulated writes in memory
// first, but the journal wiped the entire state object on create-revert.
func TestDeleteCreateRevert(t *testing.T) {
	var (
		aa     = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
		bb     = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
		engine = ethash.NewFaker()

		// A sender who makes transactions, has some funds
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(100000000000000000)
		gspec   = &Genesis{
			Config: params.TestChainConfig,
			Alloc: GenesisAlloc{
				address: {Balance: funds},
				// The address 0xAAAA selfdestructs if called
				aa: {
					// Code needs to just selfdestruct
					Code:    []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)},
					Nonce:   1,
					Balance: big.NewInt(0),
				},
				// The address 0xBBBB sends 1 wei to 0xAAAA, then reverts
				bb: {
					Code: []byte{
						byte(vm.PC),          // [0]
						byte(vm.DUP1),        // [0,0]
						byte(vm.DUP1),        // [0,0,0]
						byte(vm.DUP1),        // [0,0,0,0]
						byte(vm.PUSH1), 0x01, // [0,0,0,0,1] (value)
						byte(vm.PUSH2), 0xaa, 0xaa, // [0,0,0,0,1, 0xaaaa]
						byte(vm.GAS),
						byte(vm.CALL),
						byte(vm.REVERT),
					},
					Balance: big.NewInt(1),
				},
			},
		}
	)

	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})
		// One transaction to AAAA
		tx, _ := types.SignTx(types.NewTransaction(0, aa,
			big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
		// One transaction to BBBB
		tx, _ = types.SignTx(types.NewTransaction(1, bb,
			big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
	})
	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.Stop()

	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}
}

// TestDeleteRecreateSlots tests a state-transition that contains both deletion
// and recreation of contract state.
// Contract A exists, has slots 1 and 2 set
// Tx 1: Selfdestruct A
// Tx 2: Re-create A, set slots 3 and 4
// Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct,
// and then the new slots exist
func TestDeleteRecreateSlots(t *testing.T) {
	var (
		engine = ethash.NewFaker()

		// A sender who makes transactions, has some funds
		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address   = crypto.PubkeyToAddress(key.PublicKey)
		funds     = big.NewInt(1000000000000000)
		bb        = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
	)
	// Populate two slots
	aaStorage[common.HexToHash("01")] = common.HexToHash("01")
	aaStorage[common.HexToHash("02")] = common.HexToHash("02")

	// The bb-code needs to CREATE2 the aa contract. It consists of
	// both initcode and deployment code
	// initcode:
	// 1. Set slots 3=3, 4=4,
	// 2. Return aaCode

	initCode := []byte{
		byte(vm.PUSH1), 0x3, // value
		byte(vm.PUSH1), 0x3, // location
		byte(vm.SSTORE),     // Set slot[3] = 3
		byte(vm.PUSH1), 0x4, // value
		byte(vm.PUSH1), 0x4, // location
		byte(vm.SSTORE),     // Set slot[4] = 4
		// Slots are set, now return the code
		byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
		byte(vm.PUSH1), 0x0, // memory start on stack
		byte(vm.MSTORE),
		// Code is now in memory.
		byte(vm.PUSH1), 0x2, // size
		byte(vm.PUSH1), byte(32 - 2), // offset
		byte(vm.RETURN),
	}
	if l := len(initCode); l > 32 {
		t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
	}
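	// The deployer below pushes the initcode as a single word, MSTOREs it at offset 0
	// and CREATE2s it with a zero salt, which is why the initcode must fit in one PUSHx.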
	bbCode := []byte{
		// Push initcode onto stack
		byte(vm.PUSH1) + byte(len(initCode)-1)}
	bbCode = append(bbCode, initCode...)
	bbCode = append(bbCode, []byte{
		byte(vm.PUSH1), 0x0, // memory start on stack
		byte(vm.MSTORE),
		byte(vm.PUSH1), 0x00, // salt
		byte(vm.PUSH1), byte(len(initCode)), // size
		byte(vm.PUSH1), byte(32 - len(initCode)), // offset
		byte(vm.PUSH1), 0x00, // endowment
		byte(vm.CREATE2),
	}...)

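	// CREATE2 derives the contract address as keccak256(0xff ++ deployer ++ salt ++
	// keccak256(initcode))[12:], so with a fixed zero salt the target address depends
	// only on bb and the initcode hash computed here.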
	initHash := crypto.Keccak256Hash(initCode)
	aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
	t.Logf("Destination address: %x\n", aa)

	gspec := &Genesis{
		Config: params.TestChainConfig,
		Alloc: GenesisAlloc{
			address: {Balance: funds},
			// The address 0xAAAA selfdestructs if called
			aa: {
				// Code needs to just selfdestruct
				Code:    aaCode,
				Nonce:   1,
				Balance: big.NewInt(0),
				Storage: aaStorage,
			},
			// The contract BB recreates AA
			bb: {
				Code:    bbCode,
				Balance: big.NewInt(1),
			},
		},
	}
	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})
		// One transaction to AA, to kill it
		tx, _ := types.SignTx(types.NewTransaction(0, aa,
			big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
		// One transaction to BB, to recreate AA
		tx, _ = types.SignTx(types.NewTransaction(1, bb,
			big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
	})
	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
		Tracer: logger.NewJSONLogger(nil, os.Stdout),
	}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.Stop()

	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}
	statedb, _ := chain.State()

	// If all is correct, then slot 1 and 2 are zero
	if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
		t.Errorf("got %x exp %x", got, exp)
	}
	if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
		t.Errorf("got %x exp %x", got, exp)
	}
	// Also, 3 and 4 should be set
	if got, exp := statedb.GetState(aa, common.HexToHash("03")), common.HexToHash("03"); got != exp {
		t.Fatalf("got %x exp %x", got, exp)
	}
	if got, exp := statedb.GetState(aa, common.HexToHash("04")), common.HexToHash("04"); got != exp {
		t.Fatalf("got %x exp %x", got, exp)
	}
}

// TestDeleteRecreateAccount tests a state-transition that contains deletion of a
// contract with storage, and a recreate of the same contract via a
// regular value-transfer
// Expected outcome is that _all_ slots are cleared from A
func TestDeleteRecreateAccount(t *testing.T) {
	var (
		engine = ethash.NewFaker()

		// A sender who makes transactions, has some funds
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(1000000000000000)

		aa        = common.HexToAddress("0x7217d81b76bdd8707601e959454e3d776aee5f43")
		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
	)
	// Populate two slots
	aaStorage[common.HexToHash("01")] = common.HexToHash("01")
	aaStorage[common.HexToHash("02")] = common.HexToHash("02")

	gspec := &Genesis{
		Config: params.TestChainConfig,
		Alloc: GenesisAlloc{
			address: {Balance: funds},
			// The address 0xAAAA selfdestructs if called
			aa: {
				// Code needs to just selfdestruct
				Code:    aaCode,
				Nonce:   1,
				Balance: big.NewInt(0),
				Storage: aaStorage,
			},
		},
	}

	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})
		// One transaction to AA, to kill it
		tx, _ := types.SignTx(types.NewTransaction(0, aa,
			big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
		// One transaction to AA, to recreate it (but without storage)
		tx, _ = types.SignTx(types.NewTransaction(1, aa,
			big.NewInt(1), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
	})
	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
		Tracer: logger.NewJSONLogger(nil, os.Stdout),
	}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.Stop()

	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}
	statedb, _ := chain.State()

	// If all is correct, then both slots are zero
	if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
		t.Errorf("got %x exp %x", got, exp)
	}
	if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
		t.Errorf("got %x exp %x", got, exp)
	}
}

// TestDeleteRecreateSlotsAcrossManyBlocks tests multiple state-transitions that
// contain both deletion and recreation of contract state.
// Contract A exists, has slots 1 and 2 set
// Tx 1: Selfdestruct A
// Tx 2: Re-create A, set slots 3 and 4
// Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct,
// and then the new slots exist
func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
	var (
		engine = ethash.NewFaker()

		// A sender who makes transactions, has some funds
		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address   = crypto.PubkeyToAddress(key.PublicKey)
		funds     = big.NewInt(1000000000000000)
		bb        = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
	)
	// Populate two slots
	aaStorage[common.HexToHash("01")] = common.HexToHash("01")
	aaStorage[common.HexToHash("02")] = common.HexToHash("02")

	// The bb-code needs to CREATE2 the aa contract. It consists of
	// both initcode and deployment code
	// initcode:
	// 1. Set slots 3=blocknum+1, 4=4,
	// 2. Return aaCode

	initCode := []byte{
		byte(vm.PUSH1), 0x1, //
		byte(vm.NUMBER),     // value = number + 1
		byte(vm.ADD),        //
		byte(vm.PUSH1), 0x3, // location
		byte(vm.SSTORE),     // Set slot[3] = number + 1
		byte(vm.PUSH1), 0x4, // value
		byte(vm.PUSH1), 0x4, // location
		byte(vm.SSTORE),     // Set slot[4] = 4
		// Slots are set, now return the code
		byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
		byte(vm.PUSH1), 0x0, // memory start on stack
		byte(vm.MSTORE),
		// Code is now in memory.
		byte(vm.PUSH1), 0x2, // size
		byte(vm.PUSH1), byte(32 - 2), // offset
		byte(vm.RETURN),
	}
	if l := len(initCode); l > 32 {
		t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
	}
	bbCode := []byte{
		// Push initcode onto stack
		byte(vm.PUSH1) + byte(len(initCode)-1)}
	bbCode = append(bbCode, initCode...)
	bbCode = append(bbCode, []byte{
		byte(vm.PUSH1), 0x0, // memory start on stack
		byte(vm.MSTORE),
		byte(vm.PUSH1), 0x00, // salt
		byte(vm.PUSH1), byte(len(initCode)), // size
		byte(vm.PUSH1), byte(32 - len(initCode)), // offset
		byte(vm.PUSH1), 0x00, // endowment
		byte(vm.CREATE2),
	}...)

	initHash := crypto.Keccak256Hash(initCode)
	aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
	t.Logf("Destination address: %x\n", aa)
	gspec := &Genesis{
		Config: params.TestChainConfig,
		Alloc: GenesisAlloc{
			address: {Balance: funds},
			// The address 0xAAAA selfdestructs if called
			aa: {
				// Code needs to just selfdestruct
				Code:    aaCode,
				Nonce:   1,
				Balance: big.NewInt(0),
				Storage: aaStorage,
			},
			// The contract BB recreates AA
			bb: {
				Code:    bbCode,
				Balance: big.NewInt(1),
			},
		},
	}
	var nonce uint64

	type expectation struct {
		exist    bool
		blocknum int
		values   map[int]int
	}
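	// current tracks the expected post-block state of AA; the destruct/resurrect
	// helpers below update it as they queue transactions.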
	var current = &expectation{
		exist:    true, // exists in genesis
		blocknum: 0,
		values:   map[int]int{1: 1, 2: 2},
	}
	var expectations []*expectation
	var newDestruct = func(e *expectation, b *BlockGen) *types.Transaction {
		tx, _ := types.SignTx(types.NewTransaction(nonce, aa,
			big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		nonce++
		if e.exist {
			e.exist = false
			e.values = nil
		}
		//t.Logf("block %d; adding destruct\n", e.blocknum)
		return tx
	}
	var newResurrect = func(e *expectation, b *BlockGen) *types.Transaction {
		tx, _ := types.SignTx(types.NewTransaction(nonce, bb,
			big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		nonce++
		if !e.exist {
			e.exist = true
			e.values = map[int]int{3: e.blocknum + 1, 4: 4}
		}
		//t.Logf("block %d; adding resurrect\n", e.blocknum)
		return tx
	}

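	// Generate 150 blocks; the overlapping modular schedules below destruct and
	// resurrect AA repeatedly, sometimes several times within a single block.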
_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 150, func(i int, b *BlockGen) {
|
2020-03-04 12:39:27 +00:00
|
|
|
var exp = new(expectation)
|
|
|
|
exp.blocknum = i + 1
|
|
|
|
exp.values = make(map[int]int)
|
|
|
|
for k, v := range current.values {
|
|
|
|
exp.values[k] = v
|
|
|
|
}
|
|
|
|
exp.exist = current.exist
|
|
|
|
|
|
|
|
b.SetCoinbase(common.Address{1})
		if i%2 == 0 {
			b.AddTx(newDestruct(exp, b))
		}
		if i%3 == 0 {
			b.AddTx(newResurrect(exp, b))
		}
		if i%5 == 0 {
			b.AddTx(newDestruct(exp, b))
		}
		if i%7 == 0 {
			b.AddTx(newResurrect(exp, b))
		}
		expectations = append(expectations, exp)
		current = exp
	})
	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
		//Debug:  true,
		//Tracer: vm.NewJSONLogger(nil, os.Stdout),
	}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.Stop()

	var asHash = func(num int) common.Hash {
		return common.BytesToHash([]byte{byte(num)})
	}
	for i, block := range blocks {
		blockNum := i + 1
		if n, err := chain.InsertChain([]*types.Block{block}); err != nil {
			t.Fatalf("block %d: failed to insert into chain: %v", n, err)
		}
		statedb, _ := chain.State()
		// If all is correct, then slots 1 and 2 are zero
		if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
			t.Errorf("block %d, got %x exp %x", blockNum, got, exp)
		}
		if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
			t.Errorf("block %d, got %x exp %x", blockNum, got, exp)
		}
		exp := expectations[i]
		if exp.exist {
			if !statedb.Exist(aa) {
				t.Fatalf("block %d, expected %v to exist, it did not", blockNum, aa)
			}
			for slot, val := range exp.values {
				if gotValue, expValue := statedb.GetState(aa, asHash(slot)), asHash(val); gotValue != expValue {
					t.Fatalf("block %d, slot %d, got %x exp %x", blockNum, slot, gotValue, expValue)
				}
			}
		} else {
			if statedb.Exist(aa) {
				t.Fatalf("block %d, expected %v to not exist, it did", blockNum, aa)
			}
		}
	}
}

// TestInitThenFailCreateContract tests a pretty notorious case that happened
// on mainnet over blocks 7338108, 7338110 and 7338115.
//   - Block 7338108: address e771789f5cccac282f23bb7add5690e1f6ca467c is initiated
//     with 0.001 ether (thus created but no code)
//   - Block 7338110: a CREATE2 is attempted. The CREATE2 would deploy code on
//     the same address e771789f5cccac282f23bb7add5690e1f6ca467c. However, the
//     deployment fails due to OOG during initcode execution
//   - Block 7338115: another tx checks the balance of
//     e771789f5cccac282f23bb7add5690e1f6ca467c, and the snapshotter returned it as
//     zero.
//
// The problem is that the snapshotter maintains a destructset, and adds items
// to the destructset in case something is created "onto" an existing item.
// We need to either roll back the snapDestructs, or not place it into snapDestructs
// in the first place.
func TestInitThenFailCreateContract(t *testing.T) {
	var (
		engine = ethash.NewFaker()

		// A sender who makes transactions, has some funds
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(1000000000000000)
		bb      = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
	)

	// The bb-code needs to CREATE2 the aa contract. It consists of
	// both initcode and deployment code
	// initcode:
	// 1. If blocknum < 2, error out (e.g. invalid opcode)
	// 2. else, return a snippet of code
	initCode := []byte{
		byte(vm.PUSH1), 0x1, // y (1)
		byte(vm.NUMBER), // x (number)
		byte(vm.GT),     // x > y?
		byte(vm.PUSH1), byte(0x8),
		byte(vm.JUMPI),    // jump to label if number > 1
		byte(0xFE),        // illegal opcode
		byte(vm.JUMPDEST),
		byte(vm.PUSH1), 0x2, // size
		byte(vm.PUSH1), 0x0, // offset
		byte(vm.RETURN), // return 2 bytes of zero-code
	}
	if l := len(initCode); l > 32 {
		t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
	}
	bbCode := []byte{
		// Push initcode onto stack
		byte(vm.PUSH1) + byte(len(initCode)-1)}
	bbCode = append(bbCode, initCode...)
	bbCode = append(bbCode, []byte{
		byte(vm.PUSH1), 0x0, // memory start on stack
		byte(vm.MSTORE),
		byte(vm.PUSH1), 0x00, // salt
		byte(vm.PUSH1), byte(len(initCode)), // size
		byte(vm.PUSH1), byte(32 - len(initCode)), // offset
		byte(vm.PUSH1), 0x00, // endowment
		byte(vm.CREATE2),
	}...)
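
	// The deployer pushes the initcode as a single PUSHn immediate (so it occupies the
	// low-order bytes of one 32-byte stack word), MSTOREs that word at offset 0 and
	// CREATE2s it from offset 32-len(initCode) with salt 0, which is why the target
	// address below can be precomputed from keccak256(initCode).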
	initHash := crypto.Keccak256Hash(initCode)
	aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
	t.Logf("Destination address: %x\n", aa)

	gspec := &Genesis{
		Config: params.TestChainConfig,
		Alloc: GenesisAlloc{
			address: {Balance: funds},
			// The address aa has some funds
			aa: {Balance: big.NewInt(100000)},
			// The contract BB tries to create code onto AA
			bb: {
				Code:    bbCode,
				Balance: big.NewInt(1),
			},
		},
	}
	nonce := uint64(0)
	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 4, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})
		// One transaction to BB
		tx, _ := types.SignTx(types.NewTransaction(nonce, bb,
			big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
		b.AddTx(tx)
		nonce++
	})

	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
		//Debug:  true,
		//Tracer: vm.NewJSONLogger(nil, os.Stdout),
	}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.Stop()

	statedb, _ := chain.State()
	if got, exp := statedb.GetBalance(aa), big.NewInt(100000); got.Cmp(exp) != 0 {
		t.Fatalf("Genesis err, got %v exp %v", got, exp)
	}
	// First block tries to create, but fails
	{
		block := blocks[0]
		if _, err := chain.InsertChain([]*types.Block{blocks[0]}); err != nil {
			t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
		}
		statedb, _ = chain.State()
		if got, exp := statedb.GetBalance(aa), big.NewInt(100000); got.Cmp(exp) != 0 {
			t.Fatalf("block %d: got %v exp %v", block.NumberU64(), got, exp)
		}
	}
	// Import the rest of the blocks
	for _, block := range blocks[1:] {
		if _, err := chain.InsertChain([]*types.Block{block}); err != nil {
			t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
		}
	}
}

// TestEIP2718Transition tests that an EIP-2718 transaction will be accepted
// after the fork block has passed. This is verified by sending an EIP-2930
// access list transaction, which specifies a single slot access, and then
// checking that the gas usage of a hot SLOAD and a cold SLOAD is calculated
// correctly.
func TestEIP2718Transition(t *testing.T) {
	var (
		aa     = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
		engine = ethash.NewFaker()

		// A sender who makes transactions, has some funds
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(1000000000000000)
		gspec   = &Genesis{
			Config: params.TestChainConfig,
			Alloc: GenesisAlloc{
				address: {Balance: funds},
				// The address 0xAAAA sloads 0x00 and 0x01
				aa: {
					Code: []byte{
						byte(vm.PC),
						byte(vm.PC),
						byte(vm.SLOAD),
						byte(vm.SLOAD),
					},
					Nonce:   0,
					Balance: big.NewInt(0),
				},
			},
		}
	)
	// Generate blocks
	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})

		// One transaction to 0xAAAA
		signer := types.LatestSigner(gspec.Config)
		tx, _ := types.SignNewTx(key, signer, &types.AccessListTx{
			ChainID:  gspec.Config.ChainID,
			Nonce:    0,
			To:       &aa,
			Gas:      30000,
			GasPrice: b.header.BaseFee,
			AccessList: types.AccessList{{
				Address:     aa,
				StorageKeys: []common.Hash{{0}},
			}},
		})
		b.AddTx(tx)
	})

	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.Stop()

	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}

	block := chain.GetBlockByNumber(1)

	// Expected gas is intrinsic + 2 * pc + hot load + cold load, since only one load is in the access list
	expected := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas +
		vm.GasQuickStep*2 + params.WarmStorageReadCostEIP2929 + params.ColdSloadCostEIP2929
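	// With the Berlin cost constants (21000 intrinsic, 2400 per access-list address,
	// 1900 per access-list storage key, 2 per PC, 100 warm SLOAD, 2100 cold SLOAD)
	// this works out to 21000 + 2400 + 1900 + 2*2 + 100 + 2100 = 27504 gas.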
	if block.GasUsed() != expected {
		t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expected, block.GasUsed())
	}
}

// TestEIP1559Transition tests the following:
//
//  1. A transaction whose gasFeeCap is greater than the baseFee is valid.
//  2. Gas accounting for access lists on EIP-1559 transactions is correct.
//  3. Only the transaction's tip will be received by the coinbase.
//  4. The transaction sender pays for both the tip and baseFee.
//  5. The coinbase receives only the partially realized tip when
//     gasFeeCap - gasTipCap < baseFee.
//  6. Legacy transactions behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap).
func TestEIP1559Transition(t *testing.T) {
	var (
		aa     = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
		engine = ethash.NewFaker()

		// A sender who makes transactions, has some funds
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		addr2   = crypto.PubkeyToAddress(key2.PublicKey)
		funds   = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether))
		gspec   = &Genesis{
			Config: params.AllEthashProtocolChanges,
			Alloc: GenesisAlloc{
				addr1: {Balance: funds},
				addr2: {Balance: funds},
				// The address 0xAAAA sloads 0x00 and 0x01
				aa: {
					Code: []byte{
						byte(vm.PC),
						byte(vm.PC),
						byte(vm.SLOAD),
						byte(vm.SLOAD),
					},
					Nonce:   0,
					Balance: big.NewInt(0),
				},
			},
		}
	)

	gspec.Config.BerlinBlock = common.Big0
	gspec.Config.LondonBlock = common.Big0
	signer := types.LatestSigner(gspec.Config)

	genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{1})

		// One transaction to 0xAAAA
		accesses := types.AccessList{types.AccessTuple{
			Address:     aa,
			StorageKeys: []common.Hash{{0}},
		}}

		txdata := &types.DynamicFeeTx{
			ChainID:    gspec.Config.ChainID,
			Nonce:      0,
			To:         &aa,
			Gas:        30000,
			GasFeeCap:  newGwei(5),
			GasTipCap:  big.NewInt(2),
			AccessList: accesses,
			Data:       []byte{},
		}
		tx := types.NewTx(txdata)
		tx, _ = types.SignTx(tx, signer, key1)

		b.AddTx(tx)
	})
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.Stop()

	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}

	block := chain.GetBlockByNumber(1)

	// 1+2: Ensure EIP-1559 access lists are accounted for via gas usage.
	expectedGas := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas +
		vm.GasQuickStep*2 + params.WarmStorageReadCostEIP2929 + params.ColdSloadCostEIP2929
	if block.GasUsed() != expectedGas {
		t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed())
	}

	state, _ := chain.State()

	// 3: Ensure that miner received only the tx's tip.
	actual := state.GetBalance(block.Coinbase())
	expected := new(big.Int).Add(
		new(big.Int).SetUint64(block.GasUsed()*block.Transactions()[0].GasTipCap().Uint64()),
		ethash.ConstantinopleBlockReward,
	)
	if actual.Cmp(expected) != 0 {
		t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
	}

	// 4: Ensure the tx sender paid for the gasUsed * (tip + block baseFee).
	actual = new(big.Int).Sub(funds, state.GetBalance(addr1))
	expected = new(big.Int).SetUint64(block.GasUsed() * (block.Transactions()[0].GasTipCap().Uint64() + block.BaseFee().Uint64()))
	if actual.Cmp(expected) != 0 {
		t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
	}

	blocks, _ = GenerateChain(gspec.Config, block, engine, genDb, 1, func(i int, b *BlockGen) {
		b.SetCoinbase(common.Address{2})

		txdata := &types.LegacyTx{
			Nonce:    0,
			To:       &aa,
			Gas:      30000,
			GasPrice: newGwei(5),
		}
		tx := types.NewTx(txdata)
		tx, _ = types.SignTx(tx, signer, key2)

		b.AddTx(tx)
	})

	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}

	block = chain.GetBlockByNumber(2)
	state, _ = chain.State()
	effectiveTip := block.Transactions()[0].GasTipCap().Uint64() - block.BaseFee().Uint64()
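	// For the legacy tx, gasPrice acts as both gasFeeCap and gasTipCap (case 6 above),
	// so the part of the price exceeding the baseFee is what the miner actually keeps.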

	// 6+5: Ensure that miner received only the tx's effective tip.
	actual = state.GetBalance(block.Coinbase())
	expected = new(big.Int).Add(
		new(big.Int).SetUint64(block.GasUsed()*effectiveTip),
		ethash.ConstantinopleBlockReward,
	)
	if actual.Cmp(expected) != 0 {
		t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
	}

	// 4: Ensure the tx sender paid for the gasUsed * (effectiveTip + block baseFee).
	actual = new(big.Int).Sub(funds, state.GetBalance(addr2))
	expected = new(big.Int).SetUint64(block.GasUsed() * (effectiveTip + block.BaseFee().Uint64()))
	if actual.Cmp(expected) != 0 {
		t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
	}
}

// TestSetCanonical tests the scenario where the chain head is reset to another
// point for which the state is missing. It expects the state to be recovered
// and all the relevant chain markers to be set correctly.
func TestSetCanonical(t *testing.T) {
	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))

	var (
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(100000000000000000)
		gspec   = &Genesis{
			Config:  params.TestChainConfig,
			Alloc:   GenesisAlloc{address: {Balance: funds}},
			BaseFee: big.NewInt(params.InitialBaseFee),
		}
		signer = types.LatestSigner(gspec.Config)
		engine = ethash.NewFaker()
	)
	// Generate and import the canonical chain
	_, canon, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
		tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key)
		if err != nil {
			panic(err)
		}
		gen.AddTx(tx)
	})
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	defer chain.Stop()

	if n, err := chain.InsertChain(canon); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}

	// Generate the side chain and import it
	_, side, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
		tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1), params.TxGas, gen.header.BaseFee, nil), signer, key)
		if err != nil {
			panic(err)
		}
		gen.AddTx(tx)
	})
	for _, block := range side {
		err := chain.InsertBlockWithoutSetHead(block)
		if err != nil {
			t.Fatalf("Failed to insert into chain: %v", err)
		}
	}
	for _, block := range side {
		got := chain.GetBlockByHash(block.Hash())
		if got == nil {
			t.Fatalf("Lost the inserted block")
		}
	}

	// Set the chain head to the side chain, ensure all the relevant markers are updated.
	verify := func(head *types.Block) {
		if chain.CurrentBlock().Hash() != head.Hash() {
			t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash())
		}
		if chain.CurrentSnapBlock().Hash() != head.Hash() {
			t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentSnapBlock().Hash())
		}
		if chain.CurrentHeader().Hash() != head.Hash() {
			t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash())
		}
		if !chain.HasState(head.Root()) {
			t.Fatalf("Lost block state %v %x", head.Number(), head.Hash())
		}
	}
	chain.SetCanonical(side[len(side)-1])
	verify(side[len(side)-1])

	// Reset the chain head to the original chain
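	// canon[TriesInMemory-1] is old enough that its state will typically have been
	// garbage-collected from the in-memory trie cache, so SetCanonical has to recover
	// the missing state, which is exactly what this test wants to exercise.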
	chain.SetCanonical(canon[TriesInMemory-1])
	verify(canon[TriesInMemory-1])
}

// TestCanonicalHashMarker tests that all the canonical hash markers are updated
// and deleted correctly when a reorg happens.
func TestCanonicalHashMarker(t *testing.T) {
	var cases = []struct {
		forkA int
		forkB int
	}{
		// ForkA: 10 blocks
		// ForkB: 1 block
		//
		// reorged:
		//   markers [2, 10] should be deleted
		//   markers [1] should be updated
		{10, 1},

		// ForkA: 10 blocks
		// ForkB: 2 blocks
		//
		// reorged:
		//   markers [3, 10] should be deleted
		//   markers [1, 2] should be updated
		{10, 2},

		// ForkA: 10 blocks
		// ForkB: 10 blocks
		//
		// reorged:
		//   markers [1, 10] should be updated
		{10, 10},

		// ForkA: 10 blocks
		// ForkB: 11 blocks
		//
		// reorged:
		//   markers [1, 11] should be updated
		{10, 11},
	}
	for _, c := range cases {
		var (
			gspec = &Genesis{
				Config:  params.TestChainConfig,
				Alloc:   GenesisAlloc{},
				BaseFee: big.NewInt(params.InitialBaseFee),
			}
			engine = ethash.NewFaker()
		)
		_, forkA, _ := GenerateChainWithGenesis(gspec, engine, c.forkA, func(i int, gen *BlockGen) {})
		_, forkB, _ := GenerateChainWithGenesis(gspec, engine, c.forkB, func(i int, gen *BlockGen) {})

		// Initialize test chain
		chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
		if err != nil {
			t.Fatalf("failed to create tester chain: %v", err)
		}
		// Insert forkA and forkB, the canonical chain should still be on forkA
		if n, err := chain.InsertChain(forkA); err != nil {
			t.Fatalf("block %d: failed to insert into chain: %v", n, err)
		}
		if n, err := chain.InsertChain(forkB); err != nil {
			t.Fatalf("block %d: failed to insert into chain: %v", n, err)
		}

		verify := func(head *types.Block) {
			if chain.CurrentBlock().Hash() != head.Hash() {
				t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash())
			}
			if chain.CurrentSnapBlock().Hash() != head.Hash() {
				t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentSnapBlock().Hash())
			}
			if chain.CurrentHeader().Hash() != head.Hash() {
				t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash())
			}
			if !chain.HasState(head.Root()) {
				t.Fatalf("Lost block state %v %x", head.Number(), head.Hash())
			}
		}

		// Switch canonical chain to forkB if necessary
		if len(forkA) < len(forkB) {
			verify(forkB[len(forkB)-1])
		} else {
			verify(forkA[len(forkA)-1])
			chain.SetCanonical(forkB[len(forkB)-1])
			verify(forkB[len(forkB)-1])
		}

		// Ensure all hash markers are updated correctly
		for i := 0; i < len(forkB); i++ {
			block := forkB[i]
			hash := chain.GetCanonicalHash(block.NumberU64())
			if hash != block.Hash() {
				t.Fatalf("Unexpected canonical hash %d", block.NumberU64())
			}
		}
		if c.forkA > c.forkB {
			for i := uint64(c.forkB) + 1; i <= uint64(c.forkA); i++ {
				hash := chain.GetCanonicalHash(i)
				if hash != (common.Hash{}) {
					t.Fatalf("Unexpected canonical hash %d", i)
				}
			}
		}
		chain.Stop()
	}
}

// TestTxIndexer tests that the tx indexes are updated correctly.
func TestTxIndexer(t *testing.T) {
	var (
		testBankKey, _  = crypto.GenerateKey()
		testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
		testBankFunds   = big.NewInt(1000000000000000000)

		gspec = &Genesis{
			Config:  params.TestChainConfig,
			Alloc:   GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
			BaseFee: big.NewInt(params.InitialBaseFee),
		}
		engine = ethash.NewFaker()
		nonce  = uint64(0)
	)
	_, blocks, receipts := GenerateChainWithGenesis(gspec, engine, 128, func(i int, gen *BlockGen) {
		tx, _ := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("0xdeadbeef"), big.NewInt(1000), params.TxGas, big.NewInt(10*params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey)
		gen.AddTx(tx)
		nonce += 1
	})

	// verifyIndexes checks whether the transaction indexes of the specified block
	// are present.
	verifyIndexes := func(db ethdb.Database, number uint64, exist bool) {
		if number == 0 {
			return
		}
		block := blocks[number-1]
		for _, tx := range block.Transactions() {
			lookup := rawdb.ReadTxLookupEntry(db, tx.Hash())
			if exist && lookup == nil {
				t.Fatalf("missing %d %x", number, tx.Hash().Hex())
			}
			if !exist && lookup != nil {
				t.Fatalf("unexpected %d %x", number, tx.Hash().Hex())
			}
		}
	}
	// verifyRange runs verifyIndexes for a range of blocks, from and to are included.
	verifyRange := func(db ethdb.Database, from, to uint64, exist bool) {
		for number := from; number <= to; number += 1 {
			verifyIndexes(db, number, exist)
		}
	}
	verify := func(db ethdb.Database, expTail uint64) {
		tail := rawdb.ReadTxIndexTail(db)
		if tail == nil {
			t.Fatal("Failed to write tx index tail")
		}
		if *tail != expTail {
			t.Fatalf("Unexpected tx index tail, want %v, got %d", expTail, *tail)
		}
		if *tail != 0 {
			verifyRange(db, 0, *tail-1, false)
		}
		verifyRange(db, *tail, 128, true)
	}
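
	// A lookup limit of 0 means "index everything"; otherwise only the most recent
	// `limit` blocks keep their indexes, so the expected tail is max(0, 128-limit+1).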
	var cases = []struct {
		limitA uint64
		tailA  uint64
		limitB uint64
		tailB  uint64
		limitC uint64
		tailC  uint64
	}{
		{
			// LimitA: 0
			// TailA:  0
			//
			// all blocks are indexed
			limitA: 0,
			tailA:  0,

			// LimitB: 1
			// TailB:  128
			//
			// block-128 is indexed
			limitB: 1,
			tailB:  128,

			// LimitC: 64
			// TailC:  65
			//
			// blocks [65, 128] are indexed
			limitC: 64,
			tailC:  65,
		},
		{
			// LimitA: 64
			// TailA:  65
			//
			// blocks [65, 128] are indexed
			limitA: 64,
			tailA:  65,

			// LimitB: 1
			// TailB:  128
			//
			// block-128 is indexed
			limitB: 1,
			tailB:  128,

			// LimitC: 64
			// TailC:  65
			//
			// blocks [65, 128] are indexed
			limitC: 64,
			tailC:  65,
		},
		{
			// LimitA: 127
			// TailA:  2
			//
			// blocks [2, 128] are indexed
			limitA: 127,
			tailA:  2,

			// LimitB: 1
			// TailB:  128
			//
			// block-128 is indexed
			limitB: 1,
			tailB:  128,

			// LimitC: 64
			// TailC:  65
			//
			// blocks [65, 128] are indexed
			limitC: 64,
			tailC:  65,
		},
		{
			// LimitA: 128
			// TailA:  1
			//
			// blocks [1, 128] are indexed
			limitA: 128,
			tailA:  1,

			// LimitB: 1
			// TailB:  128
			//
			// block-128 is indexed
			limitB: 1,
			tailB:  128,

			// LimitC: 64
			// TailC:  65
			//
			// blocks [65, 128] are indexed
			limitC: 64,
			tailC:  65,
		},
		{
			// LimitA: 129
			// TailA:  0
			//
			// blocks [0, 128] are indexed
			limitA: 129,
			tailA:  0,

			// LimitB: 1
			// TailB:  128
			//
			// block-128 is indexed
			limitB: 1,
			tailB:  128,

			// LimitC: 64
			// TailC:  65
			//
			// blocks [65, 128] are indexed
			limitC: 64,
			tailC:  65,
		},
	}
	for _, c := range cases {
		frdir := t.TempDir()
		db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
		rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))

		// Index the initial blocks from ancient store
		chain, _ := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, &c.limitA)
		chain.indexBlocks(nil, 128, make(chan struct{}))
		verify(db, c.tailA)

		chain.SetTxLookupLimit(c.limitB)
		chain.indexBlocks(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}))
		verify(db, c.tailB)

		chain.SetTxLookupLimit(c.limitC)
		chain.indexBlocks(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}))
		verify(db, c.tailC)

		// Recover all indexes
		chain.SetTxLookupLimit(0)
		chain.indexBlocks(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}))
		verify(db, 0)

		chain.Stop()
		db.Close()
		os.RemoveAll(frdir)
	}
}

func TestCreateThenDeletePreByzantium(t *testing.T) {
	// We use a Ropsten-like chain config instead of the Testchain config; this is
	// deliberate: we want to use pre-Byzantium rules, where we have intermediate state roots
	// between transactions.
	testCreateThenDelete(t, &params.ChainConfig{
		ChainID:        big.NewInt(3),
		HomesteadBlock: big.NewInt(0),
		EIP150Block:    big.NewInt(0),
		EIP155Block:    big.NewInt(10),
		EIP158Block:    big.NewInt(10),
		ByzantiumBlock: big.NewInt(1_700_000),
	})
}

func TestCreateThenDeletePostByzantium(t *testing.T) {
	testCreateThenDelete(t, params.TestChainConfig)
}

// testCreateThenDelete tests a creation and subsequent deletion of a contract, happening
// within the same block.
func testCreateThenDelete(t *testing.T, config *params.ChainConfig) {
	var (
		engine = ethash.NewFaker()
		// A sender who makes transactions, has some funds
		key, _      = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address     = crypto.PubkeyToAddress(key.PublicKey)
		destAddress = crypto.CreateAddress(address, 0)
		funds       = big.NewInt(1000000000000000)
	)

	// runtime code is 0x60ffff : PUSH1 0xFF SELFDESTRUCT, a.k.a SELFDESTRUCT(0xFF)
	code := append([]byte{0x60, 0xff, 0xff}, make([]byte, 32-3)...)
	initCode := []byte{
		// SSTORE 1:1
		byte(vm.PUSH1), 0x1,
		byte(vm.PUSH1), 0x1,
		byte(vm.SSTORE),
		// Get the runtime-code on the stack
		byte(vm.PUSH32)}
	initCode = append(initCode, code...)
	initCode = append(initCode, []byte{
		byte(vm.PUSH1), 0x0, // offset
		byte(vm.MSTORE),
		byte(vm.PUSH1), 0x3, // size
		byte(vm.PUSH1), 0x0, // offset
		byte(vm.RETURN), // return 3 bytes of zero-code
	}...)
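
	// This initcode deploys the contract at destAddress (the sender's nonce-0 create
	// address) and leaves persistent slot 1 set; the second transaction in the same
	// block then calls it, running PUSH1 0xFF SELFDESTRUCT and deleting the freshly
	// created account again.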
	gspec := &Genesis{
		Config: config,
		Alloc: GenesisAlloc{
			address: {Balance: funds},
		},
	}
	nonce := uint64(0)
	signer := types.HomesteadSigner{}
	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2, func(i int, b *BlockGen) {
		fee := big.NewInt(1)
		if b.header.BaseFee != nil {
			fee = b.header.BaseFee
		}
		b.SetCoinbase(common.Address{1})
		tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{
			Nonce:    nonce,
			GasPrice: new(big.Int).Set(fee),
			Gas:      100000,
			Data:     initCode,
		})
		nonce++
		b.AddTx(tx)
		tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{
			Nonce:    nonce,
			GasPrice: new(big.Int).Set(fee),
			Gas:      100000,
			To:       &destAddress,
		})
		b.AddTx(tx)
		nonce++
	})
	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
		//Debug:  true,
		//Tracer: logger.NewJSONLogger(nil, os.Stdout),
	}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	// Import the blocks
	for _, block := range blocks {
		if _, err := chain.InsertChain([]*types.Block{block}); err != nil {
			t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
		}
	}
}

// TestTransientStorageReset ensures the transient storage is wiped correctly
// between transactions.
func TestTransientStorageReset(t *testing.T) {
	var (
		engine      = ethash.NewFaker()
		key, _      = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address     = crypto.PubkeyToAddress(key.PublicKey)
		destAddress = crypto.CreateAddress(address, 0)
		funds       = big.NewInt(1000000000000000)
		vmConfig    = vm.Config{
			ExtraEips: []int{1153}, // Enable transient storage EIP
		}
	)
	code := append([]byte{
		// TLoad value with location 1
		byte(vm.PUSH1), 0x1,
		byte(vm.TLOAD),

		// PUSH location
		byte(vm.PUSH1), 0x1,

		// SStore location:value
		byte(vm.SSTORE),
	}, make([]byte, 32-6)...)
	initCode := []byte{
		// TSTORE 1:1
		byte(vm.PUSH1), 0x1,
		byte(vm.PUSH1), 0x1,
		byte(vm.TSTORE),

		// Get the runtime-code on the stack
		byte(vm.PUSH32)}
	initCode = append(initCode, code...)
	initCode = append(initCode, []byte{
		byte(vm.PUSH1), 0x0, // offset
		byte(vm.MSTORE),
		byte(vm.PUSH1), 0x6, // size
		byte(vm.PUSH1), 0x0, // offset
		byte(vm.RETURN), // return 6 bytes of zero-code
	}...)
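
	// The constructor TSTOREs 1 into transient slot 1, while the deployed runtime code
	// TLOADs slot 1 and SSTOREs the result into persistent slot 1. Since transient
	// storage is wiped between transactions, the second tx must read zero and leave the
	// persistent slot empty, which is asserted at the end of the test.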
	gspec := &Genesis{
		Config: params.TestChainConfig,
		Alloc: GenesisAlloc{
			address: {Balance: funds},
		},
	}
	nonce := uint64(0)
	signer := types.HomesteadSigner{}
	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
		fee := big.NewInt(1)
		if b.header.BaseFee != nil {
			fee = b.header.BaseFee
		}
		b.SetCoinbase(common.Address{1})
		tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{
			Nonce:    nonce,
			GasPrice: new(big.Int).Set(fee),
			Gas:      100000,
			Data:     initCode,
		})
		nonce++
		b.AddTxWithVMConfig(tx, vmConfig)

		tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{
			Nonce:    nonce,
			GasPrice: new(big.Int).Set(fee),
			Gas:      100000,
			To:       &destAddress,
		})
		b.AddTxWithVMConfig(tx, vmConfig)
		nonce++
	})

	// Initialize the blockchain with 1153 enabled.
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vmConfig, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	// Import the blocks
	if _, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("failed to insert into chain: %v", err)
	}
	// Check the storage
	state, err := chain.StateAt(chain.CurrentHeader().Root)
	if err != nil {
		t.Fatalf("Failed to load state %v", err)
	}
	loc := common.BytesToHash([]byte{1})
	slot := state.GetState(destAddress, loc)
	if slot != (common.Hash{}) {
		t.Fatalf("Unexpected dirty storage slot")
	}
}

func TestEIP3651(t *testing.T) {
	var (
		aa     = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
		bb     = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
		engine = beacon.NewFaker()

		// A sender who makes transactions, has some funds
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		addr2   = crypto.PubkeyToAddress(key2.PublicKey)
		funds   = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether))
		config  = *params.AllEthashProtocolChanges
		gspec   = &Genesis{
			Config: &config,
			Alloc: GenesisAlloc{
				addr1: {Balance: funds},
				addr2: {Balance: funds},
				// The address 0xAAAA sloads 0x00 and 0x01
				aa: {
					Code: []byte{
						byte(vm.PC),
						byte(vm.PC),
						byte(vm.SLOAD),
						byte(vm.SLOAD),
					},
					Nonce:   0,
					Balance: big.NewInt(0),
				},
				// The address 0xBBBB calls 0xAAAA
				bb: {
					Code: []byte{
						byte(vm.PUSH1), 0, // out size
						byte(vm.DUP1),  // out offset
						byte(vm.DUP1),  // in size
						byte(vm.DUP1),  // in offset
						byte(vm.PUSH2), // address
						byte(0xaa),
						byte(0xaa),
						byte(vm.GAS), // gas
						byte(vm.DELEGATECALL),
					},
					Nonce:   0,
					Balance: big.NewInt(0),
				},
			},
		}
	)

	gspec.Config.BerlinBlock = common.Big0
	gspec.Config.LondonBlock = common.Big0
	gspec.Config.TerminalTotalDifficulty = common.Big0
	gspec.Config.TerminalTotalDifficultyPassed = true
	gspec.Config.ShanghaiTime = u64(0)
	signer := types.LatestSigner(gspec.Config)

	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
		b.SetCoinbase(aa)
		// One transaction to BB, which delegatecalls the coinbase AA
		txdata := &types.DynamicFeeTx{
			ChainID:    gspec.Config.ChainID,
			Nonce:      0,
			To:         &bb,
			Gas:        500000,
			GasFeeCap:  newGwei(5),
			GasTipCap:  big.NewInt(2),
			AccessList: nil,
			Data:       []byte{},
		}
		tx := types.NewTx(txdata)
		tx, _ = types.SignTx(tx, signer, key1)

		b.AddTx(tx)
	})
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr)}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}

	block := chain.GetBlockByNumber(1)

	// Ensure the gas used matches the expectation for a warm-coinbase DELEGATECALL (EIP-3651).
	innerGas := vm.GasQuickStep*2 + params.ColdSloadCostEIP2929*2
	expectedGas := params.TxGas + 5*vm.GasFastestStep + vm.GasQuickStep + 100 + innerGas // 100 because 0xaaaa is the (warm) coinbase
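	// With the Shanghai-era constants this is 21000 + 5*3 (PUSH1/DUP1s/PUSH2) + 2 (GAS)
	// + 100 (warm coinbase access) + 2*2 + 2*2100 (inner PCs and cold SLOADs) = 25321 gas.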
	if block.GasUsed() != expectedGas {
		t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed())
	}

	state, _ := chain.State()

	// Ensure that miner received only the tx's tip.
	actual := state.GetBalance(block.Coinbase())
	expected := new(big.Int).SetUint64(block.GasUsed() * block.Transactions()[0].GasTipCap().Uint64())
	if actual.Cmp(expected) != 0 {
		t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
	}

	// Ensure the tx sender paid for the gasUsed * (tip + block baseFee).
	actual = new(big.Int).Sub(funds, state.GetBalance(addr1))
	expected = new(big.Int).SetUint64(block.GasUsed() * (block.Transactions()[0].GasTipCap().Uint64() + block.BaseFee().Uint64()))
	if actual.Cmp(expected) != 0 {
		t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
	}
}