// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"errors"
	"fmt"
	"math/big"
	"math/rand"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/beacon"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/tracers/logger"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/trie"
)

// So we can deterministically seed different blockchains
var (
	canonicalSeed = 1
	forkSeed      = 2
)

// newCanonical creates a chain database, and injects a deterministic canonical
// chain. Depending on the full flag, it creates either a full block chain or a
// header-only chain. The database and genesis specification for block generation
// are also returned in case more test blocks are needed later.
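//
// A typical call, mirroring the call sites in the tests below (values are
// illustrative only):
//
//	genDb, gspec, chain, err := newCanonical(ethash.NewFaker(), 10, true)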
func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *Genesis, *BlockChain, error) {
	var (
		genesis = &Genesis{
			BaseFee: big.NewInt(params.InitialBaseFee),
			Config:  params.AllEthashProtocolChanges,
		}
	)
	// Initialize a fresh chain with only a genesis block
	blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)

	// Create and inject the requested chain
	if n == 0 {
		return rawdb.NewMemoryDatabase(), genesis, blockchain, nil
	}
	if full {
		// Full block-chain requested
		genDb, blocks := makeBlockChainWithGenesis(genesis, n, engine, canonicalSeed)
		_, err := blockchain.InsertChain(blocks)
		return genDb, genesis, blockchain, err
	}
	// Header-only chain requested
	genDb, headers := makeHeaderChainWithGenesis(genesis, n, engine, canonicalSeed)
	_, err := blockchain.InsertHeaderChain(headers, 1)
	return genDb, genesis, blockchain, err
}

// newGwei returns n gwei expressed in wei.
func newGwei(n int64) *big.Int {
	return new(big.Int).Mul(big.NewInt(n), big.NewInt(params.GWei))
}

// Test fork of length N starting from block i
func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
	// Copy old chain up to #i into a new db
	genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full)
	if err != nil {
		t.Fatal("could not make new canonical in testFork", err)
	}
	defer blockchain2.Stop()

	// Assert the chains have the same header/block at #i
	var hash1, hash2 common.Hash
	if full {
		hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash()
		hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash()
	} else {
		hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash()
		hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash()
	}
	if hash1 != hash2 {
		t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
	}
	// Extend the newly created chain
	var (
		blockChainB  []*types.Block
		headerChainB []*types.Header
	)
	if full {
		blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed)
		if _, err := blockchain2.InsertChain(blockChainB); err != nil {
			t.Fatalf("failed to insert forking chain: %v", err)
		}
	} else {
		headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
		if _, err := blockchain2.InsertHeaderChain(headerChainB, 1); err != nil {
			t.Fatalf("failed to insert forking chain: %v", err)
		}
	}
	// Sanity check that the forked chain can be imported into the original
	var tdPre, tdPost *big.Int

	if full {
		cur := blockchain.CurrentBlock()
		tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
		if err := testBlockChainImport(blockChainB, blockchain); err != nil {
			t.Fatalf("failed to import forked block chain: %v", err)
		}
		last := blockChainB[len(blockChainB)-1]
		tdPost = blockchain.GetTd(last.Hash(), last.NumberU64())
	} else {
		cur := blockchain.CurrentHeader()
		tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
		if err := testHeaderChainImport(headerChainB, blockchain); err != nil {
			t.Fatalf("failed to import forked header chain: %v", err)
		}
		last := headerChainB[len(headerChainB)-1]
		tdPost = blockchain.GetTd(last.Hash(), last.Number.Uint64())
	}
	// Compare the total difficulties of the chains
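	// tdPre is the total difficulty at the original chain's head before the fork
	// was imported, tdPost the total difficulty at the imported fork's tip. The
	// comparator passed by callers (better/worse/equal below) encodes the
	// expected relationship between the two.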
	comparator(tdPre, tdPost)
}

// testBlockChainImport tries to process a chain of blocks, writing them into
// the database if successful.
func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
	for _, block := range chain {
		// Try and process the block
		err := blockchain.engine.VerifyHeader(blockchain, block.Header(), true)
		if err == nil {
			err = blockchain.validator.ValidateBody(block)
		}
		if err != nil {
			if err == ErrKnownBlock {
				continue
			}
			return err
		}
		statedb, err := state.New(blockchain.GetBlockByHash(block.ParentHash()).Root(), blockchain.stateCache, nil)
		if err != nil {
			return err
		}
		receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{})
		if err != nil {
			blockchain.reportBlock(block, receipts, err)
			return err
		}
		err = blockchain.validator.ValidateState(block, statedb, receipts, usedGas)
		if err != nil {
			blockchain.reportBlock(block, receipts, err)
			return err
		}

		blockchain.chainmu.MustLock()
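		// The total difficulty stored for a block is its parent's total
		// difficulty plus the block's own difficulty.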
		rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)))
		rawdb.WriteBlock(blockchain.db, block)
		statedb.Commit(false)
		blockchain.chainmu.Unlock()
	}
	return nil
}

// testHeaderChainImport tries to process a chain of headers, writing them into
// the database if successful.
func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
	for _, header := range chain {
		// Try and validate the header
		if err := blockchain.engine.VerifyHeader(blockchain, header, false); err != nil {
			return err
		}
		// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
		blockchain.chainmu.MustLock()
		rawdb.WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTd(header.ParentHash, header.Number.Uint64()-1)))
		rawdb.WriteHeader(blockchain.db, header)
		blockchain.chainmu.Unlock()
	}
	return nil
}

func TestLastBlock(t *testing.T) {
	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
	if err != nil {
		t.Fatalf("failed to create pristine chain: %v", err)
	}
	defer blockchain.Stop()

	blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 1, ethash.NewFullFaker(), genDb, 0)
	if _, err := blockchain.InsertChain(blocks); err != nil {
		t.Fatalf("Failed to insert block: %v", err)
	}
	if blocks[len(blocks)-1].Hash() != rawdb.ReadHeadBlockHash(blockchain.db) {
		t.Fatalf("Write/Get HeadBlockHash failed")
	}
}

// testInsertAfterMerge inserts the blocks/headers after the fork choice rule is changed.
// The chain is reorged to whatever is specified.
func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool) {
	// Copy old chain up to #i into a new db
	genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full)
	if err != nil {
		t.Fatal("could not make new canonical in testInsertAfterMerge", err)
	}
	defer blockchain2.Stop()

	// Assert the chains have the same header/block at #i
	var hash1, hash2 common.Hash
	if full {
		hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash()
		hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash()
	} else {
		hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash()
		hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash()
	}
	if hash1 != hash2 {
		t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1)
	}

	// Extend the newly created chain
	if full {
		blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed)
		if _, err := blockchain2.InsertChain(blockChainB); err != nil {
			t.Fatalf("failed to insert forking chain: %v", err)
		}
		if blockchain2.CurrentBlock().Number.Uint64() != blockChainB[len(blockChainB)-1].NumberU64() {
			t.Fatalf("failed to reorg to the given chain")
		}
		if blockchain2.CurrentBlock().Hash() != blockChainB[len(blockChainB)-1].Hash() {
			t.Fatalf("failed to reorg to the given chain")
		}
	} else {
		headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed)
		if _, err := blockchain2.InsertHeaderChain(headerChainB, 1); err != nil {
			t.Fatalf("failed to insert forking chain: %v", err)
		}
		if blockchain2.CurrentHeader().Number.Uint64() != headerChainB[len(headerChainB)-1].Number.Uint64() {
			t.Fatalf("failed to reorg to the given chain")
		}
		if blockchain2.CurrentHeader().Hash() != headerChainB[len(headerChainB)-1].Hash() {
			t.Fatalf("failed to reorg to the given chain")
		}
	}
}

// Tests that given a starting canonical chain of a given size, it can be extended
// with various length chains.
func TestExtendCanonicalHeaders(t *testing.T) { testExtendCanonical(t, false) }
func TestExtendCanonicalBlocks(t *testing.T)  { testExtendCanonical(t, true) }

func testExtendCanonical(t *testing.T, full bool) {
	length := 5

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	// Define the difficulty comparator
	better := func(td1, td2 *big.Int) {
		if td2.Cmp(td1) <= 0 {
			t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
		}
	}
	// Start fork from current height
	testFork(t, processor, length, 1, full, better)
	testFork(t, processor, length, 2, full, better)
	testFork(t, processor, length, 5, full, better)
	testFork(t, processor, length, 10, full, better)
}

// Tests that given a starting canonical chain of a given size, it can be extended
// with various length chains.
func TestExtendCanonicalHeadersAfterMerge(t *testing.T) { testExtendCanonicalAfterMerge(t, false) }
func TestExtendCanonicalBlocksAfterMerge(t *testing.T)  { testExtendCanonicalAfterMerge(t, true) }

func testExtendCanonicalAfterMerge(t *testing.T, full bool) {
	length := 5

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	testInsertAfterMerge(t, processor, length, 1, full)
	testInsertAfterMerge(t, processor, length, 10, full)
}

// Tests that given a starting canonical chain of a given size, creating shorter
// forks does not take canonical ownership.
func TestShorterForkHeaders(t *testing.T) { testShorterFork(t, false) }
func TestShorterForkBlocks(t *testing.T)  { testShorterFork(t, true) }

func testShorterFork(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	// Define the difficulty comparator
	worse := func(td1, td2 *big.Int) {
		if td2.Cmp(td1) >= 0 {
			t.Errorf("total difficulty mismatch: have %v, expected less than %v", td2, td1)
		}
	}
	// Sum of numbers must be less than `length` for this to be a shorter fork
	testFork(t, processor, 0, 3, full, worse)
	testFork(t, processor, 0, 7, full, worse)
	testFork(t, processor, 1, 1, full, worse)
	testFork(t, processor, 1, 7, full, worse)
	testFork(t, processor, 5, 3, full, worse)
	testFork(t, processor, 5, 4, full, worse)
}

// Tests that given a starting canonical chain of a given size, creating shorter
// forks does not take canonical ownership.
func TestShorterForkHeadersAfterMerge(t *testing.T) { testShorterForkAfterMerge(t, false) }
func TestShorterForkBlocksAfterMerge(t *testing.T)  { testShorterForkAfterMerge(t, true) }

func testShorterForkAfterMerge(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	testInsertAfterMerge(t, processor, 0, 3, full)
	testInsertAfterMerge(t, processor, 0, 7, full)
	testInsertAfterMerge(t, processor, 1, 1, full)
	testInsertAfterMerge(t, processor, 1, 7, full)
	testInsertAfterMerge(t, processor, 5, 3, full)
	testInsertAfterMerge(t, processor, 5, 4, full)
}

// Tests that given a starting canonical chain of a given size, creating longer
// forks does take canonical ownership.
func TestLongerForkHeaders(t *testing.T) { testLongerFork(t, false) }
func TestLongerForkBlocks(t *testing.T)  { testLongerFork(t, true) }

func testLongerFork(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	testInsertAfterMerge(t, processor, 0, 11, full)
	testInsertAfterMerge(t, processor, 0, 15, full)
	testInsertAfterMerge(t, processor, 1, 10, full)
	testInsertAfterMerge(t, processor, 1, 12, full)
	testInsertAfterMerge(t, processor, 5, 6, full)
	testInsertAfterMerge(t, processor, 5, 8, full)
}

// Tests that given a starting canonical chain of a given size, creating longer
// forks does take canonical ownership.
func TestLongerForkHeadersAfterMerge(t *testing.T) { testLongerForkAfterMerge(t, false) }
func TestLongerForkBlocksAfterMerge(t *testing.T)  { testLongerForkAfterMerge(t, true) }

func testLongerForkAfterMerge(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	testInsertAfterMerge(t, processor, 0, 11, full)
	testInsertAfterMerge(t, processor, 0, 15, full)
	testInsertAfterMerge(t, processor, 1, 10, full)
	testInsertAfterMerge(t, processor, 1, 12, full)
	testInsertAfterMerge(t, processor, 5, 6, full)
	testInsertAfterMerge(t, processor, 5, 8, full)
}

// Tests that given a starting canonical chain of a given size, creating equal
// forks does take canonical ownership.
func TestEqualForkHeaders(t *testing.T) { testEqualFork(t, false) }
func TestEqualForkBlocks(t *testing.T)  { testEqualFork(t, true) }

func testEqualFork(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	// Define the difficulty comparator
	equal := func(td1, td2 *big.Int) {
		if td2.Cmp(td1) != 0 {
			t.Errorf("total difficulty mismatch: have %v, want %v", td2, td1)
		}
	}
	// Sum of numbers must be equal to `length` for this to be an equal fork
	testFork(t, processor, 0, 10, full, equal)
	testFork(t, processor, 1, 9, full, equal)
	testFork(t, processor, 2, 8, full, equal)
	testFork(t, processor, 5, 5, full, equal)
	testFork(t, processor, 6, 4, full, equal)
	testFork(t, processor, 9, 1, full, equal)
}

// Tests that given a starting canonical chain of a given size, creating equal
// forks does take canonical ownership.
func TestEqualForkHeadersAfterMerge(t *testing.T) { testEqualForkAfterMerge(t, false) }
func TestEqualForkBlocksAfterMerge(t *testing.T)  { testEqualForkAfterMerge(t, true) }

func testEqualForkAfterMerge(t *testing.T, full bool) {
	length := 10

	// Make first chain starting from genesis
	_, _, processor, err := newCanonical(ethash.NewFaker(), length, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer processor.Stop()

	testInsertAfterMerge(t, processor, 0, 10, full)
	testInsertAfterMerge(t, processor, 1, 9, full)
	testInsertAfterMerge(t, processor, 2, 8, full)
	testInsertAfterMerge(t, processor, 5, 5, full)
	testInsertAfterMerge(t, processor, 6, 4, full)
	testInsertAfterMerge(t, processor, 9, 1, full)
}

// Tests that chains missing links do not get accepted by the processor.
func TestBrokenHeaderChain(t *testing.T) { testBrokenChain(t, false) }
func TestBrokenBlockChain(t *testing.T)  { testBrokenChain(t, true) }

func testBrokenChain(t *testing.T, full bool) {
	// Make chain starting from genesis
	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	defer blockchain.Stop()

	// Create a forked chain, and try to insert with a missing link
	if full {
		chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
		if err := testBlockChainImport(chain, blockchain); err == nil {
			t.Errorf("broken block chain not reported")
		}
	} else {
		chain := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 5, ethash.NewFaker(), genDb, forkSeed)[1:]
		if err := testHeaderChainImport(chain, blockchain); err == nil {
			t.Errorf("broken header chain not reported")
		}
	}
}

// Tests that reorganising a long difficult chain after a short easy one
// overwrites the canonical numbers and links in the database.
func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
func TestReorgLongBlocks(t *testing.T)  { testReorgLong(t, true) }

func testReorgLong(t *testing.T, full bool) {
	testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full)
}

// Tests that reorganising a short difficult chain after a long easy one
// overwrites the canonical numbers and links in the database.
func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false) }
func TestReorgShortBlocks(t *testing.T)  { testReorgShort(t, true) }

func testReorgShort(t *testing.T, full bool) {
	// Create a long easy chain vs. a short heavy one. Due to difficulty adjustment
	// we need a fairly long chain of blocks with different difficulties for a short
	// one to become heavier than a long one. The 96 is an empirical value.
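	// Roughly: the +60s time offsets slow the easy chain down, which drives its
	// per-block ethash difficulty lower, while the -9s offsets on the short chain
	// push its per-block difficulty up, so 95 heavy blocks end up outweighing 96
	// easy ones in total difficulty.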
	easy := make([]int64, 96)
	for i := 0; i < len(easy); i++ {
		easy[i] = 60
	}
	diff := make([]int64, len(easy)-1)
	for i := 0; i < len(diff); i++ {
		diff[i] = -9
	}
	testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full)
}

func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
	// Create a pristine chain and database
	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
	if err != nil {
		t.Fatalf("failed to create pristine chain: %v", err)
	}
	defer blockchain.Stop()

	// Insert an easy and a difficult chain afterwards
	easyBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), genDb, len(first), func(i int, b *BlockGen) {
		b.OffsetTime(first[i])
	})
	diffBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), genDb, len(second), func(i int, b *BlockGen) {
		b.OffsetTime(second[i])
	})
	if full {
		if _, err := blockchain.InsertChain(easyBlocks); err != nil {
			t.Fatalf("failed to insert easy chain: %v", err)
		}
		if _, err := blockchain.InsertChain(diffBlocks); err != nil {
			t.Fatalf("failed to insert difficult chain: %v", err)
		}
	} else {
		easyHeaders := make([]*types.Header, len(easyBlocks))
		for i, block := range easyBlocks {
			easyHeaders[i] = block.Header()
		}
		diffHeaders := make([]*types.Header, len(diffBlocks))
		for i, block := range diffBlocks {
			diffHeaders[i] = block.Header()
		}
		if _, err := blockchain.InsertHeaderChain(easyHeaders, 1); err != nil {
			t.Fatalf("failed to insert easy chain: %v", err)
		}
		if _, err := blockchain.InsertHeaderChain(diffHeaders, 1); err != nil {
			t.Fatalf("failed to insert difficult chain: %v", err)
		}
	}
	// Check that the chain is valid number and link wise
	if full {
		prev := blockchain.CurrentBlock()
		for block := blockchain.GetBlockByNumber(blockchain.CurrentBlock().Number.Uint64() - 1); block.NumberU64() != 0; prev, block = block.Header(), blockchain.GetBlockByNumber(block.NumberU64()-1) {
			if prev.ParentHash != block.Hash() {
				t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash, block.Hash())
			}
		}
	} else {
		prev := blockchain.CurrentHeader()
		for header := blockchain.GetHeaderByNumber(blockchain.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, blockchain.GetHeaderByNumber(header.Number.Uint64()-1) {
			if prev.ParentHash != header.Hash() {
				t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
			}
		}
	}
	// Make sure the chain total difficulty is the correct one
	want := new(big.Int).Add(blockchain.genesisBlock.Difficulty(), big.NewInt(td))
	if full {
		cur := blockchain.CurrentBlock()
		if have := blockchain.GetTd(cur.Hash(), cur.Number.Uint64()); have.Cmp(want) != 0 {
			t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
		}
	} else {
		cur := blockchain.CurrentHeader()
		if have := blockchain.GetTd(cur.Hash(), cur.Number.Uint64()); have.Cmp(want) != 0 {
			t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
		}
	}
}

// Tests that the insertion functions detect banned hashes.
func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
func TestBadBlockHashes(t *testing.T)  { testBadHashes(t, true) }

func testBadHashes(t *testing.T, full bool) {
	// Create a pristine chain and database
	genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
	if err != nil {
		t.Fatalf("failed to create pristine chain: %v", err)
	}
	defer blockchain.Stop()

	// Create a chain, ban a hash and try to import
	if full {
		blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 3, ethash.NewFaker(), genDb, 10)

		BadHashes[blocks[2].Header().Hash()] = true
		defer func() { delete(BadHashes, blocks[2].Header().Hash()) }()

		_, err = blockchain.InsertChain(blocks)
	} else {
		headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 3, ethash.NewFaker(), genDb, 10)

		BadHashes[headers[2].Hash()] = true
		defer func() { delete(BadHashes, headers[2].Hash()) }()

		_, err = blockchain.InsertHeaderChain(headers, 1)
	}
	if !errors.Is(err, ErrBannedHash) {
		t.Errorf("error mismatch: have: %v, want: %v", err, ErrBannedHash)
	}
}

// Tests that bad hashes are detected on boot, and the chain rolled back to a
// good state prior to the bad hash.
func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
func TestReorgBadBlockHashes(t *testing.T)  { testReorgBadHashes(t, true) }

func testReorgBadHashes(t *testing.T, full bool) {
	// Create a pristine chain and database
	genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
	if err != nil {
		t.Fatalf("failed to create pristine chain: %v", err)
	}
	// Create a chain, import and ban afterwards
	headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 4, ethash.NewFaker(), genDb, 10)
	blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 4, ethash.NewFaker(), genDb, 10)

	if full {
		if _, err = blockchain.InsertChain(blocks); err != nil {
			t.Errorf("failed to import blocks: %v", err)
		}
		if blockchain.CurrentBlock().Hash() != blocks[3].Hash() {
			t.Errorf("last block hash mismatch: have: %x, want %x", blockchain.CurrentBlock().Hash(), blocks[3].Header().Hash())
		}
		BadHashes[blocks[3].Header().Hash()] = true
		defer func() { delete(BadHashes, blocks[3].Header().Hash()) }()
	} else {
		if _, err = blockchain.InsertHeaderChain(headers, 1); err != nil {
			t.Errorf("failed to import headers: %v", err)
		}
		if blockchain.CurrentHeader().Hash() != headers[3].Hash() {
			t.Errorf("last header hash mismatch: have: %x, want %x", blockchain.CurrentHeader().Hash(), headers[3].Hash())
		}
		BadHashes[headers[3].Hash()] = true
		defer func() { delete(BadHashes, headers[3].Hash()) }()
	}
	blockchain.Stop()

	// Create a new BlockChain and check that it rolled back the state.
	ncm, err := NewBlockChain(blockchain.db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create new chain manager: %v", err)
	}
	if full {
		if ncm.CurrentBlock().Hash() != blocks[2].Header().Hash() {
			t.Errorf("last block hash mismatch: have: %x, want %x", ncm.CurrentBlock().Hash(), blocks[2].Header().Hash())
		}
		if blocks[2].Header().GasLimit != ncm.GasLimit() {
			t.Errorf("last block gasLimit mismatch: have: %d, want %d", ncm.GasLimit(), blocks[2].Header().GasLimit)
		}
	} else {
		if ncm.CurrentHeader().Hash() != headers[2].Hash() {
			t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), headers[2].Hash())
		}
	}
	ncm.Stop()
}

// Tests chain insertions in the face of one entity containing an invalid nonce.
func TestHeadersInsertNonceError(t *testing.T) { testInsertNonceError(t, false) }
func TestBlocksInsertNonceError(t *testing.T)  { testInsertNonceError(t, true) }

func testInsertNonceError(t *testing.T, full bool) {
	doTest := func(i int) {
		// Create a pristine chain and database
		genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
		if err != nil {
			t.Fatalf("failed to create pristine chain: %v", err)
		}
		defer blockchain.Stop()

		// Create and insert a chain with a failing nonce
		var (
			failAt  int
			failRes int
			failNum uint64
		)
		if full {
			blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), i, ethash.NewFaker(), genDb, 0)

			failAt = rand.Int() % len(blocks)
			failNum = blocks[failAt].NumberU64()

			blockchain.engine = ethash.NewFakeFailer(failNum)
			failRes, err = blockchain.InsertChain(blocks)
		} else {
			headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), i, ethash.NewFaker(), genDb, 0)

			failAt = rand.Int() % len(headers)
			failNum = headers[failAt].Number.Uint64()

			blockchain.engine = ethash.NewFakeFailer(failNum)
			blockchain.hc.engine = blockchain.engine
			failRes, err = blockchain.InsertHeaderChain(headers, 1)
		}
		// Check that the returned error indicates the failure
		if failRes != failAt {
			t.Errorf("test %d: failure (%v) index mismatch: have %d, want %d", i, err, failRes, failAt)
		}
		// Check that no blocks after the failing block have been inserted
		for j := 0; j < i-failAt; j++ {
			if full {
				if block := blockchain.GetBlockByNumber(failNum + uint64(j)); block != nil {
					t.Errorf("test %d: invalid block in chain: %v", i, block)
				}
			} else {
				if header := blockchain.GetHeaderByNumber(failNum + uint64(j)); header != nil {
					t.Errorf("test %d: invalid header in chain: %v", i, header)
				}
			}
		}
	}
	for i := 1; i < 25 && !t.Failed(); i++ {
		doTest(i)
	}
}

// Tests that fast importing a block chain produces the same chain data as the
// classical full block processing.
func TestFastVsFullChains(t *testing.T) {
	// Configure and generate a sample block chain
	var (
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(1000000000000000)
		gspec   = &Genesis{
			Config:  params.TestChainConfig,
			Alloc:   GenesisAlloc{address: {Balance: funds}},
			BaseFee: big.NewInt(params.InitialBaseFee),
		}
		signer = types.LatestSigner(gspec.Config)
	)
	_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 1024, func(i int, block *BlockGen) {
		block.SetCoinbase(common.Address{0x00})

		// If the block number is a multiple of 3, send a few bonus transactions to the miner
		if i%3 == 2 {
			for j := 0; j < i%4+1; j++ {
				tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
				if err != nil {
					panic(err)
				}
				block.AddTx(tx)
			}
		}
		// If the block number is a multiple of 5, add an uncle to the block
		if i%5 == 4 {
			block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i))})
		}
	})
	// Import the chain as an archive node for the comparison baseline
	archiveDb := rawdb.NewMemoryDatabase()
	archive, _ := NewBlockChain(archiveDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer archive.Stop()

	if n, err := archive.InsertChain(blocks); err != nil {
		t.Fatalf("failed to process block %d: %v", n, err)
	}
	// Fast import the chain as a non-archive node to test
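	// (fast import here means feeding headers first and then the bodies and
	// receipts via InsertReceiptChain, without re-executing the transactions)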
	fastDb := rawdb.NewMemoryDatabase()
	fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer fast.Stop()

	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil {
		t.Fatalf("failed to insert receipt %d: %v", n, err)
	}
	// Freezer-style fast import of the chain.
	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
	if err != nil {
		t.Fatalf("failed to create temp freezer db: %v", err)
	}
	defer ancientDb.Close()
	ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer ancient.Stop()

	if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(len(blocks)/2)); err != nil {
		t.Fatalf("failed to insert receipt %d: %v", n, err)
	}

	// Iterate over all chain data components, and cross reference
	for i := 0; i < len(blocks); i++ {
		num, hash := blocks[i].NumberU64(), blocks[i].Hash()

		if ftd, atd := fast.GetTd(hash, num), archive.GetTd(hash, num); ftd.Cmp(atd) != 0 {
			t.Errorf("block #%d [%x]: td mismatch: fastdb %v, archivedb %v", num, hash, ftd, atd)
		}
		if antd, artd := ancient.GetTd(hash, num), archive.GetTd(hash, num); antd.Cmp(artd) != 0 {
			t.Errorf("block #%d [%x]: td mismatch: ancientdb %v, archivedb %v", num, hash, antd, artd)
		}
		if fheader, aheader := fast.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); fheader.Hash() != aheader.Hash() {
			t.Errorf("block #%d [%x]: header mismatch: fastdb %v, archivedb %v", num, hash, fheader, aheader)
		}
		if anheader, arheader := ancient.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); anheader.Hash() != arheader.Hash() {
			t.Errorf("block #%d [%x]: header mismatch: ancientdb %v, archivedb %v", num, hash, anheader, arheader)
		}
		if fblock, arblock, anblock := fast.GetBlockByHash(hash), archive.GetBlockByHash(hash), ancient.GetBlockByHash(hash); fblock.Hash() != arblock.Hash() || anblock.Hash() != arblock.Hash() {
			t.Errorf("block #%d [%x]: block mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock, anblock, arblock)
		} else if types.DeriveSha(fblock.Transactions(), trie.NewStackTrie(nil)) != types.DeriveSha(arblock.Transactions(), trie.NewStackTrie(nil)) || types.DeriveSha(anblock.Transactions(), trie.NewStackTrie(nil)) != types.DeriveSha(arblock.Transactions(), trie.NewStackTrie(nil)) {
			t.Errorf("block #%d [%x]: transactions mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Transactions(), anblock.Transactions(), arblock.Transactions())
		} else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) {
			t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock.Uncles(), arblock.Uncles())
		}

		// Check receipts.
		freceipts := rawdb.ReadReceipts(fastDb, hash, num, fast.Config())
		anreceipts := rawdb.ReadReceipts(ancientDb, hash, num, fast.Config())
		areceipts := rawdb.ReadReceipts(archiveDb, hash, num, fast.Config())
		if types.DeriveSha(freceipts, trie.NewStackTrie(nil)) != types.DeriveSha(areceipts, trie.NewStackTrie(nil)) || types.DeriveSha(anreceipts, trie.NewStackTrie(nil)) != types.DeriveSha(areceipts, trie.NewStackTrie(nil)) {
			t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts)
		}

		// Check that hash-to-number mappings are present in all databases.
		if m := rawdb.ReadHeaderNumber(fastDb, hash); m == nil || *m != num {
			t.Errorf("block #%d [%x]: wrong hash-to-number mapping in fastdb: %v", num, hash, m)
		}
		if m := rawdb.ReadHeaderNumber(ancientDb, hash); m == nil || *m != num {
			t.Errorf("block #%d [%x]: wrong hash-to-number mapping in ancientdb: %v", num, hash, m)
		}
		if m := rawdb.ReadHeaderNumber(archiveDb, hash); m == nil || *m != num {
			t.Errorf("block #%d [%x]: wrong hash-to-number mapping in archivedb: %v", num, hash, m)
		}
	}

	// Check that the canonical chains are the same between the databases
	for i := 0; i < len(blocks)+1; i++ {
		if fhash, ahash := rawdb.ReadCanonicalHash(fastDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); fhash != ahash {
			t.Errorf("block #%d: canonical hash mismatch: fastdb %v, archivedb %v", i, fhash, ahash)
		}
		if anhash, arhash := rawdb.ReadCanonicalHash(ancientDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); anhash != arhash {
			t.Errorf("block #%d: canonical hash mismatch: ancientdb %v, archivedb %v", i, anhash, arhash)
		}
	}
}

// Tests that various import methods move the chain head pointers to the correct
// positions.
func TestLightVsFastVsFullChainHeads(t *testing.T) {
	// Configure and generate a sample block chain
	var (
		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address = crypto.PubkeyToAddress(key.PublicKey)
		funds   = big.NewInt(1000000000000000)
		gspec   = &Genesis{
			Config:  params.TestChainConfig,
			Alloc:   GenesisAlloc{address: {Balance: funds}},
			BaseFee: big.NewInt(params.InitialBaseFee),
		}
	)
	height := uint64(1024)
	_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)

	// makeDb creates a db instance for testing.
	makeDb := func() ethdb.Database {
		db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
		if err != nil {
			t.Fatalf("failed to create temp freezer db: %v", err)
		}
		return db
	}
	// Configure a subchain to roll back
	remove := blocks[height/2].NumberU64()

	// Create a small assertion method to check the three heads
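	// (CurrentHeader tracks the header-chain head, CurrentSnapBlock the latest
	// block whose body and receipts are stored, and CurrentBlock the latest
	// block with full state; the import modes below differ in how far each of
	// these pointers is expected to advance)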
	assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) {
		t.Helper()

		if num := chain.CurrentBlock().Number.Uint64(); num != block {
			t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block)
		}
		if num := chain.CurrentSnapBlock().Number.Uint64(); num != fast {
			t.Errorf("%s head snap-block mismatch: have #%v, want #%v", kind, num, fast)
		}
		if num := chain.CurrentHeader().Number.Uint64(); num != header {
			t.Errorf("%s head header mismatch: have #%v, want #%v", kind, num, header)
		}
	}
	// Import the chain as an archive node and ensure all pointers are updated
	archiveDb := makeDb()
	defer archiveDb.Close()

	archiveCaching := *defaultCacheConfig
	archiveCaching.TrieDirtyDisabled = true

	archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	if n, err := archive.InsertChain(blocks); err != nil {
		t.Fatalf("failed to process block %d: %v", n, err)
	}
	defer archive.Stop()

	assert(t, "archive", archive, height, height, height)
	archive.SetHead(remove - 1)
	assert(t, "archive", archive, height/2, height/2, height/2)

	// Import the chain as a non-archive node and ensure all pointers are updated
	fastDb := makeDb()
	defer fastDb.Close()
	fast, _ := NewBlockChain(fastDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer fast.Stop()

	headers := make([]*types.Header, len(blocks))
	for i, block := range blocks {
		headers[i] = block.Header()
	}
	if n, err := fast.InsertHeaderChain(headers, 1); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil {
		t.Fatalf("failed to insert receipt %d: %v", n, err)
	}
	assert(t, "fast", fast, height, height, 0)
	fast.SetHead(remove - 1)
	assert(t, "fast", fast, height/2, height/2, 0)

	// Import the chain as an ancient-first node and ensure all pointers are updated
	ancientDb := makeDb()
	defer ancientDb.Close()
	ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer ancient.Stop()

	if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
		t.Fatalf("failed to insert receipt %d: %v", n, err)
	}
	assert(t, "ancient", ancient, height, height, 0)
	ancient.SetHead(remove - 1)
	assert(t, "ancient", ancient, 0, 0, 0)

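	// Rewinding below the frozen boundary is expected to truncate the ancient
	// store back to just the genesis block, hence a single frozen item remains.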
	if frozen, err := ancientDb.Ancients(); err != nil || frozen != 1 {
		t.Fatalf("failed to truncate ancient store, want %v, have %v", 1, frozen)
	}
	// Import the chain as a light node and ensure all pointers are updated
	lightDb := makeDb()
	defer lightDb.Close()
	light, _ := NewBlockChain(lightDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	if n, err := light.InsertHeaderChain(headers, 1); err != nil {
		t.Fatalf("failed to insert header %d: %v", n, err)
	}
	defer light.Stop()

	assert(t, "light", light, height, 0, 0)
	light.SetHead(remove - 1)
	assert(t, "light", light, height/2, 0, 0)
}

// Tests that chain reorganisations handle transaction removals and reinsertions.
func TestChainTxReorgs(t *testing.T) {
	var (
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
		key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		addr2   = crypto.PubkeyToAddress(key2.PublicKey)
		addr3   = crypto.PubkeyToAddress(key3.PublicKey)
		gspec   = &Genesis{
			Config:   params.TestChainConfig,
			GasLimit: 3141592,
			Alloc: GenesisAlloc{
				addr1: {Balance: big.NewInt(1000000000000000)},
				addr2: {Balance: big.NewInt(1000000000000000)},
				addr3: {Balance: big.NewInt(1000000000000000)},
			},
		}
		signer = types.LatestSigner(gspec.Config)
	)

	// Create two transactions shared between the chains:
	//  - postponed: transaction included at a later block in the forked chain
	//  - swapped: transaction included at the same block number in the forked chain
	postponed, _ := types.SignTx(types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, key1)
	swapped, _ := types.SignTx(types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, key1)

	// Create two transactions that will be dropped by the forked chain:
	//  - pastDrop: transaction dropped retroactively from a past block
	//  - freshDrop: transaction dropped exactly at the block where the reorg is detected
	var pastDrop, freshDrop *types.Transaction

	// Create three transactions that will be added in the forked chain:
	//  - pastAdd:   transaction added before the reorganization is detected
	//  - freshAdd:  transaction added at the exact block the reorg is detected
	//  - futureAdd: transaction added after the reorg has already finished
	var pastAdd, freshAdd, futureAdd *types.Transaction

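	// Block layout of the two chains generated below (block number -> transactions):
	//
	//	original (3 blocks): #1 {pastDrop, postponed}  #2 {}  #3 {freshDrop, swapped}
	//	forked   (5 blocks): #1 {pastAdd}  #2 {}  #3 {postponed, swapped, freshAdd}  #4 {futureAdd}  #5 {}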
	_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {
		switch i {
		case 0:
			pastDrop, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key2)

			gen.AddTx(pastDrop)  // This transaction will be dropped in the fork from below the split point
			gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork

		case 2:
			freshDrop, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key2)

			gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point
			gen.AddTx(swapped)   // This transaction will be swapped out at the exact height

			gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain
		}
	})
	// Import the chain. This runs all block validation rules.
	db := rawdb.NewMemoryDatabase()
	blockchain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	if i, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert original chain[%d]: %v", i, err)
	}
	defer blockchain.Stop()

	// overwrite the old chain
	_, chain, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 5, func(i int, gen *BlockGen) {
		switch i {
		case 0:
			pastAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3)
			gen.AddTx(pastAdd) // This transaction needs to be injected during reorg

		case 2:
			gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain
			gen.AddTx(swapped)   // This transaction was swapped from the exact current spot in the original chain

			freshAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3)
			gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time

		case 3:
			futureAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3)
			gen.AddTx(futureAdd) // This transaction will be added after a full reorg
		}
	})
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert forked chain: %v", err)
	}

	// removed tx
	for i, tx := range (types.Transactions{pastDrop, freshDrop}) {
		if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn != nil {
			t.Errorf("drop %d: tx %v found while shouldn't have been", i, txn)
		}
		if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt != nil {
			t.Errorf("drop %d: receipt %v found while shouldn't have been", i, rcpt)
		}
	}
	// added tx
	for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) {
		if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil {
			t.Errorf("add %d: expected tx to be found", i)
		}
		if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt == nil {
			t.Errorf("add %d: expected receipt to be found", i)
		}
	}
	// shared tx
	for i, tx := range (types.Transactions{postponed, swapped}) {
		if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil {
			t.Errorf("share %d: expected tx to be found", i)
		}
		if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt == nil {
			t.Errorf("share %d: expected receipt to be found", i)
		}
	}
}

func TestLogReorgs(t *testing.T) {
	var (
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)

		// this code generates a log
		code   = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
		gspec  = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
		signer = types.LatestSigner(gspec.Config)
	)

	blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
	defer blockchain.Stop()

	rmLogsCh := make(chan RemovedLogsEvent)
	blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
	_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
		if i == 1 {
			tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, code), signer, key1)
			if err != nil {
				t.Fatalf("failed to create tx: %v", err)
			}
			gen.AddTx(tx)
		}
	})
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert chain: %v", err)
	}

	_, chain, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {})
	done := make(chan struct{})
	go func() {
		ev := <-rmLogsCh
		if len(ev.Logs) == 0 {
			t.Error("expected logs")
		}
		close(done)
	}()
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert forked chain: %v", err)
	}
	timeout := time.NewTimer(1 * time.Second)
	defer timeout.Stop()
	select {
	case <-done:
	case <-timeout.C:
		t.Fatal("timeout: no RemovedLogsEvent has been sent")
	}
}

// This EVM code generates a log when the contract is created.
var logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")

// This test checks that log events and RemovedLogsEvent are sent
// when the chain reorganizes.
func TestLogRebirth(t *testing.T) {
	var (
		key1, _       = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		addr1         = crypto.PubkeyToAddress(key1.PublicKey)
		gspec         = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
		signer        = types.LatestSigner(gspec.Config)
		engine        = ethash.NewFaker()
		blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
	)
	defer blockchain.Stop()

	// The event channels.
	newLogCh := make(chan []*types.Log, 10)
	rmLogsCh := make(chan RemovedLogsEvent, 10)
	blockchain.SubscribeLogsEvent(newLogCh)
	blockchain.SubscribeRemovedLogsEvent(rmLogsCh)

	// This chain contains 10 logs.
	genDb, chain, _ := GenerateChainWithGenesis(gspec, engine, 3, func(i int, gen *BlockGen) {
		if i < 2 {
			for ii := 0; ii < 5; ii++ {
				tx, err := types.SignNewTx(key1, signer, &types.LegacyTx{
					Nonce:    gen.TxNonce(addr1),
					GasPrice: gen.header.BaseFee,
					Gas:      uint64(1000001),
					Data:     logCode,
				})
				if err != nil {
					t.Fatalf("failed to create tx: %v", err)
				}
				gen.AddTx(tx)
			}
		}
	})
	if _, err := blockchain.InsertChain(chain); err != nil {
		t.Fatalf("failed to insert chain: %v", err)
	}
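	// The first two blocks carry five log-emitting contract creations each, so
	// ten new logs and no removed logs are expected at this point.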
	checkLogEvents(t, newLogCh, rmLogsCh, 10, 0)

	// Generate a competing chain with higher total difficulty that also contains
	// logs. Inserting it reorgs the canonical chain: the ten logs of the original
	// chain are removed and ten new ones are emitted.
	_, forkChain, _ := GenerateChainWithGenesis(gspec, engine, 3, func(i int, gen *BlockGen) {
		if i == 2 {
			// The last (head) block is not part of the reorg chain; we can ignore it
| 			return
 | |
| 		}
 | |
| 		for ii := 0; ii < 5; ii++ {
 | |
| 			tx, err := types.SignNewTx(key1, signer, &types.LegacyTx{
 | |
| 				Nonce:    gen.TxNonce(addr1),
 | |
| 				GasPrice: gen.header.BaseFee,
 | |
| 				Gas:      uint64(1000000),
 | |
| 				Data:     logCode,
 | |
| 			})
 | |
| 			if err != nil {
 | |
| 				t.Fatalf("failed to create tx: %v", err)
 | |
| 			}
 | |
| 			gen.AddTx(tx)
 | |
| 		}
 | |
| 		gen.OffsetTime(-9) // higher block difficulty
 | |
| 	})
 | |
| 	if _, err := blockchain.InsertChain(forkChain); err != nil {
 | |
| 		t.Fatalf("failed to insert forked chain: %v", err)
 | |
| 	}
 | |
| 	checkLogEvents(t, newLogCh, rmLogsCh, 10, 10)
 | |
| 
 | |
| 	// This chain segment is rooted in the original chain, but doesn't contain any logs.
 | |
| 	// When inserting it, the canonical chain switches away from forkChain and re-emits
 | |
| 	// the log events for the old chain, as well as a RemovedLogsEvent for forkChain.
 | |
| 	newBlocks, _ := GenerateChain(gspec.Config, chain[len(chain)-1], engine, genDb, 1, func(i int, gen *BlockGen) {})
 | |
| 	if _, err := blockchain.InsertChain(newBlocks); err != nil {
 | |
| 		t.Fatalf("failed to insert forked chain: %v", err)
 | |
| 	}
 | |
| 	checkLogEvents(t, newLogCh, rmLogsCh, 10, 10)
 | |
| }
 | |
| 
 | |
| // This test is a variation of TestLogRebirth. It verifies that log events are emitted
 | |
| // when a side chain containing log events overtakes the canonical chain.
 | |
| func TestSideLogRebirth(t *testing.T) {
 | |
| 	var (
 | |
| 		key1, _       = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		addr1         = crypto.PubkeyToAddress(key1.PublicKey)
 | |
| 		gspec         = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}}
 | |
| 		signer        = types.LatestSigner(gspec.Config)
 | |
| 		blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
 | |
| 	)
 | |
| 	defer blockchain.Stop()
 | |
| 
 | |
| 	newLogCh := make(chan []*types.Log, 10)
 | |
| 	rmLogsCh := make(chan RemovedLogsEvent, 10)
 | |
| 	blockchain.SubscribeLogsEvent(newLogCh)
 | |
| 	blockchain.SubscribeRemovedLogsEvent(rmLogsCh)
 | |
| 
 | |
| 	_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
 | |
| 		if i == 1 {
 | |
| 			gen.OffsetTime(-9) // higher block difficulty
 | |
| 		}
 | |
| 	})
 | |
| 	if _, err := blockchain.InsertChain(chain); err != nil {
 | |
| 		t.Fatalf("failed to insert forked chain: %v", err)
 | |
| 	}
 | |
| 	checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
 | |
| 
 | |
| 	// Generate side chain with lower difficulty
 | |
| 	genDb, sideChain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) {
 | |
| 		if i == 1 {
 | |
| 			tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, logCode), signer, key1)
 | |
| 			if err != nil {
 | |
| 				t.Fatalf("failed to create tx: %v", err)
 | |
| 			}
 | |
| 			gen.AddTx(tx)
 | |
| 		}
 | |
| 	})
 | |
| 	if _, err := blockchain.InsertChain(sideChain); err != nil {
 | |
| 		t.Fatalf("failed to insert forked chain: %v", err)
 | |
| 	}
 | |
| 	checkLogEvents(t, newLogCh, rmLogsCh, 0, 0)
 | |
| 
 | |
| 	// Generate a new block based on side chain.
 | |
| 	newBlocks, _ := GenerateChain(gspec.Config, sideChain[len(sideChain)-1], ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {})
 | |
| 	if _, err := blockchain.InsertChain(newBlocks); err != nil {
 | |
| 		t.Fatalf("failed to insert forked chain: %v", err)
 | |
| 	}
 | |
| 	checkLogEvents(t, newLogCh, rmLogsCh, 1, 0)
 | |
| }
 | |
| 
 | |
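| // checkLogEvents drains both subscription channels and verifies that exactly wantNew
 | |
| // log events and wantRemoved removed-log events have been delivered, and that each
 | |
| // batch arrives in ascending (block number, tx index) order. Typical usage, as in the
 | |
| // tests above:
 | |
| //
 | |
| //	checkLogEvents(t, newLogCh, rmLogsCh, 10, 0) // expect 10 new logs, no removals
 | |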
| func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan RemovedLogsEvent, wantNew, wantRemoved int) {
 | |
| 	t.Helper()
 | |
| 	var (
 | |
| 		countNew int
 | |
| 		countRm  int
 | |
| 		prev     int
 | |
| 	)
 | |
| 	// Drain events.
 | |
| 	for len(logsCh) > 0 {
 | |
| 		x := <-logsCh
 | |
| 		countNew += len(x)
 | |
| 		for _, log := range x {
 | |
| 			// We expect added logs to be in ascending order: 0:0, 0:1, 1:0 ...
 | |
| 			have := 100*int(log.BlockNumber) + int(log.TxIndex)
 | |
| 			if have < prev {
 | |
| 				t.Fatalf("Expected new logs to arrive in ascending order (%d < %d)", have, prev)
 | |
| 			}
 | |
| 			prev = have
 | |
| 		}
 | |
| 	}
 | |
| 	prev = 0
 | |
| 	for len(rmLogsCh) > 0 {
 | |
| 		x := <-rmLogsCh
 | |
| 		countRm += len(x.Logs)
 | |
| 		for _, log := range x.Logs {
 | |
| 			// We expect removed logs to be in ascending order: 0:0, 0:1, 1:0 ...
 | |
| 			have := 100*int(log.BlockNumber) + int(log.TxIndex)
 | |
| 			if have < prev {
 | |
| 				t.Fatalf("Expected removed logs to arrive in ascending order (%d < %d)", have, prev)
 | |
| 			}
 | |
| 			prev = have
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	if countNew != wantNew {
 | |
| 		t.Fatalf("wrong number of log events: got %d, want %d", countNew, wantNew)
 | |
| 	}
 | |
| 	if countRm != wantRemoved {
 | |
| 		t.Fatalf("wrong number of removed log events: got %d, want %d", countRm, wantRemoved)
 | |
| 	}
 | |
| }
 | |
| 
 | |
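| // TestReorgSideEvent checks that a ChainSideEvent is posted for every block that
 | |
| // ends up on the losing side of a reorg.
 | |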
| func TestReorgSideEvent(t *testing.T) {
 | |
| 	var (
 | |
| 		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
 | |
| 		gspec   = &Genesis{
 | |
| 			Config: params.TestChainConfig,
 | |
| 			Alloc:  GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}},
 | |
| 		}
 | |
| 		signer = types.LatestSigner(gspec.Config)
 | |
| 	)
 | |
| 	blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
 | |
| 	defer blockchain.Stop()
 | |
| 
 | |
| 	_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {})
 | |
| 	if _, err := blockchain.InsertChain(chain); err != nil {
 | |
| 		t.Fatalf("failed to insert chain: %v", err)
 | |
| 	}
 | |
| 
 | |
| 	_, replacementBlocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, gen *BlockGen) {
 | |
| 		tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, nil), signer, key1)
 | |
| 		if i == 2 {
 | |
| 			gen.OffsetTime(-9)
 | |
| 		}
 | |
| 		if err != nil {
 | |
| 			t.Fatalf("failed to create tx: %v", err)
 | |
| 		}
 | |
| 		gen.AddTx(tx)
 | |
| 	})
 | |
| 	chainSideCh := make(chan ChainSideEvent, 64)
 | |
| 	blockchain.SubscribeChainSideEvent(chainSideCh)
 | |
| 	if _, err := blockchain.InsertChain(replacementBlocks); err != nil {
 | |
| 		t.Fatalf("failed to insert chain: %v", err)
 | |
| 	}
 | |
| 
 | |
| 	// The first two blocks of the replacement chain are briefly considered side
 | |
| 	// blocks, because up to that point the original chain is still the heavier one;
 | |
| 	// once the replacement chain overtakes it, the three original blocks become side blocks as well.
 | |
| 	expectedSideHashes := map[common.Hash]bool{
 | |
| 		replacementBlocks[0].Hash(): true,
 | |
| 		replacementBlocks[1].Hash(): true,
 | |
| 		chain[0].Hash():             true,
 | |
| 		chain[1].Hash():             true,
 | |
| 		chain[2].Hash():             true,
 | |
| 	}
 | |
| 
 | |
| 	i := 0
 | |
| 
 | |
| 	const timeoutDura = 10 * time.Second
 | |
| 	timeout := time.NewTimer(timeoutDura)
 | |
| done:
 | |
| 	for {
 | |
| 		select {
 | |
| 		case ev := <-chainSideCh:
 | |
| 			block := ev.Block
 | |
| 			if _, ok := expectedSideHashes[block.Hash()]; !ok {
 | |
| 				t.Errorf("%d: didn't expect %x to be in side chain", i, block.Hash())
 | |
| 			}
 | |
| 			i++
 | |
| 
 | |
| 			if i == len(expectedSideHashes) {
 | |
| 				timeout.Stop()
 | |
| 
 | |
| 				break done
 | |
| 			}
 | |
| 			timeout.Reset(timeoutDura)
 | |
| 
 | |
| 		case <-timeout.C:
 | |
| 			t.Fatal("Timeout: possibly not all blocks triggered a ChainSideEvent")
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	// make sure no more events are fired
 | |
| 	select {
 | |
| 	case e := <-chainSideCh:
 | |
| 		t.Errorf("unexpected event fired: %v", e)
 | |
| 	case <-time.After(250 * time.Millisecond):
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // Tests if the canonical block can be fetched from the database during chain insertion.
 | |
| func TestCanonicalBlockRetrieval(t *testing.T) {
 | |
| 	_, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create pristine chain: %v", err)
 | |
| 	}
 | |
| 	defer blockchain.Stop()
 | |
| 
 | |
| 	_, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 10, func(i int, gen *BlockGen) {})
 | |
| 
 | |
| 	var pend sync.WaitGroup
 | |
| 	pend.Add(len(chain))
 | |
| 
 | |
| 	for i := range chain {
 | |
| 		go func(block *types.Block) {
 | |
| 			defer pend.Done()
 | |
| 
 | |
| 			// try to retrieve a block by its canonical hash and see if the block data can be retrieved.
 | |
| 			for {
 | |
| 				ch := rawdb.ReadCanonicalHash(blockchain.db, block.NumberU64())
 | |
| 				if ch == (common.Hash{}) {
 | |
| 					continue // busy wait for canonical hash to be written
 | |
| 				}
 | |
| 				if ch != block.Hash() {
 | |
| 					t.Errorf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex())
 | |
| 					return
 | |
| 				}
 | |
| 				fb := rawdb.ReadBlock(blockchain.db, ch, block.NumberU64())
 | |
| 				if fb == nil {
 | |
| 					t.Errorf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex())
 | |
| 					return
 | |
| 				}
 | |
| 				if fb.Hash() != block.Hash() {
 | |
| 					t.Errorf("invalid block hash for block %d, want %s, got %s", block.NumberU64(), block.Hash().Hex(), fb.Hash().Hex())
 | |
| 					return
 | |
| 				}
 | |
| 				return
 | |
| 			}
 | |
| 		}(chain[i])
 | |
| 
 | |
| 		if _, err := blockchain.InsertChain(types.Blocks{chain[i]}); err != nil {
 | |
| 			t.Fatalf("failed to insert block %d: %v", i, err)
 | |
| 		}
 | |
| 	}
 | |
| 	pend.Wait()
 | |
| }
 | |
| 
 | |
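| // TestEIP155Transition checks that unprotected (Homestead) and replay-protected
 | |
| // (EIP-155) transactions can coexist once the configured EIP155Block is passed, and
 | |
| // that a transaction signed with the wrong chain ID is rejected with
 | |
| // types.ErrInvalidChainId.
 | |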
| func TestEIP155Transition(t *testing.T) {
 | |
| 	// Configure and generate a sample block chain
 | |
| 	var (
 | |
| 		key, _     = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		address    = crypto.PubkeyToAddress(key.PublicKey)
 | |
| 		funds      = big.NewInt(1000000000)
 | |
| 		deleteAddr = common.Address{1}
 | |
| 		gspec      = &Genesis{
 | |
| 			Config: &params.ChainConfig{
 | |
| 				ChainID:        big.NewInt(1),
 | |
| 				EIP150Block:    big.NewInt(0),
 | |
| 				EIP155Block:    big.NewInt(2),
 | |
| 				HomesteadBlock: new(big.Int),
 | |
| 			},
 | |
| 			Alloc: GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}},
 | |
| 		}
 | |
| 	)
 | |
| 	genDb, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, block *BlockGen) {
 | |
| 		var (
 | |
| 			tx      *types.Transaction
 | |
| 			err     error
 | |
| 			basicTx = func(signer types.Signer) (*types.Transaction, error) {
 | |
| 				return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), 21000, new(big.Int), nil), signer, key)
 | |
| 			}
 | |
| 		)
 | |
| 		switch i {
 | |
| 		case 0:
 | |
| 			tx, err = basicTx(types.HomesteadSigner{})
 | |
| 			if err != nil {
 | |
| 				t.Fatal(err)
 | |
| 			}
 | |
| 			block.AddTx(tx)
 | |
| 		case 2:
 | |
| 			tx, err = basicTx(types.HomesteadSigner{})
 | |
| 			if err != nil {
 | |
| 				t.Fatal(err)
 | |
| 			}
 | |
| 			block.AddTx(tx)
 | |
| 
 | |
| 			tx, err = basicTx(types.LatestSigner(gspec.Config))
 | |
| 			if err != nil {
 | |
| 				t.Fatal(err)
 | |
| 			}
 | |
| 			block.AddTx(tx)
 | |
| 		case 3:
 | |
| 			tx, err = basicTx(types.HomesteadSigner{})
 | |
| 			if err != nil {
 | |
| 				t.Fatal(err)
 | |
| 			}
 | |
| 			block.AddTx(tx)
 | |
| 
 | |
| 			tx, err = basicTx(types.LatestSigner(gspec.Config))
 | |
| 			if err != nil {
 | |
| 				t.Fatal(err)
 | |
| 			}
 | |
| 			block.AddTx(tx)
 | |
| 		}
 | |
| 	})
 | |
| 
 | |
| 	blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
 | |
| 	defer blockchain.Stop()
 | |
| 
 | |
| 	if _, err := blockchain.InsertChain(blocks); err != nil {
 | |
| 		t.Fatal(err)
 | |
| 	}
 | |
| 	block := blockchain.GetBlockByNumber(1)
 | |
| 	if block.Transactions()[0].Protected() {
 | |
| 		t.Error("Expected block[0].txs[0] to not be replay protected")
 | |
| 	}
 | |
| 
 | |
| 	block = blockchain.GetBlockByNumber(3)
 | |
| 	if block.Transactions()[0].Protected() {
 | |
| 		t.Error("Expected block[3].txs[0] to not be replay protected")
 | |
| 	}
 | |
| 	if !block.Transactions()[1].Protected() {
 | |
| 		t.Error("Expected block[3].txs[1] to be replay protected")
 | |
| 	}
 | |
| 	if _, err := blockchain.InsertChain(blocks[4:]); err != nil {
 | |
| 		t.Fatal(err)
 | |
| 	}
 | |
| 
 | |
| 	// generate an invalid chain id transaction
 | |
| 	config := &params.ChainConfig{
 | |
| 		ChainID:        big.NewInt(2),
 | |
| 		EIP150Block:    big.NewInt(0),
 | |
| 		EIP155Block:    big.NewInt(2),
 | |
| 		HomesteadBlock: new(big.Int),
 | |
| 	}
 | |
| 	blocks, _ = GenerateChain(config, blocks[len(blocks)-1], ethash.NewFaker(), genDb, 4, func(i int, block *BlockGen) {
 | |
| 		var (
 | |
| 			tx      *types.Transaction
 | |
| 			err     error
 | |
| 			basicTx = func(signer types.Signer) (*types.Transaction, error) {
 | |
| 				return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), 21000, new(big.Int), nil), signer, key)
 | |
| 			}
 | |
| 		)
 | |
| 		if i == 0 {
 | |
| 			tx, err = basicTx(types.LatestSigner(config))
 | |
| 			if err != nil {
 | |
| 				t.Fatal(err)
 | |
| 			}
 | |
| 			block.AddTx(tx)
 | |
| 		}
 | |
| 	})
 | |
| 	_, err := blockchain.InsertChain(blocks)
 | |
| 	if have, want := err, types.ErrInvalidChainId; !errors.Is(have, want) {
 | |
| 		t.Errorf("have %v, want %v", have, want)
 | |
| 	}
 | |
| }
 | |
| 
 | |
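| // TestEIP161AccountRemoval checks that an empty account touched by a transaction
 | |
| // still exists before the EIP-158/161 transition block, is deleted by the first
 | |
| // post-transition touch, and is not re-created by a later touch.
 | |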
| func TestEIP161AccountRemoval(t *testing.T) {
 | |
| 	// Configure and generate a sample block chain
 | |
| 	var (
 | |
| 		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		address = crypto.PubkeyToAddress(key.PublicKey)
 | |
| 		funds   = big.NewInt(1000000000)
 | |
| 		theAddr = common.Address{1}
 | |
| 		gspec   = &Genesis{
 | |
| 			Config: &params.ChainConfig{
 | |
| 				ChainID:        big.NewInt(1),
 | |
| 				HomesteadBlock: new(big.Int),
 | |
| 				EIP155Block:    new(big.Int),
 | |
| 				EIP150Block:    new(big.Int),
 | |
| 				EIP158Block:    big.NewInt(2),
 | |
| 			},
 | |
| 			Alloc: GenesisAlloc{address: {Balance: funds}},
 | |
| 		}
 | |
| 	)
 | |
| 	_, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, block *BlockGen) {
 | |
| 		var (
 | |
| 			tx     *types.Transaction
 | |
| 			err    error
 | |
| 			signer = types.LatestSigner(gspec.Config)
 | |
| 		)
 | |
| 		switch i {
 | |
| 		case 0:
 | |
| 			tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), 21000, new(big.Int), nil), signer, key)
 | |
| 		case 1:
 | |
| 			tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), 21000, new(big.Int), nil), signer, key)
 | |
| 		case 2:
 | |
| 			tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), 21000, new(big.Int), nil), signer, key)
 | |
| 		}
 | |
| 		if err != nil {
 | |
| 			t.Fatal(err)
 | |
| 		}
 | |
| 		block.AddTx(tx)
 | |
| 	})
 | |
| 	// account must exist pre eip 161
 | |
| 	blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
 | |
| 	defer blockchain.Stop()
 | |
| 
 | |
| 	if _, err := blockchain.InsertChain(types.Blocks{blocks[0]}); err != nil {
 | |
| 		t.Fatal(err)
 | |
| 	}
 | |
| 	if st, _ := blockchain.State(); !st.Exist(theAddr) {
 | |
| 		t.Error("expected account to exist")
 | |
| 	}
 | |
| 
 | |
| 	// account needs to be deleted post eip 161
 | |
| 	if _, err := blockchain.InsertChain(types.Blocks{blocks[1]}); err != nil {
 | |
| 		t.Fatal(err)
 | |
| 	}
 | |
| 	if st, _ := blockchain.State(); st.Exist(theAddr) {
 | |
| 		t.Error("account should not exist")
 | |
| 	}
 | |
| 
 | |
| 	// account mustn't be created post eip 161
 | |
| 	if _, err := blockchain.InsertChain(types.Blocks{blocks[2]}); err != nil {
 | |
| 		t.Fatal(err)
 | |
| 	}
 | |
| 	if st, _ := blockchain.State(); st.Exist(theAddr) {
 | |
| 		t.Error("account should not exist")
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // This is a regression test (i.e. as weird as it is, don't delete it ever), which
 | |
| // tests that under weird reorg conditions the blockchain and its internal header-
 | |
| // chain return the same latest block/header.
 | |
| //
 | |
| // https://github.com/ethereum/go-ethereum/pull/15941
 | |
| func TestBlockchainHeaderchainReorgConsistency(t *testing.T) {
 | |
| 	// Generate a canonical chain to act as the main dataset
 | |
| 	engine := ethash.NewFaker()
 | |
| 	genesis := &Genesis{
 | |
| 		Config:  params.TestChainConfig,
 | |
| 		BaseFee: big.NewInt(params.InitialBaseFee),
 | |
| 	}
 | |
| 	genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
 | |
| 
 | |
| 	// Generate a bunch of fork blocks, each side forking from the canonical chain
 | |
| 	forks := make([]*types.Block, len(blocks))
 | |
| 	for i := 0; i < len(forks); i++ {
 | |
| 		parent := genesis.ToBlock()
 | |
| 		if i > 0 {
 | |
| 			parent = blocks[i-1]
 | |
| 		}
 | |
| 		fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
 | |
| 		forks[i] = fork[0]
 | |
| 	}
 | |
| 	// Import the canonical and fork chain side by side, verifying the current block
 | |
| 	// and current header consistency
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	for i := 0; i < len(blocks); i++ {
 | |
| 		if _, err := chain.InsertChain(blocks[i : i+1]); err != nil {
 | |
| 			t.Fatalf("block %d: failed to insert into chain: %v", i, err)
 | |
| 		}
 | |
| 		if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() {
 | |
| 			t.Errorf("block %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number, chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
 | |
| 		}
 | |
| 		if _, err := chain.InsertChain(forks[i : i+1]); err != nil {
 | |
| 			t.Fatalf(" fork %d: failed to insert into chain: %v", i, err)
 | |
| 		}
 | |
| 		if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() {
 | |
| 			t.Errorf(" fork %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number, chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4])
 | |
| 		}
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // Tests that importing small side forks doesn't leave junk in the trie database
 | |
| // cache (which would eventually cause memory issues).
 | |
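| // Both the canonical chain and the per-block forks span 2*TriesInMemory blocks, so
 | |
| // after dereferencing the most recent TriesInMemory roots on both sides no trie
 | |
| // nodes should remain referenced in the cache.
 | |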
| func TestTrieForkGC(t *testing.T) {
 | |
| 	// Generate a canonical chain to act as the main dataset
 | |
| 	engine := ethash.NewFaker()
 | |
| 	genesis := &Genesis{
 | |
| 		Config:  params.TestChainConfig,
 | |
| 		BaseFee: big.NewInt(params.InitialBaseFee),
 | |
| 	}
 | |
| 	genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
 | |
| 
 | |
| 	// Generate a bunch of fork blocks, each side forking from the canonical chain
 | |
| 	forks := make([]*types.Block, len(blocks))
 | |
| 	for i := 0; i < len(forks); i++ {
 | |
| 		parent := genesis.ToBlock()
 | |
| 		if i > 0 {
 | |
| 			parent = blocks[i-1]
 | |
| 		}
 | |
| 		fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
 | |
| 		forks[i] = fork[0]
 | |
| 	}
 | |
| 	// Import the canonical and fork chain side by side, forcing the trie cache to cache both
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	for i := 0; i < len(blocks); i++ {
 | |
| 		if _, err := chain.InsertChain(blocks[i : i+1]); err != nil {
 | |
| 			t.Fatalf("block %d: failed to insert into chain: %v", i, err)
 | |
| 		}
 | |
| 		if _, err := chain.InsertChain(forks[i : i+1]); err != nil {
 | |
| 			t.Fatalf("fork %d: failed to insert into chain: %v", i, err)
 | |
| 		}
 | |
| 	}
 | |
| 	// Dereference all the recent tries and ensure no past trie is left in the cache
 | |
| 	for i := 0; i < TriesInMemory; i++ {
 | |
| 		chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root())
 | |
| 		chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root())
 | |
| 	}
 | |
| 	if len(chain.stateCache.TrieDB().Nodes()) > 0 {
 | |
| 		t.Fatalf("stale tries still alive after garbage collection")
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // Tests that doing large reorgs works even if the state associated with the
 | |
| // forking point is not available any more.
 | |
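| // The canonical extension is 2*TriesInMemory blocks long, so by the time the
 | |
| // competitor fork is imported the state at the fork point has already been
 | |
| // garbage collected; the competitor blocks must not be processed until their
 | |
| // total difficulty actually exceeds the canonical chain's.
 | |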
| func TestLargeReorgTrieGC(t *testing.T) {
 | |
| 	// Generate the original common chain segment and the two competing forks
 | |
| 	engine := ethash.NewFaker()
 | |
| 	genesis := &Genesis{
 | |
| 		Config:  params.TestChainConfig,
 | |
| 		BaseFee: big.NewInt(params.InitialBaseFee),
 | |
| 	}
 | |
| 	genDb, shared, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
 | |
| 	original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) })
 | |
| 	competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) })
 | |
| 
 | |
| 	// Import the shared chain and the original canonical one
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	if _, err := chain.InsertChain(shared); err != nil {
 | |
| 		t.Fatalf("failed to insert shared chain: %v", err)
 | |
| 	}
 | |
| 	if _, err := chain.InsertChain(original); err != nil {
 | |
| 		t.Fatalf("failed to insert original chain: %v", err)
 | |
| 	}
 | |
| 	// Ensure that the state associated with the forking point is pruned away
 | |
| 	if node, _ := chain.stateCache.TrieDB().Node(shared[len(shared)-1].Root()); node != nil {
 | |
| 		t.Fatalf("common-but-old ancestor still cached")
 | |
| 	}
 | |
| 	// Import the competitor chain without exceeding the canonical's TD and ensure
 | |
| 	// we have not processed any of the blocks (protection against malicious blocks)
 | |
| 	if _, err := chain.InsertChain(competitor[:len(competitor)-2]); err != nil {
 | |
| 		t.Fatalf("failed to insert competitor chain: %v", err)
 | |
| 	}
 | |
| 	for i, block := range competitor[:len(competitor)-2] {
 | |
| 		if node, _ := chain.stateCache.TrieDB().Node(block.Root()); node != nil {
 | |
| 			t.Fatalf("competitor %d: low TD chain became processed", i)
 | |
| 		}
 | |
| 	}
 | |
| 	// Import the head of the competitor chain, triggering the reorg and ensure we
 | |
| 	// successfully reprocess all the stashed away blocks.
 | |
| 	if _, err := chain.InsertChain(competitor[len(competitor)-2:]); err != nil {
 | |
| 		t.Fatalf("failed to finalize competitor chain: %v", err)
 | |
| 	}
 | |
| 	for i, block := range competitor[:len(competitor)-TriesInMemory] {
 | |
| 		if node, _ := chain.stateCache.TrieDB().Node(block.Root()); node != nil {
 | |
| 			t.Fatalf("competitor %d: competing chain state missing", i)
 | |
| 		}
 | |
| 	}
 | |
| }
 | |
| 
 | |
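| // TestBlockchainRecovery checks that a freezer-backed chain whose head-fast-block
 | |
| // pointer has been rewound manually (simulating an interrupted snap sync) reopens
 | |
| // with the head block at genesis and the head header/snap block at the rewound position.
 | |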
| func TestBlockchainRecovery(t *testing.T) {
 | |
| 	// Configure and generate a sample block chain
 | |
| 	var (
 | |
| 		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		address = crypto.PubkeyToAddress(key.PublicKey)
 | |
| 		funds   = big.NewInt(1000000000)
 | |
| 		gspec   = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
 | |
| 	)
 | |
| 	height := uint64(1024)
 | |
| 	_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil)
 | |
| 
 | |
| 	// Import the chain as an ancient-first node and ensure all pointers are updated
 | |
| 	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create temp freezer db: %v", err)
 | |
| 	}
 | |
| 	defer ancientDb.Close()
 | |
| 	ancient, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
 | |
| 
 | |
| 	headers := make([]*types.Header, len(blocks))
 | |
| 	for i, block := range blocks {
 | |
| 		headers[i] = block.Header()
 | |
| 	}
 | |
| 	if n, err := ancient.InsertHeaderChain(headers, 1); err != nil {
 | |
| 		t.Fatalf("failed to insert header %d: %v", n, err)
 | |
| 	}
 | |
| 	if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil {
 | |
| 		t.Fatalf("failed to insert receipt %d: %v", n, err)
 | |
| 	}
 | |
| 	rawdb.WriteLastPivotNumber(ancientDb, blocks[len(blocks)-1].NumberU64()) // Force fast sync behavior
 | |
| 	ancient.Stop()
 | |
| 
 | |
| 	// Destroy head fast block manually
 | |
| 	midBlock := blocks[len(blocks)/2]
 | |
| 	rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash())
 | |
| 
 | |
| 	// Reopen broken blockchain again
 | |
| 	ancient, _ = NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
 | |
| 	defer ancient.Stop()
 | |
| 	if num := ancient.CurrentBlock().Number.Uint64(); num != 0 {
 | |
| 		t.Errorf("head block mismatch: have #%v, want #%v", num, 0)
 | |
| 	}
 | |
| 	if num := ancient.CurrentSnapBlock().Number.Uint64(); num != midBlock.NumberU64() {
 | |
| 		t.Errorf("head snap-block mismatch: have #%v, want #%v", num, midBlock.NumberU64())
 | |
| 	}
 | |
| 	if num := ancient.CurrentHeader().Number.Uint64(); num != midBlock.NumberU64() {
 | |
| 		t.Errorf("head header mismatch: have #%v, want #%v", num, midBlock.NumberU64())
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // This test checks that InsertReceiptChain will roll back correctly when attempting to insert a side chain.
 | |
| func TestInsertReceiptChainRollback(t *testing.T) {
 | |
| 	// Generate forked chain. The returned BlockChain object is used to process the side chain blocks.
 | |
| 	tmpChain, sideblocks, canonblocks, gspec, err := getLongAndShortChains()
 | |
| 	if err != nil {
 | |
| 		t.Fatal(err)
 | |
| 	}
 | |
| 	defer tmpChain.Stop()
 | |
| 	// Get the side chain receipts.
 | |
| 	if _, err := tmpChain.InsertChain(sideblocks); err != nil {
 | |
| 		t.Fatal("processing side chain failed:", err)
 | |
| 	}
 | |
| 	t.Log("sidechain head:", tmpChain.CurrentBlock().Number, tmpChain.CurrentBlock().Hash())
 | |
| 	sidechainReceipts := make([]types.Receipts, len(sideblocks))
 | |
| 	for i, block := range sideblocks {
 | |
| 		sidechainReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
 | |
| 	}
 | |
| 	// Get the canon chain receipts.
 | |
| 	if _, err := tmpChain.InsertChain(canonblocks); err != nil {
 | |
| 		t.Fatal("processing canon chain failed:", err)
 | |
| 	}
 | |
| 	t.Log("canon head:", tmpChain.CurrentBlock().Number, tmpChain.CurrentBlock().Hash())
 | |
| 	canonReceipts := make([]types.Receipts, len(canonblocks))
 | |
| 	for i, block := range canonblocks {
 | |
| 		canonReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash())
 | |
| 	}
 | |
| 
 | |
| 	// Set up a BlockChain that uses the ancient store.
 | |
| 	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create temp freezer db: %v", err)
 | |
| 	}
 | |
| 	defer ancientDb.Close()
 | |
| 
 | |
| 	ancientChain, _ := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
 | |
| 	defer ancientChain.Stop()
 | |
| 
 | |
| 	// Import the canonical header chain.
 | |
| 	canonHeaders := make([]*types.Header, len(canonblocks))
 | |
| 	for i, block := range canonblocks {
 | |
| 		canonHeaders[i] = block.Header()
 | |
| 	}
 | |
| 	if _, err = ancientChain.InsertHeaderChain(canonHeaders, 1); err != nil {
 | |
| 		t.Fatal("can't import canon headers:", err)
 | |
| 	}
 | |
| 
 | |
| 	// Try to insert blocks/receipts of the side chain.
 | |
| 	_, err = ancientChain.InsertReceiptChain(sideblocks, sidechainReceipts, uint64(len(sideblocks)))
 | |
| 	if err == nil {
 | |
| 		t.Fatal("expected error from InsertReceiptChain.")
 | |
| 	}
 | |
| 	if ancientChain.CurrentSnapBlock().Number.Uint64() != 0 {
 | |
| 		t.Fatalf("failed to rollback ancient data, want %d, have %d", 0, ancientChain.CurrentSnapBlock().Number)
 | |
| 	}
 | |
| 	if frozen, err := ancientChain.db.Ancients(); err != nil || frozen != 1 {
 | |
| 		t.Fatalf("failed to truncate ancient data, frozen index is %d", frozen)
 | |
| 	}
 | |
| 
 | |
| 	// Insert blocks/receipts of the canonical chain.
 | |
| 	_, err = ancientChain.InsertReceiptChain(canonblocks, canonReceipts, uint64(len(canonblocks)))
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("can't import canon chain receipts: %v", err)
 | |
| 	}
 | |
| 	if ancientChain.CurrentSnapBlock().Number.Uint64() != canonblocks[len(canonblocks)-1].NumberU64() {
 | |
| 		t.Fatalf("failed to insert ancient receipt chain after rollback")
 | |
| 	}
 | |
| 	if frozen, _ := ancientChain.db.Ancients(); frozen != uint64(len(canonblocks))+1 {
 | |
| 		t.Fatalf("wrong ancients count %d", frozen)
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // Tests that importing a very large side fork, which is longer than the canon chain
 | |
| // but where the per-block difficulty is kept low, does not overtake the 'canon'
 | |
| // chain until it has passed the canon tip by about 200 blocks.
 | |
| //
 | |
| // Details at:
 | |
| //   - https://github.com/ethereum/go-ethereum/issues/18977
 | |
| //   - https://github.com/ethereum/go-ethereum/pull/18988
 | |
| func TestLowDiffLongChain(t *testing.T) {
 | |
| 	// Generate a canonical chain to act as the main dataset
 | |
| 	engine := ethash.NewFaker()
 | |
| 	genesis := &Genesis{
 | |
| 		Config:  params.TestChainConfig,
 | |
| 		BaseFee: big.NewInt(params.InitialBaseFee),
 | |
| 	}
 | |
| 	// We must use a pretty long chain to ensure that the fork doesn't overtake us
 | |
| 	// until after at least 128 blocks post tip
 | |
| 	genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*TriesInMemory, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{1})
 | |
| 		b.OffsetTime(-9)
 | |
| 	})
 | |
| 
 | |
| 	// Import the canonical chain
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.stopWithoutSaving()
 | |
| 
 | |
| 	if n, err := chain.InsertChain(blocks); err != nil {
 | |
| 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| 	// Generate fork chain, starting from an early block
 | |
| 	parent := blocks[10]
 | |
| 	fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*TriesInMemory, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{2})
 | |
| 	})
 | |
| 
 | |
| 	// And now import the fork
 | |
| 	if i, err := chain.InsertChain(fork); err != nil {
 | |
| 		t.Fatalf("block %d: failed to insert into chain: %v", i, err)
 | |
| 	}
 | |
| 	head := chain.CurrentBlock()
 | |
| 	if got := fork[len(fork)-1].Hash(); got != head.Hash() {
 | |
| 		t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
 | |
| 	}
 | |
| 	// Sanity check that all the canonical numbers are present
 | |
| 	header := chain.CurrentHeader()
 | |
| 	for number := head.Number.Uint64(); number > 0; number-- {
 | |
| 		if hash := chain.GetHeaderByNumber(number).Hash(); hash != header.Hash() {
 | |
| 			t.Fatalf("header %d: canonical hash mismatch: have %x, want %x", number, hash, header.Hash())
 | |
| 		}
 | |
| 		header = chain.GetHeader(header.ParentHash, number-1)
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // Tests that importing a sidechain (S), where
 | |
| // - S is sidechain, containing blocks [Sn...Sm]
 | |
| // - C is canon chain, containing blocks [G..Cn..Cm]
 | |
| // - A common ancestor is placed at prune-point + blocksBetweenCommonAncestorAndPruneblock
 | |
| // - The sidechain S is prepended with numCanonBlocksInSidechain blocks from the canon chain
 | |
| //
 | |
| // The mergePoint can take the following values:
 | |
| // -1: the transition won't happen
 | |
| // 0:  the transition happens since genesis
 | |
| // 1:  the transition happens after some chain segments
 | |
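| //
 | |
| // For example:
 | |
| //	testSideImport(t, 3, 3, -1) // 3 canon blocks prepended, ancestor 3 blocks above the prune point, no merge
 | |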
| func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommonAncestorAndPruneblock int, mergePoint int) {
 | |
| 	// Generate a canonical chain to act as the main dataset
 | |
| 	chainConfig := *params.TestChainConfig
 | |
| 	var (
 | |
| 		merger = consensus.NewMerger(rawdb.NewMemoryDatabase())
 | |
| 		engine = beacon.New(ethash.NewFaker())
 | |
| 		key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		addr   = crypto.PubkeyToAddress(key.PublicKey)
 | |
| 		nonce  = uint64(0)
 | |
| 
 | |
| 		gspec = &Genesis{
 | |
| 			Config:  &chainConfig,
 | |
| 			Alloc:   GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},
 | |
| 			BaseFee: big.NewInt(params.InitialBaseFee),
 | |
| 		}
 | |
| 		signer     = types.LatestSigner(gspec.Config)
 | |
| 		mergeBlock = math.MaxInt32
 | |
| 	)
 | |
| 	// Generate and import the canonical chain
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	// Activate the transition since genesis if required
 | |
| 	if mergePoint == 0 {
 | |
| 		mergeBlock = 0
 | |
| 		merger.ReachTTD()
 | |
| 		merger.FinalizePoS()
 | |
| 
 | |
| 		// Set the terminal total difficulty in the config
 | |
| 		gspec.Config.TerminalTotalDifficulty = big.NewInt(0)
 | |
| 	}
 | |
| 	genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
 | |
| 		tx, err := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("deadbeef"), big.NewInt(100), 21000, big.NewInt(int64(i+1)*params.GWei), nil), signer, key)
 | |
| 		if err != nil {
 | |
| 			t.Fatalf("failed to create tx: %v", err)
 | |
| 		}
 | |
| 		gen.AddTx(tx)
 | |
| 		if int(gen.header.Number.Uint64()) >= mergeBlock {
 | |
| 			gen.SetPoS()
 | |
| 		}
 | |
| 		nonce++
 | |
| 	})
 | |
| 	if n, err := chain.InsertChain(blocks); err != nil {
 | |
| 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| 
 | |
| 	lastPrunedIndex := len(blocks) - TriesInMemory - 1
 | |
| 	lastPrunedBlock := blocks[lastPrunedIndex]
 | |
| 	firstNonPrunedBlock := blocks[len(blocks)-TriesInMemory]
 | |
| 
 | |
| 	// Verify pruning of lastPrunedBlock
 | |
| 	if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
 | |
| 		t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64())
 | |
| 	}
 | |
| 	// Verify firstNonPrunedBlock is not pruned
 | |
| 	if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
 | |
| 		t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64())
 | |
| 	}
 | |
| 
 | |
| 	// Activate the transition in the middle of the chain
 | |
| 	if mergePoint == 1 {
 | |
| 		merger.ReachTTD()
 | |
| 		merger.FinalizePoS()
 | |
| 		// Set the terminal total difficulty in the config
 | |
| 		ttd := big.NewInt(int64(len(blocks)))
 | |
| 		ttd.Mul(ttd, params.GenesisDifficulty)
 | |
| 		gspec.Config.TerminalTotalDifficulty = ttd
 | |
| 		mergeBlock = len(blocks)
 | |
| 	}
 | |
| 
 | |
| 	// Generate the sidechain
 | |
| 	// The first block should be a known block and the one after it a pruned block,
 | |
| 	// i.e. canon(pruned), side, side...
 | |
| 
 | |
| 	// Generate fork chain, make it longer than canon
 | |
| 	parentIndex := lastPrunedIndex + blocksBetweenCommonAncestorAndPruneblock
 | |
| 	parent := blocks[parentIndex]
 | |
| 	fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{2})
 | |
| 		if int(b.header.Number.Uint64()) >= mergeBlock {
 | |
| 			b.SetPoS()
 | |
| 		}
 | |
| 	})
 | |
| 	// Prepend the parent(s)
 | |
| 	var sidechain []*types.Block
 | |
| 	for i := numCanonBlocksInSidechain; i > 0; i-- {
 | |
| 		sidechain = append(sidechain, blocks[parentIndex+1-i])
 | |
| 	}
 | |
| 	sidechain = append(sidechain, fork...)
 | |
| 	n, err := chain.InsertChain(sidechain)
 | |
| 	if err != nil {
 | |
| 		t.Errorf("Got error, %v number %d - %d", err, sidechain[n].NumberU64(), n)
 | |
| 	}
 | |
| 	head := chain.CurrentBlock()
 | |
| 	if got := fork[len(fork)-1].Hash(); got != head.Hash() {
 | |
| 		t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // Tests that importing a sidechain (S), where
 | |
| //   - S is sidechain, containing blocks [Sn...Sm]
 | |
| //   - C is canon chain, containing blocks [G..Cn..Cm]
 | |
| //   - The common ancestor Cc is pruned
 | |
| //   - The first block in S: Sn, is == Cn
 | |
| //
 | |
| // That is: the sidechain for import contains some blocks already present in canon chain.
 | |
| // So the blocks are:
 | |
| //
 | |
| //	[ Cn, Cn+1, Cc, Sn+3 ... Sm]
 | |
| //	^    ^    ^  pruned
 | |
| func TestPrunedImportSide(t *testing.T) {
 | |
| 	//glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
 | |
| 	//glogger.Verbosity(3)
 | |
| 	//log.Root().SetHandler(log.Handler(glogger))
 | |
| 	testSideImport(t, 3, 3, -1)
 | |
| 	testSideImport(t, 3, -3, -1)
 | |
| 	testSideImport(t, 10, 0, -1)
 | |
| 	testSideImport(t, 1, 10, -1)
 | |
| 	testSideImport(t, 1, -10, -1)
 | |
| }
 | |
| 
 | |
| func TestPrunedImportSideWithMerging(t *testing.T) {
 | |
| 	//glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
 | |
| 	//glogger.Verbosity(3)
 | |
| 	//log.Root().SetHandler(log.Handler(glogger))
 | |
| 	testSideImport(t, 3, 3, 0)
 | |
| 	testSideImport(t, 3, -3, 0)
 | |
| 	testSideImport(t, 10, 0, 0)
 | |
| 	testSideImport(t, 1, 10, 0)
 | |
| 	testSideImport(t, 1, -10, 0)
 | |
| 
 | |
| 	testSideImport(t, 3, 3, 1)
 | |
| 	testSideImport(t, 3, -3, 1)
 | |
| 	testSideImport(t, 10, 0, 1)
 | |
| 	testSideImport(t, 1, 10, 1)
 | |
| 	testSideImport(t, 1, -10, 1)
 | |
| }
 | |
| 
 | |
| func TestInsertKnownHeaders(t *testing.T)      { testInsertKnownChainData(t, "headers") }
 | |
| func TestInsertKnownReceiptChain(t *testing.T) { testInsertKnownChainData(t, "receipts") }
 | |
| func TestInsertKnownBlocks(t *testing.T)       { testInsertKnownChainData(t, "blocks") }
 | |
| 
 | |
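| // testInsertKnownChainData imports the same chain data twice (as headers, receipts or
 | |
| // full blocks, depending on typ) and verifies that re-importing "known" data, with or
 | |
| // without an intermediate SetHead rollback, leaves the head pointing at the expected
 | |
| // heaviest chain.
 | |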
| func testInsertKnownChainData(t *testing.T, typ string) {
 | |
| 	engine := ethash.NewFaker()
 | |
| 	genesis := &Genesis{
 | |
| 		Config:  params.TestChainConfig,
 | |
| 		BaseFee: big.NewInt(params.InitialBaseFee),
 | |
| 	}
 | |
| 	genDb, blocks, receipts := GenerateChainWithGenesis(genesis, engine, 32, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
 | |
| 
 | |
| 	// A longer chain but total difficulty is lower.
 | |
| 	blocks2, receipts2 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 65, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) })
 | |
| 
 | |
| 	// A shorter chain but total difficulty is higher.
 | |
| 	blocks3, receipts3 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 64, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{1})
 | |
| 		b.OffsetTime(-9) // A higher difficulty
 | |
| 	})
 | |
| 	// Import the shared chain and the original canonical one
 | |
| 	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create temp freezer db: %v", err)
 | |
| 	}
 | |
| 	defer chaindb.Close()
 | |
| 
 | |
| 	chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	var (
 | |
| 		inserter func(blocks []*types.Block, receipts []types.Receipts) error
 | |
| 		asserter func(t *testing.T, block *types.Block)
 | |
| 	)
 | |
| 	if typ == "headers" {
 | |
| 		inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
 | |
| 			headers := make([]*types.Header, 0, len(blocks))
 | |
| 			for _, block := range blocks {
 | |
| 				headers = append(headers, block.Header())
 | |
| 			}
 | |
| 			_, err := chain.InsertHeaderChain(headers, 1)
 | |
| 			return err
 | |
| 		}
 | |
| 		asserter = func(t *testing.T, block *types.Block) {
 | |
| 			if chain.CurrentHeader().Hash() != block.Hash() {
 | |
| 				t.Fatalf("current head header mismatch, have %v, want %v", chain.CurrentHeader().Hash().Hex(), block.Hash().Hex())
 | |
| 			}
 | |
| 		}
 | |
| 	} else if typ == "receipts" {
 | |
| 		inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
 | |
| 			headers := make([]*types.Header, 0, len(blocks))
 | |
| 			for _, block := range blocks {
 | |
| 				headers = append(headers, block.Header())
 | |
| 			}
 | |
| 			_, err := chain.InsertHeaderChain(headers, 1)
 | |
| 			if err != nil {
 | |
| 				return err
 | |
| 			}
 | |
| 			_, err = chain.InsertReceiptChain(blocks, receipts, 0)
 | |
| 			return err
 | |
| 		}
 | |
| 		asserter = func(t *testing.T, block *types.Block) {
 | |
| 			if chain.CurrentSnapBlock().Hash() != block.Hash() {
 | |
| 				t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentSnapBlock().Hash().Hex(), block.Hash().Hex())
 | |
| 			}
 | |
| 		}
 | |
| 	} else {
 | |
| 		inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
 | |
| 			_, err := chain.InsertChain(blocks)
 | |
| 			return err
 | |
| 		}
 | |
| 		asserter = func(t *testing.T, block *types.Block) {
 | |
| 			if chain.CurrentBlock().Hash() != block.Hash() {
 | |
| 				t.Fatalf("current head block mismatch, have %v, want %v", chain.CurrentBlock().Hash().Hex(), block.Hash().Hex())
 | |
| 			}
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	if err := inserter(blocks, receipts); err != nil {
 | |
| 		t.Fatalf("failed to insert chain data: %v", err)
 | |
| 	}
 | |
| 
 | |
| 	// Reimport the chain data again. All of the imported
 | |
| 	// chain data is regarded as "known" data.
 | |
| 	if err := inserter(blocks, receipts); err != nil {
 | |
| 		t.Fatalf("failed to insert chain data: %v", err)
 | |
| 	}
 | |
| 	asserter(t, blocks[len(blocks)-1])
 | |
| 
 | |
| 	// Import a long canonical chain with some known data as prefix.
 | |
| 	rollback := blocks[len(blocks)/2].NumberU64()
 | |
| 
 | |
| 	chain.SetHead(rollback - 1)
 | |
| 	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
 | |
| 		t.Fatalf("failed to insert chain data: %v", err)
 | |
| 	}
 | |
| 	asserter(t, blocks2[len(blocks2)-1])
 | |
| 
 | |
| 	// Import a shorter but heavier (higher total difficulty) chain with some known data as prefix.
 | |
| 	if err := inserter(append(blocks, blocks3...), append(receipts, receipts3...)); err != nil {
 | |
| 		t.Fatalf("failed to insert chain data: %v", err)
 | |
| 	}
 | |
| 	asserter(t, blocks3[len(blocks3)-1])
 | |
| 
 | |
| 	// Import a longer but lower total difficulty chain with some known data as prefix.
 | |
| 	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
 | |
| 		t.Fatalf("failed to insert chain data: %v", err)
 | |
| 	}
 | |
| 	// The head shouldn't change.
 | |
| 	asserter(t, blocks3[len(blocks3)-1])
 | |
| 
 | |
| 	// Rollback the heavier chain and re-insert the longer chain again
 | |
| 	chain.SetHead(rollback - 1)
 | |
| 	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
 | |
| 		t.Fatalf("failed to insert chain data: %v", err)
 | |
| 	}
 | |
| 	asserter(t, blocks2[len(blocks2)-1])
 | |
| }
 | |
| 
 | |
| func TestInsertKnownHeadersWithMerging(t *testing.T) {
 | |
| 	testInsertKnownChainDataWithMerging(t, "headers", 0)
 | |
| }
 | |
| func TestInsertKnownReceiptChainWithMerging(t *testing.T) {
 | |
| 	testInsertKnownChainDataWithMerging(t, "receipts", 0)
 | |
| }
 | |
| func TestInsertKnownBlocksWithMerging(t *testing.T) {
 | |
| 	testInsertKnownChainDataWithMerging(t, "blocks", 0)
 | |
| }
 | |
| func TestInsertKnownHeadersAfterMerging(t *testing.T) {
 | |
| 	testInsertKnownChainDataWithMerging(t, "headers", 1)
 | |
| }
 | |
| func TestInsertKnownReceiptChainAfterMerging(t *testing.T) {
 | |
| 	testInsertKnownChainDataWithMerging(t, "receipts", 1)
 | |
| }
 | |
| func TestInsertKnownBlocksAfterMerging(t *testing.T) {
 | |
| 	testInsertKnownChainDataWithMerging(t, "blocks", 1)
 | |
| }
 | |
| 
 | |
| // mergeHeight can take the following values:
 | |
| // 0: the merge is applied since genesis
 | |
| // 1: the merge is applied after the first chain segment
 | |
| func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight int) {
 | |
| 	// Copy the TestChainConfig so we can modify it during tests
 | |
| 	chainConfig := *params.TestChainConfig
 | |
| 	var (
 | |
| 		genesis = &Genesis{
 | |
| 			BaseFee: big.NewInt(params.InitialBaseFee),
 | |
| 			Config:  &chainConfig,
 | |
| 		}
 | |
| 		engine     = beacon.New(ethash.NewFaker())
 | |
| 		mergeBlock = uint64(math.MaxUint64)
 | |
| 	)
 | |
| 	// Apply merging since genesis
 | |
| 	if mergeHeight == 0 {
 | |
| 		genesis.Config.TerminalTotalDifficulty = big.NewInt(0)
 | |
| 		mergeBlock = uint64(0)
 | |
| 	}
 | |
| 
 | |
| 	genDb, blocks, receipts := GenerateChainWithGenesis(genesis, engine, 32,
 | |
| 		func(i int, b *BlockGen) {
 | |
| 			if b.header.Number.Uint64() >= mergeBlock {
 | |
| 				b.SetPoS()
 | |
| 			}
 | |
| 			b.SetCoinbase(common.Address{1})
 | |
| 		})
 | |
| 
 | |
| 	// Apply merging after the first segment
 | |
| 	if mergeHeight == 1 {
 | |
| 		// TTD is genesis diff + blocks
 | |
| 		ttd := big.NewInt(1 + int64(len(blocks)))
 | |
| 		ttd.Mul(ttd, params.GenesisDifficulty)
 | |
| 		genesis.Config.TerminalTotalDifficulty = ttd
 | |
| 		mergeBlock = uint64(len(blocks))
 | |
| 	}
 | |
| 	// Longer chain and shorter chain
 | |
| 	blocks2, receipts2 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 65, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{1})
 | |
| 		if b.header.Number.Uint64() >= mergeBlock {
 | |
| 			b.SetPoS()
 | |
| 		}
 | |
| 	})
 | |
| 	blocks3, receipts3 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 64, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{1})
 | |
| 		b.OffsetTime(-9) // Time shifted, difficulty shouldn't be changed
 | |
| 		if b.header.Number.Uint64() >= mergeBlock {
 | |
| 			b.SetPoS()
 | |
| 		}
 | |
| 	})
 | |
| 	// Import the shared chain and the original canonical one
 | |
| 	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create temp freezer db: %v", err)
 | |
| 	}
 | |
| 	defer chaindb.Close()
 | |
| 
 | |
| 	chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	var (
 | |
| 		inserter func(blocks []*types.Block, receipts []types.Receipts) error
 | |
| 		asserter func(t *testing.T, block *types.Block)
 | |
| 	)
 | |
| 	if typ == "headers" {
 | |
| 		inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
 | |
| 			headers := make([]*types.Header, 0, len(blocks))
 | |
| 			for _, block := range blocks {
 | |
| 				headers = append(headers, block.Header())
 | |
| 			}
 | |
| 			i, err := chain.InsertHeaderChain(headers, 1)
 | |
| 			if err != nil {
 | |
| 				return fmt.Errorf("index %d, number %d: %w", i, headers[i].Number, err)
 | |
| 			}
 | |
| 			return err
 | |
| 		}
 | |
| 		asserter = func(t *testing.T, block *types.Block) {
 | |
| 			if chain.CurrentHeader().Hash() != block.Hash() {
 | |
| 				t.Fatalf("current head header mismatch, have %v, want %v", chain.CurrentHeader().Hash().Hex(), block.Hash().Hex())
 | |
| 			}
 | |
| 		}
 | |
| 	} else if typ == "receipts" {
 | |
| 		inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
 | |
| 			headers := make([]*types.Header, 0, len(blocks))
 | |
| 			for _, block := range blocks {
 | |
| 				headers = append(headers, block.Header())
 | |
| 			}
 | |
| 			i, err := chain.InsertHeaderChain(headers, 1)
 | |
| 			if err != nil {
 | |
| 				return fmt.Errorf("index %d: %w", i, err)
 | |
| 			}
 | |
| 			_, err = chain.InsertReceiptChain(blocks, receipts, 0)
 | |
| 			return err
 | |
| 		}
 | |
| 		asserter = func(t *testing.T, block *types.Block) {
 | |
| 			if chain.CurrentSnapBlock().Hash() != block.Hash() {
 | |
| 				t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentSnapBlock().Hash().Hex(), block.Hash().Hex())
 | |
| 			}
 | |
| 		}
 | |
| 	} else {
 | |
| 		inserter = func(blocks []*types.Block, receipts []types.Receipts) error {
 | |
| 			i, err := chain.InsertChain(blocks)
 | |
| 			if err != nil {
 | |
| 				return fmt.Errorf("index %d: %w", i, err)
 | |
| 			}
 | |
| 			return nil
 | |
| 		}
 | |
| 		asserter = func(t *testing.T, block *types.Block) {
 | |
| 			if chain.CurrentBlock().Hash() != block.Hash() {
 | |
| 				t.Fatalf("current head block mismatch, have %v, want %v", chain.CurrentBlock().Hash().Hex(), block.Hash().Hex())
 | |
| 			}
 | |
| 		}
 | |
| 	}
 | |
| 	if err := inserter(blocks, receipts); err != nil {
 | |
| 		t.Fatalf("failed to insert chain data: %v", err)
 | |
| 	}
 | |
| 
 | |
| 	// Reimport the chain data again. All of the imported
 | |
| 	// chain data is regarded as "known" data.
 | |
| 	if err := inserter(blocks, receipts); err != nil {
 | |
| 		t.Fatalf("failed to insert chain data: %v", err)
 | |
| 	}
 | |
| 	asserter(t, blocks[len(blocks)-1])
 | |
| 
 | |
| 	// Import a long canonical chain with some known data as prefix.
 | |
| 	rollback := blocks[len(blocks)/2].NumberU64()
 | |
| 	chain.SetHead(rollback - 1)
 | |
| 	if err := inserter(blocks, receipts); err != nil {
 | |
| 		t.Fatalf("failed to insert chain data: %v", err)
 | |
| 	}
 | |
| 	asserter(t, blocks[len(blocks)-1])
 | |
| 
 | |
| 	// Import a longer chain with some known data as prefix.
 | |
| 	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
 | |
| 		t.Fatalf("failed to insert chain data: %v", err)
 | |
| 	}
 | |
| 	asserter(t, blocks2[len(blocks2)-1])
 | |
| 
 | |
| 	// Import a shorter chain with some known data as prefix.
 | |
| 	// The reorg is expected since the fork choice rule is
 | |
| 	// already changed.
 | |
| 	if err := inserter(append(blocks, blocks3...), append(receipts, receipts3...)); err != nil {
 | |
| 		t.Fatalf("failed to insert chain data: %v", err)
 | |
| 	}
 | |
| 	// The head should now be the tip of the most recently imported (shorter) chain.
 | |
| 	asserter(t, blocks3[len(blocks3)-1])
 | |
| 
 | |
| 	// Reimport the longer chain again, the reorg is still expected
 | |
| 	chain.SetHead(rollback - 1)
 | |
| 	if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil {
 | |
| 		t.Fatalf("failed to insert chain data: %v", err)
 | |
| 	}
 | |
| 	asserter(t, blocks2[len(blocks2)-1])
 | |
| }
 | |
| 
 | |
| // getLongAndShortChains returns two chains: A is longer, B is heavier.
 | |
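| // Both chains share the first four blocks; the heavy chain forks off after block 4 and
 | |
| // uses a negative time offset, which raises its per-block difficulty enough to outweigh
 | |
| // the longer chain despite having fewer blocks.
 | |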
| func getLongAndShortChains() (*BlockChain, []*types.Block, []*types.Block, *Genesis, error) {
 | |
| 	// Generate a canonical chain to act as the main dataset
 | |
| 	engine := ethash.NewFaker()
 | |
| 	genesis := &Genesis{
 | |
| 		Config:  params.TestChainConfig,
 | |
| 		BaseFee: big.NewInt(params.InitialBaseFee),
 | |
| 	}
 | |
| 	// Generate and import the canonical chain. No time offset is applied here, so
 | |
| 	// its per-block difficulty stays low compared to the heavy chain generated below.
 | |
| 	genDb, longChain, _ := GenerateChainWithGenesis(genesis, engine, 80, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{1})
 | |
| 	})
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		return nil, nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	// Generate fork chain, make it shorter than canon, with common ancestor pretty early
 | |
| 	parentIndex := 3
 | |
| 	parent := longChain[parentIndex]
 | |
| 	heavyChainExt, _ := GenerateChain(genesis.Config, parent, engine, genDb, 75, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{2})
 | |
| 		b.OffsetTime(-9)
 | |
| 	})
 | |
| 	var heavyChain []*types.Block
 | |
| 	heavyChain = append(heavyChain, longChain[:parentIndex+1]...)
 | |
| 	heavyChain = append(heavyChain, heavyChainExt...)
 | |
| 
 | |
| 	// Verify that the test is sane
 | |
| 	var (
 | |
| 		longerTd  = new(big.Int)
 | |
| 		shorterTd = new(big.Int)
 | |
| 	)
 | |
| 	for index, b := range longChain {
 | |
| 		longerTd.Add(longerTd, b.Difficulty())
 | |
| 		if index <= parentIndex {
 | |
| 			shorterTd.Add(shorterTd, b.Difficulty())
 | |
| 		}
 | |
| 	}
 | |
| 	for _, b := range heavyChain {
 | |
| 		shorterTd.Add(shorterTd, b.Difficulty())
 | |
| 	}
 | |
| 	if shorterTd.Cmp(longerTd) <= 0 {
 | |
| 		return nil, nil, nil, nil, fmt.Errorf("test is moot, heavyChain td (%v) must be larger than canon td (%v)", shorterTd, longerTd)
 | |
| 	}
 | |
| 	longerNum := longChain[len(longChain)-1].NumberU64()
 | |
| 	shorterNum := heavyChain[len(heavyChain)-1].NumberU64()
 | |
| 	if shorterNum >= longerNum {
 | |
| 		return nil, nil, nil, nil, fmt.Errorf("test is moot, heavyChain num (%v) must be lower than canon num (%v)", shorterNum, longerNum)
 | |
| 	}
 | |
| 	return chain, longChain, heavyChain, genesis, nil
 | |
| }
 | |
| 
 | |
| // TestReorgToShorterRemovesCanonMapping tests that if we
 | |
| // 1. Have a chain [0 ... N .. X]
 | |
| // 2. Reorg to shorter but heavier chain [0 ... N ... Y]
 | |
| // 3. Then there should be no canon mapping for the block at height X
 | |
| // 4. The forked block should still be retrievable by hash
 | |
| func TestReorgToShorterRemovesCanonMapping(t *testing.T) {
 | |
| 	chain, canonblocks, sideblocks, _, err := getLongAndShortChains()
 | |
| 	if err != nil {
 | |
| 		t.Fatal(err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	if n, err := chain.InsertChain(canonblocks); err != nil {
 | |
| 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| 	canonNum := chain.CurrentBlock().Number.Uint64()
 | |
| 	canonHash := chain.CurrentBlock().Hash()
 | |
| 	_, err = chain.InsertChain(sideblocks)
 | |
| 	if err != nil {
 | |
| 		t.Errorf("Got error, %v", err)
 | |
| 	}
 | |
| 	head := chain.CurrentBlock()
 | |
| 	if got := sideblocks[len(sideblocks)-1].Hash(); got != head.Hash() {
 | |
| 		t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
 | |
| 	}
 | |
| 	// We have now inserted a sidechain.
 | |
| 	if blockByNum := chain.GetBlockByNumber(canonNum); blockByNum != nil {
 | |
| 		t.Errorf("expected block to be gone: %v", blockByNum.NumberU64())
 | |
| 	}
 | |
| 	if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil {
 | |
| 		t.Errorf("expected header to be gone: %v", headerByNum.Number)
 | |
| 	}
 | |
| 	if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil {
 | |
| 		t.Errorf("expected block to be present: %x", blockByHash.Hash())
 | |
| 	}
 | |
| 	if headerByHash := chain.GetHeaderByHash(canonHash); headerByHash == nil {
 | |
| 		t.Errorf("expected header to be present: %x", headerByHash.Hash())
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // TestReorgToShorterRemovesCanonMappingHeaderChain is the same scenario
 | |
| // as TestReorgToShorterRemovesCanonMapping, but applied on headerchain
 | |
| // imports -- that is, for fast sync
 | |
| func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) {
 | |
| 	chain, canonblocks, sideblocks, _, err := getLongAndShortChains()
 | |
| 	if err != nil {
 | |
| 		t.Fatal(err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	// Convert into headers
 | |
| 	canonHeaders := make([]*types.Header, len(canonblocks))
 | |
| 	for i, block := range canonblocks {
 | |
| 		canonHeaders[i] = block.Header()
 | |
| 	}
 | |
| 	if n, err := chain.InsertHeaderChain(canonHeaders, 0); err != nil {
 | |
| 		t.Fatalf("header %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| 	canonNum := chain.CurrentHeader().Number.Uint64()
 | |
| 	canonHash := chain.CurrentBlock().Hash()
 | |
| 	sideHeaders := make([]*types.Header, len(sideblocks))
 | |
| 	for i, block := range sideblocks {
 | |
| 		sideHeaders[i] = block.Header()
 | |
| 	}
 | |
| 	if n, err := chain.InsertHeaderChain(sideHeaders, 0); err != nil {
 | |
| 		t.Fatalf("header %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| 	head := chain.CurrentHeader()
 | |
| 	if got := sideblocks[len(sideblocks)-1].Hash(); got != head.Hash() {
 | |
| 		t.Fatalf("head wrong, expected %x got %x", head.Hash(), got)
 | |
| 	}
 | |
| 	// We have now inserted a sidechain.
 | |
| 	if blockByNum := chain.GetBlockByNumber(canonNum); blockByNum != nil {
 | |
| 		t.Errorf("expected block to be gone: %v", blockByNum.NumberU64())
 | |
| 	}
 | |
| 	if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil {
 | |
| 		t.Errorf("expected header to be gone: %v", headerByNum.Number.Uint64())
 | |
| 	}
 | |
| 	if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil {
 | |
| 		t.Errorf("expected block to be present: %x", blockByHash.Hash())
 | |
| 	}
 | |
| 	if headerByHash := chain.GetHeaderByHash(canonHash); headerByHash == nil {
 | |
| 		t.Errorf("expected header to be present: %x", headerByHash.Hash())
 | |
| 	}
 | |
| }
 | |
| 
 | |
| func TestTransactionIndices(t *testing.T) {
 | |
| 	// Configure and generate a sample block chain
 | |
| 	var (
 | |
| 		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		address = crypto.PubkeyToAddress(key.PublicKey)
 | |
| 		funds   = big.NewInt(100000000000000000)
 | |
| 		gspec   = &Genesis{
 | |
| 			Config:  params.TestChainConfig,
 | |
| 			Alloc:   GenesisAlloc{address: {Balance: funds}},
 | |
| 			BaseFee: big.NewInt(params.InitialBaseFee),
 | |
| 		}
 | |
| 		signer = types.LatestSigner(gspec.Config)
 | |
| 	)
 | |
| 	_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 128, func(i int, block *BlockGen) {
 | |
| 		tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
 | |
| 		if err != nil {
 | |
| 			panic(err)
 | |
| 		}
 | |
| 		block.AddTx(tx)
 | |
| 	})
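| 	// check asserts that the stored tx-index tail matches the expectation and that
| 	// lookup entries exist exactly for transactions in blocks at or above the tail.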
 | |
| 
 | |
| 	check := func(tail *uint64, chain *BlockChain) {
 | |
| 		stored := rawdb.ReadTxIndexTail(chain.db)
 | |
| 		if tail == nil && stored != nil {
 | |
| 			t.Fatalf("Oldest indexded block mismatch, want nil, have %d", *stored)
 | |
| 		}
 | |
| 		if tail != nil && *stored != *tail {
 | |
| 			t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored)
 | |
| 		}
 | |
| 		if tail != nil {
 | |
| 			for i := *tail; i <= chain.CurrentBlock().Number.Uint64(); i++ {
 | |
| 				block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
 | |
| 				if block.Transactions().Len() == 0 {
 | |
| 					continue
 | |
| 				}
 | |
| 				for _, tx := range block.Transactions() {
 | |
| 					if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
 | |
| 						t.Fatalf("Missing transaction index, number %d hash %s", i, tx.Hash().Hex())
 | |
| 					}
 | |
| 				}
 | |
| 			}
 | |
| 			for i := uint64(0); i < *tail; i++ {
 | |
| 				block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
 | |
| 				if block.Transactions().Len() == 0 {
 | |
| 					continue
 | |
| 				}
 | |
| 				for _, tx := range block.Transactions() {
 | |
| 					if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
 | |
| 						t.Fatalf("Transaction index should be deleted, number %d hash %s", i, tx.Hash().Hex())
 | |
| 					}
 | |
| 				}
 | |
| 			}
 | |
| 		}
 | |
| 	}
 | |
| 	// Init block chain with external ancients, check that all needed indices have been indexed.
 | |
| 	limit := []uint64{0, 32, 64, 128}
 | |
| 	for _, l := range limit {
 | |
| 		frdir := t.TempDir()
 | |
| 		ancientDb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
 | |
| 		rawdb.WriteAncientBlocks(ancientDb, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))
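| 		// The genesis block (with an empty receipt set) is prepended so that the
| 		// freezer's ancient section starts at block zero.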
 | |
| 
 | |
| 		l := l
 | |
| 		chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
 | |
| 		if err != nil {
 | |
| 			t.Fatalf("failed to create tester chain: %v", err)
 | |
| 		}
 | |
| 		chain.indexBlocks(rawdb.ReadTxIndexTail(ancientDb), 128, make(chan struct{}))
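| 		// With a non-zero limit l only the most recent l blocks keep their tx
| 		// indexes, so on a 128-block chain the expected tail is 128-l+1.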
 | |
| 
 | |
| 		var tail uint64
 | |
| 		if l != 0 {
 | |
| 			tail = uint64(128) - l + 1
 | |
| 		}
 | |
| 		check(&tail, chain)
 | |
| 		chain.Stop()
 | |
| 		ancientDb.Close()
 | |
| 		os.RemoveAll(frdir)
 | |
| 	}
 | |
| 
 | |
| 	// Reconstruct a block chain which only reserves HEAD-64 tx indices
 | |
| 	ancientDb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
 | |
| 	defer ancientDb.Close()
 | |
| 
 | |
| 	rawdb.WriteAncientBlocks(ancientDb, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))
 | |
| 	limit = []uint64{0, 64 /* drop stale */, 32 /* shorten history */, 64 /* extend history */, 0 /* restore all */}
 | |
| 	for _, l := range limit {
 | |
| 		l := l
 | |
| 		chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
 | |
| 		if err != nil {
 | |
| 			t.Fatalf("failed to create tester chain: %v", err)
 | |
| 		}
 | |
| 		var tail uint64
 | |
| 		if l != 0 {
 | |
| 			tail = uint64(128) - l + 1
 | |
| 		}
 | |
| 		chain.indexBlocks(rawdb.ReadTxIndexTail(ancientDb), 128, make(chan struct{}))
 | |
| 		check(&tail, chain)
 | |
| 		chain.Stop()
 | |
| 	}
 | |
| }
 | |
| 
 | |
| func TestSkipStaleTxIndicesInSnapSync(t *testing.T) {
 | |
| 	// Configure and generate a sample block chain
 | |
| 	var (
 | |
| 		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		address = crypto.PubkeyToAddress(key.PublicKey)
 | |
| 		funds   = big.NewInt(100000000000000000)
 | |
| 		gspec   = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}}
 | |
| 		signer  = types.LatestSigner(gspec.Config)
 | |
| 	)
 | |
| 	_, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 128, func(i int, block *BlockGen) {
 | |
| 		tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key)
 | |
| 		if err != nil {
 | |
| 			panic(err)
 | |
| 		}
 | |
| 		block.AddTx(tx)
 | |
| 	})
 | |
| 
 | |
| 	check := func(tail *uint64, chain *BlockChain) {
 | |
| 		stored := rawdb.ReadTxIndexTail(chain.db)
 | |
| 		if tail == nil && stored != nil {
 | |
| 			t.Fatalf("Oldest indexded block mismatch, want nil, have %d", *stored)
 | |
| 		}
 | |
| 		if tail != nil && *stored != *tail {
 | |
| 			t.Fatalf("Oldest indexded block mismatch, want %d, have %d", *tail, *stored)
 | |
| 		}
 | |
| 		if tail != nil {
 | |
| 			for i := *tail; i <= chain.CurrentBlock().Number.Uint64(); i++ {
 | |
| 				block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
 | |
| 				if block.Transactions().Len() == 0 {
 | |
| 					continue
 | |
| 				}
 | |
| 				for _, tx := range block.Transactions() {
 | |
| 					if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index == nil {
 | |
| 						t.Fatalf("Missing transaction index, number %d hash %s", i, tx.Hash().Hex())
 | |
| 					}
 | |
| 				}
 | |
| 			}
 | |
| 			for i := uint64(0); i < *tail; i++ {
 | |
| 				block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i)
 | |
| 				if block.Transactions().Len() == 0 {
 | |
| 					continue
 | |
| 				}
 | |
| 				for _, tx := range block.Transactions() {
 | |
| 					if index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()); index != nil {
 | |
| 						t.Fatalf("Transaction index should be deleted, number %d hash %s", i, tx.Hash().Hex())
 | |
| 					}
 | |
| 				}
 | |
| 			}
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create temp freezer db: %v", err)
 | |
| 	}
 | |
| 	defer ancientDb.Close()
 | |
| 
 | |
| 	// Import all blocks into ancient db, only HEAD-32 indices are kept.
 | |
| 	l := uint64(32)
 | |
| 	chain, err := NewBlockChain(ancientDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	headers := make([]*types.Header, len(blocks))
 | |
| 	for i, block := range blocks {
 | |
| 		headers[i] = block.Header()
 | |
| 	}
 | |
| 	if n, err := chain.InsertHeaderChain(headers, 0); err != nil {
 | |
| 		t.Fatalf("failed to insert header %d: %v", n, err)
 | |
| 	}
 | |
| 	// The indices before ancient-N(32) should be ignored. After that all blocks should be indexed.
 | |
| 	if n, err := chain.InsertReceiptChain(blocks, receipts, 64); err != nil {
 | |
| 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| 	tail := uint64(32)
 | |
| 	check(&tail, chain)
 | |
| }
 | |
| 
 | |
| // Benchmarks large blocks with value transfers to non-existing accounts
 | |
| func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks int, recipientFn func(uint64) common.Address, dataFn func(uint64) []byte) {
 | |
| 	var (
 | |
| 		signer          = types.HomesteadSigner{}
 | |
| 		testBankKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
 | |
| 		bankFunds       = big.NewInt(100000000000000000)
 | |
| 		gspec           = &Genesis{
 | |
| 			Config: params.TestChainConfig,
 | |
| 			Alloc: GenesisAlloc{
 | |
| 				testBankAddress: {Balance: bankFunds},
 | |
| 				common.HexToAddress("0xc0de"): {
 | |
| 					Code:    []byte{0x60, 0x01, 0x50},
 | |
| 					Balance: big.NewInt(0),
 | |
| 				}, // push 1, pop
 | |
| 			},
 | |
| 			GasLimit: 100e6, // 100 M
 | |
| 		}
 | |
| 	)
 | |
| 	// Generate the original common chain segment and the two competing forks
 | |
| 	engine := ethash.NewFaker()
 | |
| 
 | |
| 	blockGenerator := func(i int, block *BlockGen) {
 | |
| 		block.SetCoinbase(common.Address{1})
 | |
| 		for txi := 0; txi < numTxs; txi++ {
 | |
| 			uniq := uint64(i*numTxs + txi) // strictly increasing nonce across all generated blocks
 | |
| 			recipient := recipientFn(uniq)
 | |
| 			tx, err := types.SignTx(types.NewTransaction(uniq, recipient, big.NewInt(1), params.TxGas, block.header.BaseFee, nil), signer, testBankKey)
 | |
| 			if err != nil {
 | |
| 				b.Error(err)
 | |
| 			}
 | |
| 			block.AddTx(tx)
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	_, shared, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, blockGenerator)
 | |
| 	b.StopTimer()
 | |
| 	b.ResetTimer()
 | |
| 	for i := 0; i < b.N; i++ {
 | |
| 		// Import the shared chain and the original canonical one
 | |
| 		chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
 | |
| 		if err != nil {
 | |
| 			b.Fatalf("failed to create tester chain: %v", err)
 | |
| 		}
 | |
| 		b.StartTimer()
 | |
| 		if _, err := chain.InsertChain(shared); err != nil {
 | |
| 			b.Fatalf("failed to insert shared chain: %v", err)
 | |
| 		}
 | |
| 		b.StopTimer()
 | |
| 		block := chain.GetBlockByHash(chain.CurrentBlock().Hash())
 | |
| 		if got := block.Transactions().Len(); got != numTxs*numBlocks {
 | |
| 			b.Fatalf("Transactions were not included, expected %d, got %d", numTxs*numBlocks, got)
 | |
| 		}
 | |
| 	}
 | |
| }
 | |
| 
 | |
| func BenchmarkBlockChain_1x1000ValueTransferToNonexisting(b *testing.B) {
 | |
| 	var (
 | |
| 		numTxs    = 1000
 | |
| 		numBlocks = 1
 | |
| 	)
 | |
| 	recipientFn := func(nonce uint64) common.Address {
 | |
| 		return common.BigToAddress(new(big.Int).SetUint64(1337 + nonce))
 | |
| 	}
 | |
| 	dataFn := func(nonce uint64) []byte {
 | |
| 		return nil
 | |
| 	}
 | |
| 	benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
 | |
| }
 | |
| 
 | |
| func BenchmarkBlockChain_1x1000ValueTransferToExisting(b *testing.B) {
 | |
| 	var (
 | |
| 		numTxs    = 1000
 | |
| 		numBlocks = 1
 | |
| 	)
 | |
| 	b.StopTimer()
 | |
| 	b.ResetTimer()
 | |
| 
 | |
| 	recipientFn := func(nonce uint64) common.Address {
 | |
| 		return common.BigToAddress(new(big.Int).SetUint64(1337))
 | |
| 	}
 | |
| 	dataFn := func(nonce uint64) []byte {
 | |
| 		return nil
 | |
| 	}
 | |
| 	benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
 | |
| }
 | |
| 
 | |
| func BenchmarkBlockChain_1x1000Executions(b *testing.B) {
 | |
| 	var (
 | |
| 		numTxs    = 1000
 | |
| 		numBlocks = 1
 | |
| 	)
 | |
| 	b.StopTimer()
 | |
| 	b.ResetTimer()
 | |
| 
 | |
| 	recipientFn := func(nonce uint64) common.Address {
 | |
| 		return common.BigToAddress(new(big.Int).SetUint64(0xc0de))
 | |
| 	}
 | |
| 	dataFn := func(nonce uint64) []byte {
 | |
| 		return nil
 | |
| 	}
 | |
| 	benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn)
 | |
| }
 | |
| 
 | |
| // Tests importing some old blocks, where all blocks are before the
 | |
| // pruning point.
 | |
| // This internally leads to a sidechain import, since the blocks trigger an
 | |
| // ErrPrunedAncestor error.
 | |
| // This may e.g. happen if
 | |
| //  1. Downloader rolls back a batch of inserted blocks and exits
 | |
| //  2. Downloader starts to sync again
 | |
| //  3. The blocks fetched are all known and canonical blocks
 | |
| func TestSideImportPrunedBlocks(t *testing.T) {
 | |
| 	// Generate a canonical chain to act as the main dataset
 | |
| 	engine := ethash.NewFaker()
 | |
| 	genesis := &Genesis{
 | |
| 		Config:  params.TestChainConfig,
 | |
| 		BaseFee: big.NewInt(params.InitialBaseFee),
 | |
| 	}
 | |
| 	// Generate and import the canonical chain
 | |
| 	_, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, nil)
 | |
| 
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	if n, err := chain.InsertChain(blocks); err != nil {
 | |
| 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| 
 | |
| 	lastPrunedIndex := len(blocks) - TriesInMemory - 1
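| 	// Only the most recent TriesInMemory states are kept in memory, so the state of
| 	// anything older than head-TriesInMemory has already been dereferenced.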
 | |
| 	lastPrunedBlock := blocks[lastPrunedIndex]
 | |
| 
 | |
| 	// Verify pruning of lastPrunedBlock
 | |
| 	if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) {
 | |
| 		t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64())
 | |
| 	}
 | |
| 	firstNonPrunedBlock := blocks[len(blocks)-TriesInMemory]
 | |
| 	// Verify firstNonPrunedBlock is not pruned
 | |
| 	if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) {
 | |
| 		t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64())
 | |
| 	}
 | |
| 	// Now re-import some old blocks
 | |
| 	blockToReimport := blocks[5:8]
 | |
| 	_, err = chain.InsertChain(blockToReimport)
 | |
| 	if err != nil {
 | |
| 		t.Errorf("Got error, %v", err)
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // TestDeleteCreateRevert tests a weird state transition corner case that we hit
 | |
| // while changing the internals of statedb. The workflow is that a contract is
 | |
| // self-destructed, then in a follow-up transaction (but same block) it's created
 | |
| // again and the transaction reverted.
 | |
| //
 | |
| // The original statedb implementation flushed dirty objects to the tries after
 | |
| // each transaction, so this works ok. The rework accumulated writes in memory
 | |
| // first, but the journal wiped the entire state object on create-revert.
 | |
| func TestDeleteCreateRevert(t *testing.T) {
 | |
| 	var (
 | |
| 		aa     = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
 | |
| 		bb     = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
 | |
| 		engine = ethash.NewFaker()
 | |
| 
 | |
| 		// A sender who makes transactions, has some funds
 | |
| 		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		address = crypto.PubkeyToAddress(key.PublicKey)
 | |
| 		funds   = big.NewInt(100000000000000000)
 | |
| 		gspec   = &Genesis{
 | |
| 			Config: params.TestChainConfig,
 | |
| 			Alloc: GenesisAlloc{
 | |
| 				address: {Balance: funds},
 | |
| 				// The address 0xAAAAA selfdestructs if called
 | |
| 				aa: {
 | |
| 					// Code needs to just selfdestruct
 | |
| 					Code:    []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)},
 | |
| 					Nonce:   1,
 | |
| 					Balance: big.NewInt(0),
 | |
| 				},
 | |
| 				// The address 0xBBBB sends 1 wei to 0xAAAA, then reverts
 | |
| 				bb: {
 | |
| 					Code: []byte{
 | |
| 						byte(vm.PC),          // [0]
 | |
| 						byte(vm.DUP1),        // [0,0]
 | |
| 						byte(vm.DUP1),        // [0,0,0]
 | |
| 						byte(vm.DUP1),        // [0,0,0,0]
 | |
| 						byte(vm.PUSH1), 0x01, // [0,0,0,0,1] (value)
 | |
| 						byte(vm.PUSH2), 0xaa, 0xaa, // [0,0,0,0,1, 0xaaaa]
 | |
| 						byte(vm.GAS),
 | |
| 						byte(vm.CALL),
 | |
| 						byte(vm.REVERT),
 | |
| 					},
 | |
| 					Balance: big.NewInt(1),
 | |
| 				},
 | |
| 			},
 | |
| 		}
 | |
| 	)
 | |
| 
 | |
| 	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{1})
 | |
| 		// One transaction to AAAA
 | |
| 		tx, _ := types.SignTx(types.NewTransaction(0, aa,
 | |
| 			big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
 | |
| 		b.AddTx(tx)
 | |
| 		// One transaction to BBBB
 | |
| 		tx, _ = types.SignTx(types.NewTransaction(1, bb,
 | |
| 			big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
 | |
| 		b.AddTx(tx)
 | |
| 	})
 | |
| 	// Import the canonical chain
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	if n, err := chain.InsertChain(blocks); err != nil {
 | |
| 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // TestDeleteRecreateSlots tests a state-transition that contains both deletion
 | |
| // and recreation of contract state.
 | |
| // Contract A exists, has slots 1 and 2 set
 | |
| // Tx 1: Selfdestruct A
 | |
| // Tx 2: Re-create A, set slots 3 and 4
 | |
| // Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct,
 | |
| // and then the new slots exist
 | |
| func TestDeleteRecreateSlots(t *testing.T) {
 | |
| 	var (
 | |
| 		engine = ethash.NewFaker()
 | |
| 
 | |
| 		// A sender who makes transactions, has some funds
 | |
| 		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		address   = crypto.PubkeyToAddress(key.PublicKey)
 | |
| 		funds     = big.NewInt(1000000000000000)
 | |
| 		bb        = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
 | |
| 		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
 | |
| 		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
 | |
| 	)
 | |
| 	// Populate two slots
 | |
| 	aaStorage[common.HexToHash("01")] = common.HexToHash("01")
 | |
| 	aaStorage[common.HexToHash("02")] = common.HexToHash("02")
 | |
| 
 | |
| 	// The bb-code needs to CREATE2 the aa contract. It consists of
 | |
| 	// both initcode and deployment code
 | |
| 	// initcode:
 | |
| 	// 1. Set slots 3=3, 4=4,
 | |
| 	// 2. Return aaCode
 | |
| 
 | |
| 	initCode := []byte{
 | |
| 		byte(vm.PUSH1), 0x3, // value
 | |
| 		byte(vm.PUSH1), 0x3, // location
 | |
| 		byte(vm.SSTORE),     // Set slot[3] = 3
 | |
| 		byte(vm.PUSH1), 0x4, // value
 | |
| 		byte(vm.PUSH1), 0x4, // location
 | |
| 		byte(vm.SSTORE), // Set slot[4] = 4
 | |
| 		// Slots are set, now return the code
 | |
| 		byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
 | |
| 		byte(vm.PUSH1), 0x0, // memory start on stack
 | |
| 		byte(vm.MSTORE),
 | |
| 		// Code is now in memory.
 | |
| 		byte(vm.PUSH1), 0x2, // size
 | |
| 		byte(vm.PUSH1), byte(32 - 2), // offset
 | |
| 		byte(vm.RETURN),
 | |
| 	}
 | |
| 	if l := len(initCode); l > 32 {
 | |
| 		t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
 | |
| 	}
 | |
| 	bbCode := []byte{
 | |
| 		// Push initcode onto stack
 | |
| 		byte(vm.PUSH1) + byte(len(initCode)-1)}
 | |
| 	bbCode = append(bbCode, initCode...)
 | |
| 	bbCode = append(bbCode, []byte{
 | |
| 		byte(vm.PUSH1), 0x0, // memory start on stack
 | |
| 		byte(vm.MSTORE),
 | |
| 		byte(vm.PUSH1), 0x00, // salt
 | |
| 		byte(vm.PUSH1), byte(len(initCode)), // size
 | |
| 		byte(vm.PUSH1), byte(32 - len(initCode)), // offset
 | |
| 		byte(vm.PUSH1), 0x00, // endowment
 | |
| 		byte(vm.CREATE2),
 | |
| 	}...)
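| 	// The deployer stores the (at most 32 byte) initcode right-aligned in memory
| 	// word 0 and hands that slice (offset 32-len, size len) to CREATE2 with salt 0.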
 | |
| 
 | |
| 	initHash := crypto.Keccak256Hash(initCode)
 | |
| 	aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
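| 	// CREATE2 address = keccak256(0xff ++ deployer ++ salt ++ keccak256(initcode))[12:],
| 	// so the address that BB will re-deploy AA to can be computed up front.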
 | |
| 	t.Logf("Destination address: %x\n", aa)
 | |
| 
 | |
| 	gspec := &Genesis{
 | |
| 		Config: params.TestChainConfig,
 | |
| 		Alloc: GenesisAlloc{
 | |
| 			address: {Balance: funds},
 | |
| 			// The address 0xAAAAA selfdestructs if called
 | |
| 			aa: {
 | |
| 				// Code needs to just selfdestruct
 | |
| 				Code:    aaCode,
 | |
| 				Nonce:   1,
 | |
| 				Balance: big.NewInt(0),
 | |
| 				Storage: aaStorage,
 | |
| 			},
 | |
| 			// The contract BB recreates AA
 | |
| 			bb: {
 | |
| 				Code:    bbCode,
 | |
| 				Balance: big.NewInt(1),
 | |
| 			},
 | |
| 		},
 | |
| 	}
 | |
| 	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{1})
 | |
| 		// One transaction to AA, to kill it
 | |
| 		tx, _ := types.SignTx(types.NewTransaction(0, aa,
 | |
| 			big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
 | |
| 		b.AddTx(tx)
 | |
| 		// One transaction to BB, to recreate AA
 | |
| 		tx, _ = types.SignTx(types.NewTransaction(1, bb,
 | |
| 			big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
 | |
| 		b.AddTx(tx)
 | |
| 	})
 | |
| 	// Import the canonical chain
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
 | |
| 		Debug:  true,
 | |
| 		Tracer: logger.NewJSONLogger(nil, os.Stdout),
 | |
| 	}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	if n, err := chain.InsertChain(blocks); err != nil {
 | |
| 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| 	statedb, _ := chain.State()
 | |
| 
 | |
| 	// If all is correct, then slot 1 and 2 are zero
 | |
| 	if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
 | |
| 		t.Errorf("got %x exp %x", got, exp)
 | |
| 	}
 | |
| 	if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
 | |
| 		t.Errorf("got %x exp %x", got, exp)
 | |
| 	}
 | |
| 	// Also, 3 and 4 should be set
 | |
| 	if got, exp := statedb.GetState(aa, common.HexToHash("03")), common.HexToHash("03"); got != exp {
 | |
| 		t.Fatalf("got %x exp %x", got, exp)
 | |
| 	}
 | |
| 	if got, exp := statedb.GetState(aa, common.HexToHash("04")), common.HexToHash("04"); got != exp {
 | |
| 		t.Fatalf("got %x exp %x", got, exp)
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // TestDeleteRecreateAccount tests a state-transition that contains deletion of a
 | |
| // contract with storage, and a recreation of the same contract via a
 | |
| // regular value-transfer
 | |
| // Expected outcome is that _all_ slots are cleared from A
 | |
| func TestDeleteRecreateAccount(t *testing.T) {
 | |
| 	var (
 | |
| 		engine = ethash.NewFaker()
 | |
| 
 | |
| 		// A sender who makes transactions, has some funds
 | |
| 		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		address = crypto.PubkeyToAddress(key.PublicKey)
 | |
| 		funds   = big.NewInt(1000000000000000)
 | |
| 
 | |
| 		aa        = common.HexToAddress("0x7217d81b76bdd8707601e959454e3d776aee5f43")
 | |
| 		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
 | |
| 		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
 | |
| 	)
 | |
| 	// Populate two slots
 | |
| 	aaStorage[common.HexToHash("01")] = common.HexToHash("01")
 | |
| 	aaStorage[common.HexToHash("02")] = common.HexToHash("02")
 | |
| 
 | |
| 	gspec := &Genesis{
 | |
| 		Config: params.TestChainConfig,
 | |
| 		Alloc: GenesisAlloc{
 | |
| 			address: {Balance: funds},
 | |
| 			// The address 0xAAAAA selfdestructs if called
 | |
| 			aa: {
 | |
| 				// Code needs to just selfdestruct
 | |
| 				Code:    aaCode,
 | |
| 				Nonce:   1,
 | |
| 				Balance: big.NewInt(0),
 | |
| 				Storage: aaStorage,
 | |
| 			},
 | |
| 		},
 | |
| 	}
 | |
| 
 | |
| 	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{1})
 | |
| 		// One transaction to AA, to kill it
 | |
| 		tx, _ := types.SignTx(types.NewTransaction(0, aa,
 | |
| 			big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
 | |
| 		b.AddTx(tx)
 | |
| 		// One transaction to AA, to recreate it (but without storage)
 | |
| 		tx, _ = types.SignTx(types.NewTransaction(1, aa,
 | |
| 			big.NewInt(1), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
 | |
| 		b.AddTx(tx)
 | |
| 	})
 | |
| 	// Import the canonical chain
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
 | |
| 		Debug:  true,
 | |
| 		Tracer: logger.NewJSONLogger(nil, os.Stdout),
 | |
| 	}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	if n, err := chain.InsertChain(blocks); err != nil {
 | |
| 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| 	statedb, _ := chain.State()
 | |
| 
 | |
| 	// If all is correct, then both slots are zero
 | |
| 	if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
 | |
| 		t.Errorf("got %x exp %x", got, exp)
 | |
| 	}
 | |
| 	if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
 | |
| 		t.Errorf("got %x exp %x", got, exp)
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // TestDeleteRecreateSlotsAcrossManyBlocks tests multiple state transitions that contain both deletion
 | |
| // and recreation of contract state.
 | |
| // Contract A exists, has slots 1 and 2 set
 | |
| // Tx 1: Selfdestruct A
 | |
| // Tx 2: Re-create A, set slots 3 and 4
 | |
| // Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct,
 | |
| // and then the new slots exist
 | |
| func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) {
 | |
| 	var (
 | |
| 		engine = ethash.NewFaker()
 | |
| 
 | |
| 		// A sender who makes transactions, has some funds
 | |
| 		key, _    = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		address   = crypto.PubkeyToAddress(key.PublicKey)
 | |
| 		funds     = big.NewInt(1000000000000000)
 | |
| 		bb        = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
 | |
| 		aaStorage = make(map[common.Hash]common.Hash)          // Initial storage in AA
 | |
| 		aaCode    = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct)
 | |
| 	)
 | |
| 	// Populate two slots
 | |
| 	aaStorage[common.HexToHash("01")] = common.HexToHash("01")
 | |
| 	aaStorage[common.HexToHash("02")] = common.HexToHash("02")
 | |
| 
 | |
| 	// The bb-code needs to CREATE2 the aa contract. It consists of
 | |
| 	// both initcode and deployment code
 | |
| 	// initcode:
 | |
| 	// 1. Set slots 3=blocknum+1, 4=4,
 | |
| 	// 2. Return aaCode
 | |
| 
 | |
| 	initCode := []byte{
 | |
| 		byte(vm.PUSH1), 0x1, //
 | |
| 		byte(vm.NUMBER),     // value = number + 1
 | |
| 		byte(vm.ADD),        //
 | |
| 		byte(vm.PUSH1), 0x3, // location
 | |
| 		byte(vm.SSTORE),     // Set slot[3] = number + 1
 | |
| 		byte(vm.PUSH1), 0x4, // value
 | |
| 		byte(vm.PUSH1), 0x4, // location
 | |
| 		byte(vm.SSTORE), // Set slot[4] = 4
 | |
| 		// Slots are set, now return the code
 | |
| 		byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
 | |
| 		byte(vm.PUSH1), 0x0, // memory start on stack
 | |
| 		byte(vm.MSTORE),
 | |
| 		// Code is now in memory.
 | |
| 		byte(vm.PUSH1), 0x2, // size
 | |
| 		byte(vm.PUSH1), byte(32 - 2), // offset
 | |
| 		byte(vm.RETURN),
 | |
| 	}
 | |
| 	if l := len(initCode); l > 32 {
 | |
| 		t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
 | |
| 	}
 | |
| 	bbCode := []byte{
 | |
| 		// Push initcode onto stack
 | |
| 		byte(vm.PUSH1) + byte(len(initCode)-1)}
 | |
| 	bbCode = append(bbCode, initCode...)
 | |
| 	bbCode = append(bbCode, []byte{
 | |
| 		byte(vm.PUSH1), 0x0, // memory start on stack
 | |
| 		byte(vm.MSTORE),
 | |
| 		byte(vm.PUSH1), 0x00, // salt
 | |
| 		byte(vm.PUSH1), byte(len(initCode)), // size
 | |
| 		byte(vm.PUSH1), byte(32 - len(initCode)), // offset
 | |
| 		byte(vm.PUSH1), 0x00, // endowment
 | |
| 		byte(vm.CREATE2),
 | |
| 	}...)
 | |
| 
 | |
| 	initHash := crypto.Keccak256Hash(initCode)
 | |
| 	aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
 | |
| 	t.Logf("Destination address: %x\n", aa)
 | |
| 	gspec := &Genesis{
 | |
| 		Config: params.TestChainConfig,
 | |
| 		Alloc: GenesisAlloc{
 | |
| 			address: {Balance: funds},
 | |
| 			// The address 0xAAAAA selfdestructs if called
 | |
| 			aa: {
 | |
| 				// Code needs to just selfdestruct
 | |
| 				Code:    aaCode,
 | |
| 				Nonce:   1,
 | |
| 				Balance: big.NewInt(0),
 | |
| 				Storage: aaStorage,
 | |
| 			},
 | |
| 			// The contract BB recreates AA
 | |
| 			bb: {
 | |
| 				Code:    bbCode,
 | |
| 				Balance: big.NewInt(1),
 | |
| 			},
 | |
| 		},
 | |
| 	}
 | |
| 	var nonce uint64
 | |
| 
 | |
| 	type expectation struct {
 | |
| 		exist    bool
 | |
| 		blocknum int
 | |
| 		values   map[int]int
 | |
| 	}
 | |
| 	var current = &expectation{
 | |
| 		exist:    true, // exists in genesis
 | |
| 		blocknum: 0,
 | |
| 		values:   map[int]int{1: 1, 2: 2},
 | |
| 	}
 | |
| 	var expectations []*expectation
 | |
| 	var newDestruct = func(e *expectation, b *BlockGen) *types.Transaction {
 | |
| 		tx, _ := types.SignTx(types.NewTransaction(nonce, aa,
 | |
| 			big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
 | |
| 		nonce++
 | |
| 		if e.exist {
 | |
| 			e.exist = false
 | |
| 			e.values = nil
 | |
| 		}
 | |
| 		//t.Logf("block %d; adding destruct\n", e.blocknum)
 | |
| 		return tx
 | |
| 	}
 | |
| 	var newResurrect = func(e *expectation, b *BlockGen) *types.Transaction {
 | |
| 		tx, _ := types.SignTx(types.NewTransaction(nonce, bb,
 | |
| 			big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
 | |
| 		nonce++
 | |
| 		if !e.exist {
 | |
| 			e.exist = true
 | |
| 			e.values = map[int]int{3: e.blocknum + 1, 4: 4}
 | |
| 		}
 | |
| 		//t.Logf("block %d; adding resurrect\n", e.blocknum)
 | |
| 		return tx
 | |
| 	}
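| 	// The generator below mixes destructs and resurrects (on multiples of 2, 3, 5
| 	// and 7) and records, per block, whether AA should exist and which slots it holds.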
 | |
| 
 | |
| 	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 150, func(i int, b *BlockGen) {
 | |
| 		var exp = new(expectation)
 | |
| 		exp.blocknum = i + 1
 | |
| 		exp.values = make(map[int]int)
 | |
| 		for k, v := range current.values {
 | |
| 			exp.values[k] = v
 | |
| 		}
 | |
| 		exp.exist = current.exist
 | |
| 
 | |
| 		b.SetCoinbase(common.Address{1})
 | |
| 		if i%2 == 0 {
 | |
| 			b.AddTx(newDestruct(exp, b))
 | |
| 		}
 | |
| 		if i%3 == 0 {
 | |
| 			b.AddTx(newResurrect(exp, b))
 | |
| 		}
 | |
| 		if i%5 == 0 {
 | |
| 			b.AddTx(newDestruct(exp, b))
 | |
| 		}
 | |
| 		if i%7 == 0 {
 | |
| 			b.AddTx(newResurrect(exp, b))
 | |
| 		}
 | |
| 		expectations = append(expectations, exp)
 | |
| 		current = exp
 | |
| 	})
 | |
| 	// Import the canonical chain
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
 | |
| 		//Debug:  true,
 | |
| 		//Tracer: vm.NewJSONLogger(nil, os.Stdout),
 | |
| 	}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	var asHash = func(num int) common.Hash {
 | |
| 		return common.BytesToHash([]byte{byte(num)})
 | |
| 	}
 | |
| 	for i, block := range blocks {
 | |
| 		blockNum := i + 1
 | |
| 		if n, err := chain.InsertChain([]*types.Block{block}); err != nil {
 | |
| 			t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 		}
 | |
| 		statedb, _ := chain.State()
 | |
| 		// If all is correct, then slot 1 and 2 are zero
 | |
| 		if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp {
 | |
| 			t.Errorf("block %d, got %x exp %x", blockNum, got, exp)
 | |
| 		}
 | |
| 		if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp {
 | |
| 			t.Errorf("block %d, got %x exp %x", blockNum, got, exp)
 | |
| 		}
 | |
| 		exp := expectations[i]
 | |
| 		if exp.exist {
 | |
| 			if !statedb.Exist(aa) {
 | |
| 				t.Fatalf("block %d, expected %v to exist, it did not", blockNum, aa)
 | |
| 			}
 | |
| 			for slot, val := range exp.values {
 | |
| 				if gotValue, expValue := statedb.GetState(aa, asHash(slot)), asHash(val); gotValue != expValue {
 | |
| 					t.Fatalf("block %d, slot %d, got %x exp %x", blockNum, slot, gotValue, expValue)
 | |
| 				}
 | |
| 			}
 | |
| 		} else {
 | |
| 			if statedb.Exist(aa) {
 | |
| 				t.Fatalf("block %d, expected %v to not exist, it did", blockNum, aa)
 | |
| 			}
 | |
| 		}
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // TestInitThenFailCreateContract tests a pretty notorious case that happened
 | |
| // on mainnet over blocks 7338108, 7338110 and 7338115.
 | |
| //   - Block 7338108: address e771789f5cccac282f23bb7add5690e1f6ca467c is initiated
 | |
| //     with 0.001 ether (thus created but no code)
 | |
| //   - Block 7338110: a CREATE2 is attempted. The CREATE2 would deploy code on
 | |
| //     the same address e771789f5cccac282f23bb7add5690e1f6ca467c. However, the
 | |
| //     deployment fails due to OOG during initcode execution
 | |
| //   - Block 7338115: another tx checks the balance of
 | |
| //     e771789f5cccac282f23bb7add5690e1f6ca467c, and the snapshotter returned it as
 | |
| //     zero.
 | |
| //
 | |
| // The problem is that the snapshotter maintains a destructset, and adds items
 | |
| // to the destructset in case something is created "onto" an existing item.
 | |
| // We need to either roll back the snapDestructs, or not place it into snapDestructs
 | |
| // in the first place.
 | |
| func TestInitThenFailCreateContract(t *testing.T) {
 | |
| 	var (
 | |
| 		engine = ethash.NewFaker()
 | |
| 
 | |
| 		// A sender who makes transactions, has some funds
 | |
| 		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		address = crypto.PubkeyToAddress(key.PublicKey)
 | |
| 		funds   = big.NewInt(1000000000000000)
 | |
| 		bb      = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
 | |
| 	)
 | |
| 
 | |
| 	// The bb-code needs to CREATE2 the aa contract. It consists of
 | |
| 	// both initcode and deployment code
 | |
| 	// initcode:
 | |
| 	// 1. If blocknum <= 1, error out (e.g. invalid opcode)
 | |
| 	// 2. else, return a snippet of code
 | |
| 	initCode := []byte{
 | |
| 		byte(vm.PUSH1), 0x1, // y (1)
 | |
| 		byte(vm.NUMBER), // x (number)
 | |
| 		byte(vm.GT),     // x > y?
 | |
| 		byte(vm.PUSH1), byte(0x8),
 | |
| 		byte(vm.JUMPI), // jump to label if number > 1
 | |
| 		byte(0xFE),     // illegal opcode
 | |
| 		byte(vm.JUMPDEST),
 | |
| 		byte(vm.PUSH1), 0x2, // size
 | |
| 		byte(vm.PUSH1), 0x0, // offset
 | |
| 		byte(vm.RETURN), // return 2 bytes of zero-code
 | |
| 	}
 | |
| 	if l := len(initCode); l > 32 {
 | |
| 		t.Fatalf("init code is too long for a pushx, need a more elaborate deployer")
 | |
| 	}
 | |
| 	bbCode := []byte{
 | |
| 		// Push initcode onto stack
 | |
| 		byte(vm.PUSH1) + byte(len(initCode)-1)}
 | |
| 	bbCode = append(bbCode, initCode...)
 | |
| 	bbCode = append(bbCode, []byte{
 | |
| 		byte(vm.PUSH1), 0x0, // memory start on stack
 | |
| 		byte(vm.MSTORE),
 | |
| 		byte(vm.PUSH1), 0x00, // salt
 | |
| 		byte(vm.PUSH1), byte(len(initCode)), // size
 | |
| 		byte(vm.PUSH1), byte(32 - len(initCode)), // offset
 | |
| 		byte(vm.PUSH1), 0x00, // endowment
 | |
| 		byte(vm.CREATE2),
 | |
| 	}...)
 | |
| 
 | |
| 	initHash := crypto.Keccak256Hash(initCode)
 | |
| 	aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:])
 | |
| 	t.Logf("Destination address: %x\n", aa)
 | |
| 
 | |
| 	gspec := &Genesis{
 | |
| 		Config: params.TestChainConfig,
 | |
| 		Alloc: GenesisAlloc{
 | |
| 			address: {Balance: funds},
 | |
| 			// The address aa has some funds
 | |
| 			aa: {Balance: big.NewInt(100000)},
 | |
| 			// The contract BB tries to create code onto AA
 | |
| 			bb: {
 | |
| 				Code:    bbCode,
 | |
| 				Balance: big.NewInt(1),
 | |
| 			},
 | |
| 		},
 | |
| 	}
 | |
| 	nonce := uint64(0)
 | |
| 	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 4, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{1})
 | |
| 		// One transaction to BB
 | |
| 		tx, _ := types.SignTx(types.NewTransaction(nonce, bb,
 | |
| 			big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key)
 | |
| 		b.AddTx(tx)
 | |
| 		nonce++
 | |
| 	})
 | |
| 
 | |
| 	// Import the canonical chain
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
 | |
| 		//Debug:  true,
 | |
| 		//Tracer: vm.NewJSONLogger(nil, os.Stdout),
 | |
| 	}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	statedb, _ := chain.State()
 | |
| 	if got, exp := statedb.GetBalance(aa), big.NewInt(100000); got.Cmp(exp) != 0 {
 | |
| 		t.Fatalf("Genesis err, got %v exp %v", got, exp)
 | |
| 	}
 | |
| 	// First block tries to create, but fails
 | |
| 	{
 | |
| 		block := blocks[0]
 | |
| 		if _, err := chain.InsertChain([]*types.Block{blocks[0]}); err != nil {
 | |
| 			t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
 | |
| 		}
 | |
| 		statedb, _ = chain.State()
 | |
| 		if got, exp := statedb.GetBalance(aa), big.NewInt(100000); got.Cmp(exp) != 0 {
 | |
| 			t.Fatalf("block %d: got %v exp %v", block.NumberU64(), got, exp)
 | |
| 		}
 | |
| 	}
 | |
| 	// Import the rest of the blocks
 | |
| 	for _, block := range blocks[1:] {
 | |
| 		if _, err := chain.InsertChain([]*types.Block{block}); err != nil {
 | |
| 			t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
 | |
| 		}
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // TestEIP2718Transition tests that an EIP-2718 transaction will be accepted
 | |
| // after the fork block has passed. This is verified by sending an EIP-2930
 | |
| // access list transaction, which specifies a single slot access, and then
 | |
| // checking that the gas usage of a warm SLOAD and a cold SLOAD is calculated
 | |
| // correctly.
 | |
| func TestEIP2718Transition(t *testing.T) {
 | |
| 	var (
 | |
| 		aa     = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
 | |
| 		engine = ethash.NewFaker()
 | |
| 
 | |
| 		// A sender who makes transactions, has some funds
 | |
| 		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		address = crypto.PubkeyToAddress(key.PublicKey)
 | |
| 		funds   = big.NewInt(1000000000000000)
 | |
| 		gspec   = &Genesis{
 | |
| 			Config: params.TestChainConfig,
 | |
| 			Alloc: GenesisAlloc{
 | |
| 				address: {Balance: funds},
 | |
| 				// The address 0xAAAA sloads 0x00 and 0x01
 | |
| 				aa: {
 | |
| 					Code: []byte{
 | |
| 						byte(vm.PC),
 | |
| 						byte(vm.PC),
 | |
| 						byte(vm.SLOAD),
 | |
| 						byte(vm.SLOAD),
 | |
| 					},
 | |
| 					Nonce:   0,
 | |
| 					Balance: big.NewInt(0),
 | |
| 				},
 | |
| 			},
 | |
| 		}
 | |
| 	)
 | |
| 	// Generate blocks
 | |
| 	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{1})
 | |
| 
 | |
| 		// One transaction to 0xAAAA
 | |
| 		signer := types.LatestSigner(gspec.Config)
 | |
| 		tx, _ := types.SignNewTx(key, signer, &types.AccessListTx{
 | |
| 			ChainID:  gspec.Config.ChainID,
 | |
| 			Nonce:    0,
 | |
| 			To:       &aa,
 | |
| 			Gas:      30000,
 | |
| 			GasPrice: b.header.BaseFee,
 | |
| 			AccessList: types.AccessList{{
 | |
| 				Address:     aa,
 | |
| 				StorageKeys: []common.Hash{{0}},
 | |
| 			}},
 | |
| 		})
 | |
| 		b.AddTx(tx)
 | |
| 	})
 | |
| 
 | |
| 	// Import the canonical chain
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	if n, err := chain.InsertChain(blocks); err != nil {
 | |
| 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| 
 | |
| 	block := chain.GetBlockByNumber(1)
 | |
| 
 | |
| 	// Expected gas is intrinsic + 2 * PC + one warm and one cold SLOAD, since only one slot is in the access list
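| 	// (21000 intrinsic + 2400 access-list address + 1900 access-list storage key
| 	// + 2*2 for the two PCs + 100 warm SLOAD + 2100 cold SLOAD = 27504 gas)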
 | |
| 	expected := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas +
 | |
| 		vm.GasQuickStep*2 + params.WarmStorageReadCostEIP2929 + params.ColdSloadCostEIP2929
 | |
| 	if block.GasUsed() != expected {
 | |
| 		t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expected, block.GasUsed())
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // TestEIP1559Transition tests the following:
 | |
| //
 | |
| //  1. A transaction whose gasFeeCap is greater than the baseFee is valid.
 | |
| //  2. Gas accounting for access lists on EIP-1559 transactions is correct.
 | |
| //  3. Only the transaction's tip will be received by the coinbase.
 | |
| //  4. The transaction sender pays for both the tip and baseFee.
 | |
| //  5. The coinbase receives only the partially realized tip when
 | |
| //     gasFeeCap - gasTipCap < baseFee.
 | |
| //  6. Legacy transactions behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap).
 | |
| func TestEIP1559Transition(t *testing.T) {
 | |
| 	var (
 | |
| 		aa     = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
 | |
| 		engine = ethash.NewFaker()
 | |
| 
 | |
| 		// A sender who makes transactions, has some funds
 | |
| 		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
 | |
| 		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
 | |
| 		addr2   = crypto.PubkeyToAddress(key2.PublicKey)
 | |
| 		funds   = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether))
 | |
| 		gspec   = &Genesis{
 | |
| 			Config: params.AllEthashProtocolChanges,
 | |
| 			Alloc: GenesisAlloc{
 | |
| 				addr1: {Balance: funds},
 | |
| 				addr2: {Balance: funds},
 | |
| 				// The address 0xAAAA sloads 0x00 and 0x01
 | |
| 				aa: {
 | |
| 					Code: []byte{
 | |
| 						byte(vm.PC),
 | |
| 						byte(vm.PC),
 | |
| 						byte(vm.SLOAD),
 | |
| 						byte(vm.SLOAD),
 | |
| 					},
 | |
| 					Nonce:   0,
 | |
| 					Balance: big.NewInt(0),
 | |
| 				},
 | |
| 			},
 | |
| 		}
 | |
| 	)
 | |
| 
 | |
| 	gspec.Config.BerlinBlock = common.Big0
 | |
| 	gspec.Config.LondonBlock = common.Big0
 | |
| 	signer := types.LatestSigner(gspec.Config)
 | |
| 
 | |
| 	genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{1})
 | |
| 
 | |
| 		// One transaction to 0xAAAA
 | |
| 		accesses := types.AccessList{types.AccessTuple{
 | |
| 			Address:     aa,
 | |
| 			StorageKeys: []common.Hash{{0}},
 | |
| 		}}
 | |
| 
 | |
| 		txdata := &types.DynamicFeeTx{
 | |
| 			ChainID:    gspec.Config.ChainID,
 | |
| 			Nonce:      0,
 | |
| 			To:         &aa,
 | |
| 			Gas:        30000,
 | |
| 			GasFeeCap:  newGwei(5),
 | |
| 			GasTipCap:  big.NewInt(2),
 | |
| 			AccessList: accesses,
 | |
| 			Data:       []byte{},
 | |
| 		}
 | |
| 		tx := types.NewTx(txdata)
 | |
| 		tx, _ = types.SignTx(tx, signer, key1)
 | |
| 
 | |
| 		b.AddTx(tx)
 | |
| 	})
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	if n, err := chain.InsertChain(blocks); err != nil {
 | |
| 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| 
 | |
| 	block := chain.GetBlockByNumber(1)
 | |
| 
 | |
| 	// 1+2: Ensure EIP-1559 access lists are accounted for via gas usage.
 | |
| 	expectedGas := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas +
 | |
| 		vm.GasQuickStep*2 + params.WarmStorageReadCostEIP2929 + params.ColdSloadCostEIP2929
 | |
| 	if block.GasUsed() != expectedGas {
 | |
| 		t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed())
 | |
| 	}
 | |
| 
 | |
| 	state, _ := chain.State()
 | |
| 
 | |
| 	// 3: Ensure that miner received only the tx's tip.
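| 	// The coinbase also receives the constant 2-ether Constantinople block reward;
| 	// the base-fee portion of the transaction fee is burned, not paid to the miner.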
 | |
| 	actual := state.GetBalance(block.Coinbase())
 | |
| 	expected := new(big.Int).Add(
 | |
| 		new(big.Int).SetUint64(block.GasUsed()*block.Transactions()[0].GasTipCap().Uint64()),
 | |
| 		ethash.ConstantinopleBlockReward,
 | |
| 	)
 | |
| 	if actual.Cmp(expected) != 0 {
 | |
| 		t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
 | |
| 	}
 | |
| 
 | |
| 	// 4: Ensure the tx sender paid for the gasUsed * (tip + block baseFee).
 | |
| 	actual = new(big.Int).Sub(funds, state.GetBalance(addr1))
 | |
| 	expected = new(big.Int).SetUint64(block.GasUsed() * (block.Transactions()[0].GasTipCap().Uint64() + block.BaseFee().Uint64()))
 | |
| 	if actual.Cmp(expected) != 0 {
 | |
| 		t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
 | |
| 	}
 | |
| 
 | |
| 	blocks, _ = GenerateChain(gspec.Config, block, engine, genDb, 1, func(i int, b *BlockGen) {
 | |
| 		b.SetCoinbase(common.Address{2})
 | |
| 
 | |
| 		txdata := &types.LegacyTx{
 | |
| 			Nonce:    0,
 | |
| 			To:       &aa,
 | |
| 			Gas:      30000,
 | |
| 			GasPrice: newGwei(5),
 | |
| 		}
 | |
| 		tx := types.NewTx(txdata)
 | |
| 		tx, _ = types.SignTx(tx, signer, key2)
 | |
| 
 | |
| 		b.AddTx(tx)
 | |
| 	})
 | |
| 
 | |
| 	if n, err := chain.InsertChain(blocks); err != nil {
 | |
| 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| 
 | |
| 	block = chain.GetBlockByNumber(2)
 | |
| 	state, _ = chain.State()
 | |
| 	effectiveTip := block.Transactions()[0].GasTipCap().Uint64() - block.BaseFee().Uint64()
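| 	// For a legacy transaction gasPrice acts as both fee cap and tip cap, so the
| 	// effective tip is gasPrice minus the block's base fee.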
 | |
| 
 | |
| 	// 6+5: Ensure that miner received only the tx's effective tip.
 | |
| 	actual = state.GetBalance(block.Coinbase())
 | |
| 	expected = new(big.Int).Add(
 | |
| 		new(big.Int).SetUint64(block.GasUsed()*effectiveTip),
 | |
| 		ethash.ConstantinopleBlockReward,
 | |
| 	)
 | |
| 	if actual.Cmp(expected) != 0 {
 | |
| 		t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
 | |
| 	}
 | |
| 
 | |
| 	// 4: Ensure the tx sender paid for the gasUsed * (effectiveTip + block baseFee).
 | |
| 	actual = new(big.Int).Sub(funds, state.GetBalance(addr2))
 | |
| 	expected = new(big.Int).SetUint64(block.GasUsed() * (effectiveTip + block.BaseFee().Uint64()))
 | |
| 	if actual.Cmp(expected) != 0 {
 | |
| 		t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // Tests the scenario where the chain head is reset to another block whose state is missing.
 | |
| // It expects the state to be recovered and all relevant chain markers to be set correctly.
 | |
| func TestSetCanonical(t *testing.T) {
 | |
| 	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
 | |
| 
 | |
| 	var (
 | |
| 		key, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
 | |
| 		address = crypto.PubkeyToAddress(key.PublicKey)
 | |
| 		funds   = big.NewInt(100000000000000000)
 | |
| 		gspec   = &Genesis{
 | |
| 			Config:  params.TestChainConfig,
 | |
| 			Alloc:   GenesisAlloc{address: {Balance: funds}},
 | |
| 			BaseFee: big.NewInt(params.InitialBaseFee),
 | |
| 		}
 | |
| 		signer = types.LatestSigner(gspec.Config)
 | |
| 		engine = ethash.NewFaker()
 | |
| 	)
 | |
| 	// Generate and import the canonical chain
 | |
| 	_, canon, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
 | |
| 		tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key)
 | |
| 		if err != nil {
 | |
| 			panic(err)
 | |
| 		}
 | |
| 		gen.AddTx(tx)
 | |
| 	})
 | |
| 	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
 | |
| 	if err != nil {
 | |
| 		t.Fatalf("failed to create tester chain: %v", err)
 | |
| 	}
 | |
| 	defer chain.Stop()
 | |
| 
 | |
| 	if n, err := chain.InsertChain(canon); err != nil {
 | |
| 		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 	}
 | |
| 
 | |
| 	// Generate the side chain and import it
 | |
| 	_, side, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) {
 | |
| 		tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1), params.TxGas, gen.header.BaseFee, nil), signer, key)
 | |
| 		if err != nil {
 | |
| 			panic(err)
 | |
| 		}
 | |
| 		gen.AddTx(tx)
 | |
| 	})
 | |
| 	for _, block := range side {
 | |
| 		err := chain.InsertBlockWithoutSetHead(block)
 | |
| 		if err != nil {
 | |
| 			t.Fatalf("Failed to insert into chain: %v", err)
 | |
| 		}
 | |
| 	}
 | |
| 	for _, block := range side {
 | |
| 		got := chain.GetBlockByHash(block.Hash())
 | |
| 		if got == nil {
 | |
| 			t.Fatalf("Lost the inserted block")
 | |
| 		}
 | |
| 	}
 | |
| 
 | |
| 	// Set the chain head to the side chain, ensure all the relevant markers are updated.
 | |
| 	verify := func(head *types.Block) {
 | |
| 		if chain.CurrentBlock().Hash() != head.Hash() {
 | |
| 			t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash())
 | |
| 		}
 | |
| 		if chain.CurrentSnapBlock().Hash() != head.Hash() {
 | |
| 			t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentSnapBlock().Hash())
 | |
| 		}
 | |
| 		if chain.CurrentHeader().Hash() != head.Hash() {
 | |
| 			t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash())
 | |
| 		}
 | |
| 		if !chain.HasState(head.Root()) {
 | |
| 			t.Fatalf("Lost block state %v %x", head.Number(), head.Hash())
 | |
| 		}
 | |
| 	}
 | |
| 	chain.SetCanonical(side[len(side)-1])
 | |
| 	verify(side[len(side)-1])
 | |
| 
 | |
| 	// Reset the chain head to original chain
 | |
| 	chain.SetCanonical(canon[TriesInMemory-1])
 | |
| 	verify(canon[TriesInMemory-1])
 | |
| }
 | |
| 
 | |
| // TestCanonicalHashMarker tests that all canonical hash markers are updated/deleted
 | |
| // correctly when a reorg occurs.
 | |
| func TestCanonicalHashMarker(t *testing.T) {
 | |
| 	var cases = []struct {
 | |
| 		forkA int
 | |
| 		forkB int
 | |
| 	}{
 | |
| 		// ForkA: 10 blocks
 | |
| 		// ForkB: 1 block
 | |
| 		//
 | |
| 		// reorged:
 | |
| 		//      markers [2, 10] should be deleted
 | |
| 		//      markers [1] should be updated
 | |
| 		{10, 1},
 | |
| 
 | |
| 		// ForkA: 10 blocks
 | |
| 		// ForkB: 2 blocks
 | |
| 		//
 | |
| 		// reorged:
 | |
| 		//      markers [3, 10] should be deleted
 | |
| 		//      markers [1, 2] should be updated
 | |
| 		{10, 2},
 | |
| 
 | |
| 		// ForkA: 10 blocks
 | |
| 		// ForkB: 10 blocks
 | |
| 		//
 | |
| 		// reorged:
 | |
| 		//      markers [1, 10] should be updated
 | |
| 		{10, 10},
 | |
| 
 | |
| 		// ForkA: 10 blocks
 | |
| 		// ForkB: 11 blocks
 | |
| 		//
 | |
| 		// reorged:
 | |
| 		//      markers [1, 11] should be updated
 | |
| 		{10, 11},
 | |
| 	}
 | |
| 	for _, c := range cases {
 | |
| 		var (
 | |
| 			gspec = &Genesis{
 | |
| 				Config:  params.TestChainConfig,
 | |
| 				Alloc:   GenesisAlloc{},
 | |
| 				BaseFee: big.NewInt(params.InitialBaseFee),
 | |
| 			}
 | |
| 			engine = ethash.NewFaker()
 | |
| 		)
 | |
| 		_, forkA, _ := GenerateChainWithGenesis(gspec, engine, c.forkA, func(i int, gen *BlockGen) {})
 | |
| 		_, forkB, _ := GenerateChainWithGenesis(gspec, engine, c.forkB, func(i int, gen *BlockGen) {})
 | |
| 
 | |
| 		// Initialize test chain
 | |
| 		chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil)
 | |
| 		if err != nil {
 | |
| 			t.Fatalf("failed to create tester chain: %v", err)
 | |
| 		}
 | |
| 		// Insert forkA and forkB; the canonical chain should still be on forkA
 | |
| 		if n, err := chain.InsertChain(forkA); err != nil {
 | |
| 			t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 		}
 | |
| 		if n, err := chain.InsertChain(forkB); err != nil {
 | |
| 			t.Fatalf("block %d: failed to insert into chain: %v", n, err)
 | |
| 		}
 | |
| 
 | |
| 		verify := func(head *types.Block) {
 | |
| 			if chain.CurrentBlock().Hash() != head.Hash() {
 | |
| 				t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash())
 | |
| 			}
 | |
| 			if chain.CurrentSnapBlock().Hash() != head.Hash() {
 | |
| 				t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentSnapBlock().Hash())
 | |
| 			}
 | |
| 			if chain.CurrentHeader().Hash() != head.Hash() {
 | |
| 				t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash())
 | |
| 			}
 | |
| 			if !chain.HasState(head.Root()) {
 | |
| 				t.Fatalf("Lost block state %v %x", head.Number(), head.Hash())
 | |
| 			}
 | |
| 		}
 | |
| 
 | |
| 		// Switch canonical chain to forkB if necessary
 | |
| 		if len(forkA) < len(forkB) {
 | |
| 			verify(forkB[len(forkB)-1])
 | |
| 		} else {
 | |
| 			verify(forkA[len(forkA)-1])
 | |
| 			chain.SetCanonical(forkB[len(forkB)-1])
 | |
| 			verify(forkB[len(forkB)-1])
 | |
| 		}
 | |
| 
 | |
| 		// Ensure all hash markers are updated correctly
 | |
| 		for i := 0; i < len(forkB); i++ {
 | |
| 			block := forkB[i]
 | |
| 			hash := chain.GetCanonicalHash(block.NumberU64())
 | |
| 			if hash != block.Hash() {
 | |
| 				t.Fatalf("Unexpected canonical hash %d", block.NumberU64())
 | |
| 			}
 | |
| 		}
 | |
| 		if c.forkA > c.forkB {
 | |
| 			for i := uint64(c.forkB) + 1; i <= uint64(c.forkA); i++ {
 | |
| 				hash := chain.GetCanonicalHash(i)
 | |
| 				if hash != (common.Hash{}) {
 | |
| 					t.Fatalf("Unexpected canonical hash %d", i)
 | |
| 				}
 | |
| 			}
 | |
| 		}
 | |
| 		chain.Stop()
 | |
| 	}
 | |
| }
 | |
| 
 | |
| // TestTxIndexer tests the tx indexes are updated correctly.
func TestTxIndexer(t *testing.T) {
	var (
		testBankKey, _  = crypto.GenerateKey()
		testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
		testBankFunds   = big.NewInt(1000000000000000000)

		gspec = &Genesis{
			Config:  params.TestChainConfig,
			Alloc:   GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
			BaseFee: big.NewInt(params.InitialBaseFee),
		}
		engine = ethash.NewFaker()
		nonce  = uint64(0)
	)
	_, blocks, receipts := GenerateChainWithGenesis(gspec, engine, 128, func(i int, gen *BlockGen) {
		tx, _ := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("0xdeadbeef"), big.NewInt(1000), params.TxGas, big.NewInt(10*params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey)
		gen.AddTx(tx)
		nonce += 1
	})
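	// Every generated block carries exactly one transaction from the test bank,
	// so each block number maps to a single expected tx lookup entry.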

	// verifyIndexes checks that the transaction indexes of the specified block
	// are present (exist=true) or absent (exist=false).
	verifyIndexes := func(db ethdb.Database, number uint64, exist bool) {
		if number == 0 {
			return
		}
		block := blocks[number-1]
		for _, tx := range block.Transactions() {
			lookup := rawdb.ReadTxLookupEntry(db, tx.Hash())
			if exist && lookup == nil {
				t.Fatalf("missing %d %x", number, tx.Hash().Hex())
			}
			if !exist && lookup != nil {
				t.Fatalf("unexpected %d %x", number, tx.Hash().Hex())
			}
		}
	}
	// verifyRange runs verifyIndexes for a range of blocks; both from and to are inclusive.
	verifyRange := func(db ethdb.Database, from, to uint64, exist bool) {
		for number := from; number <= to; number += 1 {
			verifyIndexes(db, number, exist)
		}
	}
	verify := func(db ethdb.Database, expTail uint64) {
		tail := rawdb.ReadTxIndexTail(db)
		if tail == nil {
			t.Fatal("Failed to write tx index tail")
		}
		if *tail != expTail {
			t.Fatalf("Unexpected tx index tail, want %v, got %d", expTail, *tail)
		}
		if *tail != 0 {
			verifyRange(db, 0, *tail-1, false)
		}
		verifyRange(db, *tail, 128, true)
	}
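
	// With head = 128, a non-zero tx lookup limit keeps the most recent `limit`
	// blocks indexed, so the expected tail is max(0, 129-limit); a limit of zero
	// disables the limit and everything stays indexed.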
	var cases = []struct {
		limitA uint64
		tailA  uint64
		limitB uint64
		tailB  uint64
		limitC uint64
		tailC  uint64
	}{
		{
			// LimitA: 0
			// TailA:  0
			//
			// all blocks are indexed
			limitA: 0,
			tailA:  0,

			// LimitB: 1
			// TailB:  128
			//
			// only block 128 is indexed
			limitB: 1,
			tailB:  128,

			// LimitC: 64
			// TailC:  65
			//
			// blocks [65, 128] are indexed
			limitC: 64,
			tailC:  65,
		},
		{
			// LimitA: 64
			// TailA:  65
			//
			// blocks [65, 128] are indexed
			limitA: 64,
			tailA:  65,

			// LimitB: 1
			// TailB:  128
			//
			// only block 128 is indexed
			limitB: 1,
			tailB:  128,

			// LimitC: 64
			// TailC:  65
			//
			// blocks [65, 128] are indexed
			limitC: 64,
			tailC:  65,
		},
		{
			// LimitA: 127
			// TailA:  2
			//
			// blocks [2, 128] are indexed
			limitA: 127,
			tailA:  2,

			// LimitB: 1
			// TailB:  128
			//
			// only block 128 is indexed
			limitB: 1,
			tailB:  128,

			// LimitC: 64
			// TailC:  65
			//
			// blocks [65, 128] are indexed
			limitC: 64,
			tailC:  65,
		},
		{
			// LimitA: 128
			// TailA:  1
			//
			// blocks [1, 128] are indexed
			limitA: 128,
			tailA:  1,

			// LimitB: 1
			// TailB:  128
			//
			// only block 128 is indexed
			limitB: 1,
			tailB:  128,

			// LimitC: 64
			// TailC:  65
			//
			// blocks [65, 128] are indexed
			limitC: 64,
			tailC:  65,
		},
		{
			// LimitA: 129
			// TailA:  0
			//
			// blocks [0, 128] are indexed
			limitA: 129,
			tailA:  0,

			// LimitB: 1
			// TailB:  128
			//
			// only block 128 is indexed
			limitB: 1,
			tailB:  128,

			// LimitC: 64
			// TailC:  65
			//
			// blocks [65, 128] are indexed
			limitC: 64,
			tailC:  65,
		},
	}
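	// Each case re-runs the indexer three times (limitA, limitB, limitC) against
	// the same frozen chain, then resets the limit to zero to recover the full
	// index.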
	for _, c := range cases {
		frdir := t.TempDir()
		db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
		rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0))
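		// The genesis and all 128 blocks (with an empty receipt list for the
		// genesis) are written straight into the freezer, so the indexer below
		// operates on the ancient store.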

		// Index the initial blocks from ancient store
		chain, _ := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, &c.limitA)
		chain.indexBlocks(nil, 128, make(chan struct{}))
		verify(db, c.tailA)

		chain.SetTxLookupLimit(c.limitB)
		chain.indexBlocks(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}))
		verify(db, c.tailB)

		chain.SetTxLookupLimit(c.limitC)
		chain.indexBlocks(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}))
		verify(db, c.tailC)

		// Recover all indexes
		chain.SetTxLookupLimit(0)
		chain.indexBlocks(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}))
		verify(db, 0)

		chain.Stop()
		db.Close()
		os.RemoveAll(frdir)
	}
}

func TestCreateThenDeletePreByzantium(t *testing.T) {
	// We deliberately use a Ropsten-like chain config rather than the test chain
	// config: we want pre-Byzantium rules, where intermediate state roots are
	// computed between transactions.
	testCreateThenDelete(t, &params.ChainConfig{
		ChainID:        big.NewInt(3),
		HomesteadBlock: big.NewInt(0),
		EIP150Block:    big.NewInt(0),
		EIP155Block:    big.NewInt(10),
		EIP158Block:    big.NewInt(10),
		ByzantiumBlock: big.NewInt(1_700_000),
	})
}
func TestCreateThenDeletePostByzantium(t *testing.T) {
	testCreateThenDelete(t, params.TestChainConfig)
}

// testCreateThenDelete tests a creation and subsequent deletion of a contract, happening
// within the same block.
func testCreateThenDelete(t *testing.T, config *params.ChainConfig) {
	var (
		engine = ethash.NewFaker()
		// A sender who makes transactions, has some funds
		key, _      = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address     = crypto.PubkeyToAddress(key.PublicKey)
		destAddress = crypto.CreateAddress(address, 0)
		funds       = big.NewInt(1000000000000000)
	)

	// The runtime code is 0x60ffff: PUSH1 0xFF, SELFDESTRUCT, a.k.a. SELFDESTRUCT(0xFF)
	code := append([]byte{0x60, 0xff, 0xff}, make([]byte, 32-3)...)
	initCode := []byte{
		// SSTORE 1:1
		byte(vm.PUSH1), 0x1,
		byte(vm.PUSH1), 0x1,
		byte(vm.SSTORE),
		// Get the runtime-code on the stack
		byte(vm.PUSH32)}
	initCode = append(initCode, code...)
	initCode = append(initCode, []byte{
		byte(vm.PUSH1), 0x0, // offset
		byte(vm.MSTORE),
		byte(vm.PUSH1), 0x3, // size
		byte(vm.PUSH1), 0x0, // offset
		byte(vm.RETURN), // return the 3 bytes of runtime code
	}...)
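	// The constructor stores 1 at slot 1 and returns the 3-byte runtime code
	// above. Each generated block contains a contract creation followed by a
	// call to destAddress, so in the first block the contract is created and
	// then SELFDESTRUCTed within the same block.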
	gspec := &Genesis{
		Config: config,
		Alloc: GenesisAlloc{
			address: {Balance: funds},
		},
	}
	nonce := uint64(0)
	signer := types.HomesteadSigner{}
	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2, func(i int, b *BlockGen) {
		fee := big.NewInt(1)
		if b.header.BaseFee != nil {
			fee = b.header.BaseFee
		}
		b.SetCoinbase(common.Address{1})
		tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{
			Nonce:    nonce,
			GasPrice: new(big.Int).Set(fee),
			Gas:      100000,
			Data:     initCode,
		})
		nonce++
		b.AddTx(tx)
		tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{
			Nonce:    nonce,
			GasPrice: new(big.Int).Set(fee),
			Gas:      100000,
			To:       &destAddress,
		})
		b.AddTx(tx)
		nonce++
	})
	// Import the canonical chain
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{
		//Debug:  true,
		//Tracer: logger.NewJSONLogger(nil, os.Stdout),
	}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	// Import the blocks
	for _, block := range blocks {
		if _, err := chain.InsertChain([]*types.Block{block}); err != nil {
			t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err)
		}
	}
}

// TestTransientStorageReset ensures the transient storage is wiped correctly
// between transactions.
func TestTransientStorageReset(t *testing.T) {
	var (
		engine      = ethash.NewFaker()
		key, _      = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		address     = crypto.PubkeyToAddress(key.PublicKey)
		destAddress = crypto.CreateAddress(address, 0)
		funds       = big.NewInt(1000000000000000)
		vmConfig    = vm.Config{
			ExtraEips: []int{1153}, // Enable transient storage EIP
		}
	)
	code := append([]byte{
		// TLoad value with location 1
		byte(vm.PUSH1), 0x1,
		byte(vm.TLOAD),

		// PUSH location
		byte(vm.PUSH1), 0x1,

		// SStore location:value
		byte(vm.SSTORE),
	}, make([]byte, 32-6)...)
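	// The runtime code above is SSTORE(1, TLOAD(1)), zero-padded to 32 bytes so
	// the constructor can push it with a single PUSH32.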
	initCode := []byte{
		// TSTORE 1:1
		byte(vm.PUSH1), 0x1,
		byte(vm.PUSH1), 0x1,
		byte(vm.TSTORE),

		// Get the runtime-code on the stack
		byte(vm.PUSH32)}
	initCode = append(initCode, code...)
	initCode = append(initCode, []byte{
		byte(vm.PUSH1), 0x0, // offset
		byte(vm.MSTORE),
		byte(vm.PUSH1), 0x6, // size
		byte(vm.PUSH1), 0x0, // offset
		byte(vm.RETURN), // return the 6 bytes of runtime code
	}...)
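	// The constructor TSTOREs 1 at transient slot 1 and returns the runtime code.
	// The second transaction then calls the deployed contract; since transient
	// storage must be reset between transactions, its TLOAD should return zero
	// and the persistent slot checked at the end of the test must stay empty.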
	gspec := &Genesis{
		Config: params.TestChainConfig,
		Alloc: GenesisAlloc{
			address: {Balance: funds},
		},
	}
	nonce := uint64(0)
	signer := types.HomesteadSigner{}
	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
		fee := big.NewInt(1)
		if b.header.BaseFee != nil {
			fee = b.header.BaseFee
		}
		b.SetCoinbase(common.Address{1})
		tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{
			Nonce:    nonce,
			GasPrice: new(big.Int).Set(fee),
			Gas:      100000,
			Data:     initCode,
		})
		nonce++
		b.AddTxWithVMConfig(tx, vmConfig)

		tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{
			Nonce:    nonce,
			GasPrice: new(big.Int).Set(fee),
			Gas:      100000,
			To:       &destAddress,
		})
		b.AddTxWithVMConfig(tx, vmConfig)
		nonce++
	})

	// Initialize the blockchain with 1153 enabled.
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vmConfig, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	// Import the blocks
	if _, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("failed to insert into chain: %v", err)
	}
	// Check the storage
	state, err := chain.StateAt(chain.CurrentHeader().Root)
	if err != nil {
		t.Fatalf("Failed to load state %v", err)
	}
	loc := common.BytesToHash([]byte{1})
	slot := state.GetState(destAddress, loc)
	if slot != (common.Hash{}) {
		t.Fatalf("Unexpected dirty storage slot")
	}
}

func TestEIP3651(t *testing.T) {
	var (
		aa     = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
		bb     = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
		engine = beacon.NewFaker()

		// A sender who makes transactions, has some funds
		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
		key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
		addr2   = crypto.PubkeyToAddress(key2.PublicKey)
		funds   = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether))
		config  = *params.AllEthashProtocolChanges
		gspec   = &Genesis{
			Config: &config,
			Alloc: GenesisAlloc{
				addr1: {Balance: funds},
				addr2: {Balance: funds},
				// The address 0xAAAA sloads 0x00 and 0x01
				aa: {
					Code: []byte{
						byte(vm.PC),
						byte(vm.PC),
						byte(vm.SLOAD),
						byte(vm.SLOAD),
					},
					Nonce:   0,
					Balance: big.NewInt(0),
				},
				// The address 0xBBBB calls 0xAAAA
				bb: {
					Code: []byte{
						byte(vm.PUSH1), 0, // out size
						byte(vm.DUP1),  // out offset
						byte(vm.DUP1),  // in size
						byte(vm.DUP1),  // in offset
						byte(vm.PUSH2), // address
						byte(0xaa),
						byte(0xaa),
						byte(vm.GAS), // gas
						byte(vm.DELEGATECALL),
					},
					Nonce:   0,
					Balance: big.NewInt(0),
				},
			},
		}
	)

	gspec.Config.BerlinBlock = common.Big0
	gspec.Config.LondonBlock = common.Big0
	gspec.Config.TerminalTotalDifficulty = common.Big0
	gspec.Config.TerminalTotalDifficultyPassed = true
	gspec.Config.ShanghaiTime = u64(0)
	signer := types.LatestSigner(gspec.Config)

	_, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) {
		b.SetCoinbase(aa)
		// One transaction to BB, which delegatecalls the coinbase (AA)
		txdata := &types.DynamicFeeTx{
			ChainID:    gspec.Config.ChainID,
			Nonce:      0,
			To:         &bb,
			Gas:        500000,
			GasFeeCap:  newGwei(5),
			GasTipCap:  big.NewInt(2),
			AccessList: nil,
			Data:       []byte{},
		}
		tx := types.NewTx(txdata)
		tx, _ = types.SignTx(tx, signer, key1)

		b.AddTx(tx)
	})
	chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr)}, nil, nil)
	if err != nil {
		t.Fatalf("failed to create tester chain: %v", err)
	}
	if n, err := chain.InsertChain(blocks); err != nil {
		t.Fatalf("block %d: failed to insert into chain: %v", n, err)
	}

	block := chain.GetBlockByNumber(1)

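	// Gas breakdown: the intrinsic TxGas, BB's five 3-gas ops (PUSH1 + 3x DUP1 +
	// PUSH2), GAS (2 gas), the DELEGATECALL address access (100 gas, since the
	// coinbase AA is pre-warmed by EIP-3651 instead of costing the cold 2600),
	// plus AA's two PCs and two cold SLOADs.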
	// 1+2: Ensure the warm coinbase (EIP-3651) is accounted for via gas usage.
	innerGas := vm.GasQuickStep*2 + params.ColdSloadCostEIP2929*2
	expectedGas := params.TxGas + 5*vm.GasFastestStep + vm.GasQuickStep + 100 + innerGas // 100 because the coinbase 0xaaaa is already warm
	if block.GasUsed() != expectedGas {
		t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed())
	}

	state, _ := chain.State()

	// 3: Ensure that miner received only the tx's tip.
	actual := state.GetBalance(block.Coinbase())
	expected := new(big.Int).SetUint64(block.GasUsed() * block.Transactions()[0].GasTipCap().Uint64())
	if actual.Cmp(expected) != 0 {
		t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual)
	}

	// 4: Ensure the tx sender paid for the gasUsed * (tip + block baseFee).
	actual = new(big.Int).Sub(funds, state.GetBalance(addr1))
	expected = new(big.Int).SetUint64(block.GasUsed() * (block.Transactions()[0].GasTipCap().Uint64() + block.BaseFee().Uint64()))
	if actual.Cmp(expected) != 0 {
		t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual)
	}
}