plugeth/tests/block_test_util.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package tests implements execution of Ethereum JSON tests.
package tests
import (
"bytes"
"encoding/hex"
"encoding/json"
"fmt"
"math/big"
"os"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
// A BlockTest checks handling of entire blocks.
type BlockTest struct {
json btJSON
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (t *BlockTest) UnmarshalJSON(in []byte) error {
return json.Unmarshal(in, &t.json)
}
type btJSON struct {
Blocks []btBlock `json:"blocks"`
Genesis btHeader `json:"genesisBlockHeader"`
Pre core.GenesisAlloc `json:"pre"`
Post core.GenesisAlloc `json:"postState"`
BestBlock common.UnprefixedHash `json:"lastblockhash"`
Network string `json:"network"`
SealEngine string `json:"sealEngine"`
}
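
// For orientation, a trimmed, hypothetical fixture illustrating the shape that
// btJSON is decoded from (top-level keys follow the json tags above; the
// addresses and hex payloads below are made up):
//
//	{
//	  "network":            "Berlin",
//	  "sealEngine":         "NoProof",
//	  "lastblockhash":      "<unprefixed 32-byte hash>",
//	  "genesisBlockHeader": { ... },
//	  "pre":                { "<address>": { "balance": "0x0de0b6b3a7640000" } },
//	  "postState":          { "<address>": { "balance": "0x0de0b6b3a7640000" } },
//	  "blocks":             [ { "rlp": "0x...", "blockHeader": { ... } } ]
//	}
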
type btBlock struct {
BlockHeader *btHeader
ExpectException string
Rlp string
UncleHeaders []*btHeader
}
//go:generate go run github.com/fjl/gencodec -type btHeader -field-override btHeaderMarshaling -out gen_btheader.go
type btHeader struct {
Bloom types.Bloom
Coinbase common.Address
MixHash common.Hash
Nonce types.BlockNonce
Number *big.Int
Hash common.Hash
ParentHash common.Hash
ReceiptTrie common.Hash
StateRoot common.Hash
TransactionsTrie common.Hash
UncleHash common.Hash
ExtraData []byte
Difficulty *big.Int
GasLimit uint64
GasUsed uint64
Timestamp uint64
BaseFeePerGas *big.Int
}
type btHeaderMarshaling struct {
ExtraData hexutil.Bytes
Number *math.HexOrDecimal256
Difficulty *math.HexOrDecimal256
GasLimit math.HexOrDecimal64
GasUsed math.HexOrDecimal64
Timestamp math.HexOrDecimal64
BaseFeePerGas *math.HexOrDecimal256
}
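
// btHeaderMarshaling only supplies the field-type overrides consumed by gencodec;
// the generated MarshalJSON/UnmarshalJSON methods for btHeader live in
// gen_btheader.go. A sketch of regenerating it, assuming the gencodec tool is
// resolvable through the module graph, is simply to run the directive above:
//
//	go generate ./tests
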
func (t *BlockTest) Run(snapshotter bool) error {
config, ok := Forks[t.json.Network]
if !ok {
return UnsupportedForkError{t.json.Network}
}
// import pre accounts & construct test genesis block & state root
db := rawdb.NewMemoryDatabase()
gspec := t.genesis(config)
gblock, err := gspec.Commit(db)
if err != nil {
return err
}
if gblock.Hash() != t.json.Genesis.Hash {
return fmt.Errorf("genesis block hash doesn't match test: computed=%x, test=%x", gblock.Hash().Bytes()[:6], t.json.Genesis.Hash[:6])
}
if gblock.Root() != t.json.Genesis.StateRoot {
return fmt.Errorf("genesis block state root does not match test: computed=%x, test=%x", gblock.Root().Bytes()[:6], t.json.Genesis.StateRoot[:6])
}
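// Pick the consensus engine: "NoProof" tests rely on a faked ethash engine that
// accepts every seal, while all other tests use the shared full ethash instance.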
var engine consensus.Engine
if t.json.SealEngine == "NoProof" {
engine = ethash.NewFaker()
} else {
engine = ethash.NewShared()
}
cache := &core.CacheConfig{TrieCleanLimit: 0}
if snapshotter {
cache.SnapshotLimit = 1
cache.SnapshotWait = true
}
chain, err := core.NewBlockChain(db, cache, gspec, nil, engine, vm.Config{}, nil, nil)
if err != nil {
return err
}
defer chain.Stop()
validBlocks, err := t.insertBlocks(chain)
if err != nil {
return err
}
cmlast := chain.CurrentBlock().Hash()
if common.Hash(t.json.BestBlock) != cmlast {
return fmt.Errorf("last block hash validation mismatch: want: %x, have: %x", t.json.BestBlock, cmlast)
}
newDB, err := chain.State()
if err != nil {
return err
}
if err = t.validatePostState(newDB); err != nil {
return fmt.Errorf("post state validation failed: %v", err)
}
// Cross-check the snapshot-to-hash against the trie hash
if snapshotter {
if err := chain.Snapshots().Verify(chain.CurrentBlock().Root()); err != nil {
return err
}
}
return t.validateImportedHeaders(chain, validBlocks)
}
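
// A minimal usage sketch (hypothetical driver, not this package's actual test
// harness): blockchain fixture files map test names to BlockTest objects, so a
// driver can unmarshal a whole file and call Run on each entry, covering both
// the snapshotter and non-snapshotter paths.
//
//	func runBlockTestFile(t *testing.T, path string) {
//	    data, err := os.ReadFile(path)
//	    if err != nil {
//	        t.Fatal(err)
//	    }
//	    var tests map[string]*BlockTest
//	    if err := json.Unmarshal(data, &tests); err != nil {
//	        t.Fatal(err)
//	    }
//	    for name, test := range tests {
//	        for _, snapshotter := range []bool{false, true} {
//	            if err := test.Run(snapshotter); err != nil {
//	                t.Errorf("%s (snapshotter=%v): %v", name, snapshotter, err)
//	            }
//	        }
//	    }
//	}
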
func (t *BlockTest) genesis(config *params.ChainConfig) *core.Genesis {
return &core.Genesis{
Config: config,
Nonce: t.json.Genesis.Nonce.Uint64(),
Timestamp: t.json.Genesis.Timestamp,
ParentHash: t.json.Genesis.ParentHash,
ExtraData: t.json.Genesis.ExtraData,
GasLimit: t.json.Genesis.GasLimit,
GasUsed: t.json.Genesis.GasUsed,
Difficulty: t.json.Genesis.Difficulty,
Mixhash: t.json.Genesis.MixHash,
Coinbase: t.json.Genesis.Coinbase,
Alloc: t.json.Pre,
BaseFee: t.json.Genesis.BaseFeePerGas,
}
}
/* See https://github.com/ethereum/tests/wiki/Blockchain-Tests-II

   Whether a block is valid or not is a bit subtle: it is defined by the presence
   of the blockHeader, transactions and uncleHeaders fields. If they are missing,
   the block is invalid and we must verify that we do not accept it.

   Since some tests mix valid and invalid blocks, we need to check this for every block.

   If a block is invalid it does not necessarily fail the test: if its invalidity
   is expected, we ignore it, continue processing, and then validate the post state.
*/
func (t *BlockTest) insertBlocks(blockchain *core.BlockChain) ([]btBlock, error) {
validBlocks := make([]btBlock, 0)
// insert the test blocks, which will execute all transactions
for bi, b := range t.json.Blocks {
cb, err := b.decode()
if err != nil {
if b.BlockHeader == nil {
continue // OK - block is supposed to be invalid, continue with next block
} else {
return nil, fmt.Errorf("block RLP decoding failed when expected to succeed: %v", err)
}
}
// RLP decoding worked, try to insert into chain:
blocks := types.Blocks{cb}
i, err := blockchain.InsertChain(blocks)
if err != nil {
if b.BlockHeader == nil {
continue // OK - block is supposed to be invalid, continue with next block
} else {
return nil, fmt.Errorf("block #%v insertion into chain failed: %v", blocks[i].Number(), err)
}
}
if b.BlockHeader == nil {
if data, err := json.MarshalIndent(cb.Header(), "", " "); err == nil {
fmt.Fprintf(os.Stderr, "block (index %d) insertion should have failed due to: %v:\n%v\n",
bi, b.ExpectException, string(data))
}
return nil, fmt.Errorf("block (index %d) insertion should have failed due to: %v",
bi, b.ExpectException)
}
// validate RLP decoding by checking all values against test file JSON
if err = validateHeader(b.BlockHeader, cb.Header()); err != nil {
return nil, fmt.Errorf("deserialised block header validation failed: %v", err)
}
validBlocks = append(validBlocks, b)
}
return validBlocks, nil
}
func validateHeader(h *btHeader, h2 *types.Header) error {
if h.Bloom != h2.Bloom {
return fmt.Errorf("bloom: want: %x have: %x", h.Bloom, h2.Bloom)
}
if h.Coinbase != h2.Coinbase {
return fmt.Errorf("coinbase: want: %x have: %x", h.Coinbase, h2.Coinbase)
}
if h.MixHash != h2.MixDigest {
return fmt.Errorf("MixHash: want: %x have: %x", h.MixHash, h2.MixDigest)
}
if h.Nonce != h2.Nonce {
return fmt.Errorf("nonce: want: %x have: %x", h.Nonce, h2.Nonce)
}
if h.Number.Cmp(h2.Number) != 0 {
return fmt.Errorf("number: want: %v have: %v", h.Number, h2.Number)
}
if h.ParentHash != h2.ParentHash {
return fmt.Errorf("parent hash: want: %x have: %x", h.ParentHash, h2.ParentHash)
}
if h.ReceiptTrie != h2.ReceiptHash {
return fmt.Errorf("receipt hash: want: %x have: %x", h.ReceiptTrie, h2.ReceiptHash)
}
if h.TransactionsTrie != h2.TxHash {
return fmt.Errorf("tx hash: want: %x have: %x", h.TransactionsTrie, h2.TxHash)
}
if h.StateRoot != h2.Root {
return fmt.Errorf("state hash: want: %x have: %x", h.StateRoot, h2.Root)
}
if h.UncleHash != h2.UncleHash {
return fmt.Errorf("uncle hash: want: %x have: %x", h.UncleHash, h2.UncleHash)
}
if !bytes.Equal(h.ExtraData, h2.Extra) {
return fmt.Errorf("extra data: want: %x have: %x", h.ExtraData, h2.Extra)
}
if h.Difficulty.Cmp(h2.Difficulty) != 0 {
return fmt.Errorf("difficulty: want: %v have: %v", h.Difficulty, h2.Difficulty)
}
if h.GasLimit != h2.GasLimit {
return fmt.Errorf("gasLimit: want: %d have: %d", h.GasLimit, h2.GasLimit)
}
if h.GasUsed != h2.GasUsed {
return fmt.Errorf("gasUsed: want: %d have: %d", h.GasUsed, h2.GasUsed)
}
if h.Timestamp != h2.Time {
return fmt.Errorf("timestamp: want: %v have: %v", h.Timestamp, h2.Time)
}
return nil
}
func (t *BlockTest) validatePostState(statedb *state.StateDB) error {
// validate post state accounts in test file against what we have in state db
for addr, acct := range t.json.Post {
// address is indirectly verified by the other fields, as it's the db key
code2 := statedb.GetCode(addr)
balance2 := statedb.GetBalance(addr)
nonce2 := statedb.GetNonce(addr)
if !bytes.Equal(code2, acct.Code) {
return fmt.Errorf("account code mismatch for addr: %s want: %v have: %s", addr, acct.Code, hex.EncodeToString(code2))
}
if balance2.Cmp(acct.Balance) != 0 {
return fmt.Errorf("account balance mismatch for addr: %s, want: %d, have: %d", addr, acct.Balance, balance2)
}
if nonce2 != acct.Nonce {
return fmt.Errorf("account nonce mismatch for addr: %s want: %d have: %d", addr, acct.Nonce, nonce2)
}
}
return nil
}
func (t *BlockTest) validateImportedHeaders(cm *core.BlockChain, validBlocks []btBlock) error {
// build a map for constant-time lookup when verifying block headers by hash (some tests have many blocks)
bmap := make(map[common.Hash]btBlock, len(t.json.Blocks))
for _, b := range validBlocks {
bmap[b.BlockHeader.Hash] = b
}
// Iterate over blocks backwards from HEAD and validate the imported headers
// against the test file. Some tests have reorgs, and we import block-by-block,
// so we can only validate imported headers after all blocks have been processed
// by BlockChain, as they may not be part of the longest chain until the last
// block is imported.
for b := cm.CurrentBlock(); b != nil && b.NumberU64() != 0; b = cm.GetBlockByHash(b.Header().ParentHash) {
if err := validateHeader(bmap[b.Hash()].BlockHeader, b.Header()); err != nil {
return fmt.Errorf("imported block header validation failed: %v", err)
}
}
return nil
}
func (bb *btBlock) decode() (*types.Block, error) {
data, err := hexutil.Decode(bb.Rlp)
if err != nil {
return nil, err
}
var b types.Block
err = rlp.DecodeBytes(data, &b)
return &b, err
}