Merge remote-tracking branch 'origin/develop' into feature/etc-plugin

philip-morlier 2023-09-28 12:45:24 -07:00
commit 31ada7d3e3
657 changed files with 30846 additions and 24626 deletions

View File

@ -6,7 +6,7 @@ version: 2.1
jobs:
test:
docker:
- image: cimg/go:1.19.4
- image: cimg/go:1.20
steps:
- checkout
- run:
@ -38,7 +38,7 @@ jobs:
command: go test ./core/rawdb/
build_geth_push:
docker: # run the steps with Docker
- image: cimg/go:1.19.4 # ...with this image as the primary container
- image: cimg/go:1.20 # ...with this image as the primary container
# this is where all `steps` will run
steps:
- checkout

View File

@ -13,7 +13,7 @@ jobs:
- stage: lint
os: linux
dist: bionic
go: 1.20.x
go: 1.21.x
env:
- lint
git:
@ -28,7 +28,7 @@ jobs:
os: linux
arch: amd64
dist: bionic
go: 1.20.x
go: 1.21.x
env:
- docker
services:
@ -45,7 +45,7 @@ jobs:
os: linux
arch: arm64
dist: bionic
go: 1.20.x
go: 1.21.x
env:
- docker
services:
@ -63,10 +63,9 @@ jobs:
os: linux
dist: bionic
sudo: required
go: 1.20.x
go: 1.21.x
env:
- azure-linux
- GO111MODULE=on
git:
submodules: false # avoid cloning ethereum/tests
addons:
@ -97,10 +96,9 @@ jobs:
- stage: build
if: type = push
os: osx
go: 1.20.x
go: 1.21.x
env:
- azure-osx
- GO111MODULE=on
git:
submodules: false # avoid cloning ethereum/tests
script:
@ -112,41 +110,34 @@ jobs:
os: linux
arch: amd64
dist: bionic
go: 1.20.x
env:
- GO111MODULE=on
go: 1.21.x
script:
- go run build/ci.go test $TEST_PACKAGES
- travis_wait 30 go run build/ci.go test $TEST_PACKAGES
- stage: build
if: type = pull_request
os: linux
arch: arm64
dist: bionic
go: 1.19.x
env:
- GO111MODULE=on
go: 1.20.x
script:
- go run build/ci.go test $TEST_PACKAGES
- travis_wait 30 go run build/ci.go test $TEST_PACKAGES
- stage: build
os: linux
dist: bionic
go: 1.19.x
env:
- GO111MODULE=on
go: 1.20.x
script:
- go run build/ci.go test $TEST_PACKAGES
- travis_wait 30 go run build/ci.go test $TEST_PACKAGES
# This builder does the Ubuntu PPA nightly uploads
- stage: build
if: type = cron || (type = push && tag ~= /^v[0-9]/)
os: linux
dist: bionic
go: 1.20.x
go: 1.21.x
env:
- ubuntu-ppa
- GO111MODULE=on
git:
submodules: false # avoid cloning ethereum/tests
addons:
@ -167,10 +158,9 @@ jobs:
if: type = cron
os: linux
dist: bionic
go: 1.20.x
go: 1.21.x
env:
- azure-purge
- GO111MODULE=on
git:
submodules: false # avoid cloning ethereum/tests
script:
@ -181,9 +171,7 @@ jobs:
if: type = cron
os: linux
dist: bionic
go: 1.20.x
env:
- GO111MODULE=on
go: 1.21.x
script:
- go run build/ci.go test -race $TEST_PACKAGES
- travis_wait 30 go run build/ci.go test -race $TEST_PACKAGES

View File

@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""
# Build Geth in a stock Go builder container
FROM golang:1.20-alpine as builder
FROM golang:1.21-alpine as builder
RUN apk add --no-cache gcc musl-dev binutils-gold linux-headers git

View File

@ -6,7 +6,7 @@
GOBIN = ./build/bin
GO ?= latest
GORUN = env GO111MODULE=on go run
GORUN = go run
geth:
$(GORUN) build/ci.go install ./cmd/geth
@ -23,7 +23,7 @@ lint: ## Run linters.
$(GORUN) build/ci.go lint
clean:
env GO111MODULE=on go clean -cache
go clean -cache
rm -fr build/_workspace/pkg/ $(GOBIN)/*
# The devtools target installs tools required for 'go generate'.

View File

@ -22,6 +22,7 @@ import (
"errors"
"fmt"
"io"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
@ -246,17 +247,37 @@ func (abi *ABI) HasReceive() bool {
// revertSelector is a special function selector for revert reason unpacking.
var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4]
// panicSelector is a special function selector for panic reason unpacking.
var panicSelector = crypto.Keccak256([]byte("Panic(uint256)"))[:4]
// panicReasons maps the defined panic codes to readable reasons.
// See this link for the details:
// https://docs.soliditylang.org/en/v0.8.21/control-structures.html#panic-via-assert-and-error-via-require
// The reason strings are copied from ethers.js:
// https://github.com/ethers-io/ethers.js/blob/fa3a883ff7c88611ce766f58bdd4b8ac90814470/src.ts/abi/interface.ts#L207-L218
var panicReasons = map[uint64]string{
0x00: "generic panic",
0x01: "assert(false)",
0x11: "arithmetic underflow or overflow",
0x12: "division or modulo by zero",
0x21: "enum overflow",
0x22: "invalid encoded storage byte array accessed",
0x31: "out-of-bounds array access; popping on an empty array",
0x32: "out-of-bounds access of an array or bytesN",
0x41: "out of memory",
0x51: "uninitialized function",
}
// UnpackRevert resolves the abi-encoded revert reason. According to the solidity
// spec https://solidity.readthedocs.io/en/latest/control-structures.html#revert,
// the provided revert reason is abi-encoded as if it were a call to a function
// `Error(string)`. So it's a special tool for it.
// the provided revert reason is abi-encoded as if it were a call to function
// `Error(string)` or `Panic(uint256)`. This function decodes exactly those two cases.
func UnpackRevert(data []byte) (string, error) {
if len(data) < 4 {
return "", errors.New("invalid data for unpacking")
}
if !bytes.Equal(data[:4], revertSelector) {
return "", errors.New("invalid data for unpacking")
}
switch {
case bytes.Equal(data[:4], revertSelector):
typ, err := NewType("string", "", nil)
if err != nil {
return "", err
@ -266,4 +287,25 @@ func UnpackRevert(data []byte) (string, error) {
return "", err
}
return unpacked[0].(string), nil
case bytes.Equal(data[:4], panicSelector):
typ, err := NewType("uint256", "", nil)
if err != nil {
return "", err
}
unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:])
if err != nil {
return "", err
}
pCode := unpacked[0].(*big.Int)
// uint64 safety check for the future;
// currently no panic code is bigger than MaxUint64
if pCode.IsUint64() {
if reason, ok := panicReasons[pCode.Uint64()]; ok {
return reason, nil
}
}
return fmt.Sprintf("unknown panic code: %#x", pCode), nil
default:
return "", errors.New("invalid data for unpacking")
}
}
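
For illustration, a minimal standalone program (hypothetical, not part of this diff) exercising the new Panic(uint256) path through abi.UnpackRevert:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Selector for Panic(uint256), matching panicSelector above: 0x4e487b71.
	selector := crypto.Keccak256([]byte("Panic(uint256)"))[:4]
	// ABI-encode panic code 0x12 as a 32-byte big-endian word.
	payload := append(selector, make([]byte, 31)...)
	payload = append(payload, 0x12)
	reason, err := abi.UnpackRevert(payload)
	fmt.Println(reason, err) // division or modulo by zero <nil>
}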

View File

@ -1173,6 +1173,8 @@ func TestUnpackRevert(t *testing.T) {
{"", "", errors.New("invalid data for unpacking")},
{"08c379a1", "", errors.New("invalid data for unpacking")},
{"08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d72657665727420726561736f6e00000000000000000000000000000000000000", "revert reason", nil},
{"4e487b710000000000000000000000000000000000000000000000000000000000000000", "generic panic", nil},
{"4e487b7100000000000000000000000000000000000000000000000000000000000000ff", "unknown panic code: 0xff", nil},
}
for index, c := range cases {
t.Run(fmt.Sprintf("case %d", index), func(t *testing.T) {

View File

@ -29,7 +29,7 @@ import (
var (
// ErrNoCode is returned by call and transact operations for which the requested
// recipient contract to operate on does not exist in the state db or does not
// have any code associated with it (i.e. suicided).
// have any code associated with it (i.e. self-destructed).
ErrNoCode = errors.New("no contract code at given address")
// ErrNoPendingState is raised when attempting to perform a pending state action

View File

@ -199,7 +199,6 @@ func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address,
if err != nil {
return nil, err
}
return stateDB.GetCode(contract), nil
}
@ -212,7 +211,6 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres
if err != nil {
return nil, err
}
return stateDB.GetBalance(contract), nil
}
@ -225,7 +223,6 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address,
if err != nil {
return 0, err
}
return stateDB.GetNonce(contract), nil
}
@ -238,7 +235,6 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
if err != nil {
return nil, err
}
val := stateDB.GetState(contract, key)
return val[:], nil
}
@ -681,7 +677,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
// Get the last block
block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash())
if err != nil {
return fmt.Errorf("could not fetch parent")
return errors.New("could not fetch parent")
}
// Check transaction validity
signer := types.MakeSigner(b.blockchain.Config(), block.Number(), block.Time())
@ -700,8 +696,10 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
}
block.AddTxWithChain(b.blockchain, tx)
})
stateDB, _ := b.blockchain.State()
stateDB, err := b.blockchain.State()
if err != nil {
return err
}
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
b.pendingReceipts = receipts[0]
@ -815,17 +813,18 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
// Get the last block
block := b.blockchain.GetBlockByHash(b.pendingBlock.ParentHash())
if block == nil {
return fmt.Errorf("could not find parent")
return errors.New("could not find parent")
}
blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
block.OffsetTime(int64(adjustment.Seconds()))
})
stateDB, _ := b.blockchain.State()
stateDB, err := b.blockchain.State()
if err != nil {
return err
}
b.pendingBlock = blocks[0]
b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
return nil
}
@ -892,7 +891,7 @@ func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (typ
}
func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) {
logs := rawdb.ReadLogs(fb.db, hash, number, fb.bc.Config())
logs := rawdb.ReadLogs(fb.db, hash, number)
return logs, nil
}

View File

@ -161,6 +161,7 @@ func TestAdjustTime(t *testing.T) {
func TestNewAdjustTimeFail(t *testing.T) {
testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
sim := simTestBackend(testAddr)
defer sim.blockchain.Stop()
// Create tx and send
head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough

View File

@ -127,11 +127,12 @@ func NewMethod(name string, rawName string, funType FunctionType, mutability str
state = state + " "
}
identity := fmt.Sprintf("function %v", rawName)
if funType == Fallback {
switch funType {
case Fallback:
identity = "fallback"
} else if funType == Receive {
case Receive:
identity = "receive"
} else if funType == Constructor {
case Constructor:
identity = "constructor"
}
str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", "))

View File

@ -84,11 +84,12 @@ func TestMethodString(t *testing.T) {
for _, test := range table {
var got string
if test.method == "fallback" {
switch test.method {
case "fallback":
got = abi.Fallback.String()
} else if test.method == "receive" {
case "receive":
got = abi.Receive.String()
} else {
default:
got = abi.Methods[test.method].String()
}
if got != test.expectation {

View File

@ -228,7 +228,7 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri
structFieldName := ToCamelCase(argName)
if structFieldName == "" {
return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
return nil, errors.New("abi: purely underscored output cannot unpack to struct")
}
// this abi has already been paired, skip it... unless there exists another, yet unassigned
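
This fmt.Errorf-to-errors.New swap recurs throughout the merge wherever the message is constant; fmt.Errorf remains wherever arguments are formatted or wrapped. A minimal standalone sketch of the distinction (illustrative, not part of this diff):

package main

import (
	"errors"
	"fmt"
)

func main() {
	// errors.New: constant message, no format-string parsing at runtime.
	errConst := errors.New("abi: purely underscored output cannot unpack to struct")
	// fmt.Errorf: still the right tool when formatting or wrapping (%w).
	errWrapped := fmt.Errorf("invalid UUID: %w", errConst)
	fmt.Println(errors.Is(errWrapped, errConst)) // true
}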

View File

@ -17,6 +17,7 @@
package abi
import (
"errors"
"fmt"
)
@ -40,7 +41,7 @@ func isIdentifierSymbol(c byte) bool {
func parseToken(unescapedSelector string, isIdent bool) (string, string, error) {
if len(unescapedSelector) == 0 {
return "", "", fmt.Errorf("empty token")
return "", "", errors.New("empty token")
}
firstChar := unescapedSelector[0]
position := 1
@ -110,7 +111,7 @@ func parseCompositeType(unescapedSelector string) ([]interface{}, string, error)
func parseType(unescapedSelector string) (interface{}, string, error) {
if len(unescapedSelector) == 0 {
return nil, "", fmt.Errorf("empty type")
return nil, "", errors.New("empty type")
}
if unescapedSelector[0] == '(' {
return parseCompositeType(unescapedSelector)

View File

@ -70,7 +70,7 @@ var (
func NewType(t string, internalType string, components []ArgumentMarshaling) (typ Type, err error) {
// check that array brackets are equal if they exist
if strings.Count(t, "[") != strings.Count(t, "]") {
return Type{}, fmt.Errorf("invalid arg type in abi")
return Type{}, errors.New("invalid arg type in abi")
}
typ.stringKind = t
@ -109,7 +109,7 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
}
typ.stringKind = embeddedType.stringKind + sliced
} else {
return Type{}, fmt.Errorf("invalid formatting of array type")
return Type{}, errors.New("invalid formatting of array type")
}
return typ, err
}
@ -348,7 +348,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) {
}
}
// requireLengthPrefix returns whether the type requires any sort of length
// requiresLengthPrefix returns whether the type requires any sort of length
// prefixing.
func (t Type) requiresLengthPrefix() bool {
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy

View File

@ -18,6 +18,7 @@ package abi
import (
"encoding/binary"
"errors"
"fmt"
"math"
"math/big"
@ -125,7 +126,7 @@ func readBool(word []byte) (bool, error) {
// readFunctionType enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes)
func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
if t.T != FunctionTy {
return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array")
return [24]byte{}, errors.New("abi: invalid type in call to make function type byte array")
}
if garbage := binary.BigEndian.Uint64(word[24:32]); garbage != 0 {
err = fmt.Errorf("abi: got improperly encoded function type, got %v", word)
@ -138,7 +139,7 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
// ReadFixedBytes uses reflection to create a fixed array to be read from.
func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
if t.T != FixedBytesTy {
return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array")
return nil, errors.New("abi: invalid type in call to make fixed byte array")
}
// convert
array := reflect.New(t.GetType()).Elem()
@ -159,14 +160,15 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
// this value will become our slice or our array, depending on the type
var refSlice reflect.Value
if t.T == SliceTy {
switch t.T {
case SliceTy:
// declare our slice
refSlice = reflect.MakeSlice(t.GetType(), size, size)
} else if t.T == ArrayTy {
case ArrayTy:
// declare our array
refSlice = reflect.New(t.GetType()).Elem()
} else {
return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage")
default:
return nil, errors.New("abi: invalid type in array/slice unpacking stage")
}
// Arrays have packed elements, resulting in longer unpack steps.

View File

@ -17,6 +17,7 @@
package external
import (
"errors"
"fmt"
"math/big"
"sync"
@ -98,11 +99,11 @@ func (api *ExternalSigner) Status() (string, error) {
}
func (api *ExternalSigner) Open(passphrase string) error {
return fmt.Errorf("operation not supported on external signers")
return errors.New("operation not supported on external signers")
}
func (api *ExternalSigner) Close() error {
return fmt.Errorf("operation not supported on external signers")
return errors.New("operation not supported on external signers")
}
func (api *ExternalSigner) Accounts() []accounts.Account {
@ -145,7 +146,7 @@ func (api *ExternalSigner) Contains(account accounts.Account) bool {
}
func (api *ExternalSigner) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) {
return accounts.Account{}, fmt.Errorf("operation not supported on external signers")
return accounts.Account{}, errors.New("operation not supported on external signers")
}
func (api *ExternalSigner) SelfDerive(bases []accounts.DerivationPath, chain ethereum.ChainStateReader) {
@ -242,14 +243,14 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
}
func (api *ExternalSigner) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) {
return []byte{}, fmt.Errorf("password-operations not supported on external signers")
return []byte{}, errors.New("password-operations not supported on external signers")
}
func (api *ExternalSigner) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
return nil, fmt.Errorf("password-operations not supported on external signers")
return nil, errors.New("password-operations not supported on external signers")
}
func (api *ExternalSigner) SignDataWithPassphrase(account accounts.Account, passphrase, mimeType string, data []byte) ([]byte, error) {
return nil, fmt.Errorf("password-operations not supported on external signers")
return nil, errors.New("password-operations not supported on external signers")
}
func (api *ExternalSigner) listAccounts() ([]common.Address, error) {

View File

@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"golang.org/x/exp/slices"
)
// Minimum amount of time between cache reloads. This limit applies if the platform does
@ -38,11 +39,10 @@ import (
// exist yet, the code will attempt to create a watcher at most this often.
const minReloadInterval = 2 * time.Second
type accountsByURL []accounts.Account
func (s accountsByURL) Len() int { return len(s) }
func (s accountsByURL) Less(i, j int) bool { return s[i].URL.Cmp(s[j].URL) < 0 }
func (s accountsByURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// byURL defines the sorting order for accounts.
func byURL(a, b accounts.Account) int {
return a.URL.Cmp(b.URL)
}
// AmbiguousAddrError is returned when attempting to unlock
// an address for which more than one file exists.
@ -67,7 +67,7 @@ type accountCache struct {
keydir string
watcher *watcher
mu sync.Mutex
all accountsByURL
all []accounts.Account
byAddr map[common.Address][]accounts.Account
throttle *time.Timer
notify chan struct{}
@ -194,7 +194,7 @@ func (ac *accountCache) find(a accounts.Account) (accounts.Account, error) {
default:
err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]accounts.Account, len(matches))}
copy(err.Matches, matches)
sort.Sort(accountsByURL(err.Matches))
slices.SortFunc(err.Matches, byURL)
return accounts.Account{}, err
}
}
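
The comparator-based API above replaces the sort.Interface boilerplate. A small standalone sketch of the same pattern (assuming the cmp-style golang.org/x/exp/slices used with Go 1.21, where the comparator returns a negative/zero/positive int; not part of this diff):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

type account struct{ url string }

// byURL mirrors the comparator above: negative when a sorts before b,
// zero when equal, positive when after (the cmp-style contract).
func byURL(a, b account) int {
	return strings.Compare(a.url, b.url)
}

func main() {
	accs := []account{{"keystore:///b.json"}, {"keystore:///a.json"}}
	slices.SortFunc(accs, byURL)
	fmt.Println(accs) // [{keystore:///a.json} {keystore:///b.json}]
}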

View File

@ -17,12 +17,12 @@
package keystore
import (
"errors"
"fmt"
"math/rand"
"os"
"path/filepath"
"reflect"
"sort"
"testing"
"time"
@ -30,6 +30,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"golang.org/x/exp/slices"
)
var (
@ -74,7 +75,7 @@ func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
select {
case <-ks.changes:
default:
return fmt.Errorf("wasn't notified of new accounts")
return errors.New("wasn't notified of new accounts")
}
return nil
}
@ -202,7 +203,7 @@ func TestCacheAddDeleteOrder(t *testing.T) {
// Check that the account list is sorted by filename.
wantAccounts := make([]accounts.Account, len(accs))
copy(wantAccounts, accs)
sort.Sort(accountsByURL(wantAccounts))
slices.SortFunc(wantAccounts, byURL)
list := cache.accounts()
if !reflect.DeepEqual(list, wantAccounts) {
t.Fatalf("got accounts: %s\nwant %s", spew.Sdump(accs), spew.Sdump(wantAccounts))

View File

@ -20,7 +20,6 @@ import (
"math/rand"
"os"
"runtime"
"sort"
"strings"
"sync"
"sync/atomic"
@ -31,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event"
"golang.org/x/exp/slices"
)
var testSigData = make([]byte, 32)
@ -397,19 +397,19 @@ func TestImportRace(t *testing.T) {
t.Fatalf("failed to export account: %v", acc)
}
_, ks2 := tmpKeyStore(t, true)
var atom uint32
var atom atomic.Uint32
var wg sync.WaitGroup
wg.Add(2)
for i := 0; i < 2; i++ {
go func() {
defer wg.Done()
if _, err := ks2.Import(json, "new", "new"); err != nil {
atomic.AddUint32(&atom, 1)
atom.Add(1)
}
}()
}
wg.Wait()
if atom != 1 {
if atom.Load() != 1 {
t.Errorf("Import is racy")
}
}
@ -424,7 +424,7 @@ func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, walle
for _, account := range live {
liveList = append(liveList, account)
}
sort.Sort(accountsByURL(liveList))
slices.SortFunc(liveList, byURL)
for j, wallet := range wallets {
if accs := wallet.Accounts(); len(accs) != 1 {
t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs))

View File

@ -225,10 +225,13 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
if err != nil {
return nil, err
}
key := crypto.ToECDSAUnsafe(keyBytes)
key, err := crypto.ToECDSA(keyBytes)
if err != nil {
return nil, fmt.Errorf("invalid key: %w", err)
}
id, err := uuid.FromBytes(keyId)
if err != nil {
return nil, err
return nil, fmt.Errorf("invalid UUID: %w", err)
}
return &Key{
Id: id,

View File

@ -20,6 +20,7 @@
package keystore
import (
"os"
"time"
"github.com/ethereum/go-ethereum/log"
@ -77,7 +78,9 @@ func (w *watcher) loop() {
}
defer watcher.Close()
if err := watcher.Add(w.ac.keydir); err != nil {
if !os.IsNotExist(err) {
logger.Warn("Failed to watch keystore folder", "err", err)
}
return
}

View File

@ -24,6 +24,7 @@ import (
"crypto/rand"
"crypto/sha256"
"crypto/sha512"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/crypto"
@ -125,7 +126,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
// Unpair disestablishes an existing pairing.
func (s *SecureChannelSession) Unpair() error {
if s.PairingKey == nil {
return fmt.Errorf("cannot unpair: not paired")
return errors.New("cannot unpair: not paired")
}
_, err := s.transmitEncrypted(claSCWallet, insUnpair, s.PairingIndex, 0, []byte{})
@ -141,7 +142,7 @@ func (s *SecureChannelSession) Unpair() error {
// Open initializes the secure channel.
func (s *SecureChannelSession) Open() error {
if s.iv != nil {
return fmt.Errorf("session already opened")
return errors.New("session already opened")
}
response, err := s.open()
@ -215,7 +216,7 @@ func (s *SecureChannelSession) pair(p1 uint8, data []byte) (*responseAPDU, error
// transmitEncrypted sends an encrypted message, and decrypts and returns the response.
func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []byte) (*responseAPDU, error) {
if s.iv == nil {
return nil, fmt.Errorf("channel not open")
return nil, errors.New("channel not open")
}
data, err := s.encryptAPDU(data)
@ -254,7 +255,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
return nil, err
}
if !bytes.Equal(s.iv, rmac) {
return nil, fmt.Errorf("invalid MAC in response")
return nil, errors.New("invalid MAC in response")
}
rapdu := &responseAPDU{}
@ -319,7 +320,7 @@ func unpad(data []byte, terminator byte) ([]byte, error) {
return nil, fmt.Errorf("expected end of padding, got %d", data[len(data)-i])
}
}
return nil, fmt.Errorf("expected end of padding, got 0")
return nil, errors.New("expected end of padding, got 0")
}
// updateIV is an internal method that updates the initialization vector after

View File

@ -252,7 +252,7 @@ func (w *Wallet) release() error {
// with the wallet.
func (w *Wallet) pair(puk []byte) error {
if w.session.paired() {
return fmt.Errorf("wallet already paired")
return errors.New("wallet already paired")
}
pairing, err := w.session.pair(puk)
if err != nil {
@ -813,7 +813,7 @@ func (s *Session) pair(secret []byte) (smartcardPairing, error) {
// unpair deletes an existing pairing.
func (s *Session) unpair() error {
if !s.verified {
return fmt.Errorf("unpair requires that the PIN be verified")
return errors.New("unpair requires that the PIN be verified")
}
return s.Channel.Unpair()
}
@ -907,7 +907,7 @@ func (s *Session) initialize(seed []byte) error {
return err
}
if status == "Online" {
return fmt.Errorf("card is already initialized, cowardly refusing to proceed")
return errors.New("card is already initialized, cowardly refusing to proceed")
}
s.Wallet.lock.Lock()

View File

@ -65,7 +65,7 @@ type Hub struct {
// TODO(karalabe): remove if hotplug lands on Windows
commsPend int // Number of operations blocking enumeration
commsLock sync.Mutex // Lock protecting the pending counter and enumeration
enumFails uint32 // Number of times enumeration has failed
enumFails atomic.Uint32 // Number of times enumeration has failed
}
// NewLedgerHub creates a new hardware wallet manager for Ledger devices.
@ -151,7 +151,7 @@ func (hub *Hub) refreshWallets() {
return
}
// If USB enumeration is continually failing, don't keep trying indefinitely
if atomic.LoadUint32(&hub.enumFails) > 2 {
if hub.enumFails.Load() > 2 {
return
}
// Retrieve the current list of USB wallet devices
@ -172,7 +172,7 @@ func (hub *Hub) refreshWallets() {
}
infos, err := usb.Enumerate(hub.vendorID, 0)
if err != nil {
failcount := atomic.AddUint32(&hub.enumFails, 1)
failcount := hub.enumFails.Add(1)
if runtime.GOOS == "linux" {
// See rationale before the enumeration why this is needed and only on Linux.
hub.commsLock.Unlock()
@ -181,7 +181,7 @@ func (hub *Hub) refreshWallets() {
"vendor", hub.vendorID, "failcount", failcount, "err", err)
return
}
atomic.StoreUint32(&hub.enumFails, 0)
hub.enumFails.Store(0)
for _, info := range infos {
for _, id := range hub.productIDs {

View File

@ -624,7 +624,7 @@ func (w *wallet) SignTx(account accounts.Account, tx *types.Transaction, chainID
return signed, nil
}
// SignHashWithPassphrase implements accounts.Wallet, however signing arbitrary
// SignTextWithPassphrase implements accounts.Wallet, however signing arbitrary
// data is not supported for Ledger wallets, so this method will always return
// an error.
func (w *wallet) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) {

View File

@ -80,6 +80,7 @@ var (
InvalidPayloadAttributes = &EngineAPIError{code: -38003, msg: "Invalid payload attributes"}
TooLargeRequest = &EngineAPIError{code: -38004, msg: "Too large request"}
InvalidParams = &EngineAPIError{code: -32602, msg: "Invalid parameters"}
UnsupportedFork = &EngineAPIError{code: -38005, msg: "Unsupported fork"}
STATUS_INVALID = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: INVALID}, PayloadID: nil}
STATUS_SYNCING = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: SYNCING}, PayloadID: nil}

View File

@ -20,12 +20,14 @@ func (p PayloadAttributes) MarshalJSON() ([]byte, error) {
Random common.Hash `json:"prevRandao" gencodec:"required"`
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"`
}
var enc PayloadAttributes
enc.Timestamp = hexutil.Uint64(p.Timestamp)
enc.Random = p.Random
enc.SuggestedFeeRecipient = p.SuggestedFeeRecipient
enc.Withdrawals = p.Withdrawals
enc.BeaconRoot = p.BeaconRoot
return json.Marshal(&enc)
}
@ -36,6 +38,7 @@ func (p *PayloadAttributes) UnmarshalJSON(input []byte) error {
Random *common.Hash `json:"prevRandao" gencodec:"required"`
SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"`
}
var dec PayloadAttributes
if err := json.Unmarshal(input, &dec); err != nil {
@ -56,5 +59,8 @@ func (p *PayloadAttributes) UnmarshalJSON(input []byte) error {
if dec.Withdrawals != nil {
p.Withdrawals = dec.Withdrawals
}
if dec.BeaconRoot != nil {
p.BeaconRoot = dec.BeaconRoot
}
return nil
}
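
A hypothetical round-trip showing how the new optional field rides along in JSON (assumes the beacon/engine package these generated marshalers belong to; not part of this diff):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	root := common.HexToHash("0x01")
	attrs := engine.PayloadAttributes{
		Timestamp:             1695000000,
		SuggestedFeeRecipient: common.HexToAddress("0xdeadbeef00000000000000000000000000000000"),
		BeaconRoot:            &root, // new field; emitted as "parentBeaconBlockRoot"
	}
	out, _ := json.Marshal(&attrs)
	fmt.Println(string(out))
}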

View File

@ -32,6 +32,8 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
}
var enc ExecutableData
enc.ParentHash = e.ParentHash
@ -54,6 +56,8 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
}
}
enc.Withdrawals = e.Withdrawals
enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed)
enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas)
return json.Marshal(&enc)
}
@ -75,6 +79,8 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"`
ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"`
}
var dec ExecutableData
if err := json.Unmarshal(input, &dec); err != nil {
@ -142,5 +148,11 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
if dec.Withdrawals != nil {
e.Withdrawals = dec.Withdrawals
}
if dec.BlobGasUsed != nil {
e.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
}
if dec.ExcessBlobGas != nil {
e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
return nil
}

View File

@ -17,10 +17,14 @@ func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) {
type ExecutionPayloadEnvelope struct {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Override bool `json:"shouldOverrideBuilder"`
}
var enc ExecutionPayloadEnvelope
enc.ExecutionPayload = e.ExecutionPayload
enc.BlockValue = (*hexutil.Big)(e.BlockValue)
enc.BlobsBundle = e.BlobsBundle
enc.Override = e.Override
return json.Marshal(&enc)
}
@ -29,6 +33,8 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
type ExecutionPayloadEnvelope struct {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Override *bool `json:"shouldOverrideBuilder"`
}
var dec ExecutionPayloadEnvelope
if err := json.Unmarshal(input, &dec); err != nil {
@ -42,5 +48,11 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
return errors.New("missing required field 'blockValue' for ExecutionPayloadEnvelope")
}
e.BlockValue = (*big.Int)(dec.BlockValue)
if dec.BlobsBundle != nil {
e.BlobsBundle = dec.BlobsBundle
}
if dec.Override != nil {
e.Override = *dec.Override
}
return nil
}

View File

@ -35,6 +35,7 @@ type PayloadAttributes struct {
Random common.Hash `json:"prevRandao" gencodec:"required"`
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BeaconRoot *common.Hash `json:"parentBeaconBlockRoot"`
}
// JSON type overrides for PayloadAttributes.
@ -61,6 +62,8 @@ type ExecutableData struct {
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions [][]byte `json:"transactions" gencodec:"required"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
BlobGasUsed *uint64 `json:"blobGasUsed"`
ExcessBlobGas *uint64 `json:"excessBlobGas"`
}
// JSON type overrides for executableData.
@ -73,6 +76,8 @@ type executableDataMarshaling struct {
ExtraData hexutil.Bytes
LogsBloom hexutil.Bytes
Transactions []hexutil.Bytes
BlobGasUsed *hexutil.Uint64
ExcessBlobGas *hexutil.Uint64
}
//go:generate go run github.com/fjl/gencodec -type ExecutionPayloadEnvelope -field-override executionPayloadEnvelopeMarshaling -out gen_epe.go
@ -80,6 +85,14 @@ type executableDataMarshaling struct {
type ExecutionPayloadEnvelope struct {
ExecutionPayload *ExecutableData `json:"executionPayload" gencodec:"required"`
BlockValue *big.Int `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Override bool `json:"shouldOverrideBuilder"`
}
type BlobsBundleV1 struct {
Commitments []hexutil.Bytes `json:"commitments"`
Proofs []hexutil.Bytes `json:"proofs"`
Blobs []hexutil.Bytes `json:"blobs"`
}
// JSON type overrides for ExecutionPayloadEnvelope.
@ -155,11 +168,12 @@ func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
// len(extraData) <= 32
// uncleHash = emptyUncleHash
// difficulty = 0
// if versionedHashes != nil, versionedHashes match the blob transactions in the payload
//
// and that the blockhash of the constructed block matches the parameters. A nil
// Withdrawals value will propagate through the returned block. An empty
// Withdrawals value must be passed as a non-nil, length-0 slice in params.
func ExecutableDataToBlock(params ExecutableData) (*types.Block, error) {
func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (*types.Block, error) {
txs, err := decodeTransactions(params.Transactions)
if err != nil {
return nil, err
@ -174,6 +188,18 @@ func ExecutableDataToBlock(params ExecutableData) (*types.Block, error) {
if params.BaseFeePerGas != nil && (params.BaseFeePerGas.Sign() == -1 || params.BaseFeePerGas.BitLen() > 256) {
return nil, fmt.Errorf("invalid baseFeePerGas: %v", params.BaseFeePerGas)
}
var blobHashes []common.Hash
for _, tx := range txs {
blobHashes = append(blobHashes, tx.BlobHashes()...)
}
if len(blobHashes) != len(versionedHashes) {
return nil, fmt.Errorf("invalid number of versionedHashes: %v blobHashes: %v", versionedHashes, blobHashes)
}
for i := 0; i < len(blobHashes); i++ {
if blobHashes[i] != versionedHashes[i] {
return nil, fmt.Errorf("invalid versionedHash at %v: %v blobHashes: %v", i, versionedHashes, blobHashes)
}
}
// Only set withdrawalsRoot if it is non-nil. This allows CLs to use
// ExecutableData before withdrawals are enabled by marshaling
// Withdrawals as the json null value.
@ -199,6 +225,9 @@ func ExecutableDataToBlock(params ExecutableData) (*types.Block, error) {
Extra: params.ExtraData,
MixDigest: params.Random,
WithdrawalsHash: withdrawalsRoot,
ExcessBlobGas: params.ExcessBlobGas,
BlobGasUsed: params.BlobGasUsed,
ParentBeaconRoot: beaconRoot,
}
block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */).WithWithdrawals(params.Withdrawals)
if block.Hash() != params.BlockHash {
@ -209,7 +238,7 @@ func ExecutableDataToBlock(params ExecutableData) (*types.Block, error) {
// BlockToExecutableData constructs the ExecutableData structure by filling the
// fields from the given block. It assumes the given block is a post-merge block.
func BlockToExecutableData(block *types.Block, fees *big.Int) *ExecutionPayloadEnvelope {
func BlockToExecutableData(block *types.Block, fees *big.Int, sidecars []*types.BlobTxSidecar) *ExecutionPayloadEnvelope {
data := &ExecutableData{
BlockHash: block.Hash(),
ParentHash: block.ParentHash(),
@ -226,8 +255,22 @@ func BlockToExecutableData(block *types.Block, fees *big.Int) *ExecutionPayloadE
Random: block.MixDigest(),
ExtraData: block.Extra(),
Withdrawals: block.Withdrawals(),
BlobGasUsed: block.BlobGasUsed(),
ExcessBlobGas: block.ExcessBlobGas(),
}
return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees}
bundle := BlobsBundleV1{
Commitments: make([]hexutil.Bytes, 0),
Blobs: make([]hexutil.Bytes, 0),
Proofs: make([]hexutil.Bytes, 0),
}
for _, sidecar := range sidecars {
for j := range sidecar.Blobs {
bundle.Blobs = append(bundle.Blobs, hexutil.Bytes(sidecar.Blobs[j][:]))
bundle.Commitments = append(bundle.Commitments, hexutil.Bytes(sidecar.Commitments[j][:]))
bundle.Proofs = append(bundle.Proofs, hexutil.Bytes(sidecar.Proofs[j][:]))
}
}
return &ExecutionPayloadEnvelope{ExecutionPayload: data, BlockValue: fees, BlobsBundle: &bundle, Override: false}
}
// ExecutionPayloadBodyV1 is used in the response to GetPayloadBodiesByHashV1 and GetPayloadBodiesByRangeV1
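
For callers, the widened ExecutableDataToBlock signature means the consensus-layer-supplied versioned hashes and beacon root now travel with the payload and are validated against its blob transactions. A hypothetical call site (names assumed, not from this diff):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/common"
)

// assembleBlock sketches a consensus-client call site: the versioned hashes
// come from the CL's blob sidecars and must match the payload's blob
// transactions, or the payload is rejected.
func assembleBlock(payload engine.ExecutableData, hashes []common.Hash, beaconRoot *common.Hash) error {
	block, err := engine.ExecutableDataToBlock(payload, hashes, beaconRoot)
	if err != nil {
		return fmt.Errorf("payload rejected: %w", err)
	}
	fmt.Println("assembled block", block.Hash())
	return nil
}

func main() {
	// An empty payload fails fast (block hash mismatch), illustrating the error path.
	fmt.Println(assembleBlock(engine.ExecutableData{}, nil, nil))
}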

View File

@ -1,20 +1,25 @@
# This file contains sha256 checksums of optional build dependencies.
e447b498cde50215c4f7619e5124b0fc4e25fb5d16ea47271c47f278e7aa763a go1.20.3.src.tar.gz
c1e1161d6d859deb576e6cfabeb40e3d042ceb1c6f444f617c3c9d76269c3565 go1.20.3.darwin-amd64.tar.gz
86b0ed0f2b2df50fa8036eea875d1cf2d76cefdacf247c44639a1464b7e36b95 go1.20.3.darwin-arm64.tar.gz
340e80abd047c597fdc0f50a6cc59617f06c297d62f7fc77f4a0164e2da6f7aa go1.20.3.freebsd-386.tar.gz
2169fcd8b6c94c5fbe07c0b470ccfb6001d343f6548ad49f3d9ab78e3b5753c7 go1.20.3.freebsd-amd64.tar.gz
e12384311403f1389d14cc1c1295bfb4e0dd5ab919403b80da429f671a223507 go1.20.3.linux-386.tar.gz
979694c2c25c735755bf26f4f45e19e64e4811d661dd07b8c010f7a8e18adfca go1.20.3.linux-amd64.tar.gz
eb186529f13f901e7a2c4438a05c2cd90d74706aaa0a888469b2a4a617b6ee54 go1.20.3.linux-arm64.tar.gz
b421e90469a83671641f81b6e20df6500f033e9523e89cbe7b7223704dd1035c go1.20.3.linux-armv6l.tar.gz
943c89aa1624ea544a022b31e3d6e16a037200e436370bdd5fd67f3fa60be282 go1.20.3.linux-ppc64le.tar.gz
126cf823a5634ef2544b866db107b9d351d3ea70d9e240b0bdcfb46f4dcae54b go1.20.3.linux-s390x.tar.gz
37e9146e1f9d681cfcaa6fee6c7b890c44c64bc50228c9588f3c4231346d33bd go1.20.3.windows-386.zip
143a2837821c7dbacf7744cbb1a8421c1f48307c6fdfaeffc5f8c2f69e1b7932 go1.20.3.windows-amd64.zip
158cb159e00bc979f473e0f5b5a561613129c5e51067967b72b8e072e5a4db81 go1.20.3.windows-arm64.zip
# https://github.com/ethereum/execution-spec-tests/releases
24bac679f3a2d8240d8e08e7f6a70b70c2dabf673317d924cf1d1887b9fe1f81 fixtures.tar.gz
# https://go.dev/dl/
bfa36bf75e9a1e9cbbdb9abcf9d1707e479bd3a07880a8ae3564caee5711cb99 go1.21.1.src.tar.gz
809f5b0ef4f7dcdd5f51e9630a5b2e5a1006f22a047126d61560cdc365678a19 go1.21.1.darwin-amd64.tar.gz
ffd40391a1e995855488b008ad9326ff8c2e81803a6e80894401003bae47fcf1 go1.21.1.darwin-arm64.tar.gz
9919a9a4dc82371aba3da5b7c830bcb6249fc1502cd26d959eb340a60e41ee01 go1.21.1.freebsd-386.tar.gz
2571f10f6047e04d87c1f5986a05e5e8f7b511faf98803ef12b66d563845d2a1 go1.21.1.freebsd-amd64.tar.gz
b93850666cdadbd696a986cf7b03111fe99db8c34a9aaa113d7c96d0081e1901 go1.21.1.linux-386.tar.gz
b3075ae1ce5dab85f89bc7905d1632de23ca196bd8336afd93fa97434cfa55ae go1.21.1.linux-amd64.tar.gz
7da1a3936a928fd0b2602ed4f3ef535b8cd1990f1503b8d3e1acc0fa0759c967 go1.21.1.linux-arm64.tar.gz
f3716a43f59ae69999841d6007b42c9e286e8d8ce470656fb3e70d7be2d7ca85 go1.21.1.linux-armv6l.tar.gz
eddf018206f8a5589bda75252b72716d26611efebabdca5d0083ec15e9e41ab7 go1.21.1.linux-ppc64le.tar.gz
a83b3e8eb4dbf76294e773055eb51397510ff4d612a247bad9903560267bba6d go1.21.1.linux-s390x.tar.gz
170256c820f466f29d64876f25f4dfa4029ed9902a0a9095d8bd603aecf4d83b go1.21.1.windows-386.zip
10a4f5b63215d11d1770453733dbcbf024f3f74872f84e28d7ea59f0250316c6 go1.21.1.windows-amd64.zip
41135ce6e0ced4bc1e459cb96bd4090c9dc2062e24179c3f337d855af9b560ef go1.21.1.windows-arm64.zip
# https://github.com/golangci/golangci-lint/releases
fba08acc4027f69f07cef48fbff70b8a7ecdfaa1c2aba9ad3fb31d60d9f5d4bc golangci-lint-1.51.1-darwin-amd64.tar.gz
75b8f0ff3a4e68147156be4161a49d4576f1be37a0b506473f8c482140c1e7f2 golangci-lint-1.51.1-darwin-arm64.tar.gz
e06b3459aaed356e1667580be00b05f41f3b2e29685d12cdee571c23e1edb414 golangci-lint-1.51.1-freebsd-386.tar.gz

View File

@ -120,14 +120,14 @@ var (
// Distros for which packages are created.
// Note: vivid is unsupported because there is no golang-1.6 package for it.
// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
// wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsute, impish
// wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsute, impish,
// kinetic
debDistroGoBoots = map[string]string{
"trusty": "golang-1.11", // EOL: 04/2024
"xenial": "golang-go", // EOL: 04/2026
"bionic": "golang-go", // EOL: 04/2028
"focal": "golang-go", // EOL: 04/2030
"jammy": "golang-go", // EOL: 04/2032
"kinetic": "golang-go", // EOL: 07/2023
"lunar": "golang-go", // EOL: 01/2024
}
@ -139,7 +139,7 @@ var (
// This is the version of Go that will be downloaded by
//
// go run ci.go install -dlgo
dlgoVersion = "1.20.3"
dlgoVersion = "1.21.1"
// This is the version of Go that will be used to bootstrap the PPA builder.
//
@ -148,6 +148,13 @@ var (
// we need to switch over to a recursive builder to jump across supported
// versions.
gobootVersion = "1.19.6"
// This is the version of execution-spec-tests that we are using.
// When updating, you must also update build/checksums.txt.
executionSpecTestsVersion = "1.0.2"
// This is where the tests should be unpacked.
executionSpecTestsDir = "tests/spec-tests"
)
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@ -200,6 +207,7 @@ func doInstall(cmdline []string) {
staticlink = flag.Bool("static", false, "Create statically-linked executable")
)
flag.CommandLine.Parse(cmdline)
env := build.Env()
// Configure the toolchain.
tc := build.GoToolchain{GOARCH: *arch, CC: *cc}
@ -207,12 +215,16 @@ func doInstall(cmdline []string) {
csdb := build.MustLoadChecksums("build/checksums.txt")
tc.Root = build.DownloadGo(csdb, dlgoVersion)
}
// Disable CLI markdown doc generation in release builds and enable linking
// the CKZG library since we can make it portable here.
buildTags := []string{"urfave_cli_no_docs", "ckzg"}
// Disable CLI markdown doc generation in release builds.
buildTags := []string{"urfave_cli_no_docs"}
// Enable linking the CKZG library since we can make it work with additional flags.
if env.UbuntuVersion != "trusty" {
buildTags = append(buildTags, "ckzg")
}
// Configure the build.
env := build.Env()
gobuild := tc.Go("build", buildFlags(env, *staticlink, buildTags)...)
// arm64 CI builders are memory-constrained and can't handle concurrent builds,
@ -289,16 +301,26 @@ func doTest(cmdline []string) {
coverage = flag.Bool("coverage", false, "Whether to record code coverage")
verbose = flag.Bool("v", false, "Whether to log verbosely")
race = flag.Bool("race", false, "Execute the race detector")
cachedir = flag.String("cachedir", "./build/cache", "directory for caching downloads")
)
flag.CommandLine.Parse(cmdline)
// Get test fixtures.
csdb := build.MustLoadChecksums("build/checksums.txt")
downloadSpecTestFixtures(csdb, *cachedir)
// Configure the toolchain.
tc := build.GoToolchain{GOARCH: *arch, CC: *cc}
if *dlgo {
csdb := build.MustLoadChecksums("build/checksums.txt")
tc.Root = build.DownloadGo(csdb, dlgoVersion)
}
gotest := tc.Go("test", "-tags=ckzg")
gotest := tc.Go("test")
// CI needs a bit more time for the statetests (default 10m).
gotest.Args = append(gotest.Args, "-timeout=20m")
// Enable CKZG backend in CI.
gotest.Args = append(gotest.Args, "-tags=ckzg")
// Test a single package at a time. CI builders are slow
// and some tests run into timeouts under load.
@ -321,6 +343,21 @@ func doTest(cmdline []string) {
build.MustRun(gotest)
}
// downloadSpecTestFixtures downloads and extracts the execution-spec-tests fixtures.
func downloadSpecTestFixtures(csdb *build.ChecksumDB, cachedir string) string {
ext := ".tar.gz"
base := "fixtures" // TODO(MariusVanDerWijden) rename once the version becomes part of the filename
url := fmt.Sprintf("https://github.com/ethereum/execution-spec-tests/releases/download/v%s/%s%s", executionSpecTestsVersion, base, ext)
archivePath := filepath.Join(cachedir, base+ext)
if err := csdb.DownloadFile(url, archivePath); err != nil {
log.Fatal(err)
}
if err := build.ExtractArchive(archivePath, executionSpecTestsDir); err != nil {
log.Fatal(err)
}
return filepath.Join(cachedir, base)
}
// doLint runs golangci-lint on requested packages.
func doLint(cmdline []string) {
var (

View File

@ -28,7 +28,7 @@ override_dh_auto_build:
mv .mod $(GOPATH)/pkg/mod
# A fresh Go was built, all dependency downloads faked, hope build works now
../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}} -ubuntu {{.Distro}}
override_dh_auto_test:

View File

@ -46,12 +46,13 @@ import (
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"text/template"
"time"
"golang.org/x/exp/slices"
)
var (
@ -152,13 +153,6 @@ func (i info) gpl() bool {
return false
}
// authors implements the sort.Interface for strings in case-insensitive mode.
type authors []string
func (as authors) Len() int { return len(as) }
func (as authors) Less(i, j int) bool { return strings.ToLower(as[i]) < strings.ToLower(as[j]) }
func (as authors) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
func main() {
var (
files = getFiles()
@ -299,7 +293,9 @@ func writeAuthors(files []string) {
}
}
// Write sorted list of authors back to the file.
sort.Sort(authors(list))
slices.SortFunc(list, func(a, b string) int {
return strings.Compare(strings.ToLower(a), strings.ToLower(b))
})
content := new(bytes.Buffer)
content.WriteString(authorsFileHeader)
for _, a := range list {

View File

@ -23,6 +23,7 @@ import (
"fmt"
"net"
"os"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/crypto"
@ -107,21 +108,20 @@ func main() {
if err != nil {
utils.Fatalf("-ListenUDP: %v", err)
}
realaddr := conn.LocalAddr().(*net.UDPAddr)
if natm != nil {
if !realaddr.IP.IsLoopback() {
go nat.Map(natm, nil, "udp", realaddr.Port, realaddr.Port, "ethereum discovery")
}
if ext, err := natm.ExternalIP(); err == nil {
realaddr = &net.UDPAddr{IP: ext, Port: realaddr.Port}
}
}
printNotice(&nodeKey.PublicKey, *realaddr)
defer conn.Close()
db, _ := enode.OpenDB("")
ln := enode.NewLocalNode(db, nodeKey)
listenerAddr := conn.LocalAddr().(*net.UDPAddr)
if natm != nil && !listenerAddr.IP.IsLoopback() {
natAddr := doPortMapping(natm, ln, listenerAddr)
if natAddr != nil {
listenerAddr = natAddr
}
}
printNotice(&nodeKey.PublicKey, *listenerAddr)
cfg := discover.Config{
PrivateKey: nodeKey,
NetRestrict: restrictList,
@ -148,3 +148,61 @@ func printNotice(nodeKey *ecdsa.PublicKey, addr net.UDPAddr) {
fmt.Println("Note: you're using cmd/bootnode, a developer tool.")
fmt.Println("We recommend using a regular node as bootstrap node for production deployments.")
}
func doPortMapping(natm nat.Interface, ln *enode.LocalNode, addr *net.UDPAddr) *net.UDPAddr {
const (
protocol = "udp"
name = "ethereum discovery"
)
newLogger := func(external int, internal int) log.Logger {
return log.New("proto", protocol, "extport", external, "intport", internal, "interface", natm)
}
var (
intport = addr.Port
extaddr = &net.UDPAddr{IP: addr.IP, Port: addr.Port}
mapTimeout = nat.DefaultMapTimeout
log = newLogger(addr.Port, intport)
)
addMapping := func() {
// Get the external address.
var err error
extaddr.IP, err = natm.ExternalIP()
if err != nil {
log.Debug("Couldn't get external IP", "err", err)
return
}
// Create the mapping.
p, err := natm.AddMapping(protocol, extaddr.Port, intport, name, mapTimeout)
if err != nil {
log.Debug("Couldn't add port mapping", "err", err)
return
}
if p != uint16(extaddr.Port) {
extaddr.Port = int(p)
log = newLogger(extaddr.Port, intport)
log.Info("NAT mapped alternative port")
} else {
log.Info("NAT mapped port")
}
// Update IP/port information of the local node.
ln.SetStaticIP(extaddr.IP)
ln.SetFallbackUDP(extaddr.Port)
}
// Perform mapping once, synchronously.
log.Info("Attempting port mapping")
addMapping()
// Refresh the mapping periodically.
go func() {
refresh := time.NewTimer(mapTimeout)
defer refresh.Stop()
for range refresh.C {
addMapping()
refresh.Reset(mapTimeout)
}
}()
return extaddr
}

View File

@ -29,7 +29,7 @@ GLOBAL OPTIONS:
--loglevel value log level to emit to the screen (default: 4)
--keystore value Directory for the keystore (default: "$HOME/.ethereum/keystore")
--configdir value Directory for Clef configuration (default: "$HOME/.clef")
--chainid value Chain id to use for signing (1=mainnet, 4=Rinkeby, 5=Goerli) (default: 1)
--chainid value Chain id to use for signing (1=mainnet, 5=Goerli) (default: 1)
--lightkdf Reduce key-derivation RAM & CPU usage at some expense of KDF strength
--nousb Disables monitoring for and managing USB hardware wallets
--pcscdpath value Path to the smartcard daemon (pcscd) socket file (default: "/run/pcscd/pcscd.comm")

View File

@ -27,6 +27,7 @@ import (
"fmt"
"io"
"math/big"
"net"
"os"
"os/signal"
"path/filepath"
@ -99,7 +100,7 @@ var (
chainIdFlag = &cli.Int64Flag{
Name: "chainid",
Value: params.MainnetChainConfig.ChainID.Int64(),
Usage: "Chain id to use for signing (1=mainnet, 4=Rinkeby, 5=Goerli)",
Usage: "Chain id to use for signing (1=mainnet, 5=Goerli)",
}
rpcPortFlag = &cli.IntFlag{
Name: "http.port",
@ -732,6 +733,7 @@ func signer(c *cli.Context) error {
cors := utils.SplitAndTrim(c.String(utils.HTTPCORSDomainFlag.Name))
srv := rpc.NewServer()
srv.SetBatchLimits(node.DefaultConfig.BatchRequestLimit, node.DefaultConfig.BatchResponseMaxSize)
err := node.RegisterApis(rpcAPI, []string{"account"}, srv)
if err != nil {
utils.Fatalf("Could not register API: %w", err)
@ -742,7 +744,7 @@ func signer(c *cli.Context) error {
port := c.Int(rpcPortFlag.Name)
// start http server
httpEndpoint := fmt.Sprintf("%s:%d", c.String(utils.HTTPListenAddrFlag.Name), port)
httpEndpoint := net.JoinHostPort(c.String(utils.HTTPListenAddrFlag.Name), fmt.Sprintf("%d", port))
httpServer, addr, err := node.StartHTTPEndpoint(httpEndpoint, rpc.DefaultHTTPTimeouts, handler)
if err != nil {
utils.Fatalf("Could not start RPC api: %v", err)

View File

@ -44,7 +44,7 @@ set to standard output. The following filters are supported:
- `-limit <N>` limits the output set to N entries, taking the top N nodes by score
- `-ip <CIDR>` filters nodes by IP subnet
- `-min-age <duration>` filters nodes by 'first seen' time
- `-eth-network <mainnet/rinkeby/goerli/sepolia>` filters nodes by "eth" ENR entry
- `-eth-network <mainnet/goerli/sepolia/holesky>` filters nodes by "eth" ENR entry
- `-les-server` filters nodes by LES server support
- `-snap` filters nodes by snap protocol support

View File

@ -17,6 +17,7 @@
package main
import (
"errors"
"sync"
"sync/atomic"
"time"
@ -51,7 +52,14 @@ type resolver interface {
RequestENR(*enode.Node) (*enode.Node, error)
}
func newCrawler(input nodeSet, disc resolver, iters ...enode.Iterator) *crawler {
func newCrawler(input nodeSet, bootnodes []*enode.Node, disc resolver, iters ...enode.Iterator) (*crawler, error) {
if len(input) == 0 {
input.add(bootnodes...)
}
if len(input) == 0 {
return nil, errors.New("no input nodes to start crawling")
}
c := &crawler{
input: input,
output: make(nodeSet, len(input)),
@ -67,7 +75,7 @@ func newCrawler(input nodeSet, disc resolver, iters ...enode.Iterator) *crawler
for id, n := range input {
c.output[id] = n
}
return c
return c, nil
}
func (c *crawler) run(timeout time.Duration, nthreads int) nodeSet {
@ -87,11 +95,11 @@ func (c *crawler) run(timeout time.Duration, nthreads int) nodeSet {
go c.runIterator(doneCh, it)
}
var (
added uint64
updated uint64
skipped uint64
recent uint64
removed uint64
added atomic.Uint64
updated atomic.Uint64
skipped atomic.Uint64
recent atomic.Uint64
removed atomic.Uint64
wg sync.WaitGroup
)
wg.Add(nthreads)
@ -103,15 +111,15 @@ func (c *crawler) run(timeout time.Duration, nthreads int) nodeSet {
case n := <-c.ch:
switch c.updateNode(n) {
case nodeSkipIncompat:
atomic.AddUint64(&skipped, 1)
skipped.Add(1)
case nodeSkipRecent:
atomic.AddUint64(&recent, 1)
recent.Add(1)
case nodeRemoved:
atomic.AddUint64(&removed, 1)
removed.Add(1)
case nodeAdded:
atomic.AddUint64(&added, 1)
added.Add(1)
default:
atomic.AddUint64(&updated, 1)
updated.Add(1)
}
case <-c.closed:
return
@ -138,11 +146,11 @@ loop:
break loop
case <-statusTicker.C:
log.Info("Crawling in progress",
"added", atomic.LoadUint64(&added),
"updated", atomic.LoadUint64(&updated),
"removed", atomic.LoadUint64(&removed),
"ignored(recent)", atomic.LoadUint64(&recent),
"ignored(incompatible)", atomic.LoadUint64(&skipped))
"added", added.Load(),
"updated", updated.Load(),
"removed", removed.Load(),
"ignored(recent)", recent.Load(),
"ignored(incompatible)", skipped.Load())
}
}

View File

@ -143,7 +143,7 @@ var discoveryNodeFlags = []cli.Flag{
func discv4Ping(ctx *cli.Context) error {
n := getNodeArg(ctx)
disc := startV4(ctx)
disc, _ := startV4(ctx)
defer disc.Close()
start := time.Now()
@ -156,7 +156,7 @@ func discv4Ping(ctx *cli.Context) error {
func discv4RequestRecord(ctx *cli.Context) error {
n := getNodeArg(ctx)
disc := startV4(ctx)
disc, _ := startV4(ctx)
defer disc.Close()
respN, err := disc.RequestENR(n)
@ -169,7 +169,7 @@ func discv4RequestRecord(ctx *cli.Context) error {
func discv4Resolve(ctx *cli.Context) error {
n := getNodeArg(ctx)
disc := startV4(ctx)
disc, _ := startV4(ctx)
defer disc.Close()
fmt.Println(disc.Resolve(n).String())
@ -196,10 +196,13 @@ func discv4ResolveJSON(ctx *cli.Context) error {
nodeargs = append(nodeargs, n)
}
// Run the crawler.
disc := startV4(ctx)
disc, config := startV4(ctx)
defer disc.Close()
c := newCrawler(inputSet, disc, enode.IterNodes(nodeargs))
c, err := newCrawler(inputSet, config.Bootnodes, disc, enode.IterNodes(nodeargs))
if err != nil {
return err
}
c.revalidateInterval = 0
output := c.run(0, 1)
writeNodesJSON(nodesFile, output)
@ -211,14 +214,18 @@ func discv4Crawl(ctx *cli.Context) error {
return errors.New("need nodes file as argument")
}
nodesFile := ctx.Args().First()
var inputSet nodeSet
inputSet := make(nodeSet)
if common.FileExist(nodesFile) {
inputSet = loadNodesJSON(nodesFile)
}
disc := startV4(ctx)
disc, config := startV4(ctx)
defer disc.Close()
c := newCrawler(inputSet, disc, disc.RandomNodes())
c, err := newCrawler(inputSet, config.Bootnodes, disc, disc.RandomNodes())
if err != nil {
return err
}
c.revalidateInterval = 10 * time.Minute
output := c.run(ctx.Duration(crawlTimeoutFlag.Name), ctx.Int(crawlParallelismFlag.Name))
writeNodesJSON(nodesFile, output)
@ -238,14 +245,14 @@ func discv4Test(ctx *cli.Context) error {
}
// startV4 starts an ephemeral discovery V4 node.
func startV4(ctx *cli.Context) *discover.UDPv4 {
func startV4(ctx *cli.Context) (*discover.UDPv4, discover.Config) {
ln, config := makeDiscoveryConfig(ctx)
socket := listen(ctx, ln)
disc, err := discover.ListenV4(socket, ln, config)
if err != nil {
exit(err)
}
return disc
return disc, config
}
func makeDiscoveryConfig(ctx *cli.Context) (*enode.LocalNode, discover.Config) {
@ -337,7 +344,7 @@ func listen(ctx *cli.Context, ln *enode.LocalNode) *net.UDPConn {
}
func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) {
s := params.RinkebyBootnodes
s := params.MainnetBootnodes
if ctx.IsSet(bootnodesFlag.Name) {
input := ctx.String(bootnodesFlag.Name)
if input == "" {

View File

@ -81,7 +81,7 @@ var (
func discv5Ping(ctx *cli.Context) error {
n := getNodeArg(ctx)
disc := startV5(ctx)
disc, _ := startV5(ctx)
defer disc.Close()
fmt.Println(disc.Ping(n))
@ -90,7 +90,7 @@ func discv5Ping(ctx *cli.Context) error {
func discv5Resolve(ctx *cli.Context) error {
n := getNodeArg(ctx)
disc := startV5(ctx)
disc, _ := startV5(ctx)
defer disc.Close()
fmt.Println(disc.Resolve(n))
@ -102,14 +102,18 @@ func discv5Crawl(ctx *cli.Context) error {
return errors.New("need nodes file as argument")
}
nodesFile := ctx.Args().First()
var inputSet nodeSet
inputSet := make(nodeSet)
if common.FileExist(nodesFile) {
inputSet = loadNodesJSON(nodesFile)
}
disc := startV5(ctx)
disc, config := startV5(ctx)
defer disc.Close()
c := newCrawler(inputSet, disc, disc.RandomNodes())
c, err := newCrawler(inputSet, config.Bootnodes, disc, disc.RandomNodes())
if err != nil {
return err
}
c.revalidateInterval = 10 * time.Minute
output := c.run(ctx.Duration(crawlTimeoutFlag.Name), ctx.Int(crawlParallelismFlag.Name))
writeNodesJSON(nodesFile, output)
@ -127,7 +131,7 @@ func discv5Test(ctx *cli.Context) error {
}
func discv5Listen(ctx *cli.Context) error {
disc := startV5(ctx)
disc, _ := startV5(ctx)
defer disc.Close()
fmt.Println(disc.Self())
@ -135,12 +139,12 @@ func discv5Listen(ctx *cli.Context) error {
}
// startV5 starts an ephemeral discovery v5 node.
func startV5(ctx *cli.Context) *discover.UDPv5 {
func startV5(ctx *cli.Context) (*discover.UDPv5, discover.Config) {
ln, config := makeDiscoveryConfig(ctx)
socket := listen(ctx, ln)
disc, err := discover.ListenV5(socket, ln, config)
if err != nil {
exit(err)
}
return disc
return disc, config
}

View File

@ -20,7 +20,6 @@ import (
"context"
"errors"
"fmt"
"sort"
"strconv"
"strings"
"time"
@ -33,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/dnsdisc"
"github.com/urfave/cli/v2"
"golang.org/x/exp/slices"
)
const (
@ -288,11 +288,17 @@ func makeDeletionChanges(records map[string]recordSet, keep map[string]string) [
// sortChanges ensures DNS changes are in leaf-added -> root-changed -> leaf-deleted order.
func sortChanges(changes []types.Change) {
score := map[string]int{"CREATE": 1, "UPSERT": 2, "DELETE": 3}
sort.Slice(changes, func(i, j int) bool {
if changes[i].Action == changes[j].Action {
return *changes[i].ResourceRecordSet.Name < *changes[j].ResourceRecordSet.Name
slices.SortFunc(changes, func(a, b types.Change) int {
if a.Action == b.Action {
return strings.Compare(*a.ResourceRecordSet.Name, *b.ResourceRecordSet.Name)
}
return score[string(changes[i].Action)] < score[string(changes[j].Action)]
if score[string(a.Action)] < score[string(b.Action)] {
return -1
}
if score[string(a.Action)] > score[string(b.Action)] {
return 1
}
return 0
})
}
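// A minimal, self-contained sketch of the sort.Slice -> slices.SortFunc
// migration applied throughout this commit (the names below are illustrative,
// not taken from the diff). The old API takes a less(i, j) bool over indices;
// the new golang.org/x/exp/slices API takes cmp(a, b) int over elements,
// returning a negative, zero, or positive value. Negating the comparison
// flips the order, which is how the descending score sort in nodeset.go
// further down is written out long-hand.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

func main() {
	actions := []string{"DELETE", "CREATE", "UPSERT"}
	slices.SortFunc(actions, func(a, b string) int {
		return strings.Compare(a, b) // ascending; negate for descending
	})
	fmt.Println(actions) // [CREATE DELETE UPSERT]
}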

View File

@ -77,7 +77,7 @@ func (c *Chain) RootAt(height int) common.Hash {
// ForkID gets the fork id of the chain.
func (c *Chain) ForkID() forkid.ID {
return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()), c.blocks[0].Time())
return forkid.NewID(c.chainConfig, c.blocks[0], uint64(c.Len()), c.blocks[0].Time())
}
// Shorten returns a copy chain of a desired height from the imported

View File

@ -113,8 +113,6 @@ func setupGeth(stack *node.Node) error {
NetworkId: chain.genesis.Config.ChainID.Uint64(), // 19763
DatabaseCache: 10,
TrieCleanCache: 10,
TrieCleanCacheJournal: "",
TrieCleanCacheRejournal: 60 * time.Minute,
TrieDirtyCache: 16,
TrieTimeout: 60 * time.Minute,
SnapshotCache: 10,
@ -122,6 +120,7 @@ func setupGeth(stack *node.Node) error {
if err != nil {
return err
}
backend.SetSynced()
_, err = backend.BlockChain().InsertChain(chain.blocks[1:])
return err

View File

@ -21,11 +21,11 @@ import (
"encoding/json"
"fmt"
"os"
"sort"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/p2p/enode"
"golang.org/x/exp/slices"
)
const jsonIndent = " "
@ -77,8 +77,8 @@ func (ns nodeSet) nodes() []*enode.Node {
result = append(result, n.N)
}
// Sort by ID.
sort.Slice(result, func(i, j int) bool {
return bytes.Compare(result[i].ID().Bytes(), result[j].ID().Bytes()) < 0
slices.SortFunc(result, func(a, b *enode.Node) int {
return bytes.Compare(a.ID().Bytes(), b.ID().Bytes())
})
return result
}
@ -103,8 +103,14 @@ func (ns nodeSet) topN(n int) nodeSet {
for _, v := range ns {
byscore = append(byscore, v)
}
sort.Slice(byscore, func(i, j int) bool {
return byscore[i].Score >= byscore[j].Score
slices.SortFunc(byscore, func(a, b nodeJSON) int {
if a.Score > b.Score {
return -1
}
if a.Score < b.Score {
return 1
}
return 0
})
result := make(nodeSet, n)
for _, v := range byscore[:n] {

View File

@ -25,6 +25,7 @@ import (
"strings"
"time"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/params"
@ -228,13 +229,13 @@ func ethFilter(args []string) (nodeFilter, error) {
var filter forkid.Filter
switch args[0] {
case "mainnet":
filter = forkid.NewStaticFilter(params.MainnetChainConfig, params.MainnetGenesisHash)
case "rinkeby":
filter = forkid.NewStaticFilter(params.RinkebyChainConfig, params.RinkebyGenesisHash)
filter = forkid.NewStaticFilter(params.MainnetChainConfig, core.DefaultGenesisBlock().ToBlock())
case "goerli":
filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash)
filter = forkid.NewStaticFilter(params.GoerliChainConfig, core.DefaultGoerliGenesisBlock().ToBlock())
case "sepolia":
filter = forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash)
filter = forkid.NewStaticFilter(params.SepoliaChainConfig, core.DefaultSepoliaGenesisBlock().ToBlock())
case "holesky":
filter = forkid.NewStaticFilter(params.HoleskyChainConfig, core.DefaultHoleskyGenesisBlock().ToBlock())
default:
return nil, fmt.Errorf("unknown network %q", args[0])
}

View File

@ -342,7 +342,7 @@ To make `t8n` apply these, the following inputs are required:
- For ethash, it is `5000000000000000000` `wei`,
- If this is not defined, mining rewards are not applied,
- A value of `0` is valid, and causes accounts to be 'touched'.
- For each ommer, the tool needs to be given an `addres\` and a `delta`. This
- For each ommer, the tool needs to be given an `address\` and a `delta`. This
is done via the `ommers` field in `env`.
Note: the tool does not verify that e.g. the normal uncle rules apply,
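As a hedged illustration (the `delta`/`address` key names are assumed from the `ommer` struct that appears later in this diff; the concrete values are made up), an `env` fragment could look like:
```
"ommers": [
  { "delta": 1, "address": "0x0000000000000000000000000000000000000001" },
  { "delta": 2, "address": "0x0000000000000000000000000000000000000002" }
]
```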

View File

@ -22,7 +22,9 @@ import (
"fmt"
"os"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/tests"
"github.com/urfave/cli/v2"
)
@ -38,11 +40,17 @@ func blockTestCmd(ctx *cli.Context) error {
if len(ctx.Args().First()) == 0 {
return errors.New("path-to-test argument required")
}
// Configure the go-ethereum logger
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
var tracer vm.EVMLogger
// Configure the EVM logger
if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(&logger.Config{
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
DisableStack: ctx.Bool(DisableStackFlag.Name),
DisableStorage: ctx.Bool(DisableStorageFlag.Name),
EnableReturnData: !ctx.Bool(DisableReturnDataFlag.Name),
}, os.Stderr)
}
// Load the test content from the input file
src, err := os.ReadFile(ctx.Args().First())
if err != nil {
@ -53,7 +61,7 @@ func blockTestCmd(ctx *cli.Context) error {
return err
}
for i, test := range tests {
if err := test.Run(false); err != nil {
if err := test.Run(false, rawdb.HashScheme, tracer); err != nil {
return fmt.Errorf("test %v: %w", i, err)
}
}

View File

@ -306,7 +306,7 @@ func readInput(ctx *cli.Context) (*bbInput, error) {
return inputData, nil
}
// dispatchOutput writes the output data to either stderr or stdout, or to the specified
// dispatchBlock writes the output data to either stderr or stdout, or to the specified
// files
func dispatchBlock(ctx *cli.Context, baseDir string, block *types.Block) error {
raw, _ := rlp.EncodeToBytes(block)

View File

@ -19,12 +19,12 @@ package t8ntool
import (
"fmt"
"math/big"
"os"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
@ -58,6 +58,8 @@ type ExecutionResult struct {
GasUsed math.HexOrDecimal64 `json:"gasUsed"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"`
CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"`
CurrentBlobGasUsed *math.HexOrDecimal64 `json:"currentBlobGasUsed,omitempty"`
}
type ommer struct {
@ -83,6 +85,10 @@ type stEnv struct {
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
BaseFee *big.Int `json:"currentBaseFee,omitempty"`
ParentUncleHash common.Hash `json:"parentUncleHash"`
ExcessBlobGas *uint64 `json:"excessBlobGas,omitempty"`
ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"`
ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
}
type stEnvMarshaling struct {
@ -98,6 +104,9 @@ type stEnvMarshaling struct {
Timestamp math.HexOrDecimal64
ParentTimestamp math.HexOrDecimal64
BaseFee *math.HexOrDecimal256
ExcessBlobGas *math.HexOrDecimal64
ParentExcessBlobGas *math.HexOrDecimal64
ParentBlobGasUsed *math.HexOrDecimal64
}
type rejectedTx struct {
@ -154,6 +163,19 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
rnd := common.BigToHash(pre.Env.Random)
vmContext.Random = &rnd
}
// If excessBlobGas is defined, add it to the vmContext.
if pre.Env.ExcessBlobGas != nil {
vmContext.ExcessBlobGas = pre.Env.ExcessBlobGas
} else {
// If it is not explicitly defined, but we have the parent values, we try
// to calculate it ourselves.
parentExcessBlobGas := pre.Env.ParentExcessBlobGas
parentBlobGasUsed := pre.Env.ParentBlobGasUsed
if parentExcessBlobGas != nil && parentBlobGasUsed != nil {
excessBlobGas := eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed)
vmContext.ExcessBlobGas = &excessBlobGas
}
}
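// For context, a sketch of the EIP-4844 rule that eip4844.CalcExcessBlobGas
// is assumed to implement: excess = max(parentExcess + parentUsed - target, 0).
// The Cancun target of 3 blobs * 0x20000 gas per blob is taken from the EIP,
// not from this diff.
func calcExcessBlobGasSketch(parentExcess, parentUsed uint64) uint64 {
	const targetBlobGasPerBlock = 3 * 131072 // 3 * GAS_PER_BLOB (0x20000)
	if parentExcess+parentUsed < targetBlobGasPerBlock {
		return 0 // usage below target: the excess drains to zero
	}
	return parentExcess + parentUsed - targetBlobGasPerBlock
}
// With testdata/28's env (both parent values 0x00) this yields 0, matching
// "currentExcessBlobGas": "0x0" in exp.json.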
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
// done in StateProcessor.Process(block, ...), right before transactions are applied.
if chainConfig.DAOForkSupport &&
@ -161,8 +183,18 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 {
misc.ApplyDAOHardFork(statedb)
}
if beaconRoot := pre.Env.ParentBeaconBlockRoot; beaconRoot != nil {
evm := vm.NewEVM(vmContext, vm.TxContext{}, statedb, chainConfig, vmConfig)
core.ProcessBeaconBlockRoot(*beaconRoot, evm, statedb)
}
var blobGasUsed uint64
for i, tx := range txs {
if tx.Type() == types.BlobTxType && vmContext.ExcessBlobGas == nil {
errMsg := "blob tx used but field env.ExcessBlobGas missing"
log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg)
rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg})
continue
}
msg, err := core.TransactionToMessage(tx, signer, pre.Env.BaseFee)
if err != nil {
log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err)
@ -192,6 +224,9 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
gaspool.SetGas(prevGas)
continue
}
if tx.Type() == types.BlobTxType {
blobGasUsed += params.BlobTxBlobGasPerBlob
}
includedTxs = append(includedTxs, tx)
if hashError != nil {
return nil, nil, NewError(ErrorMissingBlockhash, hashError)
@ -240,7 +275,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
if miningReward >= 0 {
// Add mining reward. The mining reward may be `0`, which only makes a difference in the cases
// where
// - the coinbase suicided, or
// - the coinbase self-destructed, or
// - there are only 'bad' transactions, which aren't executed. In those cases,
// the coinbase gets no txfee, so isn't created, and thus needs to be touched
var (
@ -267,9 +302,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
statedb.AddBalance(w.Address, amount)
}
// Commit block
root, err := statedb.Commit(chainConfig.IsEIP158(vmContext.BlockNumber))
root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber))
if err != nil {
fmt.Fprintf(os.Stderr, "Could not commit state: %v", err)
return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err))
}
execRs := &ExecutionResult{
@ -288,6 +322,16 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
h := types.DeriveSha(types.Withdrawals(pre.Env.Withdrawals), trie.NewStackTrie(nil))
execRs.WithdrawalsRoot = &h
}
if vmContext.ExcessBlobGas != nil {
execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(vmContext.ExcessBlobGas)
execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed)
}
// Re-create statedb instance with new root upon the updated database
// for accessing latest states.
statedb, err = state.New(root, statedb.Database(), nil)
if err != nil {
return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err))
}
return statedb, execRs, nil
}
@ -303,7 +347,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB
}
}
// Commit and re-open to start with a clean state.
root, _ := statedb.Commit(false)
root, _ := statedb.Commit(0, false)
statedb, _ = state.New(root, sdb, nil)
return statedb
}

View File

@ -33,6 +33,10 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
ParentUncleHash common.Hash `json:"parentUncleHash"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"`
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
}
var enc stEnv
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
@ -51,6 +55,10 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
enc.Withdrawals = s.Withdrawals
enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee)
enc.ParentUncleHash = s.ParentUncleHash
enc.ExcessBlobGas = (*math.HexOrDecimal64)(s.ExcessBlobGas)
enc.ParentExcessBlobGas = (*math.HexOrDecimal64)(s.ParentExcessBlobGas)
enc.ParentBlobGasUsed = (*math.HexOrDecimal64)(s.ParentBlobGasUsed)
enc.ParentBeaconBlockRoot = s.ParentBeaconBlockRoot
return json.Marshal(&enc)
}
@ -73,6 +81,10 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"`
BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"`
ParentUncleHash *common.Hash `json:"parentUncleHash"`
ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"`
ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"`
ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"`
ParentBeaconBlockRoot *common.Hash `json:"parentBeaconBlockRoot"`
}
var dec stEnv
if err := json.Unmarshal(input, &dec); err != nil {
@ -130,5 +142,17 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
if dec.ParentUncleHash != nil {
s.ParentUncleHash = *dec.ParentUncleHash
}
if dec.ExcessBlobGas != nil {
s.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
}
if dec.ParentExcessBlobGas != nil {
s.ParentExcessBlobGas = (*uint64)(dec.ParentExcessBlobGas)
}
if dec.ParentBlobGasUsed != nil {
s.ParentBlobGasUsed = (*uint64)(dec.ParentBlobGasUsed)
}
if dec.ParentBeaconBlockRoot != nil {
s.ParentBeaconBlockRoot = dec.ParentBeaconBlockRoot
}
return nil
}

View File

@ -28,7 +28,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@ -192,105 +192,20 @@ func Transition(ctx *cli.Context) error {
// Set the chain id
chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name))
var txsWithKeys []*txWithKey
if txStr != stdinSelector {
inFile, err := os.Open(txStr)
if err != nil {
return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err))
}
defer inFile.Close()
decoder := json.NewDecoder(inFile)
if strings.HasSuffix(txStr, ".rlp") {
var body hexutil.Bytes
if err := decoder.Decode(&body); err != nil {
if txs, err = loadTransactions(txStr, inputData, prestate.Env, chainConfig); err != nil {
return err
}
var txs types.Transactions
if err := rlp.DecodeBytes(body, &txs); err != nil {
if err := applyLondonChecks(&prestate.Env, chainConfig); err != nil {
return err
}
for _, tx := range txs {
txsWithKeys = append(txsWithKeys, &txWithKey{
key: nil,
tx: tx,
})
}
} else {
if err := decoder.Decode(&txsWithKeys); err != nil {
return NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err))
}
}
} else {
if len(inputData.TxRlp) > 0 {
// Decode the body of already signed transactions
body := common.FromHex(inputData.TxRlp)
var txs types.Transactions
if err := rlp.DecodeBytes(body, &txs); err != nil {
if err := applyShanghaiChecks(&prestate.Env, chainConfig); err != nil {
return err
}
for _, tx := range txs {
txsWithKeys = append(txsWithKeys, &txWithKey{
key: nil,
tx: tx,
})
if err := applyMergeChecks(&prestate.Env, chainConfig); err != nil {
return err
}
} else {
// JSON encoded transactions
txsWithKeys = inputData.Txs
}
}
// We may have to sign the transactions.
signer := types.MakeSigner(chainConfig, big.NewInt(int64(prestate.Env.Number)), prestate.Env.Timestamp)
if txs, err = signUnsignedTransactions(txsWithKeys, signer); err != nil {
return NewError(ErrorJson, fmt.Errorf("failed signing transactions: %v", err))
}
// Sanity check, to not `panic` in state_transition
if chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
if prestate.Env.BaseFee != nil {
// Already set, base fee has precedent over parent base fee.
} else if prestate.Env.ParentBaseFee != nil && prestate.Env.Number != 0 {
parent := &types.Header{
Number: new(big.Int).SetUint64(prestate.Env.Number - 1),
BaseFee: prestate.Env.ParentBaseFee,
GasUsed: prestate.Env.ParentGasUsed,
GasLimit: prestate.Env.ParentGasLimit,
}
prestate.Env.BaseFee = misc.CalcBaseFee(chainConfig, parent)
} else {
return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
}
}
if chainConfig.IsShanghai(big.NewInt(int64(prestate.Env.Number)), prestate.Env.Timestamp) && prestate.Env.Withdrawals == nil {
return NewError(ErrorConfig, errors.New("Shanghai config but missing 'withdrawals' in env section"))
}
isMerged := chainConfig.TerminalTotalDifficulty != nil && chainConfig.TerminalTotalDifficulty.BitLen() == 0
env := prestate.Env
if isMerged {
// post-merge:
// - random must be supplied
// - difficulty must be zero
switch {
case env.Random == nil:
return NewError(ErrorConfig, errors.New("post-merge requires currentRandom to be defined in env"))
case env.Difficulty != nil && env.Difficulty.BitLen() != 0:
return NewError(ErrorConfig, errors.New("post-merge difficulty must be zero (or omitted) in env"))
}
prestate.Env.Difficulty = nil
} else if env.Difficulty == nil {
// pre-merge:
// If difficulty was not provided by caller, we need to calculate it.
switch {
case env.ParentDifficulty == nil:
return NewError(ErrorConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty"))
case env.Number == 0:
return NewError(ErrorConfig, errors.New("currentDifficulty needs to be provided for block number 0"))
case env.Timestamp <= env.ParentTimestamp:
return NewError(ErrorConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)",
env.Timestamp, env.ParentTimestamp))
}
prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp,
env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash)
if err := applyCancunChecks(&prestate.Env, chainConfig); err != nil {
return err
}
// Run the test and aggregate the result
s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer)
@ -358,33 +273,149 @@ func (t *txWithKey) UnmarshalJSON(input []byte) error {
// and secondly to read them with the standard tx json format
func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Transactions, error) {
var signedTxs []*types.Transaction
for i, txWithKey := range txs {
tx := txWithKey.tx
key := txWithKey.key
v, r, s := tx.RawSignatureValues()
if key != nil && v.BitLen()+r.BitLen()+s.BitLen() == 0 {
// This transaction needs to be signed
for i, tx := range txs {
var (
v, r, s = tx.tx.RawSignatureValues()
signed *types.Transaction
err error
)
if txWithKey.protected {
signed, err = types.SignTx(tx, signer, key)
if tx.key == nil || v.BitLen()+r.BitLen()+s.BitLen() != 0 {
// Already signed
signedTxs = append(signedTxs, tx.tx)
continue
}
// This transaction needs to be signed
if tx.protected {
signed, err = types.SignTx(tx.tx, signer, tx.key)
} else {
signed, err = types.SignTx(tx, types.FrontierSigner{}, key)
signed, err = types.SignTx(tx.tx, types.FrontierSigner{}, tx.key)
}
if err != nil {
return nil, NewError(ErrorJson, fmt.Errorf("tx %d: failed to sign tx: %v", i, err))
}
signedTxs = append(signedTxs, signed)
} else {
// Already signed
signedTxs = append(signedTxs, tx)
}
}
return signedTxs, nil
}
func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *params.ChainConfig) (types.Transactions, error) {
var txsWithKeys []*txWithKey
var signed types.Transactions
if txStr != stdinSelector {
data, err := os.ReadFile(txStr)
if err != nil {
return nil, NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err))
}
if strings.HasSuffix(txStr, ".rlp") { // A file containing an rlp list
var body hexutil.Bytes
if err := json.Unmarshal(data, &body); err != nil {
return nil, err
}
// Already signed transactions
if err := rlp.DecodeBytes(body, &signed); err != nil {
return nil, err
}
return signed, nil
}
if err := json.Unmarshal(data, &txsWithKeys); err != nil {
return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err))
}
} else {
if len(inputData.TxRlp) > 0 {
// Decode the body of already signed transactions
body := common.FromHex(inputData.TxRlp)
// Already signed transactions
if err := rlp.DecodeBytes(body, &signed); err != nil {
return nil, err
}
return signed, nil
}
// JSON encoded transactions
txsWithKeys = inputData.Txs
}
// We may have to sign the transactions.
signer := types.MakeSigner(chainConfig, big.NewInt(int64(env.Number)), env.Timestamp)
return signUnsignedTransactions(txsWithKeys, signer)
}
func applyLondonChecks(env *stEnv, chainConfig *params.ChainConfig) error {
if !chainConfig.IsLondon(big.NewInt(int64(env.Number))) {
return nil
}
// Sanity check, to not `panic` in state_transition
if env.BaseFee != nil {
// Already set, base fee has precedent over parent base fee.
return nil
}
if env.ParentBaseFee == nil || env.Number == 0 {
return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
}
env.BaseFee = eip1559.CalcBaseFee(chainConfig, &types.Header{
Number: new(big.Int).SetUint64(env.Number - 1),
BaseFee: env.ParentBaseFee,
GasUsed: env.ParentGasUsed,
GasLimit: env.ParentGasLimit,
})
return nil
}
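// A hedged sketch of the EIP-1559 update rule behind eip1559.CalcBaseFee
// (elasticity multiplier 2 and change denominator 8 are the EIP defaults,
// assumed rather than read from this diff; relies on the math/big import
// already present in this file).
func calcBaseFeeSketch(parentBaseFee *big.Int, parentGasUsed, parentGasLimit uint64) *big.Int {
	target := parentGasLimit / 2 // gas target = gas limit / elasticity multiplier
	if parentGasUsed == target {
		return new(big.Int).Set(parentBaseFee)
	}
	if parentGasUsed > target {
		// fee rises by parentBaseFee * delta / target / 8, at least 1 wei
		delta := new(big.Int).SetUint64(parentGasUsed - target)
		change := new(big.Int).Mul(parentBaseFee, delta)
		change.Div(change, new(big.Int).SetUint64(target)).Div(change, big.NewInt(8))
		if change.Sign() == 0 {
			change.SetInt64(1)
		}
		return change.Add(change, parentBaseFee)
	}
	// fee falls by the symmetric amount (no minimum step)
	delta := new(big.Int).SetUint64(target - parentGasUsed)
	change := new(big.Int).Mul(parentBaseFee, delta)
	change.Div(change, new(big.Int).SetUint64(target)).Div(change, big.NewInt(8))
	return new(big.Int).Sub(parentBaseFee, change)
}
// With testdata/28's env (parentBaseFee 0x0a, parentGasUsed 0x00) the fee
// drops by 10/8 = 1 wei to 0x9, matching "currentBaseFee": "0x9" in exp.json.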
func applyShanghaiChecks(env *stEnv, chainConfig *params.ChainConfig) error {
if !chainConfig.IsShanghai(big.NewInt(int64(env.Number)), env.Timestamp) {
return nil
}
if env.Withdrawals == nil {
return NewError(ErrorConfig, errors.New("Shanghai config but missing 'withdrawals' in env section"))
}
return nil
}
func applyMergeChecks(env *stEnv, chainConfig *params.ChainConfig) error {
isMerged := chainConfig.TerminalTotalDifficulty != nil && chainConfig.TerminalTotalDifficulty.BitLen() == 0
if !isMerged {
// pre-merge: If difficulty was not provided by caller, we need to calculate it.
if env.Difficulty != nil {
// already set
return nil
}
switch {
case env.ParentDifficulty == nil:
return NewError(ErrorConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty"))
case env.Number == 0:
return NewError(ErrorConfig, errors.New("currentDifficulty needs to be provided for block number 0"))
case env.Timestamp <= env.ParentTimestamp:
return NewError(ErrorConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)",
env.Timestamp, env.ParentTimestamp))
}
env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp,
env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash)
return nil
}
// post-merge:
// - random must be supplied
// - difficulty must be zero
switch {
case env.Random == nil:
return NewError(ErrorConfig, errors.New("post-merge requires currentRandom to be defined in env"))
case env.Difficulty != nil && env.Difficulty.BitLen() != 0:
return NewError(ErrorConfig, errors.New("post-merge difficulty must be zero (or omitted) in env"))
}
env.Difficulty = nil
return nil
}
func applyCancunChecks(env *stEnv, chainConfig *params.ChainConfig) error {
if !chainConfig.IsCancun(big.NewInt(int64(env.Number)), env.Timestamp) {
env.ParentBeaconBlockRoot = nil // un-set it if it has been set too early
return nil
}
// Post-cancun
// We require EIP-4788 beacon root to be set in the env
if env.ParentBeaconBlockRoot == nil {
return NewError(ErrorConfig, errors.New("post-cancun env requires parentBeaconBlockRoot to be set"))
}
return nil
}
type Alloc map[common.Address]core.GenesisAccount
func (g Alloc) OnRoot(common.Hash) {}

View File

@ -23,6 +23,7 @@ import (
"os"
"github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool"
"github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/urfave/cli/v2"
)
@ -31,99 +32,107 @@ var (
DebugFlag = &cli.BoolFlag{
Name: "debug",
Usage: "output full trace logs",
}
MemProfileFlag = &cli.StringFlag{
Name: "memprofile",
Usage: "creates a memory profile at the given path",
}
CPUProfileFlag = &cli.StringFlag{
Name: "cpuprofile",
Usage: "creates a CPU profile at the given path",
Category: flags.VMCategory,
}
StatDumpFlag = &cli.BoolFlag{
Name: "statdump",
Usage: "displays stack and heap memory information",
Category: flags.VMCategory,
}
CodeFlag = &cli.StringFlag{
Name: "code",
Usage: "EVM code",
Category: flags.VMCategory,
}
CodeFileFlag = &cli.StringFlag{
Name: "codefile",
Usage: "File containing EVM code. If '-' is specified, code is read from stdin ",
Category: flags.VMCategory,
}
GasFlag = &cli.Uint64Flag{
Name: "gas",
Usage: "gas limit for the evm",
Value: 10000000000,
Category: flags.VMCategory,
}
PriceFlag = &flags.BigFlag{
Name: "price",
Usage: "price set for the evm",
Value: new(big.Int),
Category: flags.VMCategory,
}
ValueFlag = &flags.BigFlag{
Name: "value",
Usage: "value set for the evm",
Value: new(big.Int),
Category: flags.VMCategory,
}
DumpFlag = &cli.BoolFlag{
Name: "dump",
Usage: "dumps the state after the run",
Category: flags.VMCategory,
}
InputFlag = &cli.StringFlag{
Name: "input",
Usage: "input for the EVM",
Category: flags.VMCategory,
}
InputFileFlag = &cli.StringFlag{
Name: "inputfile",
Usage: "file containing input for the EVM",
}
VerbosityFlag = &cli.IntFlag{
Name: "verbosity",
Usage: "sets the verbosity level",
Category: flags.VMCategory,
}
BenchFlag = &cli.BoolFlag{
Name: "bench",
Usage: "benchmark the execution",
Category: flags.VMCategory,
}
CreateFlag = &cli.BoolFlag{
Name: "create",
Usage: "indicates the action should be create rather than call",
Category: flags.VMCategory,
}
GenesisFlag = &cli.StringFlag{
Name: "prestate",
Usage: "JSON file with prestate (genesis) config",
Category: flags.VMCategory,
}
MachineFlag = &cli.BoolFlag{
Name: "json",
Usage: "output trace logs in machine readable format (json)",
Category: flags.VMCategory,
}
SenderFlag = &cli.StringFlag{
Name: "sender",
Usage: "The transaction origin",
Category: flags.VMCategory,
}
ReceiverFlag = &cli.StringFlag{
Name: "receiver",
Usage: "The transaction receiver (execution context)",
Category: flags.VMCategory,
}
DisableMemoryFlag = &cli.BoolFlag{
Name: "nomemory",
Value: true,
Usage: "disable memory output",
Category: flags.VMCategory,
}
DisableStackFlag = &cli.BoolFlag{
Name: "nostack",
Usage: "disable stack output",
Category: flags.VMCategory,
}
DisableStorageFlag = &cli.BoolFlag{
Name: "nostorage",
Usage: "disable storage output",
Category: flags.VMCategory,
}
DisableReturnDataFlag = &cli.BoolFlag{
Name: "noreturndata",
Value: true,
Usage: "enable return data output",
Category: flags.VMCategory,
}
)
@ -183,34 +192,38 @@ var blockBuilderCommand = &cli.Command{
},
}
var app = flags.NewApp("the evm command line interface")
func init() {
app.Flags = []cli.Flag{
BenchFlag,
CreateFlag,
DebugFlag,
VerbosityFlag,
// vmFlags contains flags related to running the EVM.
var vmFlags = []cli.Flag{
CodeFlag,
CodeFileFlag,
CreateFlag,
GasFlag,
PriceFlag,
ValueFlag,
DumpFlag,
InputFlag,
InputFileFlag,
MemProfileFlag,
CPUProfileFlag,
StatDumpFlag,
GenesisFlag,
MachineFlag,
SenderFlag,
ReceiverFlag,
}
// traceFlags contains flags that configure tracing output.
var traceFlags = []cli.Flag{
BenchFlag,
DebugFlag,
DumpFlag,
MachineFlag,
StatDumpFlag,
DisableMemoryFlag,
DisableStackFlag,
DisableStorageFlag,
DisableReturnDataFlag,
}
var app = flags.NewApp("the evm command line interface")
func init() {
app.Flags = flags.Merge(vmFlags, traceFlags, debug.Flags)
app.Commands = []*cli.Command{
compileCommand,
disasmCommand,
@ -221,6 +234,14 @@ func init() {
transactionCommand,
blockBuilderCommand,
}
app.Before = func(ctx *cli.Context) error {
flags.MigrateGlobalFlags(ctx)
return debug.Setup(ctx)
}
app.After = func(ctx *cli.Context) error {
debug.Exit()
return nil
}
}
func main() {

View File

@ -24,7 +24,6 @@ import (
"math/big"
"os"
goruntime "runtime"
"runtime/pprof"
"testing"
"time"
@ -34,14 +33,13 @@ import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/core/vm/runtime"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
"github.com/urfave/cli/v2"
)
@ -51,6 +49,7 @@ var runCommand = &cli.Command{
Usage: "run arbitrary evm binary",
ArgsUsage: "<code>",
Description: `The run command runs arbitrary EVM code.`,
Flags: flags.Merge(vmFlags, traceFlags),
}
// readGenesis will read the given JSON format genesis file and return
@ -108,9 +107,6 @@ func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) (output []by
}
func runCmd(ctx *cli.Context) error {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
logconfig := &logger.Config{
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
DisableStack: ctx.Bool(DisableStackFlag.Name),
@ -126,8 +122,8 @@ func runCmd(ctx *cli.Context) error {
chainConfig *params.ChainConfig
sender = common.BytesToAddress([]byte("sender"))
receiver = common.BytesToAddress([]byte("receiver"))
genesisConfig *core.Genesis
preimages = ctx.Bool(DumpFlag.Name)
blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests
)
if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(logconfig, os.Stdout)
@ -137,19 +133,30 @@ func runCmd(ctx *cli.Context) error {
} else {
debugLogger = logger.NewStructLogger(logconfig)
}
initialGas := ctx.Uint64(GasFlag.Name)
genesisConfig := new(core.Genesis)
genesisConfig.GasLimit = initialGas
if ctx.String(GenesisFlag.Name) != "" {
gen := readGenesis(ctx.String(GenesisFlag.Name))
genesisConfig = gen
db := rawdb.NewMemoryDatabase()
genesis := gen.MustCommit(db)
sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: preimages})
statedb, _ = state.New(genesis.Root(), sdb, nil)
chainConfig = gen.Config
} else {
sdb := state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), &trie.Config{Preimages: preimages})
statedb, _ = state.New(types.EmptyRootHash, sdb, nil)
genesisConfig = new(core.Genesis)
genesisConfig = readGenesis(ctx.String(GenesisFlag.Name))
if genesisConfig.GasLimit != 0 {
initialGas = genesisConfig.GasLimit
}
} else {
genesisConfig.Config = params.AllEthashProtocolChanges
}
db := rawdb.NewMemoryDatabase()
triedb := trie.NewDatabase(db, &trie.Config{
Preimages: preimages,
HashDB: hashdb.Defaults,
})
defer triedb.Close()
genesis := genesisConfig.MustCommit(db, triedb)
sdb := state.NewDatabaseWithNodeDB(db, triedb)
statedb, _ = state.New(genesis.Root(), sdb, nil)
chainConfig = genesisConfig.Config
if ctx.String(SenderFlag.Name) != "" {
sender = common.HexToAddress(ctx.String(SenderFlag.Name))
}
@ -203,10 +210,6 @@ func runCmd(ctx *cli.Context) error {
}
code = common.Hex2Bytes(bin)
}
initialGas := ctx.Uint64(GasFlag.Name)
if genesisConfig.GasLimit != 0 {
initialGas = genesisConfig.GasLimit
}
runtimeConfig := runtime.Config{
Origin: sender,
State: statedb,
@ -217,24 +220,12 @@ func runCmd(ctx *cli.Context) error {
Time: genesisConfig.Timestamp,
Coinbase: genesisConfig.Coinbase,
BlockNumber: new(big.Int).SetUint64(genesisConfig.Number),
BlobHashes: blobHashes,
EVMConfig: vm.Config{
Tracer: tracer,
},
}
if cpuProfilePath := ctx.String(CPUProfileFlag.Name); cpuProfilePath != "" {
f, err := os.Create(cpuProfilePath)
if err != nil {
fmt.Println("could not create CPU profile: ", err)
os.Exit(1)
}
if err := pprof.StartCPUProfile(f); err != nil {
fmt.Println("could not start CPU profile: ", err)
os.Exit(1)
}
defer pprof.StopCPUProfile()
}
if chainConfig != nil {
runtimeConfig.ChainConfig = chainConfig
} else {
@ -278,24 +269,10 @@ func runCmd(ctx *cli.Context) error {
output, leftOverGas, stats, err := timedExec(bench, execFunc)
if ctx.Bool(DumpFlag.Name) {
statedb.Commit(true)
statedb.IntermediateRoot(true)
statedb.Commit(genesisConfig.Number, true)
fmt.Println(string(statedb.Dump(nil)))
}
if memProfilePath := ctx.String(MemProfileFlag.Name); memProfilePath != "" {
f, err := os.Create(memProfilePath)
if err != nil {
fmt.Println("could not create memory profile: ", err)
os.Exit(1)
}
if err := pprof.WriteHeapProfile(f); err != nil {
fmt.Println("could not write memory profile: ", err)
os.Exit(1)
}
f.Close()
}
if ctx.Bool(DebugFlag.Name) {
if debugLogger != nil {
fmt.Fprintln(os.Stderr, "#### TRACE ####")

View File

@ -23,10 +23,11 @@ import (
"os"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/tests"
"github.com/urfave/cli/v2"
)
@ -50,11 +51,6 @@ type StatetestResult struct {
}
func stateTestCmd(ctx *cli.Context) error {
// Configure the go-ethereum logger
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name)))
log.Root().SetHandler(glogger)
// Configure the EVM logger
config := &logger.Config{
EnableMemory: !ctx.Bool(DisableMemoryFlag.Name),
@ -104,10 +100,9 @@ func runStateTest(fname string, cfg vm.Config, jsonOut, dump bool) error {
for _, st := range test.Subtests() {
// Run the test and aggregate the result
result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
_, s, err := test.Run(st, cfg, false)
// print state root for evmlab tracing
if s != nil {
root := s.IntermediateRoot(false)
test.Run(st, cfg, false, rawdb.HashScheme, func(err error, snaps *snapshot.Tree, state *state.StateDB) {
if state != nil {
root := state.IntermediateRoot(false)
result.Root = &root
if jsonOut {
fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%#x\"}\n", root)
@ -116,12 +111,12 @@ func runStateTest(fname string, cfg vm.Config, jsonOut, dump bool) error {
if err != nil {
// Test failed, mark as so and dump any state to aid debugging
result.Pass, result.Error = false, err.Error()
if dump && s != nil {
dump := s.RawDump(nil)
if dump {
dump := state.RawDump(nil)
result.State = &dump
}
}
})
results = append(results, *result)
}
}

View File

@ -259,6 +259,22 @@ func TestT8n(t *testing.T) {
output: t8nOutput{alloc: true, result: true},
expOut: "exp.json",
},
{ // Cancun tests
base: "./testdata/28",
input: t8nInput{
"alloc.json", "txs.rlp", "env.json", "Cancun", "",
},
output: t8nOutput{alloc: true, result: true},
expOut: "exp.json",
},
{ // More cancun tests
base: "./testdata/29",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "Cancun", "",
},
output: t8nOutput{alloc: true, result: true},
expOut: "exp.json",
},
} {
args := []string{"t8n"}
args = append(args, tc.output.get()...)

View File

@ -1,4 +1,4 @@
## Input transactions in RLP form
This testdata folder is used to examplify how transaction input can be provided in rlp form.
This testdata folder is used to exemplify how transaction input can be provided in rlp form.
Please see the README in `evm` folder for how this is performed.

View File

@ -1 +1 @@
These files examplify a selfdestruct to the `0`-address.
These files exemplify a selfdestruct to the `0`-address.

View File

@ -1 +1 @@
These files examplify how to sign a transaction using the pre-EIP155 scheme.
These files exemplify how to sign a transaction using the pre-EIP155 scheme.

cmd/evm/testdata/28/alloc.json
View File

@ -0,0 +1,16 @@
{
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
"balance" : "0x016345785d8a0000",
"code" : "0x",
"nonce" : "0x00",
"storage" : {
}
},
"0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
"balance" : "0x016345785d8a0000",
"code" : "0x60004960015500",
"nonce" : "0x00",
"storage" : {
}
}
}

cmd/evm/testdata/28/env.json
View File

@ -0,0 +1,23 @@
{
"currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentNumber" : "0x01",
"currentTimestamp" : "0x079e",
"currentGasLimit" : "0x7fffffffffffffff",
"previousHash" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6",
"currentBlobGasUsed" : "0x00",
"parentTimestamp" : "0x03b6",
"parentDifficulty" : "0x00",
"parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"withdrawals" : [
],
"parentBaseFee" : "0x0a",
"parentGasUsed" : "0x00",
"parentGasLimit" : "0x7fffffffffffffff",
"parentExcessBlobGas" : "0x00",
"parentBlobGasUsed" : "0x00",
"blockHashes" : {
"0" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6"
},
"parentBeaconBlockRoot": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00"
}

cmd/evm/testdata/28/exp.json
View File

@ -0,0 +1,47 @@
{
"alloc": {
"0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": {
"balance": "0x150ca"
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x16345785d80c3a9",
"nonce": "0x1"
},
"0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"code": "0x60004960015500",
"storage": {
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x01a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"
},
"balance": "0x16345785d8a0000"
}
},
"result": {
"stateRoot": "0xa40cb3fab01848e922a48bd24191815df9f721ad4b60376edac75161517663e8",
"txRoot": "0x4409cc4b699384ba5f8248d92b784713610c5ff9c1de51e9239da0dac76de9ce",
"receiptsRoot": "0xbff643da765981266133094092d98c81d2ac8e9a83a7bbda46c3d736f1f874ac",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [
{
"type": "0x3",
"root": "0x",
"status": "0x1",
"cumulativeGasUsed": "0xa865",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null,
"transactionHash": "0x7508d7139d002a4b3a26a4f12dec0d87cb46075c78bf77a38b569a133b509262",
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0xa865",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"transactionIndex": "0x0"
}
],
"currentDifficulty": null,
"gasUsed": "0xa865",
"currentBaseFee": "0x9",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"currentExcessBlobGas": "0x0",
"currentBlobGasUsed": "0x20000"
}
}

cmd/evm/testdata/28/txs.rlp
View File

@ -0,0 +1 @@
"0xf88bb88903f8860180026483061a8094b94f5374fce5edbc8e2a8697c15331677e6ebf0b8080c00ae1a001a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d801a025e16bb498552165016751911c3608d79000ab89dc3100776e729e6ea13091c7a03acacff7fc0cff6eda8a927dec93ca17765e1ee6cbc06c5954ce102e097c01d2"

cmd/evm/testdata/29/alloc.json
View File

@ -0,0 +1,16 @@
{
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
"balance" : "0x016345785d8a0000",
"code" : "0x",
"nonce" : "0x00",
"storage" : {
}
},
"0xbEac00dDB15f3B6d645C48263dC93862413A222D" : {
"balance" : "0x1",
"code" : "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
"nonce" : "0x00",
"storage" : {
}
}
}

cmd/evm/testdata/29/env.json
View File

@ -0,0 +1,20 @@
{
"currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentNumber" : "0x01",
"currentTimestamp" : "0x079e",
"currentGasLimit" : "0x7fffffffffffffff",
"previousHash" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6",
"currentBlobGasUsed" : "0x00",
"parentTimestamp" : "0x03b6",
"parentDifficulty" : "0x00",
"parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"withdrawals" : [
],
"parentBaseFee" : "0x0a",
"parentGasUsed" : "0x00",
"parentGasLimit" : "0x7fffffffffffffff",
"parentExcessBlobGas" : "0x00",
"parentBlobGasUsed" : "0x00",
"parentBeaconBlockRoot": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00"
}

cmd/evm/testdata/29/exp.json
View File

@ -0,0 +1,45 @@
{
"alloc": {
"0xbeac00ddb15f3b6d645c48263dc93862413a222d": {
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
"storage": {
"0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e",
"0x000000000000000000000000000000000000000000000000000000000001879e": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00"
},
"balance": "0x1"
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x16345785d871db8",
"nonce": "0x1"
}
},
"result": {
"stateRoot": "0x2db9f6bc233e8fd0af2d8023404493a19b37d9d69ace71f4e73158851fced574",
"txRoot": "0x248074fabe112f7d93917f292b64932394f835bb98da91f21501574d58ec92ab",
"receiptsRoot": "0xf78dfb743fbd92ade140711c8bbc542b5e307f0ab7984eff35d751969fe57efa",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [
{
"type": "0x2",
"root": "0x",
"status": "0x1",
"cumulativeGasUsed": "0x5208",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"logs": null,
"transactionHash": "0x84f70aba406a55628a0620f26d260f90aeb6ccc55fed6ec2ac13dd4f727032ed",
"contractAddress": "0x0000000000000000000000000000000000000000",
"gasUsed": "0x5208",
"effectiveGasPrice": null,
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"transactionIndex": "0x0"
}
],
"currentDifficulty": null,
"gasUsed": "0x5208",
"currentBaseFee": "0x9",
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"currentExcessBlobGas": "0x0",
"currentBlobGasUsed": "0x0"
}
}

cmd/evm/testdata/29/readme.md
View File

@ -0,0 +1,29 @@
## EIP 4788
This test contains testcases for EIP-4788. The 4788-contract is
located at address `0xbeac00ddb15f3b6d645c48263dc93862413a222d`, and this test executes a simple transaction. It also
implicitly invokes the system tx, which calls the contract and sets the
storage values
```
$ dir=./testdata/29/ && go run . t8n --state.fork=Cancun --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout
INFO [08-15|20:07:56.335] Trie dumping started root=ecde45..2af8a7
INFO [08-15|20:07:56.335] Trie dumping complete accounts=2 elapsed="225.848µs"
INFO [08-15|20:07:56.335] Wrote file file=result.json
{
"alloc": {
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x16345785d871db8",
"nonce": "0x1"
},
"0xbeac00541d49391ed88abf392bfc1f4dea8c4143": {
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604457602036146024575f5ffd5b620180005f350680545f35146037575f5ffd5b6201800001545f5260205ff35b6201800042064281555f359062018000015500",
"storage": {
"0x000000000000000000000000000000000000000000000000000000000000079e": "0x000000000000000000000000000000000000000000000000000000000000079e",
"0x000000000000000000000000000000000000000000000000000000000001879e": "0x0000beac00beac00beac00beac00beac00beac00beac00beac00beac00beac00"
},
"balance": "0x
}
}
}
```
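
A hedged reading of the two storage keys in the dump above, assuming the ring-buffer layout suggested by the contract's `PUSH3 0x018000`: the timestamp is stored at slot `timestamp % 0x18000` and the beacon root at that index plus `0x18000`. The arithmetic:
```
package main

import "fmt"

func main() {
	const bufLen = 0x18000   // buffer length assumed from the contract code
	const timestamp = 0x079e // currentTimestamp in env.json
	fmt.Printf("%#x %#x\n", timestamp%bufLen, timestamp%bufLen+bufLen) // 0x79e 0x1879e
}
```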

cmd/evm/testdata/29/txs.json
View File

@ -0,0 +1,19 @@
[
{
"input" : "0x",
"gas" : "0x10000000",
"nonce" : "0x0",
"to" : "0x1111111111111111111111111111111111111111",
"value" : "0x0",
"secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
"chainId" : "0x1",
"type" : "0x2",
"v": "0x0",
"r": "0x0",
"s": "0x0",
"maxFeePerGas" : "0xfa0",
"maxPriorityFeePerGas" : "0x0",
"accessList" : [
]
}
]

View File

@ -1,2 +1,2 @@
These files examplify a transition where a transaction (excuted on block 5) requests
These files exemplify a transition where a transaction (executed on block 5) requests
the blockhash for block `1`.

View File

@ -1,3 +1,3 @@
These files examplify a transition where a transaction (excuted on block 5) requests
These files exemplify a transition where a transaction (executed on block 5) requests
the blockhash for block `4`, but where the hash for that block is missing.
It's expected that executing these should cause `exit` with errorcode `4`.

View File

@ -1 +1 @@
These files examplify a transition where there are no transcations, two ommers, at block `N-1` (delta 1) and `N-2` (delta 2).
These files exemplify a transition where there are no transactions, two ommers, at block `N-1` (delta 1) and `N-2` (delta 2).

View File

@ -7,7 +7,7 @@ This test contains testcases for EIP-2930, which uses transactions with access l
The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the
following code: `0x5854505854`: `PC ;SLOAD; POP; PC; SLOAD`.
Essentialy, this contract does `SLOAD(0)` and `SLOAD(3)`.
Essentially, this contract does `SLOAD(0)` and `SLOAD(3)`.
The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`.

View File

@ -7,7 +7,7 @@ This test contains testcases for EIP-1559, which uses an new transaction type an
The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the
following code: `0x58585454`: `PC; PC; SLOAD; SLOAD`.
Essentialy, this contract does `SLOAD(0)` and `SLOAD(1)`.
Essentially, this contract does `SLOAD(0)` and `SLOAD(1)`.
The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`.

View File

@ -280,7 +280,7 @@ To make `t8n` apply these, the following inputs are required:
- For ethash, it is `5000000000000000000` `wei`,
- If this is not defined, mining rewards are not applied,
- A value of `0` is valid, and causes accounts to be 'touched'.
- For each ommer, the tool needs to be given an `addres\` and a `delta`. This
- For each ommer, the tool needs to be given an `address\` and a `delta`. This
is done via the `ommers` field in `env`.
Note: the tool does not verify that e.g. the normal uncle rules apply,

View File

@ -12,7 +12,6 @@ First things first, the `faucet` needs to connect to an Ethereum network, for wh
- `-genesis` is a path to a file containing the network `genesis.json`, or using:
- `-goerli` with the faucet with Görli network config
- `-rinkeby` with the faucet with Rinkeby network config
- `-sepolia` with the faucet with Sepolia network config
- `-network` is the devp2p network id used during connection
- `-bootnodes` is a list of `enode://` ids to join the network through

View File

@ -86,7 +86,6 @@ var (
twitterTokenV1Flag = flag.String("twitter.token.v1", "", "Bearer token to authenticate with the v1.1 Twitter API")
goerliFlag = flag.Bool("goerli", false, "Initializes the faucet with Görli network config")
rinkebyFlag = flag.Bool("rinkeby", false, "Initializes the faucet with Rinkeby network config")
sepoliaFlag = flag.Bool("sepolia", false, "Initializes the faucet with Sepolia network config")
)
@ -140,7 +139,7 @@ func main() {
log.Crit("Failed to render the faucet template", "err", err)
}
// Load and parse the genesis block requested by the user
genesis, err := getGenesis(*genesisFlag, *goerliFlag, *rinkebyFlag, *sepoliaFlag)
genesis, err := getGenesis(*genesisFlag, *goerliFlag, *sepoliaFlag)
if err != nil {
log.Crit("Failed to parse genesis config", "err", err)
}
@ -269,11 +268,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*enode.Node, network ui
}
}
// Attach to the client and retrieve any interesting metadata
api, err := stack.Attach()
if err != nil {
stack.Close()
return nil, err
}
api := stack.Attach()
client := ethclient.NewClient(api)
return &faucet{
@ -880,7 +875,7 @@ func authNoAuth(url string) (string, string, common.Address, error) {
}
// getGenesis returns a genesis based on input args
func getGenesis(genesisFlag string, goerliFlag bool, rinkebyFlag bool, sepoliaFlag bool) (*core.Genesis, error) {
func getGenesis(genesisFlag string, goerliFlag bool, sepoliaFlag bool) (*core.Genesis, error) {
switch {
case genesisFlag != "":
var genesis core.Genesis
@ -888,8 +883,6 @@ func getGenesis(genesisFlag string, goerliFlag bool, rinkebyFlag bool, sepoliaFl
return &genesis, err
case goerliFlag:
return core.DefaultGoerliGenesisBlock(), nil
case rinkebyFlag:
return core.DefaultRinkebyGenesisBlock(), nil
case sepoliaFlag:
return core.DefaultSepoliaGenesisBlock(), nil
default:

View File

@ -61,7 +61,7 @@ func TestRemoteDbWithHeaders(t *testing.T) {
}
func testReceiveHeaders(t *testing.T, ln net.Listener, gethArgs ...string) {
var ok uint32
var ok atomic.Uint32
server := &http.Server{
Addr: "localhost:0",
Handler: &testHandler{func(w http.ResponseWriter, r *http.Request) {
@ -72,12 +72,12 @@ func testReceiveHeaders(t *testing.T, ln net.Listener, gethArgs ...string) {
if have, want := r.Header.Get("second"), "two"; have != want {
t.Fatalf("missing header, have %v want %v", have, want)
}
atomic.StoreUint32(&ok, 1)
ok.Store(1)
}}}
go server.Serve(ln)
defer server.Close()
runGeth(t, gethArgs...).WaitExit()
if atomic.LoadUint32(&ok) != 1 {
if ok.Load() != 1 {
t.Fatal("Test fail, expected invocation to succeed")
}
}
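// This test also shows the commit-wide move from package-level sync/atomic
// helpers on plain integers to the typed values introduced in Go 1.19 (the
// same change appears in importChain further down). A minimal sketch of the
// two styles side by side:
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var legacy uint32
	atomic.StoreUint32(&legacy, 1) // old: helper functions on a plain uint32

	var ok atomic.Uint32 // new: the type itself guarantees atomic access
	ok.Store(1)
	fmt.Println(atomic.LoadUint32(&legacy), ok.Load()) // 1 1
}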

View File

@ -39,7 +39,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/trie"
"github.com/urfave/cli/v2"
)
@ -49,7 +48,9 @@ var (
Name: "init",
Usage: "Bootstrap and initialize a new genesis block",
ArgsUsage: "<genesisPath>",
Flags: flags.Merge([]cli.Flag{utils.CachePreimagesFlag}, utils.DatabasePathFlags),
Flags: flags.Merge([]cli.Flag{
utils.CachePreimagesFlag,
}, utils.DatabaseFlags),
Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
@ -94,7 +95,9 @@ if one is set. Otherwise it prints the genesis from the datadir.`,
utils.MetricsInfluxDBBucketFlag,
utils.MetricsInfluxDBOrganizationFlag,
utils.TxLookupLimitFlag,
}, utils.DatabasePathFlags),
utils.TransactionHistoryFlag,
utils.StateHistoryFlag,
}, utils.DatabaseFlags),
Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.
@ -110,7 +113,7 @@ processing will proceed even if an individual RLP-file import failure occurs.`,
Flags: flags.Merge([]cli.Flag{
utils.CacheFlag,
utils.SyncModeFlag,
}, utils.DatabasePathFlags),
}, utils.DatabaseFlags),
Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
@ -126,7 +129,7 @@ be gzipped.`,
Flags: flags.Merge([]cli.Flag{
utils.CacheFlag,
utils.SyncModeFlag,
}, utils.DatabasePathFlags),
}, utils.DatabaseFlags),
Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
@ -140,7 +143,7 @@ It's deprecated, please use "geth db import" instead.
Flags: flags.Merge([]cli.Flag{
utils.CacheFlag,
utils.SyncModeFlag,
}, utils.DatabasePathFlags),
}, utils.DatabaseFlags),
Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
@ -159,7 +162,7 @@ It's deprecated, please use "geth db export" instead.
utils.IncludeIncompletesFlag,
utils.StartKeyFlag,
utils.DumpLimitFlag,
}, utils.DatabasePathFlags),
}, utils.DatabaseFlags),
Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
@ -195,14 +198,15 @@ func initGenesis(ctx *cli.Context) error {
if err != nil {
utils.Fatalf("Failed to open database: %v", err)
}
triedb := trie.NewDatabaseWithConfig(chaindb, &trie.Config{
Preimages: ctx.Bool(utils.CachePreimagesFlag.Name),
})
defer chaindb.Close()
triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false)
defer triedb.Close()
_, hash, err := core.SetupGenesisBlock(chaindb, triedb, genesis)
if err != nil {
utils.Fatalf("Failed to write genesis block: %v", err)
}
chaindb.Close()
log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
}
return nil
@ -241,7 +245,7 @@ func dumpGenesis(ctx *cli.Context) error {
if ctx.IsSet(utils.DataDirFlag.Name) {
utils.Fatalf("no existing datadir at %s", stack.Config().DataDir)
}
utils.Fatalf("no network preset provided. no exisiting genesis in the default datadir")
utils.Fatalf("no network preset provided, no existing genesis in the default datadir")
return nil
}
@ -261,16 +265,16 @@ func importChain(ctx *cli.Context) error {
defer db.Close()
// Start periodically gathering memory profiles
var peakMemAlloc, peakMemSys uint64
var peakMemAlloc, peakMemSys atomic.Uint64
go func() {
stats := new(runtime.MemStats)
for {
runtime.ReadMemStats(stats)
if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
if peakMemAlloc.Load() < stats.Alloc {
peakMemAlloc.Store(stats.Alloc)
}
if atomic.LoadUint64(&peakMemSys) < stats.Sys {
atomic.StoreUint64(&peakMemSys, stats.Sys)
if peakMemSys.Load() < stats.Sys {
peakMemSys.Store(stats.Sys)
}
time.Sleep(5 * time.Second)
}
@ -303,8 +307,8 @@ func importChain(ctx *cli.Context) error {
mem := new(runtime.MemStats)
runtime.ReadMemStats(mem)
fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(peakMemAlloc.Load())/1024/1024)
fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(peakMemSys.Load())/1024/1024)
fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))
@ -332,7 +336,8 @@ func exportChain(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
chain, _ := utils.MakeChain(ctx, stack, true)
chain, db := utils.MakeChain(ctx, stack, true)
defer db.Close()
start := time.Now()
var err error
@ -372,6 +377,7 @@ func importPreimages(ctx *cli.Context) error {
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
start := time.Now()
if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
@ -390,6 +396,7 @@ func exportPreimages(ctx *cli.Context) error {
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
start := time.Now()
if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
@ -401,6 +408,8 @@ func exportPreimages(ctx *cli.Context) error {
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
var header *types.Header
if ctx.NArg() > 1 {
return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
@ -465,10 +474,10 @@ func dump(ctx *cli.Context) error {
if err != nil {
return err
}
config := &trie.Config{
Preimages: true, // always enable preimage lookup
}
state, err := state.New(root, state.NewDatabaseWithConfig(db, config), nil)
triedb := utils.MakeTrieDatabase(ctx, db, true, false) // always enable preimage lookup
defer triedb.Close()
state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
if err != nil {
return err
}


@ -22,16 +22,17 @@ import (
"fmt"
"os"
"reflect"
"runtime"
"strings"
"unicode"
"github.com/urfave/cli/v2"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/external"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/accounts/scwallet"
"github.com/ethereum/go-ethereum/accounts/usbwallet"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/ethapi"
@ -42,6 +43,7 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/naoina/toml"
"github.com/urfave/cli/v2"
)
var (
@ -170,8 +172,26 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
v := ctx.Uint64(utils.OverrideCancun.Name)
cfg.Eth.OverrideCancun = &v
}
if ctx.IsSet(utils.OverrideVerkle.Name) {
v := ctx.Uint64(utils.OverrideVerkle.Name)
cfg.Eth.OverrideVerkle = &v
}
backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
// Create gauge with geth system and build information
if eth != nil { // The 'eth' backend may be nil in light mode
var protos []string
for _, p := range eth.Protocols() {
protos = append(protos, fmt.Sprintf("%v/%d", p.Name, p.Version))
}
metrics.NewRegisteredGaugeInfo("geth/info", nil).Update(metrics.GaugeInfoValue{
"arch": runtime.GOARCH,
"os": runtime.GOOS,
"version": cfg.Node.Version,
"protocols": strings.Join(protos, ","),
})
}
// Configure log filter RPC API.
filterSystem := utils.RegisterFilterAPI(stack, backend, &cfg.Eth)
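The gauge block above publishes build information as an info-style metric. Registering a custom one is the same two calls; the metric name and labels here are illustrative, not metrics geth itself registers:

import "github.com/ethereum/go-ethereum/metrics"

func registerPluginInfo() {
    metrics.NewRegisteredGaugeInfo("plugeth/info", nil).Update(metrics.GaugeInfoValue{
        "plugin": "etc", // illustrative label values
        "loaded": "true",
    })
}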
@ -189,6 +209,22 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
if ctx.IsSet(utils.SyncTargetFlag.Name) && cfg.Eth.SyncMode == downloader.FullSync {
utils.RegisterFullSyncTester(stack, eth, ctx.Path(utils.SyncTargetFlag.Name))
}
// Start the dev mode if requested, or launch the engine API for
// interacting with external consensus client.
if ctx.IsSet(utils.DeveloperFlag.Name) {
simBeacon, err := catalyst.NewSimulatedBeacon(ctx.Uint64(utils.DeveloperPeriodFlag.Name), eth)
if err != nil {
utils.Fatalf("failed to register dev mode catalyst service: %v", err)
}
catalyst.RegisterSimulatedBeaconAPIs(stack, simBeacon)
stack.RegisterLifecycle(simBeacon)
} else if cfg.Eth.SyncMode != downloader.LightSync {
err := catalyst.Register(stack, eth)
if err != nil {
utils.Fatalf("failed to register catalyst service: %v", err)
}
}
return stack, backend
}
@ -272,6 +308,10 @@ func deprecated(field string) bool {
return true
case "ethconfig.Config.EWASMInterpreter":
return true
case "ethconfig.Config.TrieCleanCacheJournal":
return true
case "ethconfig.Config.TrieCleanCacheRejournal":
return true
default:
return false
}


@ -75,10 +75,7 @@ func localConsole(ctx *cli.Context) error {
defer stack.Close()
// Attach to the newly started node and create the JavaScript console.
client, err := stack.Attach()
if err != nil {
return fmt.Errorf("failed to attach to the inproc geth: %v", err)
}
client := stack.Attach()
config := console.Config{
DataDir: utils.MakeDataDir(ctx),
DocRoot: ctx.String(utils.JSpathFlag.Name),


@ -48,7 +48,7 @@ var (
Name: "removedb",
Usage: "Remove blockchain and state databases",
ArgsUsage: "",
Flags: utils.DatabasePathFlags,
Flags: utils.DatabaseFlags,
Description: `
Remove blockchain and state databases`,
}
@ -77,7 +77,7 @@ Remove blockchain and state databases`,
ArgsUsage: "<prefix> <start>",
Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
}, utils.NetworkFlags, utils.DatabaseFlags),
Usage: "Inspect the storage size for each type of data in the database",
Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
}
@ -85,7 +85,7 @@ Remove blockchain and state databases`,
Action: checkStateContent,
Name: "check-state-content",
ArgsUsage: "<start (optional)>",
Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
Usage: "Verify that state data is cryptographically correct",
Description: `This command iterates the entire database for 32-byte keys, looking for RLP-encoded trie nodes.
For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates
@ -97,7 +97,7 @@ a data corruption.`,
Usage: "Print leveldb statistics",
Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
}, utils.NetworkFlags, utils.DatabaseFlags),
}
dbCompactCmd = &cli.Command{
Action: dbCompact,
@ -107,7 +107,7 @@ a data corruption.`,
utils.SyncModeFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
}, utils.NetworkFlags, utils.DatabaseFlags),
Description: `This command performs a database compaction.
WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution!`,
@ -119,7 +119,7 @@ corruption if it is aborted during execution'!`,
ArgsUsage: "<hex-encoded key>",
Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
}, utils.NetworkFlags, utils.DatabaseFlags),
Description: "This command looks up the specified database key from the database.",
}
dbDeleteCmd = &cli.Command{
@ -129,7 +129,7 @@ corruption if it is aborted during execution'!`,
ArgsUsage: "<hex-encoded key>",
Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
}, utils.NetworkFlags, utils.DatabaseFlags),
Description: `This command deletes the specified database key from the database.
WARNING: This is a low-level operation which may cause database corruption!`,
}
@ -140,7 +140,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
ArgsUsage: "<hex-encoded key> <hex-encoded value>",
Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
}, utils.NetworkFlags, utils.DatabaseFlags),
Description: `This command sets a given database key to the given value.
WARNING: This is a low-level operation which may cause database corruption!`,
}
@ -151,7 +151,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
ArgsUsage: "<hex-encoded state root> <hex-encoded account hash> <hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
}, utils.NetworkFlags, utils.DatabaseFlags),
Description: "This command looks up the specified database key from the database.",
}
dbDumpFreezerIndex = &cli.Command{
@ -161,7 +161,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
ArgsUsage: "<freezer-type> <table-type> <start (int)> <end (int)>",
Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
}, utils.NetworkFlags, utils.DatabaseFlags),
Description: "This command displays information about the freezer index.",
}
dbImportCmd = &cli.Command{
@ -171,7 +171,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
ArgsUsage: "<dumpfile> <start (optional)",
Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
}, utils.NetworkFlags, utils.DatabaseFlags),
Description: "The import command imports the specific chain data from an RLP encoded stream.",
}
dbExportCmd = &cli.Command{
@ -181,7 +181,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
ArgsUsage: "<type> <dumpfile>",
Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
}, utils.NetworkFlags, utils.DatabaseFlags),
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
}
dbMetadataCmd = &cli.Command{
@ -190,7 +190,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
Usage: "Shows metadata about the chain status.",
Flags: flags.Merge([]cli.Flag{
utils.SyncModeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
}, utils.NetworkFlags, utils.DatabaseFlags),
Description: "Shows metadata about the chain status.",
}
)
@ -482,6 +482,9 @@ func dbDumpTrie(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
triedb := utils.MakeTrieDatabase(ctx, db, false, true)
defer triedb.Close()
var (
state []byte
storage []byte
@ -515,12 +518,16 @@ func dbDumpTrie(ctx *cli.Context) error {
}
}
id := trie.StorageTrieID(common.BytesToHash(state), common.BytesToHash(account), common.BytesToHash(storage))
theTrie, err := trie.New(id, trie.NewDatabase(db))
theTrie, err := trie.New(id, triedb)
if err != nil {
return err
}
trieIt, err := theTrie.NodeIterator(start)
if err != nil {
return err
}
var count int64
it := trie.NewIterator(theTrie.NodeIterator(start))
it := trie.NewIterator(trieIt)
for it.Next() {
if max > 0 && count == max {
fmt.Printf("Exiting after %d values\n", count)
@ -587,6 +594,7 @@ func importLDBdata(ctx *cli.Context) error {
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
return utils.ImportLDBData(db, fName, int64(start), stop)
}
@ -683,6 +691,7 @@ func exportChaindata(ctx *cli.Context) error {
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
@ -690,6 +699,8 @@ func showMetaData(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
ancients, err := db.Ancients()
if err != nil {
fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)


@ -176,12 +176,12 @@ func TestCustomBackend(t *testing.T) {
{ // Can't start pebble on top of leveldb
initArgs: []string{"--db.engine", "leveldb"},
execArgs: []string{"--db.engine", "pebble"},
execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`,
execExpect: `Fatal: Could not open database: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`,
},
{ // Can't start leveldb on top of pebble
initArgs: []string{"--db.engine", "pebble"},
execArgs: []string{"--db.engine", "leveldb"},
execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`,
execExpect: `Fatal: Could not open database: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`,
},
{ // Reject invalid backend choice
initArgs: []string{"--db.engine", "mssql"},


@ -107,10 +107,10 @@ func ipcEndpoint(ipcPath, datadir string) string {
// but Windows requires pipes to sit in "\\.\pipe\". Therefore, to run several
// nodes simultaneously, we need to distinguish between them, which we do by
// the pipe filename instead of folder.
var nextIPC = uint32(0)
var nextIPC atomic.Uint32
func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc {
ipcName := fmt.Sprintf("geth-%d.ipc", atomic.AddUint32(&nextIPC, 1))
ipcName := fmt.Sprintf("geth-%d.ipc", nextIPC.Add(1))
args = append([]string{"--networkid=42", "--port=0", "--authrpc.port", "0", "--ipcpath", ipcName}, args...)
t.Logf("Starting %v with rpc: %v", name, args)
@ -146,13 +146,13 @@ func startLightServer(t *testing.T) *gethrpc {
t.Logf("Importing keys to geth")
runGeth(t, "account", "import", "--datadir", datadir, "--password", "./testdata/password.txt", "--lightkdf", "./testdata/key.prv").WaitExit()
account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105"
server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--miner.etherbase=0x02f0d131f1f97aef08aec6e3291b957d9efe7105", "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1", "--verbosity=4")
server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--miner.etherbase=0x02f0d131f1f97aef08aec6e3291b957d9efe7105", "--mine", "--light.serve=100", "--light.maxpeers=1", "--discv4=false", "--nat=extip:127.0.0.1", "--verbosity=4")
return server
}
func startClient(t *testing.T, name string) *gethrpc {
datadir := initGeth(t)
return startGethWithIpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4")
return startGethWithIpc(t, name, "--datadir", datadir, "--discv4=false", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4")
}
func TestPriorityClient(t *testing.T) {


@ -40,6 +40,8 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node"
"go.uber.org/automaxprocs/maxprocs"
"github.com/ethereum/go-ethereum/plugins"
"github.com/ethereum/go-ethereum/plugins/wrappers/backendwrapper"
@ -71,6 +73,7 @@ var (
utils.USBFlag,
utils.SmartCardDaemonPathFlag,
utils.OverrideCancun,
utils.OverrideVerkle,
utils.EnablePersonal,
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,
@ -83,21 +86,23 @@ var (
utils.TxPoolAccountQueueFlag,
utils.TxPoolGlobalQueueFlag,
utils.TxPoolLifetimeFlag,
utils.BlobPoolDataDirFlag,
utils.BlobPoolDataCapFlag,
utils.BlobPoolPriceBumpFlag,
utils.SyncModeFlag,
utils.SyncTargetFlag,
utils.ExitWhenSyncedFlag,
utils.GCModeFlag,
utils.SnapshotFlag,
utils.TxLookupLimitFlag,
utils.TransactionHistoryFlag,
utils.StateHistoryFlag,
utils.LightServeFlag,
utils.LightIngressFlag,
utils.LightEgressFlag,
utils.LightMaxPeersFlag,
utils.LightNoPruneFlag,
utils.LightKDFFlag,
utils.UltraLightServersFlag,
utils.UltraLightFractionFlag,
utils.UltraLightOnlyAnnounceFlag,
utils.LightNoSyncServeFlag,
utils.EthRequiredBlocksFlag,
utils.LegacyWhitelistFlag,
@ -127,14 +132,16 @@ var (
utils.MinerNewPayloadTimeout,
utils.NATFlag,
utils.NoDiscoverFlag,
utils.DiscoveryV4Flag,
utils.DiscoveryV5Flag,
utils.LegacyDiscoveryV5Flag,
utils.NetrestrictFlag,
utils.NodeKeyFileFlag,
utils.NodeKeyHexFlag,
utils.DNSDiscoveryFlag,
utils.DeveloperFlag,
utils.DeveloperPeriodFlag,
utils.DeveloperGasLimitFlag,
utils.DeveloperPeriodFlag,
utils.VMEnableDebugFlag,
utils.NetworkIdFlag,
utils.EthStatsURLFlag,
@ -144,7 +151,7 @@ var (
utils.GpoMaxGasPriceFlag,
utils.GpoIgnoreGasPriceFlag,
configFileFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags)
}, utils.NetworkFlags, utils.DatabaseFlags)
rpcFlags = []cli.Flag{
utils.HTTPEnabledFlag,
@ -174,6 +181,8 @@ var (
utils.RPCGlobalEVMTimeoutFlag,
utils.RPCGlobalTxFeeCapFlag,
utils.AllowUnprotectedTxs,
utils.BatchRequestLimit,
utils.BatchResponseMaxSize,
}
metricsFlags = []cli.Flag{
@ -241,10 +250,16 @@ func init() {
debug.Flags,
metricsFlags,
)
flags.AutoEnvVars(app.Flags, "GETH")
app.Before = func(ctx *cli.Context) error {
maxprocs.Set() // Automatically set GOMAXPROCS to match Linux container CPU quota.
flags.MigrateGlobalFlags(ctx)
return debug.Setup(ctx)
if err := debug.Setup(ctx); err != nil {
return err
}
flags.CheckEnvVars(ctx, app.Flags, "GETH")
return nil
}
app.After = func(ctx *cli.Context) error {
debug.Exit()
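With flags.AutoEnvVars and flags.CheckEnvVars wired in above, every flag can also be supplied through a GETH_-prefixed environment variable. The mapping sketched below is an assumption about the convention (uppercase, separators become underscores); the authoritative rule lives in internal/flags:

package main

import (
    "fmt"
    "strings"
)

// envName sketches the assumed flag-name-to-env-var mapping.
func envName(prefix, flag string) string {
    r := strings.NewReplacer(".", "_", "-", "_")
    return prefix + "_" + strings.ToUpper(r.Replace(flag))
}

func main() {
    fmt.Println(envName("GETH", "cache.trie")) // GETH_CACHE_TRIE (assumed)
}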
@ -265,15 +280,15 @@ func main() {
func prepare(ctx *cli.Context) {
// If we're running a known preset, log it for convenience.
switch {
case ctx.IsSet(utils.RinkebyFlag.Name):
log.Info("Starting Geth on Rinkeby testnet...")
case ctx.IsSet(utils.GoerliFlag.Name):
log.Info("Starting Geth on Görli testnet...")
case ctx.IsSet(utils.SepoliaFlag.Name):
log.Info("Starting Geth on Sepolia testnet...")
case ctx.IsSet(utils.HoleskyFlag.Name):
log.Info("Starting Geth on Holesky testnet...")
case ctx.IsSet(utils.DeveloperFlag.Name):
log.Info("Starting Geth in ephemeral dev mode...")
log.Warn(`You are running Geth in --dev mode. Please note the following:
@ -298,8 +313,8 @@ func prepare(ctx *cli.Context) {
// If we're a full node on mainnet without --cache specified, bump default cache allowance
if ctx.String(utils.SyncModeFlag.Name) != "light" && !ctx.IsSet(utils.CacheFlag.Name) && !ctx.IsSet(utils.NetworkIdFlag.Name) {
// Make sure we're not on any supported preconfigured testnet either
if !ctx.IsSet(utils.SepoliaFlag.Name) &&
!ctx.IsSet(utils.RinkebyFlag.Name) &&
if !ctx.IsSet(utils.HoleskyFlag.Name) &&
!ctx.IsSet(utils.SepoliaFlag.Name) &&
!ctx.IsSet(utils.GoerliFlag.Name) &&
!ctx.IsSet(utils.DeveloperFlag.Name) {
// Nope, we're really on mainnet. Bump that cache up!
@ -341,8 +356,11 @@ func geth(ctx *cli.Context) error {
return fmt.Errorf("invalid command: %q", args[0])
}
}
stack, backend := makeFullNode(ctx)
wrapperBackend := backendwrapper.NewBackend(backend)
trieCfg := plugethCaptureTrieConfig(ctx, stack, backend)
wrapperBackend := backendwrapper.NewBackend(backend, trieCfg)
pluginsInitializeNode(stack, wrapperBackend)
if ok, err := plugins.RunSubcommand(ctx); ok {
stack.Close()
@ -375,10 +393,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon
stack.AccountManager().Subscribe(events)
// Create a client to interact with local geth node.
rpcClient, err := stack.Attach()
if err != nil {
utils.Fatalf("Failed to attach to self: %v", err)
}
rpcClient := stack.Attach()
ethClient := ethclient.NewClient(rpcClient)
go func() {
@ -439,7 +454,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon
}
// Start auxiliary services if enabled
if ctx.Bool(utils.MiningEnabledFlag.Name) || ctx.Bool(utils.DeveloperFlag.Name) {
if ctx.Bool(utils.MiningEnabledFlag.Name) {
// Mining only makes sense if a full Ethereum node is running
if ctx.String(utils.SyncModeFlag.Name) == "light" {
utils.Fatalf("Light clients do not support mining")
@ -450,7 +465,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isCon
}
// Set the gas price to the limits from the CLI and start mining
gasprice := flags.GlobalBig(ctx, utils.MinerGasPriceFlag.Name)
ethBackend.TxPool().SetGasPrice(gasprice)
ethBackend.TxPool().SetGasTip(gasprice)
if err := ethBackend.StartMining(); err != nil {
utils.Fatalf("Failed to start mining: %v", err)
}


@ -1,13 +1,25 @@
package main
import (
"github.com/ethereum/go-ethereum/cmd/utils"
gcore "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
"github.com/ethereum/go-ethereum/plugins"
"github.com/ethereum/go-ethereum/plugins/wrappers"
"github.com/ethereum/go-ethereum/rpc"
"github.com/openrelayxyz/plugeth-utils/core"
"github.com/openrelayxyz/plugeth-utils/restricted"
"github.com/urfave/cli/v2"
)
func apiTranslate(apis []core.API) []rpc.API {
@ -120,3 +132,69 @@ func pluginBlockChain() {
}
BlockChain(plugins.DefaultPluginLoader)
}
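// plugethCaptureTrieConfig reconstructs, from the CLI flags, the same
// trie.Config the eth service derives internally, so the plugeth backend
// wrapper can be handed a matching configuration.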
func plugethCaptureTrieConfig(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) *trie.Config {
ethCfg := new(ethconfig.Config)
if ctx.IsSet(utils.CacheFlag.Name) || ctx.IsSet(utils.CacheTrieFlag.Name) {
ethCfg.TrieCleanCache = ctx.Int(utils.CacheFlag.Name) * ctx.Int(utils.CacheTrieFlag.Name) / 100
}
if ctx.IsSet(utils.CacheNoPrefetchFlag.Name) {
ethCfg.NoPrefetch = ctx.Bool(utils.CacheNoPrefetchFlag.Name)
}
if ctx.IsSet(utils.CacheFlag.Name) || ctx.IsSet(utils.CacheGCFlag.Name) {
ethCfg.TrieDirtyCache = ctx.Int(utils.CacheFlag.Name) * ctx.Int(utils.CacheGCFlag.Name) / 100
}
if ctx.IsSet(utils.GCModeFlag.Name) {
ethCfg.NoPruning = ctx.String(utils.GCModeFlag.Name) == "archive"
}
if ctx.IsSet(utils.CacheFlag.Name) || ctx.IsSet(utils.CacheSnapshotFlag.Name) {
ethCfg.SnapshotCache = ctx.Int(utils.CacheFlag.Name) * ctx.Int(utils.CacheSnapshotFlag.Name) / 100
}
ethCfg.Preimages = ctx.Bool(utils.CachePreimagesFlag.Name)
if ethCfg.NoPruning && !ethCfg.Preimages {
ethCfg.Preimages = true
log.Info("Enabling recording of key preimages since archive mode is used")
}
if ctx.IsSet(utils.StateHistoryFlag.Name) {
ethCfg.StateHistory = ctx.Uint64(utils.StateHistoryFlag.Name)
}
chaindb := backend.ChainDb()
scheme, err := utils.ParseStateScheme(ctx, chaindb)
if err != nil {
utils.Fatalf("%v", err)
}
ethCfg.StateScheme = scheme
cacheCfg := &gcore.CacheConfig{
TrieCleanLimit: ethCfg.TrieCleanCache,
TrieCleanNoPrefetch: ethCfg.NoPrefetch,
TrieDirtyLimit: ethCfg.TrieDirtyCache,
TrieDirtyDisabled: ethCfg.NoPruning,
TrieTimeLimit: ethconfig.Defaults.TrieTimeout,
SnapshotLimit: ethCfg.SnapshotCache,
Preimages: ethCfg.Preimages,
StateHistory: ethCfg.StateHistory,
StateScheme: ethCfg.StateScheme,
}
config := &trie.Config{Preimages: cacheCfg.Preimages}
if cacheCfg.StateScheme == rawdb.HashScheme {
config.HashDB = &hashdb.Config{
CleanCacheSize: cacheCfg.TrieCleanLimit * 1024 * 1024,
}
}
if cacheCfg.StateScheme == rawdb.PathScheme {
config.PathDB = &pathdb.Config{
StateHistory: cacheCfg.StateHistory,
CleanCacheSize: cacheCfg.TrieCleanLimit * 1024 * 1024,
DirtyCacheSize: cacheCfg.TrieDirtyLimit * 1024 * 1024,
}
}
return config
}


@ -50,9 +50,8 @@ var (
ArgsUsage: "<root>",
Action: pruneState,
Flags: flags.Merge([]cli.Flag{
utils.CacheTrieJournalFlag,
utils.BloomFilterSizeFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
}, utils.NetworkFlags, utils.DatabaseFlags),
Description: `
geth snapshot prune-state <state-root>
will prune historical state data with the help of the state snapshot.
@ -62,10 +61,7 @@ two version states are available: genesis and the specific one.
The default pruning target is the HEAD-127 state.
WARNING: It's necessary to delete the trie clean cache after the pruning.
If you specify another directory for the trie clean cache via "--cache.trie.journal"
during the use of Geth, please also specify it here for correct deletion. Otherwise
the trie clean cache with default directory will be deleted.
WARNING: it's only supported in hash mode (--state.scheme=hash).
`,
},
{
@ -73,7 +69,7 @@ the trie clean cache with default directory will be deleted.
Usage: "Recalculate state hash based on the snapshot for verification",
ArgsUsage: "<root>",
Action: verifyState,
Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
Description: `
geth snapshot verify-state <state-root>
will traverse the whole accounts and storages set based on the specified
@ -86,7 +82,7 @@ In other words, this command does the snapshot to trie conversion.
Usage: "Check that there is no 'dangling' snap storage",
ArgsUsage: "<root>",
Action: checkDanglingStorage,
Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
Description: `
geth snapshot check-dangling-storage <state-root> traverses the snap storage
data, and verifies that all snapshot storage data has a corresponding account.
@ -97,7 +93,7 @@ data, and verifies that all snapshot storage data has a corresponding account.
Usage: "Check all snapshot layers for the a specific account",
ArgsUsage: "<address | hash>",
Action: checkAccount,
Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
Description: `
geth snapshot inspect-account <address | hash> checks all snapshot layers and prints out
information about the specified address.
@ -108,7 +104,7 @@ information about the specified address.
Usage: "Traverse the state with given root hash and perform quick verification",
ArgsUsage: "<root>",
Action: traverseState,
Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
Description: `
geth snapshot traverse-state <state-root>
will traverse the whole state from the given state root and will abort if any
@ -123,7 +119,7 @@ It's also usable without snapshot enabled.
Usage: "Traverse the state with given root hash and perform detailed verification",
ArgsUsage: "<root>",
Action: traverseRawState,
Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
Description: `
geth snapshot traverse-rawstate <state-root>
will traverse the whole state from the given root and will abort if any referenced
@ -144,7 +140,7 @@ It's also usable without snapshot enabled.
utils.ExcludeStorageFlag,
utils.StartKeyFlag,
utils.DumpLimitFlag,
}, utils.NetworkFlags, utils.DatabasePathFlags),
}, utils.NetworkFlags, utils.DatabaseFlags),
Description: `
This command is semantically equivalent to 'geth dump', but uses the snapshots
as the backend data source, making this command a lot faster.
@ -160,15 +156,17 @@ block is used.
// Deprecation: this command should be deprecated once the hash-based
// scheme is deprecated.
func pruneState(ctx *cli.Context) error {
stack, config := makeConfigNode(ctx)
stack, _ := makeConfigNode(ctx)
defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, false)
defer chaindb.Close()
if rawdb.ReadStateScheme(chaindb) != rawdb.HashScheme {
log.Crit("Offline pruning is not required for path scheme")
}
prunerconfig := pruner.Config{
Datadir: stack.ResolvePath(""),
Cachedir: stack.ResolvePath(config.Eth.TrieCleanCacheJournal),
BloomSize: ctx.Uint64(utils.BloomFilterSizeFlag.Name),
}
pruner, err := pruner.NewPruner(chaindb, prunerconfig)
@ -207,13 +205,16 @@ func verifyState(ctx *cli.Context) error {
log.Error("Failed to load head block")
return errors.New("no head block")
}
snapconfig := snapshot.Config{
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
defer triedb.Close()
snapConfig := snapshot.Config{
CacheSize: 256,
Recovery: false,
NoBuild: true,
AsyncBuild: false,
}
snaptree, err := snapshot.New(snapconfig, chaindb, trie.NewDatabase(chaindb), headBlock.Root())
snaptree, err := snapshot.New(snapConfig, chaindb, triedb, headBlock.Root())
if err != nil {
log.Error("Failed to open snapshot tree", "err", err)
return err
@ -244,7 +245,9 @@ func checkDanglingStorage(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
return snapshot.CheckDanglingStorage(utils.MakeChainDatabase(ctx, stack, true))
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
return snapshot.CheckDanglingStorage(db)
}
// traverseState is a helper function used for pruning verification.
@ -255,6 +258,11 @@ func traverseState(ctx *cli.Context) error {
defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
defer triedb.Close()
headBlock := rawdb.ReadHeadBlock(chaindb)
if headBlock == nil {
log.Error("Failed to load head block")
@ -279,7 +287,6 @@ func traverseState(ctx *cli.Context) error {
root = headBlock.Root()
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
}
triedb := trie.NewDatabase(chaindb)
t, err := trie.NewStateTrie(trie.StateTrieID(root), triedb)
if err != nil {
log.Error("Failed to open trie", "root", root, "err", err)
@ -292,7 +299,12 @@ func traverseState(ctx *cli.Context) error {
lastReport time.Time
start = time.Now()
)
accIter := trie.NewIterator(t.NodeIterator(nil))
acctIt, err := t.NodeIterator(nil)
if err != nil {
log.Error("Failed to open iterator", "root", root, "err", err)
return err
}
accIter := trie.NewIterator(acctIt)
for accIter.Next() {
accounts += 1
var acc types.StateAccount
@ -307,9 +319,19 @@ func traverseState(ctx *cli.Context) error {
log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
return err
}
storageIter := trie.NewIterator(storageTrie.NodeIterator(nil))
storageIt, err := storageTrie.NodeIterator(nil)
if err != nil {
log.Error("Failed to open storage iterator", "root", acc.Root, "err", err)
return err
}
storageIter := trie.NewIterator(storageIt)
for storageIter.Next() {
slots += 1
if time.Since(lastReport) > time.Second*8 {
log.Info("Traversing state", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start)))
lastReport = time.Now()
}
}
if storageIter.Err != nil {
log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Err)
@ -345,6 +367,11 @@ func traverseRawState(ctx *cli.Context) error {
defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()
triedb := utils.MakeTrieDatabase(ctx, chaindb, false, true)
defer triedb.Close()
headBlock := rawdb.ReadHeadBlock(chaindb)
if headBlock == nil {
log.Error("Failed to load head block")
@ -369,7 +396,6 @@ func traverseRawState(ctx *cli.Context) error {
root = headBlock.Root()
log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64())
}
triedb := trie.NewDatabase(chaindb)
t, err := trie.NewStateTrie(trie.StateTrieID(root), triedb)
if err != nil {
log.Error("Failed to open trie", "root", root, "err", err)
@ -385,7 +411,16 @@ func traverseRawState(ctx *cli.Context) error {
hasher = crypto.NewKeccakState()
got = make([]byte, 32)
)
accIter := t.NodeIterator(nil)
accIter, err := t.NodeIterator(nil)
if err != nil {
log.Error("Failed to open iterator", "root", root, "err", err)
return err
}
reader, err := triedb.Reader(root)
if err != nil {
log.Error("State is non-existent", "root", root)
return nil
}
for accIter.Next(true) {
nodes += 1
node := accIter.Hash()
@ -393,7 +428,7 @@ func traverseRawState(ctx *cli.Context) error {
// Check the presence of non-empty hash nodes (embedded nodes don't
// have their own hash).
if node != (common.Hash{}) {
blob := rawdb.ReadLegacyTrieNode(chaindb, node)
blob, _ := reader.Node(common.Hash{}, accIter.Path(), node)
if len(blob) == 0 {
log.Error("Missing trie node(account)", "hash", node)
return errors.New("missing account")
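Condensed from the hunk above: node resolution now goes through a triedb reader keyed by (owner, path, hash), which works under both the hash and the path scheme. The zero owner hash denotes the account trie; the storage lookups further down pass the account's leaf key as owner:

reader, err := triedb.Reader(root) // fails if no state is available for root
if err != nil {
    return err
}
blob, _ := reader.Node(common.Hash{}, accIter.Path(), node) // account trie: zero owner
if len(blob) == 0 {
    return errors.New("missing trie node")
}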
@ -422,7 +457,11 @@ func traverseRawState(ctx *cli.Context) error {
log.Error("Failed to open storage trie", "root", acc.Root, "err", err)
return errors.New("missing storage trie")
}
storageIter := storageTrie.NodeIterator(nil)
storageIter, err := storageTrie.NodeIterator(nil)
if err != nil {
log.Error("Failed to open storage iterator", "root", acc.Root, "err", err)
return err
}
for storageIter.Next(true) {
nodes += 1
node := storageIter.Hash()
@ -430,7 +469,7 @@ func traverseRawState(ctx *cli.Context) error {
// Check the presence of non-empty hash nodes (embedded nodes don't
// have their own hash).
if node != (common.Hash{}) {
blob := rawdb.ReadLegacyTrieNode(chaindb, node)
blob, _ := reader.Node(common.BytesToHash(accIter.LeafKey()), storageIter.Path(), node)
if len(blob) == 0 {
log.Error("Missing trie node(storage)", "hash", node)
return errors.New("missing storage")
@ -447,6 +486,10 @@ func traverseRawState(ctx *cli.Context) error {
if storageIter.Leaf() {
slots += 1
}
if time.Since(lastReport) > time.Second*8 {
log.Info("Traversing state", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start)))
lastReport = time.Now()
}
}
if storageIter.Error() != nil {
log.Error("Failed to traverse storage trie", "root", acc.Root, "err", storageIter.Error())
@ -490,13 +533,16 @@ func dumpState(ctx *cli.Context) error {
if err != nil {
return err
}
triedb := utils.MakeTrieDatabase(ctx, db, false, true)
defer triedb.Close()
snapConfig := snapshot.Config{
CacheSize: 256,
Recovery: false,
NoBuild: true,
AsyncBuild: false,
}
snaptree, err := snapshot.New(snapConfig, db, trie.NewDatabase(db), root)
snaptree, err := snapshot.New(snapConfig, db, triedb, root)
if err != nil {
return err
}
@ -517,14 +563,14 @@ func dumpState(ctx *cli.Context) error {
Root common.Hash `json:"root"`
}{root})
for accIt.Next() {
account, err := snapshot.FullAccount(accIt.Account())
account, err := types.FullAccount(accIt.Account())
if err != nil {
return err
}
da := &state.DumpAccount{
Balance: account.Balance.String(),
Nonce: account.Nonce,
Root: account.Root,
Root: account.Root.Bytes(),
CodeHash: account.CodeHash,
SecureKey: accIt.Hash().Bytes(),
}


@ -0,0 +1,4 @@
untrusted comment: signature from minisign secret key
RUQkliYstQBOKLK05Sy5f3bVRMBqJT26ABo6Vbp3BNJAVjejoqYCu4GWE/+7qcDfHBqYIniDCbFIUvYEnOHxV6vZ93wO1xJWDQw=
trusted comment: timestamp:1693986492 file:data.json hashed
6Fdw2H+W1ZXK7QXSF77Z5AWC7+AEFAfDmTSxNGylU5HLT1AuSJQmxslj+VjtUBamYCvOuET7plbXza942AlWDw==


@ -45,7 +45,7 @@ var (
Usage: "verify the conversion of a MPT into a verkle tree",
ArgsUsage: "<root>",
Action: verifyVerkle,
Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
Description: `
geth verkle verify <state-root>
This command takes a root commitment and attempts to rebuild the tree.
@ -56,7 +56,7 @@ This command takes a root commitment and attempts to rebuild the tree.
Usage: "Dump a verkle tree to a DOT file",
ArgsUsage: "<root> <key1> [<key 2> ...]",
Action: expandVerkle,
Flags: flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
Flags: flags.Merge(utils.NetworkFlags, utils.DatabaseFlags),
Description: `
geth verkle dump <state-root> <key 1> [<key 2> ...]
This command will produce a dot file representing the tree, rooted at <root>.
@ -74,7 +74,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error
switch node := root.(type) {
case *verkle.InternalNode:
for i, child := range node.Children() {
childC := child.ComputeCommitment().Bytes()
childC := child.Commit().Bytes()
childS, err := resolver(childC[:])
if bytes.Equal(childC[:], zero[:]) {
@ -86,7 +86,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error
// depth is set to 0, the tree isn't rebuilt so it's not a problem
childN, err := verkle.ParseNode(childS, 0, childC[:])
if err != nil {
return fmt.Errorf("decode error child %x in db: %w", child.ComputeCommitment().Bytes(), err)
return fmt.Errorf("decode error child %x in db: %w", child.Commitment().Bytes(), err)
}
if err := checkChildren(childN, resolver); err != nil {
return fmt.Errorf("%x%w", i, err) // write the path to the erroring node
@ -100,7 +100,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error
return nil
}
}
return errors.New("Both balance and nonce are 0")
return errors.New("both balance and nonce are 0")
case verkle.Empty:
// nothing to do
default:
@ -115,6 +115,7 @@ func verifyVerkle(ctx *cli.Context) error {
defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()
headBlock := rawdb.ReadHeadBlock(chaindb)
if headBlock == nil {
log.Error("Failed to load head block")
@ -163,6 +164,7 @@ func expandVerkle(ctx *cli.Context) error {
defer stack.Close()
chaindb := utils.MakeChainDatabase(ctx, stack, true)
defer chaindb.Close()
var (
rootC common.Hash
keylist [][]byte


@ -30,17 +30,24 @@ import (
)
func TestVerification(t *testing.T) {
// Signatures generated with `minisign`
t.Run("minisig", func(t *testing.T) {
// For this test, the pubkey is in testdata/minisign.pub
// Signatures generated with `minisign`. Legacy format, not pre-hashed file.
t.Run("minisig-legacy", func(t *testing.T) {
// For this test, the pubkey is in testdata/vcheck/minisign.pub
// (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' )
pub := "RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp"
testVerification(t, pub, "./testdata/vcheck/minisig-sigs/")
})
t.Run("minisig-new", func(t *testing.T) {
// For this test, the pubkey is in testdata/vcheck/minisign.pub
// (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' )
// `minisign -S -s ./minisign.sec -m data.json -x ./minisig-sigs-new/data.json.minisig`
pub := "RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp"
testVerification(t, pub, "./testdata/vcheck/minisig-sigs-new/")
})
// Signatures generated with `signify-openbsd`
t.Run("signify-openbsd", func(t *testing.T) {
t.Skip("This currently fails, minisign expects 4 lines of data, signify provides only 2")
// For this test, the pubkey is in testdata/signifykey.pub
// For this test, the pubkey is in testdata/vcheck/signifykey.pub
// (the privkey is `signifykey.sec`, if we want to expand this test. Password 'test' )
pub := "RWSKLNhZb0KdATtRT7mZC/bybI3t3+Hv/O2i3ye04Dq9fnT9slpZ1a2/"
testVerification(t, pub, "./testdata/vcheck/signify-sigs/")
@ -58,6 +65,9 @@ func testVerification(t *testing.T, pubkey, sigdir string) {
if err != nil {
t.Fatal(err)
}
if len(files) == 0 {
t.Fatal("Missing tests")
}
for _, f := range files {
sig, err := os.ReadFile(filepath.Join(sigdir, f.Name()))
if err != nil {


@ -26,6 +26,7 @@ import (
"fmt"
"math"
"math/big"
"net"
"net/http"
"os"
"path/filepath"
@ -41,13 +42,13 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/txpool/legacypool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/kzg4844"
"github.com/ethereum/go-ethereum/eth"
ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/filters"
@ -60,7 +61,6 @@ import (
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/les"
lescatalyst "github.com/ethereum/go-ethereum/les/catalyst"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/metrics/exp"
@ -74,6 +74,9 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/triedb/hashdb"
"github.com/ethereum/go-ethereum/trie/triedb/pathdb"
pcsclite "github.com/gballet/go-libpcsclite"
gopsutil "github.com/shirou/gopsutil/mem"
"github.com/urfave/cli/v2"
@ -141,7 +144,7 @@ var (
}
NetworkIdFlag = &cli.Uint64Flag{
Name: "networkid",
Usage: "Explicitly set network id (integer)(For testnets: use --rinkeby, --goerli, --sepolia instead)",
Usage: "Explicitly set network id (integer)(For testnets: use --goerli, --sepolia, --holesky instead)",
Value: ethconfig.Defaults.NetworkId,
Category: flags.EthCategory,
}
@ -150,11 +153,6 @@ var (
Usage: "Ethereum mainnet",
Category: flags.EthCategory,
}
RinkebyFlag = &cli.BoolFlag{
Name: "rinkeby",
Usage: "Rinkeby network: pre-configured proof-of-authority test network",
Category: flags.EthCategory,
}
GoerliFlag = &cli.BoolFlag{
Name: "goerli",
Usage: "Görli network: pre-configured proof-of-authority test network",
@ -165,14 +163,18 @@ var (
Usage: "Sepolia network: pre-configured proof-of-work test network",
Category: flags.EthCategory,
}
HoleskyFlag = &cli.BoolFlag{
Name: "holesky",
Usage: "Holesky network: pre-configured proof-of-stake test network",
Category: flags.EthCategory,
}
// Dev mode
DeveloperFlag = &cli.BoolFlag{
Name: "dev",
Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
Category: flags.DevCategory,
}
DeveloperPeriodFlag = &cli.IntFlag{
DeveloperPeriodFlag = &cli.Uint64Flag{
Name: "dev.period",
Usage: "Block period to use in developer mode (0 = mine only if transaction pending)",
Category: flags.DevCategory,
@ -231,30 +233,12 @@ var (
}
defaultSyncMode = ethconfig.Defaults.SyncMode
SyncModeFlag = &flags.TextMarshalerFlag{
Name: "syncmode",
Usage: `Blockchain sync mode ("snap", "full" or "light")`,
Value: &defaultSyncMode,
Category: flags.EthCategory,
}
GCModeFlag = &cli.StringFlag{
Name: "gcmode",
Usage: `Blockchain garbage collection mode ("full", "archive")`,
Value: "full",
Category: flags.EthCategory,
}
SnapshotFlag = &cli.BoolFlag{
Name: "snapshot",
Usage: `Enables snapshot-database mode (default = enable)`,
Value: true,
Category: flags.EthCategory,
}
TxLookupLimitFlag = &cli.Uint64Flag{
Name: "txlookuplimit",
Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain)",
Value: ethconfig.Defaults.TxLookupLimit,
Category: flags.EthCategory,
}
LightKDFFlag = &cli.BoolFlag{
Name: "lightkdf",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
@ -265,11 +249,6 @@ var (
Usage: "Comma separated block number-to-hash mappings to require for peering (<number>=<hash>)",
Category: flags.EthCategory,
}
LegacyWhitelistFlag = &cli.StringFlag{
Name: "whitelist",
Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>) (deprecated in favor of --eth.requiredblocks)",
Category: flags.DeprecatedCategory,
}
BloomFilterSizeFlag = &cli.Uint64Flag{
Name: "bloomfilter.size",
Usage: "Megabytes of memory allocated to bloom-filter for pruning",
@ -281,6 +260,41 @@ var (
Usage: "Manually specify the Cancun fork timestamp, overriding the bundled setting",
Category: flags.EthCategory,
}
OverrideVerkle = &cli.Uint64Flag{
Name: "override.verkle",
Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting",
Category: flags.EthCategory,
}
SyncModeFlag = &flags.TextMarshalerFlag{
Name: "syncmode",
Usage: `Blockchain sync mode ("snap", "full" or "light")`,
Value: &defaultSyncMode,
Category: flags.StateCategory,
}
GCModeFlag = &cli.StringFlag{
Name: "gcmode",
Usage: `Blockchain garbage collection mode, only relevant in state.scheme=hash ("full", "archive")`,
Value: "full",
Category: flags.StateCategory,
}
StateSchemeFlag = &cli.StringFlag{
Name: "state.scheme",
Usage: "Scheme to use for storing ethereum state ('hash' or 'path')",
Value: rawdb.HashScheme,
Category: flags.StateCategory,
}
StateHistoryFlag = &cli.Uint64Flag{
Name: "history.state",
Usage: "Number of recent blocks to retain state history for (default = 90,000 blocks, 0 = entire chain)",
Value: ethconfig.Defaults.StateHistory,
Category: flags.StateCategory,
}
TransactionHistoryFlag = &cli.Uint64Flag{
Name: "history.transactions",
Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain)",
Value: ethconfig.Defaults.TransactionHistory,
Category: flags.StateCategory,
}
// Light server and client settings
LightServeFlag = &cli.IntFlag{
Name: "light.serve",
@ -306,23 +320,6 @@ var (
Value: ethconfig.Defaults.LightPeers,
Category: flags.LightCategory,
}
UltraLightServersFlag = &cli.StringFlag{
Name: "ulc.servers",
Usage: "List of trusted ultra-light servers",
Value: strings.Join(ethconfig.Defaults.UltraLightServers, ","),
Category: flags.LightCategory,
}
UltraLightFractionFlag = &cli.IntFlag{
Name: "ulc.fraction",
Usage: "Minimum % of trusted ultra-light servers required to announce a new head",
Value: ethconfig.Defaults.UltraLightFraction,
Category: flags.LightCategory,
}
UltraLightOnlyAnnounceFlag = &cli.BoolFlag{
Name: "ulc.onlyannounce",
Usage: "Ultra light server sends announcements only",
Category: flags.LightCategory,
}
LightNoPruneFlag = &cli.BoolFlag{
Name: "light.nopruning",
Usage: "Disable ancient light chain data pruning",
@ -347,18 +344,18 @@ var (
TxPoolJournalFlag = &cli.StringFlag{
Name: "txpool.journal",
Usage: "Disk journal for local transaction to survive node restarts",
Value: txpool.DefaultConfig.Journal,
Value: ethconfig.Defaults.TxPool.Journal,
Category: flags.TxPoolCategory,
}
TxPoolRejournalFlag = &cli.DurationFlag{
Name: "txpool.rejournal",
Usage: "Time interval to regenerate the local transaction journal",
Value: txpool.DefaultConfig.Rejournal,
Value: ethconfig.Defaults.TxPool.Rejournal,
Category: flags.TxPoolCategory,
}
TxPoolPriceLimitFlag = &cli.Uint64Flag{
Name: "txpool.pricelimit",
Usage: "Minimum gas price limit to enforce for acceptance into the pool",
Usage: "Minimum gas price tip to enforce for acceptance into the pool",
Value: ethconfig.Defaults.TxPool.PriceLimit,
Category: flags.TxPoolCategory,
}
@ -398,7 +395,25 @@ var (
Value: ethconfig.Defaults.TxPool.Lifetime,
Category: flags.TxPoolCategory,
}
// Blob transaction pool settings
BlobPoolDataDirFlag = &cli.StringFlag{
Name: "blobpool.datadir",
Usage: "Data directory to store blob transactions in",
Value: ethconfig.Defaults.BlobPool.Datadir,
Category: flags.BlobPoolCategory,
}
BlobPoolDataCapFlag = &cli.Uint64Flag{
Name: "blobpool.datacap",
Usage: "Disk space to allocate for pending blob transactions (soft limit)",
Value: ethconfig.Defaults.BlobPool.Datacap,
Category: flags.BlobPoolCategory,
}
BlobPoolPriceBumpFlag = &cli.Uint64Flag{
Name: "blobpool.pricebump",
Usage: "Price bump percentage to replace an already existing blob transaction",
Value: ethconfig.Defaults.BlobPool.PriceBump,
Category: flags.BlobPoolCategory,
}
// Performance tuning settings
CacheFlag = &cli.IntFlag{
Name: "cache",
@ -418,18 +433,6 @@ var (
Value: 15,
Category: flags.PerfCategory,
}
CacheTrieJournalFlag = &cli.StringFlag{
Name: "cache.trie.journal",
Usage: "Disk journal directory for trie cache to survive node restarts",
Value: ethconfig.Defaults.TrieCleanCacheJournal,
Category: flags.PerfCategory,
}
CacheTrieRejournalFlag = &cli.DurationFlag{
Name: "cache.trie.rejournal",
Usage: "Time interval to regenerate the trie cache journal",
Value: ethconfig.Defaults.TrieCleanCacheRejournal,
Category: flags.PerfCategory,
}
CacheGCFlag = &cli.IntFlag{
Name: "cache.gc",
Usage: "Percentage of cache memory allowance to use for trie pruning (default = 25% full mode, 0% archive mode)",
@ -726,6 +729,18 @@ var (
Usage: "Allow for unprotected (non EIP155 signed) transactions to be submitted via RPC",
Category: flags.APICategory,
}
BatchRequestLimit = &cli.IntFlag{
Name: "rpc.batch-request-limit",
Usage: "Maximum number of requests in a batch",
Value: node.DefaultConfig.BatchRequestLimit,
Category: flags.APICategory,
}
BatchResponseMaxSize = &cli.IntFlag{
Name: "rpc.batch-response-max-size",
Usage: "Maximum number of bytes returned from a batched call",
Value: node.DefaultConfig.BatchResponseMaxSize,
Category: flags.APICategory,
}
EnablePersonal = &cli.BoolFlag{
Name: "rpc.enabledeprecatedpersonal",
Usage: "Enables the (deprecated) personal namespace",
@ -778,8 +793,16 @@ var (
Usage: "Disables the peer discovery mechanism (manual peer addition)",
Category: flags.NetworkingCategory,
}
DiscoveryV4Flag = &cli.BoolFlag{
Name: "discovery.v4",
Aliases: []string{"discv4"},
Usage: "Enables the V4 discovery mechanism",
Category: flags.NetworkingCategory,
Value: true,
}
DiscoveryV5Flag = &cli.BoolFlag{
Name: "v5disc",
Name: "discovery.v5",
Aliases: []string{"discv5"},
Usage: "Enables the experimental RLPx V5 (Topic Discovery) mechanism",
Category: flags.NetworkingCategory,
}
@ -939,25 +962,26 @@ Please note that --` + MetricsHTTPFlag.Name + ` must be set to start the server.
var (
// TestnetFlags is the flag group of all built-in supported testnets.
TestnetFlags = []cli.Flag{
RinkebyFlag,
GoerliFlag,
SepoliaFlag,
HoleskyFlag,
}
// NetworkFlags is the flag group of all built-in supported networks.
NetworkFlags = append([]cli.Flag{MainnetFlag}, TestnetFlags...)
// DatabasePathFlags is the flag group of all database path flags.
DatabasePathFlags = []cli.Flag{
// DatabaseFlags is the flag group of all database flags.
DatabaseFlags = []cli.Flag{
DataDirFlag,
AncientFlag,
RemoteDBFlag,
StateSchemeFlag,
HttpHeaderFlag,
}
)
func init() {
if rawdb.PebbleEnabled {
DatabasePathFlags = append(DatabasePathFlags, DBEngineFlag)
DatabaseFlags = append(DatabaseFlags, DBEngineFlag)
}
}
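Most hunks in this file swap utils.DatabasePathFlags for the renamed utils.DatabaseFlags group, composed into per-command flag lists via flags.Merge. A sketch of that composition, assuming Merge is a plain variadic concatenation (which its call sites suggest):

import "github.com/urfave/cli/v2"

// merge sketches the assumed behavior of flags.Merge: append the
// flag groups into one slice, in order.
func merge(groups ...[]cli.Flag) []cli.Flag {
    var out []cli.Flag
    for _, g := range groups {
        out = append(out, g...)
    }
    return out
}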
@ -971,15 +995,15 @@ func MakeDataDir(ctx *cli.Context) string {
return pluginPath
}
// end PluGeth injection
if ctx.Bool(RinkebyFlag.Name) {
return filepath.Join(path, "rinkeby")
}
if ctx.Bool(GoerliFlag.Name) {
return filepath.Join(path, "goerli")
}
if ctx.Bool(SepoliaFlag.Name) {
return filepath.Join(path, "sepolia")
}
if ctx.Bool(HoleskyFlag.Name) {
return filepath.Join(path, "holesky")
}
return path
}
Fatalf("Cannot determine default data directory, please set manually (--datadir)")
@ -988,7 +1012,7 @@ func MakeDataDir(ctx *cli.Context) string {
// setNodeKey creates a node key from set command line flags, either loading it
// from a file or as a specified hex value. If neither flag is provided, this
// method returns nil and an emphemeral key is to be generated.
// method returns nil and an ephemeral key is to be generated.
func setNodeKey(ctx *cli.Context, cfg *p2p.Config) {
var (
hex = ctx.String(NodeKeyHexFlag.Name)
@ -1021,6 +1045,12 @@ func setNodeUserIdent(ctx *cli.Context, cfg *node.Config) {
// setBootstrapNodes creates a list of bootstrap nodes from the command line
// flags, reverting to pre-configured ones if none have been specified.
// Priority order for bootnodes configuration:
//
// 1. --bootnodes flag
// 2. Config file
// 3. Network preset flags (e.g. --goerli)
// 4. default to mainnet nodes
func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
urls := params.MainnetBootnodes
// begin PluGeth injection
@ -1028,33 +1058,37 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
urls = pluginUrls
}
// end PluGeth injection
switch {
case ctx.IsSet(BootnodesFlag.Name):
if ctx.IsSet(BootnodesFlag.Name) {
urls = SplitAndTrim(ctx.String(BootnodesFlag.Name))
} else {
if cfg.BootstrapNodes != nil {
return // Already set by config file, don't apply defaults.
}
switch {
case ctx.Bool(HoleskyFlag.Name):
urls = params.HoleskyBootnodes
case ctx.Bool(SepoliaFlag.Name):
urls = params.SepoliaBootnodes
case ctx.Bool(RinkebyFlag.Name):
urls = params.RinkebyBootnodes
case ctx.Bool(GoerliFlag.Name):
urls = params.GoerliBootnodes
}
// don't apply defaults if BootstrapNodes is already set
if cfg.BootstrapNodes != nil {
return
}
cfg.BootstrapNodes = mustParseBootnodes(urls)
}
cfg.BootstrapNodes = make([]*enode.Node, 0, len(urls))
func mustParseBootnodes(urls []string) []*enode.Node {
nodes := make([]*enode.Node, 0, len(urls))
for _, url := range urls {
if url != "" {
node, err := enode.Parse(enode.ValidSchemes, url)
if err != nil {
log.Crit("Bootstrap URL invalid", "enode", url, "err", err)
continue
return nil
}
cfg.BootstrapNodes = append(cfg.BootstrapNodes, node)
nodes = append(nodes, node)
}
}
return nodes
}
// setBootstrapNodesV5 creates a list of bootstrap nodes from the command line
@ -1159,6 +1193,14 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
if ctx.IsSet(AllowUnprotectedTxs.Name) {
cfg.AllowUnprotectedTxs = ctx.Bool(AllowUnprotectedTxs.Name)
}
if ctx.IsSet(BatchRequestLimit.Name) {
cfg.BatchRequestLimit = ctx.Int(BatchRequestLimit.Name)
}
if ctx.IsSet(BatchResponseMaxSize.Name) {
cfg.BatchResponseMaxSize = ctx.Int(BatchResponseMaxSize.Name)
}
}
// setGraphQL creates the GraphQL listener interface string from the set
@ -1224,19 +1266,6 @@ func setLes(ctx *cli.Context, cfg *ethconfig.Config) {
if ctx.IsSet(LightMaxPeersFlag.Name) {
cfg.LightPeers = ctx.Int(LightMaxPeersFlag.Name)
}
if ctx.IsSet(UltraLightServersFlag.Name) {
cfg.UltraLightServers = strings.Split(ctx.String(UltraLightServersFlag.Name), ",")
}
if ctx.IsSet(UltraLightFractionFlag.Name) {
cfg.UltraLightFraction = ctx.Int(UltraLightFractionFlag.Name)
}
if cfg.UltraLightFraction <= 0 && cfg.UltraLightFraction > 100 {
log.Error("Ultra light fraction is invalid", "had", cfg.UltraLightFraction, "updated", ethconfig.Defaults.UltraLightFraction)
cfg.UltraLightFraction = ethconfig.Defaults.UltraLightFraction
}
if ctx.IsSet(UltraLightOnlyAnnounceFlag.Name) {
cfg.UltraLightOnlyAnnounce = ctx.Bool(UltraLightOnlyAnnounceFlag.Name)
}
if ctx.IsSet(LightNoPruneFlag.Name) {
cfg.LightNoPrune = ctx.Bool(LightNoPruneFlag.Name)
}
@ -1377,13 +1406,17 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
cfg.NoDiscovery = true
}
// if we're running a light client or server, force enable the v5 peer discovery
// unless it is explicitly disabled with --nodiscover note that explicitly specifying
// --v5disc overrides --nodiscover, in which case the later only disables v4 discovery
forceV5Discovery := (lightClient || lightServer) && !ctx.Bool(NoDiscoverFlag.Name)
if ctx.IsSet(DiscoveryV5Flag.Name) {
// Disallow --nodiscover when used in conjunction with light mode.
if (lightClient || lightServer) && ctx.Bool(NoDiscoverFlag.Name) {
Fatalf("Cannot use --" + NoDiscoverFlag.Name + " in light client or light server mode")
}
CheckExclusive(ctx, DiscoveryV4Flag, NoDiscoverFlag)
CheckExclusive(ctx, DiscoveryV5Flag, NoDiscoverFlag)
cfg.DiscoveryV4 = ctx.Bool(DiscoveryV4Flag.Name)
cfg.DiscoveryV5 = ctx.Bool(DiscoveryV5Flag.Name)
} else if forceV5Discovery {
// If we're running a light client or server, force enable the v5 peer discovery.
if lightClient || lightServer {
cfg.DiscoveryV5 = true
}
@ -1482,12 +1515,12 @@ func SetDataDir(ctx *cli.Context, cfg *node.Config) {
cfg.DataDir = ctx.String(DataDirFlag.Name)
case ctx.Bool(DeveloperFlag.Name):
cfg.DataDir = "" // unless explicitly requested, use memory databases
case ctx.Bool(RinkebyFlag.Name) && cfg.DataDir == node.DefaultDataDir():
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
case ctx.Bool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir():
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli")
case ctx.Bool(SepoliaFlag.Name) && cfg.DataDir == node.DefaultDataDir():
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "sepolia")
case ctx.Bool(HoleskyFlag.Name) && cfg.DataDir == node.DefaultDataDir():
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "holesky")
}
}
@ -1511,7 +1544,7 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config, light bool) {
}
}
func setTxPool(ctx *cli.Context, cfg *txpool.Config) {
func setTxPool(ctx *cli.Context, cfg *legacypool.Config) {
if ctx.IsSet(TxPoolLocalsFlag.Name) {
locals := strings.Split(ctx.String(TxPoolLocalsFlag.Name), ",")
for _, account := range locals {
@ -1644,16 +1677,11 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
// SetEthConfig applies eth-related command line flags to the config.
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Avoid conflicting network flags
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RinkebyFlag, GoerliFlag, SepoliaFlag)
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, GoerliFlag, SepoliaFlag, HoleskyFlag)
CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
if ctx.String(GCModeFlag.Name) == "archive" && ctx.Uint64(TxLookupLimitFlag.Name) != 0 {
ctx.Set(TxLookupLimitFlag.Name, "0")
log.Warn("Disable transaction unindexing for archive node")
}
if ctx.IsSet(LightServeFlag.Name) && ctx.Uint64(TxLookupLimitFlag.Name) != 0 {
log.Warn("LES server cannot serve old transaction status and cannot connect below les/4 protocol version if transaction lookup index is limited")
}
// Set configurations from CLI flags
setEtherbase(ctx, cfg)
setGPO(ctx, &cfg.GPO, ctx.String(SyncModeFlag.Name) == "light")
setTxPool(ctx, &cfg.TxPool)
@ -1724,18 +1752,40 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
cfg.Preimages = true
log.Info("Enabling recording of key preimages since archive mode is used")
}
if ctx.IsSet(TxLookupLimitFlag.Name) {
cfg.TxLookupLimit = ctx.Uint64(TxLookupLimitFlag.Name)
if ctx.IsSet(StateHistoryFlag.Name) {
cfg.StateHistory = ctx.Uint64(StateHistoryFlag.Name)
}
// Parse state scheme, abort the process if it's not compatible.
chaindb := tryMakeReadOnlyDatabase(ctx, stack)
scheme, err := ParseStateScheme(ctx, chaindb)
chaindb.Close()
if err != nil {
Fatalf("%v", err)
}
cfg.StateScheme = scheme
// Parse the transaction history flag; if the user is still using a legacy config
// file with 'TxLookupLimit' configured, copy the value to 'TransactionHistory'.
if cfg.TransactionHistory == ethconfig.Defaults.TransactionHistory && cfg.TxLookupLimit != ethconfig.Defaults.TxLookupLimit {
log.Warn("The config option 'TxLookupLimit' is deprecated and will be removed, please use 'TransactionHistory'")
cfg.TransactionHistory = cfg.TxLookupLimit
}
if ctx.IsSet(TransactionHistoryFlag.Name) {
cfg.TransactionHistory = ctx.Uint64(TransactionHistoryFlag.Name)
} else if ctx.IsSet(TxLookupLimitFlag.Name) {
log.Warn("The flag --txlookuplimit is deprecated and will be removed, please use --history.transactions")
cfg.TransactionHistory = ctx.Uint64(TxLookupLimitFlag.Name)
}
if ctx.String(GCModeFlag.Name) == "archive" && cfg.TransactionHistory != 0 {
cfg.TransactionHistory = 0
log.Warn("Disabled transaction unindexing for archive node")
}
if ctx.IsSet(LightServeFlag.Name) && cfg.TransactionHistory != 0 {
log.Warn("LES server cannot serve old transaction status and cannot connect below les/4 protocol version if transaction lookup index is limited")
}
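For readers skimming the hunk, the effective precedence of the three sources above can be summarized as follows (a hedged reading of the code, expressed as a Go comment; this is not separately documented behaviour):

// Effective precedence for the transaction-index depth (highest wins):
//   1. --history.transactions      (TransactionHistoryFlag, the new flag)
//   2. --txlookuplimit             (deprecated; logs a warning when used)
//   3. legacy 'TxLookupLimit' from a config file (copied over with a warning)
// Afterwards, --gcmode=archive forces the value to 0, i.e. index the entire chain.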
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheTrieFlag.Name) {
cfg.TrieCleanCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheTrieFlag.Name) / 100
}
if ctx.IsSet(CacheTrieJournalFlag.Name) {
cfg.TrieCleanCacheJournal = ctx.String(CacheTrieJournalFlag.Name)
}
if ctx.IsSet(CacheTrieRejournalFlag.Name) {
cfg.TrieCleanCacheRejournal = ctx.Duration(CacheTrieRejournalFlag.Name)
}
if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheGCFlag.Name) {
cfg.TrieDirtyCache = ctx.Int(CacheFlag.Name) * ctx.Int(CacheGCFlag.Name) / 100
}
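The cache flags above are percentages of the total --cache budget. A worked sketch of the arithmetic (assuming --cache=4096 with 15% for --cache.trie and 25% for --cache.gc; treat the concrete numbers as assumptions, the real defaults live in the flag definitions):

trieCleanCache := 4096 * 15 / 100 // 614 MB for the clean trie cache
trieDirtyCache := 4096 * 25 / 100 // 1024 MB for the dirty trie cache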
@ -1794,28 +1844,18 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
}
cfg.Genesis = core.DefaultGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.MainnetGenesisHash)
case ctx.Bool(HoleskyFlag.Name):
if !ctx.IsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 17000
}
cfg.Genesis = core.DefaultHoleskyGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.HoleskyGenesisHash)
case ctx.Bool(SepoliaFlag.Name):
if !ctx.IsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 11155111
}
cfg.Genesis = core.DefaultSepoliaGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.SepoliaGenesisHash)
case ctx.Bool(RinkebyFlag.Name):
log.Warn("")
log.Warn("--------------------------------------------------------------------------------")
log.Warn("Please note, Rinkeby has been deprecated. It will still work for the time being,")
log.Warn("but there will be no further hard-forks shipped for it.")
log.Warn("The network will be permanently halted in Q2/Q3 of 2023.")
log.Warn("For the most future proof testnet, choose Sepolia as")
log.Warn("your replacement environment (--sepolia instead of --rinkeby).")
log.Warn("--------------------------------------------------------------------------------")
log.Warn("")
if !ctx.IsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 4
}
cfg.Genesis = core.DefaultRinkebyGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.RinkebyGenesisHash)
case ctx.Bool(GoerliFlag.Name):
if !ctx.IsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 5
@ -1871,17 +1911,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
log.Info("Using developer account", "address", developer.Address)
// Create a new developer genesis block or reuse existing one
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.Int(DeveloperPeriodFlag.Name)), ctx.Uint64(DeveloperGasLimitFlag.Name), developer.Address)
cfg.Genesis = core.DeveloperGenesisBlock(ctx.Uint64(DeveloperGasLimitFlag.Name), developer.Address)
if ctx.IsSet(DataDirFlag.Name) {
// If datadir doesn't exist we need to open db in write-mode
// so leveldb can create files.
readonly := true
if !common.FileExist(stack.ResolvePath("chaindata")) {
readonly = false
}
// Check if we have an already initialized chain and fall back to
// that if so. Otherwise we need to generate a new genesis spec.
chaindb := MakeChainDatabase(ctx, stack, readonly)
chaindb := tryMakeReadOnlyDatabase(ctx, stack)
if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
cfg.Genesis = nil // fallback to db content
}
@ -1931,9 +1963,6 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend
Fatalf("Failed to register the Ethereum service: %v", err)
}
stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
if err := lescatalyst.Register(stack, backend); err != nil {
Fatalf("Failed to register the Engine API service: %v", err)
}
return backend.ApiBackend, nil
}
backend, err := eth.New(stack, cfg)
@ -1946,9 +1975,6 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend
Fatalf("Failed to create the LES server: %v", err)
}
}
if err := ethcatalyst.Register(stack, backend); err != nil {
Fatalf("Failed to register the Engine API service: %v", err)
}
stack.RegisterAPIs(tracers.APIs(backend.APIBackend))
return backend.APIBackend, backend
}
@ -1995,7 +2021,7 @@ func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, path string) {
if err := rlp.DecodeBytes(rlpBlob, &block); err != nil {
Fatalf("Failed to decode block: %v", err)
}
ethcatalyst.RegisterFullSyncTester(stack, eth, &block)
catalyst.RegisterFullSyncTester(stack, eth, &block)
log.Info("Registered full-sync tester", "number", block.NumberU64(), "hash", block.Hash())
}
@ -2051,7 +2077,7 @@ func SetupMetrics(ctx *cli.Context) {
}
if ctx.IsSet(MetricsHTTPFlag.Name) {
address := fmt.Sprintf("%s:%d", ctx.String(MetricsHTTPFlag.Name), ctx.Int(MetricsPortFlag.Name))
address := net.JoinHostPort(ctx.String(MetricsHTTPFlag.Name), fmt.Sprintf("%d", ctx.Int(MetricsPortFlag.Name)))
log.Info("Enabling stand-alone metrics HTTP endpoint", "address", address)
exp.Setup(address)
} else if ctx.IsSet(MetricsPortFlag.Name) {
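The switch from fmt.Sprintf to net.JoinHostPort above makes the metrics endpoint IPv6-safe. A minimal standalone sketch of the difference (illustration only, not geth code):

package main

import (
	"fmt"
	"net"
)

func main() {
	// Sprintf leaves an IPv6 literal ambiguous.
	fmt.Println(fmt.Sprintf("%s:%d", "::1", 6060)) // "::1:6060"
	// JoinHostPort brackets it correctly.
	fmt.Println(net.JoinHostPort("::1", "6060")) // "[::1]:6060"
}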
@ -2105,6 +2131,18 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.
return chainDb
}
// tryMakeReadOnlyDatabase tries to open the chain database in read-only mode,
// falling back to write mode if the database is not yet initialized.
func tryMakeReadOnlyDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
// If the database doesn't exist we need to open it in write-mode to allow
// the engine to create files.
readonly := true
if rawdb.PreexistingDatabase(stack.ResolvePath("chaindata")) == "" {
readonly = false
}
return MakeChainDatabase(ctx, stack, readonly)
}
func IsNetworkPreset(ctx *cli.Context) bool {
for _, flag := range NetworkFlags {
bFlag, _ := flag.(*cli.BoolFlag)
@ -2126,7 +2164,7 @@ func DialRPCWithHeaders(endpoint string, headers []string) (*rpc.Client, error)
}
var opts []rpc.ClientOption
if len(headers) > 0 {
var customHeaders = make(http.Header)
customHeaders := make(http.Header)
for _, h := range headers {
kv := strings.Split(h, ":")
if len(kv) != 2 {
@ -2144,10 +2182,10 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
switch {
case ctx.Bool(MainnetFlag.Name):
genesis = core.DefaultGenesisBlock()
case ctx.Bool(HoleskyFlag.Name):
genesis = core.DefaultHoleskyGenesisBlock()
case ctx.Bool(SepoliaFlag.Name):
genesis = core.DefaultSepoliaGenesisBlock()
case ctx.Bool(RinkebyFlag.Name):
genesis = core.DefaultRinkebyGenesisBlock()
case ctx.Bool(GoerliFlag.Name):
genesis = core.DefaultGoerliGenesisBlock()
case ctx.Bool(DeveloperFlag.Name):
@ -2173,6 +2211,10 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
if gcmode := ctx.String(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" {
Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name)
}
scheme, err := ParseStateScheme(ctx, chainDb)
if err != nil {
Fatalf("%v", err)
}
cache := &core.CacheConfig{
TrieCleanLimit: ethconfig.Defaults.TrieCleanCache,
TrieCleanNoPrefetch: ctx.Bool(CacheNoPrefetchFlag.Name),
@ -2181,6 +2223,8 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
TrieTimeLimit: ethconfig.Defaults.TrieTimeout,
SnapshotLimit: ethconfig.Defaults.SnapshotCache,
Preimages: ctx.Bool(CachePreimagesFlag.Name),
StateScheme: scheme,
StateHistory: ctx.Uint64(StateHistoryFlag.Name),
}
if cache.TrieDirtyDisabled && !cache.Preimages {
cache.Preimages = true
@ -2225,3 +2269,62 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
}
return preloads
}
// ParseStateScheme resolves the scheme identifier from the CLI flag. If the
// provided state scheme is not compatible with the already-persisted scheme,
// an error is returned.
//
// - none: use the scheme consistent with the persistent state, or fall back
// to the hash-based scheme if the state is empty.
// - hash: use hash-based scheme or error out if not compatible with
// persistent state scheme.
// - path: use path-based scheme or error out if not compatible with
// persistent state scheme.
func ParseStateScheme(ctx *cli.Context, disk ethdb.Database) (string, error) {
// If the state scheme is not specified, use the scheme consistent
// with the persistent state, or fall back to hash mode if the database
// is empty.
stored := rawdb.ReadStateScheme(disk)
if !ctx.IsSet(StateSchemeFlag.Name) {
if stored == "" {
// use the default scheme for an empty database; flip this when
// path mode becomes the default
log.Info("State scheme set to default", "scheme", "hash")
return rawdb.HashScheme, nil
}
log.Info("State scheme set to already existing", "scheme", stored)
return stored, nil // reuse the persistent scheme
}
// If state scheme is specified, ensure it's compatible with
// persistent state.
scheme := ctx.String(StateSchemeFlag.Name)
if stored == "" || scheme == stored {
log.Info("State scheme set by user", "scheme", scheme)
return scheme, nil
}
return "", fmt.Errorf("incompatible state scheme, stored: %s, provided: %s", stored, scheme)
}
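A minimal sketch of the intended call pattern (hypothetical caller; db is an already-open ethdb.Database, and ParseStateScheme/Fatalf are the helpers from this package):

scheme, err := ParseStateScheme(ctx, db)
if err != nil {
	// e.g. "incompatible state scheme, stored: path, provided: hash"
	Fatalf("%v", err)
}
// scheme is now rawdb.HashScheme or rawdb.PathScheme, guaranteed to be
// consistent with whatever is already persisted on disk.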
// MakeTrieDatabase constructs a trie database based on the configured scheme.
func MakeTrieDatabase(ctx *cli.Context, disk ethdb.Database, preimage bool, readOnly bool) *trie.Database {
config := &trie.Config{
Preimages: preimage,
}
scheme, err := ParseStateScheme(ctx, disk)
if err != nil {
Fatalf("%v", err)
}
if scheme == rawdb.HashScheme {
// Read-only mode is not implemented in hash mode, so the parameter
// is silently ignored. TODO(rjl493456442): honour the flag once
// read-only support is implemented.
config.HashDB = hashdb.Defaults
return trie.NewDatabase(disk, config)
}
if readOnly {
config.PathDB = pathdb.ReadOnly
} else {
config.PathDB = pathdb.Defaults
}
return trie.NewDatabase(disk, config)
}
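And a hedged sketch of how the two helpers above compose, e.g. for an offline inspection command (assumed usage, not lifted from a real caller):

// Open the chain database read-only if it is already initialized...
db := tryMakeReadOnlyDatabase(ctx, stack)
defer db.Close()

// ...then layer a trie database on top, read-only where the scheme allows it.
triedb := MakeTrieDatabase(ctx, db, false, true)
defer triedb.Close()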

View File

@ -19,6 +19,7 @@ package utils
import (
"fmt"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/urfave/cli/v2"
)
@ -33,15 +34,49 @@ var ShowDeprecated = &cli.Command{
var DeprecatedFlags = []cli.Flag{
NoUSBFlag,
LegacyWhitelistFlag,
CacheTrieJournalFlag,
CacheTrieRejournalFlag,
LegacyDiscoveryV5Flag,
TxLookupLimitFlag,
}
var (
// (Deprecated May 2020, shown in aliased flags section)
// Deprecated May 2020, shown in aliased flags section
NoUSBFlag = &cli.BoolFlag{
Name: "nousb",
Usage: "Disables monitoring for and managing USB hardware wallets (deprecated)",
Category: flags.DeprecatedCategory,
}
// Deprecated March 2022
LegacyWhitelistFlag = &cli.StringFlag{
Name: "whitelist",
Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>) (deprecated in favor of --eth.requiredblocks)",
Category: flags.DeprecatedCategory,
}
// Deprecated July 2023
CacheTrieJournalFlag = &cli.StringFlag{
Name: "cache.trie.journal",
Usage: "Disk journal directory for trie cache to survive node restarts",
Category: flags.DeprecatedCategory,
}
CacheTrieRejournalFlag = &cli.DurationFlag{
Name: "cache.trie.rejournal",
Usage: "Time interval to regenerate the trie cache journal",
Category: flags.DeprecatedCategory,
}
LegacyDiscoveryV5Flag = &cli.BoolFlag{
Name: "v5disc",
Usage: "Enables the experimental RLPx V5 (Topic Discovery) mechanism (deprecated, use --discv5 instead)",
Category: flags.DeprecatedCategory,
}
// Deprecated August 2023
TxLookupLimitFlag = &cli.Uint64Flag{
Name: "txlookuplimit",
Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain) (deprecated, use history.transactions instead)",
Value: ethconfig.Defaults.TransactionHistory,
Category: flags.DeprecatedCategory,
}
)
// showDeprecated displays deprecated flags that will be soon removed from the codebase.

View File

@ -170,6 +170,20 @@ func TestBasicLRUContains(t *testing.T) {
}
}
// Test that Peek doesn't update recent-ness
func TestBasicLRUPeek(t *testing.T) {
cache := NewBasicLRU[int, int](2)
cache.Add(1, 1)
cache.Add(2, 2)
if v, ok := cache.Peek(1); !ok || v != 1 {
t.Errorf("1 should be set to 1")
}
cache.Add(3, 3)
if cache.Contains(1) {
t.Errorf("should not have updated recent-ness of 1")
}
}
func BenchmarkLRU(b *testing.B) {
var (
capacity = 1000

View File

@ -82,7 +82,7 @@ func (i *HexOrDecimal256) MarshalText() ([]byte, error) {
// it however accepts either "0x"-prefixed (hex encoded) or non-prefixed (decimal)
type Decimal256 big.Int
// NewHexOrDecimal256 creates a new Decimal256
// NewDecimal256 creates a new Decimal256
func NewDecimal256(x int64) *Decimal256 {
b := big.NewInt(x)
d := Decimal256(*b)

View File

@ -65,6 +65,11 @@ func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }
// If b is larger than len(h), b will be cropped from the left.
func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) }
// Cmp compares two hashes.
func (h Hash) Cmp(other Hash) int {
return bytes.Compare(h[:], other[:])
}
// Bytes gets the byte representation of the underlying hash.
func (h Hash) Bytes() []byte { return h[:] }
@ -226,6 +231,11 @@ func IsHexAddress(s string) bool {
return len(s) == 2*AddressLength && isHex(s)
}
// Cmp compares two addresses.
func (a Address) Cmp(other Address) int {
return bytes.Compare(a[:], other[:])
}
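These Cmp methods return an int precisely so they can be passed as method values to slices.SortFunc, as the clique snapshot change further down does. A minimal sketch:

import (
	"golang.org/x/exp/slices"

	"github.com/ethereum/go-ethereum/common"
)

// sortedAddrs returns a sorted copy of addrs in ascending byte order.
func sortedAddrs(addrs []common.Address) []common.Address {
	out := slices.Clone(addrs)
	slices.SortFunc(out, common.Address.Cmp)
	return out
}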
// Bytes gets the string representation of the underlying address.
func (a Address) Bytes() []byte { return a[:] }

View File

@ -25,6 +25,7 @@ import (
"reflect"
"strings"
"testing"
"time"
)
func TestBytesConversion(t *testing.T) {
@ -583,3 +584,14 @@ func TestAddressEIP55(t *testing.T) {
t.Fatal("Unexpected address after unmarshal")
}
}
func BenchmarkPrettyDuration(b *testing.B) {
var x = PrettyDuration(time.Duration(int64(1203123912312)))
b.Logf("Pre %s", time.Duration(x).String())
var a string
b.ResetTimer()
for i := 0; i < b.N; i++ {
a = x.String()
}
b.Logf("Post %s", a)
}

View File

@ -23,7 +23,8 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
@ -257,7 +258,7 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
return consensus.ErrInvalidNumber
}
// Verify the header's EIP-1559 attributes.
if err := misc.VerifyEip1559Header(chain.Config(), parent, header); err != nil {
if err := eip1559.VerifyEIP1559Header(chain.Config(), parent, header); err != nil {
return err
}
// Verify existence / non-existence of withdrawalsHash.
@ -268,13 +269,24 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
if !shanghai && header.WithdrawalsHash != nil {
return fmt.Errorf("invalid withdrawalsHash: have %x, expected nil", header.WithdrawalsHash)
}
// Verify the existence / non-existence of excessDataGas
// Verify the existence / non-existence of cancun-specific header fields
cancun := chain.Config().IsCancun(header.Number, header.Time)
if cancun && header.ExcessDataGas == nil {
return errors.New("missing excessDataGas")
if !cancun {
switch {
case header.ExcessBlobGas != nil:
return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas)
case header.BlobGasUsed != nil:
return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed)
case header.ParentBeaconRoot != nil:
return fmt.Errorf("invalid parentBeaconRoot, have %#x, expected nil", header.ParentBeaconRoot)
}
} else {
if header.ParentBeaconRoot == nil {
return errors.New("header is missing beaconRoot")
}
if err := eip4844.VerifyEIP4844Header(parent, header); err != nil {
return err
}
if !cancun && header.ExcessDataGas != nil {
return fmt.Errorf("invalid excessDataGas: have %d, expected nil", header.ExcessDataGas)
}
return nil
}

View File

@ -205,7 +205,7 @@ func (sb *blockNumberOrHashOrRLP) UnmarshalJSON(data []byte) error {
}
// GetSigner returns the signer for a specific clique block.
// Can be called with either a blocknumber, blockhash or an rlp encoded blob.
// Can be called with a block number, a block hash or an RLP encoded blob.
// The RLP encoded blob can either be a block or a header.
func (api *API) GetSigner(rlpOrBlockNr *blockNumberOrHashOrRLP) (common.Address, error) {
if len(rlpOrBlockNr.RLP) == 0 {

View File

@ -33,6 +33,7 @@ import (
lru "github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@ -343,7 +344,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainHeaderReader, header
if err := misc.VerifyGaslimit(parent.GasLimit, header.GasLimit); err != nil {
return err
}
} else if err := misc.VerifyEip1559Header(chain.Config(), parent, header); err != nil {
} else if err := eip1559.VerifyEIP1559Header(chain.Config(), parent, header); err != nil {
// Verify the header's EIP-1559 attributes.
return err
}

View File

@ -30,7 +30,7 @@ import (
)
// This test case is a repro of an annoying bug that took us forever to catch.
// In Clique PoA networks (Rinkeby, Görli, etc), consecutive blocks might have
// In Clique PoA networks (Görli, etc), consecutive blocks might have
// the same state root (no block subsidy, empty block). If a node crashes, the
// chain ends up losing the recent state and needs to regenerate it from blocks
// already in the database. The bug was that processing the block *prior* to an

View File

@ -19,7 +19,6 @@ package clique
import (
"bytes"
"encoding/json"
"sort"
"time"
"github.com/ethereum/go-ethereum/common"
@ -29,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"golang.org/x/exp/slices"
)
// Vote represents a single vote that an authorized signer made to modify the
@ -62,13 +62,6 @@ type Snapshot struct {
Tally map[common.Address]Tally `json:"tally"` // Current vote tally to avoid recalculating
}
// signersAscending implements the sort interface to allow sorting a list of addresses
type signersAscending []common.Address
func (s signersAscending) Len() int { return len(s) }
func (s signersAscending) Less(i, j int) bool { return bytes.Compare(s[i][:], s[j][:]) < 0 }
func (s signersAscending) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// newSnapshot creates a new snapshot with the specified startup parameters. This
// method does not initialize the set of recent signers, so only ever use it for
// the genesis block.
@ -315,7 +308,7 @@ func (s *Snapshot) signers() []common.Address {
for sig := range s.Signers {
sigs = append(sigs, sig)
}
sort.Sort(signersAscending(sigs))
slices.SortFunc(sigs, common.Address.Cmp)
return sigs
}

Some files were not shown because too many files have changed in this diff