Merge tag 'v1.10.12' into HEAD

Austin Roberts 2021-11-29 10:16:13 -06:00
commit 6060d4adc9
107 changed files with 2249 additions and 811 deletions


@ -120,36 +120,6 @@ jobs:
- go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
# This builder does the Linux Azure MIPS xgo uploads
- stage: build
if: type = push
os: linux
dist: bionic
services:
- docker
go: 1.17.x
env:
- azure-linux-mips
- GO111MODULE=on
git:
submodules: false # avoid cloning ethereum/tests
script:
- go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done
- go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
- go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done
- go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
- go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done
- go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
- go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done
- go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
# This builder does the Android Maven and Azure uploads
- stage: build
if: type = push


@ -2,11 +2,7 @@
# with Go source code. If you know what GOPATH is then you probably
# don't need to bother with make.
.PHONY: geth android ios geth-cross evm all test clean
.PHONY: geth android ios evm all test clean
.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le
.PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
.PHONY: geth-windows geth-windows-386 geth-windows-amd64
GOBIN = ./build/bin
GO ?= latest
@ -53,95 +49,3 @@ devtools:
env GOBIN= go install ./cmd/abigen
@type "solc" 2> /dev/null || echo 'Please install solc'
@type "protoc" 2> /dev/null || echo 'Please install protoc'
# Cross Compilation Targets (xgo)
geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
@echo "Full cross compilation done:"
@ls -ld $(GOBIN)/geth-*
geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 geth-linux-mips64le
@echo "Linux cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-*
geth-linux-386:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
@echo "Linux 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep 386
geth-linux-amd64:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
@echo "Linux amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep amd64
geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
@echo "Linux ARM cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm
geth-linux-arm-5:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
@echo "Linux ARMv5 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-5
geth-linux-arm-6:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
@echo "Linux ARMv6 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-6
geth-linux-arm-7:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
@echo "Linux ARMv7 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-7
geth-linux-arm64:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
@echo "Linux ARM64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm64
geth-linux-mips:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips
geth-linux-mipsle:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPSle cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mipsle
geth-linux-mips64:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips64
geth-linux-mips64le:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS64le cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips64le
geth-darwin: geth-darwin-386 geth-darwin-amd64
@echo "Darwin cross compilation done:"
@ls -ld $(GOBIN)/geth-darwin-*
geth-darwin-386:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
@echo "Darwin 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-darwin-* | grep 386
geth-darwin-amd64:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
@echo "Darwin amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-darwin-* | grep amd64
geth-windows: geth-windows-386 geth-windows-amd64
@echo "Windows cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-*
geth-windows-386:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
@echo "Windows 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-* | grep 386
geth-windows-amd64:
$(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
@echo "Windows amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-* | grep amd64


@ -462,6 +462,9 @@ func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Ad
// SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated
// chain doesn't have miners, we just return a gas price of 1 for any call.
func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) {
if b.pendingBlock.Header().BaseFee != nil {
return b.pendingBlock.Header().BaseFee, nil
}
return big.NewInt(1), nil
}


@ -916,8 +916,8 @@ func TestSuggestGasPrice(t *testing.T) {
if err != nil {
t.Errorf("could not get gas price: %v", err)
}
if gasPrice.Uint64() != uint64(1) {
if gasPrice.Uint64() != sim.pendingBlock.Header().BaseFee.Uint64() {
t.Errorf("gas price was not expected value of 1. actual: %v", gasPrice.Uint64())
t.Errorf("gas price was not expected value of %v. actual: %v", sim.pendingBlock.Header().BaseFee.Uint64(), gasPrice.Uint64())
}
}


@ -370,7 +370,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
rawTx, err = c.createLegacyTx(opts, contract, input)
} else {
// Only query for basefee if gasPrice not specified
if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); err != nil {
if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil {
return nil, errHead
} else if head.BaseFee != nil {
rawTx, err = c.createDynamicTx(opts, contract, input, head)
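The one-character fix above matters more than it looks: the `if` statement declares `errHead`, but the old code tested the outer `err` (still nil at that point), so a failed `HeaderByNumber` lookup was silently treated as success and the nil `head` was then dereferenced. A reduced sketch of the pattern, with a hypothetical `lookup` helper standing in for `HeaderByNumber`:

```go
package main

import (
	"errors"
	"fmt"
)

// lookup is a hypothetical stand-in for transactor.HeaderByNumber: it fails,
// and therefore returns a nil header.
func lookup() (*int, error) { return nil, errors.New("header lookup failed") }

// buggy mirrors the old code: the condition tests the outer err (still nil),
// not errHead, so the failed lookup is ignored and *head dereferences nil.
func buggy() error {
	var err error
	if head, errHead := lookup(); err != nil {
		return errHead
	} else if *head > 0 { // panics: head is nil because lookup failed
		fmt.Println(*head)
	}
	return nil
}

// fixed mirrors the new code: the freshly declared errHead is the variable
// that gets checked, so the lookup error is propagated instead.
func fixed() error {
	if head, errHead := lookup(); errHead != nil {
		return errHead
	} else if *head > 0 {
		fmt.Println(*head)
	}
	return nil
}

func main() {
	fmt.Println(fixed()) // prints "header lookup failed"
	_ = buggy            // calling buggy() would panic with a nil dereference
}
```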


@ -33,7 +33,6 @@ Available commands are:
nsis -- creates a Windows NSIS installer
aar [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive
xcode [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework
xgo [ -alltools ] [ options ] -- cross builds according to options
purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore
For all commands, -n prevents execution of external programs (dry run mode).
@ -188,8 +187,6 @@ func main() {
doAndroidArchive(os.Args[2:]) doAndroidArchive(os.Args[2:])
case "xcode": case "xcode":
doXCodeFramework(os.Args[2:]) doXCodeFramework(os.Args[2:])
case "xgo":
doXgo(os.Args[2:])
case "purge": case "purge":
doPurge(os.Args[2:]) doPurge(os.Args[2:])
default: default:
@ -1209,48 +1206,6 @@ func newPodMetadata(env build.Environment, archive string) podMetadata {
} }
} }
// Cross compilation
func doXgo(cmdline []string) {
var (
alltools = flag.Bool("alltools", false, `Flag whether we're building all known tools, or only one in particular`)
)
flag.CommandLine.Parse(cmdline)
env := build.Env()
var tc build.GoToolchain
// Make sure xgo is available for cross compilation
build.MustRun(tc.Install(GOBIN, "github.com/karalabe/xgo@latest"))
// If all tools building is requested, build everything the builder wants
args := append(buildFlags(env), flag.Args()...)
if *alltools {
args = append(args, []string{"--dest", GOBIN}...)
for _, res := range allToolsArchiveFiles {
if strings.HasPrefix(res, GOBIN) {
// Binary tool found, cross build it explicitly
args = append(args, "./"+filepath.Join("cmd", filepath.Base(res)))
build.MustRun(xgoTool(args))
args = args[:len(args)-1]
}
}
return
}
// Otherwise execute the explicit cross compilation
path := args[len(args)-1]
args = append(args[:len(args)-1], []string{"--dest", GOBIN, path}...)
build.MustRun(xgoTool(args))
}
func xgoTool(args []string) *exec.Cmd {
cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, []string{"GOBIN=" + GOBIN}...)
return cmd
}
// Binary distribution cleanups // Binary distribution cleanups
func doPurge(cmdline []string) { func doPurge(cmdline []string) {


@ -235,6 +235,8 @@ func ethFilter(args []string) (nodeFilter, error) {
filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash)
case "ropsten":
filter = forkid.NewStaticFilter(params.RopstenChainConfig, params.RopstenGenesisHash)
case "sepolia":
filter = forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash)
default:
return nil, fmt.Errorf("unknown network %q", args[0])
}


@ -96,7 +96,7 @@ type rejectedTx struct {
// Apply applies a set of transactions to a pre-state
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
txs types.Transactions, miningReward int64,
getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) {
getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, error) {
// Capture errors for BLOCKHASH operation, if we haven't been supplied the
// required blockhashes


@ -121,6 +121,9 @@ func Transaction(ctx *cli.Context) error {
}
var results []result
for it.Next() {
if err := it.Err(); err != nil {
return NewError(ErrorIO, err)
}
var tx types.Transaction
err := rlp.DecodeBytes(it.Value(), &tx)
if err != nil {
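The added `it.Err()` check means a truncated or otherwise malformed transaction list now aborts with `ErrorIO` (as the new testdata/18 case below expects) instead of the loop ending silently. A minimal sketch of the pattern with a toy iterator; the real one is an RLP list iterator, and `fakeIter` plus its injected error are made up purely for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

// fakeIter is a toy stand-in for the RLP list iterator used by t9n: it yields
// values until it hits a (simulated) decoding problem, after which Err() is set.
type fakeIter struct {
	vals []string
	pos  int
	err  error
}

func (it *fakeIter) Next() bool {
	it.pos++
	if it.pos == 2 {
		it.err = errors.New("rlp: value size exceeds available input length")
	}
	return it.pos <= len(it.vals)
}
func (it *fakeIter) Value() string { return it.vals[it.pos-1] }
func (it *fakeIter) Err() error    { return it.err }

func main() {
	it := &fakeIter{vals: []string{"tx0", "tx1", "tx2"}}
	for it.Next() {
		// The fix above: surface iterator errors instead of decoding garbage
		// or silently skipping the remainder of the list.
		if err := it.Err(); err != nil {
			fmt.Println("abort with ErrorIO:", err)
			return
		}
		fmt.Println("decoded", it.Value())
	}
}
```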


@ -89,10 +89,10 @@ func Transition(ctx *cli.Context) error {
var (
err error
tracer vm.Tracer
tracer vm.EVMLogger
baseDir = ""
)
var getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error)
var getTracer func(txIndex int, txHash common.Hash) (vm.EVMLogger, error)
// If user specified a basedir, make sure it exists
if ctx.IsSet(OutputBasedir.Name) {
@ -119,7 +119,7 @@ func Transition(ctx *cli.Context) error {
prevFile.Close()
}
}()
getTracer = func(txIndex int, txHash common.Hash) (vm.Tracer, error) {
getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) {
if prevFile != nil {
prevFile.Close()
}
@ -131,7 +131,7 @@ func Transition(ctx *cli.Context) error {
return vm.NewJSONLogger(logConfig, traceFile), nil
}
} else {
getTracer = func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error) {
getTracer = func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error) {
return nil, nil
}
}


@ -116,7 +116,7 @@ func runCmd(ctx *cli.Context) error {
} }
var (
tracer vm.Tracer
tracer vm.EVMLogger
debugLogger *vm.StructLogger
statedb *state.StateDB
chainConfig *params.ChainConfig


@ -65,7 +65,7 @@ func stateTestCmd(ctx *cli.Context) error {
EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name),
}
var (
tracer vm.Tracer
tracer vm.EVMLogger
debugger *vm.StructLogger
)
switch {


@ -9,6 +9,7 @@ import (
"testing" "testing"
"github.com/docker/docker/pkg/reexec" "github.com/docker/docker/pkg/reexec"
"github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool"
"github.com/ethereum/go-ethereum/internal/cmdtest" "github.com/ethereum/go-ethereum/internal/cmdtest"
) )
@ -170,13 +171,45 @@ func TestT8n(t *testing.T) {
output: t8nOutput{result: true}, output: t8nOutput{result: true},
expOut: "exp2.json", expOut: "exp2.json",
}, },
{ // Difficulty calculation - with uncles + Berlin
base: "./testdata/14",
input: t8nInput{
"alloc.json", "txs.json", "env.uncles.json", "Berlin", "",
},
output: t8nOutput{result: true},
expOut: "exp_berlin.json",
},
{ // Difficulty calculation on arrow glacier
base: "./testdata/19",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "London", "",
},
output: t8nOutput{result: true},
expOut: "exp_london.json",
},
{ // Difficulty calculation on arrow glacier
base: "./testdata/19",
input: t8nInput{
"alloc.json", "txs.json", "env.json", "ArrowGlacier", "",
},
output: t8nOutput{result: true},
expOut: "exp_arrowglacier.json",
},
} { } {
args := []string{"t8n"} args := []string{"t8n"}
args = append(args, tc.output.get()...) args = append(args, tc.output.get()...)
args = append(args, tc.input.get(tc.base)...) args = append(args, tc.input.get(tc.base)...)
var qArgs []string // quoted args for debugging purposes
for _, arg := range args {
if len(arg) == 0 {
qArgs = append(qArgs, `""`)
} else {
qArgs = append(qArgs, arg)
}
}
tt.Logf("args: %v\n", strings.Join(qArgs, " "))
tt.Run("evm-test", args...) tt.Run("evm-test", args...)
tt.Logf("args: %v\n", strings.Join(args, " "))
// Compare the expected output, if provided // Compare the expected output, if provided
if tc.expOut != "" { if tc.expOut != "" {
want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut)) want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut))
@ -265,6 +298,14 @@ func TestT9n(t *testing.T) {
}, },
expOut: "exp.json", expOut: "exp.json",
}, },
{ // Invalid RLP
base: "./testdata/18",
input: t9nInput{
inTxs: "invalid.rlp",
stFork: "London",
},
expExitCode: t8ntool.ErrorIO,
},
} { } {
args := []string{"t9n"} args := []string{"t9n"}

cmd/evm/testdata/14/exp_berlin.json vendored Normal file

@ -0,0 +1,11 @@
{
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"receipts": [],
"currentDifficulty": "0x1ff9000000000"
}
}

cmd/evm/testdata/18/README.md vendored Normal file

@ -0,0 +1,9 @@
# Invalid RLP
This folder contains a sample of invalid RLP, and it is expected that `t9n` handles it properly:
```
$ go run . t9n --input.txs=./testdata/18/invalid.rlp --state.fork=London
ERROR(11): rlp: value size exceeds available input length
```

cmd/evm/testdata/18/invalid.rlp vendored Normal file

@ -0,0 +1 @@
"0xf852328001825208870b9331677e6ebf0a801ca098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa03887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3"

cmd/evm/testdata/19/alloc.json vendored Normal file

@ -0,0 +1,12 @@
{
"a94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"balance": "0x5ffd4878be161d74",
"code": "0x",
"nonce": "0xac",
"storage": {}
},
"0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{
"balance": "0xfeedbead",
"nonce" : "0x00"
}
}

cmd/evm/testdata/19/env.json vendored Normal file

@ -0,0 +1,9 @@
{
"currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"currentGasLimit": "0x750a163df65e8a",
"currentBaseFee": "0x500",
"currentNumber": "13000000",
"currentTimestamp": "100015",
"parentTimestamp" : "99999",
"parentDifficulty" : "0x2000000000000"
}


@ -0,0 +1,11 @@
{
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"currentDifficulty": "0x2000000200000",
"receipts": []
}
}

cmd/evm/testdata/19/exp_london.json vendored Normal file

@ -0,0 +1,11 @@
{
"result": {
"stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc",
"txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"currentDifficulty": "0x2000080000000",
"receipts": []
}
}

cmd/evm/testdata/19/readme.md vendored Normal file

@ -0,0 +1,9 @@
## Difficulty calculation
This test shows how `evm t8n` can be used to calculate the (ethash) difficulty when none is provided by the caller, this time on `ArrowGlacier` (EIP-4345).
Calculating it (with an empty set of txs, and no uncle hash provided for the parent block) using `ArrowGlacier` rules:
```
[user@work evm]$ ./evm t8n --input.alloc=./testdata/19/alloc.json --input.txs=./testdata/19/txs.json --input.env=./testdata/19/env.json --output.result=stdout --state.fork=ArrowGlacier
```
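For context, the only thing `ArrowGlacier` changes here is the difficulty-bomb offset: per EIP-4345 the bomb is evaluated against a block number pushed back by 10,700,000 blocks (versus 9,700,000 under EIP-3554/London). Roughly, period = max(currentNumber − 10,700,000, 0) / 100,000, and a bomb term of 2^(period − 2) is added once the period exceeds one; the parent-based part of the difficulty adjustment is unchanged.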

cmd/evm/testdata/19/txs.json vendored Normal file

@ -0,0 +1 @@
[]


@ -66,6 +66,7 @@ It expects the genesis file as argument.`,
Flags: []cli.Flag{ Flags: []cli.Flag{
utils.MainnetFlag, utils.MainnetFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
}, },
@ -140,7 +141,9 @@ be gzipped.`,
}, },
Category: "BLOCKCHAIN COMMANDS", Category: "BLOCKCHAIN COMMANDS",
Description: ` Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.`, The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
} }
exportPreimagesCommand = cli.Command{ exportPreimagesCommand = cli.Command{
Action: utils.MigrateFlags(exportPreimages), Action: utils.MigrateFlags(exportPreimages),
@ -154,7 +157,9 @@ be gzipped.`,
}, },
Category: "BLOCKCHAIN COMMANDS", Category: "BLOCKCHAIN COMMANDS",
Description: ` Description: `
The export-preimages command export hash preimages to an RLP encoded stream`, The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
`,
} }
dumpCommand = cli.Command{ dumpCommand = cli.Command{
Action: utils.MigrateFlags(dump), Action: utils.MigrateFlags(dump),
@ -368,7 +373,6 @@ func exportPreimages(ctx *cli.Context) error {
if len(ctx.Args()) < 1 { if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.") utils.Fatalf("This command requires an argument.")
} }
stack, _ := makeConfigNode(ctx) stack, _ := makeConfigNode(ctx)
defer stack.Close() defer stack.Close()


@ -156,8 +156,8 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
// makeFullNode loads geth configuration and creates the Ethereum backend.
func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
stack, cfg := makeConfigNode(ctx)
if ctx.GlobalIsSet(utils.OverrideLondonFlag.Name) {
if ctx.GlobalIsSet(utils.OverrideArrowGlacierFlag.Name) {
cfg.Eth.OverrideLondon = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideLondonFlag.Name))
cfg.Eth.OverrideArrowGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideArrowGlacierFlag.Name))
}
backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
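Operationally, the old `--override.london=<block>` escape hatch is gone in this release; the equivalent knob is now `--override.arrowglacier=<block>` (the flag itself is renamed further down, in cmd/utils/flags.go).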


@ -134,6 +134,8 @@ func remoteConsole(ctx *cli.Context) error {
path = filepath.Join(path, "rinkeby") path = filepath.Join(path, "rinkeby")
} else if ctx.GlobalBool(utils.GoerliFlag.Name) { } else if ctx.GlobalBool(utils.GoerliFlag.Name) {
path = filepath.Join(path, "goerli") path = filepath.Join(path, "goerli")
} else if ctx.GlobalBool(utils.SepoliaFlag.Name) {
path = filepath.Join(path, "sepolia")
} }
} }
endpoint = fmt.Sprintf("%s/geth.ipc", path) endpoint = fmt.Sprintf("%s/geth.ipc", path)


@ -17,12 +17,16 @@
package main package main
import ( import (
"bytes"
"errors" "errors"
"fmt" "fmt"
"os" "os"
"os/signal"
"path/filepath" "path/filepath"
"sort" "sort"
"strconv" "strconv"
"strings"
"syscall"
"time" "time"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
@ -63,6 +67,8 @@ Remove blockchain and state databases`,
dbPutCmd, dbPutCmd,
dbGetSlotsCmd, dbGetSlotsCmd,
dbDumpFreezerIndex, dbDumpFreezerIndex,
dbImportCmd,
dbExportCmd,
}, },
} }
dbInspectCmd = cli.Command{ dbInspectCmd = cli.Command{
@ -74,6 +80,7 @@ Remove blockchain and state databases`,
utils.SyncModeFlag, utils.SyncModeFlag,
utils.MainnetFlag, utils.MainnetFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
}, },
@ -89,6 +96,7 @@ Remove blockchain and state databases`,
utils.SyncModeFlag, utils.SyncModeFlag,
utils.MainnetFlag, utils.MainnetFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
}, },
@ -102,6 +110,7 @@ Remove blockchain and state databases`,
utils.SyncModeFlag, utils.SyncModeFlag,
utils.MainnetFlag, utils.MainnetFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.CacheFlag, utils.CacheFlag,
@ -121,6 +130,7 @@ corruption if it is aborted during execution'!`,
utils.SyncModeFlag, utils.SyncModeFlag,
utils.MainnetFlag, utils.MainnetFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
}, },
@ -136,6 +146,7 @@ corruption if it is aborted during execution'!`,
utils.SyncModeFlag, utils.SyncModeFlag,
utils.MainnetFlag, utils.MainnetFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
}, },
@ -152,6 +163,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
utils.SyncModeFlag, utils.SyncModeFlag,
utils.MainnetFlag, utils.MainnetFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
}, },
@ -168,6 +180,7 @@ WARNING: This is a low-level operation which may cause database corruption!`,
utils.SyncModeFlag, utils.SyncModeFlag,
utils.MainnetFlag, utils.MainnetFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
}, },
@ -183,11 +196,42 @@ WARNING: This is a low-level operation which may cause database corruption!`,
utils.SyncModeFlag, utils.SyncModeFlag,
utils.MainnetFlag, utils.MainnetFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
}, },
Description: "This command displays information about the freezer index.", Description: "This command displays information about the freezer index.",
} }
dbImportCmd = cli.Command{
Action: utils.MigrateFlags(importLDBdata),
Name: "import",
Usage: "Imports leveldb-data from an exported RLP dump.",
ArgsUsage: "<dumpfile> <start (optional)",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "The import command imports the specific chain data from an RLP encoded stream.",
}
dbExportCmd = cli.Command{
Action: utils.MigrateFlags(exportChaindata),
Name: "export",
Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
ArgsUsage: "<type> <dumpfile>",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
}
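// Usage note (inferred from the ArgsUsage strings above; the exact command
// shapes are assumptions): `geth db export preimage preimages.rlp.gz` on the
// source node, then `geth db import preimages.rlp.gz` on the target. A ".gz"
// suffix switches on gzip compression in both directions. This replaces the
// deprecated export-preimages / import-preimages commands.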
) )
func removeDB(ctx *cli.Context) error { func removeDB(ctx *cli.Context) error {
@ -510,3 +554,133 @@ func parseHexOrString(str string) ([]byte, error) {
} }
return b, err return b, err
} }
func importLDBdata(ctx *cli.Context) error {
start := 0
switch ctx.NArg() {
case 1:
break
case 2:
s, err := strconv.Atoi(ctx.Args().Get(1))
if err != nil {
return fmt.Errorf("second arg must be an integer: %v", err)
}
start = s
default:
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
var (
fName = ctx.Args().Get(0)
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during ldb import, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, false)
return utils.ImportLDBData(db, fName, int64(start), stop)
}
type preimageIterator struct {
iter ethdb.Iterator
}
func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
for iter.iter.Next() {
key := iter.iter.Key()
if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.iter.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *preimageIterator) Release() {
iter.iter.Release()
}
type snapshotIterator struct {
init bool
account ethdb.Iterator
storage ethdb.Iterator
}
func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
if !iter.init {
iter.init = true
return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
}
for iter.account.Next() {
key := iter.account.Key()
if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
return utils.OpBatchAdd, key, iter.account.Value(), true
}
}
for iter.storage.Next() {
key := iter.storage.Key()
if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
return utils.OpBatchAdd, key, iter.storage.Value(), true
}
}
return 0, nil, nil, false
}
func (iter *snapshotIterator) Release() {
iter.account.Release()
iter.storage.Release()
}
// chainExporters defines the export scheme for all exportable chain data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
"preimage": func(db ethdb.Database) utils.ChainDataIterator {
iter := db.NewIterator(rawdb.PreimagePrefix, nil)
return &preimageIterator{iter: iter}
},
"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
return &snapshotIterator{account: account, storage: storage}
},
}
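// Note: the keys of this map ("preimage", "snapshot") double as the <type>
// argument accepted by `geth db export`; exportChaindata below rejects any
// other value and lists these keys in its error message. Supporting a new
// kind of dump therefore only needs another ChainDataIterator constructor here.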
func exportChaindata(ctx *cli.Context) error {
if ctx.NArg() < 2 {
return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
}
// Parse the required chain data type, make sure it's supported.
kind := ctx.Args().Get(0)
kind = strings.ToLower(strings.Trim(kind, " "))
exporter, ok := chainExporters[kind]
if !ok {
var kinds []string
for kind := range chainExporters {
kinds = append(kinds, kind)
}
return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
}
var (
stack, _ = makeConfigNode(ctx)
interrupt = make(chan os.Signal, 1)
stop = make(chan struct{})
)
defer stack.Close()
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
if _, ok := <-interrupt; ok {
log.Info("Interrupted during db export, stopping at next batch")
}
close(stop)
}()
db := utils.MakeChainDatabase(ctx, stack, true)
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}


@ -42,6 +42,10 @@ import (
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/plugins" "github.com/ethereum/go-ethereum/plugins"
"github.com/ethereum/go-ethereum/plugins/wrappers" "github.com/ethereum/go-ethereum/plugins/wrappers"
// Force-load the native, to trigger registration
_ "github.com/ethereum/go-ethereum/eth/tracers/native"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
) )
@ -69,7 +73,7 @@ var (
utils.NoUSBFlag,
utils.USBFlag,
utils.SmartCardDaemonPathFlag,
utils.OverrideLondonFlag,
utils.OverrideArrowGlacierFlag,
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
@ -139,6 +143,7 @@ var (
utils.DeveloperFlag, utils.DeveloperFlag,
utils.DeveloperPeriodFlag, utils.DeveloperPeriodFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.VMEnableDebugFlag, utils.VMEnableDebugFlag,


@ -62,6 +62,7 @@ var (
utils.DataDirFlag, utils.DataDirFlag,
utils.AncientFlag, utils.AncientFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.CacheTrieJournalFlag, utils.CacheTrieJournalFlag,
@ -92,6 +93,7 @@ the trie clean cache with default directory will be deleted.
utils.DataDirFlag, utils.DataDirFlag,
utils.AncientFlag, utils.AncientFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
}, },
@ -112,6 +114,7 @@ In other words, this command does the snapshot to trie conversion.
utils.DataDirFlag, utils.DataDirFlag,
utils.AncientFlag, utils.AncientFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
}, },
@ -134,6 +137,7 @@ It's also usable without snapshot enabled.
utils.DataDirFlag, utils.DataDirFlag,
utils.AncientFlag, utils.AncientFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
}, },
@ -157,6 +161,7 @@ It's also usable without snapshot enabled.
utils.DataDirFlag, utils.DataDirFlag,
utils.AncientFlag, utils.AncientFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.GoerliFlag, utils.GoerliFlag,
utils.ExcludeCodeFlag, utils.ExcludeCodeFlag,


@ -45,6 +45,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.GoerliFlag, utils.GoerliFlag,
utils.RinkebyFlag, utils.RinkebyFlag,
utils.RopstenFlag, utils.RopstenFlag,
utils.SepoliaFlag,
utils.SyncModeFlag, utils.SyncModeFlag,
utils.ExitWhenSyncedFlag, utils.ExitWhenSyncedFlag,
utils.GCModeFlag, utils.GCModeFlag,


@ -23,7 +23,6 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"sync"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
) )
@ -80,14 +79,8 @@ func (w *wizard) run() {
} else if err := json.Unmarshal(blob, &w.conf); err != nil { } else if err := json.Unmarshal(blob, &w.conf); err != nil {
log.Crit("Previous configuration corrupted", "path", w.conf.path, "err", err) log.Crit("Previous configuration corrupted", "path", w.conf.path, "err", err)
} else { } else {
// Dial all previously known servers concurrently // Dial all previously known servers
var pend sync.WaitGroup
for server, pubkey := range w.conf.Servers { for server, pubkey := range w.conf.Servers {
pend.Add(1)
go func(server string, pubkey []byte) {
defer pend.Done()
log.Info("Dialing previously configured server", "server", server) log.Info("Dialing previously configured server", "server", server)
client, err := dial(server, pubkey) client, err := dial(server, pubkey)
if err != nil { if err != nil {
@ -96,9 +89,7 @@ func (w *wizard) run() {
w.lock.Lock() w.lock.Lock()
w.servers[server] = client w.servers[server] = client
w.lock.Unlock() w.lock.Unlock()
}(server, pubkey)
} }
pend.Wait()
w.networkStats() w.networkStats()
} }
// Basics done, loop ad infinitum about what to do // Basics done, loop ad infinitum about what to do


@ -18,7 +18,9 @@
package utils package utils
import ( import (
"bufio"
"compress/gzip" "compress/gzip"
"errors"
"fmt" "fmt"
"io" "io"
"os" "os"
@ -270,6 +272,7 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las
} }
// ImportPreimages imports a batch of exported hash preimages into the database. // ImportPreimages imports a batch of exported hash preimages into the database.
// It's a part of the deprecated functionality, should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error { func ImportPreimages(db ethdb.Database, fn string) error {
log.Info("Importing preimages", "file", fn) log.Info("Importing preimages", "file", fn)
@ -280,7 +283,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
}
defer fh.Close()
var reader io.Reader = fh
var reader io.Reader = bufio.NewReader(fh)
if strings.HasSuffix(fn, ".gz") {
if reader, err = gzip.NewReader(reader); err != nil {
return err
@ -288,7 +291,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
}
stream := rlp.NewStream(reader, 0)
// Import the preimages in batches to prevent disk trashing
// Import the preimages in batches to prevent disk thrashing
preimages := make(map[common.Hash][]byte)
for {
@ -317,6 +320,7 @@ func ImportPreimages(db ethdb.Database, fn string) error {
// ExportPreimages exports all known hash preimages into the specified file, // ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file. // truncating any data already present in the file.
// It's a part of the deprecated functionality, should be removed in the future.
func ExportPreimages(db ethdb.Database, fn string) error { func ExportPreimages(db ethdb.Database, fn string) error {
log.Info("Exporting preimages", "file", fn) log.Info("Exporting preimages", "file", fn)
@ -344,3 +348,207 @@ func ExportPreimages(db ethdb.Database, fn string) error {
log.Info("Exported preimages", "file", fn) log.Info("Exported preimages", "file", fn)
return nil return nil
} }
// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
Magic string // Always set to 'gethdbdump' for disambiguation
Version uint64
Kind string
UnixTime uint64
}
const exportMagic = "gethdbdump"
const (
OpBatchAdd = 0
OpBatchDel = 1
)
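// Informally, the on-disk layout produced by ExportChaindata (and consumed by
// ImportLDBData below) is:
//
//   rlp(exportHeader) | rlp(op) rlp(key) rlp(value) | rlp(op) rlp(key) rlp(value) | ...
//
// i.e. one header followed by flat (op, key, value) triples until EOF.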
// ImportLDBData imports a batch of snapshot data into the database
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
log.Info("Importing leveldb data", "file", f)
// Open the file handle and potentially unwrap the gzip stream
fh, err := os.Open(f)
if err != nil {
return err
}
defer fh.Close()
var reader io.Reader = bufio.NewReader(fh)
if strings.HasSuffix(f, ".gz") {
if reader, err = gzip.NewReader(reader); err != nil {
return err
}
}
stream := rlp.NewStream(reader, 0)
// Read the header
var header exportHeader
if err := stream.Decode(&header); err != nil {
return fmt.Errorf("could not decode header: %v", err)
}
if header.Magic != exportMagic {
return errors.New("incompatible data, wrong magic")
}
if header.Version != 0 {
return fmt.Errorf("incompatible version %d, (support only 0)", header.Version)
}
log.Info("Importing data", "file", f, "type", header.Kind, "data age",
common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))
// Import the snapshot in batches to prevent disk thrashing
var (
count int64
start = time.Now()
logged = time.Now()
batch = db.NewBatch()
)
for {
// Read the next entry
var (
op byte
key, val []byte
)
if err := stream.Decode(&op); err != nil {
if err == io.EOF {
break
}
return err
}
if err := stream.Decode(&key); err != nil {
return err
}
if err := stream.Decode(&val); err != nil {
return err
}
if count < startIndex {
count++
continue
}
switch op {
case OpBatchDel:
batch.Delete(key)
case OpBatchAdd:
batch.Put(key, val)
default:
return fmt.Errorf("unknown op %d\n", op)
}
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return err
}
batch.Reset()
}
// Check interruption emitted by ctrl+c
if count%1000 == 0 {
select {
case <-interrupt:
if err := batch.Write(); err != nil {
return err
}
log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
return nil
default:
}
}
if count%1000 == 0 && time.Since(logged) > 8*time.Second {
log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
count += 1
}
// Flush the last batch snapshot data
if batch.ValueSize() > 0 {
if err := batch.Write(); err != nil {
return err
}
}
log.Info("Imported chain data", "file", f, "count", count,
"elapsed", common.PrettyDuration(time.Since(start)))
return nil
}
// ChainDataIterator is an interface wraps all necessary functions to iterate
// the exporting chain data.
type ChainDataIterator interface {
// Next returns the key-value pair for next exporting entry in the iterator.
// When the end is reached, it will return (0, nil, nil, false).
Next() (byte, []byte, []byte, bool)
// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
Release()
}
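// The two production implementations added in this change live in
// cmd/geth/dbcmd.go (preimageIterator and snapshotIterator); export_test.go
// exercises the interface with simple in-memory iterators.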
// ExportChaindata exports the given data type (truncating any data already present)
// in the file. If the suffix is 'gz', gzip compression is used.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
log.Info("Exporting chain data", "file", fn, "kind", kind)
defer iter.Release()
// Open the file handle and potentially wrap with a gzip stream
fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil {
return err
}
defer fh.Close()
var writer io.Writer = fh
if strings.HasSuffix(fn, ".gz") {
writer = gzip.NewWriter(writer)
defer writer.(*gzip.Writer).Close()
}
// Write the header
if err := rlp.Encode(writer, &exportHeader{
Magic: exportMagic,
Version: 0,
Kind: kind,
UnixTime: uint64(time.Now().Unix()),
}); err != nil {
return err
}
// Extract data from source iterator and dump them out to file
var (
count int64
start = time.Now()
logged = time.Now()
)
for {
op, key, val, ok := iter.Next()
if !ok {
break
}
if err := rlp.Encode(writer, op); err != nil {
return err
}
if err := rlp.Encode(writer, key); err != nil {
return err
}
if err := rlp.Encode(writer, val); err != nil {
return err
}
if count%1000 == 0 {
// Check interruption emitted by ctrl+c
select {
case <-interrupt:
log.Info("Chain data exporting interrupted", "file", fn,
"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
return nil
default:
}
if time.Since(logged) > 8*time.Second {
log.Info("Exporting chain data", "file", fn, "kind", kind,
"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
}
count++
}
log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
"elapsed", common.PrettyDuration(time.Since(start)))
return nil
}

cmd/utils/export_test.go Normal file

@ -0,0 +1,198 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of go-ethereum.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package utils
import (
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/rlp"
)
// TestExport does basic sanity checks on the export/import functionality
func TestExport(t *testing.T) {
f := fmt.Sprintf("%v/tempdump", os.TempDir())
defer func() {
os.Remove(f)
}()
testExport(t, f)
}
func TestExportGzip(t *testing.T) {
f := fmt.Sprintf("%v/tempdump.gz", os.TempDir())
defer func() {
os.Remove(f)
}()
testExport(t, f)
}
type testIterator struct {
index int
}
func newTestIterator() *testIterator {
return &testIterator{index: -1}
}
func (iter *testIterator) Next() (byte, []byte, []byte, bool) {
if iter.index >= 999 {
return 0, nil, nil, false
}
iter.index += 1
if iter.index == 42 {
iter.index += 1
}
return OpBatchAdd, []byte(fmt.Sprintf("key-%04d", iter.index)),
[]byte(fmt.Sprintf("value %d", iter.index)), true
}
func (iter *testIterator) Release() {}
func testExport(t *testing.T, f string) {
err := ExportChaindata(f, "testdata", newTestIterator(), make(chan struct{}))
if err != nil {
t.Fatal(err)
}
db := rawdb.NewMemoryDatabase()
err = ImportLDBData(db, f, 5, make(chan struct{}))
if err != nil {
t.Fatal(err)
}
// verify
for i := 0; i < 1000; i++ {
v, err := db.Get([]byte(fmt.Sprintf("key-%04d", i)))
if (i < 5 || i == 42) && err == nil {
t.Fatalf("expected no element at idx %d, got '%v'", i, string(v))
}
if !(i < 5 || i == 42) {
if err != nil {
t.Fatalf("expected element idx %d: %v", i, err)
}
if have, want := string(v), fmt.Sprintf("value %d", i); have != want {
t.Fatalf("have %v, want %v", have, want)
}
}
}
v, err := db.Get([]byte(fmt.Sprintf("key-%04d", 1000)))
if err == nil {
t.Fatalf("expected no element at idx %d, got '%v'", 1000, string(v))
}
}
// TestDeletionExport tests if the deletion markers can be exported/imported correctly
func TestDeletionExport(t *testing.T) {
f := fmt.Sprintf("%v/tempdump", os.TempDir())
defer func() {
os.Remove(f)
}()
testDeletion(t, f)
}
// TestDeletionExportGzip tests if the deletion markers can be exported/imported
// correctly with gz compression.
func TestDeletionExportGzip(t *testing.T) {
f := fmt.Sprintf("%v/tempdump.gz", os.TempDir())
defer func() {
os.Remove(f)
}()
testDeletion(t, f)
}
type deletionIterator struct {
index int
}
func newDeletionIterator() *deletionIterator {
return &deletionIterator{index: -1}
}
func (iter *deletionIterator) Next() (byte, []byte, []byte, bool) {
if iter.index >= 999 {
return 0, nil, nil, false
}
iter.index += 1
if iter.index == 42 {
iter.index += 1
}
return OpBatchDel, []byte(fmt.Sprintf("key-%04d", iter.index)), nil, true
}
func (iter *deletionIterator) Release() {}
func testDeletion(t *testing.T, f string) {
err := ExportChaindata(f, "testdata", newDeletionIterator(), make(chan struct{}))
if err != nil {
t.Fatal(err)
}
db := rawdb.NewMemoryDatabase()
for i := 0; i < 1000; i++ {
db.Put([]byte(fmt.Sprintf("key-%04d", i)), []byte(fmt.Sprintf("value %d", i)))
}
err = ImportLDBData(db, f, 5, make(chan struct{}))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 1000; i++ {
v, err := db.Get([]byte(fmt.Sprintf("key-%04d", i)))
if i < 5 || i == 42 {
if err != nil {
t.Fatalf("expected element at idx %d, got '%v'", i, err)
}
if have, want := string(v), fmt.Sprintf("value %d", i); have != want {
t.Fatalf("have %v, want %v", have, want)
}
}
if !(i < 5 || i == 42) {
if err == nil {
t.Fatalf("expected no element idx %d: %v", i, string(v))
}
}
}
}
// TestImportFutureFormat tests that we reject unsupported future versions.
func TestImportFutureFormat(t *testing.T) {
f := fmt.Sprintf("%v/tempdump-future", os.TempDir())
defer func() {
os.Remove(f)
}()
fh, err := os.OpenFile(f, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
if err != nil {
t.Fatal(err)
}
defer fh.Close()
if err := rlp.Encode(fh, &exportHeader{
Magic: exportMagic,
Version: 500,
Kind: "testdata",
UnixTime: uint64(time.Now().Unix()),
}); err != nil {
t.Fatal(err)
}
db2 := rawdb.NewMemoryDatabase()
err = ImportLDBData(db2, f, 0, make(chan struct{}))
if err == nil {
t.Fatal("Expected error, got none")
}
if !strings.HasPrefix(err.Error(), "incompatible version") {
t.Fatalf("wrong error: %v", err)
}
}


@ -155,6 +155,10 @@ var (
Name: "ropsten", Name: "ropsten",
Usage: "Ropsten network: pre-configured proof-of-work test network", Usage: "Ropsten network: pre-configured proof-of-work test network",
} }
SepoliaFlag = cli.BoolFlag{
Name: "sepolia",
Usage: "Sepolia network: pre-configured proof-of-work test network",
}
DeveloperFlag = cli.BoolFlag{ DeveloperFlag = cli.BoolFlag{
Name: "dev", Name: "dev",
Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled", Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
@ -235,9 +239,9 @@ var (
Usage: "Megabytes of memory allocated to bloom-filter for pruning", Usage: "Megabytes of memory allocated to bloom-filter for pruning",
Value: 2048, Value: 2048,
} }
OverrideLondonFlag = cli.Uint64Flag{
OverrideArrowGlacierFlag = cli.Uint64Flag{
Name: "override.london",
Name: "override.arrowglacier",
Usage: "Manually specify London fork-block, overriding the bundled setting",
Usage: "Manually specify Arrow Glacier fork-block, overriding the bundled setting",
}
// Light server and client settings // Light server and client settings
LightServeFlag = cli.IntFlag{ LightServeFlag = cli.IntFlag{
@ -798,6 +802,9 @@ func MakeDataDir(ctx *cli.Context) string {
if ctx.GlobalBool(GoerliFlag.Name) { if ctx.GlobalBool(GoerliFlag.Name) {
return filepath.Join(path, "goerli") return filepath.Join(path, "goerli")
} }
if ctx.GlobalBool(SepoliaFlag.Name) {
return filepath.Join(path, "sepolia")
}
return path return path
} }
Fatalf("Cannot determine default data directory, please set manually (--datadir)") Fatalf("Cannot determine default data directory, please set manually (--datadir)")
@ -846,6 +853,8 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name)) urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
case ctx.GlobalBool(RopstenFlag.Name): case ctx.GlobalBool(RopstenFlag.Name):
urls = params.RopstenBootnodes urls = params.RopstenBootnodes
case ctx.GlobalBool(SepoliaFlag.Name):
urls = params.SepoliaBootnodes
case ctx.GlobalBool(RinkebyFlag.Name): case ctx.GlobalBool(RinkebyFlag.Name):
urls = params.RinkebyBootnodes urls = params.RinkebyBootnodes
case ctx.GlobalBool(GoerliFlag.Name): case ctx.GlobalBool(GoerliFlag.Name):
@ -1269,6 +1278,8 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) {
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby") cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
case ctx.GlobalBool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir(): case ctx.GlobalBool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir():
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli") cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli")
case ctx.GlobalBool(SepoliaFlag.Name) && cfg.DataDir == node.DefaultDataDir():
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "sepolia")
} }
} }
@ -1454,7 +1465,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
// SetEthConfig applies eth-related command line flags to the config. // SetEthConfig applies eth-related command line flags to the config.
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Avoid conflicting network flags // Avoid conflicting network flags
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag)
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag)
CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
if ctx.GlobalString(GCModeFlag.Name) == "archive" && ctx.GlobalUint64(TxLookupLimitFlag.Name) != 0 {
@ -1598,6 +1609,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
} }
cfg.Genesis = core.DefaultRopstenGenesisBlock() cfg.Genesis = core.DefaultRopstenGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.RopstenGenesisHash) SetDNSDiscoveryDefaults(cfg, params.RopstenGenesisHash)
case ctx.GlobalBool(SepoliaFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 11155111
}
cfg.Genesis = core.DefaultSepoliaGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.SepoliaGenesisHash)
case ctx.GlobalBool(RinkebyFlag.Name): case ctx.GlobalBool(RinkebyFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) { if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 4 cfg.NetworkId = 4
@ -1826,6 +1843,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
genesis = core.DefaultGenesisBlock() genesis = core.DefaultGenesisBlock()
case ctx.GlobalBool(RopstenFlag.Name): case ctx.GlobalBool(RopstenFlag.Name):
genesis = core.DefaultRopstenGenesisBlock() genesis = core.DefaultRopstenGenesisBlock()
case ctx.GlobalBool(SepoliaFlag.Name):
genesis = core.DefaultSepoliaGenesisBlock()
case ctx.GlobalBool(RinkebyFlag.Name): case ctx.GlobalBool(RinkebyFlag.Name):
genesis = core.DefaultRinkebyGenesisBlock() genesis = core.DefaultRinkebyGenesisBlock()
case ctx.GlobalBool(GoerliFlag.Name): case ctx.GlobalBool(GoerliFlag.Name):


@ -176,13 +176,14 @@ func MustDecodeBig(input string) *big.Int {
}
// EncodeBig encodes bigint as a hex string with 0x prefix.
// The sign of the integer is ignored.
func EncodeBig(bigint *big.Int) string {
nbits := bigint.BitLen()
if sign := bigint.Sign(); sign == 0 {
if nbits == 0 {
return "0x0"
} else if sign > 0 {
return "0x" + bigint.Text(16)
} else {
return "-0x" + bigint.Text(16)[1:]
}
return fmt.Sprintf("%#x", bigint)
}
func has0xPrefix(input string) bool {


@ -201,3 +201,15 @@ func TestDecodeUint64(t *testing.T) {
} }
} }
} }
func BenchmarkEncodeBig(b *testing.B) {
for _, bench := range encodeBigTests {
b.Run(bench.want, func(b *testing.B) {
b.ReportAllocs()
bigint := bench.input.(*big.Int)
for i := 0; i < b.N; i++ {
EncodeBig(bigint)
}
})
}
}
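To make the behavioural contract of the rewritten `EncodeBig` concrete, here is a minimal sketch of the outputs the new implementation should produce for zero, positive and negative inputs:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	fmt.Println(hexutil.EncodeBig(big.NewInt(0)))    // "0x0"
	fmt.Println(hexutil.EncodeBig(big.NewInt(255)))  // "0xff"
	fmt.Println(hexutil.EncodeBig(big.NewInt(-255))) // "-0xff"
}
```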


@ -214,6 +214,9 @@ func (api *API) GetSigner(rlpOrBlockNr *blockNumberOrHashOrRLP) (common.Address,
} else if number, ok := blockNrOrHash.Number(); ok { } else if number, ok := blockNrOrHash.Number(); ok {
header = api.chain.GetHeaderByNumber(uint64(number.Int64())) header = api.chain.GetHeaderByNumber(uint64(number.Int64()))
} }
if header == nil {
return common.Address{}, fmt.Errorf("missing block %v", blockNrOrHash.String())
}
return api.clique.Author(header) return api.clique.Author(header)
} }
block := new(types.Block) block := new(types.Block)


@ -600,8 +600,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
}
// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
if c.config.Period == 0 && len(block.Transactions()) == 0 {
log.Info("Sealing paused, waiting for transactions")
return errors.New("sealing paused while waiting for transactions")
return nil
}
// Don't hold the signer fields for the entire sealing procedure
c.lock.RLock()
@ -621,8 +620,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, res
if recent == signer {
// Signer is among recents, only wait if the current block doesn't shift it out
if limit := uint64(len(snap.Signers)/2 + 1); number < limit || seen > number-limit {
log.Info("Signed recently, must wait for others")
return errors.New("signed recently, must wait for others")
return nil
}
}
}

View File

@ -45,6 +45,11 @@ var (
maxUncles = 2 // Maximum number of uncles allowed in a single block maxUncles = 2 // Maximum number of uncles allowed in a single block
allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks
// calcDifficultyEip4345 is the difficulty adjustment algorithm as specified by EIP 4345.
// It offsets the bomb a total of 10.7M blocks.
// Specification EIP-4345: https://eips.ethereum.org/EIPS/eip-4345
calcDifficultyEip4345 = makeDifficultyCalculator(big.NewInt(10_700_000))
// calcDifficultyEip3554 is the difficulty adjustment algorithm as specified by EIP 3554. // calcDifficultyEip3554 is the difficulty adjustment algorithm as specified by EIP 3554.
// It offsets the bomb a total of 9.7M blocks. // It offsets the bomb a total of 9.7M blocks.
// Specification EIP-3554: https://eips.ethereum.org/EIPS/eip-3554 // Specification EIP-3554: https://eips.ethereum.org/EIPS/eip-3554
@ -330,6 +335,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uin
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int { func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
next := new(big.Int).Add(parent.Number, big1) next := new(big.Int).Add(parent.Number, big1)
switch { switch {
case config.IsArrowGlacier(next):
return calcDifficultyEip4345(time, parent)
case config.IsLondon(next): case config.IsLondon(next):
return calcDifficultyEip3554(time, parent) return calcDifficultyEip3554(time, parent)
case config.IsMuirGlacier(next): case config.IsMuirGlacier(next):
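For context, the bomb delay behind calcDifficultyEip4345 works by subtracting the offset from the parent block number before the exponential bomb term is computed. A rough, self-contained sketch of that piece (simplified; the exact constants and rounding of makeDifficultyCalculator are not reproduced here):

// Sketch: how a 10.7M-block bomb delay translates into the "fake" block number
// that the difficulty-bomb term sees (assumption: simplified from makeDifficultyCalculator).
package main

import (
	"fmt"
	"math/big"
)

func fakeBlockNumber(parentNumber, bombDelay *big.Int) *big.Int {
	fake := new(big.Int)
	if parentNumber.Cmp(bombDelay) >= 0 {
		fake.Sub(parentNumber, bombDelay) // pretend the chain is bombDelay blocks younger
	}
	return fake
}

func main() {
	delay := big.NewInt(10_700_000) // EIP-4345 offset
	// Around the Arrow Glacier activation height the bomb effectively restarts far in the past.
	fmt.Println(fakeBlockNumber(big.NewInt(13_773_000), delay)) // 3073000
}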

View File

@ -136,13 +136,16 @@ func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(bu
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil { if err = ensureSize(dump, int64(len(dumpMagic))*4+int64(size)); err != nil {
dump.Close()
os.Remove(temp)
return nil, nil, nil, err return nil, nil, nil, err
} }
// Memory map the file for writing and fill it with the generator // Memory map the file for writing and fill it with the generator
mem, buffer, err := memoryMapFile(dump, true) mem, buffer, err := memoryMapFile(dump, true)
if err != nil { if err != nil {
dump.Close() dump.Close()
os.Remove(temp)
return nil, nil, nil, err return nil, nil, nil, err
} }
copy(buffer, dumpMagic) copy(buffer, dumpMagic)
@ -358,7 +361,7 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
if err != nil { if err != nil {
logger.Error("Failed to generate mapped ethash dataset", "err", err) logger.Error("Failed to generate mapped ethash dataset", "err", err)
d.dataset = make([]uint32, dsize/2) d.dataset = make([]uint32, dsize/4)
generateDataset(d.dataset, d.epoch, cache) generateDataset(d.dataset, d.epoch, cache)
} }
// Iterate over all previous instances and delete old ones // Iterate over all previous instances and delete old ones
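The divisor change matters here: dsize is the dataset size in bytes, while the in-memory fallback is a []uint32 whose elements are 4 bytes each, so the slice needs dsize/4 entries; the earlier dsize/2 over-allocated the fallback dataset by a factor of two.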

View File

@ -0,0 +1,35 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build linux
// +build linux
package ethash
import (
"os"
"golang.org/x/sys/unix"
)
// ensureSize expands the file to the given size. This is to prevent runtime
// errors later on: even though the file ostensibly is already expanded, a
// sparse file does not actually occupy the full declared size on disk, so
// filling it later can still exceed the disk capacity.
func ensureSize(f *os.File, size int64) error {
// Docs: https://www.man7.org/linux/man-pages/man2/fallocate.2.html
return unix.Fallocate(int(f.Fd()), 0, 0, size)
}
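A minimal standalone demonstration of the same call, useful for seeing how it differs from a plain truncate (Linux only; the file path is a hypothetical example, not taken from the diff):

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Create("/tmp/fallocate-demo.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	// Reserve 64 MiB of real disk space up front; unlike Truncate, this fails
	// immediately with ENOSPC if the space cannot actually be provided.
	if err := unix.Fallocate(int(f.Fd()), 0, 0, 64<<20); err != nil {
		log.Fatal(err)
	}
}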

View File

@ -0,0 +1,36 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build !linux
// +build !linux
package ethash
import (
"os"
)
// ensureSize expands the file to the given size. This is to prevent runtime
// errors later on: even though the file ostensibly is already expanded, a
// sparse file does not actually occupy the full declared size on disk, so
// filling it later can still exceed the disk capacity.
func ensureSize(f *os.File, size int64) error {
// On systems which do not support fallocate, we merely truncate the file.
// More robust alternatives would be to
// - use posix_fallocate, or
// - explicitly fill the file with zeroes.
return f.Truncate(size)
}

View File

@ -75,7 +75,7 @@ var (
// This is the content of the genesis block used by the benchmarks. // This is the content of the genesis block used by the benchmarks.
benchRootKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") benchRootKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
benchRootAddr = crypto.PubkeyToAddress(benchRootKey.PublicKey) benchRootAddr = crypto.PubkeyToAddress(benchRootKey.PublicKey)
benchRootFunds = math.BigPow(2, 100) benchRootFunds = math.BigPow(2, 200)
) )
// genValueTx returns a block generator that includes a single // genValueTx returns a block generator that includes a single
@ -86,7 +86,19 @@ func genValueTx(nbytes int) func(int, *BlockGen) {
toaddr := common.Address{} toaddr := common.Address{}
data := make([]byte, nbytes) data := make([]byte, nbytes)
gas, _ := IntrinsicGas(data, nil, false, false, false) gas, _ := IntrinsicGas(data, nil, false, false, false)
tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, nil, data), types.HomesteadSigner{}, benchRootKey) signer := types.MakeSigner(gen.config, big.NewInt(int64(i)))
gasPrice := big.NewInt(0)
if gen.header.BaseFee != nil {
gasPrice = gen.header.BaseFee
}
tx, _ := types.SignNewTx(benchRootKey, signer, &types.LegacyTx{
Nonce: gen.TxNonce(benchRootAddr),
To: &toaddr,
Value: big.NewInt(1),
Gas: gas,
Data: data,
GasPrice: gasPrice,
})
gen.AddTx(tx) gen.AddTx(tx)
} }
} }
@ -110,24 +122,38 @@ func init() {
// and fills the blocks with many small transactions. // and fills the blocks with many small transactions.
func genTxRing(naccounts int) func(int, *BlockGen) { func genTxRing(naccounts int) func(int, *BlockGen) {
from := 0 from := 0
availableFunds := new(big.Int).Set(benchRootFunds)
return func(i int, gen *BlockGen) { return func(i int, gen *BlockGen) {
block := gen.PrevBlock(i - 1) block := gen.PrevBlock(i - 1)
gas := block.GasLimit() gas := block.GasLimit()
gasPrice := big.NewInt(0)
if gen.header.BaseFee != nil {
gasPrice = gen.header.BaseFee
}
signer := types.MakeSigner(gen.config, big.NewInt(int64(i)))
for { for {
gas -= params.TxGas gas -= params.TxGas
if gas < params.TxGas { if gas < params.TxGas {
break break
} }
to := (from + 1) % naccounts to := (from + 1) % naccounts
tx := types.NewTransaction( burn := new(big.Int).SetUint64(params.TxGas)
gen.TxNonce(ringAddrs[from]), burn.Mul(burn, gen.header.BaseFee)
ringAddrs[to], availableFunds.Sub(availableFunds, burn)
benchRootFunds, if availableFunds.Cmp(big.NewInt(1)) < 0 {
params.TxGas, panic("not enough funds")
nil, }
nil, tx, err := types.SignNewTx(ringKeys[from], signer,
) &types.LegacyTx{
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, ringKeys[from]) Nonce: gen.TxNonce(ringAddrs[from]),
To: &ringAddrs[to],
Value: availableFunds,
Gas: params.TxGas,
GasPrice: gasPrice,
})
if err != nil {
panic(err)
}
gen.AddTx(tx) gen.AddTx(tx)
from = to from = to
} }
@ -245,6 +271,7 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) {
block := types.NewBlockWithHeader(header) block := types.NewBlockWithHeader(header)
rawdb.WriteBody(db, hash, n, block.Body()) rawdb.WriteBody(db, hash, n, block.Body())
rawdb.WriteReceipts(db, hash, n, nil) rawdb.WriteReceipts(db, hash, n, nil)
rawdb.WriteHeadBlockHash(db, hash)
} }
} }
} }
@ -278,6 +305,8 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
} }
makeChainForBench(db, full, count) makeChainForBench(db, full, count)
db.Close() db.Close()
cacheConfig := *defaultCacheConfig
cacheConfig.TrieDirtyDisabled = true
b.ReportAllocs() b.ReportAllocs()
b.ResetTimer() b.ResetTimer()
@ -287,7 +316,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
if err != nil { if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err) b.Fatalf("error opening database at %v: %v", dir, err)
} }
chain, err := NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) chain, err := NewBlockChain(db, &cacheConfig, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
if err != nil { if err != nil {
b.Fatalf("error creating chain: %v", err) b.Fatalf("error creating chain: %v", err)
} }

View File

@ -1438,11 +1438,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// Peek the error for the first block to decide the directing import logic // Peek the error for the first block to decide the directing import logic
it := newInsertIterator(chain, results, bc.validator) it := newInsertIterator(chain, results, bc.validator)
block, err := it.next() block, err := it.next()
// Left-trim all the known blocks // Left-trim all the known blocks that don't need to build snapshot
if err == ErrKnownBlock { if bc.skipBlock(err, it) {
// First block (and state) is known // First block (and state) is known
// 1. We did a roll-back, and should now do a re-import // 1. We did a roll-back, and should now do a re-import
// 2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot // 2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot
@ -1453,7 +1452,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
localTd = bc.GetTd(current.Hash(), current.NumberU64()) localTd = bc.GetTd(current.Hash(), current.NumberU64())
externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil
) )
for block != nil && err == ErrKnownBlock { for block != nil && bc.skipBlock(err, it) {
externTd = new(big.Int).Add(externTd, block.Difficulty()) externTd = new(big.Int).Add(externTd, block.Difficulty())
if localTd.Cmp(externTd) < 0 { if localTd.Cmp(externTd) < 0 {
break break
@ -1471,7 +1470,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// When node runs a fast sync again, it can re-import a batch of known blocks via // When node runs a fast sync again, it can re-import a batch of known blocks via
// `insertChain` while a part of them have higher total difficulty than current // `insertChain` while a part of them have higher total difficulty than current
// head full block(new pivot point). // head full block(new pivot point).
for block != nil && err == ErrKnownBlock { for block != nil && bc.skipBlock(err, it) {
log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash()) log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
if err := bc.writeKnownBlock(block); err != nil { if err := bc.writeKnownBlock(block); err != nil {
return it.index, err return it.index, err
@ -1503,8 +1502,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// If there are any still remaining, mark as ignored // If there are any still remaining, mark as ignored
return it.index, err return it.index, err
// Some other error occurred, abort // Some other error (except ErrKnownBlock) occurred, abort.
case err != nil: // ErrKnownBlock is allowed here since some known blocks
// still need re-execution to generate snapshots that are missing
case err != nil && !errors.Is(err, ErrKnownBlock):
bc.futureBlocks.Remove(block.Hash()) bc.futureBlocks.Remove(block.Hash())
stats.ignored += len(it.chain) stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err) bc.reportBlock(block, nil, err)
@ -1522,7 +1523,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
} }
}() }()
for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() { for ; block != nil && err == nil || errors.Is(err, ErrKnownBlock); block, err = it.next() {
// If the chain is terminating, stop processing blocks // If the chain is terminating, stop processing blocks
if bc.insertStopped() { if bc.insertStopped() {
log.Debug("Abort during block processing") log.Debug("Abort during block processing")
@ -1537,8 +1538,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er
// Clique blocks where they can share state among each other, so importing an // Clique blocks where they can share state among each other, so importing an
// older block might complete the state of the subsequent one. In this case, // older block might complete the state of the subsequent one. In this case,
// just skip the block (we already validated it once fully (and crashed), since // just skip the block (we already validated it once fully (and crashed), since
// its header and body was already in the database). // its header and body was already in the database). But if the corresponding
if err == ErrKnownBlock { // snapshot layer is missing, forcibly rerun the execution to build it.
if bc.skipBlock(err, it) {
logger := log.Debug logger := log.Debug
if bc.chainConfig.Clique == nil { if bc.chainConfig.Clique == nil {
logger = log.Warn logger = log.Warn
@ -2016,6 +2018,47 @@ func (bc *BlockChain) futureBlocksLoop() {
} }
} }
// skipBlock returns 'true', if the block being imported can be skipped over, meaning
// that the block does not need to be processed but can be considered already fully 'done'.
func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool {
// We can only ever bypass processing if the only error returned by the validator
// is ErrKnownBlock, which means all checks passed, but we already have the block
// and state.
if !errors.Is(err, ErrKnownBlock) {
return false
}
// If we're not using snapshots, we can skip this, since we have both block
// and (trie-) state
if bc.snaps == nil {
return true
}
var (
header = it.current() // header can't be nil
parentRoot common.Hash
)
// If we also have the snapshot-state, we can skip the processing.
if bc.snaps.Snapshot(header.Root) != nil {
return true
}
// In this case, we have the trie-state but not snapshot-state. If the parent
// snapshot-state exists, we need to process this in order to not get a gap
// in the snapshot layers.
// Resolve parent block
if parent := it.previous(); parent != nil {
parentRoot = parent.Root
} else if parent = bc.GetHeaderByHash(header.ParentHash); parent != nil {
parentRoot = parent.Root
}
if parentRoot == (common.Hash{}) {
return false // Theoretically impossible case
}
// Parent is also missing snapshot: we can skip this. Otherwise process.
if bc.snaps.Snapshot(parentRoot) == nil {
return true
}
return false
}
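Put differently: a known block is skipped only when snapshots are disabled, when its own snapshot layer already exists, or when its parent has no snapshot layer either; the one combination that forces re-execution is a present parent layer with a missing own layer, which is exactly the gap the snapshot tree must not contain.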
// maintainTxIndex is responsible for the construction and deletion of the // maintainTxIndex is responsible for the construction and deletion of the
// transaction index. // transaction index.
// //

View File

@ -150,6 +150,14 @@ func (it *insertIterator) previous() *types.Header {
return it.chain[it.index-1].Header() return it.chain[it.index-1].Header()
} }
// current returns the current header that is being processed, or nil.
func (it *insertIterator) current() *types.Header {
if it.index == -1 || it.index >= len(it.chain) {
return nil
}
return it.chain[it.index].Header()
}
// first returns the first block in the it. // first returns the first block in the it.
func (it *insertIterator) first() *types.Block { func (it *insertIterator) first() *types.Block {
return it.chain[0] return it.chain[0]

View File

@ -1863,3 +1863,124 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen) t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen)
} }
} }
// TestIssue23496 tests the scenario described in https://github.com/ethereum/go-ethereum/pull/23496#issuecomment-926393893
// Credits to @zzyalbert for finding the issue.
//
// Local chain owns these blocks:
// G B1 B2 B3 B4
// B1: state committed
// B2: snapshot disk layer
// B3: state committed
// B4: head block
//
// A crash happens before the snapshot and the in-memory states are fully persisted,
// so the chain rewinds itself to B1 (skipping B3 in order to recover the snapshot).
// In this case the snapshot layer of B3 is never created, because the state for B3
// already exists on disk.
func TestIssue23496(t *testing.T) {
// The test case is hard to follow; enable the log handler below to visualize the input.
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// Create a temporary persistent database
datadir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Failed to create temporary datadir: %v", err)
}
os.RemoveAll(datadir)
db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
}
defer db.Close() // Might double close, should be fine
// Initialize a fresh chain
var (
genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
engine = ethash.NewFullFaker()
config = &CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 256,
SnapshotWait: true,
}
)
chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), 4, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{0x02})
b.SetDifficulty(big.NewInt(1000000))
})
// Insert block B1 and commit the state into disk
if _, err := chain.InsertChain(blocks[:1]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
chain.stateCache.TrieDB().Commit(blocks[0].Root(), true, nil)
// Insert block B2 and commit the snapshot into disk
if _, err := chain.InsertChain(blocks[1:2]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil {
t.Fatalf("Failed to flatten snapshots: %v", err)
}
// Insert block B3 and commit the state into disk
if _, err := chain.InsertChain(blocks[2:3]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
chain.stateCache.TrieDB().Commit(blocks[2].Root(), true, nil)
// Insert the remaining blocks
if _, err := chain.InsertChain(blocks[3:]); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
}
// Pull the plug on the database, simulating a hard crash
db.Close()
// Start a new blockchain back up and see where the repair leads us
db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)
}
defer db.Close()
chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
defer chain.Stop()
if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
}
if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
}
if head := chain.CurrentBlock(); head.NumberU64() != uint64(1) {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(1))
}
// Reinsert B2-B4
if _, err := chain.InsertChain(blocks[1:]); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
}
if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4)
}
if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
}
if head := chain.CurrentBlock(); head.NumberU64() != uint64(4) {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(4))
}
if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil {
t.Error("Failed to regenerate the snapshot of known state")
}
}

View File

@ -360,7 +360,7 @@ func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) } func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) }
func testReorgLong(t *testing.T, full bool) { func testReorgLong(t *testing.T, full bool) {
testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280, full) testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full)
} }
// Tests that reorganising a short difficult chain after a long easy one // Tests that reorganising a short difficult chain after a long easy one
@ -380,7 +380,7 @@ func testReorgShort(t *testing.T, full bool) {
for i := 0; i < len(diff); i++ { for i := 0; i < len(diff); i++ {
diff[i] = -9 diff[i] = -9
} }
testReorg(t, easy, diff, 12615120, full) testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full)
} }
func testReorg(t *testing.T, first, second []int64, td int64, full bool) { func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
@ -2385,7 +2385,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in
for txi := 0; txi < numTxs; txi++ { for txi := 0; txi < numTxs; txi++ {
uniq := uint64(i*numTxs + txi) uniq := uint64(i*numTxs + txi)
recipient := recipientFn(uniq) recipient := recipientFn(uniq)
tx, err := types.SignTx(types.NewTransaction(uniq, recipient, big.NewInt(1), params.TxGas, big.NewInt(1), nil), signer, testBankKey) tx, err := types.SignTx(types.NewTransaction(uniq, recipient, big.NewInt(1), params.TxGas, block.header.BaseFee, nil), signer, testBankKey)
if err != nil { if err != nil {
b.Error(err) b.Error(err)
} }

View File

@ -63,8 +63,10 @@ func TestCreation(t *testing.T) {
{12243999, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block {12243999, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
{12244000, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block {12244000, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
{12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block {12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
{12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 0}}, // First London block {12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
{20000000, ID{Hash: checksumToBytes(0xb715077d), Next: 0}}, // Future London block {13772999, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
{13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, // First Arrow Glacier block
{20000000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, // Future Arrow Glacier block
}, },
}, },
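The updated hashes above follow the EIP-2124 scheme: a CRC32 checksum of the genesis hash, folded with each past fork block number in ascending order, which is why adding Arrow Glacier at 13,773,000 moves the ID from 0xb715077d to 0x20c327fc. A rough sketch of that computation (abridged fork list and a zeroed genesis hash, so illustrative only, not the exact test fixture):

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	var genesis [32]byte // the mainnet genesis hash would go here
	sum := crc32.ChecksumIEEE(genesis[:])
	for _, fork := range []uint64{1_150_000, 12_244_000, 12_965_000, 13_773_000} { // abridged
		var blob [8]byte
		binary.BigEndian.PutUint64(blob[:], fork)
		sum = crc32.Update(sum, crc32.IEEETable, blob[:])
	}
	fmt.Printf("%#x\n", sum)
}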
// Ropsten test cases // Ropsten test cases
@ -205,11 +207,11 @@ func TestValidation(t *testing.T) {
// Local is mainnet Petersburg, remote is Rinkeby Petersburg. // Local is mainnet Petersburg, remote is Rinkeby Petersburg.
{7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale}, {7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},
// Local is mainnet London, far in the future. Remote announces Gopherium (non existing fork) // Local is mainnet Arrow Glacier, far in the future. Remote announces Gopherium (non existing fork)
// at some future block 88888888, for itself, but past block for local. Local is incompatible. // at some future block 88888888, for itself, but past block for local. Local is incompatible.
// //
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess). // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
{88888888, ID{Hash: checksumToBytes(0xb715077d), Next: 88888888}, ErrLocalIncompatibleOrStale}, {88888888, ID{Hash: checksumToBytes(0x20c327fc), Next: 88888888}, ErrLocalIncompatibleOrStale},
// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing // Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
// fork) at block 7279999, before Petersburg. Local is incompatible. // fork) at block 7279999, before Petersburg. Local is incompatible.

View File

@ -158,7 +158,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
return SetupGenesisBlockWithOverride(db, genesis, nil) return SetupGenesisBlockWithOverride(db, genesis, nil)
} }
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideLondon *big.Int) (*params.ChainConfig, common.Hash, error) { func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideArrowGlacier *big.Int) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil { if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
} }
@ -204,8 +204,8 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
} }
// Get the existing chain configuration. // Get the existing chain configuration.
newcfg := genesis.configOrDefault(stored) newcfg := genesis.configOrDefault(stored)
if overrideLondon != nil { if overrideArrowGlacier != nil {
newcfg.LondonBlock = overrideLondon newcfg.ArrowGlacierBlock = overrideArrowGlacier
} }
if err := newcfg.CheckConfigForkOrder(); err != nil { if err := newcfg.CheckConfigForkOrder(); err != nil {
return newcfg, common.Hash{}, err return newcfg, common.Hash{}, err
@ -244,6 +244,8 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
return params.MainnetChainConfig return params.MainnetChainConfig
case ghash == params.RopstenGenesisHash: case ghash == params.RopstenGenesisHash:
return params.RopstenChainConfig return params.RopstenChainConfig
case ghash == params.SepoliaGenesisHash:
return params.SepoliaChainConfig
case ghash == params.RinkebyGenesisHash: case ghash == params.RinkebyGenesisHash:
return params.RinkebyChainConfig return params.RinkebyChainConfig
case ghash == params.GoerliGenesisHash: case ghash == params.GoerliGenesisHash:
@ -322,7 +324,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
if config.Clique != nil && len(block.Extra()) == 0 { if config.Clique != nil && len(block.Extra()) == 0 {
return nil, errors.New("can't start clique chain without signers") return nil, errors.New("can't start clique chain without signers")
} }
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty) rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
rawdb.WriteBlock(db, block) rawdb.WriteBlock(db, block)
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil) rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())
@ -400,6 +402,19 @@ func DefaultGoerliGenesisBlock() *Genesis {
} }
} }
// DefaultSepoliaGenesisBlock returns the Sepolia network genesis block.
func DefaultSepoliaGenesisBlock() *Genesis {
return &Genesis{
Config: params.SepoliaChainConfig,
Nonce: 0,
ExtraData: []byte("Sepolia, Athens, Attica, Greece!"),
GasLimit: 0x1c9c380,
Difficulty: big.NewInt(0x20000),
Timestamp: 1633267481,
Alloc: decodePrealloc(sepoliaAllocData),
}
}
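As a quick sanity check of the wiring between this block and the hardcoded hash, something along these lines can be run against the tree (a sketch that mirrors the TestGenesisHashes case added elsewhere in this commit):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	block := core.DefaultSepoliaGenesisBlock().ToBlock(nil)
	fmt.Println(block.Hash() == params.SepoliaGenesisHash) // expect true
}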
// DeveloperGenesisBlock returns the 'geth --dev' genesis block. // DeveloperGenesisBlock returns the 'geth --dev' genesis block.
func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis { func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
// Override the default period to the user requested one // Override the default period to the user requested one

File diff suppressed because one or more lines are too long

View File

@ -30,25 +30,6 @@ import (
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
) )
func TestDefaultGenesisBlock(t *testing.T) {
block := DefaultGenesisBlock().ToBlock(nil)
if block.Hash() != params.MainnetGenesisHash {
t.Errorf("wrong mainnet genesis hash, got %v, want %v", block.Hash(), params.MainnetGenesisHash)
}
block = DefaultRopstenGenesisBlock().ToBlock(nil)
if block.Hash() != params.RopstenGenesisHash {
t.Errorf("wrong ropsten genesis hash, got %v, want %v", block.Hash(), params.RopstenGenesisHash)
}
block = DefaultRinkebyGenesisBlock().ToBlock(nil)
if block.Hash() != params.RinkebyGenesisHash {
t.Errorf("wrong rinkeby genesis hash, got %v, want %v", block.Hash(), params.RinkebyGenesisHash)
}
block = DefaultGoerliGenesisBlock().ToBlock(nil)
if block.Hash() != params.GoerliGenesisHash {
t.Errorf("wrong goerli genesis hash, got %v, want %v", block.Hash(), params.GoerliGenesisHash)
}
}
func TestInvalidCliqueConfig(t *testing.T) { func TestInvalidCliqueConfig(t *testing.T) {
block := DefaultGoerliGenesisBlock() block := DefaultGoerliGenesisBlock()
block.ExtraData = []byte{} block.ExtraData = []byte{}
@ -179,33 +160,56 @@ func TestSetupGenesis(t *testing.T) {
} }
} }
// TestGenesisHashes checks the congruity of default genesis data to corresponding hardcoded genesis hash values. // TestGenesisHashes checks the congruity of default genesis data to
// corresponding hardcoded genesis hash values.
func TestGenesisHashes(t *testing.T) { func TestGenesisHashes(t *testing.T) {
cases := []struct { for i, c := range []struct {
genesis *Genesis genesis *Genesis
hash common.Hash want common.Hash
}{ }{
{ {DefaultGenesisBlock(), params.MainnetGenesisHash},
genesis: DefaultGenesisBlock(), {DefaultGoerliGenesisBlock(), params.GoerliGenesisHash},
hash: params.MainnetGenesisHash, {DefaultRopstenGenesisBlock(), params.RopstenGenesisHash},
}, {DefaultRinkebyGenesisBlock(), params.RinkebyGenesisHash},
{ {DefaultSepoliaGenesisBlock(), params.SepoliaGenesisHash},
genesis: DefaultGoerliGenesisBlock(), } {
hash: params.GoerliGenesisHash, // Test via MustCommit
}, if have := c.genesis.MustCommit(rawdb.NewMemoryDatabase()).Hash(); have != c.want {
{ t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex())
genesis: DefaultRopstenGenesisBlock(),
hash: params.RopstenGenesisHash,
},
{
genesis: DefaultRinkebyGenesisBlock(),
hash: params.RinkebyGenesisHash,
},
} }
for i, c := range cases { // Test via ToBlock
b := c.genesis.MustCommit(rawdb.NewMemoryDatabase()) if have := c.genesis.ToBlock(nil).Hash(); have != c.want {
if got := b.Hash(); got != c.hash { t.Errorf("case: %d b), want: %s, got: %s", i, c.want.Hex(), have.Hex())
t.Errorf("case: %d, want: %s, got: %s", i, c.hash.Hex(), got.Hex())
} }
} }
} }
func TestGenesis_Commit(t *testing.T) {
genesis := &Genesis{
BaseFee: big.NewInt(params.InitialBaseFee),
Config: params.TestChainConfig,
// difficulty is nil
}
db := rawdb.NewMemoryDatabase()
genesisBlock, err := genesis.Commit(db)
if err != nil {
t.Fatal(err)
}
if genesis.Difficulty != nil {
t.Fatalf("assumption wrong")
}
// This value should have been set as default in the ToBlock method.
if genesisBlock.Difficulty().Cmp(params.GenesisDifficulty) != 0 {
t.Errorf("assumption wrong: want: %d, got: %v", params.GenesisDifficulty, genesisBlock.Difficulty())
}
// Expect the stored total difficulty to be the difficulty of the genesis block.
stored := rawdb.ReadTd(db, genesisBlock.Hash(), genesisBlock.NumberU64())
if stored.Cmp(genesisBlock.Difficulty()) != 0 {
t.Errorf("inequal difficulty; stored: %v, genesisBlock: %v", stored, genesisBlock.Difficulty())
}
}

View File

@ -35,20 +35,15 @@ import (
// ReadCanonicalHash retrieves the hash assigned to a canonical block number. // ReadCanonicalHash retrieves the hash assigned to a canonical block number.
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash { func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
data, _ := db.Ancient(freezerHashTable, number) var data []byte
db.ReadAncients(func(reader ethdb.AncientReader) error {
data, _ = reader.Ancient(freezerHashTable, number)
if len(data) == 0 { if len(data) == 0 {
// Get it by hash from leveldb
data, _ = db.Get(headerHashKey(number)) data, _ = db.Get(headerHashKey(number))
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
if len(data) == 0 {
data, _ = db.Ancient(freezerHashTable, number)
}
}
if len(data) == 0 {
return common.Hash{}
} }
return nil
})
return common.BytesToHash(data) return common.BytesToHash(data)
} }
@ -304,32 +299,25 @@ func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding. // ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
var data []byte
db.ReadAncients(func(reader ethdb.AncientReader) error {
// First try to look up the data in ancient database. Extra hash // First try to look up the data in ancient database. Extra hash
// comparison is necessary since ancient database only maintains // comparison is necessary since ancient database only maintains
// the canonical data. // the canonical data.
data, _ := db.Ancient(freezerHeaderTable, number) data, _ = reader.Ancient(freezerHeaderTable, number)
if len(data) > 0 && crypto.Keccak256Hash(data) == hash { if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
return data return nil
} }
// Then try to look up the data in leveldb. // If not, try reading from leveldb
data, _ = db.Get(headerKey(number, hash)) data, _ = db.Get(headerKey(number, hash))
if len(data) > 0 { return nil
})
return data return data
} }
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
data, _ = db.Ancient(freezerHeaderTable, number)
if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
return data
}
return nil // Can't find the data anywhere.
}
// HasHeader verifies the existence of a block header corresponding to the hash. // HasHeader verifies the existence of a block header corresponding to the hash.
func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool { func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash { if isCanon(db, number, hash) {
return true return true
} }
if has, err := db.Has(headerKey(number, hash)); !has || err != nil { if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
@ -389,53 +377,48 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
} }
} }
// isCanon is an internal utility method, to check whether the given number/hash
// is part of the ancient (canon) set.
func isCanon(reader ethdb.AncientReader, number uint64, hash common.Hash) bool {
h, err := reader.Ancient(freezerHashTable, number)
if err != nil {
return false
}
return bytes.Equal(h, hash[:])
}
// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding. // ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
// First try to look up the data in ancient database. Extra hash // First try to look up the data in ancient database. Extra hash
// comparison is necessary since ancient database only maintains // comparison is necessary since ancient database only maintains
// the canonical data. // the canonical data.
data, _ := db.Ancient(freezerBodiesTable, number) var data []byte
if len(data) > 0 { db.ReadAncients(func(reader ethdb.AncientReader) error {
h, _ := db.Ancient(freezerHashTable, number) // Check if the data is in ancients
if common.BytesToHash(h) == hash { if isCanon(reader, number, hash) {
return data data, _ = reader.Ancient(freezerBodiesTable, number)
return nil
} }
} // If not, try reading from leveldb
// Then try to look up the data in leveldb.
data, _ = db.Get(blockBodyKey(number, hash)) data, _ = db.Get(blockBodyKey(number, hash))
if len(data) > 0 { return nil
})
return data return data
} }
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
data, _ = db.Ancient(freezerBodiesTable, number)
if len(data) > 0 {
h, _ := db.Ancient(freezerHashTable, number)
if common.BytesToHash(h) == hash {
return data
}
}
return nil // Can't find the data anywhere.
}
// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical // ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
// block at number, in RLP encoding. // block at number, in RLP encoding.
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue { func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
// If it's an ancient one, we don't need the canonical hash var data []byte
data, _ := db.Ancient(freezerBodiesTable, number) db.ReadAncients(func(reader ethdb.AncientReader) error {
if len(data) == 0 { data, _ = reader.Ancient(freezerBodiesTable, number)
// Need to get the hash if len(data) > 0 {
return nil
}
// Get it by hash from leveldb
data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number))) data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
// In the background freezer is moving data from leveldb to flatten files. return nil
// So during the first check for ancient db, the data is not yet in there, })
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
if len(data) == 0 {
data, _ = db.Ancient(freezerBodiesTable, number)
}
}
return data return data
} }
@ -448,7 +431,7 @@ func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp
// HasBody verifies the existence of a block body corresponding to the hash. // HasBody verifies the existence of a block body corresponding to the hash.
func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool { func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash { if isCanon(db, number, hash) {
return true return true
} }
if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil { if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
@ -489,34 +472,19 @@ func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding. // ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
// First try to look up the data in ancient database. Extra hash var data []byte
// comparison is necessary since ancient database only maintains db.ReadAncients(func(reader ethdb.AncientReader) error {
// the canonical data. // Check if the data is in ancients
data, _ := db.Ancient(freezerDifficultyTable, number) if isCanon(reader, number, hash) {
if len(data) > 0 { data, _ = reader.Ancient(freezerDifficultyTable, number)
h, _ := db.Ancient(freezerHashTable, number) return nil
if common.BytesToHash(h) == hash {
return data
} }
} // If not, try reading from leveldb
// Then try to look up the data in leveldb.
data, _ = db.Get(headerTDKey(number, hash)) data, _ = db.Get(headerTDKey(number, hash))
if len(data) > 0 { return nil
})
return data return data
} }
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
data, _ = db.Ancient(freezerDifficultyTable, number)
if len(data) > 0 {
h, _ := db.Ancient(freezerHashTable, number)
if common.BytesToHash(h) == hash {
return data
}
}
return nil // Can't find the data anywhere.
}
// ReadTd retrieves a block's total difficulty corresponding to the hash. // ReadTd retrieves a block's total difficulty corresponding to the hash.
func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int { func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
@ -553,7 +521,7 @@ func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
// HasReceipts verifies the existence of all the transaction receipts belonging // HasReceipts verifies the existence of all the transaction receipts belonging
// to a block. // to a block.
func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool { func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash { if isCanon(db, number, hash) {
return true return true
} }
if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil { if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
@ -564,34 +532,19 @@ func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding. // ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
// First try to look up the data in ancient database. Extra hash var data []byte
// comparison is necessary since ancient database only maintains db.ReadAncients(func(reader ethdb.AncientReader) error {
// the canonical data. // Check if the data is in ancients
data, _ := db.Ancient(freezerReceiptTable, number) if isCanon(reader, number, hash) {
if len(data) > 0 { data, _ = reader.Ancient(freezerReceiptTable, number)
h, _ := db.Ancient(freezerHashTable, number) return nil
if common.BytesToHash(h) == hash {
return data
} }
} // If not, try reading from leveldb
// Then try to look up the data in leveldb.
data, _ = db.Get(blockReceiptsKey(number, hash)) data, _ = db.Get(blockReceiptsKey(number, hash))
if len(data) > 0 { return nil
})
return data return data
} }
// In the background freezer is moving data from leveldb to flatten files.
// So during the first check for ancient db, the data is not yet in there,
// but when we reach into leveldb, the data was already moved. That would
// result in a not found error.
data, _ = db.Ancient(freezerReceiptTable, number)
if len(data) > 0 {
h, _ := db.Ancient(freezerHashTable, number)
if common.BytesToHash(h) == hash {
return data
}
}
return nil // Can't find the data anywhere.
}
// ReadRawReceipts retrieves all the transaction receipts belonging to a block. // ReadRawReceipts retrieves all the transaction receipts belonging to a block.
// The receipt metadata fields are not guaranteed to be populated, so they // The receipt metadata fields are not guaranteed to be populated, so they

View File

@ -47,7 +47,7 @@ func DeleteSnapshotDisabled(db ethdb.KeyValueWriter) {
// ReadSnapshotRoot retrieves the root of the block whose state is contained in // ReadSnapshotRoot retrieves the root of the block whose state is contained in
// the persisted snapshot. // the persisted snapshot.
func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash { func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
data, _ := db.Get(snapshotRootKey) data, _ := db.Get(SnapshotRootKey)
if len(data) != common.HashLength { if len(data) != common.HashLength {
return common.Hash{} return common.Hash{}
} }
@ -57,7 +57,7 @@ func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash {
// WriteSnapshotRoot stores the root of the block whose state is contained in // WriteSnapshotRoot stores the root of the block whose state is contained in
// the persisted snapshot. // the persisted snapshot.
func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) { func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) {
if err := db.Put(snapshotRootKey, root[:]); err != nil { if err := db.Put(SnapshotRootKey, root[:]); err != nil {
log.Crit("Failed to store snapshot root", "err", err) log.Crit("Failed to store snapshot root", "err", err)
} }
} }
@ -67,7 +67,7 @@ func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) {
// be used during updates, so a crash or failure will mark the entire snapshot // be used during updates, so a crash or failure will mark the entire snapshot
// invalid. // invalid.
func DeleteSnapshotRoot(db ethdb.KeyValueWriter) { func DeleteSnapshotRoot(db ethdb.KeyValueWriter) {
if err := db.Delete(snapshotRootKey); err != nil { if err := db.Delete(SnapshotRootKey); err != nil {
log.Crit("Failed to remove snapshot root", "err", err) log.Crit("Failed to remove snapshot root", "err", err)
} }
} }

View File

@ -89,8 +89,8 @@ func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
return nil, errNotSupported return nil, errNotSupported
} }
// ReadAncients returns an error as we don't have a backing chain freezer. // AncientRange returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) ReadAncients(kind string, start, max, maxByteSize uint64) ([][]byte, error) { func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
return nil, errNotSupported return nil, errNotSupported
} }
@ -119,6 +119,22 @@ func (db *nofreezedb) Sync() error {
return errNotSupported return errNotSupported
} }
func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReader) error) (err error) {
// Unlike other ancient-related methods, this method does not return
// errNotSupported when invoked.
// The reason for this is that the caller might want to do several things:
// 1. Check if something is in freezer,
// 2. If not, check leveldb.
//
// This will work, since the ancient-checks inside 'fn' will return errors,
// and the leveldb work will continue.
//
// If we instead were to return errNotSupported here, then the caller would
// have to explicitly check for that, having an extra clause to do the
// non-ancient operations.
return fn(db)
}
// NewDatabase creates a high level database on top of a given key-value data // NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage. // store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database { func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
@ -355,7 +371,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
accountSnaps.Add(size) accountSnaps.Add(size)
case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength): case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
storageSnaps.Add(size) storageSnaps.Add(size)
case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength): case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
preimages.Add(size) preimages.Add(size)
case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength): case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
metadata.Add(size) metadata.Add(size)
@ -377,7 +393,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
var accounted bool var accounted bool
for _, meta := range [][]byte{ for _, meta := range [][]byte{
databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey, databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
fastTrieProgressKey, snapshotDisabledKey, snapshotRootKey, snapshotJournalKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
uncleanShutdownKey, badBlockKey, uncleanShutdownKey, badBlockKey,
} { } {

View File

@ -80,8 +80,9 @@ type freezer struct {
frozen uint64 // Number of blocks already frozen frozen uint64 // Number of blocks already frozen
threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests) threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
// This lock synchronizes writers and the truncate operation. // This lock synchronizes writers and the truncate operation, as well as
writeLock sync.Mutex // the "atomic" (batched) read operations.
writeLock sync.RWMutex
writeBatch *freezerBatch writeBatch *freezerBatch
readonly bool readonly bool
@ -201,12 +202,12 @@ func (f *freezer) Ancient(kind string, number uint64) ([]byte, error) {
return nil, errUnknownTable return nil, errUnknownTable
} }
// ReadAncients retrieves multiple items in sequence, starting from the index 'start'. // AncientRange retrieves multiple items in sequence, starting from the index 'start'.
// It will return // It will return
// - at most 'max' items, // - at most 'max' items,
// - at least 1 item (even if exceeding the maxByteSize), but will otherwise // - at least 1 item (even if exceeding the maxByteSize), but will otherwise
// return as many items as fit into maxByteSize. // return as many items as fit into maxByteSize.
func (f *freezer) ReadAncients(kind string, start, count, maxBytes uint64) ([][]byte, error) { func (f *freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
if table := f.tables[kind]; table != nil { if table := f.tables[kind]; table != nil {
return table.RetrieveItems(start, count, maxBytes) return table.RetrieveItems(start, count, maxBytes)
} }
@ -222,8 +223,8 @@ func (f *freezer) Ancients() (uint64, error) {
func (f *freezer) AncientSize(kind string) (uint64, error) { func (f *freezer) AncientSize(kind string) (uint64, error) {
// This needs the write lock to avoid data races on table fields. // This needs the write lock to avoid data races on table fields.
// Speed doesn't matter here, AncientSize is for debugging. // Speed doesn't matter here, AncientSize is for debugging.
f.writeLock.Lock() f.writeLock.RLock()
defer f.writeLock.Unlock() defer f.writeLock.RUnlock()
if table := f.tables[kind]; table != nil { if table := f.tables[kind]; table != nil {
return table.size() return table.size()
@ -231,6 +232,14 @@ func (f *freezer) AncientSize(kind string) (uint64, error) {
return 0, errUnknownTable return 0, errUnknownTable
} }
// ReadAncients runs the given read operation while ensuring that no writes take place
// on the underlying freezer.
func (f *freezer) ReadAncients(fn func(ethdb.AncientReader) error) (err error) {
f.writeLock.RLock()
defer f.writeLock.RUnlock()
return fn(f)
}
// ModifyAncients runs the given write operation. // ModifyAncients runs the given write operation.
func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) { func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
if f.readonly { if f.readonly {

View File

@@ -48,8 +48,8 @@ var (
// snapshotDisabledKey flags that the snapshot should not be maintained due to initial sync.
snapshotDisabledKey = []byte("SnapshotDisabled")
-// snapshotRootKey tracks the hash of the last snapshot.
-snapshotRootKey = []byte("SnapshotRoot")
+// SnapshotRootKey tracks the hash of the last snapshot.
+SnapshotRootKey = []byte("SnapshotRoot")
// snapshotJournalKey tracks the in-memory diff layers across restarts.
snapshotJournalKey = []byte("SnapshotJournal")
@@ -90,7 +90,7 @@ var (
SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
CodePrefix = []byte("c") // CodePrefix + code hash -> account code
-preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage
+PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
configPrefix = []byte("ethereum-config-") // config prefix for the db
// Chain index prefixes (use `i` + single byte to avoid mixing data types).
@@ -207,9 +207,9 @@ func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
return key
}
-// preimageKey = preimagePrefix + hash
+// preimageKey = PreimagePrefix + hash
func preimageKey(hash common.Hash) []byte {
-return append(preimagePrefix, hash.Bytes()...)
+return append(PreimagePrefix, hash.Bytes()...)
}
// codeKey = CodePrefix + hash


@@ -62,10 +62,10 @@ func (t *table) Ancient(kind string, number uint64) ([]byte, error) {
return t.db.Ancient(kind, number)
}
-// ReadAncients is a noop passthrough that just forwards the request to the underlying
+// AncientRange is a noop passthrough that just forwards the request to the underlying
// database.
-func (t *table) ReadAncients(kind string, start, count, maxBytes uint64) ([][]byte, error) {
-return t.db.ReadAncients(kind, start, count, maxBytes)
+func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
+return t.db.AncientRange(kind, start, count, maxBytes)
}
// Ancients is a noop passthrough that just forwards the request to the underlying
@@ -85,6 +85,10 @@ func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, erro
return t.db.ModifyAncients(fn)
}
+func (t *table) ReadAncients(fn func(reader ethdb.AncientReader) error) (err error) {
+return t.db.ReadAncients(fn)
+}
// TruncateAncients is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) TruncateAncients(items uint64) error {


@@ -40,7 +40,7 @@ func getBlock(transactions int, uncles int, dataSize int) *types.Block {
// A sender who makes transactions, has some funds
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
-funds = big.NewInt(1000000000000000)
+funds = big.NewInt(1_000_000_000_000_000_000)
gspec = &Genesis{
Config: params.TestChainConfig,
Alloc: GenesisAlloc{address: {Balance: funds}},


@@ -388,7 +388,7 @@ func BenchmarkJournal(b *testing.B) {
}
return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage)
}
-layer := snapshot(new(diskLayer))
+layer := snapshot(emptyLayer())
for i := 1; i < 128; i++ {
layer = fill(layer)
}


@@ -295,9 +295,9 @@ func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Tran
thresholdFeeCap := aFeeCap.Div(aFeeCap, b)
thresholdTip := aTip.Div(aTip, b)
-// Have to ensure that either the new fee cap or tip is higher than the
+// We have to ensure that both the new fee cap and tip are higher than the
// old ones as well as checking the percentage threshold to ensure that
-// this is accurate for low (Wei-level) gas price replacements
+// this is accurate for low (Wei-level) gas price replacements.
if tx.GasFeeCapIntCmp(thresholdFeeCap) < 0 || tx.GasTipCapIntCmp(thresholdTip) < 0 {
return false, nil
}
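A small worked example of the replacement rule in the corrected comment above, assuming the default 10% price bump; the concrete numbers are illustrative only:

// With priceBump = 10, an old transaction with gasFeeCap = 100 and gasTipCap = 10
// can only be replaced when the new transaction offers gasFeeCap >= 110 AND
// gasTipCap >= 11; failing either threshold rejects the replacement.
priceBump := uint64(10)
a := big.NewInt(100 + int64(priceBump)) // 110
b := big.NewInt(100)
thresholdFeeCap := new(big.Int).Div(new(big.Int).Mul(a, big.NewInt(100)), b) // 110
thresholdTip := new(big.Int).Div(new(big.Int).Mul(a, big.NewInt(10)), b)     // 11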


@@ -51,7 +51,7 @@ func TestStrictTxListAdd(t *testing.T) {
}
}
-func BenchmarkTxListAdd(t *testing.B) {
+func BenchmarkTxListAdd(b *testing.B) {
// Generate a list of transactions to insert
key, _ := crypto.GenerateKey()
@@ -60,11 +60,13 @@ func BenchmarkTxListAdd(t *testing.B) {
txs[i] = transaction(uint64(i), 0, key)
}
// Insert the transactions in a random order
-list := newTxList(true)
priceLimit := big.NewInt(int64(DefaultTxPoolConfig.PriceLimit))
-t.ResetTimer()
+b.ResetTimer()
+for i := 0; i < b.N; i++ {
+list := newTxList(true)
for _, v := range rand.Perm(len(txs)) {
list.Add(txs[v], DefaultTxPoolConfig.PriceBump)
list.Filter(priceLimit, DefaultTxPoolConfig.PriceBump)
}
}
+}


@@ -77,3 +77,11 @@ func (txn *txNoncer) setIfLower(addr common.Address, nonce uint64) {
}
txn.nonces[addr] = nonce
}
// setAll sets the nonces for all accounts to the given map.
func (txn *txNoncer) setAll(all map[common.Address]uint64) {
txn.lock.Lock()
defer txn.lock.Unlock()
txn.nonces = all
}


@@ -1182,16 +1182,18 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt
pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead)
pool.priced.SetBaseFee(pendingBaseFee)
}
+// Update all accounts to the latest known pending nonce
+nonces := make(map[common.Address]uint64, len(pool.pending))
+for addr, list := range pool.pending {
+highestPending := list.LastElement()
+nonces[addr] = highestPending.Nonce() + 1
+}
+pool.pendingNonces.setAll(nonces)
}
// Ensure pool.queue and pool.pending sizes stay within the configured limits.
pool.truncatePending()
pool.truncateQueue()
-// Update all accounts to the latest known pending nonce
-for addr, list := range pool.pending {
-highestPending := list.LastElement()
-pool.pendingNonces.set(addr, highestPending.Nonce()+1)
-}
dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
pool.changesSinceReorg = 0 // Reset change counter
pool.mu.Unlock()
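The nonce refresh now happens inside the reset branch: the map of highest pending nonces is built in one pass and handed to the new setAll helper, so the noncer's lock is taken once instead of once per pending account. A rough sketch of the pattern, with the pending map standing in for the pool's internal structure:

// Compute the next expected nonce per account in a single pass ...
nonces := make(map[common.Address]uint64, len(pending))
for addr, list := range pending {
    highest := list.LastElement() // last tx in the nonce-sorted pending list
    nonces[addr] = highest.Nonce() + 1
}
// ... then swap the whole map in under one lock acquisition.
pool.pendingNonces.setAll(nonces)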


@@ -2540,3 +2540,24 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) {
pool.Stop()
}
}
// Benchmarks the speed of batch transaction insertion in case of multiple accounts.
func BenchmarkPoolMultiAccountBatchInsert(b *testing.B) {
// Generate a batch of transactions to enqueue into the pool
pool, _ := setupTxPool()
defer pool.Stop()
b.ReportAllocs()
batches := make(types.Transactions, b.N)
for i := 0; i < b.N; i++ {
key, _ := crypto.GenerateKey()
account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000))
tx := transaction(uint64(0), 100000, key)
batches[i] = tx
}
// Benchmark importing the transactions into the queue
b.ResetTimer()
for _, tx := range batches {
pool.AddRemotesSync([]*types.Transaction{tx})
}
}


@@ -182,9 +182,14 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
if !evm.StateDB.Exist(addr) {
if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 {
// Calling a non existing account, don't do anything, but ping the tracer
-if evm.Config.Debug && evm.depth == 0 {
+if evm.Config.Debug {
+if evm.depth == 0 {
evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value)
evm.Config.Tracer.CaptureEnd(ret, 0, 0, nil)
+} else {
+evm.Config.Tracer.CaptureEnter(CALL, caller.Address(), addr, input, gas, value)
+evm.Config.Tracer.CaptureExit(ret, 0, nil)
+}
}
return nil, gas, nil
}


@@ -28,7 +28,7 @@ import (
// Config are the configuration options for the Interpreter
type Config struct {
Debug bool // Enables debugging
-Tracer Tracer // Opcode logger
+Tracer EVMLogger // Opcode logger
NoRecursion bool // Disables call, callcode, delegate call and create
NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls)
EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages
@@ -152,9 +152,9 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
pc = uint64(0) // program counter
cost uint64
// copies used by tracer
-pcCopy uint64 // needed for the deferred Tracer
-gasCopy uint64 // for Tracer to log gas remaining before execution
-logged bool // deferred Tracer should ignore already logged steps
+pcCopy uint64 // needed for the deferred EVMLogger
+gasCopy uint64 // for EVMLogger to log gas remaining before execution
+logged bool // deferred EVMLogger should ignore already logged steps
res []byte // result of the opcode execution function
)
// Don't move this deferrred function, it's placed before the capturestate-deferred method,


@@ -98,12 +98,12 @@ func (s *StructLog) ErrorString() string {
return ""
}
-// Tracer is used to collect execution traces from an EVM transaction
+// EVMLogger is used to collect execution traces from an EVM transaction
// execution. CaptureState is called for each step of the VM with the
// current VM state.
// Note that reference types are actual VM data structures; make copies
// if you need to retain them beyond the current call.
-type Tracer interface {
+type EVMLogger interface {
CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int)
CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error)
CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int)
@@ -112,7 +112,7 @@ type Tracer interface {
CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error)
}
-// StructLogger is an EVM state logger and implements Tracer.
+// StructLogger is an EVM state logger and implements EVMLogger.
//
// StructLogger can capture state based on the given Log configuration and also keeps
// a track record of modified storage which is used in reporting snapshots of the
@@ -145,7 +145,7 @@ func (l *StructLogger) Reset() {
l.err = nil
}
-// CaptureStart implements the Tracer interface to initialize the tracing operation.
+// CaptureStart implements the EVMLogger interface to initialize the tracing operation.
func (l *StructLogger) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
}
@@ -210,7 +210,7 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
l.logs = append(l.logs, log)
}
-// CaptureFault implements the Tracer interface to trace an execution fault
+// CaptureFault implements the EVMLogger interface to trace an execution fault
// while running an opcode.
func (l *StructLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) {
}


@@ -352,8 +352,8 @@ func (b *EthAPIBackend) StartMining(threads int) error {
return b.eth.StartMining(threads)
}
-func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) {
-return b.eth.stateAtBlock(block, reexec, base, checkLive)
+func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, error) {
+return b.eth.stateAtBlock(block, reexec, base, checkLive, preferDisk)
}
func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) {


@@ -131,7 +131,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if err != nil {
return nil, err
}
-chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideLondon)
+chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideArrowGlacier)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
return nil, genesisErr
}


@@ -202,8 +202,8 @@ type Config struct {
// CheckpointOracle is the configuration for checkpoint oracle.
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
-// Berlin block override (TODO: remove after the fork)
-OverrideLondon *big.Int `toml:",omitempty"`
+// Arrow Glacier block override (TODO: remove after the fork)
+OverrideArrowGlacier *big.Int `toml:",omitempty"`
}
// CreateConsensusEngine creates a consensus engine for the given chain configuration.


@@ -59,7 +59,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
RPCTxFeeCap float64
Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
-OverrideLondon *big.Int `toml:",omitempty"`
+OverrideArrowGlacier *big.Int `toml:",omitempty"`
}
var enc Config
enc.Genesis = c.Genesis
@@ -103,7 +103,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.RPCTxFeeCap = c.RPCTxFeeCap
enc.Checkpoint = c.Checkpoint
enc.CheckpointOracle = c.CheckpointOracle
-enc.OverrideLondon = c.OverrideLondon
+enc.OverrideArrowGlacier = c.OverrideArrowGlacier
return &enc, nil
}
@@ -151,7 +151,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
RPCTxFeeCap *float64
Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
-OverrideLondon *big.Int `toml:",omitempty"`
+OverrideArrowGlacier *big.Int `toml:",omitempty"`
}
var dec Config
if err := unmarshal(&dec); err != nil {
@@ -280,8 +280,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.CheckpointOracle != nil {
c.CheckpointOracle = dec.CheckpointOracle
}
-if dec.OverrideLondon != nil {
-c.OverrideLondon = dec.OverrideLondon
+if dec.OverrideArrowGlacier != nil {
+c.OverrideArrowGlacier = dec.OverrideArrowGlacier
}
return nil
}


@@ -391,14 +391,15 @@ func (f *BlockFetcher) loop() {
blockAnnounceDOSMeter.Mark(1)
break
}
+if notification.number == 0 {
+break
+}
// If we have a valid block number, check that it's potentially useful
-if notification.number > 0 {
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
blockAnnounceDropMeter.Mark(1)
break
}
-}
// All is well, schedule the announce if block's not yet downloading
if _, ok := f.fetching[notification.hash]; ok {
break


@@ -62,6 +62,7 @@ func BenchmarkBloomBits32k(b *testing.B) {
const benchFilterCnt = 2000
func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
+b.Skip("test disabled: this tests presume (and modify) an existing datadir.")
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
b.Log("Running bloombits benchmark section size:", sectionSize)
@@ -155,6 +156,7 @@ func clearBloomBits(db ethdb.Database) {
}
func BenchmarkNoBloomBits(b *testing.B) {
+b.Skip("test disabled: this tests presume (and modify) an existing datadir.")
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
b.Log("Running benchmark without bloombits")
db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false)


@@ -65,15 +65,19 @@ func BenchmarkFilters(b *testing.B) {
case 2403:
receipt := makeReceipt(addr1)
gen.AddUncheckedReceipt(receipt)
+gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
case 1034:
receipt := makeReceipt(addr2)
gen.AddUncheckedReceipt(receipt)
+gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
case 34:
receipt := makeReceipt(addr3)
gen.AddUncheckedReceipt(receipt)
+gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
case 99999:
receipt := makeReceipt(addr4)
gen.AddUncheckedReceipt(receipt)
+gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
}
})


@@ -107,10 +107,13 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke
signer = types.LatestSigner(gspec.Config)
)
config.LondonBlock = londonBlock
+config.ArrowGlacierBlock = londonBlock
engine := ethash.NewFaker()
db := rawdb.NewMemoryDatabase()
-genesis, _ := gspec.Commit(db)
+genesis, err := gspec.Commit(db)
+if err != nil {
+t.Fatal(err)
+}
// Generate testing blocks
blocks, _ := core.GenerateChain(gspec.Config, genesis, engine, db, testHead+1, func(i int, b *core.BlockGen) {
b.SetCoinbase(common.Address{1})


@@ -35,7 +35,17 @@ import (
// are attempted to be reexecuted to generate the desired state. The optional
// base layer statedb can be passed then it's regarded as the statedb of the
// parent block.
-func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (statedb *state.StateDB, err error) {
+// Parameters:
+// - block: The block for which we want the state (== state at the stateRoot of the parent)
+// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state
+// - base: If the caller is tracing multiple blocks, the caller can provide the parent state
+// continuously from the callsite.
+// - checklive: if true, then the live 'blockchain' state database is used. If the caller want to
+// perform Commit or other 'save-to-disk' changes, this should be set to false to avoid
+// storing trash persistently
+// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is provided,
+// it would be preferrable to start from a fresh state, if we have it on disk.
+func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) {
var (
current *types.Block
database state.Database
@@ -50,6 +60,15 @@ func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state
}
}
if base != nil {
+if preferDisk {
+// Create an ephemeral trie.Database for isolating the live one. Otherwise
+// the internal junks created by tracing will be persisted into the disk.
+database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16})
+if statedb, err = state.New(block.Root(), database, nil); err == nil {
+log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number())
+return statedb, nil
+}
+}
// The optional base statedb is given, mark the start point as parent block
statedb, database, report = base, base.Database(), false
current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)
@@ -152,7 +171,7 @@ func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec
}
// Lookup the statedb of parent block from the live database,
// otherwise regenerate it on the flight.
-statedb, err := eth.stateAtBlock(parent, reexec, nil, true)
+statedb, err := eth.stateAtBlock(parent, reexec, nil, true, false)
if err != nil {
return nil, vm.BlockContext{}, nil, err
}
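A sketch of how the two knobs are meant to be combined by callers; the reexec value and the parentState variable are illustrative and not part of this change:

// One-off lookup against the live database: no base state,
// checkLive=true, preferDisk=false.
statedb, err := eth.stateAtBlock(block, 128, nil, true, false)
if err != nil {
    return err
}

// Chain tracing: keep feeding the parent state forward, but ask stateAtBlock
// to fall back to a disk-backed root (via an ephemeral trie.Database) once one
// is available, instead of growing the in-memory trie database forever.
statedb, err = eth.stateAtBlock(block, 128, parentState, false, true)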


@@ -54,6 +54,13 @@ const (
// and reexecute to produce missing historical state necessary to run a specific
// trace.
defaultTraceReexec = uint64(128)
// defaultTracechainMemLimit is the size of the triedb, at which traceChain
// switches over and tries to use a disk-backed database instead of building
// on top of memory.
// For non-archive nodes, this limit _will_ be overblown, as disk-backed tries
// will only be found every ~15K blocks or so.
defaultTracechainMemLimit = common.StorageSize(500 * 1024 * 1024)
)
// Backend interface provides the common API services (that are provided by
@@ -68,7 +75,10 @@ type Backend interface {
ChainConfig() *params.ChainConfig
Engine() consensus.Engine
ChainDb() ethdb.Database
-StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error)
+// StateAtBlock returns the state corresponding to the stateroot of the block.
+// N.B: For executing transactions on block N, the required stateRoot is block N-1,
+// so this method should be called with the parent.
+StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, error)
StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error)
}
@@ -321,6 +331,7 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config
}
close(results)
}()
+var preferDisk bool
// Feed all the blocks both into the tracer, as well as fast process concurrently
for number = start.NumberU64(); number < end.NumberU64(); number++ {
// Stop tracing if interruption was requested
@@ -350,18 +361,24 @@
}
// Prepare the statedb for tracing. Don't use the live database for
// tracing to avoid persisting state junks into the database.
-statedb, err = api.backend.StateAtBlock(localctx, block, reexec, statedb, false)
+statedb, err = api.backend.StateAtBlock(localctx, block, reexec, statedb, false, preferDisk)
if err != nil {
failed = err
break
}
-if statedb.Database().TrieDB() != nil {
+if trieDb := statedb.Database().TrieDB(); trieDb != nil {
// Hold the reference for tracer, will be released at the final stage
-statedb.Database().TrieDB().Reference(block.Root(), common.Hash{})
+trieDb.Reference(block.Root(), common.Hash{})
// Release the parent state because it's already held by the tracer
if parent != (common.Hash{}) {
-statedb.Database().TrieDB().Dereference(parent)
+trieDb.Dereference(parent)
+}
+// Prefer disk if the trie db memory grows too much
+s1, s2 := trieDb.Size()
+if !preferDisk && (s1+s2) > defaultTracechainMemLimit {
+log.Info("Switching to prefer-disk mode for tracing", "size", s1+s2)
+preferDisk = true
}
}
parent = block.Root()
@@ -497,7 +514,7 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config
if config != nil && config.Reexec != nil {
reexec = *config.Reexec
}
-statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true)
+statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
if err != nil {
return nil, err
}
@@ -558,7 +575,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac
if config != nil && config.Reexec != nil {
reexec = *config.Reexec
}
-statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true)
+statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
if err != nil {
return nil, err
}
@@ -647,7 +664,7 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block
if config != nil && config.Reexec != nil {
reexec = *config.Reexec
}
-statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true)
+statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
if err != nil {
return nil, err
}
@@ -811,7 +828,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc
if config != nil && config.Reexec != nil {
reexec = *config.Reexec
}
-statedb, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true)
+statedb, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true, false)
if err != nil {
return nil, err
}
@@ -846,12 +863,14 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc
func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Context, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) {
// Assemble the structured logger or the JavaScript tracer
var (
-tracer vm.Tracer
+tracer vm.EVMLogger
err error
txContext = core.NewEVMTxContext(message)
)
switch {
-case config != nil && config.Tracer != nil:
+case config == nil:
+tracer = vm.NewStructLogger(nil)
+case config.Tracer != nil:
// Define a meaningful timeout of a single transaction trace
timeout := defaultTraceTimeout
if config.Timeout != nil {
@@ -864,11 +883,22 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Contex
tracer = tr(statedb)
} else {
// Constuct the JavaScript tracer to execute with
-if tracer, err = New(*config.Tracer, txctx); err != nil {
+if t, err := New(*config.Tracer, txctx); err != nil {
return nil, err
}
// Handle timeouts and RPC cancellations
deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
go func() {
<-deadlineCtx.Done()
if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) {
t.Stop(errors.New("execution timeout"))
}
}()
defer cancel()
tracer = t
}
// Handle timeouts and RPC cancellations
deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
go func() {
<-deadlineCtx.Done()
if deadlineCtx.Err() == context.DeadlineExceeded {
@@ -876,11 +906,6 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Contex
}
}()
defer cancel()
-}
-case config == nil:
-tracer = vm.NewStructLogger(nil)
default:
tracer = vm.NewStructLogger(config.LogConfig)
}
@@ -913,7 +938,7 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Contex
case interfaces.TracerResult:
return tracer.GetResult()
-case *Tracer:
+case Tracer:
return tracer.GetResult()
default:


@@ -138,7 +138,7 @@ func (b *testBackend) ChainDb() ethdb.Database {
return b.chaindb
}
-func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) {
+func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) {
statedb, err := b.chain.StateAt(block.Root())
if err != nil {
return nil, errStateNotFound
@@ -325,7 +325,7 @@ func TestOverriddenTraceCall(t *testing.T) {
tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, b.BaseFee(), nil), signer, accounts[0].key)
b.AddTx(tx)
}))
-randomAccounts, tracer := newAccounts(3), "callTracer"
+randomAccounts, tracer := newAccounts(3), "callTracerJs"
var testSuite = []struct {
blockNumber rpc.BlockNumber

File diff suppressed because one or more lines are too long

eth/tracers/native/call.go (new file, 170 lines)

@@ -0,0 +1,170 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package native
import (
"encoding/json"
"errors"
"math/big"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
)
func init() {
tracers.RegisterNativeTracer("callTracer", NewCallTracer)
}
type callFrame struct {
Type string `json:"type"`
From string `json:"from"`
To string `json:"to,omitempty"`
Value string `json:"value,omitempty"`
Gas string `json:"gas"`
GasUsed string `json:"gasUsed"`
Input string `json:"input"`
Output string `json:"output,omitempty"`
Error string `json:"error,omitempty"`
Calls []callFrame `json:"calls,omitempty"`
}
type callTracer struct {
callstack []callFrame
interrupt uint32 // Atomic flag to signal execution interruption
reason error // Textual reason for the interruption
}
// NewCallTracer returns a native go tracer which tracks
// call frames of a tx, and implements vm.EVMLogger.
func NewCallTracer() tracers.Tracer {
// First callframe contains tx context info
// and is populated on start and end.
t := &callTracer{callstack: make([]callFrame, 1)}
return t
}
func (t *callTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
t.callstack[0] = callFrame{
Type: "CALL",
From: addrToHex(from),
To: addrToHex(to),
Input: bytesToHex(input),
Gas: uintToHex(gas),
Value: bigToHex(value),
}
if create {
t.callstack[0].Type = "CREATE"
}
}
func (t *callTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) {
t.callstack[0].GasUsed = uintToHex(gasUsed)
if err != nil {
t.callstack[0].Error = err.Error()
if err.Error() == "execution reverted" && len(output) > 0 {
t.callstack[0].Output = bytesToHex(output)
}
} else {
t.callstack[0].Output = bytesToHex(output)
}
}
func (t *callTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
}
func (t *callTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) {
}
func (t *callTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
// Skip if tracing was interrupted
if atomic.LoadUint32(&t.interrupt) > 0 {
// TODO: env.Cancel()
return
}
call := callFrame{
Type: typ.String(),
From: addrToHex(from),
To: addrToHex(to),
Input: bytesToHex(input),
Gas: uintToHex(gas),
Value: bigToHex(value),
}
t.callstack = append(t.callstack, call)
}
func (t *callTracer) CaptureExit(output []byte, gasUsed uint64, err error) {
size := len(t.callstack)
if size <= 1 {
return
}
// pop call
call := t.callstack[size-1]
t.callstack = t.callstack[:size-1]
size -= 1
call.GasUsed = uintToHex(gasUsed)
if err == nil {
call.Output = bytesToHex(output)
} else {
call.Error = err.Error()
if call.Type == "CREATE" || call.Type == "CREATE2" {
call.To = ""
}
}
t.callstack[size-1].Calls = append(t.callstack[size-1].Calls, call)
}
func (t *callTracer) GetResult() (json.RawMessage, error) {
if len(t.callstack) != 1 {
return nil, errors.New("incorrect number of top-level calls")
}
res, err := json.Marshal(t.callstack[0])
if err != nil {
return nil, err
}
return json.RawMessage(res), t.reason
}
func (t *callTracer) Stop(err error) {
t.reason = err
atomic.StoreUint32(&t.interrupt, 1)
}
func bytesToHex(s []byte) string {
return "0x" + common.Bytes2Hex(s)
}
func bigToHex(n *big.Int) string {
if n == nil {
return ""
}
return "0x" + n.Text(16)
}
func uintToHex(n uint64) string {
return "0x" + strconv.FormatUint(n, 16)
}
func addrToHex(a common.Address) string {
return strings.ToLower(a.Hex())
}
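For context, a brief usage sketch of the tracer registered above: it is constructed through the tracers.New dispatcher introduced later in this diff and handed to the EVM via vm.Config, much as the test harness below does. The block context, transaction context, state and chain config values are assumed to be in scope:

tracer, err := tracers.New("callTracer", new(tracers.Context))
if err != nil {
    return err
}
evm := vm.NewEVM(blockCtx, txCtx, statedb, chainConfig, vm.Config{Debug: true, Tracer: tracer})
// ... execute the transaction with this EVM ...
res, err := tracer.GetResult() // JSON encoding of the top-level callFrame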


@@ -0,0 +1,46 @@
package native
import (
"encoding/json"
"math/big"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
)
func init() {
tracers.RegisterNativeTracer("noopTracerNative", NewNoopTracer)
}
type noopTracer struct{}
func NewNoopTracer() tracers.Tracer {
return &noopTracer{}
}
func (t *noopTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
}
func (t *noopTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) {
}
func (t *noopTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
}
func (t *noopTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) {
}
func (t *noopTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
}
func (t *noopTracer) CaptureExit(output []byte, gasUsed uint64, err error) {
}
func (t *noopTracer) GetResult() (json.RawMessage, error) {
return json.RawMessage(`{}`), nil
}
func (t *noopTracer) Stop(err error) {
}


@@ -0,0 +1,246 @@
package testing
import (
"encoding/json"
"io/ioutil"
"math/big"
"path/filepath"
"reflect"
"strings"
"testing"
"unicode"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests"
// Force-load the native, to trigger registration
_ "github.com/ethereum/go-ethereum/eth/tracers/native"
)
type callContext struct {
Number math.HexOrDecimal64 `json:"number"`
Difficulty *math.HexOrDecimal256 `json:"difficulty"`
Time math.HexOrDecimal64 `json:"timestamp"`
GasLimit math.HexOrDecimal64 `json:"gasLimit"`
Miner common.Address `json:"miner"`
}
// callTrace is the result of a callTracer run.
type callTrace struct {
Type string `json:"type"`
From common.Address `json:"from"`
To common.Address `json:"to"`
Input hexutil.Bytes `json:"input"`
Output hexutil.Bytes `json:"output"`
Gas *hexutil.Uint64 `json:"gas,omitempty"`
GasUsed *hexutil.Uint64 `json:"gasUsed,omitempty"`
Value *hexutil.Big `json:"value,omitempty"`
Error string `json:"error,omitempty"`
Calls []callTrace `json:"calls,omitempty"`
}
// callTracerTest defines a single test to check the call tracer against.
type callTracerTest struct {
Genesis *core.Genesis `json:"genesis"`
Context *callContext `json:"context"`
Input string `json:"input"`
Result *callTrace `json:"result"`
}
// Iterates over all the input-output datasets in the tracer test harness and
// runs the JavaScript tracers against them.
func TestCallTracerLegacy(t *testing.T) {
testCallTracer("callTracerLegacy", "call_tracer_legacy", t)
}
func TestCallTracerJs(t *testing.T) {
testCallTracer("callTracerJs", "call_tracer", t)
}
func TestCallTracerNative(t *testing.T) {
testCallTracer("callTracer", "call_tracer", t)
}
func testCallTracer(tracerName string, dirPath string, t *testing.T) {
files, err := ioutil.ReadDir(filepath.Join("..", "testdata", dirPath))
if err != nil {
t.Fatalf("failed to retrieve tracer test suite: %v", err)
}
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".json") {
continue
}
file := file // capture range variable
t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
t.Parallel()
var (
test = new(callTracerTest)
tx = new(types.Transaction)
)
// Call tracer test found, read if from disk
if blob, err := ioutil.ReadFile(filepath.Join("..", "testdata", dirPath, file.Name())); err != nil {
t.Fatalf("failed to read testcase: %v", err)
} else if err := json.Unmarshal(blob, test); err != nil {
t.Fatalf("failed to parse testcase: %v", err)
}
if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil {
t.Fatalf("failed to parse testcase input: %v", err)
}
// Configure a blockchain with the given prestate
var (
signer = types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)))
origin, _ = signer.Sender(tx)
txContext = vm.TxContext{
Origin: origin,
GasPrice: tx.GasPrice(),
}
context = vm.BlockContext{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: test.Context.Miner,
BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),
Time: new(big.Int).SetUint64(uint64(test.Context.Time)),
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
_, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
)
tracer, err := tracers.New(tracerName, new(tracers.Context))
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
msg, err := tx.AsMessage(signer, nil)
if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
}
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if _, err = st.TransitionDb(); err != nil {
t.Fatalf("failed to execute transaction: %v", err)
}
// Retrieve the trace result and compare against the etalon
res, err := tracer.GetResult()
if err != nil {
t.Fatalf("failed to retrieve trace result: %v", err)
}
ret := new(callTrace)
if err := json.Unmarshal(res, ret); err != nil {
t.Fatalf("failed to unmarshal trace result: %v", err)
}
if !jsonEqual(ret, test.Result) {
// uncomment this for easier debugging
//have, _ := json.MarshalIndent(ret, "", " ")
//want, _ := json.MarshalIndent(test.Result, "", " ")
//t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", string(have), string(want))
t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result)
}
})
}
}
// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to
// comparison
func jsonEqual(x, y interface{}) bool {
xTrace := new(callTrace)
yTrace := new(callTrace)
if xj, err := json.Marshal(x); err == nil {
json.Unmarshal(xj, xTrace)
} else {
return false
}
if yj, err := json.Marshal(y); err == nil {
json.Unmarshal(yj, yTrace)
} else {
return false
}
return reflect.DeepEqual(xTrace, yTrace)
}
// camel converts a snake cased input string into a camel cased output.
func camel(str string) string {
pieces := strings.Split(str, "_")
for i := 1; i < len(pieces); i++ {
pieces[i] = string(unicode.ToUpper(rune(pieces[i][0]))) + pieces[i][1:]
}
return strings.Join(pieces, "")
}
func BenchmarkTracers(b *testing.B) {
files, err := ioutil.ReadDir(filepath.Join("..", "testdata", "call_tracer"))
if err != nil {
b.Fatalf("failed to retrieve tracer test suite: %v", err)
}
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".json") {
continue
}
file := file // capture range variable
b.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(b *testing.B) {
blob, err := ioutil.ReadFile(filepath.Join("..", "testdata", "call_tracer", file.Name()))
if err != nil {
b.Fatalf("failed to read testcase: %v", err)
}
test := new(callTracerTest)
if err := json.Unmarshal(blob, test); err != nil {
b.Fatalf("failed to parse testcase: %v", err)
}
benchTracer("callTracerNative", test, b)
})
}
}
func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
// Configure a blockchain with the given prestate
tx := new(types.Transaction)
if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil {
b.Fatalf("failed to parse testcase input: %v", err)
}
signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)))
msg, err := tx.AsMessage(signer, nil)
if err != nil {
b.Fatalf("failed to prepare transaction for tracing: %v", err)
}
origin, _ := signer.Sender(tx)
txContext := vm.TxContext{
Origin: origin,
GasPrice: tx.GasPrice(),
}
context := vm.BlockContext{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: test.Context.Miner,
BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),
Time: new(big.Int).SetUint64(uint64(test.Context.Time)),
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
tracer, err := tracers.New(tracerName, new(tracers.Context))
if err != nil {
b.Fatalf("failed to create call tracer: %v", err)
}
evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
snap := statedb.Snapshot()
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if _, err = st.TransitionDb(); err != nil {
b.Fatalf("failed to execute transaction: %v", err)
}
if _, err = tracer.GetResult(); err != nil {
b.Fatal(err)
}
statedb.RevertToSnapshot(snap)
}
}


@@ -363,9 +363,9 @@ func (r *frameResult) pushObject(vm *duktape.Context) {
vm.PutPropString(obj, "getError")
}
-// Tracer provides an implementation of Tracer that evaluates a Javascript
+// jsTracer provides an implementation of Tracer that evaluates a Javascript
// function for each VM execution step.
-type Tracer struct {
+type jsTracer struct {
vm *duktape.Context // Javascript VM instance
tracerObject int // Stack index of the tracer JavaScript object
@@ -409,12 +409,8 @@ type Context struct {
// New instantiates a new tracer instance. code specifies a Javascript snippet,
// which must evaluate to an expression returning an object with 'step', 'fault'
// and 'result' functions.
-func New(code string, ctx *Context) (*Tracer, error) {
-// Resolve any tracers by name and assemble the tracer object
-if tracer, ok := tracer(code); ok {
-code = tracer
-}
-tracer := &Tracer{
+func newJsTracer(code string, ctx *Context) (*jsTracer, error) {
+tracer := &jsTracer{
vm: duktape.New(),
ctx: make(map[string]interface{}),
opWrapper: new(opWrapper),
@@ -553,17 +549,10 @@ func New(code string, ctx *Context) (*Tracer, error) {
tracer.vm.Pop()
hasExit := tracer.vm.GetPropString(tracer.tracerObject, "exit")
tracer.vm.Pop()
if hasEnter != hasExit {
return nil, fmt.Errorf("trace object must expose either both or none of enter() and exit()")
}
-if !hasStep {
-// If there's no step function, the enter and exit must be present
-if !hasEnter {
-return nil, fmt.Errorf("trace object must expose either step() or both enter() and exit()")
-}
-}
-tracer.traceCallFrames = hasEnter
+tracer.traceCallFrames = hasEnter && hasExit
tracer.traceSteps = hasStep
// Tracer is valid, inject the big int library to access large numbers
@@ -627,14 +616,14 @@ func New(code string, ctx *Context) (*Tracer, error) {
}
// Stop terminates execution of the tracer at the first opportune moment.
-func (jst *Tracer) Stop(err error) {
+func (jst *jsTracer) Stop(err error) {
jst.reason = err
atomic.StoreUint32(&jst.interrupt, 1)
}
// call executes a method on a JS object, catching any errors, formatting and
// returning them as error objects.
-func (jst *Tracer) call(noret bool, method string, args ...string) (json.RawMessage, error) {
+func (jst *jsTracer) call(noret bool, method string, args ...string) (json.RawMessage, error) {
// Execute the JavaScript call and return any error
jst.vm.PushString(method)
for _, arg := range args {
@@ -670,7 +659,7 @@ func wrapError(context string, err error) error {
}
// CaptureStart implements the Tracer interface to initialize the tracing operation.
-func (jst *Tracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
+func (jst *jsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
jst.ctx["type"] = "CALL"
if create {
jst.ctx["type"] = "CREATE"
@@ -700,7 +689,7 @@ func (jst *Tracer) CaptureStart(env *vm.EVM, from common.Address, to common.Addr
}
// CaptureState implements the Tracer interface to trace a single step of VM execution.
-func (jst *Tracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+func (jst *jsTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
if !jst.traceSteps {
return
}
@@ -736,7 +725,7 @@ func (jst *Tracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost
}
// CaptureFault implements the Tracer interface to trace an execution fault
-func (jst *Tracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
+func (jst *jsTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
if jst.err != nil {
return
}
@@ -750,7 +739,7 @@ func (jst *Tracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost
}
// CaptureEnd is called after the call finishes to finalize the tracing.
-func (jst *Tracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {
+func (jst *jsTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {
jst.ctx["output"] = output
jst.ctx["time"] = t.String()
jst.ctx["gasUsed"] = gasUsed
@@ -761,7 +750,7 @@ func (jst *Tracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, er
}
// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct).
-func (jst *Tracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
+func (jst *jsTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
if !jst.traceCallFrames {
return
}
@@ -791,7 +780,7 @@ func (jst *Tracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Ad
// CaptureExit is called when EVM exits a scope, even if the scope didn't
// execute any code.
-func (jst *Tracer) CaptureExit(output []byte, gasUsed uint64, err error) {
+func (jst *jsTracer) CaptureExit(output []byte, gasUsed uint64, err error) {
if !jst.traceCallFrames {
return
}
@@ -815,7 +804,7 @@ func (jst *Tracer) CaptureExit(output []byte, gasUsed uint64, err error) {
}
// GetResult calls the Javascript 'result' function and returns its value, or any accumulated error
-func (jst *Tracer) GetResult() (json.RawMessage, error) {
+func (jst *jsTracer) GetResult() (json.RawMessage, error) {
// Transform the context into a JavaScript object and inject into the state
obj := jst.vm.PushObject()
@@ -837,7 +826,7 @@ func (jst *Tracer) GetResult() (json.RawMessage, error) {
}
// addToObj pushes a field to a JS object.
-func (jst *Tracer) addToObj(obj int, key string, val interface{}) {
+func (jst *jsTracer) addToObj(obj int, key string, val interface{}) {
pushValue(jst.vm, val)
jst.vm.PutPropString(obj, key)
}


@@ -58,7 +58,7 @@ func testCtx() *vmContext {
return &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}}
}
-func runTrace(tracer *Tracer, vmctx *vmContext, chaincfg *params.ChainConfig) (json.RawMessage, error) {
+func runTrace(tracer Tracer, vmctx *vmContext, chaincfg *params.ChainConfig) (json.RawMessage, error) {
env := vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, chaincfg, vm.Config{Debug: true, Tracer: tracer})
var (
startGas uint64 = 10000
@@ -168,7 +168,7 @@ func TestHaltBetweenSteps(t *testing.T) {
// TestNoStepExec tests a regular value transfer (no exec), and accessing the statedb
// in 'result'
func TestNoStepExec(t *testing.T) {
-runEmptyTrace := func(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) {
+runEmptyTrace := func(tracer Tracer, vmctx *vmContext) (json.RawMessage, error) {
env := vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
startGas := uint64(10000)
contract := vm.NewContract(account{}, account{}, big.NewInt(0), startGas)

View File

@ -18,14 +18,53 @@
package tracers package tracers
import ( import (
"encoding/json"
"strings" "strings"
"unicode" "unicode"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers/internal/tracers" "github.com/ethereum/go-ethereum/eth/tracers/internal/tracers"
) )
// all contains all the built in JavaScript tracers by name. // Tracer interface extends vm.EVMLogger and additionally
var all = make(map[string]string) // allows collecting the tracing result.
type Tracer interface {
vm.EVMLogger
GetResult() (json.RawMessage, error)
// Stop terminates execution of the tracer at the first opportune moment.
Stop(err error)
}
var (
nativeTracers map[string]func() Tracer = make(map[string]func() Tracer)
jsTracers = make(map[string]string)
)
// RegisterNativeTracer makes native tracers which adhere
// to the `Tracer` interface available to the rest of the codebase.
// It is typically invoked in the `init()` function, e.g. see `native/call.go`.
func RegisterNativeTracer(name string, ctor func() Tracer) {
nativeTracers[name] = ctor
}
// New returns a new instance of a tracer,
// 1. If 'code' is the name of a registered native tracer, then that tracer
// is instantiated and returned
// 2. If 'code' is the name of a registered js-tracer, then that tracer is
// instantiated and returned
// 3. Otherwise, the code is interpreted as the js code of a js-tracer, and
// is evaluated and returned.
func New(code string, ctx *Context) (Tracer, error) {
// Resolve native tracer
if fn, ok := nativeTracers[code]; ok {
return fn(), nil
}
// Resolve js-tracers by name and assemble the tracer object
if tracer, ok := jsTracers[code]; ok {
code = tracer
}
return newJsTracer(code, ctx)
}
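As an aside on the resolution order documented above: a minimal sketch of how a caller outside the package might obtain a tracer through New, assuming the usual github.com/ethereum/go-ethereum/eth/tracers import path; the inline JS body is a hypothetical no-op tracer, not one of the bundled assets.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/tracers"
)

func main() {
	// Cases 1/2: a registered name resolves to a native tracer if one was
	// installed via RegisterNativeTracer, otherwise to the bundled JS asset.
	byName, err := tracers.New("callTracer", new(tracers.Context))
	if err != nil {
		panic(err)
	}
	fmt.Printf("resolved by name: %T\n", byName)

	// Case 3: anything else is treated as raw JS source for a js-tracer.
	js := "{step: function(log, db) {}, fault: function(log, db) {}, result: function(ctx, db) { return {}; }}"
	byCode, err := tracers.New(js, new(tracers.Context))
	if err != nil {
		panic(err)
	}
	fmt.Printf("resolved from source: %T\n", byCode)
}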
// camel converts a snake cased input string into a camel cased output. // camel converts a snake cased input string into a camel cased output.
func camel(str string) string { func camel(str string) string {
@ -40,14 +79,6 @@ func camel(str string) string {
func init() { func init() {
for _, file := range tracers.AssetNames() { for _, file := range tracers.AssetNames() {
name := camel(strings.TrimSuffix(file, ".js")) name := camel(strings.TrimSuffix(file, ".js"))
all[name] = string(tracers.MustAsset(file)) jsTracers[name] = string(tracers.MustAsset(file))
} }
} }
// tracer retrieves a specific JavaScript tracer by name.
func tracer(name string) (string, bool) {
if tracer, ok := all[name]; ok {
return tracer, true
}
return "", false
}

View File

@ -20,23 +20,18 @@ import (
"crypto/ecdsa" "crypto/ecdsa"
"crypto/rand" "crypto/rand"
"encoding/json" "encoding/json"
"io/ioutil"
"math/big" "math/big"
"path/filepath"
"reflect" "reflect"
"strings"
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/tests" "github.com/ethereum/go-ethereum/tests"
) )
@ -104,20 +99,83 @@ type callTrace struct {
Calls []callTrace `json:"calls,omitempty"` Calls []callTrace `json:"calls,omitempty"`
} }
type callContext struct { // TestZeroValueToNotExitCall tests the calltracer(s) on the following:
Number math.HexOrDecimal64 `json:"number"` // Tx to A, A calls B with zero value. B does not already exist.
Difficulty *math.HexOrDecimal256 `json:"difficulty"` // Expected: that enter/exit is invoked and the inner call is shown in the result
Time math.HexOrDecimal64 `json:"timestamp"` func TestZeroValueToNotExitCall(t *testing.T) {
GasLimit math.HexOrDecimal64 `json:"gasLimit"` var to = common.HexToAddress("0x00000000000000000000000000000000deadbeef")
Miner common.Address `json:"miner"` privkey, err := crypto.HexToECDSA("0000000000000000deadbeef00000000000000000000000000000000deadbeef")
if err != nil {
t.Fatalf("err %v", err)
}
signer := types.NewEIP155Signer(big.NewInt(1))
tx, err := types.SignNewTx(privkey, signer, &types.LegacyTx{
GasPrice: big.NewInt(0),
Gas: 50000,
To: &to,
})
if err != nil {
t.Fatalf("err %v", err)
}
origin, _ := signer.Sender(tx)
txContext := vm.TxContext{
Origin: origin,
GasPrice: big.NewInt(1),
}
context := vm.BlockContext{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: common.Address{},
BlockNumber: new(big.Int).SetUint64(8000000),
Time: new(big.Int).SetUint64(5),
Difficulty: big.NewInt(0x30000),
GasLimit: uint64(6000000),
}
var code = []byte{
byte(vm.PUSH1), 0x0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), // in and outs zero
byte(vm.DUP1), byte(vm.PUSH1), 0xff, byte(vm.GAS), // value=0,address=0xff, gas=GAS
byte(vm.CALL),
}
var alloc = core.GenesisAlloc{
to: core.GenesisAccount{
Nonce: 1,
Code: code,
},
origin: core.GenesisAccount{
Nonce: 0,
Balance: big.NewInt(500000000000000),
},
}
_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)
// Create the tracer, the EVM environment and run it
tracer, err := New("callTracerJs", new(Context))
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer})
msg, err := tx.AsMessage(signer, nil)
if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
}
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if _, err = st.TransitionDb(); err != nil {
t.Fatalf("failed to execute transaction: %v", err)
}
// Retrieve the trace result and compare against the etalon
res, err := tracer.GetResult()
if err != nil {
t.Fatalf("failed to retrieve trace result: %v", err)
}
have := new(callTrace)
if err := json.Unmarshal(res, have); err != nil {
t.Fatalf("failed to unmarshal trace result: %v", err)
}
wantStr := `{"type":"CALL","from":"0x682a80a6f560eec50d54e63cbeda1c324c5f8d1b","to":"0x00000000000000000000000000000000deadbeef","value":"0x0","gas":"0x7148","gasUsed":"0x2d0","input":"0x","output":"0x","calls":[{"type":"CALL","from":"0x00000000000000000000000000000000deadbeef","to":"0x00000000000000000000000000000000000000ff","value":"0x0","gas":"0x6cbf","gasUsed":"0x0","input":"0x","output":"0x"}]}`
want := new(callTrace)
json.Unmarshal([]byte(wantStr), want)
if !jsonEqual(have, want) {
t.Error("have != want")
} }
// callTracerTest defines a single test to check the call tracer against.
type callTracerTest struct {
Genesis *core.Genesis `json:"genesis"`
Context *callContext `json:"context"`
Input string `json:"input"`
Result *callTrace `json:"result"`
} }
func TestPrestateTracerCreate2(t *testing.T) { func TestPrestateTracerCreate2(t *testing.T) {
@ -201,96 +259,6 @@ func TestPrestateTracerCreate2(t *testing.T) {
} }
} }
// Iterates over all the input-output datasets in the tracer test harness and
// runs the JavaScript tracers against them.
func TestCallTracerLegacy(t *testing.T) {
testCallTracer("callTracerLegacy", "call_tracer_legacy", t)
}
func testCallTracer(tracer string, dirPath string, t *testing.T) {
files, err := ioutil.ReadDir(filepath.Join("testdata", dirPath))
if err != nil {
t.Fatalf("failed to retrieve tracer test suite: %v", err)
}
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".json") {
continue
}
file := file // capture range variable
t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) {
t.Parallel()
// Call tracer test found, read it from disk
blob, err := ioutil.ReadFile(filepath.Join("testdata", dirPath, file.Name()))
if err != nil {
t.Fatalf("failed to read testcase: %v", err)
}
test := new(callTracerTest)
if err := json.Unmarshal(blob, test); err != nil {
t.Fatalf("failed to parse testcase: %v", err)
}
// Configure a blockchain with the given prestate
tx := new(types.Transaction)
if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil {
t.Fatalf("failed to parse testcase input: %v", err)
}
signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)))
origin, _ := signer.Sender(tx)
txContext := vm.TxContext{
Origin: origin,
GasPrice: tx.GasPrice(),
}
context := vm.BlockContext{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: test.Context.Miner,
BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),
Time: new(big.Int).SetUint64(uint64(test.Context.Time)),
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
// Create the tracer, the EVM environment and run it
tracer, err := New(tracer, new(Context))
if err != nil {
t.Fatalf("failed to create call tracer: %v", err)
}
evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
msg, err := tx.AsMessage(signer, nil)
if err != nil {
t.Fatalf("failed to prepare transaction for tracing: %v", err)
}
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if _, err = st.TransitionDb(); err != nil {
t.Fatalf("failed to execute transaction: %v", err)
}
// Retrieve the trace result and compare against the etalon
res, err := tracer.GetResult()
if err != nil {
t.Fatalf("failed to retrieve trace result: %v", err)
}
ret := new(callTrace)
if err := json.Unmarshal(res, ret); err != nil {
t.Fatalf("failed to unmarshal trace result: %v", err)
}
if !jsonEqual(ret, test.Result) {
// uncomment this for easier debugging
//have, _ := json.MarshalIndent(ret, "", " ")
//want, _ := json.MarshalIndent(test.Result, "", " ")
//t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", string(have), string(want))
t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result)
}
})
}
}
func TestCallTracer(t *testing.T) {
testCallTracer("callTracer", "call_tracer", t)
}
// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to // jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to
// comparison // comparison
func jsonEqual(x, y interface{}) bool { func jsonEqual(x, y interface{}) bool {
@ -337,6 +305,7 @@ func BenchmarkTransactionTrace(b *testing.B) {
Time: new(big.Int).SetUint64(uint64(5)), Time: new(big.Int).SetUint64(uint64(5)),
Difficulty: big.NewInt(0xffffffff), Difficulty: big.NewInt(0xffffffff),
GasLimit: gas, GasLimit: gas,
BaseFee: big.NewInt(8),
} }
alloc := core.GenesisAlloc{} alloc := core.GenesisAlloc{}
// The code pushes 'deadbeef' into memory, then the other params, and calls CREATE2, then returns // The code pushes 'deadbeef' into memory, then the other params, and calls CREATE2, then returns
@ -386,73 +355,3 @@ func BenchmarkTransactionTrace(b *testing.B) {
tracer.Reset() tracer.Reset()
} }
} }
func BenchmarkTracers(b *testing.B) {
files, err := ioutil.ReadDir(filepath.Join("testdata", "call_tracer"))
if err != nil {
b.Fatalf("failed to retrieve tracer test suite: %v", err)
}
for _, file := range files {
if !strings.HasSuffix(file.Name(), ".json") {
continue
}
file := file // capture range variable
b.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(b *testing.B) {
blob, err := ioutil.ReadFile(filepath.Join("testdata", "call_tracer", file.Name()))
if err != nil {
b.Fatalf("failed to read testcase: %v", err)
}
test := new(callTracerTest)
if err := json.Unmarshal(blob, test); err != nil {
b.Fatalf("failed to parse testcase: %v", err)
}
benchTracer("callTracer", test, b)
})
}
}
func benchTracer(tracerName string, test *callTracerTest, b *testing.B) {
// Configure a blockchain with the given prestate
tx := new(types.Transaction)
if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil {
b.Fatalf("failed to parse testcase input: %v", err)
}
signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)))
msg, err := tx.AsMessage(signer, nil)
if err != nil {
b.Fatalf("failed to prepare transaction for tracing: %v", err)
}
origin, _ := signer.Sender(tx)
txContext := vm.TxContext{
Origin: origin,
GasPrice: tx.GasPrice(),
}
context := vm.BlockContext{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
Coinbase: test.Context.Miner,
BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),
Time: new(big.Int).SetUint64(uint64(test.Context.Time)),
Difficulty: (*big.Int)(test.Context.Difficulty),
GasLimit: uint64(test.Context.GasLimit),
}
_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)
// Create the tracer, the EVM environment and run it
tracer, err := New(tracerName, new(Context))
if err != nil {
b.Fatalf("failed to create call tracer: %v", err)
}
evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
snap := statedb.Snapshot()
st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))
if _, err = st.TransitionDb(); err != nil {
b.Fatalf("failed to execute transaction: %v", err)
}
statedb.RevertToSnapshot(snap)
}
}

View File

@ -60,7 +60,7 @@ func (ec *Client) Close() {
// Blockchain Access // Blockchain Access
// ChainId retrieves the current chain ID for transaction replay protection. // ChainID retrieves the current chain ID for transaction replay protection.
func (ec *Client) ChainID(ctx context.Context) (*big.Int, error) { func (ec *Client) ChainID(ctx context.Context) (*big.Int, error) {
var result hexutil.Big var result hexutil.Big
err := ec.c.CallContext(ctx, &result, "eth_chainId") err := ec.c.CallContext(ctx, &result, "eth_chainId")
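As an aside on the renamed doc comment: a minimal sketch of fetching the chain ID and deriving a replay-protected signer from it; the endpoint URL is a placeholder and error handling is kept minimal.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	chainID, err := client.ChainID(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// The chain ID feeds the EIP-155-aware signer used for replay protection.
	signer := types.LatestSignerForChainID(chainID)
	fmt.Println("chain ID:", chainID, "signer type:", fmt.Sprintf("%T", signer))
}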

View File

@ -76,12 +76,12 @@ type AncientReader interface {
// Ancient retrieves an ancient binary blob from the append-only immutable files. // Ancient retrieves an ancient binary blob from the append-only immutable files.
Ancient(kind string, number uint64) ([]byte, error) Ancient(kind string, number uint64) ([]byte, error)
// ReadAncients retrieves multiple items in sequence, starting from the index 'start'. // AncientRange retrieves multiple items in sequence, starting from the index 'start'.
// It will return // It will return
// - at most 'count' items, // - at most 'count' items,
// - at least 1 item (even if exceeding the maxBytes), but will otherwise // - at least 1 item (even if exceeding the maxBytes), but will otherwise
// return as many items as fit into maxBytes. // return as many items as fit into maxBytes.
ReadAncients(kind string, start, count, maxBytes uint64) ([][]byte, error) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error)
// Ancients returns the ancient item numbers in the ancient store. // Ancients returns the ancient item numbers in the ancient store.
Ancients() (uint64, error) Ancients() (uint64, error)
@ -90,6 +90,15 @@ type AncientReader interface {
AncientSize(kind string) (uint64, error) AncientSize(kind string) (uint64, error)
} }
// AncientBatchReader is the interface for 'batched' or 'atomic' reading.
type AncientBatchReader interface {
AncientReader
// ReadAncients runs the given read operation while ensuring that no writes take place
// on the underlying freezer.
ReadAncients(fn func(AncientReader) error) (err error)
}
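As an aside on the two interfaces above: a sketch of how the atomic ReadAncients wrapper and the renamed AncientRange might be combined by a caller to page items out of the freezer without writes interleaving; the "headers" table name and the helper itself are illustrative assumptions.

package example

import "github.com/ethereum/go-ethereum/ethdb"

// readHeaderRange pages RLP blobs out of the ancient store. The closure runs
// under ReadAncients, which guarantees that no writes (e.g. tail truncation)
// take place between the individual AncientRange calls.
func readHeaderRange(db ethdb.AncientBatchReader, start, count uint64) ([][]byte, error) {
	var out [][]byte
	err := db.ReadAncients(func(reader ethdb.AncientReader) error {
		for count > 0 {
			// At most 'count' items, at least one, capped at ~1MB per call.
			blobs, err := reader.AncientRange("headers", start, count, 1024*1024)
			if err != nil {
				return err
			}
			if len(blobs) == 0 {
				return nil // nothing more stored
			}
			out = append(out, blobs...)
			start += uint64(len(blobs))
			count -= uint64(len(blobs))
		}
		return nil
	})
	return out, err
}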
// AncientWriter contains the methods required to write to immutable ancient data. // AncientWriter contains the methods required to write to immutable ancient data.
type AncientWriter interface { type AncientWriter interface {
// ModifyAncients runs a write operation on the ancient store. // ModifyAncients runs a write operation on the ancient store.
@ -117,7 +126,7 @@ type AncientWriteOp interface {
// immutable ancient data. // immutable ancient data.
type Reader interface { type Reader interface {
KeyValueReader KeyValueReader
AncientReader AncientBatchReader
} }
// Writer contains the methods required to write data to both key-value as well as // Writer contains the methods required to write data to both key-value as well as
@ -130,7 +139,7 @@ type Writer interface {
// AncientStore contains all the methods required to allow handling different // AncientStore contains all the methods required to allow handling different
// ancient data stores backing immutable chain data store. // ancient data stores backing immutable chain data store.
type AncientStore interface { type AncientStore interface {
AncientReader AncientBatchReader
AncientWriter AncientWriter
io.Closer io.Closer
} }

View File

@ -455,7 +455,7 @@ type batch struct {
// Put inserts the given value into the batch for later committing. // Put inserts the given value into the batch for later committing.
func (b *batch) Put(key, value []byte) error { func (b *batch) Put(key, value []byte) error {
b.b.Put(key, value) b.b.Put(key, value)
b.size += len(value) b.size += len(key) + len(value)
return nil return nil
} }

View File

@ -204,7 +204,7 @@ type batch struct {
// Put inserts the given value into the batch for later committing. // Put inserts the given value into the batch for later committing.
func (b *batch) Put(key, value []byte) error { func (b *batch) Put(key, value []byte) error {
b.writes = append(b.writes, keyvalue{common.CopyBytes(key), common.CopyBytes(value), false}) b.writes = append(b.writes, keyvalue{common.CopyBytes(key), common.CopyBytes(value), false})
b.size += len(value) b.size += len(key) + len(value)
return nil return nil
} }
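As an aside on why the batch size now includes keys: callers typically compare ValueSize against ethdb.IdealBatchSize to decide when to flush, so counting only values let batches grow past the intended footprint. A minimal sketch of that flush pattern (the helper and its inputs are hypothetical):

package example

import "github.com/ethereum/go-ethereum/ethdb"

// writeAll buffers key/value pairs and flushes whenever the accounted batch
// size (now keys plus values) crosses the ideal threshold.
func writeAll(db ethdb.Batcher, kvs map[string][]byte) error {
	batch := db.NewBatch()
	for k, v := range kvs {
		if err := batch.Put([]byte(k), v); err != nil {
			return err
		}
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
	}
	return batch.Write()
}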

View File

@ -324,7 +324,7 @@ func (b *LesApiBackend) CurrentHeader() *types.Header {
return b.eth.blockchain.CurrentHeader() return b.eth.blockchain.CurrentHeader()
} }
func (b *LesApiBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) { func (b *LesApiBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) {
return b.eth.stateAtBlock(ctx, block, reexec) return b.eth.stateAtBlock(ctx, block, reexec)
} }

View File

@ -88,7 +88,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideLondon) chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideArrowGlacier)
if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat { if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {
return nil, genesisErr return nil, genesisErr
} }

View File

@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params"
) )
// verifyImportEvent verifies that one single event arrive on an import channel. // verifyImportEvent verifies that one single event arrive on an import channel.
@ -247,7 +248,7 @@ func testInvalidAnnounces(t *testing.T, protocol int) {
// Prepare announcement by latest header. // Prepare announcement by latest header.
headerOne := s.backend.Blockchain().GetHeaderByNumber(1) headerOne := s.backend.Blockchain().GetHeaderByNumber(1)
hash, number := headerOne.Hash(), headerOne.Number.Uint64() hash, number := headerOne.Hash(), headerOne.Number.Uint64()
td := big.NewInt(200) // bad td td := big.NewInt(params.GenesisDifficulty.Int64() + 200) // bad td
// Sign the announcement if necessary. // Sign the announcement if necessary.
announce := announceData{hash, number, td, 0, nil} announce := announceData{hash, number, td, 0, nil}

View File

@ -19,6 +19,7 @@ package client
import ( import (
"math/rand" "math/rand"
"strconv" "strconv"
"sync"
"sync/atomic" "sync/atomic"
"testing" "testing"
"time" "time"
@ -52,7 +53,7 @@ func testNodeIndex(id enode.ID) int {
type ServerPoolTest struct { type ServerPoolTest struct {
db ethdb.KeyValueStore db ethdb.KeyValueStore
clock *mclock.Simulated clock *mclock.Simulated
quit chan struct{} quit chan chan struct{}
preNeg, preNegFail bool preNeg, preNegFail bool
vt *ValueTracker vt *ValueTracker
sp *ServerPool sp *ServerPool
@ -62,6 +63,8 @@ type ServerPoolTest struct {
trusted []string trusted []string
waitCount, waitEnded int32 waitCount, waitEnded int32
lock sync.Mutex
cycle, conn, servedConn int cycle, conn, servedConn int
serviceCycles, dialCount int serviceCycles, dialCount int
disconnect map[int][]int disconnect map[int][]int
@ -112,7 +115,9 @@ func (s *ServerPoolTest) start() {
testQuery = func(node *enode.Node) int { testQuery = func(node *enode.Node) int {
idx := testNodeIndex(node.ID()) idx := testNodeIndex(node.ID())
n := &s.testNodes[idx] n := &s.testNodes[idx]
s.lock.Lock()
canConnect := !n.connected && n.connectCycles != 0 && s.cycle >= n.nextConnCycle canConnect := !n.connected && n.connectCycles != 0 && s.cycle >= n.nextConnCycle
s.lock.Unlock()
if s.preNegFail { if s.preNegFail {
// simulate a scenario where UDP queries never work // simulate a scenario where UDP queries never work
s.beginWait() s.beginWait()
@ -155,7 +160,7 @@ func (s *ServerPoolTest) start() {
s.sp.unixTime = func() int64 { return int64(s.clock.Now()) / int64(time.Second) } s.sp.unixTime = func() int64 { return int64(s.clock.Now()) / int64(time.Second) }
s.disconnect = make(map[int][]int) s.disconnect = make(map[int][]int)
s.sp.Start() s.sp.Start()
s.quit = make(chan struct{}) s.quit = make(chan chan struct{})
go func() { go func() {
last := int32(-1) last := int32(-1)
for { for {
@ -167,7 +172,8 @@ func (s *ServerPoolTest) start() {
s.clock.Run(time.Second) s.clock.Run(time.Second)
} }
last = c last = c
case <-s.quit: case quit := <-s.quit:
close(quit)
return return
} }
} }
@ -175,7 +181,9 @@ func (s *ServerPoolTest) start() {
} }
func (s *ServerPoolTest) stop() { func (s *ServerPoolTest) stop() {
close(s.quit) quit := make(chan struct{})
s.quit <- quit
<-quit
s.sp.Stop() s.sp.Stop()
s.spi.Close() s.spi.Close()
for i := range s.testNodes { for i := range s.testNodes {
@ -234,7 +242,9 @@ func (s *ServerPoolTest) run() {
} }
s.serviceCycles += s.servedConn s.serviceCycles += s.servedConn
s.clock.Run(time.Second) s.clock.Run(time.Second)
s.lock.Lock()
s.cycle++ s.cycle++
s.lock.Unlock()
} }
} }
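As an aside on the quit channel change above: sending an acknowledgement channel and waiting for it to be closed means stop() cannot return while the clock-driving goroutine is still mid-iteration. A self-contained sketch of the handshake, independent of the test harness:

package main

import (
	"fmt"
	"time"
)

type runner struct {
	quit chan chan struct{}
}

func (r *runner) start() {
	r.quit = make(chan chan struct{})
	go func() {
		for {
			select {
			case ack := <-r.quit:
				close(ack) // signal that the loop has fully unwound
				return
			default:
				time.Sleep(10 * time.Millisecond) // stand-in for real work
			}
		}
	}()
}

func (r *runner) stop() {
	ack := make(chan struct{})
	r.quit <- ack
	<-ack // returns only once the goroutine has exited
}

func main() {
	r := new(runner)
	r.start()
	r.stop()
	fmt.Println("stopped cleanly")
}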

View File

@ -50,7 +50,7 @@ type NodeValueTracker struct {
lastTransfer mclock.AbsTime lastTransfer mclock.AbsTime
basket serverBasket basket serverBasket
reqCosts []uint64 reqCosts []uint64
reqValues *[]float64 reqValues []float64
} }
// UpdateCosts updates the node value tracker's request cost table // UpdateCosts updates the node value tracker's request cost table
@ -58,14 +58,14 @@ func (nv *NodeValueTracker) UpdateCosts(reqCosts []uint64) {
nv.vt.lock.Lock() nv.vt.lock.Lock()
defer nv.vt.lock.Unlock() defer nv.vt.lock.Unlock()
nv.updateCosts(reqCosts, &nv.vt.refBasket.reqValues, nv.vt.refBasket.reqValueFactor(reqCosts)) nv.updateCosts(reqCosts, nv.vt.refBasket.reqValues, nv.vt.refBasket.reqValueFactor(reqCosts))
} }
// updateCosts updates the request cost table of the server. The request value factor // updateCosts updates the request cost table of the server. The request value factor
// is also updated based on the given cost table and the current reference basket. // is also updated based on the given cost table and the current reference basket.
// Note that the contents of the referenced reqValues slice will not change; a new // Note that the contents of the referenced reqValues slice will not change; a new
// reference is passed if the values are updated by ValueTracker. // reference is passed if the values are updated by ValueTracker.
func (nv *NodeValueTracker) updateCosts(reqCosts []uint64, reqValues *[]float64, rvFactor float64) { func (nv *NodeValueTracker) updateCosts(reqCosts []uint64, reqValues []float64, rvFactor float64) {
nv.lock.Lock() nv.lock.Lock()
defer nv.lock.Unlock() defer nv.lock.Unlock()
@ -112,7 +112,7 @@ func (nv *NodeValueTracker) Served(reqs []ServedRequest, respTime time.Duration)
var value float64 var value float64
for _, r := range reqs { for _, r := range reqs {
nv.basket.add(r.ReqType, r.Amount, nv.reqCosts[r.ReqType]*uint64(r.Amount), expFactor) nv.basket.add(r.ReqType, r.Amount, nv.reqCosts[r.ReqType]*uint64(r.Amount), expFactor)
value += (*nv.reqValues)[r.ReqType] * float64(r.Amount) value += nv.reqValues[r.ReqType] * float64(r.Amount)
} }
nv.rtStats.Add(respTime, value, expFactor) nv.rtStats.Add(respTime, value, expFactor)
} }
@ -356,7 +356,7 @@ func (vt *ValueTracker) Register(id enode.ID) *NodeValueTracker {
reqTypeCount := len(vt.refBasket.reqValues) reqTypeCount := len(vt.refBasket.reqValues)
nv.reqCosts = make([]uint64, reqTypeCount) nv.reqCosts = make([]uint64, reqTypeCount)
nv.lastTransfer = vt.clock.Now() nv.lastTransfer = vt.clock.Now()
nv.reqValues = &vt.refBasket.reqValues nv.reqValues = vt.refBasket.reqValues
nv.basket.init(reqTypeCount) nv.basket.init(reqTypeCount)
vt.connected[id] = nv vt.connected[id] = nv
@ -476,7 +476,7 @@ func (vt *ValueTracker) periodicUpdate() {
vt.refBasket.normalize() vt.refBasket.normalize()
vt.refBasket.updateReqValues() vt.refBasket.updateReqValues()
for _, nv := range vt.connected { for _, nv := range vt.connected {
nv.updateCosts(nv.reqCosts, &vt.refBasket.reqValues, vt.refBasket.reqValueFactor(nv.reqCosts)) nv.updateCosts(nv.reqCosts, vt.refBasket.reqValues, vt.refBasket.reqValueFactor(nv.reqCosts))
} }
vt.saveToDb() vt.saveToDb()
} }
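As an aside on dropping the *[]float64 indirection: a pointer to a slice is only needed when consumers must observe the producer re-slicing or reallocating in place; here the ValueTracker never mutates the old backing array and instead hands each node a fresh slice through updateCosts, so a plain slice header copy is sufficient. A toy illustration:

package main

import "fmt"

func main() {
	current := []float64{1, 2, 3}
	node := current // plain header copy, shares the (never-mutated) backing array

	// The producer builds a brand-new slice and passes the new header on
	// explicitly, mirroring updateCosts receiving the fresh reqValues.
	current = []float64{10, 20, 30}
	node = current

	fmt.Println(node[0]) // 10
}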

View File

@ -223,8 +223,9 @@ func (bt *balanceTracker) BalanceOperation(id enode.ID, connAddress string, cb f
var nb *nodeBalance var nb *nodeBalance
if node := bt.ns.GetNode(id); node != nil { if node := bt.ns.GetNode(id); node != nil {
nb, _ = bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance) nb, _ = bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance)
} else { }
node = enode.SignNull(&enr.Record{}, id) if nb == nil {
node := enode.SignNull(&enr.Record{}, id)
nb = bt.newNodeBalance(node, connAddress, false) nb = bt.newNodeBalance(node, connAddress, false)
} }
cb(nb) cb(nb)

View File

@ -593,6 +593,9 @@ func (w *worker) taskLoop() {
if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil { if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil {
log.Warn("Block sealing failed", "err", err) log.Warn("Block sealing failed", "err", err)
w.pendingMu.Lock()
delete(w.pendingTasks, sealHash)
w.pendingMu.Unlock()
} }
case <-w.exitCh: case <-w.exitCh:
interrupt() interrupt()
@ -632,17 +635,23 @@ func (w *worker) resultLoop() {
receipts = make([]*types.Receipt, len(task.receipts)) receipts = make([]*types.Receipt, len(task.receipts))
logs []*types.Log logs []*types.Log
) )
for i, receipt := range task.receipts { for i, taskReceipt := range task.receipts {
receipt := new(types.Receipt)
receipts[i] = receipt
*receipt = *taskReceipt
// add block location fields // add block location fields
receipt.BlockHash = hash receipt.BlockHash = hash
receipt.BlockNumber = block.Number() receipt.BlockNumber = block.Number()
receipt.TransactionIndex = uint(i) receipt.TransactionIndex = uint(i)
receipts[i] = new(types.Receipt)
*receipts[i] = *receipt
// Update the block hash in all logs since it is now available and not when the // Update the block hash in all logs since it is now available and not when the
// receipt/log of individual transactions were created. // receipt/log of individual transactions were created.
for _, log := range receipt.Logs { receipt.Logs = make([]*types.Log, len(taskReceipt.Logs))
for i, taskLog := range taskReceipt.Logs {
log := new(types.Log)
receipt.Logs[i] = log
*log = *taskLog
log.BlockHash = hash log.BlockHash = hash
} }
logs = append(logs, receipt.Logs...) logs = append(logs, receipt.Logs...)
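As an aside on the copying above: the receipts and logs held in the task are shared with other parts of the system, so the loop now fills the block-location fields on deep copies rather than mutating the shared values. A generic sketch of the copy-before-annotate pattern, with a hypothetical Record type standing in for the receipt:

package main

import "fmt"

type Record struct {
	BlockHash string
	Index     uint
}

// annotate returns detached copies with location fields filled in, leaving the
// shared originals untouched so concurrent readers never see partial updates.
func annotate(shared []*Record, hash string) []*Record {
	out := make([]*Record, len(shared))
	for i, src := range shared {
		cpy := new(Record)
		*cpy = *src // copy the value, detaching it from the shared pointer
		cpy.BlockHash = hash
		cpy.Index = uint(i)
		out[i] = cpy
	}
	return out
}

func main() {
	shared := []*Record{{}, {}}
	annotated := annotate(shared, "0xabc")
	fmt.Println(shared[0].BlockHash, annotated[0].BlockHash) // originals stay empty
}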

View File

@ -165,6 +165,13 @@ func NewNode(datadir string, config *NodeConfig) (stack *Node, _ error) {
config.EthereumNetworkID = 3 config.EthereumNetworkID = 3
} }
} }
// If we have the Sepolia testnet, hard code the chain configs too
if config.EthereumGenesis == SepoliaGenesis() {
genesis.Config = params.SepoliaChainConfig
if config.EthereumNetworkID == 1 {
config.EthereumNetworkID = 11155111
}
}
// If we have the Rinkeby testnet, hard code the chain configs too // If we have the Rinkeby testnet, hard code the chain configs too
if config.EthereumGenesis == RinkebyGenesis() { if config.EthereumGenesis == RinkebyGenesis() {
genesis.Config = params.RinkebyChainConfig genesis.Config = params.RinkebyChainConfig

View File

@ -41,6 +41,15 @@ func RopstenGenesis() string {
return string(enc) return string(enc)
} }
// SepoliaGenesis returns the JSON spec to use for the Sepolia test network.
func SepoliaGenesis() string {
enc, err := json.Marshal(core.DefaultSepoliaGenesisBlock())
if err != nil {
panic(err)
}
return string(enc)
}
// RinkebyGenesis returns the JSON spec to use for the Rinkeby test network // RinkebyGenesis returns the JSON spec to use for the Rinkeby test network
func RinkebyGenesis() string { func RinkebyGenesis() string {
enc, err := json.Marshal(core.DefaultRinkebyGenesisBlock()) enc, err := json.Marshal(core.DefaultRinkebyGenesisBlock())

View File

@ -22,7 +22,6 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"io"
"math/rand" "math/rand"
"sync" "sync"
"time" "time"
@ -695,12 +694,6 @@ func (net *Network) Shutdown() {
if err := node.Stop(); err != nil { if err := node.Stop(); err != nil {
log.Warn("Can't stop node", "id", node.ID(), "err", err) log.Warn("Can't stop node", "id", node.ID(), "err", err)
} }
// If the node has the close method, call it.
if closer, ok := node.Node.(io.Closer); ok {
if err := closer.Close(); err != nil {
log.Warn("Can't close node", "id", node.ID(), "err", err)
}
}
} }
close(net.quitc) close(net.quitc)
} }

Some files were not shown because too many files have changed in this diff.