diff --git a/.travis.yml b/.travis.yml index c9e4d8e7d..197d56748 100644 --- a/.travis.yml +++ b/.travis.yml @@ -120,36 +120,6 @@ jobs: - go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - # This builder does the Linux Azure MIPS xgo uploads - - stage: build - if: type = push - os: linux - dist: bionic - services: - - docker - go: 1.17.x - env: - - azure-linux-mips - - GO111MODULE=on - git: - submodules: false # avoid cloning ethereum/tests - script: - - go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done - - go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - - - go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done - - go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - - - go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done - - go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY signify SIGNIFY_KEY -upload gethstore/builds - - - go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v - - for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done - - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - # This builder does the Android Maven and Azure uploads - stage: build if: type = push diff --git a/Makefile b/Makefile index cb5a87dad..944961473 100644 --- a/Makefile +++ b/Makefile @@ -2,11 +2,7 @@ # with Go source code. If you know what GOPATH is then you probably # don't need to bother with make. 
-.PHONY: geth android ios geth-cross evm all test clean -.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le -.PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64 -.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64 -.PHONY: geth-windows geth-windows-386 geth-windows-amd64 +.PHONY: geth android ios evm all test clean GOBIN = ./build/bin GO ?= latest @@ -53,95 +49,3 @@ devtools: env GOBIN= go install ./cmd/abigen @type "solc" 2> /dev/null || echo 'Please install solc' @type "protoc" 2> /dev/null || echo 'Please install protoc' - -# Cross Compilation Targets (xgo) - -geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios - @echo "Full cross compilation done:" - @ls -ld $(GOBIN)/geth-* - -geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 geth-linux-mips64le - @echo "Linux cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* - -geth-linux-386: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth - @echo "Linux 386 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep 386 - -geth-linux-amd64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth - @echo "Linux amd64 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep amd64 - -geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64 - @echo "Linux ARM cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm - -geth-linux-arm-5: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth - @echo "Linux ARMv5 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm-5 - -geth-linux-arm-6: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth - @echo "Linux ARMv6 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm-6 - -geth-linux-arm-7: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth - @echo "Linux ARMv7 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm-7 - -geth-linux-arm64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth - @echo "Linux ARM64 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep arm64 - -geth-linux-mips: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPS cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mips - -geth-linux-mipsle: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPSle cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mipsle - -geth-linux-mips64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPS64 cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mips64 - -geth-linux-mips64le: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth - @echo "Linux MIPS64le cross compilation done:" - @ls -ld $(GOBIN)/geth-linux-* | grep mips64le - -geth-darwin: geth-darwin-386 geth-darwin-amd64 - @echo "Darwin cross compilation done:" - @ls -ld $(GOBIN)/geth-darwin-* - -geth-darwin-386: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth - @echo "Darwin 386 cross compilation done:" - @ls -ld $(GOBIN)/geth-darwin-* | grep 386 - -geth-darwin-amd64: - $(GORUN) build/ci.go xgo -- --go=$(GO) 
--targets=darwin/amd64 -v ./cmd/geth - @echo "Darwin amd64 cross compilation done:" - @ls -ld $(GOBIN)/geth-darwin-* | grep amd64 - -geth-windows: geth-windows-386 geth-windows-amd64 - @echo "Windows cross compilation done:" - @ls -ld $(GOBIN)/geth-windows-* - -geth-windows-386: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth - @echo "Windows 386 cross compilation done:" - @ls -ld $(GOBIN)/geth-windows-* | grep 386 - -geth-windows-amd64: - $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth - @echo "Windows amd64 cross compilation done:" - @ls -ld $(GOBIN)/geth-windows-* | grep amd64 diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index e410522ac..6854c9624 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -462,6 +462,9 @@ func (b *SimulatedBackend) PendingNonceAt(ctx context.Context, account common.Ad // SuggestGasPrice implements ContractTransactor.SuggestGasPrice. Since the simulated // chain doesn't have miners, we just return a gas price of 1 for any call. func (b *SimulatedBackend) SuggestGasPrice(ctx context.Context) (*big.Int, error) { + if b.pendingBlock.Header().BaseFee != nil { + return b.pendingBlock.Header().BaseFee, nil + } return big.NewInt(1), nil } diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index 613267810..4e63e3eff 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -916,8 +916,8 @@ func TestSuggestGasPrice(t *testing.T) { if err != nil { t.Errorf("could not get gas price: %v", err) } - if gasPrice.Uint64() != uint64(1) { - t.Errorf("gas price was not expected value of 1. actual: %v", gasPrice.Uint64()) + if gasPrice.Uint64() != sim.pendingBlock.Header().BaseFee.Uint64() { + t.Errorf("gas price was not expected value of %v. actual: %v", sim.pendingBlock.Header().BaseFee.Uint64(), gasPrice.Uint64()) } } diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go index 63280bcbe..f4e5a2a90 100644 --- a/accounts/abi/bind/base.go +++ b/accounts/abi/bind/base.go @@ -370,7 +370,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i rawTx, err = c.createLegacyTx(opts, contract, input) } else { // Only query for basefee if gasPrice not specified - if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); err != nil { + if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil { return nil, errHead } else if head.BaseFee != nil { rawTx, err = c.createDynamicTx(opts, contract, input, head) diff --git a/build/ci.go b/build/ci.go index 80d4269b2..1e2547fbb 100644 --- a/build/ci.go +++ b/build/ci.go @@ -33,7 +33,6 @@ Available commands are: nsis -- creates a Windows NSIS installer aar [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive xcode [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework - xgo [ -alltools ] [ options ] -- cross builds according to options purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore For all commands, -n prevents execution of external programs (dry run mode). 
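For the `SimulatedBackend.SuggestGasPrice` hunk above: once the simulated chain runs with a London base fee, callers get the pending block's base fee back instead of the hard-coded 1 wei. A minimal usage sketch (the generated key, funding amount and block gas limit are illustrative, not part of the change):

```go
package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Any funded genesis account works; this one is generated on the fly.
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)
	sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1e18)}}, 8_000_000)
	defer sim.Close()

	// With the change above this returns the pending block's BaseFee when the
	// simulated chain has EIP-1559 enabled, and still 1 on pre-London configs.
	price, err := sim.SuggestGasPrice(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("suggested gas price:", price)
}
```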
@@ -188,8 +187,6 @@ func main() { doAndroidArchive(os.Args[2:]) case "xcode": doXCodeFramework(os.Args[2:]) - case "xgo": - doXgo(os.Args[2:]) case "purge": doPurge(os.Args[2:]) default: @@ -1209,48 +1206,6 @@ func newPodMetadata(env build.Environment, archive string) podMetadata { } } -// Cross compilation - -func doXgo(cmdline []string) { - var ( - alltools = flag.Bool("alltools", false, `Flag whether we're building all known tools, or only on in particular`) - ) - flag.CommandLine.Parse(cmdline) - env := build.Env() - var tc build.GoToolchain - - // Make sure xgo is available for cross compilation - build.MustRun(tc.Install(GOBIN, "github.com/karalabe/xgo@latest")) - - // If all tools building is requested, build everything the builder wants - args := append(buildFlags(env), flag.Args()...) - - if *alltools { - args = append(args, []string{"--dest", GOBIN}...) - for _, res := range allToolsArchiveFiles { - if strings.HasPrefix(res, GOBIN) { - // Binary tool found, cross build it explicitly - args = append(args, "./"+filepath.Join("cmd", filepath.Base(res))) - build.MustRun(xgoTool(args)) - args = args[:len(args)-1] - } - } - return - } - - // Otherwise execute the explicit cross compilation - path := args[len(args)-1] - args = append(args[:len(args)-1], []string{"--dest", GOBIN, path}...) - build.MustRun(xgoTool(args)) -} - -func xgoTool(args []string) *exec.Cmd { - cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...) - cmd.Env = os.Environ() - cmd.Env = append(cmd.Env, []string{"GOBIN=" + GOBIN}...) - return cmd -} - // Binary distribution cleanups func doPurge(cmdline []string) { diff --git a/cmd/devp2p/nodesetcmd.go b/cmd/devp2p/nodesetcmd.go index 848288c9c..d65d6314c 100644 --- a/cmd/devp2p/nodesetcmd.go +++ b/cmd/devp2p/nodesetcmd.go @@ -235,6 +235,8 @@ func ethFilter(args []string) (nodeFilter, error) { filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash) case "ropsten": filter = forkid.NewStaticFilter(params.RopstenChainConfig, params.RopstenGenesisHash) + case "sepolia": + filter = forkid.NewStaticFilter(params.SepoliaChainConfig, params.SepoliaGenesisHash) default: return nil, fmt.Errorf("unknown network %q", args[0]) } diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index fae65767b..cedf96627 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -96,7 +96,7 @@ type rejectedTx struct { // Apply applies a set of transactions to a pre-state func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, txs types.Transactions, miningReward int64, - getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (*state.StateDB, *ExecutionResult, error) { + getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error)) (*state.StateDB, *ExecutionResult, error) { // Capture errors for BLOCKHASH operation, if we haven't been supplied the // required blockhashes diff --git a/cmd/evm/internal/t8ntool/transaction.go b/cmd/evm/internal/t8ntool/transaction.go index 29dd587d4..bc9bc42ed 100644 --- a/cmd/evm/internal/t8ntool/transaction.go +++ b/cmd/evm/internal/t8ntool/transaction.go @@ -121,6 +121,9 @@ func Transaction(ctx *cli.Context) error { } var results []result for it.Next() { + if err := it.Err(); err != nil { + return NewError(ErrorIO, err) + } var tx types.Transaction err := rlp.DecodeBytes(it.Value(), &tx) if err != nil { diff --git a/cmd/evm/internal/t8ntool/transition.go 
b/cmd/evm/internal/t8ntool/transition.go index 88a9c5e62..0aff715eb 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -89,10 +89,10 @@ func Transition(ctx *cli.Context) error { var ( err error - tracer vm.Tracer + tracer vm.EVMLogger baseDir = "" ) - var getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error) + var getTracer func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) // If user specified a basedir, make sure it exists if ctx.IsSet(OutputBasedir.Name) { @@ -119,7 +119,7 @@ func Transition(ctx *cli.Context) error { prevFile.Close() } }() - getTracer = func(txIndex int, txHash common.Hash) (vm.Tracer, error) { + getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) { if prevFile != nil { prevFile.Close() } @@ -131,7 +131,7 @@ func Transition(ctx *cli.Context) error { return vm.NewJSONLogger(logConfig, traceFile), nil } } else { - getTracer = func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error) { + getTracer = func(txIndex int, txHash common.Hash) (tracer vm.EVMLogger, err error) { return nil, nil } } diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index cedbd2281..447bb2c2e 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -116,7 +116,7 @@ func runCmd(ctx *cli.Context) error { } var ( - tracer vm.Tracer + tracer vm.EVMLogger debugLogger *vm.StructLogger statedb *state.StateDB chainConfig *params.ChainConfig diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index ab2704609..5e9bf696b 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -65,7 +65,7 @@ func stateTestCmd(ctx *cli.Context) error { EnableReturnData: !ctx.GlobalBool(DisableReturnDataFlag.Name), } var ( - tracer vm.Tracer + tracer vm.EVMLogger debugger *vm.StructLogger ) switch { diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go index 4f78c5dc6..b4b816f57 100644 --- a/cmd/evm/t8n_test.go +++ b/cmd/evm/t8n_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/docker/docker/pkg/reexec" + "github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool" "github.com/ethereum/go-ethereum/internal/cmdtest" ) @@ -170,13 +171,45 @@ func TestT8n(t *testing.T) { output: t8nOutput{result: true}, expOut: "exp2.json", }, + { // Difficulty calculation - with uncles + Berlin + base: "./testdata/14", + input: t8nInput{ + "alloc.json", "txs.json", "env.uncles.json", "Berlin", "", + }, + output: t8nOutput{result: true}, + expOut: "exp_berlin.json", + }, + { // Difficulty calculation on arrow glacier + base: "./testdata/19", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", "", + }, + output: t8nOutput{result: true}, + expOut: "exp_london.json", + }, + { // Difficulty calculation on arrow glacier + base: "./testdata/19", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "ArrowGlacier", "", + }, + output: t8nOutput{result: true}, + expOut: "exp_arrowglacier.json", + }, } { args := []string{"t8n"} args = append(args, tc.output.get()...) args = append(args, tc.input.get(tc.base)...) + var qArgs []string // quoted args for debugging purposes + for _, arg := range args { + if len(arg) == 0 { + qArgs = append(qArgs, `""`) + } else { + qArgs = append(qArgs, arg) + } + } + tt.Logf("args: %v\n", strings.Join(qArgs, " ")) tt.Run("evm-test", args...) 
- tt.Logf("args: %v\n", strings.Join(args, " ")) // Compare the expected output, if provided if tc.expOut != "" { want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut)) @@ -265,6 +298,14 @@ func TestT9n(t *testing.T) { }, expOut: "exp.json", }, + { // Invalid RLP + base: "./testdata/18", + input: t9nInput{ + inTxs: "invalid.rlp", + stFork: "London", + }, + expExitCode: t8ntool.ErrorIO, + }, } { args := []string{"t9n"} diff --git a/cmd/evm/testdata/14/exp_berlin.json b/cmd/evm/testdata/14/exp_berlin.json new file mode 100644 index 000000000..e56478831 --- /dev/null +++ b/cmd/evm/testdata/14/exp_berlin.json @@ -0,0 +1,11 @@ +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x1ff9000000000" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/18/README.md b/cmd/evm/testdata/18/README.md new file mode 100644 index 000000000..360a9bba0 --- /dev/null +++ b/cmd/evm/testdata/18/README.md @@ -0,0 +1,9 @@ +# Invalid rlp + +This folder contains a sample of invalid RLP, and it's expected +that the t9n handles this properly: + +``` +$ go run . 
t9n --input.txs=./testdata/18/invalid.rlp --state.fork=London +ERROR(11): rlp: value size exceeds available input length +``` \ No newline at end of file diff --git a/cmd/evm/testdata/18/invalid.rlp b/cmd/evm/testdata/18/invalid.rlp new file mode 100644 index 000000000..7ff2824ca --- /dev/null +++ b/cmd/evm/testdata/18/invalid.rlp @@ -0,0 +1 @@ +"0xf852328001825208870b9331677e6ebf0a801ca098ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4aa03887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a3" \ No newline at end of file diff --git a/cmd/evm/testdata/19/alloc.json b/cmd/evm/testdata/19/alloc.json new file mode 100644 index 000000000..cef1a25ff --- /dev/null +++ b/cmd/evm/testdata/19/alloc.json @@ -0,0 +1,12 @@ +{ + "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "code": "0x", + "nonce": "0xac", + "storage": {} + }, + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{ + "balance": "0xfeedbead", + "nonce" : "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/19/env.json b/cmd/evm/testdata/19/env.json new file mode 100644 index 000000000..0c64392af --- /dev/null +++ b/cmd/evm/testdata/19/env.json @@ -0,0 +1,9 @@ +{ + "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "currentGasLimit": "0x750a163df65e8a", + "currentBaseFee": "0x500", + "currentNumber": "13000000", + "currentTimestamp": "100015", + "parentTimestamp" : "99999", + "parentDifficulty" : "0x2000000000000" +} diff --git a/cmd/evm/testdata/19/exp_arrowglacier.json b/cmd/evm/testdata/19/exp_arrowglacier.json new file mode 100644 index 000000000..4c5f8e0fb --- /dev/null +++ b/cmd/evm/testdata/19/exp_arrowglacier.json @@ -0,0 +1,11 @@ +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "currentDifficulty": "0x2000000200000", + "receipts": [] + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/19/exp_london.json b/cmd/evm/testdata/19/exp_london.json new file mode 100644 index 000000000..9dc1b9d4f --- /dev/null +++ b/cmd/evm/testdata/19/exp_london.json @@ -0,0 +1,11 @@ +{ + "result": { + "stateRoot": "0x6f058887ca01549716789c380ede95aecc510e6d1fdc4dbf67d053c7c07f4bdc", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "currentDifficulty": "0x2000080000000", + "receipts": [] + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/19/readme.md b/cmd/evm/testdata/19/readme.md new file mode 100644 index 000000000..5fae183f4 --- /dev/null +++ b/cmd/evm/testdata/19/readme.md @@ -0,0 +1,9 @@ +## Difficulty calculation + +This test shows how the `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by the caller, +this time on `ArrowGlacier` (Eip 4345). + +Calculating it (with an empty set of txs) using `ArrowGlacier` rules (and no provided unclehash for the parent block): +``` +[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.json --output.result=stdout --state.fork=ArrowGlacier +``` \ No newline at end of file diff --git a/cmd/evm/testdata/19/txs.json b/cmd/evm/testdata/19/txs.json new file mode 100644 index 000000000..fe51488c7 --- /dev/null +++ b/cmd/evm/testdata/19/txs.json @@ -0,0 +1 @@ +[] diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 436e558b5..6077c43cc 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -66,6 +66,7 @@ It expects the genesis file as argument.`, Flags: []cli.Flag{ utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, @@ -140,7 +141,9 @@ be gzipped.`, }, Category: "BLOCKCHAIN COMMANDS", Description: ` - The import-preimages command imports hash preimages from an RLP encoded stream.`, +The import-preimages command imports hash preimages from an RLP encoded stream. +It's deprecated, please use "geth db import" instead. +`, } exportPreimagesCommand = cli.Command{ Action: utils.MigrateFlags(exportPreimages), @@ -154,7 +157,9 @@ be gzipped.`, }, Category: "BLOCKCHAIN COMMANDS", Description: ` -The export-preimages command export hash preimages to an RLP encoded stream`, +The export-preimages command exports hash preimages to an RLP encoded stream. +It's deprecated, please use "geth db export" instead. +`, } dumpCommand = cli.Command{ Action: utils.MigrateFlags(dump), @@ -368,7 +373,6 @@ func exportPreimages(ctx *cli.Context) error { if len(ctx.Args()) < 1 { utils.Fatalf("This command requires an argument.") } - stack, _ := makeConfigNode(ctx) defer stack.Close() diff --git a/cmd/geth/config.go b/cmd/geth/config.go index c97a64f17..08b9a1154 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -156,8 +156,8 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { // makeFullNode loads geth configuration and creates the Ethereum backend. 
func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { stack, cfg := makeConfigNode(ctx) - if ctx.GlobalIsSet(utils.OverrideLondonFlag.Name) { - cfg.Eth.OverrideLondon = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideLondonFlag.Name)) + if ctx.GlobalIsSet(utils.OverrideArrowGlacierFlag.Name) { + cfg.Eth.OverrideArrowGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideArrowGlacierFlag.Name)) } backend, eth := utils.RegisterEthService(stack, &cfg.Eth) diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go index 0e156fde9..8a767241e 100644 --- a/cmd/geth/consolecmd.go +++ b/cmd/geth/consolecmd.go @@ -134,6 +134,8 @@ func remoteConsole(ctx *cli.Context) error { path = filepath.Join(path, "rinkeby") } else if ctx.GlobalBool(utils.GoerliFlag.Name) { path = filepath.Join(path, "goerli") + } else if ctx.GlobalBool(utils.SepoliaFlag.Name) { + path = filepath.Join(path, "sepolia") } } endpoint = fmt.Sprintf("%s/geth.ipc", path) diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index 123ed9c79..e1e0d77f0 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -17,12 +17,16 @@ package main import ( + "bytes" "errors" "fmt" "os" + "os/signal" "path/filepath" "sort" "strconv" + "strings" + "syscall" "time" "github.com/ethereum/go-ethereum/cmd/utils" @@ -63,6 +67,8 @@ Remove blockchain and state databases`, dbPutCmd, dbGetSlotsCmd, dbDumpFreezerIndex, + dbImportCmd, + dbExportCmd, }, } dbInspectCmd = cli.Command{ @@ -74,6 +80,7 @@ Remove blockchain and state databases`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, @@ -89,6 +96,7 @@ Remove blockchain and state databases`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, @@ -102,6 +110,7 @@ Remove blockchain and state databases`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, utils.CacheFlag, @@ -121,6 +130,7 @@ corruption if it is aborted during execution'!`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, @@ -136,6 +146,7 @@ corruption if it is aborted during execution'!`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, @@ -152,6 +163,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, @@ -168,6 +180,7 @@ WARNING: This is a low-level operation which may cause database corruption!`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, @@ -183,11 +196,42 @@ WARNING: This is a low-level operation which may cause database corruption!`, utils.SyncModeFlag, utils.MainnetFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, Description: "This command displays information about the freezer index.", } + dbImportCmd = cli.Command{ + Action: utils.MigrateFlags(importLDBdata), + Name: "import", + Usage: "Imports leveldb-data from an exported RLP dump.", + ArgsUsage: "<dumpfile> <start (optional)", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + }, + Description: "The import command imports the specific chain data from an RLP encoded stream.", + } + dbExportCmd = cli.Command{ + Action: utils.MigrateFlags(exportChaindata), + Name: "export", + Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.", + ArgsUsage: "<type> <dumpfile>", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.SyncModeFlag, + utils.MainnetFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + }, + Description: "Exports the specified chain data to
an RLP encoded stream, optionally gzip-compressed.", + } ) func removeDB(ctx *cli.Context) error { @@ -510,3 +554,133 @@ func parseHexOrString(str string) ([]byte, error) { } return b, err } + +func importLDBdata(ctx *cli.Context) error { + start := 0 + switch ctx.NArg() { + case 1: + break + case 2: + s, err := strconv.Atoi(ctx.Args().Get(1)) + if err != nil { + return fmt.Errorf("second arg must be an integer: %v", err) + } + start = s + default: + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + var ( + fName = ctx.Args().Get(0) + stack, _ = makeConfigNode(ctx) + interrupt = make(chan os.Signal, 1) + stop = make(chan struct{}) + ) + defer stack.Close() + signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) + defer signal.Stop(interrupt) + defer close(interrupt) + go func() { + if _, ok := <-interrupt; ok { + log.Info("Interrupted during ldb import, stopping at next batch") + } + close(stop) + }() + db := utils.MakeChainDatabase(ctx, stack, false) + return utils.ImportLDBData(db, fName, int64(start), stop) +} + +type preimageIterator struct { + iter ethdb.Iterator +} + +func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) { + for iter.iter.Next() { + key := iter.iter.Key() + if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) { + return utils.OpBatchAdd, key, iter.iter.Value(), true + } + } + return 0, nil, nil, false +} + +func (iter *preimageIterator) Release() { + iter.iter.Release() +} + +type snapshotIterator struct { + init bool + account ethdb.Iterator + storage ethdb.Iterator +} + +func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) { + if !iter.init { + iter.init = true + return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true + } + for iter.account.Next() { + key := iter.account.Key() + if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) { + return utils.OpBatchAdd, key, iter.account.Value(), true + } + } + for iter.storage.Next() { + key := iter.storage.Key() + if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) { + return utils.OpBatchAdd, key, iter.storage.Value(), true + } + } + return 0, nil, nil, false +} + +func (iter *snapshotIterator) Release() { + iter.account.Release() + iter.storage.Release() +} + +// chainExporters defines the export scheme for all exportable chain data. +var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{ + "preimage": func(db ethdb.Database) utils.ChainDataIterator { + iter := db.NewIterator(rawdb.PreimagePrefix, nil) + return &preimageIterator{iter: iter} + }, + "snapshot": func(db ethdb.Database) utils.ChainDataIterator { + account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil) + storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil) + return &snapshotIterator{account: account, storage: storage} + }, +} + +func exportChaindata(ctx *cli.Context) error { + if ctx.NArg() < 2 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + // Parse the required chain data type, make sure it's supported. 
+ kind := ctx.Args().Get(0) + kind = strings.ToLower(strings.Trim(kind, " ")) + exporter, ok := chainExporters[kind] + if !ok { + var kinds []string + for kind := range chainExporters { + kinds = append(kinds, kind) + } + return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", ")) + } + var ( + stack, _ = makeConfigNode(ctx) + interrupt = make(chan os.Signal, 1) + stop = make(chan struct{}) + ) + defer stack.Close() + signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) + defer signal.Stop(interrupt) + defer close(interrupt) + go func() { + if _, ok := <-interrupt; ok { + log.Info("Interrupted during db export, stopping at next batch") + } + close(stop) + }() + db := utils.MakeChainDatabase(ctx, stack, true) + return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop) +} diff --git a/cmd/geth/main.go b/cmd/geth/main.go index deeb458f9..48cfec15b 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -42,6 +42,10 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/plugins" "github.com/ethereum/go-ethereum/plugins/wrappers" + + // Force-load the native, to trigger registration + _ "github.com/ethereum/go-ethereum/eth/tracers/native" + "gopkg.in/urfave/cli.v1" ) @@ -69,7 +73,7 @@ var ( utils.NoUSBFlag, utils.USBFlag, utils.SmartCardDaemonPathFlag, - utils.OverrideLondonFlag, + utils.OverrideArrowGlacierFlag, utils.EthashCacheDirFlag, utils.EthashCachesInMemoryFlag, utils.EthashCachesOnDiskFlag, @@ -139,6 +143,7 @@ var ( utils.DeveloperFlag, utils.DeveloperPeriodFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, utils.VMEnableDebugFlag, diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go index d3903e0af..bd2c2443a 100644 --- a/cmd/geth/snapshot.go +++ b/cmd/geth/snapshot.go @@ -62,6 +62,7 @@ var ( utils.DataDirFlag, utils.AncientFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, utils.CacheTrieJournalFlag, @@ -92,6 +93,7 @@ the trie clean cache with default directory will be deleted. utils.DataDirFlag, utils.AncientFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, @@ -112,6 +114,7 @@ In other words, this command does the snapshot to trie conversion. utils.DataDirFlag, utils.AncientFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, @@ -134,6 +137,7 @@ It's also usable without snapshot enabled. utils.DataDirFlag, utils.AncientFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, }, @@ -157,6 +161,7 @@ It's also usable without snapshot enabled. 
utils.DataDirFlag, utils.AncientFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.RinkebyFlag, utils.GoerliFlag, utils.ExcludeCodeFlag, diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 5d82bacea..38f690f17 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -45,6 +45,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.GoerliFlag, utils.RinkebyFlag, utils.RopstenFlag, + utils.SepoliaFlag, utils.SyncModeFlag, utils.ExitWhenSyncedFlag, utils.GCModeFlag, diff --git a/cmd/puppeth/wizard_intro.go b/cmd/puppeth/wizard_intro.go index 8610b908d..dd4b606c4 100644 --- a/cmd/puppeth/wizard_intro.go +++ b/cmd/puppeth/wizard_intro.go @@ -23,7 +23,6 @@ import ( "os" "path/filepath" "strings" - "sync" "github.com/ethereum/go-ethereum/log" ) @@ -80,25 +79,17 @@ func (w *wizard) run() { } else if err := json.Unmarshal(blob, &w.conf); err != nil { log.Crit("Previous configuration corrupted", "path", w.conf.path, "err", err) } else { - // Dial all previously known servers concurrently - var pend sync.WaitGroup + // Dial all previously known servers for server, pubkey := range w.conf.Servers { - pend.Add(1) - - go func(server string, pubkey []byte) { - defer pend.Done() - - log.Info("Dialing previously configured server", "server", server) - client, err := dial(server, pubkey) - if err != nil { - log.Error("Previous server unreachable", "server", server, "err", err) - } - w.lock.Lock() - w.servers[server] = client - w.lock.Unlock() - }(server, pubkey) + log.Info("Dialing previously configured server", "server", server) + client, err := dial(server, pubkey) + if err != nil { + log.Error("Previous server unreachable", "server", server, "err", err) + } + w.lock.Lock() + w.servers[server] = client + w.lock.Unlock() } - pend.Wait() w.networkStats() } // Basics done, loop ad infinitum about what to do diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index d4051e59e..ddd8d822b 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -18,7 +18,9 @@ package utils import ( + "bufio" "compress/gzip" + "errors" "fmt" "io" "os" @@ -270,6 +272,7 @@ func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, las } // ImportPreimages imports a batch of exported hash preimages into the database. +// It's a part of the deprecated functionality, should be removed in the future. func ImportPreimages(db ethdb.Database, fn string) error { log.Info("Importing preimages", "file", fn) @@ -280,7 +283,7 @@ func ImportPreimages(db ethdb.Database, fn string) error { } defer fh.Close() - var reader io.Reader = fh + var reader io.Reader = bufio.NewReader(fh) if strings.HasSuffix(fn, ".gz") { if reader, err = gzip.NewReader(reader); err != nil { return err @@ -288,7 +291,7 @@ func ImportPreimages(db ethdb.Database, fn string) error { } stream := rlp.NewStream(reader, 0) - // Import the preimages in batches to prevent disk trashing + // Import the preimages in batches to prevent disk thrashing preimages := make(map[common.Hash][]byte) for { @@ -317,6 +320,7 @@ func ImportPreimages(db ethdb.Database, fn string) error { // ExportPreimages exports all known hash preimages into the specified file, // truncating any data already present in the file. +// It's a part of the deprecated functionality, should be removed in the future. 
func ExportPreimages(db ethdb.Database, fn string) error { log.Info("Exporting preimages", "file", fn) @@ -344,3 +348,207 @@ func ExportPreimages(db ethdb.Database, fn string) error { log.Info("Exported preimages", "file", fn) return nil } + +// exportHeader is used in the export/import flow. When we do an export, +// the first element we output is the exportHeader. +// Whenever a backwards-incompatible change is made, the Version header +// should be bumped. +// If the importer sees a higher version, it should reject the import. +type exportHeader struct { + Magic string // Always set to 'gethdbdump' for disambiguation + Version uint64 + Kind string + UnixTime uint64 +} + +const exportMagic = "gethdbdump" +const ( + OpBatchAdd = 0 + OpBatchDel = 1 +) + +// ImportLDBData imports a batch of snapshot data into the database +func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error { + log.Info("Importing leveldb data", "file", f) + + // Open the file handle and potentially unwrap the gzip stream + fh, err := os.Open(f) + if err != nil { + return err + } + defer fh.Close() + + var reader io.Reader = bufio.NewReader(fh) + if strings.HasSuffix(f, ".gz") { + if reader, err = gzip.NewReader(reader); err != nil { + return err + } + } + stream := rlp.NewStream(reader, 0) + + // Read the header + var header exportHeader + if err := stream.Decode(&header); err != nil { + return fmt.Errorf("could not decode header: %v", err) + } + if header.Magic != exportMagic { + return errors.New("incompatible data, wrong magic") + } + if header.Version != 0 { + return fmt.Errorf("incompatible version %d, (support only 0)", header.Version) + } + log.Info("Importing data", "file", f, "type", header.Kind, "data age", + common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0)))) + + // Import the snapshot in batches to prevent disk thrashing + var ( + count int64 + start = time.Now() + logged = time.Now() + batch = db.NewBatch() + ) + for { + // Read the next entry + var ( + op byte + key, val []byte + ) + if err := stream.Decode(&op); err != nil { + if err == io.EOF { + break + } + return err + } + if err := stream.Decode(&key); err != nil { + return err + } + if err := stream.Decode(&val); err != nil { + return err + } + if count < startIndex { + count++ + continue + } + switch op { + case OpBatchDel: + batch.Delete(key) + case OpBatchAdd: + batch.Put(key, val) + default: + return fmt.Errorf("unknown op %d\n", op) + } + if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { + return err + } + batch.Reset() + } + // Check interruption emitted by ctrl+c + if count%1000 == 0 { + select { + case <-interrupt: + if err := batch.Write(); err != nil { + return err + } + log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start))) + return nil + default: + } + } + if count%1000 == 0 && time.Since(logged) > 8*time.Second { + log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start))) + logged = time.Now() + } + count += 1 + } + // Flush the last batch snapshot data + if batch.ValueSize() > 0 { + if err := batch.Write(); err != nil { + return err + } + } + log.Info("Imported chain data", "file", f, "count", count, + "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +// ChainDataIterator is an interface wraps all necessary functions to iterate +// the exporting chain data. 
+type ChainDataIterator interface { + // Next returns the key-value pair for next exporting entry in the iterator. + // When the end is reached, it will return (0, nil, nil, false). + Next() (byte, []byte, []byte, bool) + + // Release releases associated resources. Release should always succeed and can + // be called multiple times without causing error. + Release() +} + +// ExportChaindata exports the given data type (truncating any data already present) +// in the file. If the suffix is 'gz', gzip compression is used. +func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error { + log.Info("Exporting chain data", "file", fn, "kind", kind) + defer iter.Release() + + // Open the file handle and potentially wrap with a gzip stream + fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) + if err != nil { + return err + } + defer fh.Close() + + var writer io.Writer = fh + if strings.HasSuffix(fn, ".gz") { + writer = gzip.NewWriter(writer) + defer writer.(*gzip.Writer).Close() + } + // Write the header + if err := rlp.Encode(writer, &exportHeader{ + Magic: exportMagic, + Version: 0, + Kind: kind, + UnixTime: uint64(time.Now().Unix()), + }); err != nil { + return err + } + // Extract data from source iterator and dump them out to file + var ( + count int64 + start = time.Now() + logged = time.Now() + ) + for { + op, key, val, ok := iter.Next() + if !ok { + break + } + if err := rlp.Encode(writer, op); err != nil { + return err + } + if err := rlp.Encode(writer, key); err != nil { + return err + } + if err := rlp.Encode(writer, val); err != nil { + return err + } + if count%1000 == 0 { + // Check interruption emitted by ctrl+c + select { + case <-interrupt: + log.Info("Chain data exporting interrupted", "file", fn, + "kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start))) + return nil + default: + } + if time.Since(logged) > 8*time.Second { + log.Info("Exporting chain data", "file", fn, "kind", kind, + "count", count, "elapsed", common.PrettyDuration(time.Since(start))) + logged = time.Now() + } + } + count++ + } + log.Info("Exported chain data", "file", fn, "kind", kind, "count", count, + "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} diff --git a/cmd/utils/export_test.go b/cmd/utils/export_test.go new file mode 100644 index 000000000..a05121d28 --- /dev/null +++ b/cmd/utils/export_test.go @@ -0,0 +1,198 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of go-ethereum. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
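The new `cmd/utils/export_test.go` that follows round-trips dumps through `ExportChaindata` and `ImportLDBData`. For orientation, the on-disk layout being exercised is one RLP-encoded `exportHeader` followed by an op/key/value triple per entry (op 0 = add, 1 = delete). A sketch only: the header mirror and sample entry are illustrative, and the real `exportHeader` is the unexported type defined in cmd.go above.

```go
package main

import (
	"bytes"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/rlp"
)

// Mirror of the unexported exportHeader in cmd/utils/cmd.go.
type exportHeader struct {
	Magic    string
	Version  uint64
	Kind     string
	UnixTime uint64
}

func main() {
	var buf bytes.Buffer
	// Header first; importers reject any version they don't understand.
	rlp.Encode(&buf, &exportHeader{Magic: "gethdbdump", Version: 0, Kind: "preimage", UnixTime: uint64(time.Now().Unix())})
	// Then a flat stream of op, key, value entries.
	rlp.Encode(&buf, byte(0)) // OpBatchAdd
	rlp.Encode(&buf, []byte("key-0001"))
	rlp.Encode(&buf, []byte("value 1"))
	fmt.Printf("dump is %d bytes\n", buf.Len())
}
```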
+ +package utils + +import ( + "fmt" + "os" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/rlp" +) + +// TestExport does basic sanity checks on the export/import functionality +func TestExport(t *testing.T) { + f := fmt.Sprintf("%v/tempdump", os.TempDir()) + defer func() { + os.Remove(f) + }() + testExport(t, f) +} + +func TestExportGzip(t *testing.T) { + f := fmt.Sprintf("%v/tempdump.gz", os.TempDir()) + defer func() { + os.Remove(f) + }() + testExport(t, f) +} + +type testIterator struct { + index int +} + +func newTestIterator() *testIterator { + return &testIterator{index: -1} +} + +func (iter *testIterator) Next() (byte, []byte, []byte, bool) { + if iter.index >= 999 { + return 0, nil, nil, false + } + iter.index += 1 + if iter.index == 42 { + iter.index += 1 + } + return OpBatchAdd, []byte(fmt.Sprintf("key-%04d", iter.index)), + []byte(fmt.Sprintf("value %d", iter.index)), true +} + +func (iter *testIterator) Release() {} + +func testExport(t *testing.T, f string) { + err := ExportChaindata(f, "testdata", newTestIterator(), make(chan struct{})) + if err != nil { + t.Fatal(err) + } + db := rawdb.NewMemoryDatabase() + err = ImportLDBData(db, f, 5, make(chan struct{})) + if err != nil { + t.Fatal(err) + } + // verify + for i := 0; i < 1000; i++ { + v, err := db.Get([]byte(fmt.Sprintf("key-%04d", i))) + if (i < 5 || i == 42) && err == nil { + t.Fatalf("expected no element at idx %d, got '%v'", i, string(v)) + } + if !(i < 5 || i == 42) { + if err != nil { + t.Fatalf("expected element idx %d: %v", i, err) + } + if have, want := string(v), fmt.Sprintf("value %d", i); have != want { + t.Fatalf("have %v, want %v", have, want) + } + } + } + v, err := db.Get([]byte(fmt.Sprintf("key-%04d", 1000))) + if err == nil { + t.Fatalf("expected no element at idx %d, got '%v'", 1000, string(v)) + } +} + +// testDeletion tests if the deletion markers can be exported/imported correctly +func TestDeletionExport(t *testing.T) { + f := fmt.Sprintf("%v/tempdump", os.TempDir()) + defer func() { + os.Remove(f) + }() + testDeletion(t, f) +} + +// TestDeletionExportGzip tests if the deletion markers can be exported/imported +// correctly with gz compression. 
+func TestDeletionExportGzip(t *testing.T) { + f := fmt.Sprintf("%v/tempdump.gz", os.TempDir()) + defer func() { + os.Remove(f) + }() + testDeletion(t, f) +} + +type deletionIterator struct { + index int +} + +func newDeletionIterator() *deletionIterator { + return &deletionIterator{index: -1} +} + +func (iter *deletionIterator) Next() (byte, []byte, []byte, bool) { + if iter.index >= 999 { + return 0, nil, nil, false + } + iter.index += 1 + if iter.index == 42 { + iter.index += 1 + } + return OpBatchDel, []byte(fmt.Sprintf("key-%04d", iter.index)), nil, true +} + +func (iter *deletionIterator) Release() {} + +func testDeletion(t *testing.T, f string) { + err := ExportChaindata(f, "testdata", newDeletionIterator(), make(chan struct{})) + if err != nil { + t.Fatal(err) + } + db := rawdb.NewMemoryDatabase() + for i := 0; i < 1000; i++ { + db.Put([]byte(fmt.Sprintf("key-%04d", i)), []byte(fmt.Sprintf("value %d", i))) + } + err = ImportLDBData(db, f, 5, make(chan struct{})) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 1000; i++ { + v, err := db.Get([]byte(fmt.Sprintf("key-%04d", i))) + if i < 5 || i == 42 { + if err != nil { + t.Fatalf("expected element at idx %d, got '%v'", i, err) + } + if have, want := string(v), fmt.Sprintf("value %d", i); have != want { + t.Fatalf("have %v, want %v", have, want) + } + } + if !(i < 5 || i == 42) { + if err == nil { + t.Fatalf("expected no element idx %d: %v", i, string(v)) + } + } + } +} + +// TestImportFutureFormat tests that we reject unsupported future versions. +func TestImportFutureFormat(t *testing.T) { + f := fmt.Sprintf("%v/tempdump-future", os.TempDir()) + defer func() { + os.Remove(f) + }() + fh, err := os.OpenFile(f, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) + if err != nil { + t.Fatal(err) + } + defer fh.Close() + if err := rlp.Encode(fh, &exportHeader{ + Magic: exportMagic, + Version: 500, + Kind: "testdata", + UnixTime: uint64(time.Now().Unix()), + }); err != nil { + t.Fatal(err) + } + db2 := rawdb.NewMemoryDatabase() + err = ImportLDBData(db2, f, 0, make(chan struct{})) + if err == nil { + t.Fatal("Expected error, got none") + } + if !strings.HasPrefix(err.Error(), "incompatible version") { + t.Fatalf("wrong error: %v", err) + } +} diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 0a7a7482a..52554fbe5 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -155,6 +155,10 @@ var ( Name: "ropsten", Usage: "Ropsten network: pre-configured proof-of-work test network", } + SepoliaFlag = cli.BoolFlag{ + Name: "sepolia", + Usage: "Sepolia network: pre-configured proof-of-work test network", + } DeveloperFlag = cli.BoolFlag{ Name: "dev", Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled", @@ -235,9 +239,9 @@ var ( Usage: "Megabytes of memory allocated to bloom-filter for pruning", Value: 2048, } - OverrideLondonFlag = cli.Uint64Flag{ - Name: "override.london", - Usage: "Manually specify London fork-block, overriding the bundled setting", + OverrideArrowGlacierFlag = cli.Uint64Flag{ + Name: "override.arrowglacier", + Usage: "Manually specify Arrow Glacier fork-block, overriding the bundled setting", } // Light server and client settings LightServeFlag = cli.IntFlag{ @@ -798,6 +802,9 @@ func MakeDataDir(ctx *cli.Context) string { if ctx.GlobalBool(GoerliFlag.Name) { return filepath.Join(path, "goerli") } + if ctx.GlobalBool(SepoliaFlag.Name) { + return filepath.Join(path, "sepolia") + } return path } Fatalf("Cannot determine default data directory, please set manually 
(--datadir)") @@ -846,6 +853,8 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) { urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name)) case ctx.GlobalBool(RopstenFlag.Name): urls = params.RopstenBootnodes + case ctx.GlobalBool(SepoliaFlag.Name): + urls = params.SepoliaBootnodes case ctx.GlobalBool(RinkebyFlag.Name): urls = params.RinkebyBootnodes case ctx.GlobalBool(GoerliFlag.Name): @@ -1269,6 +1278,8 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) { cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby") case ctx.GlobalBool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir(): cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli") + case ctx.GlobalBool(SepoliaFlag.Name) && cfg.DataDir == node.DefaultDataDir(): + cfg.DataDir = filepath.Join(node.DefaultDataDir(), "sepolia") } } @@ -1454,7 +1465,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) { // SetEthConfig applies eth-related command line flags to the config. func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { // Avoid conflicting network flags - CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag) + CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag) CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light") CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer if ctx.GlobalString(GCModeFlag.Name) == "archive" && ctx.GlobalUint64(TxLookupLimitFlag.Name) != 0 { @@ -1598,6 +1609,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { } cfg.Genesis = core.DefaultRopstenGenesisBlock() SetDNSDiscoveryDefaults(cfg, params.RopstenGenesisHash) + case ctx.GlobalBool(SepoliaFlag.Name): + if !ctx.GlobalIsSet(NetworkIdFlag.Name) { + cfg.NetworkId = 11155111 + } + cfg.Genesis = core.DefaultSepoliaGenesisBlock() + SetDNSDiscoveryDefaults(cfg, params.SepoliaGenesisHash) case ctx.GlobalBool(RinkebyFlag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = 4 @@ -1826,6 +1843,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis { genesis = core.DefaultGenesisBlock() case ctx.GlobalBool(RopstenFlag.Name): genesis = core.DefaultRopstenGenesisBlock() + case ctx.GlobalBool(SepoliaFlag.Name): + genesis = core.DefaultSepoliaGenesisBlock() case ctx.GlobalBool(RinkebyFlag.Name): genesis = core.DefaultRinkebyGenesisBlock() case ctx.GlobalBool(GoerliFlag.Name): diff --git a/common/hexutil/hexutil.go b/common/hexutil/hexutil.go index 46223a281..e0241f5f2 100644 --- a/common/hexutil/hexutil.go +++ b/common/hexutil/hexutil.go @@ -176,13 +176,14 @@ func MustDecodeBig(input string) *big.Int { } // EncodeBig encodes bigint as a hex string with 0x prefix. -// The sign of the integer is ignored. 
func EncodeBig(bigint *big.Int) string { - nbits := bigint.BitLen() - if nbits == 0 { + if sign := bigint.Sign(); sign == 0 { return "0x0" + } else if sign > 0 { + return "0x" + bigint.Text(16) + } else { + return "-0x" + bigint.Text(16)[1:] } - return fmt.Sprintf("%#x", bigint) } func has0xPrefix(input string) bool { diff --git a/common/hexutil/hexutil_test.go b/common/hexutil/hexutil_test.go index ed6fccc3c..f2b800d82 100644 --- a/common/hexutil/hexutil_test.go +++ b/common/hexutil/hexutil_test.go @@ -201,3 +201,15 @@ func TestDecodeUint64(t *testing.T) { } } } + +func BenchmarkEncodeBig(b *testing.B) { + for _, bench := range encodeBigTests { + b.Run(bench.want, func(b *testing.B) { + b.ReportAllocs() + bigint := bench.input.(*big.Int) + for i := 0; i < b.N; i++ { + EncodeBig(bigint) + } + }) + } +} diff --git a/consensus/clique/api.go b/consensus/clique/api.go index 6129b5cc5..03f2daffa 100644 --- a/consensus/clique/api.go +++ b/consensus/clique/api.go @@ -214,6 +214,9 @@ func (api *API) GetSigner(rlpOrBlockNr *blockNumberOrHashOrRLP) (common.Address, } else if number, ok := blockNrOrHash.Number(); ok { header = api.chain.GetHeaderByNumber(uint64(number.Int64())) } + if header == nil { + return common.Address{}, fmt.Errorf("missing block %v", blockNrOrHash.String()) + } return api.clique.Author(header) } block := new(types.Block) diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index a6a16c84a..38597e152 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -600,8 +600,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, res } // For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing) if c.config.Period == 0 && len(block.Transactions()) == 0 { - log.Info("Sealing paused, waiting for transactions") - return nil + return errors.New("sealing paused while waiting for transactions") } // Don't hold the signer fields for the entire sealing procedure c.lock.RLock() @@ -621,8 +620,7 @@ func (c *Clique) Seal(chain consensus.ChainHeaderReader, block *types.Block, res if recent == signer { // Signer is among recents, only wait if the current block doesn't shift it out if limit := uint64(len(snap.Signers)/2 + 1); number < limit || seen > number-limit { - log.Info("Signed recently, must wait for others") - return nil + return errors.New("signed recently, must wait for others") } } } diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 6ad9fc22b..7fa427f68 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -45,6 +45,11 @@ var ( maxUncles = 2 // Maximum number of uncles allowed in a single block allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks + // calcDifficultyEip4345 is the difficulty adjustment algorithm as specified by EIP 4345. + // It offsets the bomb a total of 10.7M blocks. + // Specification EIP-4345: https://eips.ethereum.org/EIPS/eip-4345 + calcDifficultyEip4345 = makeDifficultyCalculator(big.NewInt(10_700_000)) + // calcDifficultyEip3554 is the difficulty adjustment algorithm as specified by EIP 3554. // It offsets the bomb a total of 9.7M blocks. 
// Specification EIP-3554: https://eips.ethereum.org/EIPS/eip-3554 @@ -330,6 +335,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uin func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int { next := new(big.Int).Add(parent.Number, big1) switch { + case config.IsArrowGlacier(next): + return calcDifficultyEip4345(time, parent) case config.IsLondon(next): return calcDifficultyEip3554(time, parent) case config.IsMuirGlacier(next): diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index ec06d02a5..4e33d99c8 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -136,13 +136,16 @@ func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(bu if err != nil { return nil, nil, nil, err } - if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil { + if err = ensureSize(dump, int64(len(dumpMagic))*4+int64(size)); err != nil { + dump.Close() + os.Remove(temp) return nil, nil, nil, err } // Memory map the file for writing and fill it with the generator mem, buffer, err := memoryMapFile(dump, true) if err != nil { dump.Close() + os.Remove(temp) return nil, nil, nil, err } copy(buffer, dumpMagic) @@ -358,7 +361,7 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) { if err != nil { logger.Error("Failed to generate mapped ethash dataset", "err", err) - d.dataset = make([]uint32, dsize/2) + d.dataset = make([]uint32, dsize/4) generateDataset(d.dataset, d.epoch, cache) } // Iterate over all previous instances and delete old ones diff --git a/consensus/ethash/mmap_help_linux.go b/consensus/ethash/mmap_help_linux.go new file mode 100644 index 000000000..b40a1dd25 --- /dev/null +++ b/consensus/ethash/mmap_help_linux.go @@ -0,0 +1,35 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +//go:build linux +// +build linux + +package ethash + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// ensureSize expands the file to the given size. This is to prevent runtime +// errors later on, if the underlying file expands beyond the disk capacity, +// even though it ostensibly is already expanded, but due to being sparse +// does not actually occupy the full declared size on disk. +func ensureSize(f *os.File, size int64) error { + // Docs: https://www.man7.org/linux/man-pages/man2/fallocate.2.html + return unix.Fallocate(int(f.Fd()), 0, 0, size) +} diff --git a/consensus/ethash/mmap_help_other.go b/consensus/ethash/mmap_help_other.go new file mode 100644 index 000000000..8ad514ce4 --- /dev/null +++ b/consensus/ethash/mmap_help_other.go @@ -0,0 +1,36 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +//go:build !linux +// +build !linux + +package ethash + +import ( + "os" +) + +// ensureSize expands the file to the given size. This is to prevent runtime +// errors later on, if the underlying file expands beyond the disk capacity, +// even though it ostensibly is already expanded, but due to being sparse +// does not actually occupy the full declared size on disk. +func ensureSize(f *os.File, size int64) error { + // On systems which do not support fallocate, we merely truncate it. + // More robust alternatives would be to + // - Use posix_fallocate, or + // - explicitly fill the file with zeroes. + return f.Truncate(size) +} diff --git a/core/bench_test.go b/core/bench_test.go index ce288d372..959979763 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -75,7 +75,7 @@ var ( // This is the content of the genesis block used by the benchmarks. benchRootKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") benchRootAddr = crypto.PubkeyToAddress(benchRootKey.PublicKey) - benchRootFunds = math.BigPow(2, 100) + benchRootFunds = math.BigPow(2, 200) ) // genValueTx returns a block generator that includes a single @@ -86,7 +86,19 @@ func genValueTx(nbytes int) func(int, *BlockGen) { toaddr := common.Address{} data := make([]byte, nbytes) gas, _ := IntrinsicGas(data, nil, false, false, false) - tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, nil, data), types.HomesteadSigner{}, benchRootKey) + signer := types.MakeSigner(gen.config, big.NewInt(int64(i))) + gasPrice := big.NewInt(0) + if gen.header.BaseFee != nil { + gasPrice = gen.header.BaseFee + } + tx, _ := types.SignNewTx(benchRootKey, signer, &types.LegacyTx{ + Nonce: gen.TxNonce(benchRootAddr), + To: &toaddr, + Value: big.NewInt(1), + Gas: gas, + Data: data, + GasPrice: gasPrice, + }) gen.AddTx(tx) } } @@ -110,24 +122,38 @@ func init() { // and fills the blocks with many small transactions. 
func genTxRing(naccounts int) func(int, *BlockGen) { from := 0 + availableFunds := new(big.Int).Set(benchRootFunds) return func(i int, gen *BlockGen) { block := gen.PrevBlock(i - 1) gas := block.GasLimit() + gasPrice := big.NewInt(0) + if gen.header.BaseFee != nil { + gasPrice = gen.header.BaseFee + } + signer := types.MakeSigner(gen.config, big.NewInt(int64(i))) for { gas -= params.TxGas if gas < params.TxGas { break } to := (from + 1) % naccounts - tx := types.NewTransaction( - gen.TxNonce(ringAddrs[from]), - ringAddrs[to], - benchRootFunds, - params.TxGas, - nil, - nil, - ) - tx, _ = types.SignTx(tx, types.HomesteadSigner{}, ringKeys[from]) + burn := new(big.Int).SetUint64(params.TxGas) + burn.Mul(burn, gen.header.BaseFee) + availableFunds.Sub(availableFunds, burn) + if availableFunds.Cmp(big.NewInt(1)) < 0 { + panic("not enough funds") + } + tx, err := types.SignNewTx(ringKeys[from], signer, + &types.LegacyTx{ + Nonce: gen.TxNonce(ringAddrs[from]), + To: &ringAddrs[to], + Value: availableFunds, + Gas: params.TxGas, + GasPrice: gasPrice, + }) + if err != nil { + panic(err) + } gen.AddTx(tx) from = to } @@ -245,6 +271,7 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) { block := types.NewBlockWithHeader(header) rawdb.WriteBody(db, hash, n, block.Body()) rawdb.WriteReceipts(db, hash, n, nil) + rawdb.WriteHeadBlockHash(db, hash) } } } @@ -278,6 +305,8 @@ func benchReadChain(b *testing.B, full bool, count uint64) { } makeChainForBench(db, full, count) db.Close() + cacheConfig := *defaultCacheConfig + cacheConfig.TrieDirtyDisabled = true b.ReportAllocs() b.ResetTimer() @@ -287,7 +316,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) { if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) } - chain, err := NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) + chain, err := NewBlockChain(db, &cacheConfig, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) if err != nil { b.Fatalf("error creating chain: %v", err) } diff --git a/core/blockchain.go b/core/blockchain.go index 79e48769e..2aed06173 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1438,11 +1438,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er // Peek the error for the first block to decide the directing import logic it := newInsertIterator(chain, results, bc.validator) - block, err := it.next() - // Left-trim all the known blocks - if err == ErrKnownBlock { + // Left-trim all the known blocks that don't need to build snapshot + if bc.skipBlock(err, it) { // First block (and state) is known // 1. We did a roll-back, and should now do a re-import // 2. The block is stored as a sidechain, and is lying about it's stateroot, and passes a stateroot @@ -1453,7 +1452,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er localTd = bc.GetTd(current.Hash(), current.NumberU64()) externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1) // The first block can't be nil ) - for block != nil && err == ErrKnownBlock { + for block != nil && bc.skipBlock(err, it) { externTd = new(big.Int).Add(externTd, block.Difficulty()) if localTd.Cmp(externTd) < 0 { break @@ -1471,7 +1470,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er // When node runs a fast sync again, it can re-import a batch of known blocks via // `insertChain` while a part of them have higher total difficulty than current // head full block(new pivot point). 
- for block != nil && err == ErrKnownBlock { + for block != nil && bc.skipBlock(err, it) { log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash()) if err := bc.writeKnownBlock(block); err != nil { return it.index, err @@ -1503,8 +1502,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er // If there are any still remaining, mark as ignored return it.index, err - // Some other error occurred, abort - case err != nil: + // Some other error(except ErrKnownBlock) occurred, abort. + // ErrKnownBlock is allowed here since some known blocks + // still need re-execution to generate snapshots that are missing + case err != nil && !errors.Is(err, ErrKnownBlock): bc.futureBlocks.Remove(block.Hash()) stats.ignored += len(it.chain) bc.reportBlock(block, nil, err) @@ -1522,7 +1523,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er } }() - for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() { + for ; block != nil && err == nil || errors.Is(err, ErrKnownBlock); block, err = it.next() { // If the chain is terminating, stop processing blocks if bc.insertStopped() { log.Debug("Abort during block processing") @@ -1537,8 +1538,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er // Clique blocks where they can share state among each other, so importing an // older block might complete the state of the subsequent one. In this case, // just skip the block (we already validated it once fully (and crashed), since - // its header and body was already in the database). - if err == ErrKnownBlock { + // its header and body was already in the database). But if the corresponding + // snapshot layer is missing, forcibly rerun the execution to build it. + if bc.skipBlock(err, it) { logger := log.Debug if bc.chainConfig.Clique == nil { logger = log.Warn @@ -2016,6 +2018,47 @@ func (bc *BlockChain) futureBlocksLoop() { } } +// skipBlock returns 'true', if the block being imported can be skipped over, meaning +// that the block does not need to be processed but can be considered already fully 'done'. +func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool { + // We can only ever bypass processing if the only error returned by the validator + // is ErrKnownBlock, which means all checks passed, but we already have the block + // and state. + if !errors.Is(err, ErrKnownBlock) { + return false + } + // If we're not using snapshots, we can skip this, since we have both block + // and (trie-) state + if bc.snaps == nil { + return true + } + var ( + header = it.current() // header can't be nil + parentRoot common.Hash + ) + // If we also have the snapshot-state, we can skip the processing. + if bc.snaps.Snapshot(header.Root) != nil { + return true + } + // In this case, we have the trie-state but not snapshot-state. If the parent + // snapshot-state exists, we need to process this in order to not get a gap + // in the snapshot layers. + // Resolve parent block + if parent := it.previous(); parent != nil { + parentRoot = parent.Root + } else if parent = bc.GetHeaderByHash(header.ParentHash); parent != nil { + parentRoot = parent.Root + } + if parentRoot == (common.Hash{}) { + return false // Theoretically impossible case + } + // Parent is also missing snapshot: we can skip this. Otherwise process. 
+ if bc.snaps.Snapshot(parentRoot) == nil { + return true + } + return false +} + // maintainTxIndex is responsible for the construction and deletion of the // transaction index. // diff --git a/core/blockchain_insert.go b/core/blockchain_insert.go index cb8473c08..446487027 100644 --- a/core/blockchain_insert.go +++ b/core/blockchain_insert.go @@ -150,6 +150,14 @@ func (it *insertIterator) previous() *types.Header { return it.chain[it.index-1].Header() } +// current returns the current header that is being processed, or nil. +func (it *insertIterator) current() *types.Header { + if it.index == -1 || it.index >= len(it.chain) { + return nil + } + return it.chain[it.index].Header() +} + // first returns the first block in the it. func (it *insertIterator) first() *types.Block { return it.chain[0] diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go index aca5546e2..f4f762078 100644 --- a/core/blockchain_repair_test.go +++ b/core/blockchain_repair_test.go @@ -1863,3 +1863,124 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen) } } + +// TestIssue23496 tests scenario described in https://github.com/ethereum/go-ethereum/pull/23496#issuecomment-926393893 +// Credits to @zzyalbert for finding the issue. +// +// Local chain owns these blocks: +// G B1 B2 B3 B4 +// B1: state committed +// B2: snapshot disk layer +// B3: state committed +// B4: head block +// +// Crash happens without fully persisting snapshot and in-memory states, +// chain rewinds itself to the B1 (skip B3 in order to recover snapshot) +// In this case the snapshot layer of B3 is not created because of existent +// state. +func TestIssue23496(t *testing.T) { + // It's hard to follow the test case, visualize the input + //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + + // Create a temporary persistent database + datadir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("Failed to create temporary datadir: %v", err) + } + os.RemoveAll(datadir) + + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false) + if err != nil { + t.Fatalf("Failed to create persistent database: %v", err) + } + defer db.Close() // Might double close, should be fine + + // Initialize a fresh chain + var ( + genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db) + engine = ethash.NewFullFaker() + config = &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieTimeLimit: 5 * time.Minute, + SnapshotLimit: 256, + SnapshotWait: true, + } + ) + chain, err := NewBlockChain(db, config, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to create chain: %v", err) + } + blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), 4, func(i int, b *BlockGen) { + b.SetCoinbase(common.Address{0x02}) + b.SetDifficulty(big.NewInt(1000000)) + }) + + // Insert block B1 and commit the state into disk + if _, err := chain.InsertChain(blocks[:1]); err != nil { + t.Fatalf("Failed to import canonical chain start: %v", err) + } + chain.stateCache.TrieDB().Commit(blocks[0].Root(), true, nil) + + // Insert block B2 and commit the snapshot into disk + if _, err := chain.InsertChain(blocks[1:2]); err != nil { + t.Fatalf("Failed to import canonical chain start: %v", err) + } + if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil { + 
t.Fatalf("Failed to flatten snapshots: %v", err) + } + + // Insert block B3 and commit the state into disk + if _, err := chain.InsertChain(blocks[2:3]); err != nil { + t.Fatalf("Failed to import canonical chain start: %v", err) + } + chain.stateCache.TrieDB().Commit(blocks[2].Root(), true, nil) + + // Insert the remaining blocks + if _, err := chain.InsertChain(blocks[3:]); err != nil { + t.Fatalf("Failed to import canonical chain tail: %v", err) + } + + // Pull the plug on the database, simulating a hard crash + db.Close() + + // Start a new blockchain back up and see where the repair leads us + db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false) + if err != nil { + t.Fatalf("Failed to reopen persistent database: %v", err) + } + defer db.Close() + + chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to recreate chain: %v", err) + } + defer chain.Stop() + + if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) { + t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4) + } + if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) { + t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4)) + } + if head := chain.CurrentBlock(); head.NumberU64() != uint64(1) { + t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(1)) + } + + // Reinsert B2-B4 + if _, err := chain.InsertChain(blocks[1:]); err != nil { + t.Fatalf("Failed to import canonical chain tail: %v", err) + } + if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) { + t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4) + } + if head := chain.CurrentFastBlock(); head.NumberU64() != uint64(4) { + t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), uint64(4)) + } + if head := chain.CurrentBlock(); head.NumberU64() != uint64(4) { + t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), uint64(4)) + } + if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil { + t.Error("Failed to regenerate the snapshot of known state") + } +} diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 62036ae75..80d07eb30 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -360,7 +360,7 @@ func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) } func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) } func testReorgLong(t *testing.T, full bool) { - testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280, full) + testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full) } // Tests that reorganising a short difficult chain after a long easy one @@ -380,7 +380,7 @@ func testReorgShort(t *testing.T, full bool) { for i := 0; i < len(diff); i++ { diff[i] = -9 } - testReorg(t, easy, diff, 12615120, full) + testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full) } func testReorg(t *testing.T, first, second []int64, td int64, full bool) { @@ -2385,7 +2385,7 @@ func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks in for txi := 0; txi < numTxs; txi++ { uniq := uint64(i*numTxs + txi) recipient := recipientFn(uniq) - tx, err := types.SignTx(types.NewTransaction(uniq, recipient, big.NewInt(1), params.TxGas, big.NewInt(1), nil), signer, testBankKey) + tx, err := types.SignTx(types.NewTransaction(uniq, recipient, big.NewInt(1), params.TxGas, 
block.header.BaseFee, nil), signer, testBankKey) if err != nil { b.Error(err) } diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index 916bffadc..84c34561d 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -63,8 +63,10 @@ func TestCreation(t *testing.T) { {12243999, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block {12244000, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block {12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block - {12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 0}}, // First London block - {20000000, ID{Hash: checksumToBytes(0xb715077d), Next: 0}}, // Future London block + {12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block + {13772999, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block + {13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, /// First Arrow Glacier block + {20000000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, // Future Arrow Glacier block }, }, // Ropsten test cases @@ -205,11 +207,11 @@ func TestValidation(t *testing.T) { // Local is mainnet Petersburg, remote is Rinkeby Petersburg. {7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale}, - // Local is mainnet London, far in the future. Remote announces Gopherium (non existing fork) + // Local is mainnet Arrow Glacier, far in the future. Remote announces Gopherium (non existing fork) // at some future block 88888888, for itself, but past block for local. Local is incompatible. // // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess). - {88888888, ID{Hash: checksumToBytes(0xb715077d), Next: 88888888}, ErrLocalIncompatibleOrStale}, + {88888888, ID{Hash: checksumToBytes(0x20c327fc), Next: 88888888}, ErrLocalIncompatibleOrStale}, // Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing // fork) at block 7279999, before Petersburg. Local is incompatible. diff --git a/core/genesis.go b/core/genesis.go index 38ace4920..37cc96fe6 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -158,7 +158,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig return SetupGenesisBlockWithOverride(db, genesis, nil) } -func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideLondon *big.Int) (*params.ChainConfig, common.Hash, error) { +func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideArrowGlacier *big.Int) (*params.ChainConfig, common.Hash, error) { if genesis != nil && genesis.Config == nil { return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig } @@ -204,8 +204,8 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override } // Get the existing chain configuration. 
newcfg := genesis.configOrDefault(stored) - if overrideLondon != nil { - newcfg.LondonBlock = overrideLondon + if overrideArrowGlacier != nil { + newcfg.ArrowGlacierBlock = overrideArrowGlacier } if err := newcfg.CheckConfigForkOrder(); err != nil { return newcfg, common.Hash{}, err @@ -244,6 +244,8 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { return params.MainnetChainConfig case ghash == params.RopstenGenesisHash: return params.RopstenChainConfig + case ghash == params.SepoliaGenesisHash: + return params.SepoliaChainConfig case ghash == params.RinkebyGenesisHash: return params.RinkebyChainConfig case ghash == params.GoerliGenesisHash: @@ -322,7 +324,7 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) { if config.Clique != nil && len(block.Extra()) == 0 { return nil, errors.New("can't start clique chain without signers") } - rawdb.WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty) + rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty()) rawdb.WriteBlock(db, block) rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil) rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) @@ -400,6 +402,19 @@ func DefaultGoerliGenesisBlock() *Genesis { } } +// DefaultSepoliaGenesisBlock returns the Sepolia network genesis block. +func DefaultSepoliaGenesisBlock() *Genesis { + return &Genesis{ + Config: params.SepoliaChainConfig, + Nonce: 0, + ExtraData: []byte("Sepolia, Athens, Attica, Greece!"), + GasLimit: 0x1c9c380, + Difficulty: big.NewInt(0x20000), + Timestamp: 1633267481, + Alloc: decodePrealloc(sepoliaAllocData), + } +} + // DeveloperGenesisBlock returns the 'geth --dev' genesis block. func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis { // Override the default period to the user requested one diff --git a/core/genesis_alloc.go b/core/genesis_alloc.go index ee542334b..3d053904e 100644 --- a/core/genesis_alloc.go +++ b/core/genesis_alloc.go @@ -26,3 +26,4 @@ const ropstenAllocData = "\xf9\x03\xa4\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03 const rinkebyAllocData = "\xf9\x03\xb7\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 
\x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xf6\x941\xb9\x8d\x14\x00{\xde\xe67)\x80\x86\x98\x8a\v\xbd1\x18E#\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" const goerliAllocData = "\xf9\x04\x06\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 
\x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xe0\x94L*\xe4\x82Y5\x05\xf0\x16<\xde\xfc\a>\x81\xc6<\xdaA\a\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe0\x94\xa8\xe8\xf1G2e\x8eKQ\xe8q\x191\x05:\x8ai\xba\xf2\xb1\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe1\x94\u0665\x17\x9f\t\x1d\x85\x05\x1d<\x98'\x85\xef\xd1E\\\uc199\x8b\bE\x95\x16\x14\x01HJ\x00\x00\x00\xe1\x94\u08bdBX\xd2v\x887\xba\xa2j(\xfeq\xdc\a\x9f\x84\u01cbJG\xe3\xc1$H\xf4\xad\x00\x00\x00" const calaverasAllocData = "\xf9\x06\x14\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 
\x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xf6\x94\x0e\x89\xe2\xae\xdb\x1c\xfc\u06d4$\xd4\x1a\x1f!\x8fA2s\x81r\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x10A\xaf\xbc\xb3Y\u0568\xdcX\xc1[/\xf5\x13T\xff\x8a!}\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94#o\xf1\xe9t\x19\xae\x93\xad\x80\xca\xfb\xaa!\"\f]x\xfb}\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94`\xad\xc0\xf8\x9aA\xaf#|\xe75T\xed\xe1p\xd73\xec\x14\xe0\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94y\x9d2\x9e_X4\x19\x16|\xd7\"\x96$\x85\x92n3\x8fJ\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94|\xf5\xb7\x9b\xfe)\x1ag\xab\x02\xb3\x93\xe4V\xcc\xc4\xc2f\xf7S\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x8a\x8e\xaf\xb1\xcfb\xbf\xbe\xb1t\x17i\xda\xe1\xa9\xddG\x99a\x92\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x8b\xa1\xf1\tU\x1b\xd42\x800\x12dZ\xc16\xdd\xd6M\xbar\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\xb0*.\xda\x1b1\u007f\xbd\x16v\x01(\x83k\n\u015bV\x0e\x9d\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\xba\xdc\r\xe9\xe0yK\x04\x9b^\xa6<>\x1ei\x8a4v\xc1r\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\xf00\v\ue24a\xe2r\xeb4~\x83i\xac\fv\xdfB\xc9?\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\xfe;U~\x8f\xb6+\x89\xf4\x91kr\x1b\xe5\\\ub08d\xbds\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" +const sepoliaAllocData = "\xf9\x01\xee\u0791i\x16\xa8{\x823?BE\x04f#\xb27\x94\xc6\\\x8b\bE\x95\x16\x14\x01HJ\x00\x00\x00\xe1\x94\x10\xf5\xd4XT\xe08\a\x14\x85\xac\x9e@#\b\u03c0\xd2\xd2\xfe\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\u0794y\x9d2\x9e_X4\x19\x16|\xd7\"\x96$\x85\x92n3\x8fJ\x88\r\u0db3\xa7d\x00\x00\xe0\x94|\xf5\xb7\x9b\xfe)\x1ag\xab\x02\xb3\x93\xe4V\xcc\xc4\xc2f\xf7S\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\x8b\u007f\tw\xbbO\x0f\xbepv\xfa\"\xbc$\xac\xa0CX?^\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xa2\xa6\xd949\x14O\xfeM'\xc9\xe0\x88\xdc\u0637\x83\x94bc\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xaa\xec\x869DA\xf9\x15\xbc\xe3\xe6\xab9\x99w\xe9\x90o;i\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\u1532\x1c3\xde\x1f\xab?\xa1T\x99\xc6+Y\xfe\f\xc3%\x00 \u044bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe0\x94\xbc\x11)Y6\xaay\u0554\x13\x9d\xe1\xb2\xe1&)AO;\u06ca\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xbe\xef2\xca[\x9a\x19\x8d'\xb4\xe0/LpC\x9f\xe6\x03V\u03ca\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe1\x94\xd7\xd7lX\xb3\xa5\x19\xe9\xfal\xc4\xd2-\xc0\x17%\x9b\u011f\x1e\x8bR\xb7\xd2\xdc\xc8\f\xd2\xe4\x00\x00\x00\xe0\x94\xd7\xed\xdbx\xed)[<\x96)$\x0e\x89$\xfb\x8d\x88t\xdd\u060a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\u0665\x17\x9f\t\x1d\x85\x05\x1d<\x98'\x85\xef\xd1E\\\uc199\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xe2\xe2e\x90(\x147\x84\xd5W\xbc\xeco\xf3\xa0r\x10H\x88\n\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00\xe0\x94\xf4|\xae\x1c\xf7\x9c\xa6u\x8b\xfcx}\xbd!\u6f7eq\x12\xb8\x8a\xd3\xc2\x1b\xce\xcc\xed\xa1\x00\x00\x00" diff --git a/core/genesis_test.go b/core/genesis_test.go index 055be2796..f3d6b23e5 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -30,25 +30,6 @@ import ( "github.com/ethereum/go-ethereum/params" ) -func TestDefaultGenesisBlock(t *testing.T) { - block := DefaultGenesisBlock().ToBlock(nil) - if block.Hash() != params.MainnetGenesisHash { - t.Errorf("wrong mainnet genesis hash, got %v, want %v", block.Hash(), params.MainnetGenesisHash) - } - block = 
DefaultRopstenGenesisBlock().ToBlock(nil) - if block.Hash() != params.RopstenGenesisHash { - t.Errorf("wrong ropsten genesis hash, got %v, want %v", block.Hash(), params.RopstenGenesisHash) - } - block = DefaultRinkebyGenesisBlock().ToBlock(nil) - if block.Hash() != params.RinkebyGenesisHash { - t.Errorf("wrong rinkeby genesis hash, got %v, want %v", block.Hash(), params.RinkebyGenesisHash) - } - block = DefaultGoerliGenesisBlock().ToBlock(nil) - if block.Hash() != params.GoerliGenesisHash { - t.Errorf("wrong goerli genesis hash, got %v, want %v", block.Hash(), params.GoerliGenesisHash) - } -} - func TestInvalidCliqueConfig(t *testing.T) { block := DefaultGoerliGenesisBlock() block.ExtraData = []byte{} @@ -179,33 +160,56 @@ func TestSetupGenesis(t *testing.T) { } } -// TestGenesisHashes checks the congruity of default genesis data to corresponding hardcoded genesis hash values. +// TestGenesisHashes checks the congruity of default genesis data to +// corresponding hardcoded genesis hash values. func TestGenesisHashes(t *testing.T) { - cases := []struct { + for i, c := range []struct { genesis *Genesis - hash common.Hash + want common.Hash }{ - { - genesis: DefaultGenesisBlock(), - hash: params.MainnetGenesisHash, - }, - { - genesis: DefaultGoerliGenesisBlock(), - hash: params.GoerliGenesisHash, - }, - { - genesis: DefaultRopstenGenesisBlock(), - hash: params.RopstenGenesisHash, - }, - { - genesis: DefaultRinkebyGenesisBlock(), - hash: params.RinkebyGenesisHash, - }, - } - for i, c := range cases { - b := c.genesis.MustCommit(rawdb.NewMemoryDatabase()) - if got := b.Hash(); got != c.hash { - t.Errorf("case: %d, want: %s, got: %s", i, c.hash.Hex(), got.Hex()) + {DefaultGenesisBlock(), params.MainnetGenesisHash}, + {DefaultGoerliGenesisBlock(), params.GoerliGenesisHash}, + {DefaultRopstenGenesisBlock(), params.RopstenGenesisHash}, + {DefaultRinkebyGenesisBlock(), params.RinkebyGenesisHash}, + {DefaultSepoliaGenesisBlock(), params.SepoliaGenesisHash}, + } { + // Test via MustCommit + if have := c.genesis.MustCommit(rawdb.NewMemoryDatabase()).Hash(); have != c.want { + t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex()) + } + // Test via ToBlock + if have := c.genesis.ToBlock(nil).Hash(); have != c.want { + t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex()) } } } + +func TestGenesis_Commit(t *testing.T) { + genesis := &Genesis{ + BaseFee: big.NewInt(params.InitialBaseFee), + Config: params.TestChainConfig, + // difficulty is nil + } + + db := rawdb.NewMemoryDatabase() + genesisBlock, err := genesis.Commit(db) + if err != nil { + t.Fatal(err) + } + + if genesis.Difficulty != nil { + t.Fatalf("assumption wrong") + } + + // This value should have been set as default in the ToBlock method. + if genesisBlock.Difficulty().Cmp(params.GenesisDifficulty) != 0 { + t.Errorf("assumption wrong: want: %d, got: %v", params.GenesisDifficulty, genesisBlock.Difficulty()) + } + + // Expect the stored total difficulty to be the difficulty of the genesis block. + stored := rawdb.ReadTd(db, genesisBlock.Hash(), genesisBlock.NumberU64()) + + if stored.Cmp(genesisBlock.Difficulty()) != 0 { + t.Errorf("inequal difficulty; stored: %v, genesisBlock: %v", stored, genesisBlock.Difficulty()) + } +} diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index ed1c71e20..7f26c3a42 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -35,20 +35,15 @@ import ( // ReadCanonicalHash retrieves the hash assigned to a canonical block number. 
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash { - data, _ := db.Ancient(freezerHashTable, number) - if len(data) == 0 { - data, _ = db.Get(headerHashKey(number)) - // In the background freezer is moving data from leveldb to flatten files. - // So during the first check for ancient db, the data is not yet in there, - // but when we reach into leveldb, the data was already moved. That would - // result in a not found error. + var data []byte + db.ReadAncients(func(reader ethdb.AncientReader) error { + data, _ = reader.Ancient(freezerHashTable, number) if len(data) == 0 { - data, _ = db.Ancient(freezerHashTable, number) + // Get it by hash from leveldb + data, _ = db.Get(headerHashKey(number)) } - } - if len(data) == 0 { - return common.Hash{} - } + return nil + }) return common.BytesToHash(data) } @@ -304,32 +299,25 @@ func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) { // ReadHeaderRLP retrieves a block header in its raw RLP database encoding. func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { - // First try to look up the data in ancient database. Extra hash - // comparison is necessary since ancient database only maintains - // the canonical data. - data, _ := db.Ancient(freezerHeaderTable, number) - if len(data) > 0 && crypto.Keccak256Hash(data) == hash { - return data - } - // Then try to look up the data in leveldb. - data, _ = db.Get(headerKey(number, hash)) - if len(data) > 0 { - return data - } - // In the background freezer is moving data from leveldb to flatten files. - // So during the first check for ancient db, the data is not yet in there, - // but when we reach into leveldb, the data was already moved. That would - // result in a not found error. - data, _ = db.Ancient(freezerHeaderTable, number) - if len(data) > 0 && crypto.Keccak256Hash(data) == hash { - return data - } - return nil // Can't find the data anywhere. + var data []byte + db.ReadAncients(func(reader ethdb.AncientReader) error { + // First try to look up the data in ancient database. Extra hash + // comparison is necessary since ancient database only maintains + // the canonical data. + data, _ = reader.Ancient(freezerHeaderTable, number) + if len(data) > 0 && crypto.Keccak256Hash(data) == hash { + return nil + } + // If not, try reading from leveldb + data, _ = db.Get(headerKey(number, hash)) + return nil + }) + return data } // HasHeader verifies the existence of a block header corresponding to the hash. func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool { - if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash { + if isCanon(db, number, hash) { return true } if has, err := db.Has(headerKey(number, hash)); !has || err != nil { @@ -389,53 +377,48 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number } } +// isCanon is an internal utility method, to check whether the given number/hash +// is part of the ancient (canon) set. +func isCanon(reader ethdb.AncientReader, number uint64, hash common.Hash) bool { + h, err := reader.Ancient(freezerHashTable, number) + if err != nil { + return false + } + return bytes.Equal(h, hash[:]) +} + // ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding. func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { // First try to look up the data in ancient database. Extra hash // comparison is necessary since ancient database only maintains // the canonical data. 
- data, _ := db.Ancient(freezerBodiesTable, number) - if len(data) > 0 { - h, _ := db.Ancient(freezerHashTable, number) - if common.BytesToHash(h) == hash { - return data + var data []byte + db.ReadAncients(func(reader ethdb.AncientReader) error { + // Check if the data is in ancients + if isCanon(reader, number, hash) { + data, _ = reader.Ancient(freezerBodiesTable, number) + return nil } - } - // Then try to look up the data in leveldb. - data, _ = db.Get(blockBodyKey(number, hash)) - if len(data) > 0 { - return data - } - // In the background freezer is moving data from leveldb to flatten files. - // So during the first check for ancient db, the data is not yet in there, - // but when we reach into leveldb, the data was already moved. That would - // result in a not found error. - data, _ = db.Ancient(freezerBodiesTable, number) - if len(data) > 0 { - h, _ := db.Ancient(freezerHashTable, number) - if common.BytesToHash(h) == hash { - return data - } - } - return nil // Can't find the data anywhere. + // If not, try reading from leveldb + data, _ = db.Get(blockBodyKey(number, hash)) + return nil + }) + return data } // ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical // block at number, in RLP encoding. func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue { - // If it's an ancient one, we don't need the canonical hash - data, _ := db.Ancient(freezerBodiesTable, number) - if len(data) == 0 { - // Need to get the hash - data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number))) - // In the background freezer is moving data from leveldb to flatten files. - // So during the first check for ancient db, the data is not yet in there, - // but when we reach into leveldb, the data was already moved. That would - // result in a not found error. - if len(data) == 0 { - data, _ = db.Ancient(freezerBodiesTable, number) + var data []byte + db.ReadAncients(func(reader ethdb.AncientReader) error { + data, _ = reader.Ancient(freezerBodiesTable, number) + if len(data) > 0 { + return nil } - } + // Get it by hash from leveldb + data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number))) + return nil + }) return data } @@ -448,7 +431,7 @@ func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp // HasBody verifies the existence of a block body corresponding to the hash. func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool { - if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash { + if isCanon(db, number, hash) { return true } if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil { @@ -489,33 +472,18 @@ func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { // ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding. func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { - // First try to look up the data in ancient database. Extra hash - // comparison is necessary since ancient database only maintains - // the canonical data. 
- data, _ := db.Ancient(freezerDifficultyTable, number) - if len(data) > 0 { - h, _ := db.Ancient(freezerHashTable, number) - if common.BytesToHash(h) == hash { - return data + var data []byte + db.ReadAncients(func(reader ethdb.AncientReader) error { + // Check if the data is in ancients + if isCanon(reader, number, hash) { + data, _ = reader.Ancient(freezerDifficultyTable, number) + return nil } - } - // Then try to look up the data in leveldb. - data, _ = db.Get(headerTDKey(number, hash)) - if len(data) > 0 { - return data - } - // In the background freezer is moving data from leveldb to flatten files. - // So during the first check for ancient db, the data is not yet in there, - // but when we reach into leveldb, the data was already moved. That would - // result in a not found error. - data, _ = db.Ancient(freezerDifficultyTable, number) - if len(data) > 0 { - h, _ := db.Ancient(freezerHashTable, number) - if common.BytesToHash(h) == hash { - return data - } - } - return nil // Can't find the data anywhere. + // If not, try reading from leveldb + data, _ = db.Get(headerTDKey(number, hash)) + return nil + }) + return data } // ReadTd retrieves a block's total difficulty corresponding to the hash. @@ -553,7 +521,7 @@ func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) { // HasReceipts verifies the existence of all the transaction receipts belonging // to a block. func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool { - if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash { + if isCanon(db, number, hash) { return true } if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil { @@ -564,33 +532,18 @@ func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool { // ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding. func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue { - // First try to look up the data in ancient database. Extra hash - // comparison is necessary since ancient database only maintains - // the canonical data. - data, _ := db.Ancient(freezerReceiptTable, number) - if len(data) > 0 { - h, _ := db.Ancient(freezerHashTable, number) - if common.BytesToHash(h) == hash { - return data + var data []byte + db.ReadAncients(func(reader ethdb.AncientReader) error { + // Check if the data is in ancients + if isCanon(reader, number, hash) { + data, _ = reader.Ancient(freezerReceiptTable, number) + return nil } - } - // Then try to look up the data in leveldb. - data, _ = db.Get(blockReceiptsKey(number, hash)) - if len(data) > 0 { - return data - } - // In the background freezer is moving data from leveldb to flatten files. - // So during the first check for ancient db, the data is not yet in there, - // but when we reach into leveldb, the data was already moved. That would - // result in a not found error. - data, _ = db.Ancient(freezerReceiptTable, number) - if len(data) > 0 { - h, _ := db.Ancient(freezerHashTable, number) - if common.BytesToHash(h) == hash { - return data - } - } - return nil // Can't find the data anywhere. + // If not, try reading from leveldb + data, _ = db.Get(blockReceiptsKey(number, hash)) + return nil + }) + return data } // ReadRawReceipts retrieves all the transaction receipts belonging to a block. 
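[Editor's note] The accessor rewrites above all share one shape: the ancient-table probe and the leveldb fallback now run inside a single db.ReadAncients closure, so the background freezer cannot migrate an entry between the two lookups (the race the deleted comments worked around). Below is a minimal sketch of that shape, assuming the ethdb.Reader / ethdb.AncientReader interfaces used in this patch; readCanonicalData and its key argument are illustrative names, not part of the change.

	package rawdb

	import "github.com/ethereum/go-ethereum/ethdb"

	// readCanonicalData is an illustrative helper (not part of this patch). It shows
	// the pattern used by the rewritten accessors: both the ancient probe and the
	// key-value fallback happen inside ReadAncients, i.e. under the freezer read lock.
	func readCanonicalData(db ethdb.Reader, kind string, number uint64, key []byte) []byte {
		var data []byte
		db.ReadAncients(func(reader ethdb.AncientReader) error {
			// Probe the freezer first; it only holds canonical data.
			data, _ = reader.Ancient(kind, number)
			if len(data) > 0 {
				return nil
			}
			// Not frozen (yet): fall back to leveldb. Because the whole closure runs
			// under the freezer's read lock, the entry cannot be moved out of leveldb
			// between these two lookups.
			data, _ = db.Get(key)
			return nil
		})
		return data
	}

The hash-checked variants above (ReadHeaderRLP, ReadBodyRLP, ReadTdRLP, ReadReceiptsRLP) follow the same pattern but gate the ancient probe on the isCanon helper, since the freezer only stores canonical entries.
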
diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go index 88446e079..df140de0c 100644 --- a/core/rawdb/accessors_snapshot.go +++ b/core/rawdb/accessors_snapshot.go @@ -47,7 +47,7 @@ func DeleteSnapshotDisabled(db ethdb.KeyValueWriter) { // ReadSnapshotRoot retrieves the root of the block whose state is contained in // the persisted snapshot. func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash { - data, _ := db.Get(snapshotRootKey) + data, _ := db.Get(SnapshotRootKey) if len(data) != common.HashLength { return common.Hash{} } @@ -57,7 +57,7 @@ func ReadSnapshotRoot(db ethdb.KeyValueReader) common.Hash { // WriteSnapshotRoot stores the root of the block whose state is contained in // the persisted snapshot. func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) { - if err := db.Put(snapshotRootKey, root[:]); err != nil { + if err := db.Put(SnapshotRootKey, root[:]); err != nil { log.Crit("Failed to store snapshot root", "err", err) } } @@ -67,7 +67,7 @@ func WriteSnapshotRoot(db ethdb.KeyValueWriter, root common.Hash) { // be used during updates, so a crash or failure will mark the entire snapshot // invalid. func DeleteSnapshotRoot(db ethdb.KeyValueWriter) { - if err := db.Delete(snapshotRootKey); err != nil { + if err := db.Delete(SnapshotRootKey); err != nil { log.Crit("Failed to remove snapshot root", "err", err) } } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 0e116ef99..c5af77667 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -89,8 +89,8 @@ func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) { return nil, errNotSupported } -// ReadAncients returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) ReadAncients(kind string, start, max, maxByteSize uint64) ([][]byte, error) { +// AncientRange returns an error as we don't have a backing chain freezer. +func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) { return nil, errNotSupported } @@ -119,6 +119,22 @@ func (db *nofreezedb) Sync() error { return errNotSupported } +func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReader) error) (err error) { + // Unlike other ancient-related methods, this method does not return + // errNotSupported when invoked. + // The reason for this is that the caller might want to do several things: + // 1. Check if something is in freezer, + // 2. If not, check leveldb. + // + // This will work, since the ancient-checks inside 'fn' will return errors, + // and the leveldb work will continue. + // + // If we instead were to return errNotSupported here, then the caller would + // have to explicitly check for that, having an extra clause to do the + // non-ancient operations. + return fn(db) +} + // NewDatabase creates a high level database on top of a given key-value data // store without a freezer moving immutable chain segments into cold storage. 
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database { @@ -355,7 +371,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { accountSnaps.Add(size) case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength): storageSnaps.Add(size) - case bytes.HasPrefix(key, preimagePrefix) && len(key) == (len(preimagePrefix)+common.HashLength): + case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength): preimages.Add(size) case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength): metadata.Add(size) @@ -377,7 +393,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { var accounted bool for _, meta := range [][]byte{ databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey, - fastTrieProgressKey, snapshotDisabledKey, snapshotRootKey, snapshotJournalKey, + fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey, snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, uncleanShutdownKey, badBlockKey, } { diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index d75ee4f00..f9b5563a1 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -80,8 +80,9 @@ type freezer struct { frozen uint64 // Number of blocks already frozen threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests) - // This lock synchronizes writers and the truncate operation. - writeLock sync.Mutex + // This lock synchronizes writers and the truncate operation, as well as + // the "atomic" (batched) read operations. + writeLock sync.RWMutex writeBatch *freezerBatch readonly bool @@ -201,12 +202,12 @@ func (f *freezer) Ancient(kind string, number uint64) ([]byte, error) { return nil, errUnknownTable } -// ReadAncients retrieves multiple items in sequence, starting from the index 'start'. +// AncientRange retrieves multiple items in sequence, starting from the index 'start'. // It will return // - at most 'max' items, // - at least 1 item (even if exceeding the maxByteSize), but will otherwise // return as many items as fit into maxByteSize. -func (f *freezer) ReadAncients(kind string, start, count, maxBytes uint64) ([][]byte, error) { +func (f *freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { if table := f.tables[kind]; table != nil { return table.RetrieveItems(start, count, maxBytes) } @@ -222,8 +223,8 @@ func (f *freezer) Ancients() (uint64, error) { func (f *freezer) AncientSize(kind string) (uint64, error) { // This needs the write lock to avoid data races on table fields. // Speed doesn't matter here, AncientSize is for debugging. - f.writeLock.Lock() - defer f.writeLock.Unlock() + f.writeLock.RLock() + defer f.writeLock.RUnlock() if table := f.tables[kind]; table != nil { return table.size() @@ -231,6 +232,14 @@ func (f *freezer) AncientSize(kind string) (uint64, error) { return 0, errUnknownTable } +// ReadAncients runs the given read operation while ensuring that no writes take place +// on the underlying freezer. +func (f *freezer) ReadAncients(fn func(ethdb.AncientReader) error) (err error) { + f.writeLock.RLock() + defer f.writeLock.RUnlock() + return fn(f) +} + // ModifyAncients runs the given write operation. 
func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) { if f.readonly { diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 2505ce90b..d432db2ab 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -48,8 +48,8 @@ var ( // snapshotDisabledKey flags that the snapshot should not be maintained due to initial sync. snapshotDisabledKey = []byte("SnapshotDisabled") - // snapshotRootKey tracks the hash of the last snapshot. - snapshotRootKey = []byte("SnapshotRoot") + // SnapshotRootKey tracks the hash of the last snapshot. + SnapshotRootKey = []byte("SnapshotRoot") // snapshotJournalKey tracks the in-memory diff layers across restarts. snapshotJournalKey = []byte("SnapshotJournal") @@ -90,7 +90,7 @@ var ( SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value CodePrefix = []byte("c") // CodePrefix + code hash -> account code - preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage + PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage configPrefix = []byte("ethereum-config-") // config prefix for the db // Chain index prefixes (use `i` + single byte to avoid mixing data types). @@ -207,9 +207,9 @@ func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte { return key } -// preimageKey = preimagePrefix + hash +// preimageKey = PreimagePrefix + hash func preimageKey(hash common.Hash) []byte { - return append(preimagePrefix, hash.Bytes()...) + return append(PreimagePrefix, hash.Bytes()...) } // codeKey = CodePrefix + hash diff --git a/core/rawdb/table.go b/core/rawdb/table.go index 02e23b517..91fc31b66 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -62,10 +62,10 @@ func (t *table) Ancient(kind string, number uint64) ([]byte, error) { return t.db.Ancient(kind, number) } -// ReadAncients is a noop passthrough that just forwards the request to the underlying +// AncientRange is a noop passthrough that just forwards the request to the underlying // database. -func (t *table) ReadAncients(kind string, start, count, maxBytes uint64) ([][]byte, error) { - return t.db.ReadAncients(kind, start, count, maxBytes) +func (t *table) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { + return t.db.AncientRange(kind, start, count, maxBytes) } // Ancients is a noop passthrough that just forwards the request to the underlying @@ -85,6 +85,10 @@ func (t *table) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (int64, erro return t.db.ModifyAncients(fn) } +func (t *table) ReadAncients(fn func(reader ethdb.AncientReader) error) (err error) { + return t.db.ReadAncients(fn) +} + // TruncateAncients is a noop passthrough that just forwards the request to the underlying // database. 
func (t *table) TruncateAncients(items uint64) error { diff --git a/core/rlp_test.go b/core/rlp_test.go index 3a90811e7..40bcef5e5 100644 --- a/core/rlp_test.go +++ b/core/rlp_test.go @@ -40,7 +40,7 @@ func getBlock(transactions int, uncles int, dataSize int) *types.Block { // A sender who makes transactions, has some funds key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000000000) + funds = big.NewInt(1_000_000_000_000_000_000) gspec = &Genesis{ Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}, diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go index 919af5fa8..e15c1d504 100644 --- a/core/state/snapshot/difflayer_test.go +++ b/core/state/snapshot/difflayer_test.go @@ -388,7 +388,7 @@ func BenchmarkJournal(b *testing.B) { } return newDiffLayer(parent, common.Hash{}, destructs, accounts, storage) } - layer := snapshot(new(diskLayer)) + layer := snapshot(emptyLayer()) for i := 1; i < 128; i++ { layer = fill(layer) } diff --git a/core/tx_list.go b/core/tx_list.go index ea96f3ebb..f141a03bb 100644 --- a/core/tx_list.go +++ b/core/tx_list.go @@ -295,9 +295,9 @@ func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Tran thresholdFeeCap := aFeeCap.Div(aFeeCap, b) thresholdTip := aTip.Div(aTip, b) - // Have to ensure that either the new fee cap or tip is higher than the + // We have to ensure that both the new fee cap and tip are higher than the // old ones as well as checking the percentage threshold to ensure that - // this is accurate for low (Wei-level) gas price replacements + // this is accurate for low (Wei-level) gas price replacements. if tx.GasFeeCapIntCmp(thresholdFeeCap) < 0 || tx.GasTipCapIntCmp(thresholdTip) < 0 { return false, nil } diff --git a/core/tx_list_test.go b/core/tx_list_test.go index 3a5842d2e..ef49cae1d 100644 --- a/core/tx_list_test.go +++ b/core/tx_list_test.go @@ -51,7 +51,7 @@ func TestStrictTxListAdd(t *testing.T) { } } -func BenchmarkTxListAdd(t *testing.B) { +func BenchmarkTxListAdd(b *testing.B) { // Generate a list of transactions to insert key, _ := crypto.GenerateKey() @@ -60,11 +60,13 @@ func BenchmarkTxListAdd(t *testing.B) { txs[i] = transaction(uint64(i), 0, key) } // Insert the transactions in a random order - list := newTxList(true) priceLimit := big.NewInt(int64(DefaultTxPoolConfig.PriceLimit)) - t.ResetTimer() - for _, v := range rand.Perm(len(txs)) { - list.Add(txs[v], DefaultTxPoolConfig.PriceBump) - list.Filter(priceLimit, DefaultTxPoolConfig.PriceBump) + b.ResetTimer() + for i := 0; i < b.N; i++ { + list := newTxList(true) + for _, v := range rand.Perm(len(txs)) { + list.Add(txs[v], DefaultTxPoolConfig.PriceBump) + list.Filter(priceLimit, DefaultTxPoolConfig.PriceBump) + } } } diff --git a/core/tx_noncer.go b/core/tx_noncer.go index aa87c643a..d6d220077 100644 --- a/core/tx_noncer.go +++ b/core/tx_noncer.go @@ -77,3 +77,11 @@ func (txn *txNoncer) setIfLower(addr common.Address, nonce uint64) { } txn.nonces[addr] = nonce } + +// setAll sets the nonces for all accounts to the given map. 
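The reworded comment in txList.Add above states the replacement rule: a replacement transaction must raise both the fee cap and the tip by at least priceBump percent. A standalone sketch of that check, using big.Int arithmetic so Wei-level prices round the same way as in the pool (the function name and shape are illustrative, not the pool code itself):

```go
package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
)

// meetsPriceBump reports whether newTx bumps BOTH the fee cap and the tip of
// oldTx by at least priceBump percent: threshold = old * (100 + bump) / 100.
func meetsPriceBump(oldTx, newTx *types.Transaction, priceBump uint64) bool {
	mul := big.NewInt(100 + int64(priceBump))
	div := big.NewInt(100)

	thresholdFeeCap := new(big.Int).Mul(mul, oldTx.GasFeeCap())
	thresholdFeeCap.Div(thresholdFeeCap, div)

	thresholdTip := new(big.Int).Mul(mul, oldTx.GasTipCap())
	thresholdTip.Div(thresholdTip, div)

	return newTx.GasFeeCapIntCmp(thresholdFeeCap) >= 0 &&
		newTx.GasTipCapIntCmp(thresholdTip) >= 0
}
```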
+func (txn *txNoncer) setAll(all map[common.Address]uint64) { + txn.lock.Lock() + defer txn.lock.Unlock() + + txn.nonces = all +} diff --git a/core/tx_pool.go b/core/tx_pool.go index 4f627fcb9..3329d736a 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -1182,16 +1182,18 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirt pendingBaseFee := misc.CalcBaseFee(pool.chainconfig, reset.newHead) pool.priced.SetBaseFee(pendingBaseFee) } + // Update all accounts to the latest known pending nonce + nonces := make(map[common.Address]uint64, len(pool.pending)) + for addr, list := range pool.pending { + highestPending := list.LastElement() + nonces[addr] = highestPending.Nonce() + 1 + } + pool.pendingNonces.setAll(nonces) } // Ensure pool.queue and pool.pending sizes stay within the configured limits. pool.truncatePending() pool.truncateQueue() - // Update all accounts to the latest known pending nonce - for addr, list := range pool.pending { - highestPending := list.LastElement() - pool.pendingNonces.set(addr, highestPending.Nonce()+1) - } dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg)) pool.changesSinceReorg = 0 // Reset change counter pool.mu.Unlock() diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index 1406c8df0..a7af27583 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -2540,3 +2540,24 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { pool.Stop() } } + +// Benchmarks the speed of batch transaction insertion in case of multiple accounts. +func BenchmarkPoolMultiAccountBatchInsert(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pool, _ := setupTxPool() + defer pool.Stop() + b.ReportAllocs() + batches := make(types.Transactions, b.N) + for i := 0; i < b.N; i++ { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + pool.currentState.AddBalance(account, big.NewInt(1000000)) + tx := transaction(uint64(0), 100000, key) + batches[i] = tx + } + // Benchmark importing the transactions into the queue + b.ResetTimer() + for _, tx := range batches { + pool.AddRemotesSync([]*types.Transaction{tx}) + } +} diff --git a/core/vm/evm.go b/core/vm/evm.go index 3b4bd69d7..07e961915 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -182,9 +182,14 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas if !evm.StateDB.Exist(addr) { if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { // Calling a non existing account, don't do anything, but ping the tracer - if evm.Config.Debug && evm.depth == 0 { - evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) - evm.Config.Tracer.CaptureEnd(ret, 0, 0, nil) + if evm.Config.Debug { + if evm.depth == 0 { + evm.Config.Tracer.CaptureStart(evm, caller.Address(), addr, false, input, gas, value) + evm.Config.Tracer.CaptureEnd(ret, 0, 0, nil) + } else { + evm.Config.Tracer.CaptureEnter(CALL, caller.Address(), addr, input, gas, value) + evm.Config.Tracer.CaptureExit(ret, 0, nil) + } } return nil, gas, nil } diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 9fb83799c..92d33388f 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -27,11 +27,11 @@ import ( // Config are the configuration options for the Interpreter type Config struct { - Debug bool // Enables debugging - Tracer Tracer // Opcode logger - NoRecursion bool // Disables call, callcode, delegate call and create - NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed 
for 0 price calls) - EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages + Debug bool // Enables debugging + Tracer EVMLogger // Opcode logger + NoRecursion bool // Disables call, callcode, delegate call and create + NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls) + EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages JumpTable [256]*operation // EVM instruction table, automatically populated if unset @@ -152,9 +152,9 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( pc = uint64(0) // program counter cost uint64 // copies used by tracer - pcCopy uint64 // needed for the deferred Tracer - gasCopy uint64 // for Tracer to log gas remaining before execution - logged bool // deferred Tracer should ignore already logged steps + pcCopy uint64 // needed for the deferred EVMLogger + gasCopy uint64 // for EVMLogger to log gas remaining before execution + logged bool // deferred EVMLogger should ignore already logged steps res []byte // result of the opcode execution function ) // Don't move this deferrred function, it's placed before the capturestate-deferred method, diff --git a/core/vm/logger.go b/core/vm/logger.go index 52dc0b8a0..048b84ff6 100644 --- a/core/vm/logger.go +++ b/core/vm/logger.go @@ -98,12 +98,12 @@ func (s *StructLog) ErrorString() string { return "" } -// Tracer is used to collect execution traces from an EVM transaction +// EVMLogger is used to collect execution traces from an EVM transaction // execution. CaptureState is called for each step of the VM with the // current VM state. // Note that reference types are actual VM data structures; make copies // if you need to retain them beyond the current call. -type Tracer interface { +type EVMLogger interface { CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) CaptureEnter(typ OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) @@ -112,7 +112,7 @@ type Tracer interface { CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) } -// StructLogger is an EVM state logger and implements Tracer. +// StructLogger is an EVM state logger and implements EVMLogger. // // StructLogger can capture state based on the given Log configuration and also keeps // a track record of modified storage which is used in reporting snapshots of the @@ -145,7 +145,7 @@ func (l *StructLogger) Reset() { l.err = nil } -// CaptureStart implements the Tracer interface to initialize the tracing operation. +// CaptureStart implements the EVMLogger interface to initialize the tracing operation. func (l *StructLogger) CaptureStart(env *EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { } @@ -210,7 +210,7 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui l.logs = append(l.logs, log) } -// CaptureFault implements the Tracer interface to trace an execution fault +// CaptureFault implements the EVMLogger interface to trace an execution fault // while running an opcode. 
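The interpreter and logger hunks above rename vm.Tracer to vm.EVMLogger; the behaviour is unchanged and the built-in StructLogger still satisfies the interface. A minimal sketch of wiring it into an EVM instance, with the block and transaction contexts left as placeholders the caller already has:

```go
package example

import (
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/params"
)

// newTracingEVM attaches a StructLogger (one concrete vm.EVMLogger) to the
// interpreter config; Debug must be set or the logger is never invoked.
func newTracingEVM(blockCtx vm.BlockContext, txCtx vm.TxContext, statedb vm.StateDB, chainCfg *params.ChainConfig) (*vm.EVM, *vm.StructLogger) {
	logger := vm.NewStructLogger(nil) // nil LogConfig: default capture settings
	evm := vm.NewEVM(blockCtx, txCtx, statedb, chainCfg, vm.Config{
		Debug:  true,
		Tracer: logger, // the field is now typed vm.EVMLogger
	})
	return evm, logger
}
```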
func (l *StructLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) { } diff --git a/eth/api_backend.go b/eth/api_backend.go index 01e68f678..a0704876a 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -352,8 +352,8 @@ func (b *EthAPIBackend) StartMining(threads int) error { return b.eth.StartMining(threads) } -func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) { - return b.eth.stateAtBlock(block, reexec, base, checkLive) +func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, error) { + return b.eth.stateAtBlock(block, reexec, base, checkLive, preferDisk) } func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { diff --git a/eth/backend.go b/eth/backend.go index e482bd1ad..68b385c9a 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -131,7 +131,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if err != nil { return nil, err } - chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideLondon) + chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideArrowGlacier) if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { return nil, genesisErr } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 52391d417..29b47af25 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -202,8 +202,8 @@ type Config struct { // CheckpointOracle is the configuration for checkpoint oracle. CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"` - // Berlin block override (TODO: remove after the fork) - OverrideLondon *big.Int `toml:",omitempty"` + // Arrow Glacier block override (TODO: remove after the fork) + OverrideArrowGlacier *big.Int `toml:",omitempty"` } // CreateConsensusEngine creates a consensus engine for the given chain configuration. 
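The backend and ethconfig hunks above rename the temporary fork override from OverrideLondon to OverrideArrowGlacier; it remains an optional *big.Int handed to SetupGenesisBlockWithOverride. A hedged sketch of setting it programmatically; the wrapper function is hypothetical and the block number is whatever the caller decides:

```go
package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
)

// withArrowGlacierOverride returns an eth config that forces Arrow Glacier to
// activate at the given block, regardless of what the genesis config says.
func withArrowGlacierOverride(block uint64) *ethconfig.Config {
	cfg := ethconfig.Defaults // value copy of the default config
	cfg.Genesis = core.DefaultGenesisBlock()
	cfg.OverrideArrowGlacier = new(big.Int).SetUint64(block)
	return &cfg
}
```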
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index ed4c92850..1f1ee3aaf 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -59,7 +59,7 @@ func (c Config) MarshalTOML() (interface{}, error) { RPCTxFeeCap float64 Checkpoint *params.TrustedCheckpoint `toml:",omitempty"` CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"` - OverrideLondon *big.Int `toml:",omitempty"` + OverrideArrowGlacier *big.Int `toml:",omitempty"` } var enc Config enc.Genesis = c.Genesis @@ -103,7 +103,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.RPCTxFeeCap = c.RPCTxFeeCap enc.Checkpoint = c.Checkpoint enc.CheckpointOracle = c.CheckpointOracle - enc.OverrideLondon = c.OverrideLondon + enc.OverrideArrowGlacier = c.OverrideArrowGlacier return &enc, nil } @@ -151,7 +151,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { RPCTxFeeCap *float64 Checkpoint *params.TrustedCheckpoint `toml:",omitempty"` CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"` - OverrideLondon *big.Int `toml:",omitempty"` + OverrideArrowGlacier *big.Int `toml:",omitempty"` } var dec Config if err := unmarshal(&dec); err != nil { @@ -280,8 +280,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.CheckpointOracle != nil { c.CheckpointOracle = dec.CheckpointOracle } - if dec.OverrideLondon != nil { - c.OverrideLondon = dec.OverrideLondon + if dec.OverrideArrowGlacier != nil { + c.OverrideArrowGlacier = dec.OverrideArrowGlacier } return nil } diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index 45983c97c..7624268a7 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -391,13 +391,14 @@ func (f *BlockFetcher) loop() { blockAnnounceDOSMeter.Mark(1) break } + if notification.number == 0 { + break + } // If we have a valid block number, check that it's potentially useful - if notification.number > 0 { - if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { - log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist) - blockAnnounceDropMeter.Mark(1) - break - } + if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { + log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist) + blockAnnounceDropMeter.Mark(1) + break } // All is well, schedule the announce if block's not yet downloading if _, ok := f.fetching[notification.hash]; ok { diff --git a/eth/filters/bench_test.go b/eth/filters/bench_test.go index 020db070e..9632f4195 100644 --- a/eth/filters/bench_test.go +++ b/eth/filters/bench_test.go @@ -62,6 +62,7 @@ func BenchmarkBloomBits32k(b *testing.B) { const benchFilterCnt = 2000 func benchmarkBloomBits(b *testing.B, sectionSize uint64) { + b.Skip("test disabled: this tests presume (and modify) an existing datadir.") benchDataDir := node.DefaultDataDir() + "/geth/chaindata" b.Log("Running bloombits benchmark section size:", sectionSize) @@ -155,6 +156,7 @@ func clearBloomBits(db ethdb.Database) { } func BenchmarkNoBloomBits(b *testing.B) { + b.Skip("test disabled: this tests presume (and modify) an existing datadir.") benchDataDir := node.DefaultDataDir() + "/geth/chaindata" b.Log("Running benchmark without bloombits") db, err := rawdb.NewLevelDBDatabase(benchDataDir, 
128, 1024, "", false) diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index fd25013cc..63a48f762 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -65,15 +65,19 @@ func BenchmarkFilters(b *testing.B) { case 2403: receipt := makeReceipt(addr1) gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) case 1034: receipt := makeReceipt(addr2) gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) case 34: receipt := makeReceipt(addr3) gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) case 99999: receipt := makeReceipt(addr4) gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) } }) diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index 1e249ae14..2d394200a 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -107,10 +107,13 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBacke signer = types.LatestSigner(gspec.Config) ) config.LondonBlock = londonBlock + config.ArrowGlacierBlock = londonBlock engine := ethash.NewFaker() db := rawdb.NewMemoryDatabase() - genesis, _ := gspec.Commit(db) - + genesis, err := gspec.Commit(db) + if err != nil { + t.Fatal(err) + } // Generate testing blocks blocks, _ := core.GenerateChain(gspec.Config, genesis, engine, db, testHead+1, func(i int, b *core.BlockGen) { b.SetCoinbase(common.Address{1}) diff --git a/eth/state_accessor.go b/eth/state_accessor.go index ca2002b60..c855f0100 100644 --- a/eth/state_accessor.go +++ b/eth/state_accessor.go @@ -35,7 +35,17 @@ import ( // are attempted to be reexecuted to generate the desired state. The optional // base layer statedb can be passed then it's regarded as the statedb of the // parent block. -func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (statedb *state.StateDB, err error) { +// Parameters: +// - block: The block for which we want the state (== state at the stateRoot of the parent) +// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state +// - base: If the caller is tracing multiple blocks, the caller can provide the parent state +// continuously from the callsite. +// - checkLive: if true, then the live 'blockchain' state database is used. If the caller wants to +// perform Commit or other 'save-to-disk' changes, this should be set to false to avoid +// storing trash persistently +// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is provided, +// it would be preferable to start from a fresh state, if we have it on disk. +func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) { var ( current *types.Block database state.Database @@ -50,6 +60,15 @@ func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64, base *state } } if base != nil { + if preferDisk { + // Create an ephemeral trie.Database for isolating the live one. Otherwise + // the internal junks created by tracing will be persisted into the disk.
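The parameter list documented above is the new five-argument stateAtBlock contract, which the exported EthAPIBackend.StateAtBlock mirrors; the hunk continues below with the preferDisk fast path. A sketch of the typical one-off call shape, with a locally declared interface so it stands alone (128 matches the default reexec depth used elsewhere in this patch):

```go
package example

import (
	"context"

	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
)

// stateAtBlocker mirrors the signature introduced above; declared locally so
// the sketch does not depend on the eth package internals.
type stateAtBlocker interface {
	StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB,
		checkLive, preferDisk bool) (*state.StateDB, error)
}

// traceOneBlock shows the one-off shape: no base state, read the live
// database (checkLive=true), and preferDisk is irrelevant without a base.
func traceOneBlock(ctx context.Context, b stateAtBlocker, parent *types.Block) (*state.StateDB, error) {
	return b.StateAtBlock(ctx, parent, 128, nil, true, false)
}
```

When tracing a whole chain the previous block's statedb is fed back in as base with checkLive=false, and preferDisk is flipped on once the in-memory trie data outgrows the limit introduced in the next hunk.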
+ database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16}) + if statedb, err = state.New(block.Root(), database, nil); err == nil { + log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number()) + return statedb, nil + } + } // The optional base statedb is given, mark the start point as parent block statedb, database, report = base, base.Database(), false current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) @@ -152,7 +171,7 @@ func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec } // Lookup the statedb of parent block from the live database, // otherwise regenerate it on the flight. - statedb, err := eth.stateAtBlock(parent, reexec, nil, true) + statedb, err := eth.stateAtBlock(parent, reexec, nil, true, false) if err != nil { return nil, vm.BlockContext{}, nil, err } diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 5a9cb133f..f65204808 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -54,6 +54,13 @@ const ( // and reexecute to produce missing historical state necessary to run a specific // trace. defaultTraceReexec = uint64(128) + + // defaultTracechainMemLimit is the size of the triedb, at which traceChain + // switches over and tries to use a disk-backed database instead of building + // on top of memory. + // For non-archive nodes, this limit _will_ be overblown, as disk-backed tries + // will only be found every ~15K blocks or so. + defaultTracechainMemLimit = common.StorageSize(500 * 1024 * 1024) ) // Backend interface provides the common API services (that are provided by @@ -68,7 +75,10 @@ type Backend interface { ChainConfig() *params.ChainConfig Engine() consensus.Engine ChainDb() ethdb.Database - StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) + // StateAtBlock returns the state corresponding to the stateroot of the block. + // N.B: For executing transactions on block N, the required stateRoot is block N-1, + // so this method should be called with the parent. + StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive, preferDisk bool) (*state.StateDB, error) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) } @@ -321,6 +331,7 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config } close(results) }() + var preferDisk bool // Feed all the blocks both into the tracer, as well as fast process concurrently for number = start.NumberU64(); number < end.NumberU64(); number++ { // Stop tracing if interruption was requested @@ -350,18 +361,24 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config } // Prepare the statedb for tracing. Don't use the live database for // tracing to avoid persisting state junks into the database. 
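defaultTracechainMemLimit above caps how much trie data traceChain keeps in memory before it asks for a disk-backed state instead; the switch itself appears in the hunk that follows. The decision, pulled out into a standalone sketch; that trie.Database.Size returns two StorageSize components is taken from the patch, the constant value is copied from it:

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
)

// Assumed to match defaultTracechainMemLimit above: 500 MiB.
const tracechainMemLimit = common.StorageSize(500 * 1024 * 1024)

// shouldPreferDisk reports whether chain tracing should switch to disk-backed
// states: once the trie database's in-memory footprint (dirty nodes plus
// preimages) passes the limit, holding everything in memory stops paying off.
func shouldPreferDisk(trieDb *trie.Database, preferDisk bool) bool {
	if preferDisk || trieDb == nil {
		return preferDisk
	}
	s1, s2 := trieDb.Size()
	if s1+s2 > tracechainMemLimit {
		log.Info("Switching to prefer-disk mode for tracing", "size", s1+s2)
		return true
	}
	return false
}
```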
- statedb, err = api.backend.StateAtBlock(localctx, block, reexec, statedb, false) + statedb, err = api.backend.StateAtBlock(localctx, block, reexec, statedb, false, preferDisk) if err != nil { failed = err break } - if statedb.Database().TrieDB() != nil { + if trieDb := statedb.Database().TrieDB(); trieDb != nil { // Hold the reference for tracer, will be released at the final stage - statedb.Database().TrieDB().Reference(block.Root(), common.Hash{}) + trieDb.Reference(block.Root(), common.Hash{}) // Release the parent state because it's already held by the tracer if parent != (common.Hash{}) { - statedb.Database().TrieDB().Dereference(parent) + trieDb.Dereference(parent) + } + // Prefer disk if the trie db memory grows too much + s1, s2 := trieDb.Size() + if !preferDisk && (s1+s2) > defaultTracechainMemLimit { + log.Info("Switching to prefer-disk mode for tracing", "size", s1+s2) + preferDisk = true } } parent = block.Root() @@ -497,7 +514,7 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true) + statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, err } @@ -558,7 +575,7 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true) + statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, err } @@ -647,7 +664,7 @@ func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true) + statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, err } @@ -811,7 +828,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true) + statedb, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true, false) if err != nil { return nil, err } @@ -846,12 +863,14 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Context, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { // Assemble the structured logger or the JavaScript tracer var ( - tracer vm.Tracer + tracer vm.EVMLogger err error txContext = core.NewEVMTxContext(message) ) switch { - case config != nil && config.Tracer != nil: + case config == nil: + tracer = vm.NewStructLogger(nil) + case config.Tracer != nil: // Define a meaningful timeout of a single transaction trace timeout := defaultTraceTimeout if config.Timeout != nil { @@ -864,23 +883,29 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Contex tracer = tr(statedb) } else { // Constuct the JavaScript tracer to execute with - if tracer, err = New(*config.Tracer, txctx); err != nil { + if t, err := New(*config.Tracer, txctx); err != nil { return nil, err } // Handle timeouts and RPC cancellations deadlineCtx, cancel := context.WithTimeout(ctx, timeout) go func() { <-deadlineCtx.Done() - if deadlineCtx.Err() == 
context.DeadlineExceeded { - tracer.(*Tracer).Stop(errors.New("execution timeout")) + if errors.Is(deadlineCtx.Err(), context.DeadlineExceeded) { + t.Stop(errors.New("execution timeout")) } - }() - defer cancel() + }() + defer cancel() + tracer = t } - - case config == nil: - tracer = vm.NewStructLogger(nil) - default: tracer = vm.NewStructLogger(config.LogConfig) } @@ -913,7 +938,7 @@ func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Contex case interfaces.TracerResult: return tracer.GetResult() - case *Tracer: + case Tracer: return tracer.GetResult() default: diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 9afd59d59..ff5675e9e 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -138,7 +138,7 @@ func (b *testBackend) ChainDb() ethdb.Database { return b.chaindb } -func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) { +func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) { statedb, err := b.chain.StateAt(block.Root()) if err != nil { return nil, errStateNotFound @@ -325,7 +325,7 @@ func TestOverriddenTraceCall(t *testing.T) { tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, b.BaseFee(), nil), signer, accounts[0].key) b.AddTx(tx) })) - randomAccounts, tracer := newAccounts(3), "callTracer" + randomAccounts, tracer := newAccounts(3), "callTracerJs" var testSuite = []struct { blockNumber rpc.BlockNumber diff --git a/eth/tracers/internal/tracers/assets.go b/eth/tracers/internal/tracers/assets.go index 185967bdb..37e370240 100644 --- a/eth/tracers/internal/tracers/assets.go +++ b/eth/tracers/internal/tracers/assets.go @@ -3,7 +3,7 @@ // 4byte_tracer.js (2.224kB) // 4byte_tracer_legacy.js (2.933kB) // bigram_tracer.js (1.712kB) -// call_tracer.js (3.497kB) +// call_tracer_js.js (3.497kB) // call_tracer_legacy.js (8.956kB) // evmdis_tracer.js (4.195kB) // noop_tracer.js (1.271kB) @@ -139,22 +139,22 @@ func bigram_tracerJs() (*asset, error) { return a, nil } -var _call_tracerJs =
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x56\x5f\x6f\xdb\x38\x0c\x7f\x8e\x3f\x05\xaf\x0f\x4b\x82\x65\x71\xbb\x03\xf6\xd0\x2d\x03\x72\x45\xbb\x05\xe8\xb5\x45\x9a\xde\x50\x14\x7d\x50\x6c\xda\xd6\xa6\x48\x86\x44\x37\xcd\x6d\xfd\xee\x07\x4a\x76\x6a\x67\x59\x6f\x2f\x06\x2c\x92\x3f\xfe\xfb\x51\x54\x1c\xc3\x89\x29\x37\x56\xe6\x05\xc1\xdb\xc3\xb7\x47\xb0\x28\x10\x72\xf3\x06\xa9\x40\x8b\xd5\x0a\xa6\x15\x15\xc6\xba\x28\x8e\x61\x51\x48\x07\x99\x54\x08\xd2\x41\x29\x2c\x81\xc9\x80\x76\xf4\x95\x5c\x5a\x61\x37\xe3\x28\x8e\x83\xcd\x5e\x31\x23\x64\x16\x11\x9c\xc9\x68\x2d\x2c\x1e\xc3\xc6\x54\x90\x08\x0d\x16\x53\xe9\xc8\xca\x65\x45\x08\x92\x40\xe8\x34\x36\x16\x56\x26\x95\xd9\x86\x21\x25\x41\xa5\x53\xb4\xde\x35\xa1\x5d\xb9\x26\x8e\x4f\x17\x37\x70\x8e\xce\xa1\x85\x4f\xa8\xd1\x0a\x05\x57\xd5\x52\xc9\x04\xce\x65\x82\xda\x21\x08\x07\x25\x9f\xb8\x02\x53\x58\x7a\x38\x36\x3c\xe3\x50\xae\xeb\x50\xe0\xcc\x54\x3a\x15\x24\x8d\x1e\x01\x4a\x8e\x1c\x1e\xd0\x3a\x69\x34\xfc\xd9\xb8\xaa\x01\x47\x60\x2c\x83\x0c\x04\x71\x02\x16\x4c\xc9\x76\x43\x10\x7a\x03\x4a\xd0\xb3\xe9\x6f\x14\xe4\x39\xef\x14\xa4\xf6\x6e\x0a\x53\x22\x50\x21\x88\xb3\x5e\x4b\xa5\x60\x89\x50\x39\xcc\x2a\x35\x62\xb4\x65\x45\xf0\x65\xb6\xf8\x7c\x79\xb3\x80\xe9\xc5\x2d\x7c\x99\xce\xe7\xd3\x8b\xc5\xed\x7b\x58\x4b\x2a\x4c\x45\x80\x0f\x18\xa0\xe4\xaa\x54\x12\x53\x58\x0b\x6b\x85\xa6\x0d\x98\x8c\x11\xfe\x3e\x9d\x9f\x7c\x9e\x5e\x2c\xa6\x7f\xcd\xce\x67\x8b\x5b\x30\x16\xce\x66\x8b\x8b\xd3\xeb\x6b\x38\xbb\x9c\xc3\x14\xae\xa6\xf3\xc5\xec\xe4\xe6\x7c\x3a\x87\xab\x9b\xf9\xd5\xe5\xf5\xe9\x18\xae\x91\xa3\x42\xb6\xff\xff\x9a\x67\xbe\x7b\x16\x21\x45\x12\x52\xb9\xa6\x12\xb7\xa6\x02\x57\x98\x4a\xa5\x50\x88\x07\x04\x8b\x09\xca\x07\x4c\x41\x40\x62\xca\xcd\x6f\x37\x95\xb1\x84\x32\x3a\xf7\x39\xff\x92\x90\x30\xcb\x40\x1b\x1a\x81\x43\x84\x0f\x05\x51\x79\x1c\xc7\xeb\xf5\x7a\x9c\xeb\x6a\x6c\x6c\x1e\xab\x00\xe7\xe2\x8f\xe3\x28\x62\xd0\x44\x28\x75\x66\xc5\x0a\x17\x56\x24\x68\xb9\xee\xce\xc3\x6b\x5c\x7b\x21\x64\x2c\x05\xb2\x22\x91\x3a\x87\x15\x52\x61\x52\x07\x64\xc0\x62\x69\x2c\xd5\x9d\x02\xa9\x33\x63\x57\x9e\x51\x3e\xd8\x25\x37\x46\x6a\x42\xab\x85\x82\x15\x3a\x27\x72\xf4\x2c\x16\x0c\xa6\x9d\x48\xc8\x53\xe6\x7b\xd4\x63\x3f\x8e\x44\xf2\xed\x18\xee\xbe\x3f\xdd\x8f\xa2\x5e\x26\x2a\x45\xc7\x90\x55\xda\x6b\x0d\x94\xc9\x47\x90\x2e\x87\xf0\xfd\x69\x14\xf5\x2c\xba\xae\x38\xa1\xc7\x5a\x1c\xf5\x7a\x71\x0c\x57\x16\x4b\x66\xb9\xa9\x98\x9d\xb5\x73\x1f\x62\xd4\xeb\x3d\x08\x0b\x01\x01\x26\xde\xa0\x47\x9b\x12\x8f\x01\x00\x12\x7a\x1c\xf3\xcf\x88\x4f\x33\x6b\x56\xfe\x94\xcc\x67\x7c\x64\x1f\x63\x3e\x1a\x7a\x21\x19\x2f\x6a\x0b\xc9\x04\xd1\x83\x50\x95\x87\xeb\x1f\x3e\xf6\xe1\xb5\x07\xf5\x67\x63\x32\xd7\x64\xa5\xce\x07\x47\xef\x82\x6a\x2e\x5c\x80\xa9\x55\x97\x32\x9f\x69\xf2\x68\xb9\x70\xc3\xbd\x06\x37\x0e\xd3\xe3\xfd\x06\x2c\xda\x63\x24\x75\x59\xd1\x71\x27\x56\x7f\x14\xa4\xa6\xa2\x20\x7e\x96\x86\x23\x2f\x7e\x8a\x7a\x3d\x99\xc1\x80\x0a\xe9\xc6\xdb\x3e\xdd\x1d\xde\x87\x1f\xf8\x63\x32\xf1\x37\x55\x26\x35\xa6\xa1\xfe\x75\x7b\x6a\x85\x09\xfc\xc2\xf4\x45\x70\xb4\xd6\xd8\x97\xc0\x83\xc2\x3e\x70\x2f\x61\x70\x40\xe5\x10\x18\x9f\x73\xfa\x6d\xc4\xad\x72\x2b\xc0\x8e\x4a\x07\x03\x5e\xbd\xda\x23\x3e\xc0\x47\x4c\x2a\xa6\x26\x58\x7c\x40\x4b\x98\x1e\xc0\x8f\x1f\x35\xed\xea\xfa\xc2\x64\x32\x39\x38\x7c\x3c\x18\xd6\x71\xa4\xa8\x90\xb0\xab\xe3\x63\x88\x38\x46\xaa\xac\x0e\xd9\x66\x52\x0b\x25\xff\xc5\xda\xed\x30\xea\xf1\x4c\x20\x8f\x5a\x6b\x24\xfc\xd8\x06\x64\x26\xbc\x1f\xe5\x0e\xdd\xbd\xc2\x38\x47\x5a\x6c\x4a\x1c\x0c\x5b\x94\x0f\x44\xd8\xca\xcf\xac\x59\x0d\x86\xcf\xb4\xdf\x11\x2f\x4c\x23\xac\x79\xb6\x23\x9f\xf1\x69\xa3\xe2\x0
9\xdf\xe5\xee\x56\xf1\x93\x70\x83\x61\x8b\xbe\xfd\xa3\x77\xfd\x0e\x07\xb7\x9a\xff\xf0\x34\x0d\x86\x3b\xdd\xf4\xb9\x71\x9e\x61\xda\x26\xbf\x70\x53\x1b\x77\xe7\xa4\xf6\xd2\x65\xd3\xb8\xac\x5c\x31\xe0\xdf\xa6\xc6\x8f\x92\x76\x4b\x3c\x0f\x4d\xd8\x16\x5a\xa1\xfe\x89\x96\x63\x85\x3a\xa7\xa2\x4e\x83\x35\x3e\xc2\x51\xdd\xf5\x56\x73\x76\xbd\x9b\x72\x30\xdc\xe6\x54\x8f\x37\x4c\xf6\x95\x2f\x04\x51\x17\x91\xd5\x7e\x2e\x64\xe3\xab\xa1\xf9\x8e\xdd\x29\x1f\x07\x77\x1c\x63\xad\xb5\x67\x5a\x42\x34\x0d\x83\xdb\xcd\x7e\x06\xbb\xf4\xd2\xc1\xd0\xc3\xd5\x73\xd8\x32\x6e\x42\x68\xa6\x2c\xb8\xf4\x22\xa6\xa6\x77\xdb\x3f\x99\x9f\x4e\x17\xa7\x7d\x9e\x9a\xbd\x92\xb7\xfd\x26\xa0\x66\x70\x82\x9a\xf1\x67\x4f\x51\xf3\xe1\x6a\xbf\x99\xc0\x51\x93\xd9\xce\x85\xa1\x50\xbf\x39\x6a\x2e\xb3\xbd\xf9\xbe\x68\x00\x77\xf7\x5b\x4f\x2f\x28\x76\x98\xc4\xda\xcc\xa6\x38\x86\x66\x94\xf9\x5d\x60\x51\x10\x3a\x7e\x18\x30\x1b\xcc\xf2\x2b\x26\xbc\x5c\x79\xe9\xf2\x3e\xf6\xaa\x90\xa2\x93\x16\x53\xc8\x24\xaa\x14\x0c\xbf\x10\xf9\xe9\xf1\xd5\x19\xed\x01\x1d\x5a\xc9\x88\x7e\x0f\x8f\xc3\x6b\x56\x32\xa8\x96\x09\xd2\x06\x32\x14\x54\x59\xe4\xf5\x5d\x0a\xe7\x60\x85\x42\x4b\x9d\x67\x95\x52\x1b\x30\x36\x45\x06\x0f\xf7\x8a\xf3\x80\x64\x78\xc1\x5b\x07\xeb\xc2\x40\x6a\x74\xbf\x5e\xea\xa5\x45\x7e\xaf\x8d\xe0\x6b\xe5\x88\x5f\x75\xa5\x12\x1b\x90\x34\x8e\x7a\x4d\x52\xed\xfd\xcc\x99\x6f\x47\xc4\x19\xbe\x10\x7f\x5e\xbe\x4d\x9b\xbb\xdb\xd7\x1f\xf3\x5f\x77\xef\xd6\xdd\xee\x6e\xdc\xe7\xe9\xef\xae\xd7\x66\x82\xba\x3b\xb4\x3d\x57\xdd\x45\xe9\x25\xfe\xaf\xbb\x22\x5b\xdc\xf7\x02\xcf\xe0\xad\x81\xff\x0b\x51\xca\x55\x3b\x27\xb9\x0a\xf1\x78\x2e\x6c\xd5\xfd\x5f\x73\xbf\x71\x17\x07\x5c\x9c\x6f\xb8\xe1\x87\x71\xa8\x51\xcd\x41\xe6\x6d\x38\xb8\xfb\x86\x9b\xfb\xfd\x3c\xad\xa7\xa0\xa5\xd7\x30\xb3\xb9\x3f\x83\xe8\x85\xc5\xbd\x0d\x42\x4e\x0e\xdf\x83\xfc\xd0\x36\xa8\xef\xb0\xf7\x20\x5f\xbf\x6e\x5c\xb6\xe5\x77\xf2\xbe\xb9\xc2\xb6\x0b\x6a\x47\x3e\x6c\x07\x54\x6f\xb4\xa0\x12\xf5\x9e\xa2\xa7\xe8\xbf\x00\x00\x00\xff\xff\x2a\xac\x9f\xff\xa9\x0d\x00\x00") +var _call_tracer_jsJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x56\x5f\x6f\xdb\x38\x0c\x7f\x8e\x3f\x05\xaf\x0f\x4b\x82\x65\x71\xbb\x03\xf6\xd0\x2d\x03\x72\x45\xbb\x05\xe8\xb5\x45\x9a\xde\x50\x14\x7d\x50\x6c\xda\xd6\xa6\x48\x86\x44\x37\xcd\x6d\xfd\xee\x07\x4a\x76\x6a\x67\x59\x6f\x2f\x06\x2c\x92\x3f\xfe\xfb\x51\x54\x1c\xc3\x89\x29\x37\x56\xe6\x05\xc1\xdb\xc3\xb7\x47\xb0\x28\x10\x72\xf3\x06\xa9\x40\x8b\xd5\x0a\xa6\x15\x15\xc6\xba\x28\x8e\x61\x51\x48\x07\x99\x54\x08\xd2\x41\x29\x2c\x81\xc9\x80\x76\xf4\x95\x5c\x5a\x61\x37\xe3\x28\x8e\x83\xcd\x5e\x31\x23\x64\x16\x11\x9c\xc9\x68\x2d\x2c\x1e\xc3\xc6\x54\x90\x08\x0d\x16\x53\xe9\xc8\xca\x65\x45\x08\x92\x40\xe8\x34\x36\x16\x56\x26\x95\xd9\x86\x21\x25\x41\xa5\x53\xb4\xde\x35\xa1\x5d\xb9\x26\x8e\x4f\x17\x37\x70\x8e\xce\xa1\x85\x4f\xa8\xd1\x0a\x05\x57\xd5\x52\xc9\x04\xce\x65\x82\xda\x21\x08\x07\x25\x9f\xb8\x02\x53\x58\x7a\x38\x36\x3c\xe3\x50\xae\xeb\x50\xe0\xcc\x54\x3a\x15\x24\x8d\x1e\x01\x4a\x8e\x1c\x1e\xd0\x3a\x69\x34\xfc\xd9\xb8\xaa\x01\x47\x60\x2c\x83\x0c\x04\x71\x02\x16\x4c\xc9\x76\x43\x10\x7a\x03\x4a\xd0\xb3\xe9\x6f\x14\xe4\x39\xef\x14\xa4\xf6\x6e\x0a\x53\x22\x50\x21\x88\xb3\x5e\x4b\xa5\x60\x89\x50\x39\xcc\x2a\x35\x62\xb4\x65\x45\xf0\x65\xb6\xf8\x7c\x79\xb3\x80\xe9\xc5\x2d\x7c\x99\xce\xe7\xd3\x8b\xc5\xed\x7b\x58\x4b\x2a\x4c\x45\x80\x0f\x18\xa0\xe4\xaa\x54\x12\x53\x58\x0b\x6b\x85\xa6\x0d\x98\x8c\x11\xfe\x3e\x9d\x9f\x7c\x9e\x5e\x2c\xa6\x7f\xcd\xce\x67\x8b\x5b\x30\x16\xce\x66\x8b\x8b\xd3\xeb\x6b\x38\xbb\x9c\xc3\x14\xae\xa6\xf3\xc5\xec\xe4\xe6\x7c\x3a\x87\xab\x9b\xf9\xd5\xe5\xf5\xe9\x18\xae\x91\xa3\x42\xb6\xff\xff\x9a\x67\xbe\x7b\x16\x21\x45\x12\x52\xb9\xa6\x12\xb7\xa6\x02\x57\x98\x4a\xa5\x50\x88\x07\x04\x8b\x09\xca\x07\x4c\x41\x40\x62\xca\xcd\x6f\x37\x95\xb1\x84\x32\x3a\xf7\x39\xff\x92\x90\x30\xcb\x40\x1b\x1a\x81\x43\x84\x0f\x05\x51\x79\x1c\xc7\xeb\xf5\x7a\x9c\xeb\x6a\x6c\x6c\x1e\xab\x00\xe7\xe2\x8f\xe3\x28\x62\xd0\x44\x28\x75\x66\xc5\x0a\x17\x56\x24\x68\xb9\xee\xce\xc3\x6b\x5c\x7b\x21\x64\x2c\x05\xb2\x22\x91\x3a\x87\x15\x52\x61\x52\x07\x64\xc0\x62\x69\x2c\xd5\x9d\x02\xa9\x33\x63\x57\x9e\x51\x3e\xd8\x25\x37\x46\x6a\x42\xab\x85\x82\x15\x3a\x27\x72\xf4\x2c\x16\x0c\xa6\x9d\x48\xc8\x53\xe6\x7b\xd4\x63\x3f\x8e\x44\xf2\xed\x18\xee\xbe\x3f\xdd\x8f\xa2\x5e\x26\x2a\x45\xc7\x90\x55\xda\x6b\x0d\x94\xc9\x47\x90\x2e\x87\xf0\xfd\x69\x14\xf5\x2c\xba\xae\x38\xa1\xc7\x5a\x1c\xf5\x7a\x71\x0c\x57\x16\x4b\x66\xb9\xa9\x98\x9d\xb5\x73\x1f\x62\xd4\xeb\x3d\x08\x0b\x01\x01\x26\xde\xa0\x47\x9b\x12\x8f\x01\x00\x12\x7a\x1c\xf3\xcf\x88\x4f\x33\x6b\x56\xfe\x94\xcc\x67\x7c\x64\x1f\x63\x3e\x1a\x7a\x21\x19\x2f\x6a\x0b\xc9\x04\xd1\x83\x50\x95\x87\xeb\x1f\x3e\xf6\xe1\xb5\x07\xf5\x67\x63\x32\xd7\x64\xa5\xce\x07\x47\xef\x82\x6a\x2e\x5c\x80\xa9\x55\x97\x32\x9f\x69\xf2\x68\xb9\x70\xc3\xbd\x06\x37\x0e\xd3\xe3\xfd\x06\x2c\xda\x63\x24\x75\x59\xd1\x71\x27\x56\x7f\x14\xa4\xa6\xa2\x20\x7e\x96\x86\x23\x2f\x7e\x8a\x7a\x3d\x99\xc1\x80\x0a\xe9\xc6\xdb\x3e\xdd\x1d\xde\x87\x1f\xf8\x63\x32\xf1\x37\x55\x26\x35\xa6\xa1\xfe\x75\x7b\x6a\x85\x09\xfc\xc2\xf4\x45\x70\xb4\xd6\xd8\x97\xc0\x83\xc2\x3e\x70\x2f\x61\x70\x40\xe5\x10\x18\x9f\x73\xfa\x6d\xc4\xad\x72\x2b\xc0\x8e\x4a\x07\x03\x5e\xbd\xda\x23\x3e\xc0\x47\x4c\x2a\xa6\x26\x58\x7c\x40\x4b\x98\x1e\xc0\x8f\x1f\x35\xed\xea\xfa\xc2\x64\x32\x39\x38\x7c\x3c\x18\xd6\x71\xa4\xa8\x90\xb0\xab\xe3\x63\x88\x38\x46\xaa\xac\x0e\xd9\x66\x52\x0b\x25\xff\xc5\xda\xed\x30\xea\xf1\x4c\x20\x8f\x5a\x6b\x24\xfc\xd8\x06\x64\x26\xbc\x1f\xe5\x0e\xdd\xbd\xc2\x38\x47\x5a\x6c\x4a\x1c\x0c\x5b\x94\x0f\x44\xd8\xca\xcf\xac\x59\x0d\x86\xcf\xb4\xdf\x11\x2f\x4c\x23\xac\x79\xb6\x23\x9f\xf1\x69\xa3\xe2\x0
9\xdf\xe5\xee\x56\xf1\x93\x70\x83\x61\x8b\xbe\xfd\xa3\x77\xfd\x0e\x07\xb7\x9a\xff\xf0\x34\x0d\x86\x3b\xdd\xf4\xb9\x71\x9e\x61\xda\x26\xbf\x70\x53\x1b\x77\xe7\xa4\xf6\xd2\x65\xd3\xb8\xac\x5c\x31\xe0\xdf\xa6\xc6\x8f\x92\x76\x4b\x3c\x0f\x4d\xd8\x16\x5a\xa1\xfe\x89\x96\x63\x85\x3a\xa7\xa2\x4e\x83\x35\x3e\xc2\x51\xdd\xf5\x56\x73\x76\xbd\x9b\x72\x30\xdc\xe6\x54\x8f\x37\x4c\xf6\x95\x2f\x04\x51\x17\x91\xd5\x7e\x2e\x64\xe3\xab\xa1\xf9\x8e\xdd\x29\x1f\x07\x77\x1c\x63\xad\xb5\x67\x5a\x42\x34\x0d\x83\xdb\xcd\x7e\x06\xbb\xf4\xd2\xc1\xd0\xc3\xd5\x73\xd8\x32\x6e\x42\x68\xa6\x2c\xb8\xf4\x22\xa6\xa6\x77\xdb\x3f\x99\x9f\x4e\x17\xa7\x7d\x9e\x9a\xbd\x92\xb7\xfd\x26\xa0\x66\x70\x82\x9a\xf1\x67\x4f\x51\xf3\xe1\x6a\xbf\x99\xc0\x51\x93\xd9\xce\x85\xa1\x50\xbf\x39\x6a\x2e\xb3\xbd\xf9\xbe\x68\x00\x77\xf7\x5b\x4f\x2f\x28\x76\x98\xc4\xda\xcc\xa6\x38\x86\x66\x94\xf9\x5d\x60\x51\x10\x3a\x7e\x18\x30\x1b\xcc\xf2\x2b\x26\xbc\x5c\x79\xe9\xf2\x3e\xf6\xaa\x90\xa2\x93\x16\x53\xc8\x24\xaa\x14\x0c\xbf\x10\xf9\xe9\xf1\xd5\x19\xed\x01\x1d\x5a\xc9\x88\x7e\x0f\x8f\xc3\x6b\x56\x32\xa8\x96\x09\xd2\x06\x32\x14\x54\x59\xe4\xf5\x5d\x0a\xe7\x60\x85\x42\x4b\x9d\x67\x95\x52\x1b\x30\x36\x45\x06\x0f\xf7\x8a\xf3\x80\x64\x78\xc1\x5b\x07\xeb\xc2\x40\x6a\x74\xbf\x5e\xea\xa5\x45\x7e\xaf\x8d\xe0\x6b\xe5\x88\x5f\x75\xa5\x12\x1b\x90\x34\x8e\x7a\x4d\x52\xed\xfd\xcc\x99\x6f\x47\xc4\x19\xbe\x10\x7f\x5e\xbe\x4d\x9b\xbb\xdb\xd7\x1f\xf3\x5f\x77\xef\xd6\xdd\xee\x6e\xdc\xe7\xe9\xef\xae\xd7\x66\x82\xba\x3b\xb4\x3d\x57\xdd\x45\xe9\x25\xfe\xaf\xbb\x22\x5b\xdc\xf7\x02\xcf\xe0\xad\x81\xff\x0b\x51\xca\x55\x3b\x27\xb9\x0a\xf1\x78\x2e\x6c\xd5\xfd\x5f\x73\xbf\x71\x17\x07\x5c\x9c\x6f\xb8\xe1\x87\x71\xa8\x51\xcd\x41\xe6\x6d\x38\xb8\xfb\x86\x9b\xfb\xfd\x3c\xad\xa7\xa0\xa5\xd7\x30\xb3\xb9\x3f\x83\xe8\x85\xc5\xbd\x0d\x42\x4e\x0e\xdf\x83\xfc\xd0\x36\xa8\xef\xb0\xf7\x20\x5f\xbf\x6e\x5c\xb6\xe5\x77\xf2\xbe\xb9\xc2\xb6\x0b\x6a\x47\x3e\x6c\x07\x54\x6f\xb4\xa0\x12\xf5\x9e\xa2\xa7\xe8\xbf\x00\x00\x00\xff\xff\x2a\xac\x9f\xff\xa9\x0d\x00\x00") -func call_tracerJsBytes() ([]byte, error) { +func call_tracer_jsJsBytes() ([]byte, error) { return bindataRead( - _call_tracerJs, - "call_tracer.js", + _call_tracer_jsJs, + "call_tracer_js.js", ) } -func call_tracerJs() (*asset, error) { - bytes, err := call_tracerJsBytes() +func call_tracer_jsJs() (*asset, error) { + bytes, err := call_tracer_jsJsBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "call_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "call_tracer_js.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x42, 0x13, 0x7a, 0x14, 0xbf, 0xa7, 0x49, 0x4f, 0xb4, 0x4f, 0x45, 0x1, 0xbc, 0x9e, 0xd1, 0x8e, 0xc7, 0xee, 0x61, 0xfa, 0x82, 0x52, 0xa4, 0x78, 0xfe, 0xff, 0xb1, 0x68, 0x1d, 0xcc, 0x1d, 0x8e}} return a, nil } @@ -393,7 +393,7 @@ var _bindata = map[string]func() (*asset, error){ "4byte_tracer.js": _4byte_tracerJs, "4byte_tracer_legacy.js": _4byte_tracer_legacyJs, "bigram_tracer.js": bigram_tracerJs, - "call_tracer.js": call_tracerJs, + "call_tracer_js.js": call_tracer_jsJs, "call_tracer_legacy.js": call_tracer_legacyJs, "evmdis_tracer.js": evmdis_tracerJs, "noop_tracer.js": noop_tracerJs, @@ -450,7 +450,7 @@ var _bintree = &bintree{nil, map[string]*bintree{ "4byte_tracer.js": {_4byte_tracerJs, map[string]*bintree{}}, "4byte_tracer_legacy.js": {_4byte_tracer_legacyJs, map[string]*bintree{}}, "bigram_tracer.js": {bigram_tracerJs, map[string]*bintree{}}, - "call_tracer.js": {call_tracerJs, 
map[string]*bintree{}}, + "call_tracer_js.js": {call_tracer_jsJs, map[string]*bintree{}}, "call_tracer_legacy.js": {call_tracer_legacyJs, map[string]*bintree{}}, "evmdis_tracer.js": {evmdis_tracerJs, map[string]*bintree{}}, "noop_tracer.js": {noop_tracerJs, map[string]*bintree{}}, diff --git a/eth/tracers/internal/tracers/call_tracer.js b/eth/tracers/internal/tracers/call_tracer_js.js similarity index 100% rename from eth/tracers/internal/tracers/call_tracer.js rename to eth/tracers/internal/tracers/call_tracer_js.js diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go new file mode 100644 index 000000000..8f22baa10 --- /dev/null +++ b/eth/tracers/native/call.go @@ -0,0 +1,170 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package native + +import ( + "encoding/json" + "errors" + "math/big" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers" +) + +func init() { + tracers.RegisterNativeTracer("callTracer", NewCallTracer) +} + +type callFrame struct { + Type string `json:"type"` + From string `json:"from"` + To string `json:"to,omitempty"` + Value string `json:"value,omitempty"` + Gas string `json:"gas"` + GasUsed string `json:"gasUsed"` + Input string `json:"input"` + Output string `json:"output,omitempty"` + Error string `json:"error,omitempty"` + Calls []callFrame `json:"calls,omitempty"` +} + +type callTracer struct { + callstack []callFrame + interrupt uint32 // Atomic flag to signal execution interruption + reason error // Textual reason for the interruption +} + +// NewCallTracer returns a native go tracer which tracks +// call frames of a tx, and implements vm.EVMLogger. +func NewCallTracer() tracers.Tracer { + // First callframe contains tx context info + // and is populated on start and end. 
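The new native/call.go begun above registers a Go implementation under the same "callTracer" name that previously resolved to the JavaScript asset (now kept as callTracerJs); the file continues below. Assuming tracers.New consults this native registry before falling back to the JS path, which is what the test harness later in this patch relies on, a brief usage sketch:

```go
package example

import (
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers"

	// The blank import runs the init() above, registering "callTracer".
	_ "github.com/ethereum/go-ethereum/eth/tracers/native"
)

// callTracingConfig builds a vm.Config that records call frames with the
// native tracer; after execution, tracer.GetResult() yields the nested JSON
// call tree ({"type":"CALL","from":...,"calls":[...]}).
func callTracingConfig() (vm.Config, tracers.Tracer, error) {
	tracer, err := tracers.New("callTracer", new(tracers.Context))
	if err != nil {
		return vm.Config{}, nil, err
	}
	return vm.Config{Debug: true, Tracer: tracer}, tracer, nil
}
```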
+ t := &callTracer{callstack: make([]callFrame, 1)} + return t +} + +func (t *callTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { + t.callstack[0] = callFrame{ + Type: "CALL", + From: addrToHex(from), + To: addrToHex(to), + Input: bytesToHex(input), + Gas: uintToHex(gas), + Value: bigToHex(value), + } + if create { + t.callstack[0].Type = "CREATE" + } +} + +func (t *callTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) { + t.callstack[0].GasUsed = uintToHex(gasUsed) + if err != nil { + t.callstack[0].Error = err.Error() + if err.Error() == "execution reverted" && len(output) > 0 { + t.callstack[0].Output = bytesToHex(output) + } + } else { + t.callstack[0].Output = bytesToHex(output) + } +} + +func (t *callTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { +} + +func (t *callTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) { +} + +func (t *callTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + // Skip if tracing was interrupted + if atomic.LoadUint32(&t.interrupt) > 0 { + // TODO: env.Cancel() + return + } + + call := callFrame{ + Type: typ.String(), + From: addrToHex(from), + To: addrToHex(to), + Input: bytesToHex(input), + Gas: uintToHex(gas), + Value: bigToHex(value), + } + t.callstack = append(t.callstack, call) +} + +func (t *callTracer) CaptureExit(output []byte, gasUsed uint64, err error) { + size := len(t.callstack) + if size <= 1 { + return + } + // pop call + call := t.callstack[size-1] + t.callstack = t.callstack[:size-1] + size -= 1 + + call.GasUsed = uintToHex(gasUsed) + if err == nil { + call.Output = bytesToHex(output) + } else { + call.Error = err.Error() + if call.Type == "CREATE" || call.Type == "CREATE2" { + call.To = "" + } + } + t.callstack[size-1].Calls = append(t.callstack[size-1].Calls, call) +} + +func (t *callTracer) GetResult() (json.RawMessage, error) { + if len(t.callstack) != 1 { + return nil, errors.New("incorrect number of top-level calls") + } + res, err := json.Marshal(t.callstack[0]) + if err != nil { + return nil, err + } + return json.RawMessage(res), t.reason +} + +func (t *callTracer) Stop(err error) { + t.reason = err + atomic.StoreUint32(&t.interrupt, 1) +} + +func bytesToHex(s []byte) string { + return "0x" + common.Bytes2Hex(s) +} + +func bigToHex(n *big.Int) string { + if n == nil { + return "" + } + return "0x" + n.Text(16) +} + +func uintToHex(n uint64) string { + return "0x" + strconv.FormatUint(n, 16) +} + +func addrToHex(a common.Address) string { + return strings.ToLower(a.Hex()) +} diff --git a/eth/tracers/native/noop.go b/eth/tracers/native/noop.go new file mode 100644 index 000000000..554bb18f1 --- /dev/null +++ b/eth/tracers/native/noop.go @@ -0,0 +1,46 @@ +package native + +import ( + "encoding/json" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers" +) + +func init() { + tracers.RegisterNativeTracer("noopTracerNative", NewNoopTracer) +} + +type noopTracer struct{} + +func NewNoopTracer() tracers.Tracer { + return &noopTracer{} +} + +func (t *noopTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { +} + +func (t 
*noopTracer) CaptureEnd(output []byte, gasUsed uint64, _ time.Duration, err error) { +} + +func (t *noopTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { +} + +func (t *noopTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) { +} + +func (t *noopTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { +} + +func (t *noopTracer) CaptureExit(output []byte, gasUsed uint64, err error) { +} + +func (t *noopTracer) GetResult() (json.RawMessage, error) { + return json.RawMessage(`{}`), nil +} + +func (t *noopTracer) Stop(err error) { +} diff --git a/eth/tracers/testing/calltrace_test.go b/eth/tracers/testing/calltrace_test.go new file mode 100644 index 000000000..03db904f4 --- /dev/null +++ b/eth/tracers/testing/calltrace_test.go @@ -0,0 +1,246 @@ +package testing + +import ( + "encoding/json" + "io/ioutil" + "math/big" + "path/filepath" + "reflect" + "strings" + "testing" + "unicode" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/tests" + + // Force-load the native, to trigger registration + _ "github.com/ethereum/go-ethereum/eth/tracers/native" +) + +type callContext struct { + Number math.HexOrDecimal64 `json:"number"` + Difficulty *math.HexOrDecimal256 `json:"difficulty"` + Time math.HexOrDecimal64 `json:"timestamp"` + GasLimit math.HexOrDecimal64 `json:"gasLimit"` + Miner common.Address `json:"miner"` +} + +// callTrace is the result of a callTracer run. +type callTrace struct { + Type string `json:"type"` + From common.Address `json:"from"` + To common.Address `json:"to"` + Input hexutil.Bytes `json:"input"` + Output hexutil.Bytes `json:"output"` + Gas *hexutil.Uint64 `json:"gas,omitempty"` + GasUsed *hexutil.Uint64 `json:"gasUsed,omitempty"` + Value *hexutil.Big `json:"value,omitempty"` + Error string `json:"error,omitempty"` + Calls []callTrace `json:"calls,omitempty"` +} + +// callTracerTest defines a single test to check the call tracer against. +type callTracerTest struct { + Genesis *core.Genesis `json:"genesis"` + Context *callContext `json:"context"` + Input string `json:"input"` + Result *callTrace `json:"result"` +} + +// Iterates over all the input-output datasets in the tracer test harness and +// runs the JavaScript tracers against them. 
+func TestCallTracerLegacy(t *testing.T) { + testCallTracer("callTracerLegacy", "call_tracer_legacy", t) +} + +func TestCallTracerJs(t *testing.T) { + testCallTracer("callTracerJs", "call_tracer", t) +} + +func TestCallTracerNative(t *testing.T) { + testCallTracer("callTracer", "call_tracer", t) +} + +func testCallTracer(tracerName string, dirPath string, t *testing.T) { + files, err := ioutil.ReadDir(filepath.Join("..", "testdata", dirPath)) + if err != nil { + t.Fatalf("failed to retrieve tracer test suite: %v", err) + } + for _, file := range files { + if !strings.HasSuffix(file.Name(), ".json") { + continue + } + file := file // capture range variable + t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) { + t.Parallel() + + var ( + test = new(callTracerTest) + tx = new(types.Transaction) + ) + // Call tracer test found, read if from disk + if blob, err := ioutil.ReadFile(filepath.Join("..", "testdata", dirPath, file.Name())); err != nil { + t.Fatalf("failed to read testcase: %v", err) + } else if err := json.Unmarshal(blob, test); err != nil { + t.Fatalf("failed to parse testcase: %v", err) + } + if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil { + t.Fatalf("failed to parse testcase input: %v", err) + } + // Configure a blockchain with the given prestate + var ( + signer = types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number))) + origin, _ = signer.Sender(tx) + txContext = vm.TxContext{ + Origin: origin, + GasPrice: tx.GasPrice(), + } + context = vm.BlockContext{ + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + Coinbase: test.Context.Miner, + BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), + Time: new(big.Int).SetUint64(uint64(test.Context.Time)), + Difficulty: (*big.Int)(test.Context.Difficulty), + GasLimit: uint64(test.Context.GasLimit), + } + _, statedb = tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) + ) + tracer, err := tracers.New(tracerName, new(tracers.Context)) + if err != nil { + t.Fatalf("failed to create call tracer: %v", err) + } + evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer}) + msg, err := tx.AsMessage(signer, nil) + if err != nil { + t.Fatalf("failed to prepare transaction for tracing: %v", err) + } + st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) + if _, err = st.TransitionDb(); err != nil { + t.Fatalf("failed to execute transaction: %v", err) + } + // Retrieve the trace result and compare against the etalon + res, err := tracer.GetResult() + if err != nil { + t.Fatalf("failed to retrieve trace result: %v", err) + } + ret := new(callTrace) + if err := json.Unmarshal(res, ret); err != nil { + t.Fatalf("failed to unmarshal trace result: %v", err) + } + + if !jsonEqual(ret, test.Result) { + // uncomment this for easier debugging + //have, _ := json.MarshalIndent(ret, "", " ") + //want, _ := json.MarshalIndent(test.Result, "", " ") + //t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", string(have), string(want)) + t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result) + } + }) + } +} + +// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to +// comparison +func jsonEqual(x, y interface{}) bool { + xTrace := new(callTrace) + yTrace := new(callTrace) + if xj, err := json.Marshal(x); err == nil { + json.Unmarshal(xj, xTrace) + } else { + return false + } + if yj, err := json.Marshal(y); err == nil { + 
json.Unmarshal(yj, yTrace) + } else { + return false + } + return reflect.DeepEqual(xTrace, yTrace) +} + +// camel converts a snake cased input string into a camel cased output. +func camel(str string) string { + pieces := strings.Split(str, "_") + for i := 1; i < len(pieces); i++ { + pieces[i] = string(unicode.ToUpper(rune(pieces[i][0]))) + pieces[i][1:] + } + return strings.Join(pieces, "") +} +func BenchmarkTracers(b *testing.B) { + files, err := ioutil.ReadDir(filepath.Join("..", "testdata", "call_tracer")) + if err != nil { + b.Fatalf("failed to retrieve tracer test suite: %v", err) + } + for _, file := range files { + if !strings.HasSuffix(file.Name(), ".json") { + continue + } + file := file // capture range variable + b.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(b *testing.B) { + blob, err := ioutil.ReadFile(filepath.Join("..", "testdata", "call_tracer", file.Name())) + if err != nil { + b.Fatalf("failed to read testcase: %v", err) + } + test := new(callTracerTest) + if err := json.Unmarshal(blob, test); err != nil { + b.Fatalf("failed to parse testcase: %v", err) + } + benchTracer("callTracerNative", test, b) + }) + } +} + +func benchTracer(tracerName string, test *callTracerTest, b *testing.B) { + // Configure a blockchain with the given prestate + tx := new(types.Transaction) + if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil { + b.Fatalf("failed to parse testcase input: %v", err) + } + signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number))) + msg, err := tx.AsMessage(signer, nil) + if err != nil { + b.Fatalf("failed to prepare transaction for tracing: %v", err) + } + origin, _ := signer.Sender(tx) + txContext := vm.TxContext{ + Origin: origin, + GasPrice: tx.GasPrice(), + } + context := vm.BlockContext{ + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + Coinbase: test.Context.Miner, + BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), + Time: new(big.Int).SetUint64(uint64(test.Context.Time)), + Difficulty: (*big.Int)(test.Context.Difficulty), + GasLimit: uint64(test.Context.GasLimit), + } + _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + tracer, err := tracers.New(tracerName, new(tracers.Context)) + if err != nil { + b.Fatalf("failed to create call tracer: %v", err) + } + evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer}) + snap := statedb.Snapshot() + st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) + if _, err = st.TransitionDb(); err != nil { + b.Fatalf("failed to execute transaction: %v", err) + } + if _, err = tracer.GetResult(); err != nil { + b.Fatal(err) + } + statedb.RevertToSnapshot(snap) + } +} diff --git a/eth/tracers/tracer.go b/eth/tracers/tracer.go index ed5600453..4fee7ed96 100644 --- a/eth/tracers/tracer.go +++ b/eth/tracers/tracer.go @@ -363,9 +363,9 @@ func (r *frameResult) pushObject(vm *duktape.Context) { vm.PutPropString(obj, "getError") } -// Tracer provides an implementation of Tracer that evaluates a Javascript +// jsTracer provides an implementation of Tracer that evaluates a Javascript // function for each VM execution step. 
-type Tracer struct { +type jsTracer struct { vm *duktape.Context // Javascript VM instance tracerObject int // Stack index of the tracer JavaScript object @@ -409,12 +409,8 @@ type Context struct { // New instantiates a new tracer instance. code specifies a Javascript snippet, // which must evaluate to an expression returning an object with 'step', 'fault' // and 'result' functions. -func New(code string, ctx *Context) (*Tracer, error) { - // Resolve any tracers by name and assemble the tracer object - if tracer, ok := tracer(code); ok { - code = tracer - } - tracer := &Tracer{ +func newJsTracer(code string, ctx *Context) (*jsTracer, error) { + tracer := &jsTracer{ vm: duktape.New(), ctx: make(map[string]interface{}), opWrapper: new(opWrapper), @@ -553,17 +549,10 @@ func New(code string, ctx *Context) (*Tracer, error) { tracer.vm.Pop() hasExit := tracer.vm.GetPropString(tracer.tracerObject, "exit") tracer.vm.Pop() - if hasEnter != hasExit { return nil, fmt.Errorf("trace object must expose either both or none of enter() and exit()") } - if !hasStep { - // If there's no step function, the enter and exit must be present - if !hasEnter { - return nil, fmt.Errorf("trace object must expose either step() or both enter() and exit()") - } - } - tracer.traceCallFrames = hasEnter + tracer.traceCallFrames = hasEnter && hasExit tracer.traceSteps = hasStep // Tracer is valid, inject the big int library to access large numbers @@ -627,14 +616,14 @@ func New(code string, ctx *Context) (*Tracer, error) { } // Stop terminates execution of the tracer at the first opportune moment. -func (jst *Tracer) Stop(err error) { +func (jst *jsTracer) Stop(err error) { jst.reason = err atomic.StoreUint32(&jst.interrupt, 1) } // call executes a method on a JS object, catching any errors, formatting and // returning them as error objects. -func (jst *Tracer) call(noret bool, method string, args ...string) (json.RawMessage, error) { +func (jst *jsTracer) call(noret bool, method string, args ...string) (json.RawMessage, error) { // Execute the JavaScript call and return any error jst.vm.PushString(method) for _, arg := range args { @@ -670,7 +659,7 @@ func wrapError(context string, err error) error { } // CaptureStart implements the Tracer interface to initialize the tracing operation. -func (jst *Tracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { +func (jst *jsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { jst.ctx["type"] = "CALL" if create { jst.ctx["type"] = "CREATE" @@ -700,7 +689,7 @@ func (jst *Tracer) CaptureStart(env *vm.EVM, from common.Address, to common.Addr } // CaptureState implements the Tracer interface to trace a single step of VM execution. 
-func (jst *Tracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { +func (jst *jsTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { if !jst.traceSteps { return } @@ -736,7 +725,7 @@ func (jst *Tracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost } // CaptureFault implements the Tracer interface to trace an execution fault -func (jst *Tracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { +func (jst *jsTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { if jst.err != nil { return } @@ -750,7 +739,7 @@ func (jst *Tracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost } // CaptureEnd is called after the call finishes to finalize the tracing. -func (jst *Tracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) { +func (jst *jsTracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) { jst.ctx["output"] = output jst.ctx["time"] = t.String() jst.ctx["gasUsed"] = gasUsed @@ -761,7 +750,7 @@ func (jst *Tracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, er } // CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). -func (jst *Tracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { +func (jst *jsTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { if !jst.traceCallFrames { return } @@ -791,7 +780,7 @@ func (jst *Tracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Ad // CaptureExit is called when EVM exits a scope, even if the scope didn't // execute any code. -func (jst *Tracer) CaptureExit(output []byte, gasUsed uint64, err error) { +func (jst *jsTracer) CaptureExit(output []byte, gasUsed uint64, err error) { if !jst.traceCallFrames { return } @@ -815,7 +804,7 @@ func (jst *Tracer) CaptureExit(output []byte, gasUsed uint64, err error) { } // GetResult calls the Javascript 'result' function and returns its value, or any accumulated error -func (jst *Tracer) GetResult() (json.RawMessage, error) { +func (jst *jsTracer) GetResult() (json.RawMessage, error) { // Transform the context into a JavaScript object and inject into the state obj := jst.vm.PushObject() @@ -837,7 +826,7 @@ func (jst *Tracer) GetResult() (json.RawMessage, error) { } // addToObj pushes a field to a JS object. 
-func (jst *Tracer) addToObj(obj int, key string, val interface{}) { +func (jst *jsTracer) addToObj(obj int, key string, val interface{}) { pushValue(jst.vm, val) jst.vm.PutPropString(obj, key) } diff --git a/eth/tracers/tracer_test.go b/eth/tracers/tracer_test.go index 63b09bdc6..0e78f34b6 100644 --- a/eth/tracers/tracer_test.go +++ b/eth/tracers/tracer_test.go @@ -58,7 +58,7 @@ func testCtx() *vmContext { return &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}} } -func runTrace(tracer *Tracer, vmctx *vmContext, chaincfg *params.ChainConfig) (json.RawMessage, error) { +func runTrace(tracer Tracer, vmctx *vmContext, chaincfg *params.ChainConfig) (json.RawMessage, error) { env := vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, chaincfg, vm.Config{Debug: true, Tracer: tracer}) var ( startGas uint64 = 10000 @@ -168,7 +168,7 @@ func TestHaltBetweenSteps(t *testing.T) { // TestNoStepExec tests a regular value transfer (no exec), and accessing the statedb // in 'result' func TestNoStepExec(t *testing.T) { - runEmptyTrace := func(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) { + runEmptyTrace := func(tracer Tracer, vmctx *vmContext) (json.RawMessage, error) { env := vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) startGas := uint64(10000) contract := vm.NewContract(account{}, account{}, big.NewInt(0), startGas) diff --git a/eth/tracers/tracers.go b/eth/tracers/tracers.go index 4e1ef23ad..79534c636 100644 --- a/eth/tracers/tracers.go +++ b/eth/tracers/tracers.go @@ -18,14 +18,53 @@ package tracers import ( + "encoding/json" "strings" "unicode" + "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/tracers/internal/tracers" ) -// all contains all the built in JavaScript tracers by name. -var all = make(map[string]string) +// Tracer interface extends vm.EVMLogger and additionally +// allows collecting the tracing result. +type Tracer interface { + vm.EVMLogger + GetResult() (json.RawMessage, error) + // Stop terminates execution of the tracer at the first opportune moment. + Stop(err error) +} + +var ( + nativeTracers map[string]func() Tracer = make(map[string]func() Tracer) + jsTracers = make(map[string]string) +) + +// RegisterNativeTracer makes native tracers which adhere +// to the `Tracer` interface available to the rest of the codebase. +// It is typically invoked in the `init()` function, e.g. see the `native/call.go`. +func RegisterNativeTracer(name string, ctor func() Tracer) { + nativeTracers[name] = ctor +} + +// New returns a new instance of a tracer, +// 1. If 'code' is the name of a registered native tracer, then that tracer +// is instantiated and returned +// 2. If 'code' is the name of a registered js-tracer, then that tracer is +// instantiated and returned +// 3. Otherwise, the code is interpreted as the js code of a js-tracer, and +// is evaluated and returned. +func New(code string, ctx *Context) (Tracer, error) { + // Resolve native tracer + if fn, ok := nativeTracers[code]; ok { + return fn(), nil + } + // Resolve js-tracers by name and assemble the tracer object + if tracer, ok := jsTracers[code]; ok { + code = tracer + } + return newJsTracer(code, ctx) +} // camel converts a snake cased input string into a camel cased output. 
func camel(str string) string { @@ -40,14 +79,6 @@ func camel(str string) string { func init() { for _, file := range tracers.AssetNames() { name := camel(strings.TrimSuffix(file, ".js")) - all[name] = string(tracers.MustAsset(file)) + jsTracers[name] = string(tracers.MustAsset(file)) } } - -// tracer retrieves a specific JavaScript tracer by name. -func tracer(name string) (string, bool) { - if tracer, ok := all[name]; ok { - return tracer, true - } - return "", false -} diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index fb817fbc5..a027caa96 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -20,23 +20,18 @@ import ( "crypto/ecdsa" "crypto/rand" "encoding/json" - "io/ioutil" "math/big" - "path/filepath" "reflect" - "strings" "testing" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/tests" ) @@ -104,20 +99,83 @@ type callTrace struct { Calls []callTrace `json:"calls,omitempty"` } -type callContext struct { - Number math.HexOrDecimal64 `json:"number"` - Difficulty *math.HexOrDecimal256 `json:"difficulty"` - Time math.HexOrDecimal64 `json:"timestamp"` - GasLimit math.HexOrDecimal64 `json:"gasLimit"` - Miner common.Address `json:"miner"` -} - -// callTracerTest defines a single test to check the call tracer against. -type callTracerTest struct { - Genesis *core.Genesis `json:"genesis"` - Context *callContext `json:"context"` - Input string `json:"input"` - Result *callTrace `json:"result"` +// TestZeroValueToNotExitCall tests the calltracer(s) on the following: +// Tx to A, A calls B with zero value. B does not already exist. 
+// Expected: that enter/exit is invoked and the inner call is shown in the result +func TestZeroValueToNotExitCall(t *testing.T) { + var to = common.HexToAddress("0x00000000000000000000000000000000deadbeef") + privkey, err := crypto.HexToECDSA("0000000000000000deadbeef00000000000000000000000000000000deadbeef") + if err != nil { + t.Fatalf("err %v", err) + } + signer := types.NewEIP155Signer(big.NewInt(1)) + tx, err := types.SignNewTx(privkey, signer, &types.LegacyTx{ + GasPrice: big.NewInt(0), + Gas: 50000, + To: &to, + }) + if err != nil { + t.Fatalf("err %v", err) + } + origin, _ := signer.Sender(tx) + txContext := vm.TxContext{ + Origin: origin, + GasPrice: big.NewInt(1), + } + context := vm.BlockContext{ + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + Coinbase: common.Address{}, + BlockNumber: new(big.Int).SetUint64(8000000), + Time: new(big.Int).SetUint64(5), + Difficulty: big.NewInt(0x30000), + GasLimit: uint64(6000000), + } + var code = []byte{ + byte(vm.PUSH1), 0x0, byte(vm.DUP1), byte(vm.DUP1), byte(vm.DUP1), // in and outs zero + byte(vm.DUP1), byte(vm.PUSH1), 0xff, byte(vm.GAS), // value=0,address=0xff, gas=GAS + byte(vm.CALL), + } + var alloc = core.GenesisAlloc{ + to: core.GenesisAccount{ + Nonce: 1, + Code: code, + }, + origin: core.GenesisAccount{ + Nonce: 0, + Balance: big.NewInt(500000000000000), + }, + } + _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false) + // Create the tracer, the EVM environment and run it + tracer, err := New("callTracerJs", new(Context)) + if err != nil { + t.Fatalf("failed to create call tracer: %v", err) + } + evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer}) + msg, err := tx.AsMessage(signer, nil) + if err != nil { + t.Fatalf("failed to prepare transaction for tracing: %v", err) + } + st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) + if _, err = st.TransitionDb(); err != nil { + t.Fatalf("failed to execute transaction: %v", err) + } + // Retrieve the trace result and compare against the etalon + res, err := tracer.GetResult() + if err != nil { + t.Fatalf("failed to retrieve trace result: %v", err) + } + have := new(callTrace) + if err := json.Unmarshal(res, have); err != nil { + t.Fatalf("failed to unmarshal trace result: %v", err) + } + wantStr := `{"type":"CALL","from":"0x682a80a6f560eec50d54e63cbeda1c324c5f8d1b","to":"0x00000000000000000000000000000000deadbeef","value":"0x0","gas":"0x7148","gasUsed":"0x2d0","input":"0x","output":"0x","calls":[{"type":"CALL","from":"0x00000000000000000000000000000000deadbeef","to":"0x00000000000000000000000000000000000000ff","value":"0x0","gas":"0x6cbf","gasUsed":"0x0","input":"0x","output":"0x"}]}` + want := new(callTrace) + json.Unmarshal([]byte(wantStr), want) + if !jsonEqual(have, want) { + t.Error("have != want") + } } func TestPrestateTracerCreate2(t *testing.T) { @@ -201,96 +259,6 @@ func TestPrestateTracerCreate2(t *testing.T) { } } -// Iterates over all the input-output datasets in the tracer test harness and -// runs the JavaScript tracers against them. 
-func TestCallTracerLegacy(t *testing.T) { - testCallTracer("callTracerLegacy", "call_tracer_legacy", t) -} - -func testCallTracer(tracer string, dirPath string, t *testing.T) { - files, err := ioutil.ReadDir(filepath.Join("testdata", dirPath)) - if err != nil { - t.Fatalf("failed to retrieve tracer test suite: %v", err) - } - for _, file := range files { - if !strings.HasSuffix(file.Name(), ".json") { - continue - } - file := file // capture range variable - t.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(t *testing.T) { - t.Parallel() - - // Call tracer test found, read if from disk - blob, err := ioutil.ReadFile(filepath.Join("testdata", dirPath, file.Name())) - if err != nil { - t.Fatalf("failed to read testcase: %v", err) - } - test := new(callTracerTest) - if err := json.Unmarshal(blob, test); err != nil { - t.Fatalf("failed to parse testcase: %v", err) - } - // Configure a blockchain with the given prestate - tx := new(types.Transaction) - if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil { - t.Fatalf("failed to parse testcase input: %v", err) - } - signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number))) - origin, _ := signer.Sender(tx) - txContext := vm.TxContext{ - Origin: origin, - GasPrice: tx.GasPrice(), - } - context := vm.BlockContext{ - CanTransfer: core.CanTransfer, - Transfer: core.Transfer, - Coinbase: test.Context.Miner, - BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), - Time: new(big.Int).SetUint64(uint64(test.Context.Time)), - Difficulty: (*big.Int)(test.Context.Difficulty), - GasLimit: uint64(test.Context.GasLimit), - } - _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) - - // Create the tracer, the EVM environment and run it - tracer, err := New(tracer, new(Context)) - if err != nil { - t.Fatalf("failed to create call tracer: %v", err) - } - evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer}) - - msg, err := tx.AsMessage(signer, nil) - if err != nil { - t.Fatalf("failed to prepare transaction for tracing: %v", err) - } - st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) - if _, err = st.TransitionDb(); err != nil { - t.Fatalf("failed to execute transaction: %v", err) - } - // Retrieve the trace result and compare against the etalon - res, err := tracer.GetResult() - if err != nil { - t.Fatalf("failed to retrieve trace result: %v", err) - } - ret := new(callTrace) - if err := json.Unmarshal(res, ret); err != nil { - t.Fatalf("failed to unmarshal trace result: %v", err) - } - - if !jsonEqual(ret, test.Result) { - // uncomment this for easier debugging - //have, _ := json.MarshalIndent(ret, "", " ") - //want, _ := json.MarshalIndent(test.Result, "", " ") - //t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", string(have), string(want)) - t.Fatalf("trace mismatch: \nhave %+v\nwant %+v", ret, test.Result) - } - }) - } -} - -func TestCallTracer(t *testing.T) { - testCallTracer("callTracer", "call_tracer", t) -} - // jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to // comparison func jsonEqual(x, y interface{}) bool { @@ -337,6 +305,7 @@ func BenchmarkTransactionTrace(b *testing.B) { Time: new(big.Int).SetUint64(uint64(5)), Difficulty: big.NewInt(0xffffffff), GasLimit: gas, + BaseFee: big.NewInt(8), } alloc := core.GenesisAlloc{} // The code pushes 'deadbeef' into memory, then the other params, and calls CREATE2, then returns @@ 
-386,73 +355,3 @@ func BenchmarkTransactionTrace(b *testing.B) { tracer.Reset() } } - -func BenchmarkTracers(b *testing.B) { - files, err := ioutil.ReadDir(filepath.Join("testdata", "call_tracer")) - if err != nil { - b.Fatalf("failed to retrieve tracer test suite: %v", err) - } - for _, file := range files { - if !strings.HasSuffix(file.Name(), ".json") { - continue - } - file := file // capture range variable - b.Run(camel(strings.TrimSuffix(file.Name(), ".json")), func(b *testing.B) { - blob, err := ioutil.ReadFile(filepath.Join("testdata", "call_tracer", file.Name())) - if err != nil { - b.Fatalf("failed to read testcase: %v", err) - } - test := new(callTracerTest) - if err := json.Unmarshal(blob, test); err != nil { - b.Fatalf("failed to parse testcase: %v", err) - } - benchTracer("callTracer", test, b) - }) - } -} - -func benchTracer(tracerName string, test *callTracerTest, b *testing.B) { - // Configure a blockchain with the given prestate - tx := new(types.Transaction) - if err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil { - b.Fatalf("failed to parse testcase input: %v", err) - } - signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number))) - msg, err := tx.AsMessage(signer, nil) - if err != nil { - b.Fatalf("failed to prepare transaction for tracing: %v", err) - } - origin, _ := signer.Sender(tx) - txContext := vm.TxContext{ - Origin: origin, - GasPrice: tx.GasPrice(), - } - context := vm.BlockContext{ - CanTransfer: core.CanTransfer, - Transfer: core.Transfer, - Coinbase: test.Context.Miner, - BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), - Time: new(big.Int).SetUint64(uint64(test.Context.Time)), - Difficulty: (*big.Int)(test.Context.Difficulty), - GasLimit: uint64(test.Context.GasLimit), - } - _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) - - // Create the tracer, the EVM environment and run it - tracer, err := New(tracerName, new(Context)) - if err != nil { - b.Fatalf("failed to create call tracer: %v", err) - } - evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer}) - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - snap := statedb.Snapshot() - st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas())) - if _, err = st.TransitionDb(); err != nil { - b.Fatalf("failed to execute transaction: %v", err) - } - statedb.RevertToSnapshot(snap) - } -} diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 9f6832313..18e0a4941 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -60,7 +60,7 @@ func (ec *Client) Close() { // Blockchain Access -// ChainId retrieves the current chain ID for transaction replay protection. +// ChainID retrieves the current chain ID for transaction replay protection. func (ec *Client) ChainID(ctx context.Context) (*big.Int, error) { var result hexutil.Big err := ec.c.CallContext(ctx, &result, "eth_chainId") diff --git a/ethdb/database.go b/ethdb/database.go index 3c6500d1d..0a5729c6c 100644 --- a/ethdb/database.go +++ b/ethdb/database.go @@ -76,12 +76,12 @@ type AncientReader interface { // Ancient retrieves an ancient binary blob from the append-only immutable files. Ancient(kind string, number uint64) ([]byte, error) - // ReadAncients retrieves multiple items in sequence, starting from the index 'start'. + // AncientRange retrieves multiple items in sequence, starting from the index 'start'. 
// It will return // - at most 'count' items, // - at least 1 item (even if exceeding the maxBytes), but will otherwise // return as many items as fit into maxBytes. - ReadAncients(kind string, start, count, maxBytes uint64) ([][]byte, error) + AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) // Ancients returns the ancient item numbers in the ancient store. Ancients() (uint64, error) @@ -90,6 +90,15 @@ type AncientReader interface { AncientSize(kind string) (uint64, error) } +// AncientBatchReader is the interface for 'batched' or 'atomic' reading. +type AncientBatchReader interface { + AncientReader + + // ReadAncients runs the given read operation while ensuring that no writes take place + // on the underlying freezer. + ReadAncients(fn func(AncientReader) error) (err error) +} + // AncientWriter contains the methods required to write to immutable ancient data. type AncientWriter interface { // ModifyAncients runs a write operation on the ancient store. @@ -117,7 +126,7 @@ type AncientWriteOp interface { // immutable ancient data. type Reader interface { KeyValueReader - AncientReader + AncientBatchReader } // Writer contains the methods required to write data to both key-value as well as @@ -130,7 +139,7 @@ type Writer interface { // AncientStore contains all the methods required to allow handling different // ancient data stores backing immutable chain data store. type AncientStore interface { - AncientReader + AncientBatchReader AncientWriter io.Closer } diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go index 9ff1a2ce1..9a782dedb 100644 --- a/ethdb/leveldb/leveldb.go +++ b/ethdb/leveldb/leveldb.go @@ -455,7 +455,7 @@ type batch struct { // Put inserts the given value into the batch for later committing. func (b *batch) Put(key, value []byte) error { b.b.Put(key, value) - b.size += len(value) + b.size += len(key) + len(value) return nil } diff --git a/ethdb/memorydb/memorydb.go b/ethdb/memorydb/memorydb.go index fedc9e326..78181e860 100644 --- a/ethdb/memorydb/memorydb.go +++ b/ethdb/memorydb/memorydb.go @@ -204,7 +204,7 @@ type batch struct { // Put inserts the given value into the batch for later committing. 
func (b *batch) Put(key, value []byte) error { b.writes = append(b.writes, keyvalue{common.CopyBytes(key), common.CopyBytes(value), false}) - b.size += len(value) + b.size += len(key) + len(value) return nil } diff --git a/les/api_backend.go b/les/api_backend.go index d5144dfbf..11a9ca128 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -324,7 +324,7 @@ func (b *LesApiBackend) CurrentHeader() *types.Header { return b.eth.blockchain.CurrentHeader() } -func (b *LesApiBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool) (*state.StateDB, error) { +func (b *LesApiBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) { return b.eth.stateAtBlock(ctx, block, reexec) } diff --git a/les/client.go b/les/client.go index 5d07c783e..93319cb93 100644 --- a/les/client.go +++ b/les/client.go @@ -88,7 +88,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { if err != nil { return nil, err } - chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideLondon) + chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideArrowGlacier) if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat { return nil, genesisErr } diff --git a/les/fetcher_test.go b/les/fetcher_test.go index ef700651e..a922ab0f8 100644 --- a/les/fetcher_test.go +++ b/les/fetcher_test.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/params" ) // verifyImportEvent verifies that one single event arrive on an import channel. @@ -247,7 +248,7 @@ func testInvalidAnnounces(t *testing.T, protocol int) { // Prepare announcement by latest header. headerOne := s.backend.Blockchain().GetHeaderByNumber(1) hash, number := headerOne.Hash(), headerOne.Number.Uint64() - td := big.NewInt(200) // bad td + td := big.NewInt(params.GenesisDifficulty.Int64() + 200) // bad td // Sign the announcement if necessary. 
announce := announceData{hash, number, td, 0, nil} diff --git a/les/vflux/client/serverpool_test.go b/les/vflux/client/serverpool_test.go index c777d6c16..763f72f03 100644 --- a/les/vflux/client/serverpool_test.go +++ b/les/vflux/client/serverpool_test.go @@ -19,6 +19,7 @@ package client import ( "math/rand" "strconv" + "sync" "sync/atomic" "testing" "time" @@ -52,7 +53,7 @@ func testNodeIndex(id enode.ID) int { type ServerPoolTest struct { db ethdb.KeyValueStore clock *mclock.Simulated - quit chan struct{} + quit chan chan struct{} preNeg, preNegFail bool vt *ValueTracker sp *ServerPool @@ -62,6 +63,8 @@ type ServerPoolTest struct { trusted []string waitCount, waitEnded int32 + lock sync.Mutex + cycle, conn, servedConn int serviceCycles, dialCount int disconnect map[int][]int @@ -112,7 +115,9 @@ func (s *ServerPoolTest) start() { testQuery = func(node *enode.Node) int { idx := testNodeIndex(node.ID()) n := &s.testNodes[idx] + s.lock.Lock() canConnect := !n.connected && n.connectCycles != 0 && s.cycle >= n.nextConnCycle + s.lock.Unlock() if s.preNegFail { // simulate a scenario where UDP queries never work s.beginWait() @@ -155,7 +160,7 @@ func (s *ServerPoolTest) start() { s.sp.unixTime = func() int64 { return int64(s.clock.Now()) / int64(time.Second) } s.disconnect = make(map[int][]int) s.sp.Start() - s.quit = make(chan struct{}) + s.quit = make(chan chan struct{}) go func() { last := int32(-1) for { @@ -167,7 +172,8 @@ func (s *ServerPoolTest) start() { s.clock.Run(time.Second) } last = c - case <-s.quit: + case quit := <-s.quit: + close(quit) return } } @@ -175,7 +181,9 @@ func (s *ServerPoolTest) start() { } func (s *ServerPoolTest) stop() { - close(s.quit) + quit := make(chan struct{}) + s.quit <- quit + <-quit s.sp.Stop() s.spi.Close() for i := range s.testNodes { @@ -234,7 +242,9 @@ func (s *ServerPoolTest) run() { } s.serviceCycles += s.servedConn s.clock.Run(time.Second) + s.lock.Lock() s.cycle++ + s.lock.Unlock() } } diff --git a/les/vflux/client/valuetracker.go b/les/vflux/client/valuetracker.go index f5390d092..dcd2fcdfd 100644 --- a/les/vflux/client/valuetracker.go +++ b/les/vflux/client/valuetracker.go @@ -50,7 +50,7 @@ type NodeValueTracker struct { lastTransfer mclock.AbsTime basket serverBasket reqCosts []uint64 - reqValues *[]float64 + reqValues []float64 } // UpdateCosts updates the node value tracker's request cost table @@ -58,14 +58,14 @@ func (nv *NodeValueTracker) UpdateCosts(reqCosts []uint64) { nv.vt.lock.Lock() defer nv.vt.lock.Unlock() - nv.updateCosts(reqCosts, &nv.vt.refBasket.reqValues, nv.vt.refBasket.reqValueFactor(reqCosts)) + nv.updateCosts(reqCosts, nv.vt.refBasket.reqValues, nv.vt.refBasket.reqValueFactor(reqCosts)) } // updateCosts updates the request cost table of the server. The request value factor // is also updated based on the given cost table and the current reference basket. // Note that the contents of the referenced reqValues slice will not change; a new // reference is passed if the values are updated by ValueTracker. 
-func (nv *NodeValueTracker) updateCosts(reqCosts []uint64, reqValues *[]float64, rvFactor float64) { +func (nv *NodeValueTracker) updateCosts(reqCosts []uint64, reqValues []float64, rvFactor float64) { nv.lock.Lock() defer nv.lock.Unlock() @@ -112,7 +112,7 @@ func (nv *NodeValueTracker) Served(reqs []ServedRequest, respTime time.Duration) var value float64 for _, r := range reqs { nv.basket.add(r.ReqType, r.Amount, nv.reqCosts[r.ReqType]*uint64(r.Amount), expFactor) - value += (*nv.reqValues)[r.ReqType] * float64(r.Amount) + value += nv.reqValues[r.ReqType] * float64(r.Amount) } nv.rtStats.Add(respTime, value, expFactor) } @@ -356,7 +356,7 @@ func (vt *ValueTracker) Register(id enode.ID) *NodeValueTracker { reqTypeCount := len(vt.refBasket.reqValues) nv.reqCosts = make([]uint64, reqTypeCount) nv.lastTransfer = vt.clock.Now() - nv.reqValues = &vt.refBasket.reqValues + nv.reqValues = vt.refBasket.reqValues nv.basket.init(reqTypeCount) vt.connected[id] = nv @@ -476,7 +476,7 @@ func (vt *ValueTracker) periodicUpdate() { vt.refBasket.normalize() vt.refBasket.updateReqValues() for _, nv := range vt.connected { - nv.updateCosts(nv.reqCosts, &vt.refBasket.reqValues, vt.refBasket.reqValueFactor(nv.reqCosts)) + nv.updateCosts(nv.reqCosts, vt.refBasket.reqValues, vt.refBasket.reqValueFactor(nv.reqCosts)) } vt.saveToDb() } diff --git a/les/vflux/server/balance_tracker.go b/les/vflux/server/balance_tracker.go index 746697a8c..9695e7963 100644 --- a/les/vflux/server/balance_tracker.go +++ b/les/vflux/server/balance_tracker.go @@ -223,8 +223,9 @@ func (bt *balanceTracker) BalanceOperation(id enode.ID, connAddress string, cb f var nb *nodeBalance if node := bt.ns.GetNode(id); node != nil { nb, _ = bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance) - } else { - node = enode.SignNull(&enr.Record{}, id) + } + if nb == nil { + node := enode.SignNull(&enr.Record{}, id) nb = bt.newNodeBalance(node, connAddress, false) } cb(nb) diff --git a/miner/worker.go b/miner/worker.go index 5399adf1d..77e868c2b 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -593,6 +593,9 @@ func (w *worker) taskLoop() { if err := w.engine.Seal(w.chain, task.block, w.resultCh, stopCh); err != nil { log.Warn("Block sealing failed", "err", err) + w.pendingMu.Lock() + delete(w.pendingTasks, sealHash) + w.pendingMu.Unlock() } case <-w.exitCh: interrupt() @@ -632,17 +635,23 @@ func (w *worker) resultLoop() { receipts = make([]*types.Receipt, len(task.receipts)) logs []*types.Log ) - for i, receipt := range task.receipts { + for i, taskReceipt := range task.receipts { + receipt := new(types.Receipt) + receipts[i] = receipt + *receipt = *taskReceipt + // add block location fields receipt.BlockHash = hash receipt.BlockNumber = block.Number() receipt.TransactionIndex = uint(i) - receipts[i] = new(types.Receipt) - *receipts[i] = *receipt // Update the block hash in all logs since it is now available and not when the // receipt/log of individual transactions were created. - for _, log := range receipt.Logs { + receipt.Logs = make([]*types.Log, len(taskReceipt.Logs)) + for i, taskLog := range taskReceipt.Logs { + log := new(types.Log) + receipt.Logs[i] = log + *log = *taskLog log.BlockHash = hash } logs = append(logs, receipt.Logs...) 
diff --git a/mobile/geth.go b/mobile/geth.go index 704d432e0..bad9e0589 100644 --- a/mobile/geth.go +++ b/mobile/geth.go @@ -165,6 +165,13 @@ func NewNode(datadir string, config *NodeConfig) (stack *Node, _ error) { config.EthereumNetworkID = 3 } } + // If we have the Sepolia testnet, hard code the chain configs too + if config.EthereumGenesis == SepoliaGenesis() { + genesis.Config = params.SepoliaChainConfig + if config.EthereumNetworkID == 1 { + config.EthereumNetworkID = 11155111 + } + } // If we have the Rinkeby testnet, hard code the chain configs too if config.EthereumGenesis == RinkebyGenesis() { genesis.Config = params.RinkebyChainConfig diff --git a/mobile/params.go b/mobile/params.go index 0fc197c9e..2f4240b2e 100644 --- a/mobile/params.go +++ b/mobile/params.go @@ -41,6 +41,15 @@ func RopstenGenesis() string { return string(enc) } +// SepoliaGenesis returns the JSON spec to use for the Sepolia test network. +func SepoliaGenesis() string { + enc, err := json.Marshal(core.DefaultSepoliaGenesisBlock()) + if err != nil { + panic(err) + } + return string(enc) +} + // RinkebyGenesis returns the JSON spec to use for the Rinkeby test network func RinkebyGenesis() string { enc, err := json.Marshal(core.DefaultRinkebyGenesisBlock()) diff --git a/p2p/simulations/network.go b/p2p/simulations/network.go index 9b5e2c37f..962910dd2 100644 --- a/p2p/simulations/network.go +++ b/p2p/simulations/network.go @@ -22,7 +22,6 @@ import ( "encoding/json" "errors" "fmt" - "io" "math/rand" "sync" "time" @@ -695,12 +694,6 @@ func (net *Network) Shutdown() { if err := node.Stop(); err != nil { log.Warn("Can't stop node", "id", node.ID(), "err", err) } - // If the node has the close method, call it. - if closer, ok := node.Node.(io.Closer); ok { - if err := closer.Close(); err != nil { - log.Warn("Can't close node", "id", node.ID(), "err", err) - } - } } close(net.quitc) } diff --git a/params/bootnodes.go b/params/bootnodes.go index bc291449e..e3b5570d5 100644 --- a/params/bootnodes.go +++ b/params/bootnodes.go @@ -41,6 +41,15 @@ var RopstenBootnodes = []string{ "enode://94c15d1b9e2fe7ce56e458b9a3b672ef11894ddedd0c6f247e0f1d3487f52b66208fb4aeb8179fce6e3a749ea93ed147c37976d67af557508d199d9594c35f09@192.81.208.223:30303", // @gpip } +// SepoliaBootnodes are the enode URLs of the P2P bootstrap nodes running on the +// Sepolia test network. +var SepoliaBootnodes = []string{ + // geth + "enode://9246d00bc8fd1742e5ad2428b80fc4dc45d786283e05ef6edbd9002cbc335d40998444732fbe921cb88e1d2c73d1b1de53bae6a2237996e9bfe14f871baf7066@18.168.182.86:30303", + // besu + "enode://ec66ddcf1a974950bd4c782789a7e04f8aa7110a72569b6e65fcd51e937e74eed303b1ea734e4d19cfaec9fbff9b6ee65bf31dcb50ba79acce9dd63a6aca61c7@52.14.151.177:30303", +} + // RinkebyBootnodes are the enode URLs of the P2P bootstrap nodes running on the // Rinkeby test network. 
var RinkebyBootnodes = []string{ diff --git a/params/config.go b/params/config.go index fdbff9302..f767c1c4b 100644 --- a/params/config.go +++ b/params/config.go @@ -29,6 +29,7 @@ import ( var ( MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") RopstenGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d") + SepoliaGenesisHash = common.HexToHash("0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9") RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177") GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") ) @@ -38,6 +39,7 @@ var ( var TrustedCheckpoints = map[common.Hash]*TrustedCheckpoint{ MainnetGenesisHash: MainnetTrustedCheckpoint, RopstenGenesisHash: RopstenTrustedCheckpoint, + SepoliaGenesisHash: SepoliaTrustedCheckpoint, RinkebyGenesisHash: RinkebyTrustedCheckpoint, GoerliGenesisHash: GoerliTrustedCheckpoint, } @@ -69,15 +71,16 @@ var ( MuirGlacierBlock: big.NewInt(9_200_000), BerlinBlock: big.NewInt(12_244_000), LondonBlock: big.NewInt(12_965_000), + ArrowGlacierBlock: big.NewInt(13_773_000), Ethash: new(EthashConfig), } // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. MainnetTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 395, - SectionHead: common.HexToHash("0xbfca95b8c1de014e252288e9c32029825fadbff58285f5b54556525e480dbb5b"), - CHTRoot: common.HexToHash("0x2ccf3dbb58eb6375e037fdd981ca5778359e4b8fa0270c2878b14361e64161e7"), - BloomRoot: common.HexToHash("0x2d46ec65a6941a2dc1e682f8f81f3d24192021f492fdf6ef0fdd51acb0f4ba0f"), + SectionIndex: 413, + SectionHead: common.HexToHash("0x8aa8e64ceadcdc5f23bc41d2acb7295a261a5cf680bb00a34f0e01af08200083"), + CHTRoot: common.HexToHash("0x008af584d385a2610706c5a439d39f15ddd4b691c5d42603f65ae576f703f477"), + BloomRoot: common.HexToHash("0x5a081af71a588f4d90bced242545b08904ad4fb92f7effff2ceb6e50e6dec157"), } // MainnetCheckpointOracle contains a set of configs for the main network oracle. @@ -115,10 +118,10 @@ var ( // RopstenTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network. RopstenTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 329, - SectionHead: common.HexToHash("0xe66f7038333a01fb95dc9ea03e5a2bdaf4b833cdcb9e393b9127e013bd64d39b"), - CHTRoot: common.HexToHash("0x1b0c883338ac0d032122800c155a2e73105fbfebfaa50436893282bc2d9feec5"), - BloomRoot: common.HexToHash("0x3cc98c88d283bf002378246f22c653007655cbcea6ed89f98d739f73bd341a01"), + SectionIndex: 346, + SectionHead: common.HexToHash("0xafa0384ebd13a751fb7475aaa7fc08ac308925c8b2e2195bca2d4ab1878a7a84"), + CHTRoot: common.HexToHash("0x522ae1f334bfa36033b2315d0b9954052780700b69448ecea8d5877e0f7ee477"), + BloomRoot: common.HexToHash("0x4093fd53b0d2cc50181dca353fe66f03ae113e7cb65f869a4dfb5905de6a0493"), } // RopstenCheckpointOracle contains a set of configs for the Ropsten test network oracle. @@ -134,6 +137,33 @@ var ( Threshold: 2, } + // SepoliaChainConfig contains the chain parameters to run a node on the Sepolia test network. 
+ SepoliaChainConfig = &ChainConfig{ + ChainID: big.NewInt(11155111), + HomesteadBlock: big.NewInt(0), + DAOForkBlock: nil, + DAOForkSupport: true, + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(EthashConfig), + } + + // SepoliaTrustedCheckpoint contains the light client trusted checkpoint for the Sepolia test network. + SepoliaTrustedCheckpoint = &TrustedCheckpoint{ + SectionIndex: 1, + SectionHead: common.HexToHash("0x5dde65e28745b10ff9e9b86499c3a3edc03587b27a06564a4342baf3a37de869"), + CHTRoot: common.HexToHash("0x042a0d914f7baa4f28f14d12291e5f346e88c5b9d95127bf5422a8afeacd27e8"), + BloomRoot: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), + } + // RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network. RinkebyChainConfig = &ChainConfig{ ChainID: big.NewInt(4), @@ -151,6 +181,7 @@ var ( MuirGlacierBlock: nil, BerlinBlock: big.NewInt(8_290_928), LondonBlock: big.NewInt(8_897_988), + ArrowGlacierBlock: nil, Clique: &CliqueConfig{ Period: 15, Epoch: 30000, @@ -159,10 +190,10 @@ var ( // RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network. RinkebyTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 276, - SectionHead: common.HexToHash("0xea89a4b04e3da9bd688e316f8de669396b6d4a38a19d2cd96a00b70d58b836aa"), - CHTRoot: common.HexToHash("0xd6889d0bf6673c0d2c1cf6e9098a6fe5b30888a115b6112796aa8ee8efc4a723"), - BloomRoot: common.HexToHash("0x6009a9256b34b8bde3a3f094afb647ba5d73237546017b9025d64ac1ff54c47c"), + SectionIndex: 292, + SectionHead: common.HexToHash("0x4185c2f1bb85ecaa04409d1008ff0761092ea2e94e8a71d64b1a5abc37b81414"), + CHTRoot: common.HexToHash("0x03b0191e6140effe0b88bb7c97bfb794a275d3543cb3190662fb72d9beea423c"), + BloomRoot: common.HexToHash("0x3d5f6edccc87536dcbc0dd3aae97a318205c617dd3957b4261470c71481629e2"), } // RinkebyCheckpointOracle contains a set of configs for the Rinkeby test network oracle. @@ -193,6 +224,7 @@ var ( MuirGlacierBlock: nil, BerlinBlock: big.NewInt(4_460_644), LondonBlock: big.NewInt(5_062_605), + ArrowGlacierBlock: nil, Clique: &CliqueConfig{ Period: 15, Epoch: 30000, @@ -201,10 +233,10 @@ var ( // GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network. GoerliTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 160, - SectionHead: common.HexToHash("0xb5a666c790dc35a5613d04ebba8ba47a850b45a15d9b95ad7745c35ae034b5a5"), - CHTRoot: common.HexToHash("0x6b4e00df52bdc38fa6c26c8ef595c2ad6184963ea36ab08ee744af460aa735e1"), - BloomRoot: common.HexToHash("0x8fa88f5e50190cb25243aeee262a1a9e4434a06f8d455885dcc1b5fc48c33836"), + SectionIndex: 176, + SectionHead: common.HexToHash("0x2de018858528434f93adb40b1f03f2304a86d31b4ef2b1f930da0134f5c32427"), + CHTRoot: common.HexToHash("0x8c17e497d38088321c147abe4acbdfb3c0cab7d7a2b97e07404540f04d12747e"), + BloomRoot: common.HexToHash("0x02a41b6606bd3f741bd6ae88792d75b1ad8cf0ea5e28fbaa03bc8b95cbd20034"), } // GoerliCheckpointOracle contains a set of configs for the Goerli test network oracle. @@ -225,16 +257,16 @@ var ( // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. 
- AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil} + AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil} // AllCliqueProtocolChanges contains every protocol change (EIPs) introduced // and accepted by the Ethereum core developers into the Clique consensus. // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. - AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}} + AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}} - TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil} + TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil} TestRules = TestChainConfig.Rules(new(big.Int)) ) @@ -313,6 +345,7 @@ type ChainConfig struct { MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` // Eip-2384 (bomb delay) switch block (nil = no fork, 0 = already activated) BerlinBlock *big.Int `json:"berlinBlock,omitempty"` // Berlin switch block (nil = no fork, 0 = already on berlin) LondonBlock *big.Int `json:"londonBlock,omitempty"` // London switch block (nil = no fork, 0 = already on london) + ArrowGlacierBlock *big.Int `json:"arrowGlacierBlock,omitempty"` // Eip-4345 (bomb delay) switch block (nil = no fork, 0 = already activated) // TerminalTotalDifficulty is the amount of total difficulty reached by // the network that triggers the consensus upgrade. 
@@ -353,7 +386,7 @@ func (c *ChainConfig) String() string { default: engine = "unknown" } - return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Engine: %v}", + return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Engine: %v}", c.ChainID, c.HomesteadBlock, c.DAOForkBlock, @@ -368,6 +401,7 @@ func (c *ChainConfig) String() string { c.MuirGlacierBlock, c.BerlinBlock, c.LondonBlock, + c.ArrowGlacierBlock, engine, ) } @@ -434,6 +468,11 @@ func (c *ChainConfig) IsLondon(num *big.Int) bool { return isForked(c.LondonBlock, num) } +// IsArrowGlacier returns whether num is either equal to the Arrow Glacier (EIP-4345) fork block or greater. +func (c *ChainConfig) IsArrowGlacier(num *big.Int) bool { + return isForked(c.ArrowGlacierBlock, num) +} + // IsTerminalPoWBlock returns whether the given block is the last block of PoW stage. func (c *ChainConfig) IsTerminalPoWBlock(parentTotalDiff *big.Int, totalDiff *big.Int) bool { if c.TerminalTotalDifficulty == nil { @@ -482,6 +521,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "muirGlacierBlock", block: c.MuirGlacierBlock, optional: true}, {name: "berlinBlock", block: c.BerlinBlock}, {name: "londonBlock", block: c.LondonBlock}, + {name: "arrowGlacierBlock", block: c.ArrowGlacierBlock, optional: true}, } { if lastFork.name != "" { // Next one must be higher number @@ -551,6 +591,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi if isForkIncompatible(c.LondonBlock, newcfg.LondonBlock, head) { return newCompatError("London fork block", c.LondonBlock, newcfg.LondonBlock) } + if isForkIncompatible(c.ArrowGlacierBlock, newcfg.ArrowGlacierBlock, head) { + return newCompatError("Arrow Glacier fork block", c.ArrowGlacierBlock, newcfg.ArrowGlacierBlock) + } return nil } diff --git a/params/version.go b/params/version.go index 25e3e30ab..c28ded936 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 10 // Minor version component of the current release - VersionPatch = 11 // Patch version component of the current release + VersionPatch = 12 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) diff --git a/rpc/client.go b/rpc/client.go index e9deb3f6d..e43760c22 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -17,7 +17,6 @@ package rpc import ( - "bytes" "context" "encoding/json" "errors" @@ -360,7 +359,10 @@ func (c *Client) BatchCall(b []BatchElem) error { // // Note that batch calls may not be executed atomically on the server side. 
func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error { - msgs := make([]*jsonrpcMessage, len(b)) + var ( + msgs = make([]*jsonrpcMessage, len(b)) + byID = make(map[string]int, len(b)) + ) op := &requestOp{ ids: make([]json.RawMessage, len(b)), resp: make(chan *jsonrpcMessage, len(b)), @@ -372,6 +374,7 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error { } msgs[i] = msg op.ids[i] = msg.ID + byID[string(msg.ID)] = i } var err error @@ -391,13 +394,7 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error { // Find the element corresponding to this response. // The element is guaranteed to be present because dispatch // only sends valid IDs to our channel. - var elem *BatchElem - for i := range msgs { - if bytes.Equal(msgs[i].ID, resp.ID) { - elem = &b[i] - break - } - } + elem := &b[byID[string(resp.ID)]] if resp.Error != nil { elem.Error = resp.Error continue @@ -426,12 +423,12 @@ func (c *Client) Notify(ctx context.Context, method string, args ...interface{}) return c.send(ctx, op, msg) } -// EthSubscribe registers a subscripion under the "eth" namespace. +// EthSubscribe registers a subscription under the "eth" namespace. func (c *Client) EthSubscribe(ctx context.Context, channel interface{}, args ...interface{}) (*ClientSubscription, error) { return c.Subscribe(ctx, "eth", channel, args...) } -// ShhSubscribe registers a subscripion under the "shh" namespace. +// ShhSubscribe registers a subscription under the "shh" namespace. // Deprecated: use Subscribe(ctx, "shh", ...). func (c *Client) ShhSubscribe(ctx context.Context, channel interface{}, args ...interface{}) (*ClientSubscription, error) { return c.Subscribe(ctx, "shh", channel, args...) diff --git a/rpc/types.go b/rpc/types.go index d9c2317a7..ca52d474d 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -186,6 +186,16 @@ func (bnh *BlockNumberOrHash) Number() (BlockNumber, bool) { return BlockNumber(0), false } +func (bnh *BlockNumberOrHash) String() string { + if bnh.BlockNumber != nil { + return strconv.Itoa(int(*bnh.BlockNumber)) + } + if bnh.BlockHash != nil { + return bnh.BlockHash.String() + } + return "nil" +} + func (bnh *BlockNumberOrHash) Hash() (common.Hash, bool) { if bnh.BlockHash != nil { return *bnh.BlockHash, true diff --git a/tests/difficulty_test.go b/tests/difficulty_test.go index acbf96e71..192dff12c 100644 --- a/tests/difficulty_test.go +++ b/tests/difficulty_test.go @@ -76,6 +76,9 @@ func TestDifficulty(t *testing.T) { dt.config("EIP2384", params.ChainConfig{ MuirGlacierBlock: big.NewInt(0), }) + dt.config("EIP4345", params.ChainConfig{ + ArrowGlacierBlock: big.NewInt(0), + }) dt.config("difficulty.json", mainnetChainConfig) dt.walk(t, difficultyTestDir, func(t *testing.T, name string, test *DifficultyTest) { diff --git a/tests/init.go b/tests/init.go index b0a38e68b..d6b5b3043 100644 --- a/tests/init.go +++ b/tests/init.go @@ -151,6 +151,7 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), BerlinBlock: big.NewInt(0), }, "BerlinToLondonAt5": { @@ -163,6 +164,7 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), BerlinBlock: big.NewInt(0), LondonBlock: big.NewInt(5), }, @@ -176,10 +178,11 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: 
big.NewInt(0), IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), BerlinBlock: big.NewInt(0), LondonBlock: big.NewInt(0), }, - "Aleut": { + "ArrowGlacier": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), EIP150Block: big.NewInt(0), @@ -189,8 +192,10 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), BerlinBlock: big.NewInt(0), LondonBlock: big.NewInt(0), + ArrowGlacierBlock: big.NewInt(0), }, }
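The patch above introduces tracers.RegisterNativeTracer and a Tracer interface (vm.EVMLogger plus GetResult and Stop). As a minimal sketch of how a Go tracer might plug into that hook — not part of this change — the file path, the "opcountTracer" name, the NewOpcountTracer constructor, and the assumption that vm.EVMLogger consists of exactly the Capture* callbacks shown in the jsTracer diff are all illustrative:

// Hypothetical native tracer, registered so tracers.New("opcountTracer", ctx) resolves to it.
package native

import (
	"encoding/json"
	"math/big"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers"
)

func init() {
	// Make the tracer reachable by name; callers would import this package for side effects.
	tracers.RegisterNativeTracer("opcountTracer", NewOpcountTracer)
}

// opcountTracer counts the opcodes executed during a transaction.
type opcountTracer struct {
	count     uint64
	interrupt uint32 // interruption flag set by Stop
	reason    error  // error supplied to Stop
}

// NewOpcountTracer matches the ctor signature expected by RegisterNativeTracer.
func NewOpcountTracer() tracers.Tracer { return &opcountTracer{} }

func (t *opcountTracer) CaptureStart(env *vm.EVM, from, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
}

func (t *opcountTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
	if atomic.LoadUint32(&t.interrupt) > 0 {
		return // stop counting once Stop has been requested
	}
	t.count++
}

func (t *opcountTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
}

func (t *opcountTracer) CaptureEnd(output []byte, gasUsed uint64, d time.Duration, err error) {}

func (t *opcountTracer) CaptureEnter(typ vm.OpCode, from, to common.Address, input []byte, gas uint64, value *big.Int) {
}

func (t *opcountTracer) CaptureExit(output []byte, gasUsed uint64, err error) {}

// GetResult returns the opcode count as JSON, plus any error recorded by Stop.
func (t *opcountTracer) GetResult() (json.RawMessage, error) {
	return json.Marshal(t.count), t.reason
}

// Stop terminates execution of the tracer at the first opportune moment.
func (t *opcountTracer) Stop(err error) {
	t.reason = err
	atomic.StoreUint32(&t.interrupt, 1)
}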
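The ethdb change renames the multi-item getter to AncientRange and adds AncientBatchReader.ReadAncients, which runs a read callback while the freezer is guaranteed not to be written. A small sketch of a caller, under assumptions not taken from this patch (the readPair helper name, the "headers"/"bodies" freezer table names, and passing the store as an ethdb.AncientBatchReader):

package example

import "github.com/ethereum/go-ethereum/ethdb"

// readPair fetches a header and body blob for the same block number inside one
// atomic freezer read, so a concurrent truncation cannot land between the two calls.
func readPair(db ethdb.AncientBatchReader, number uint64) (header, body []byte, err error) {
	err = db.ReadAncients(func(op ethdb.AncientReader) error {
		var e error
		if header, e = op.Ancient("headers", number); e != nil {
			return e
		}
		body, e = op.Ancient("bodies", number)
		return e
	})
	return header, body, err
}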